// SPDX-License-Identifier: GPL-2.0-only
/*
 * Block Translation Table
 * Copyright (c) 2014-2015, Intel Corporation.
 */
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/hdreg.h>
#include <linux/sizes.h>
#include <linux/ndctl.h>
#include <linux/fs.h>
#include <linux/nd.h>
#include <linux/backing-dev.h>
#include <linux/cleanup.h>
#include "btt.h"
#include "nd.h"

enum log_ent_request {
	LOG_NEW_ENT = 0,
	LOG_OLD_ENT
};

static struct device *to_dev(struct arena_info *arena)
{
	return &arena->nd_btt->dev;
}

static u64 adjust_initial_offset(struct nd_btt *nd_btt, u64 offset)
{
	return offset + nd_btt->initial_offset;
}

static int arena_read_bytes(struct arena_info *arena, resource_size_t offset,
		void *buf, size_t n, unsigned long flags)
{
	struct nd_btt *nd_btt = arena->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* arena offsets may be shifted from the base of the device */
	offset = adjust_initial_offset(nd_btt, offset);
	return nvdimm_read_bytes(ndns, offset, buf, n, flags);
}

static int arena_write_bytes(struct arena_info *arena, resource_size_t offset,
		void *buf, size_t n, unsigned long flags)
{
	struct nd_btt *nd_btt = arena->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* arena offsets may be shifted from the base of the device */
	offset = adjust_initial_offset(nd_btt, offset);
	return nvdimm_write_bytes(ndns, offset, buf, n, flags);
}

static int btt_info_write(struct arena_info *arena, struct btt_sb *super)
{
	int ret;

	/*
	 * infooff and info2off should always be at least 512B aligned.
	 * We rely on that to make sure rw_bytes does error clearing
	 * correctly, so make sure that is the case.
	 */
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->infooff, 512),
		"arena->infooff: %#llx is unaligned\n", arena->infooff);
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->info2off, 512),
		"arena->info2off: %#llx is unaligned\n", arena->info2off);

	ret = arena_write_bytes(arena, arena->info2off, super,
			sizeof(struct btt_sb), 0);
	if (ret)
		return ret;

	return arena_write_bytes(arena, arena->infooff, super,
			sizeof(struct btt_sb), 0);
}

static int btt_info_read(struct arena_info *arena, struct btt_sb *super)
{
	return arena_read_bytes(arena, arena->infooff, super,
			sizeof(struct btt_sb), 0);
}

/*
 * 'raw' version of btt_map write
 * Assumptions:
 *   mapping is in little-endian
 *   mapping contains 'E' and 'Z' flags as desired
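 *
 * e.g. a 'normal' entry (neither Z nor E set) pointing at postmap block 5
 * is stored as cpu_to_le32(5 | MAP_ENT_NORMAL) - see btt_map_write() below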
 */
static int __btt_map_write(struct arena_info *arena, u32 lba, __le32 mapping,
			unsigned long flags)
{
	u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);

	if (unlikely(lba >= arena->external_nlba))
		dev_err_ratelimited(to_dev(arena),
			"%s: lba %#x out of range (max: %#x)\n",
			__func__, lba, arena->external_nlba);
	return arena_write_bytes(arena, ns_off, &mapping, MAP_ENT_SIZE, flags);
}

static int btt_map_write(struct arena_info *arena, u32 lba, u32 mapping,
			u32 z_flag, u32 e_flag, unsigned long rwb_flags)
{
	u32 ze;
	__le32 mapping_le;

	/*
	 * This 'mapping' is supposed to be just the LBA mapping, without
	 * any flags set, so strip the flag bits.
	 */
	mapping = ent_lba(mapping);

	ze = (z_flag << 1) + e_flag;
	switch (ze) {
	case 0:
		/*
		 * We want to set neither of the Z or E flags, and
		 * in the actual layout, this means setting the bit
		 * positions of both to '1' to indicate a 'normal'
		 * map entry
		 */
		mapping |= MAP_ENT_NORMAL;
		break;
	case 1:
		mapping |= (1 << MAP_ERR_SHIFT);
		break;
	case 2:
		mapping |= (1 << MAP_TRIM_SHIFT);
		break;
	default:
		/*
		 * The case where Z and E are both sent in as '1' could be
		 * construed as a valid 'normal' case, but we decide not to,
		 * to avoid confusion
		 */
		dev_err_ratelimited(to_dev(arena),
			"Invalid use of Z and E flags\n");
		return -EIO;
	}

	mapping_le = cpu_to_le32(mapping);
	return __btt_map_write(arena, lba, mapping_le, rwb_flags);
}

static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping,
			int *trim, int *error, unsigned long rwb_flags)
{
	int ret;
	__le32 in;
	u32 raw_mapping, postmap, ze, z_flag, e_flag;
	u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);

	if (unlikely(lba >= arena->external_nlba))
		dev_err_ratelimited(to_dev(arena),
			"%s: lba %#x out of range (max: %#x)\n",
			__func__, lba, arena->external_nlba);

	ret = arena_read_bytes(arena, ns_off, &in, MAP_ENT_SIZE, rwb_flags);
	if (ret)
		return ret;

	raw_mapping = le32_to_cpu(in);

	z_flag = ent_z_flag(raw_mapping);
	e_flag = ent_e_flag(raw_mapping);
	ze = (z_flag << 1) + e_flag;
	postmap = ent_lba(raw_mapping);

	/* Reuse the {z,e}_flag variables for *trim and *error */
	z_flag = 0;
	e_flag = 0;

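	/*
	 * The two on-media flag bits encode four states:
	 *   (Z, E) = (0, 0): initial/unwritten - identity mapping
	 *   (Z, E) = (0, 1): the block has a media error
	 *   (Z, E) = (1, 0): the block was zeroed/trimmed
	 *   (Z, E) = (1, 1): a normal, valid mapping
	 */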
	switch (ze) {
	case 0:
		/* Initial state. Return postmap = premap */
		*mapping = lba;
		break;
	case 1:
		*mapping = postmap;
		e_flag = 1;
		break;
	case 2:
		*mapping = postmap;
		z_flag = 1;
		break;
	case 3:
		*mapping = postmap;
		break;
	default:
		return -EIO;
	}

	if (trim)
		*trim = z_flag;
	if (error)
		*error = e_flag;

	return ret;
}

static int btt_log_group_read(struct arena_info *arena, u32 lane,
			struct log_group *log)
{
	return arena_read_bytes(arena,
			arena->logoff + (lane * LOG_GRP_SIZE), log,
			LOG_GRP_SIZE, 0);
}

static struct dentry *debugfs_root;

static void arena_debugfs_init(struct arena_info *a, struct dentry *parent,
				int idx)
{
	char dirname[32];
	struct dentry *d;

	/* If, for some reason, the parent bttN was not created, exit */
	if (!parent)
		return;

	snprintf(dirname, 32, "arena%d", idx);
	d = debugfs_create_dir(dirname, parent);
	if (IS_ERR_OR_NULL(d))
		return;
	a->debugfs_dir = d;

	debugfs_create_x64("size", S_IRUGO, d, &a->size);
	debugfs_create_x64("external_lba_start", S_IRUGO, d,
			&a->external_lba_start);
	debugfs_create_x32("internal_nlba", S_IRUGO, d, &a->internal_nlba);
	debugfs_create_u32("internal_lbasize", S_IRUGO, d,
			&a->internal_lbasize);
	debugfs_create_x32("external_nlba", S_IRUGO, d, &a->external_nlba);
	debugfs_create_u32("external_lbasize", S_IRUGO, d,
			&a->external_lbasize);
	debugfs_create_u32("nfree", S_IRUGO, d, &a->nfree);
	debugfs_create_u16("version_major", S_IRUGO, d, &a->version_major);
	debugfs_create_u16("version_minor", S_IRUGO, d, &a->version_minor);
	debugfs_create_x64("nextoff", S_IRUGO, d, &a->nextoff);
	debugfs_create_x64("infooff", S_IRUGO, d, &a->infooff);
	debugfs_create_x64("dataoff", S_IRUGO, d, &a->dataoff);
	debugfs_create_x64("mapoff", S_IRUGO, d, &a->mapoff);
	debugfs_create_x64("logoff", S_IRUGO, d, &a->logoff);
	debugfs_create_x64("info2off", S_IRUGO, d, &a->info2off);
	debugfs_create_x32("flags", S_IRUGO, d, &a->flags);
	debugfs_create_u32("log_index_0", S_IRUGO, d, &a->log_index[0]);
	debugfs_create_u32("log_index_1", S_IRUGO, d, &a->log_index[1]);
}

static void btt_debugfs_init(struct btt *btt)
{
	int i = 0;
	struct arena_info *arena;

	btt->debugfs_dir = debugfs_create_dir(dev_name(&btt->nd_btt->dev),
						debugfs_root);
	if (IS_ERR_OR_NULL(btt->debugfs_dir))
		return;

	list_for_each_entry(arena, &btt->arena_list, list) {
		arena_debugfs_init(arena, btt->debugfs_dir, i);
		i++;
	}
}

static u32 log_seq(struct log_group *log, int log_idx)
{
	return le32_to_cpu(log->ent[log_idx].seq);
}

/*
 * This function accepts two log entries, and uses the
 * sequence number to find the 'older' entry.
 * If the group is still in its initial state (seq == 0), the first
 * entry is stamped with seq 1 and treated as the 'old' one.
 * Finally, it returns which of the entries was the older one.
 *
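 * Sequence numbers are a two-bit cyclic counter: 1 -> 2 -> 3 -> 1, with 0
 * meaning 'never written'. Equal sequence numbers, or a pair whose sum
 * exceeds 5 (impossible for two distinct in-range values), indicate a
 * corrupt log.
 *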
 * TODO The logic feels a bit kludge-y. make it better..
 */
static int btt_log_get_old(struct arena_info *a, struct log_group *log)
{
	int idx0 = a->log_index[0];
	int idx1 = a->log_index[1];
	int old;

	/*
	 * the first ever time this is seen, the entry goes into [0]
	 * the next time, the following logic works out to put this
	 * (next) entry into [1]
	 */
	if (log_seq(log, idx0) == 0) {
		log->ent[idx0].seq = cpu_to_le32(1);
		return 0;
	}

	if (log_seq(log, idx0) == log_seq(log, idx1))
		return -EINVAL;
	if (log_seq(log, idx0) + log_seq(log, idx1) > 5)
		return -EINVAL;

	if (log_seq(log, idx0) < log_seq(log, idx1)) {
		if ((log_seq(log, idx1) - log_seq(log, idx0)) == 1)
			old = 0;
		else
			old = 1;
	} else {
		if ((log_seq(log, idx0) - log_seq(log, idx1)) == 1)
			old = 1;
		else
			old = 0;
	}

	return old;
}

/*
 * This function copies the desired (old/new) log entry into ent if
 * it is not NULL. It returns the sub-slot number (0 or 1)
 * where the desired log entry was found. Negative return values
 * indicate errors.
 */
static int btt_log_read(struct arena_info *arena, u32 lane,
			struct log_entry *ent, int old_flag)
{
	int ret;
	int old_ent, ret_ent;
	struct log_group log;

	ret = btt_log_group_read(arena, lane, &log);
	if (ret)
		return -EIO;

	old_ent = btt_log_get_old(arena, &log);
	if (old_ent < 0 || old_ent > 1) {
		dev_err(to_dev(arena),
			"log corruption (%d): lane %d seq [%d, %d]\n",
			old_ent, lane, log.ent[arena->log_index[0]].seq,
			log.ent[arena->log_index[1]].seq);
		/* TODO set error state? */
		return -EIO;
	}

	ret_ent = (old_flag ? old_ent : (1 - old_ent));

	if (ent != NULL)
		memcpy(ent, &log.ent[arena->log_index[ret_ent]], LOG_ENT_SIZE);

	return ret_ent;
}

/*
 * This function commits a log entry to media
 * It does _not_ prepare the freelist entry for the next write
 * btt_flog_write is the wrapper for updating the freelist elements
 */
static int __btt_log_write(struct arena_info *arena, u32 lane,
			u32 sub, struct log_entry *ent, unsigned long flags)
{
	int ret;
	u32 group_slot = arena->log_index[sub];
	unsigned int log_half = LOG_ENT_SIZE / 2;
	void *src = ent;
	u64 ns_off;

	ns_off = arena->logoff + (lane * LOG_GRP_SIZE) +
		(group_slot * LOG_ENT_SIZE);
	/* split the 16B write into atomic, durable halves */
	ret = arena_write_bytes(arena, ns_off, src, log_half, flags);
	if (ret)
		return ret;

	ns_off += log_half;
	src += log_half;
	return arena_write_bytes(arena, ns_off, src, log_half, flags);
}

static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub,
			struct log_entry *ent)
{
	int ret;

	ret = __btt_log_write(arena, lane, sub, ent, NVDIMM_IO_ATOMIC);
	if (ret)
		return ret;

	/* prepare the next free entry */
	arena->freelist[lane].sub = 1 - arena->freelist[lane].sub;
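	/* seq cycles 1 -> 2 -> 3 -> 1; 0 is reserved for 'never written' */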
	if (++(arena->freelist[lane].seq) == 4)
		arena->freelist[lane].seq = 1;
	if (ent_e_flag(le32_to_cpu(ent->old_map)))
		arena->freelist[lane].has_err = 1;
	arena->freelist[lane].block = ent_lba(le32_to_cpu(ent->old_map));

	return ret;
}

/*
 * This function initializes the BTT map to the initial state, which is
 * all-zeroes, and indicates an identity mapping
 */
static int btt_map_init(struct arena_info *arena)
{
	int ret = -EINVAL;
	void *zerobuf;
	size_t offset = 0;
	size_t chunk_size = SZ_2M;
	size_t mapsize = arena->logoff - arena->mapoff;

	zerobuf = kzalloc(chunk_size, GFP_KERNEL);
	if (!zerobuf)
		return -ENOMEM;

	/*
	 * mapoff should always be at least 512B aligned. We rely on that to
	 * make sure rw_bytes does error clearing correctly, so make sure that
	 * is the case.
	 */
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->mapoff, 512),
		"arena->mapoff: %#llx is unaligned\n", arena->mapoff);

	while (mapsize) {
		size_t size = min(mapsize, chunk_size);

		dev_WARN_ONCE(to_dev(arena), size < 512,
			"chunk size: %#zx is unaligned\n", size);
		ret = arena_write_bytes(arena, arena->mapoff + offset, zerobuf,
				size, 0);
		if (ret)
			goto free;

		offset += size;
		mapsize -= size;
		cond_resched();
	}

 free:
	kfree(zerobuf);
	return ret;
}

/*
 * This function initializes the BTT log with 'fake' entries pointing
 * to the initially reserved set of blocks, marking them as free
 */
static int btt_log_init(struct arena_info *arena)
{
	size_t logsize = arena->info2off - arena->logoff;
	size_t chunk_size = SZ_4K, offset = 0;
	struct log_entry ent;
	void *zerobuf;
	int ret;
	u32 i;

	zerobuf = kzalloc(chunk_size, GFP_KERNEL);
	if (!zerobuf)
		return -ENOMEM;
	/*
	 * logoff should always be at least 512B aligned. We rely on that to
	 * make sure rw_bytes does error clearing correctly, so make sure that
	 * is the case.
	 */
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->logoff, 512),
		"arena->logoff: %#llx is unaligned\n", arena->logoff);

	while (logsize) {
		size_t size = min(logsize, chunk_size);

		dev_WARN_ONCE(to_dev(arena), size < 512,
			"chunk size: %#zx is unaligned\n", size);
		ret = arena_write_bytes(arena, arena->logoff + offset, zerobuf,
				size, 0);
		if (ret)
			goto free;

		offset += size;
		logsize -= size;
		cond_resched();
	}

	for (i = 0; i < arena->nfree; i++) {
		ent.lba = cpu_to_le32(i);
		ent.old_map = cpu_to_le32(arena->external_nlba + i);
		ent.new_map = cpu_to_le32(arena->external_nlba + i);
		ent.seq = cpu_to_le32(LOG_SEQ_INIT);
		ret = __btt_log_write(arena, i, 0, &ent, 0);
		if (ret)
			goto free;
	}

 free:
	kfree(zerobuf);
	return ret;
}

static u64 to_namespace_offset(struct arena_info *arena, u64 lba)
{
	return arena->dataoff + ((u64)lba * arena->internal_lbasize);
}

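/*
 * A lane's free block may carry a known media error. Zero it out (in
 * page-sized chunks) so the poison is cleared before the block is handed
 * out for a new write.
 */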
static int arena_clear_freelist_error(struct arena_info *arena, u32 lane)
{
	int ret = 0;

	if (arena->freelist[lane].has_err) {
		void *zero_page = page_address(ZERO_PAGE(0));
		u32 lba = arena->freelist[lane].block;
		u64 nsoff = to_namespace_offset(arena, lba);
		unsigned long len = arena->sector_size;

		mutex_lock(&arena->err_lock);

		while (len) {
			unsigned long chunk = min(len, PAGE_SIZE);

			ret = arena_write_bytes(arena, nsoff, zero_page,
				chunk, 0);
			if (ret)
				break;
			len -= chunk;
			nsoff += chunk;
			if (len == 0)
				arena->freelist[lane].has_err = 0;
		}
		mutex_unlock(&arena->err_lock);
	}
	return ret;
}

static int btt_freelist_init(struct arena_info *arena)
{
	int new, ret;
	struct log_entry log_new;
	u32 i, map_entry, log_oldmap, log_newmap;

	arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry),
					GFP_KERNEL);
	if (!arena->freelist)
		return -ENOMEM;

	for (i = 0; i < arena->nfree; i++) {
		new = btt_log_read(arena, i, &log_new, LOG_NEW_ENT);
		if (new < 0)
			return new;

		/* old and new map entries with any flags stripped out */
		log_oldmap = ent_lba(le32_to_cpu(log_new.old_map));
		log_newmap = ent_lba(le32_to_cpu(log_new.new_map));

		/* sub points to the next one to be overwritten */
		arena->freelist[i].sub = 1 - new;
		arena->freelist[i].seq = nd_inc_seq(le32_to_cpu(log_new.seq));
		arena->freelist[i].block = log_oldmap;

		/*
		 * FIXME: if error clearing fails during init, we want to make
		 * the BTT read-only
		 */
		if (ent_e_flag(le32_to_cpu(log_new.old_map)) &&
		    !ent_normal(le32_to_cpu(log_new.old_map))) {
			arena->freelist[i].has_err = 1;
			ret = arena_clear_freelist_error(arena, i);
			if (ret)
				dev_err_ratelimited(to_dev(arena),
					"Unable to clear known errors\n");
		}

		/* This implies a newly created or untouched flog entry */
		if (log_oldmap == log_newmap)
			continue;

		/* Check if map recovery is needed */
		ret = btt_map_read(arena, le32_to_cpu(log_new.lba), &map_entry,
				NULL, NULL, 0);
		if (ret)
			return ret;

		/*
		 * The map_entry from btt_map_read is stripped of any flag
		 * bits, so use the stripped out versions from the log as well
		 * for testing whether recovery is needed. For restoration,
		 * use the 'raw' version of the log entries as that captured
		 * what we were going to write originally.
		 */
		if ((log_newmap != map_entry) && (log_oldmap == map_entry)) {
			/*
			 * Last transaction wrote the flog, but wasn't able
			 * to complete the map write. So fix up the map.
			 */
			ret = btt_map_write(arena, le32_to_cpu(log_new.lba),
					le32_to_cpu(log_new.new_map), 0, 0, 0);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static bool ent_is_padding(struct log_entry *ent)
{
	return (ent->lba == 0) && (ent->old_map == 0) && (ent->new_map == 0)
		&& (ent->seq == 0);
}

/*
 * Detecting valid log indices: We read a log group (see the comments in btt.h
 * for a description of a 'log_group' and its 'slots'), and iterate over its
 * four slots. We expect that a padding slot will be all-zeroes, and use this
 * to detect a padding slot vs. an actual entry.
 *
 * If a log_group is in the initial state, i.e. hasn't been used since the
 * creation of this BTT layout, it will have three of the four slots with
 * zeroes. We skip over these log_groups for the detection of log_index. If
 * all log_groups are in the initial state (i.e. the BTT has never been
 * written to), it is safe to assume the 'new format' of log entries in slots
 * (0, 1).
 */
static int log_set_indices(struct arena_info *arena)
{
	bool idx_set = false, initial_state = true;
	int ret, log_index[2] = {-1, -1};
	u32 i, j, next_idx = 0;
	struct log_group log;
	u32 pad_count = 0;

	for (i = 0; i < arena->nfree; i++) {
		ret = btt_log_group_read(arena, i, &log);
		if (ret < 0)
			return ret;

		for (j = 0; j < 4; j++) {
			if (!idx_set) {
				if (ent_is_padding(&log.ent[j])) {
					pad_count++;
					continue;
				} else {
					/* Skip if index has been recorded */
					if ((next_idx == 1) &&
						(j == log_index[0]))
						continue;
					/* valid entry, record index */
					log_index[next_idx] = j;
					next_idx++;
				}
				if (next_idx == 2) {
					/* two valid entries found */
					idx_set = true;
				} else if (next_idx > 2) {
					/* too many valid indices */
					return -ENXIO;
				}
			} else {
				/*
				 * once the indices have been set, just verify
				 * that all subsequent log groups are either in
				 * their initial state or follow the same
				 * indices.
				 */
				if (j == log_index[0]) {
					/* entry must be 'valid' */
					if (ent_is_padding(&log.ent[j]))
						return -ENXIO;
				} else if (j == log_index[1]) {
					;
					/*
					 * log_index[1] can be padding if the
					 * lane never got used and it is still
					 * in the initial state (three 'padding'
					 * entries)
					 */
				} else {
					/* entry must be invalid (padding) */
					if (!ent_is_padding(&log.ent[j]))
						return -ENXIO;
				}
			}
		}
		/*
		 * If any of the log_groups have more than one valid,
		 * non-padding entry, then we are no longer in the
		 * initial_state
		 */
		if (pad_count < 3)
			initial_state = false;
		pad_count = 0;
	}

	if (!initial_state && !idx_set)
		return -ENXIO;

	/*
	 * If all the entries in the log were in the initial state,
	 * assume new padding scheme
	 */
	if (initial_state)
		log_index[1] = 1;

	/*
	 * Only allow the known permutations of log/padding indices,
	 * i.e. (0, 1), and (0, 2)
	 */
	if ((log_index[0] == 0) && ((log_index[1] == 1) || (log_index[1] == 2)))
		; /* known index possibilities */
	else {
		dev_err(to_dev(arena), "Found an unknown padding scheme\n");
		return -ENXIO;
	}

	arena->log_index[0] = log_index[0];
	arena->log_index[1] = log_index[1];
	dev_dbg(to_dev(arena), "log_index_0 = %d\n", log_index[0]);
	dev_dbg(to_dev(arena), "log_index_1 = %d\n", log_index[1]);
	return 0;
}

static int btt_rtt_init(struct arena_info *arena)
{
	arena->rtt = kcalloc(arena->nfree, sizeof(u32), GFP_KERNEL);
	if (arena->rtt == NULL)
		return -ENOMEM;

	return 0;
}

static int btt_maplocks_init(struct arena_info *arena)
{
	u32 i;

	arena->map_locks = kcalloc(arena->nfree, sizeof(struct aligned_lock),
				GFP_KERNEL);
	if (!arena->map_locks)
		return -ENOMEM;

	for (i = 0; i < arena->nfree; i++)
		spin_lock_init(&arena->map_locks[i].lock);

	return 0;
}

static struct arena_info *alloc_arena(struct btt *btt, size_t size,
				size_t start, size_t arena_off)
{
	struct arena_info *arena;
	u64 logsize, mapsize, datasize;
	u64 available = size;

	arena = kzalloc(sizeof(*arena), GFP_KERNEL);
	if (!arena)
		return NULL;
	arena->nd_btt = btt->nd_btt;
	arena->sector_size = btt->sector_size;
	mutex_init(&arena->err_lock);

	if (!size)
		return arena;

	arena->size = size;
	arena->external_lba_start = start;
	arena->external_lbasize = btt->lbasize;
	arena->internal_lbasize = roundup(arena->external_lbasize,
					INT_LBASIZE_ALIGNMENT);
	arena->nfree = BTT_DEFAULT_NFREE;
	arena->version_major = btt->nd_btt->version_major;
	arena->version_minor = btt->nd_btt->version_minor;

	if (available % BTT_PG_SIZE)
		available -= (available % BTT_PG_SIZE);

	/* Two pages are reserved for the super block and its copy */
	available -= 2 * BTT_PG_SIZE;

	/* The log takes a fixed amount of space based on nfree */
	logsize = roundup(arena->nfree * LOG_GRP_SIZE, BTT_PG_SIZE);
	available -= logsize;

	/* Calculate optimal split between map and data area */
	arena->internal_nlba = div_u64(available - BTT_PG_SIZE,
			arena->internal_lbasize + MAP_ENT_SIZE);
	arena->external_nlba = arena->internal_nlba - arena->nfree;

	mapsize = roundup((arena->external_nlba * MAP_ENT_SIZE), BTT_PG_SIZE);
	datasize = available - mapsize;

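	/*
	 * Resulting arena layout, in media order:
	 *   info block | data area | map | log | info2 (backup info block)
	 */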
	/* 'Absolute' values, relative to start of storage space */
	arena->infooff = arena_off;
	arena->dataoff = arena->infooff + BTT_PG_SIZE;
	arena->mapoff = arena->dataoff + datasize;
	arena->logoff = arena->mapoff + mapsize;
	arena->info2off = arena->logoff + logsize;

	/* Default log indices are (0,1) */
	arena->log_index[0] = 0;
	arena->log_index[1] = 1;
	return arena;
}

static void free_arenas(struct btt *btt)
{
	struct arena_info *arena, *next;

	list_for_each_entry_safe(arena, next, &btt->arena_list, list) {
		list_del(&arena->list);
		kfree(arena->rtt);
		kfree(arena->map_locks);
		kfree(arena->freelist);
		debugfs_remove_recursive(arena->debugfs_dir);
		kfree(arena);
	}
}

/*
 * This function reads an existing valid btt superblock and
 * populates the corresponding arena_info struct
 */
static void parse_arena_meta(struct arena_info *arena, struct btt_sb *super,
				u64 arena_off)
{
	arena->internal_nlba = le32_to_cpu(super->internal_nlba);
	arena->internal_lbasize = le32_to_cpu(super->internal_lbasize);
	arena->external_nlba = le32_to_cpu(super->external_nlba);
	arena->external_lbasize = le32_to_cpu(super->external_lbasize);
	arena->nfree = le32_to_cpu(super->nfree);
	arena->version_major = le16_to_cpu(super->version_major);
	arena->version_minor = le16_to_cpu(super->version_minor);

	arena->nextoff = (super->nextoff == 0) ? 0 : (arena_off +
			le64_to_cpu(super->nextoff));
	arena->infooff = arena_off;
	arena->dataoff = arena_off + le64_to_cpu(super->dataoff);
	arena->mapoff = arena_off + le64_to_cpu(super->mapoff);
	arena->logoff = arena_off + le64_to_cpu(super->logoff);
	arena->info2off = arena_off + le64_to_cpu(super->info2off);

	arena->size = (le64_to_cpu(super->nextoff) > 0)
		? (le64_to_cpu(super->nextoff))
		: (arena->info2off - arena->infooff + BTT_PG_SIZE);

	arena->flags = le32_to_cpu(super->flags);
}

static int discover_arenas(struct btt *btt)
{
	int ret = 0;
	struct arena_info *arena;
	size_t remaining = btt->rawsize;
	u64 cur_nlba = 0;
	size_t cur_off = 0;
	int num_arenas = 0;

	struct btt_sb *super __free(kfree) = kzalloc(sizeof(*super), GFP_KERNEL);
	if (!super)
		return -ENOMEM;

	while (remaining) {
		/* Alloc memory for arena */
		arena = alloc_arena(btt, 0, 0, 0);
		if (!arena)
			return -ENOMEM;

		arena->infooff = cur_off;
		ret = btt_info_read(arena, super);
		if (ret)
			goto out;

		if (!nd_btt_arena_is_valid(btt->nd_btt, super)) {
			if (remaining == btt->rawsize) {
				btt->init_state = INIT_NOTFOUND;
				dev_info(to_dev(arena), "No existing arenas\n");
				goto out;
			} else {
				dev_err(to_dev(arena),
					"Found corrupted metadata!\n");
				ret = -ENODEV;
				goto out;
			}
		}

		arena->external_lba_start = cur_nlba;
		parse_arena_meta(arena, super, cur_off);

		ret = log_set_indices(arena);
		if (ret) {
			dev_err(to_dev(arena),
				"Unable to deduce log/padding indices\n");
			goto out;
		}

		ret = btt_freelist_init(arena);
		if (ret)
			goto out;

		ret = btt_rtt_init(arena);
		if (ret)
			goto out;

		ret = btt_maplocks_init(arena);
		if (ret)
			goto out;

		list_add_tail(&arena->list, &btt->arena_list);

		remaining -= arena->size;
		cur_off += arena->size;
		cur_nlba += arena->external_nlba;
		num_arenas++;

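		/* a nextoff of zero marks the last arena in the chain */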
		if (arena->nextoff == 0)
			break;
	}
	btt->num_arenas = num_arenas;
	btt->nlba = cur_nlba;
	btt->init_state = INIT_READY;

	return ret;

 out:
	kfree(arena);
	free_arenas(btt);
	return ret;
}

static int create_arenas(struct btt *btt)
{
	size_t remaining = btt->rawsize;
	size_t cur_off = 0;

	while (remaining) {
		struct arena_info *arena;
		size_t arena_size = min_t(u64, ARENA_MAX_SIZE, remaining);

		remaining -= arena_size;
		if (arena_size < ARENA_MIN_SIZE)
			break;

		arena = alloc_arena(btt, arena_size, btt->nlba, cur_off);
		if (!arena) {
			free_arenas(btt);
			return -ENOMEM;
		}
		btt->nlba += arena->external_nlba;
		if (remaining >= ARENA_MIN_SIZE)
			arena->nextoff = arena->size;
		else
			arena->nextoff = 0;
		cur_off += arena_size;
		list_add_tail(&arena->list, &btt->arena_list);
	}

	return 0;
}

/*
 * This function completes arena initialization by writing
 * all the metadata.
 * It is only called for an uninitialized arena when a write
 * to that arena occurs for the first time.
 */
static int btt_arena_write_layout(struct arena_info *arena)
{
	int ret;
	u64 sum;
	struct btt_sb *super;
	struct nd_btt *nd_btt = arena->nd_btt;
	const uuid_t *parent_uuid = nd_dev_to_uuid(&nd_btt->ndns->dev);

	ret = btt_map_init(arena);
	if (ret)
		return ret;

	ret = btt_log_init(arena);
	if (ret)
		return ret;

	super = kzalloc(sizeof(*super), GFP_NOIO);
	if (!super)
		return -ENOMEM;

	strscpy(super->signature, BTT_SIG, sizeof(super->signature));
	export_uuid(super->uuid, nd_btt->uuid);
	export_uuid(super->parent_uuid, parent_uuid);
	super->flags = cpu_to_le32(arena->flags);
	super->version_major = cpu_to_le16(arena->version_major);
	super->version_minor = cpu_to_le16(arena->version_minor);
	super->external_lbasize = cpu_to_le32(arena->external_lbasize);
	super->external_nlba = cpu_to_le32(arena->external_nlba);
	super->internal_lbasize = cpu_to_le32(arena->internal_lbasize);
	super->internal_nlba = cpu_to_le32(arena->internal_nlba);
	super->nfree = cpu_to_le32(arena->nfree);
	super->infosize = cpu_to_le32(sizeof(struct btt_sb));
	super->nextoff = cpu_to_le64(arena->nextoff);
	/*
	 * Subtract arena->infooff (arena start) so numbers are relative
	 * to 'this' arena
	 */
	super->dataoff = cpu_to_le64(arena->dataoff - arena->infooff);
	super->mapoff = cpu_to_le64(arena->mapoff - arena->infooff);
	super->logoff = cpu_to_le64(arena->logoff - arena->infooff);
	super->info2off = cpu_to_le64(arena->info2off - arena->infooff);

	super->flags = 0;
	sum = nd_sb_checksum((struct nd_gen_sb *) super);
	super->checksum = cpu_to_le64(sum);

	ret = btt_info_write(arena, super);

	kfree(super);
	return ret;
}

/*
 * This function completes the initialization for the BTT namespace
 * such that it is ready to accept IOs
 */
static int btt_meta_init(struct btt *btt)
{
	int ret = 0;
	struct arena_info *arena;

	mutex_lock(&btt->init_lock);
	list_for_each_entry(arena, &btt->arena_list, list) {
		ret = btt_arena_write_layout(arena);
		if (ret)
			goto unlock;

		ret = btt_freelist_init(arena);
		if (ret)
			goto unlock;

		ret = btt_rtt_init(arena);
		if (ret)
			goto unlock;

		ret = btt_maplocks_init(arena);
		if (ret)
			goto unlock;
	}

	btt->init_state = INIT_READY;

 unlock:
	mutex_unlock(&btt->init_lock);
	return ret;
}

static u32 btt_meta_size(struct btt *btt)
{
	return btt->lbasize - btt->sector_size;
}

/*
 * This function calculates the arena in which the given LBA lies
 * by doing a linear walk. This is acceptable since we expect only
 * a few arenas. If we have backing devices that get much larger,
 * we can construct a balanced binary tree of arenas at init time
 * so that this range search becomes faster.
 */
static int lba_to_arena(struct btt *btt, sector_t sector, __u32 *premap,
				struct arena_info **arena)
{
	struct arena_info *arena_list;
	__u64 lba = div_u64(sector << SECTOR_SHIFT, btt->sector_size);

	list_for_each_entry(arena_list, &btt->arena_list, list) {
		if (lba < arena_list->external_nlba) {
			*arena = arena_list;
			*premap = lba;
			return 0;
		}
		lba -= arena_list->external_nlba;
	}

	return -EIO;
}

/*
 * The following (lock_map, unlock_map) are mostly just to improve
 * readability, since they index into an array of locks
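 * Map entries that share a cacheline share a lock, and the cachelines
 * are striped across the 'nfree' locks to bound the array size.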
 */
static void lock_map(struct arena_info *arena, u32 premap)
		__acquires(&arena->map_locks[idx].lock)
{
	u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;

	spin_lock(&arena->map_locks[idx].lock);
}

static void unlock_map(struct arena_info *arena, u32 premap)
		__releases(&arena->map_locks[idx].lock)
{
	u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;

	spin_unlock(&arena->map_locks[idx].lock);
}

static int btt_data_read(struct arena_info *arena, struct page *page,
			unsigned int off, u32 lba, u32 len)
{
	int ret;
	u64 nsoff = to_namespace_offset(arena, lba);
	void *mem = kmap_atomic(page);

	ret = arena_read_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
	kunmap_atomic(mem);

	return ret;
}

static int btt_data_write(struct arena_info *arena, u32 lba,
			struct page *page, unsigned int off, u32 len)
{
	int ret;
	u64 nsoff = to_namespace_offset(arena, lba);
	void *mem = kmap_atomic(page);

	ret = arena_write_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
	kunmap_atomic(mem);

	return ret;
}

static void zero_fill_data(struct page *page, unsigned int off, u32 len)
{
	void *mem = kmap_atomic(page);

	memset(mem + off, 0, len);
	kunmap_atomic(mem);
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
			struct arena_info *arena, u32 postmap, int rw)
{
	unsigned int len = btt_meta_size(btt);
	u64 meta_nsoff;
	int ret = 0;

	if (bip == NULL)
		return 0;

	meta_nsoff = to_namespace_offset(arena, postmap) + btt->sector_size;

	while (len) {
		unsigned int cur_len;
		struct bio_vec bv;
		void *mem;

		bv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
		/*
		 * The 'bv' obtained from bvec_iter_bvec has its .bv_len and
		 * .bv_offset already adjusted for iter->bi_bvec_done, and we
		 * can use those directly
		 */

		cur_len = min(len, bv.bv_len);
		mem = bvec_kmap_local(&bv);
		if (rw)
			ret = arena_write_bytes(arena, meta_nsoff, mem, cur_len,
					NVDIMM_IO_ATOMIC);
		else
			ret = arena_read_bytes(arena, meta_nsoff, mem, cur_len,
					NVDIMM_IO_ATOMIC);

		kunmap_local(mem);
		if (ret)
			return ret;

		len -= cur_len;
		meta_nsoff += cur_len;
		if (!bvec_iter_advance(bip->bip_vec, &bip->bip_iter, cur_len))
			return -EIO;
	}

	return ret;
}

#else /* CONFIG_BLK_DEV_INTEGRITY */
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
			struct arena_info *arena, u32 postmap, int rw)
{
	return 0;
}
#endif

static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
			struct page *page, unsigned int off, sector_t sector,
			unsigned int len)
{
	int ret = 0;
	int t_flag, e_flag;
	struct arena_info *arena = NULL;
	u32 lane = 0, premap, postmap;

	while (len) {
		u32 cur_len;

		lane = nd_region_acquire_lane(btt->nd_region);

		ret = lba_to_arena(btt, sector, &premap, &arena);
		if (ret)
			goto out_lane;

		cur_len = min(btt->sector_size, len);

		ret = btt_map_read(arena, premap, &postmap, &t_flag, &e_flag,
				NVDIMM_IO_ATOMIC);
		if (ret)
			goto out_lane;

		/*
		 * We loop to make sure that the post map LBA didn't change
		 * from under us between writing the RTT and doing the actual
		 * read.
		 */
		while (1) {
			u32 new_map;
			int new_t, new_e;

			if (t_flag) {
				zero_fill_data(page, off, cur_len);
				goto out_lane;
			}

			if (e_flag) {
				ret = -EIO;
				goto out_lane;
			}

			arena->rtt[lane] = RTT_VALID | postmap;
			/*
			 * Barrier to make sure this write is not reordered
			 * to do the verification map_read before the RTT store
			 */
			barrier();

			ret = btt_map_read(arena, premap, &new_map, &new_t,
					&new_e, NVDIMM_IO_ATOMIC);
			if (ret)
				goto out_rtt;

			if ((postmap == new_map) && (t_flag == new_t) &&
					(e_flag == new_e))
				break;

			postmap = new_map;
			t_flag = new_t;
			e_flag = new_e;
		}

		ret = btt_data_read(arena, page, off, postmap, cur_len);
		if (ret) {
			/* Media error - set the e_flag */
			if (btt_map_write(arena, premap, postmap, 0, 1, NVDIMM_IO_ATOMIC))
				dev_warn_ratelimited(to_dev(arena),
					"Error persistently tracking bad blocks at %#x\n",
					premap);
			goto out_rtt;
		}

		if (bip) {
			ret = btt_rw_integrity(btt, bip, arena, postmap, READ);
			if (ret)
				goto out_rtt;
		}

		arena->rtt[lane] = RTT_INVALID;
		nd_region_release_lane(btt->nd_region, lane);

		len -= cur_len;
		off += cur_len;
		sector += btt->sector_size >> SECTOR_SHIFT;
	}

	return 0;

 out_rtt:
	arena->rtt[lane] = RTT_INVALID;
 out_lane:
	nd_region_release_lane(btt->nd_region, lane);
	return ret;
}

/*
 * Normally, arena_{read,write}_bytes will take care of the initial offset
 * adjustment, but in the case of btt_is_badblock, where we query is_bad_pmem,
 * we need the final, raw namespace offset here
 */
static bool btt_is_badblock(struct btt *btt, struct arena_info *arena,
		u32 postmap)
{
	u64 nsoff = adjust_initial_offset(arena->nd_btt,
			to_namespace_offset(arena, postmap));
	sector_t phys_sector = nsoff >> 9;

	return is_bad_pmem(btt->phys_bb, phys_sector, arena->internal_lbasize);
}

static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
			sector_t sector, struct page *page, unsigned int off,
			unsigned int len)
{
	int ret = 0;
	struct arena_info *arena = NULL;
	u32 premap = 0, old_postmap, new_postmap, lane = 0, i;
	struct log_entry log;
	int sub;

	while (len) {
		u32 cur_len;
		int e_flag;

 retry:
		lane = nd_region_acquire_lane(btt->nd_region);

		ret = lba_to_arena(btt, sector, &premap, &arena);
		if (ret)
			goto out_lane;
		cur_len = min(btt->sector_size, len);

		if ((arena->flags & IB_FLAG_ERROR_MASK) != 0) {
			ret = -EIO;
			goto out_lane;
		}

		if (btt_is_badblock(btt, arena, arena->freelist[lane].block))
			arena->freelist[lane].has_err = 1;

		if (mutex_is_locked(&arena->err_lock)
				|| arena->freelist[lane].has_err) {
			nd_region_release_lane(btt->nd_region, lane);

			ret = arena_clear_freelist_error(arena, lane);
			if (ret)
				return ret;

			/* OK to acquire a different lane/free block */
			goto retry;
		}

		new_postmap = arena->freelist[lane].block;

		/* Wait if the new block is being read from */
		for (i = 0; i < arena->nfree; i++)
			while (arena->rtt[i] == (RTT_VALID | new_postmap))
				cpu_relax();

		if (new_postmap >= arena->internal_nlba) {
			ret = -EIO;
			goto out_lane;
		}

		ret = btt_data_write(arena, new_postmap, page, off, cur_len);
		if (ret)
			goto out_lane;

		if (bip) {
			ret = btt_rw_integrity(btt, bip, arena, new_postmap,
						WRITE);
			if (ret)
				goto out_lane;
		}

		lock_map(arena, premap);
		ret = btt_map_read(arena, premap, &old_postmap, NULL, &e_flag,
				NVDIMM_IO_ATOMIC);
		if (ret)
			goto out_map;
		if (old_postmap >= arena->internal_nlba) {
			ret = -EIO;
			goto out_map;
		}
		if (e_flag)
			set_e_flag(old_postmap);

		log.lba = cpu_to_le32(premap);
		log.old_map = cpu_to_le32(old_postmap);
		log.new_map = cpu_to_le32(new_postmap);
		log.seq = cpu_to_le32(arena->freelist[lane].seq);
		sub = arena->freelist[lane].sub;
		ret = btt_flog_write(arena, lane, sub, &log);
		if (ret)
			goto out_map;

		ret = btt_map_write(arena, premap, new_postmap, 0, 0,
				NVDIMM_IO_ATOMIC);
		if (ret)
			goto out_map;

		unlock_map(arena, premap);
		nd_region_release_lane(btt->nd_region, lane);

		if (e_flag) {
			ret = arena_clear_freelist_error(arena, lane);
			if (ret)
				return ret;
		}

		len -= cur_len;
		off += cur_len;
		sector += btt->sector_size >> SECTOR_SHIFT;
	}

	return 0;

 out_map:
	unlock_map(arena, premap);
 out_lane:
	nd_region_release_lane(btt->nd_region, lane);
	return ret;
}

static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
			struct page *page, unsigned int len, unsigned int off,
			enum req_op op, sector_t sector)
{
	int ret;

	if (!op_is_write(op)) {
		ret = btt_read_pg(btt, bip, page, off, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		ret = btt_write_pg(btt, bip, sector, page, off, len);
	}

	return ret;
}

static void btt_submit_bio(struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct btt *btt = bio->bi_bdev->bd_disk->private_data;
	struct bvec_iter iter;
	unsigned long start;
	struct bio_vec bvec;
	int err = 0;
	bool do_acct;

	if (!bio_integrity_prep(bio))
		return;

	do_acct = blk_queue_io_stat(bio->bi_bdev->bd_disk->queue);
	if (do_acct)
		start = bio_start_io_acct(bio);
	bio_for_each_segment(bvec, bio, iter) {
		unsigned int len = bvec.bv_len;

		if (len > PAGE_SIZE || len < btt->sector_size ||
				len % btt->sector_size) {
			dev_err_ratelimited(&btt->nd_btt->dev,
				"unaligned bio segment (len: %d)\n", len);
			bio->bi_status = BLK_STS_IOERR;
			break;
		}

		err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset,
				bio_op(bio), iter.bi_sector);
		if (err) {
			dev_err(&btt->nd_btt->dev,
				"io error in %s sector %lld, len %d,\n",
				(op_is_write(bio_op(bio))) ? "WRITE" :
				"READ",
				(unsigned long long) iter.bi_sector, len);
			bio->bi_status = errno_to_blk_status(err);
			break;
		}
	}
	if (do_acct)
		bio_end_io_acct(bio, start);

	bio_endio(bio);
}

static int btt_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	/* some standard values */
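	/* 64 heads * 32 sectors = 2048 (1 << 11) sectors per cylinder */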
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	return 0;
}

static const struct block_device_operations btt_fops = {
	.owner = THIS_MODULE,
	.submit_bio = btt_submit_bio,
	.getgeo = btt_getgeo,
};

static int btt_blk_init(struct btt *btt)
{
	struct nd_btt *nd_btt = btt->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;
	struct queue_limits lim = {
		.logical_block_size = btt->sector_size,
		.max_hw_sectors = UINT_MAX,
		.max_integrity_segments = 1,
		.features = BLK_FEAT_SYNCHRONOUS,
	};
	int rc;

	if (btt_meta_size(btt) && IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) {
		lim.integrity.tuple_size = btt_meta_size(btt);
		lim.integrity.tag_size = btt_meta_size(btt);
	}

	btt->btt_disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
	if (IS_ERR(btt->btt_disk))
		return PTR_ERR(btt->btt_disk);

	nvdimm_namespace_disk_name(ndns, btt->btt_disk->disk_name);
	btt->btt_disk->first_minor = 0;
	btt->btt_disk->fops = &btt_fops;
	btt->btt_disk->private_data = btt;

	set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
	rc = device_add_disk(&btt->nd_btt->dev, btt->btt_disk, NULL);
	if (rc)
		goto out_cleanup_disk;

	btt->nd_btt->size = btt->nlba * (u64)btt->sector_size;
	nvdimm_check_and_set_ro(btt->btt_disk);

	return 0;

out_cleanup_disk:
	put_disk(btt->btt_disk);
	return rc;
}

static void btt_blk_cleanup(struct btt *btt)
{
	del_gendisk(btt->btt_disk);
	put_disk(btt->btt_disk);
}

/**
 * btt_init - initialize a block translation table for the given device
 * @nd_btt: device with BTT geometry and backing device info
 * @rawsize: raw size in bytes of the backing device
 * @lbasize: lba size of the backing device
 * @uuid: A uuid for the backing device - this is stored on media
 * @nd_region: &struct nd_region for the REGION device
 *
 * Initialize a Block Translation Table on a backing device to provide
 * single sector power fail atomicity.
 *
 * Context:
 * Might sleep.
 *
 * Returns:
 * Pointer to a new struct btt on success, NULL on failure.
 */
static struct btt *btt_init(struct nd_btt *nd_btt, unsigned long long rawsize,
			u32 lbasize, uuid_t *uuid,
			struct nd_region *nd_region)
{
	int ret;
	struct btt *btt;
	struct nd_namespace_io *nsio;
	struct device *dev = &nd_btt->dev;

	btt = devm_kzalloc(dev, sizeof(struct btt), GFP_KERNEL);
	if (!btt)
		return NULL;

	btt->nd_btt = nd_btt;
	btt->rawsize = rawsize;
	btt->lbasize = lbasize;
	btt->sector_size = ((lbasize >= 4096) ? 4096 : 512);
	INIT_LIST_HEAD(&btt->arena_list);
	mutex_init(&btt->init_lock);
	btt->nd_region = nd_region;
	nsio = to_nd_namespace_io(&nd_btt->ndns->dev);
	btt->phys_bb = &nsio->bb;

	ret = discover_arenas(btt);
	if (ret) {
		dev_err(dev, "init: error in arena_discover: %d\n", ret);
		return NULL;
	}

	if (btt->init_state != INIT_READY && nd_region->ro) {
		dev_warn(dev, "%s is read-only, unable to init btt metadata\n",
				dev_name(&nd_region->dev));
		return NULL;
	} else if (btt->init_state != INIT_READY) {
		btt->num_arenas = (rawsize / ARENA_MAX_SIZE) +
			((rawsize % ARENA_MAX_SIZE) ? 1 : 0);
		dev_dbg(dev, "init: %d arenas for %llu rawsize\n",
				btt->num_arenas, rawsize);

		ret = create_arenas(btt);
		if (ret) {
			dev_info(dev, "init: create_arenas: %d\n", ret);
			return NULL;
		}

		ret = btt_meta_init(btt);
		if (ret) {
			dev_err(dev, "init: error in meta_init: %d\n", ret);
			return NULL;
		}
	}

	ret = btt_blk_init(btt);
	if (ret) {
		dev_err(dev, "init: error in blk_init: %d\n", ret);
		return NULL;
	}

	btt_debugfs_init(btt);

	return btt;
}

/**
 * btt_fini - de-initialize a BTT
 * @btt: the BTT handle that was generated by btt_init
 *
 * De-initialize a Block Translation Table on device removal
 *
 * Context:
 * Might sleep.
 */
static void btt_fini(struct btt *btt)
{
	if (btt) {
		btt_blk_cleanup(btt);
		free_arenas(btt);
		debugfs_remove_recursive(btt->debugfs_dir);
	}
}

int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
{
	struct nd_btt *nd_btt = to_nd_btt(ndns->claim);
	struct nd_region *nd_region;
	struct btt_sb *btt_sb;
	struct btt *btt;
	size_t size, rawsize;
	int rc;

	if (!nd_btt->uuid || !nd_btt->ndns || !nd_btt->lbasize) {
		dev_dbg(&nd_btt->dev, "incomplete btt configuration\n");
		return -ENODEV;
	}

	btt_sb = devm_kzalloc(&nd_btt->dev, sizeof(*btt_sb), GFP_KERNEL);
	if (!btt_sb)
		return -ENOMEM;

	size = nvdimm_namespace_capacity(ndns);
	rc = devm_namespace_enable(&nd_btt->dev, ndns, size);
	if (rc)
		return rc;

	/*
	 * If this returns < 0, that is ok as it just means there wasn't
	 * an existing BTT, and we're creating a new one. We still need to
	 * call this as we need the version dependent fields in nd_btt to be
	 * set correctly based on the holder class
	 */
	nd_btt_version(nd_btt, ndns, btt_sb);

	rawsize = size - nd_btt->initial_offset;
	if (rawsize < ARENA_MIN_SIZE) {
		dev_dbg(&nd_btt->dev, "%s must be at least %ld bytes\n",
				dev_name(&ndns->dev),
				ARENA_MIN_SIZE + nd_btt->initial_offset);
		return -ENXIO;
	}
	nd_region = to_nd_region(nd_btt->dev.parent);
	btt = btt_init(nd_btt, rawsize, nd_btt->lbasize, nd_btt->uuid,
			nd_region);
	if (!btt)
		return -ENOMEM;
	nd_btt->btt = btt;

	return 0;
}
EXPORT_SYMBOL(nvdimm_namespace_attach_btt);

int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt)
{
	struct btt *btt = nd_btt->btt;

	btt_fini(btt);
	nd_btt->btt = NULL;

	return 0;
}
EXPORT_SYMBOL(nvdimm_namespace_detach_btt);

static int __init nd_btt_init(void)
{
	int rc = 0;

	debugfs_root = debugfs_create_dir("btt", NULL);
	if (IS_ERR_OR_NULL(debugfs_root))
		rc = -ENXIO;

	return rc;
}

static void __exit nd_btt_exit(void)
{
	debugfs_remove_recursive(debugfs_root);
}

MODULE_ALIAS_ND_DEVICE(ND_DEVICE_BTT);
MODULE_AUTHOR("Vishal Verma <vishal.l.verma@linux.intel.com>");
MODULE_DESCRIPTION("NVDIMM Block Translation Table");
MODULE_LICENSE("GPL v2");
module_init(nd_btt_init);
module_exit(nd_btt_exit);
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Block Translation Table
4 * Copyright (c) 2014-2015, Intel Corporation.
5 */
6#include <linux/highmem.h>
7#include <linux/debugfs.h>
8#include <linux/blkdev.h>
9#include <linux/pagemap.h>
10#include <linux/module.h>
11#include <linux/device.h>
12#include <linux/mutex.h>
13#include <linux/hdreg.h>
14#include <linux/sizes.h>
15#include <linux/ndctl.h>
16#include <linux/fs.h>
17#include <linux/nd.h>
18#include <linux/backing-dev.h>
19#include "btt.h"
20#include "nd.h"
21
22enum log_ent_request {
23 LOG_NEW_ENT = 0,
24 LOG_OLD_ENT
25};
26
27static struct device *to_dev(struct arena_info *arena)
28{
29 return &arena->nd_btt->dev;
30}
31
32static u64 adjust_initial_offset(struct nd_btt *nd_btt, u64 offset)
33{
34 return offset + nd_btt->initial_offset;
35}
36
37static int arena_read_bytes(struct arena_info *arena, resource_size_t offset,
38 void *buf, size_t n, unsigned long flags)
39{
40 struct nd_btt *nd_btt = arena->nd_btt;
41 struct nd_namespace_common *ndns = nd_btt->ndns;
42
43 /* arena offsets may be shifted from the base of the device */
44 offset = adjust_initial_offset(nd_btt, offset);
45 return nvdimm_read_bytes(ndns, offset, buf, n, flags);
46}
47
48static int arena_write_bytes(struct arena_info *arena, resource_size_t offset,
49 void *buf, size_t n, unsigned long flags)
50{
51 struct nd_btt *nd_btt = arena->nd_btt;
52 struct nd_namespace_common *ndns = nd_btt->ndns;
53
54 /* arena offsets may be shifted from the base of the device */
55 offset = adjust_initial_offset(nd_btt, offset);
56 return nvdimm_write_bytes(ndns, offset, buf, n, flags);
57}
58
59static int btt_info_write(struct arena_info *arena, struct btt_sb *super)
60{
61 int ret;
62
63 /*
64 * infooff and info2off should always be at least 512B aligned.
65 * We rely on that to make sure rw_bytes does error clearing
66 * correctly, so make sure that is the case.
67 */
68 dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->infooff, 512),
69 "arena->infooff: %#llx is unaligned\n", arena->infooff);
70 dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->info2off, 512),
71 "arena->info2off: %#llx is unaligned\n", arena->info2off);
72
73 ret = arena_write_bytes(arena, arena->info2off, super,
74 sizeof(struct btt_sb), 0);
75 if (ret)
76 return ret;
77
78 return arena_write_bytes(arena, arena->infooff, super,
79 sizeof(struct btt_sb), 0);
80}
81
82static int btt_info_read(struct arena_info *arena, struct btt_sb *super)
83{
84 return arena_read_bytes(arena, arena->infooff, super,
85 sizeof(struct btt_sb), 0);
86}
87
88/*
89 * 'raw' version of btt_map write
90 * Assumptions:
91 * mapping is in little-endian
92 * mapping contains 'E' and 'Z' flags as desired
93 */
94static int __btt_map_write(struct arena_info *arena, u32 lba, __le32 mapping,
95 unsigned long flags)
96{
97 u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);
98
99 if (unlikely(lba >= arena->external_nlba))
100 dev_err_ratelimited(to_dev(arena),
101 "%s: lba %#x out of range (max: %#x)\n",
102 __func__, lba, arena->external_nlba);
103 return arena_write_bytes(arena, ns_off, &mapping, MAP_ENT_SIZE, flags);
104}
105
106static int btt_map_write(struct arena_info *arena, u32 lba, u32 mapping,
107 u32 z_flag, u32 e_flag, unsigned long rwb_flags)
108{
109 u32 ze;
110 __le32 mapping_le;
111
112 /*
113 * This 'mapping' is supposed to be just the LBA mapping, without
114 * any flags set, so strip the flag bits.
115 */
116 mapping = ent_lba(mapping);
117
118 ze = (z_flag << 1) + e_flag;
119 switch (ze) {
120 case 0:
121 /*
122 * We want to set neither of the Z or E flags, and
123 * in the actual layout, this means setting the bit
124 * positions of both to '1' to indicate a 'normal'
125 * map entry
126 */
127 mapping |= MAP_ENT_NORMAL;
128 break;
129 case 1:
130 mapping |= (1 << MAP_ERR_SHIFT);
131 break;
132 case 2:
133 mapping |= (1 << MAP_TRIM_SHIFT);
134 break;
135 default:
136 /*
137 * The case where Z and E are both sent in as '1' could be
138 * construed as a valid 'normal' case, but we decide not to,
139 * to avoid confusion
140 */
141 dev_err_ratelimited(to_dev(arena),
142 "Invalid use of Z and E flags\n");
143 return -EIO;
144 }
145
146 mapping_le = cpu_to_le32(mapping);
147 return __btt_map_write(arena, lba, mapping_le, rwb_flags);
148}
149
150static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping,
151 int *trim, int *error, unsigned long rwb_flags)
152{
153 int ret;
154 __le32 in;
155 u32 raw_mapping, postmap, ze, z_flag, e_flag;
156 u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);
157
158 if (unlikely(lba >= arena->external_nlba))
159 dev_err_ratelimited(to_dev(arena),
160 "%s: lba %#x out of range (max: %#x)\n",
161 __func__, lba, arena->external_nlba);
162
163 ret = arena_read_bytes(arena, ns_off, &in, MAP_ENT_SIZE, rwb_flags);
164 if (ret)
165 return ret;
166
167 raw_mapping = le32_to_cpu(in);
168
169 z_flag = ent_z_flag(raw_mapping);
170 e_flag = ent_e_flag(raw_mapping);
171 ze = (z_flag << 1) + e_flag;
172 postmap = ent_lba(raw_mapping);
173
174 /* Reuse the {z,e}_flag variables for *trim and *error */
175 z_flag = 0;
176 e_flag = 0;
177
178 switch (ze) {
179 case 0:
180 /* Initial state. Return postmap = premap */
181 *mapping = lba;
182 break;
183 case 1:
184 *mapping = postmap;
185 e_flag = 1;
186 break;
187 case 2:
188 *mapping = postmap;
189 z_flag = 1;
190 break;
191 case 3:
192 *mapping = postmap;
193 break;
194 default:
195 return -EIO;
196 }
197
198 if (trim)
199 *trim = z_flag;
200 if (error)
201 *error = e_flag;
202
203 return ret;
204}
205
206static int btt_log_group_read(struct arena_info *arena, u32 lane,
207 struct log_group *log)
208{
209 return arena_read_bytes(arena,
210 arena->logoff + (lane * LOG_GRP_SIZE), log,
211 LOG_GRP_SIZE, 0);
212}
213
214static struct dentry *debugfs_root;
215
216static void arena_debugfs_init(struct arena_info *a, struct dentry *parent,
217 int idx)
218{
219 char dirname[32];
220 struct dentry *d;
221
222 /* If for some reason, parent bttN was not created, exit */
223 if (!parent)
224 return;
225
226 snprintf(dirname, 32, "arena%d", idx);
227 d = debugfs_create_dir(dirname, parent);
228 if (IS_ERR_OR_NULL(d))
229 return;
230 a->debugfs_dir = d;
231
232 debugfs_create_x64("size", S_IRUGO, d, &a->size);
233 debugfs_create_x64("external_lba_start", S_IRUGO, d,
234 &a->external_lba_start);
235 debugfs_create_x32("internal_nlba", S_IRUGO, d, &a->internal_nlba);
236 debugfs_create_u32("internal_lbasize", S_IRUGO, d,
237 &a->internal_lbasize);
238 debugfs_create_x32("external_nlba", S_IRUGO, d, &a->external_nlba);
239 debugfs_create_u32("external_lbasize", S_IRUGO, d,
240 &a->external_lbasize);
241 debugfs_create_u32("nfree", S_IRUGO, d, &a->nfree);
242 debugfs_create_u16("version_major", S_IRUGO, d, &a->version_major);
243 debugfs_create_u16("version_minor", S_IRUGO, d, &a->version_minor);
244 debugfs_create_x64("nextoff", S_IRUGO, d, &a->nextoff);
245 debugfs_create_x64("infooff", S_IRUGO, d, &a->infooff);
246 debugfs_create_x64("dataoff", S_IRUGO, d, &a->dataoff);
247 debugfs_create_x64("mapoff", S_IRUGO, d, &a->mapoff);
248 debugfs_create_x64("logoff", S_IRUGO, d, &a->logoff);
249 debugfs_create_x64("info2off", S_IRUGO, d, &a->info2off);
250 debugfs_create_x32("flags", S_IRUGO, d, &a->flags);
251 debugfs_create_u32("log_index_0", S_IRUGO, d, &a->log_index[0]);
252 debugfs_create_u32("log_index_1", S_IRUGO, d, &a->log_index[1]);
253}
254
255static void btt_debugfs_init(struct btt *btt)
256{
257 int i = 0;
258 struct arena_info *arena;
259
260 btt->debugfs_dir = debugfs_create_dir(dev_name(&btt->nd_btt->dev),
261 debugfs_root);
262 if (IS_ERR_OR_NULL(btt->debugfs_dir))
263 return;
264
265 list_for_each_entry(arena, &btt->arena_list, list) {
266 arena_debugfs_init(arena, btt->debugfs_dir, i);
267 i++;
268 }
269}
270
271static u32 log_seq(struct log_group *log, int log_idx)
272{
273 return le32_to_cpu(log->ent[log_idx].seq);
274}

/*
 * This function accepts two log entries, and uses the
 * sequence number to find the 'older' entry.
 * For a lane that has never been used (both sequence numbers
 * zero), it bootstraps the first entry's sequence number to 1.
 * Finally, it returns which of the entries was the older one.
 *
 * TODO: the logic feels a bit kludgy; clean it up.
 */
static int btt_log_get_old(struct arena_info *a, struct log_group *log)
{
	int idx0 = a->log_index[0];
	int idx1 = a->log_index[1];
	int old;

	/*
	 * the first ever time this is seen, the entry goes into [0]
	 * the next time, the following logic works out to put this
	 * (next) entry into [1]
	 */
	if (log_seq(log, idx0) == 0) {
		log->ent[idx0].seq = cpu_to_le32(1);
		return 0;
	}

	if (log_seq(log, idx0) == log_seq(log, idx1))
		return -EINVAL;
	if (log_seq(log, idx0) + log_seq(log, idx1) > 5)
		return -EINVAL;

	if (log_seq(log, idx0) < log_seq(log, idx1)) {
		if ((log_seq(log, idx1) - log_seq(log, idx0)) == 1)
			old = 0;
		else
			old = 1;
	} else {
		if ((log_seq(log, idx0) - log_seq(log, idx1)) == 1)
			old = 1;
		else
			old = 0;
	}

	return old;
}
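
/*
 * Worked example of the rules above: sequence numbers cycle
 * 1 -> 2 -> 3 -> 1 (see LOG_SEQ_INIT and the increment in
 * btt_flog_write()), so any valid pair sums to 3, 4 or 5 -- hence
 * the 'sum > 5' check. For (seq[idx0], seq[idx1]):
 *
 *	(1, 2): 2 follows 1 in the cycle, [idx1] is newer -> old = 0
 *	(3, 1): 1 follows 3 (wraparound), [idx1] is newer -> old = 0
 *	(2, 1): 2 follows 1 in the cycle, [idx0] is newer -> old = 1
 */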

/*
 * This function copies the desired (old/new) log entry into ent if
 * it is not NULL. It returns the sub-slot number (0 or 1)
 * where the desired log entry was found. Negative return values
 * indicate errors.
 */
static int btt_log_read(struct arena_info *arena, u32 lane,
			struct log_entry *ent, int old_flag)
{
	int ret;
	int old_ent, ret_ent;
	struct log_group log;

	ret = btt_log_group_read(arena, lane, &log);
	if (ret)
		return -EIO;

	old_ent = btt_log_get_old(arena, &log);
	if (old_ent < 0 || old_ent > 1) {
		dev_err(to_dev(arena),
			"log corruption (%d): lane %d seq [%d, %d]\n",
			old_ent, lane,
			le32_to_cpu(log.ent[arena->log_index[0]].seq),
			le32_to_cpu(log.ent[arena->log_index[1]].seq));
		/* TODO set error state? */
		return -EIO;
	}

	ret_ent = (old_flag ? old_ent : (1 - old_ent));

	if (ent != NULL)
		memcpy(ent, &log.ent[arena->log_index[ret_ent]], LOG_ENT_SIZE);

	return ret_ent;
}

/*
 * This function commits a log entry to media.
 * It does _not_ prepare the freelist entry for the next write;
 * btt_flog_write is the wrapper for updating the freelist elements.
 */
static int __btt_log_write(struct arena_info *arena, u32 lane,
			u32 sub, struct log_entry *ent, unsigned long flags)
{
	int ret;
	u32 group_slot = arena->log_index[sub];
	unsigned int log_half = LOG_ENT_SIZE / 2;
	void *src = ent;
	u64 ns_off;

	ns_off = arena->logoff + (lane * LOG_GRP_SIZE) +
			(group_slot * LOG_ENT_SIZE);
	/* split the 16B write into atomic, durable halves */
	ret = arena_write_bytes(arena, ns_off, src, log_half, flags);
	if (ret)
		return ret;

	ns_off += log_half;
	src += log_half;
	return arena_write_bytes(arena, ns_off, src, log_half, flags);
}
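
/*
 * Why halves: a log_entry is 16B -- lba and old_map in the first 8B,
 * new_map and seq in the second. Because seq lands last, a power
 * failure that completes only the first half leaves the slot's old
 * sequence number intact, so btt_log_get_old() still treats the slot
 * as the older (free) one and the previously committed entry in the
 * other slot stays authoritative; the torn transaction is invisible.
 */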

static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub,
			struct log_entry *ent)
{
	int ret;

	ret = __btt_log_write(arena, lane, sub, ent, NVDIMM_IO_ATOMIC);
	if (ret)
		return ret;

	/* prepare the next free entry */
	arena->freelist[lane].sub = 1 - arena->freelist[lane].sub;
	if (++(arena->freelist[lane].seq) == 4)
		arena->freelist[lane].seq = 1;
	if (ent_e_flag(le32_to_cpu(ent->old_map)))
		arena->freelist[lane].has_err = 1;
	arena->freelist[lane].block = ent_lba(le32_to_cpu(ent->old_map));

	return ret;
}

/*
 * This function initializes the BTT map to the initial state, which is
 * all-zeroes, and indicates an identity mapping
 */
static int btt_map_init(struct arena_info *arena)
{
	int ret = -EINVAL;
	void *zerobuf;
	size_t offset = 0;
	size_t chunk_size = SZ_2M;
	size_t mapsize = arena->logoff - arena->mapoff;

	zerobuf = kzalloc(chunk_size, GFP_KERNEL);
	if (!zerobuf)
		return -ENOMEM;

	/*
	 * mapoff should always be at least 512B aligned. We rely on that to
	 * make sure rw_bytes does error clearing correctly, so make sure that
	 * is the case.
	 */
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->mapoff, 512),
		"arena->mapoff: %#llx is unaligned\n", arena->mapoff);

	while (mapsize) {
		size_t size = min(mapsize, chunk_size);

		dev_WARN_ONCE(to_dev(arena), size < 512,
			"chunk size: %#zx is unaligned\n", size);
		ret = arena_write_bytes(arena, arena->mapoff + offset, zerobuf,
				size, 0);
		if (ret)
			goto free;

		offset += size;
		mapsize -= size;
		cond_resched();
	}

 free:
	kfree(zerobuf);
	return ret;
}

/*
 * This function initializes the BTT log with 'fake' entries pointing
 * to the initial reserved set of blocks as being free
 */
static int btt_log_init(struct arena_info *arena)
{
	size_t logsize = arena->info2off - arena->logoff;
	size_t chunk_size = SZ_4K, offset = 0;
	struct log_entry ent;
	void *zerobuf;
	int ret;
	u32 i;

	zerobuf = kzalloc(chunk_size, GFP_KERNEL);
	if (!zerobuf)
		return -ENOMEM;
	/*
	 * logoff should always be at least 512B aligned. We rely on that to
	 * make sure rw_bytes does error clearing correctly, so make sure that
	 * is the case.
	 */
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->logoff, 512),
		"arena->logoff: %#llx is unaligned\n", arena->logoff);

	while (logsize) {
		size_t size = min(logsize, chunk_size);

		dev_WARN_ONCE(to_dev(arena), size < 512,
			"chunk size: %#zx is unaligned\n", size);
		ret = arena_write_bytes(arena, arena->logoff + offset, zerobuf,
				size, 0);
		if (ret)
			goto free;

		offset += size;
		logsize -= size;
		cond_resched();
	}

	for (i = 0; i < arena->nfree; i++) {
		ent.lba = cpu_to_le32(i);
		ent.old_map = cpu_to_le32(arena->external_nlba + i);
		ent.new_map = cpu_to_le32(arena->external_nlba + i);
		ent.seq = cpu_to_le32(LOG_SEQ_INIT);
		ret = __btt_log_write(arena, i, 0, &ent, 0);
		if (ret)
			goto free;
	}

 free:
	kfree(zerobuf);
	return ret;
}

static u64 to_namespace_offset(struct arena_info *arena, u64 lba)
{
	return arena->dataoff + ((u64)lba * arena->internal_lbasize);
}

static int arena_clear_freelist_error(struct arena_info *arena, u32 lane)
{
	int ret = 0;

	if (arena->freelist[lane].has_err) {
		void *zero_page = page_address(ZERO_PAGE(0));
		u32 lba = arena->freelist[lane].block;
		u64 nsoff = to_namespace_offset(arena, lba);
		unsigned long len = arena->sector_size;

		mutex_lock(&arena->err_lock);

		while (len) {
			unsigned long chunk = min(len, PAGE_SIZE);

			ret = arena_write_bytes(arena, nsoff, zero_page,
				chunk, 0);
			if (ret)
				break;
			len -= chunk;
			nsoff += chunk;
			if (len == 0)
				arena->freelist[lane].has_err = 0;
		}
		mutex_unlock(&arena->err_lock);
	}
	return ret;
}
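
/*
 * The zeroing above is what actually clears a known media error on
 * the lane's free block: per the alignment comments in btt_map_init()
 * and btt_log_init(), rw_bytes performs error clearing for suitably
 * aligned writes. has_err is only dropped once the entire block has
 * been zeroed, so a partial failure leaves the lane flagged and the
 * clear is retried on the next write through this lane.
 */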

static int btt_freelist_init(struct arena_info *arena)
{
	int new, ret;
	struct log_entry log_new;
	u32 i, map_entry, log_oldmap, log_newmap;

	arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry),
					GFP_KERNEL);
	if (!arena->freelist)
		return -ENOMEM;

	for (i = 0; i < arena->nfree; i++) {
		new = btt_log_read(arena, i, &log_new, LOG_NEW_ENT);
		if (new < 0)
			return new;

		/* old and new map entries with any flags stripped out */
		log_oldmap = ent_lba(le32_to_cpu(log_new.old_map));
		log_newmap = ent_lba(le32_to_cpu(log_new.new_map));

		/* sub points to the next one to be overwritten */
		arena->freelist[i].sub = 1 - new;
		arena->freelist[i].seq = nd_inc_seq(le32_to_cpu(log_new.seq));
		arena->freelist[i].block = log_oldmap;

		/*
		 * FIXME: if error clearing fails during init, we want to make
		 * the BTT read-only
		 */
		if (ent_e_flag(le32_to_cpu(log_new.old_map)) &&
		    !ent_normal(le32_to_cpu(log_new.old_map))) {
			arena->freelist[i].has_err = 1;
			ret = arena_clear_freelist_error(arena, i);
			if (ret)
				dev_err_ratelimited(to_dev(arena),
					"Unable to clear known errors\n");
		}

		/* This implies a newly created or untouched flog entry */
		if (log_oldmap == log_newmap)
			continue;

		/* Check if map recovery is needed */
		ret = btt_map_read(arena, le32_to_cpu(log_new.lba), &map_entry,
				NULL, NULL, 0);
		if (ret)
			return ret;

		/*
		 * The map_entry from btt_map_read is stripped of any flag
		 * bits, so use the stripped out versions from the log as well
		 * for testing whether recovery is needed. For restoration,
		 * use the 'raw' version of the log entries as that captured
		 * what we were going to write originally.
		 */
		if ((log_newmap != map_entry) && (log_oldmap == map_entry)) {
			/*
			 * Last transaction wrote the flog, but wasn't able
			 * to complete the map write. So fix up the map.
			 */
			ret = btt_map_write(arena, le32_to_cpu(log_new.lba),
					le32_to_cpu(log_new.new_map), 0, 0, 0);
			if (ret)
				return ret;
		}
	}

	return 0;
}
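
/*
 * Recovery example for the fixup above (values illustrative): a crash
 * landed after the flog commit but before the map update. The lane's
 * 'new' log entry reads (lba = 7, old_map = 100, new_map = 200) while
 * the map still says 7 -> 100. Since map_entry equals log_oldmap but
 * not log_newmap, the map write is replayed as 7 -> 200, completing
 * the interrupted transaction; had the map already read 200, nothing
 * would be done.
 */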

static bool ent_is_padding(struct log_entry *ent)
{
	return (ent->lba == 0) && (ent->old_map == 0) && (ent->new_map == 0)
		&& (ent->seq == 0);
}

/*
 * Detecting valid log indices: We read a log group (see the comments in btt.h
 * for a description of a 'log_group' and its 'slots'), and iterate over its
 * four slots. We expect that a padding slot will be all-zeroes, and use this
 * to detect a padding slot vs. an actual entry.
 *
 * If a log_group is in the initial state, i.e. hasn't been used since the
 * creation of this BTT layout, it will have three of the four slots with
 * zeroes. We skip over these log_groups for the detection of log_index. If
 * all log_groups are in the initial state (i.e. the BTT has never been
 * written to), it is safe to assume the 'new format' of log entries in slots
 * (0, 1).
 */
static int log_set_indices(struct arena_info *arena)
{
	bool idx_set = false, initial_state = true;
	int ret, log_index[2] = {-1, -1};
	u32 i, j, next_idx = 0;
	struct log_group log;
	u32 pad_count = 0;

	for (i = 0; i < arena->nfree; i++) {
		ret = btt_log_group_read(arena, i, &log);
		if (ret < 0)
			return ret;

		for (j = 0; j < 4; j++) {
			if (!idx_set) {
				if (ent_is_padding(&log.ent[j])) {
					pad_count++;
					continue;
				} else {
					/* Skip if index has been recorded */
					if ((next_idx == 1) &&
						(j == log_index[0]))
						continue;
					/* valid entry, record index */
					log_index[next_idx] = j;
					next_idx++;
				}
				if (next_idx == 2) {
					/* two valid entries found */
					idx_set = true;
				} else if (next_idx > 2) {
					/* too many valid indices */
					return -ENXIO;
				}
			} else {
				/*
				 * once the indices have been set, just verify
				 * that all subsequent log groups are either in
				 * their initial state or follow the same
				 * indices.
				 */
				if (j == log_index[0]) {
					/* entry must be 'valid' */
					if (ent_is_padding(&log.ent[j]))
						return -ENXIO;
				} else if (j == log_index[1]) {
					/*
					 * log_index[1] can be padding if the
					 * lane never got used and it is still
					 * in the initial state (three 'padding'
					 * entries)
					 */
				} else {
					/* entry must be invalid (padding) */
					if (!ent_is_padding(&log.ent[j]))
						return -ENXIO;
				}
			}
		}
		/*
		 * If any of the log_groups have more than one valid,
		 * non-padding entry, then we are no longer in the
		 * initial state
		 */
		if (pad_count < 3)
			initial_state = false;
		pad_count = 0;
	}

	if (!initial_state && !idx_set)
		return -ENXIO;

	/*
	 * If all the entries in the log were in the initial state,
	 * assume new padding scheme
	 */
	if (initial_state)
		log_index[1] = 1;

	/*
	 * Only allow the known permutations of log/padding indices,
	 * i.e. (0, 1), and (0, 2)
	 */
	if (log_index[0] != 0 ||
			(log_index[1] != 1 && log_index[1] != 2)) {
		dev_err(to_dev(arena), "Found an unknown padding scheme\n");
		return -ENXIO;
	}

	arena->log_index[0] = log_index[0];
	arena->log_index[1] = log_index[1];
	dev_dbg(to_dev(arena), "log_index_0 = %d\n", log_index[0]);
	dev_dbg(to_dev(arena), "log_index_1 = %d\n", log_index[1]);
	return 0;
}
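
/*
 * Background on the two accepted permutations: older BTT
 * implementations padded each 16B log entry out to 32B, leaving the
 * two live entries of a group in slots (0, 2) with padding in (1, 3);
 * the current layout packs them into (0, 1) with padding in (2, 3).
 * log_set_indices() lets either media layout be mounted -- see the
 * log_group description in btt.h for the on-media diagrams.
 */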

static int btt_rtt_init(struct arena_info *arena)
{
	arena->rtt = kcalloc(arena->nfree, sizeof(u32), GFP_KERNEL);
	if (arena->rtt == NULL)
		return -ENOMEM;

	return 0;
}

static int btt_maplocks_init(struct arena_info *arena)
{
	u32 i;

	arena->map_locks = kcalloc(arena->nfree, sizeof(struct aligned_lock),
				GFP_KERNEL);
	if (!arena->map_locks)
		return -ENOMEM;

	for (i = 0; i < arena->nfree; i++)
		spin_lock_init(&arena->map_locks[i].lock);

	return 0;
}

static struct arena_info *alloc_arena(struct btt *btt, size_t size,
				size_t start, size_t arena_off)
{
	struct arena_info *arena;
	u64 logsize, mapsize, datasize;
	u64 available = size;

	arena = kzalloc(sizeof(struct arena_info), GFP_KERNEL);
	if (!arena)
		return NULL;
	arena->nd_btt = btt->nd_btt;
	arena->sector_size = btt->sector_size;
	mutex_init(&arena->err_lock);

	if (!size)
		return arena;

	arena->size = size;
	arena->external_lba_start = start;
	arena->external_lbasize = btt->lbasize;
	arena->internal_lbasize = roundup(arena->external_lbasize,
					INT_LBASIZE_ALIGNMENT);
	arena->nfree = BTT_DEFAULT_NFREE;
	arena->version_major = btt->nd_btt->version_major;
	arena->version_minor = btt->nd_btt->version_minor;

	if (available % BTT_PG_SIZE)
		available -= (available % BTT_PG_SIZE);

	/* Two pages are reserved for the super block and its copy */
	available -= 2 * BTT_PG_SIZE;

	/* The log takes a fixed amount of space based on nfree */
	logsize = roundup(arena->nfree * LOG_GRP_SIZE, BTT_PG_SIZE);
	available -= logsize;

	/* Calculate optimal split between map and data area */
	arena->internal_nlba = div_u64(available - BTT_PG_SIZE,
			arena->internal_lbasize + MAP_ENT_SIZE);
	arena->external_nlba = arena->internal_nlba - arena->nfree;

	mapsize = roundup((arena->external_nlba * MAP_ENT_SIZE), BTT_PG_SIZE);
	datasize = available - mapsize;

	/* 'Absolute' values, relative to start of storage space */
	arena->infooff = arena_off;
	arena->dataoff = arena->infooff + BTT_PG_SIZE;
	arena->mapoff = arena->dataoff + datasize;
	arena->logoff = arena->mapoff + mapsize;
	arena->info2off = arena->logoff + logsize;

	/* Default log indices are (0,1) */
	arena->log_index[0] = 0;
	arena->log_index[1] = 1;
	return arena;
}
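
/*
 * Sizing example (assuming BTT_PG_SIZE = 4K, MAP_ENT_SIZE = 4,
 * LOG_GRP_SIZE = 64 and nfree = 256): the split above solves
 *
 *	available - BTT_PG_SIZE >=
 *		internal_nlba * (internal_lbasize + MAP_ENT_SIZE)
 *
 * so every internal block gets both a data slot and a 4B map entry,
 * with one page of slack absorbing the map roundup. For a 16M arena
 * with 512B blocks: available = 16M - 8K (infoblocks) - 16K (log)
 * = 16752640, internal_nlba = (16752640 - 4096) / 516 = 32458,
 * external_nlba = 32458 - 256 = 32202, mapsize rounds up to 131072,
 * and the remaining datasize of 16621568 holds 32458 * 512 =
 * 16618496 bytes of internal blocks.
 */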

static void free_arenas(struct btt *btt)
{
	struct arena_info *arena, *next;

	list_for_each_entry_safe(arena, next, &btt->arena_list, list) {
		list_del(&arena->list);
		kfree(arena->rtt);
		kfree(arena->map_locks);
		kfree(arena->freelist);
		debugfs_remove_recursive(arena->debugfs_dir);
		kfree(arena);
	}
}

/*
 * This function reads an existing valid btt superblock and
 * populates the corresponding arena_info struct
 */
static void parse_arena_meta(struct arena_info *arena, struct btt_sb *super,
				u64 arena_off)
{
	arena->internal_nlba = le32_to_cpu(super->internal_nlba);
	arena->internal_lbasize = le32_to_cpu(super->internal_lbasize);
	arena->external_nlba = le32_to_cpu(super->external_nlba);
	arena->external_lbasize = le32_to_cpu(super->external_lbasize);
	arena->nfree = le32_to_cpu(super->nfree);
	arena->version_major = le16_to_cpu(super->version_major);
	arena->version_minor = le16_to_cpu(super->version_minor);

	arena->nextoff = (super->nextoff == 0) ? 0 : (arena_off +
			le64_to_cpu(super->nextoff));
	arena->infooff = arena_off;
	arena->dataoff = arena_off + le64_to_cpu(super->dataoff);
	arena->mapoff = arena_off + le64_to_cpu(super->mapoff);
	arena->logoff = arena_off + le64_to_cpu(super->logoff);
	arena->info2off = arena_off + le64_to_cpu(super->info2off);

	arena->size = (le64_to_cpu(super->nextoff) > 0)
		? (le64_to_cpu(super->nextoff))
		: (arena->info2off - arena->infooff + BTT_PG_SIZE);

	arena->flags = le32_to_cpu(super->flags);
}

static int discover_arenas(struct btt *btt)
{
	int ret = 0;
	struct arena_info *arena;
	struct btt_sb *super;
	size_t remaining = btt->rawsize;
	u64 cur_nlba = 0;
	size_t cur_off = 0;
	int num_arenas = 0;

	super = kzalloc(sizeof(*super), GFP_KERNEL);
	if (!super)
		return -ENOMEM;

	while (remaining) {
		/* Alloc memory for arena */
		arena = alloc_arena(btt, 0, 0, 0);
		if (!arena) {
			ret = -ENOMEM;
			goto out_super;
		}

		arena->infooff = cur_off;
		ret = btt_info_read(arena, super);
		if (ret)
			goto out;

		if (!nd_btt_arena_is_valid(btt->nd_btt, super)) {
			if (remaining == btt->rawsize) {
				btt->init_state = INIT_NOTFOUND;
				dev_info(to_dev(arena), "No existing arenas\n");
				goto out;
			} else {
				dev_err(to_dev(arena),
						"Found corrupted metadata!\n");
				ret = -ENODEV;
				goto out;
			}
		}

		arena->external_lba_start = cur_nlba;
		parse_arena_meta(arena, super, cur_off);

		ret = log_set_indices(arena);
		if (ret) {
			dev_err(to_dev(arena),
				"Unable to deduce log/padding indices\n");
			goto out;
		}

		ret = btt_freelist_init(arena);
		if (ret)
			goto out;

		ret = btt_rtt_init(arena);
		if (ret)
			goto out;

		ret = btt_maplocks_init(arena);
		if (ret)
			goto out;

		list_add_tail(&arena->list, &btt->arena_list);

		remaining -= arena->size;
		cur_off += arena->size;
		cur_nlba += arena->external_nlba;
		num_arenas++;

		if (arena->nextoff == 0)
			break;
	}
	btt->num_arenas = num_arenas;
	btt->nlba = cur_nlba;
	btt->init_state = INIT_READY;

	kfree(super);
	return ret;

 out:
	kfree(arena);
	free_arenas(btt);
 out_super:
	kfree(super);
	return ret;
}

static int create_arenas(struct btt *btt)
{
	size_t remaining = btt->rawsize;
	size_t cur_off = 0;

	while (remaining) {
		struct arena_info *arena;
		size_t arena_size = min_t(u64, ARENA_MAX_SIZE, remaining);

		remaining -= arena_size;
		if (arena_size < ARENA_MIN_SIZE)
			break;

		arena = alloc_arena(btt, arena_size, btt->nlba, cur_off);
		if (!arena) {
			free_arenas(btt);
			return -ENOMEM;
		}
		btt->nlba += arena->external_nlba;
		if (remaining >= ARENA_MIN_SIZE)
			arena->nextoff = arena->size;
		else
			arena->nextoff = 0;
		cur_off += arena_size;
		list_add_tail(&arena->list, &btt->arena_list);
	}

	return 0;
}

/*
 * This function completes arena initialization by writing
 * all the metadata.
 * It is only called for an uninitialized arena when a write
 * to that arena occurs for the first time.
 */
static int btt_arena_write_layout(struct arena_info *arena)
{
	int ret;
	u64 sum;
	struct btt_sb *super;
	struct nd_btt *nd_btt = arena->nd_btt;
	const uuid_t *parent_uuid = nd_dev_to_uuid(&nd_btt->ndns->dev);

	ret = btt_map_init(arena);
	if (ret)
		return ret;

	ret = btt_log_init(arena);
	if (ret)
		return ret;

	super = kzalloc(sizeof(struct btt_sb), GFP_NOIO);
	if (!super)
		return -ENOMEM;

	strncpy(super->signature, BTT_SIG, BTT_SIG_LEN);
	export_uuid(super->uuid, nd_btt->uuid);
	export_uuid(super->parent_uuid, parent_uuid);
	super->flags = cpu_to_le32(arena->flags);
	super->version_major = cpu_to_le16(arena->version_major);
	super->version_minor = cpu_to_le16(arena->version_minor);
	super->external_lbasize = cpu_to_le32(arena->external_lbasize);
	super->external_nlba = cpu_to_le32(arena->external_nlba);
	super->internal_lbasize = cpu_to_le32(arena->internal_lbasize);
	super->internal_nlba = cpu_to_le32(arena->internal_nlba);
	super->nfree = cpu_to_le32(arena->nfree);
	super->infosize = cpu_to_le32(sizeof(struct btt_sb));
	super->nextoff = cpu_to_le64(arena->nextoff);
	/*
	 * Subtract arena->infooff (arena start) so numbers are relative
	 * to 'this' arena
	 */
	super->dataoff = cpu_to_le64(arena->dataoff - arena->infooff);
	super->mapoff = cpu_to_le64(arena->mapoff - arena->infooff);
	super->logoff = cpu_to_le64(arena->logoff - arena->infooff);
	super->info2off = cpu_to_le64(arena->info2off - arena->infooff);

	super->flags = 0;
	sum = nd_sb_checksum((struct nd_gen_sb *) super);
	super->checksum = cpu_to_le64(sum);

	ret = btt_info_write(arena, super);

	kfree(super);
	return ret;
}

/*
 * This function completes the initialization for the BTT namespace
 * such that it is ready to accept IOs
 */
static int btt_meta_init(struct btt *btt)
{
	int ret = 0;
	struct arena_info *arena;

	mutex_lock(&btt->init_lock);
	list_for_each_entry(arena, &btt->arena_list, list) {
		ret = btt_arena_write_layout(arena);
		if (ret)
			goto unlock;

		ret = btt_freelist_init(arena);
		if (ret)
			goto unlock;

		ret = btt_rtt_init(arena);
		if (ret)
			goto unlock;

		ret = btt_maplocks_init(arena);
		if (ret)
			goto unlock;
	}

	btt->init_state = INIT_READY;

 unlock:
	mutex_unlock(&btt->init_lock);
	return ret;
}

static u32 btt_meta_size(struct btt *btt)
{
	return btt->lbasize - btt->sector_size;
}
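
/*
 * Example: a namespace formatted with 520B logical blocks is exposed
 * by the BTT as 512B sectors, so btt_meta_size() = 8 bytes of
 * per-sector protection-information metadata; plain 512B or 4096B
 * lbasizes yield 0, and btt_blk_init() skips integrity registration.
 */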

/*
 * This function calculates the arena in which the given LBA lies
 * by doing a linear walk. This is acceptable since we expect only
 * a few arenas. If we have backing devices that get much larger,
 * we can construct a balanced binary tree of arenas at init time
 * so that this range search becomes faster.
 */
static int lba_to_arena(struct btt *btt, sector_t sector, __u32 *premap,
			struct arena_info **arena)
{
	struct arena_info *arena_list;
	__u64 lba = div_u64(sector << SECTOR_SHIFT, btt->sector_size);

	list_for_each_entry(arena_list, &btt->arena_list, list) {
		if (lba < arena_list->external_nlba) {
			*arena = arena_list;
			*premap = lba;
			return 0;
		}
		lba -= arena_list->external_nlba;
	}

	return -EIO;
}
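
/*
 * Example: with 4096B BTT sectors, bio sector 24 (in 512B units) maps
 * to external lba = (24 << 9) / 4096 = 3; if the first arena holds
 * two external blocks, the walk subtracts 2 and resolves to
 * premap = 1 in the second arena.
 */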

/*
 * The following (lock_map, unlock_map) are mostly just to improve
 * readability, since they index into an array of locks
 */
static void lock_map(struct arena_info *arena, u32 premap)
		__acquires(&arena->map_locks[idx].lock)
{
	u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;

	spin_lock(&arena->map_locks[idx].lock);
}

static void unlock_map(struct arena_info *arena, u32 premap)
		__releases(&arena->map_locks[idx].lock)
{
	u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;

	spin_unlock(&arena->map_locks[idx].lock);
}
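
/*
 * Lock striping: map entries are hashed to locks by cache line, so
 * all premap LBAs whose 4B map entries share a line contend on one
 * lock. Assuming 64B cache lines, premaps 0..15 take lock 0, 16..31
 * take lock 1, and so on, wrapping modulo arena->nfree. This
 * serializes the map read/flog/map write transaction in
 * btt_write_pg() for neighboring entries without a global lock.
 */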

static int btt_data_read(struct arena_info *arena, struct page *page,
			unsigned int off, u32 lba, u32 len)
{
	int ret;
	u64 nsoff = to_namespace_offset(arena, lba);
	void *mem = kmap_atomic(page);

	ret = arena_read_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
	kunmap_atomic(mem);

	return ret;
}

static int btt_data_write(struct arena_info *arena, u32 lba,
			struct page *page, unsigned int off, u32 len)
{
	int ret;
	u64 nsoff = to_namespace_offset(arena, lba);
	void *mem = kmap_atomic(page);

	ret = arena_write_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
	kunmap_atomic(mem);

	return ret;
}

static void zero_fill_data(struct page *page, unsigned int off, u32 len)
{
	void *mem = kmap_atomic(page);

	memset(mem + off, 0, len);
	kunmap_atomic(mem);
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
			struct arena_info *arena, u32 postmap, int rw)
{
	unsigned int len = btt_meta_size(btt);
	u64 meta_nsoff;
	int ret = 0;

	if (bip == NULL)
		return 0;

	meta_nsoff = to_namespace_offset(arena, postmap) + btt->sector_size;

	while (len) {
		unsigned int cur_len;
		struct bio_vec bv;
		void *mem;

		bv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
		/*
		 * The 'bv' obtained from bvec_iter_bvec has its .bv_len and
		 * .bv_offset already adjusted for iter->bi_bvec_done, and we
		 * can use those directly
		 */

		cur_len = min(len, bv.bv_len);
		mem = bvec_kmap_local(&bv);
		if (rw)
			ret = arena_write_bytes(arena, meta_nsoff, mem, cur_len,
					NVDIMM_IO_ATOMIC);
		else
			ret = arena_read_bytes(arena, meta_nsoff, mem, cur_len,
					NVDIMM_IO_ATOMIC);

		kunmap_local(mem);
		if (ret)
			return ret;

		len -= cur_len;
		meta_nsoff += cur_len;
		if (!bvec_iter_advance(bip->bip_vec, &bip->bip_iter, cur_len))
			return -EIO;
	}

	return ret;
}

#else /* CONFIG_BLK_DEV_INTEGRITY */
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
			struct arena_info *arena, u32 postmap, int rw)
{
	return 0;
}
#endif
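
/*
 * On-media layout implied by btt_rw_integrity(): each internal block
 * stores its sector data first and appends the integrity metadata at
 * offset btt->sector_size within the same internal_lbasize slot
 * (meta_nsoff above), so data and its protection information travel
 * together whenever a block is remapped.
 */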

static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
			struct page *page, unsigned int off, sector_t sector,
			unsigned int len)
{
	int ret = 0;
	int t_flag, e_flag;
	struct arena_info *arena = NULL;
	u32 lane = 0, premap, postmap;

	while (len) {
		u32 cur_len;

		lane = nd_region_acquire_lane(btt->nd_region);

		ret = lba_to_arena(btt, sector, &premap, &arena);
		if (ret)
			goto out_lane;

		cur_len = min(btt->sector_size, len);

		ret = btt_map_read(arena, premap, &postmap, &t_flag, &e_flag,
				NVDIMM_IO_ATOMIC);
		if (ret)
			goto out_lane;

		/*
		 * We loop to make sure that the post map LBA didn't change
		 * from under us between writing the RTT and doing the actual
		 * read.
		 */
		while (1) {
			u32 new_map;
			int new_t, new_e;

			if (t_flag) {
				zero_fill_data(page, off, cur_len);
				goto out_lane;
			}

			if (e_flag) {
				ret = -EIO;
				goto out_lane;
			}

			arena->rtt[lane] = RTT_VALID | postmap;
			/*
			 * Barrier to make sure this write is not reordered
			 * to do the verification map_read before the RTT store
			 */
			barrier();

			ret = btt_map_read(arena, premap, &new_map, &new_t,
					&new_e, NVDIMM_IO_ATOMIC);
			if (ret)
				goto out_rtt;

			if ((postmap == new_map) && (t_flag == new_t) &&
					(e_flag == new_e))
				break;

			postmap = new_map;
			t_flag = new_t;
			e_flag = new_e;
		}

		ret = btt_data_read(arena, page, off, postmap, cur_len);
		if (ret) {
			/* Media error - set the e_flag */
			if (btt_map_write(arena, premap, postmap, 0, 1, NVDIMM_IO_ATOMIC))
				dev_warn_ratelimited(to_dev(arena),
					"Error persistently tracking bad blocks at %#x\n",
					premap);
			goto out_rtt;
		}

		if (bip) {
			ret = btt_rw_integrity(btt, bip, arena, postmap, READ);
			if (ret)
				goto out_rtt;
		}

		arena->rtt[lane] = RTT_INVALID;
		nd_region_release_lane(btt->nd_region, lane);

		len -= cur_len;
		off += cur_len;
		sector += btt->sector_size >> SECTOR_SHIFT;
	}

	return 0;

 out_rtt:
	arena->rtt[lane] = RTT_INVALID;
 out_lane:
	nd_region_release_lane(btt->nd_region, lane);
	return ret;
}
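
/*
 * The race the RTT closes: a writer about to reuse free block F scans
 * the RTT (see btt_write_pg()) and spins while any lane advertises
 * RTT_VALID | F. A reader therefore (1) reads the map, (2) publishes
 * its postmap in the RTT, (3) re-reads the map. If both reads agree,
 * any writer retiring that block afterwards must observe the RTT
 * entry and wait; if they differ, the reader chases the new postmap.
 * The barrier() keeps step (3) from being reordered before step (2).
 */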

/*
 * Normally, arena_{read,write}_bytes will take care of the initial offset
 * adjustment, but in the case of btt_is_badblock, where we query is_bad_pmem,
 * we need the final, raw namespace offset here
 */
static bool btt_is_badblock(struct btt *btt, struct arena_info *arena,
		u32 postmap)
{
	u64 nsoff = adjust_initial_offset(arena->nd_btt,
			to_namespace_offset(arena, postmap));
	sector_t phys_sector = nsoff >> 9;

	return is_bad_pmem(btt->phys_bb, phys_sector, arena->internal_lbasize);
}

static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
			sector_t sector, struct page *page, unsigned int off,
			unsigned int len)
{
	int ret = 0;
	struct arena_info *arena = NULL;
	u32 premap = 0, old_postmap, new_postmap, lane = 0, i;
	struct log_entry log;
	int sub;

	while (len) {
		u32 cur_len;
		int e_flag;

 retry:
		lane = nd_region_acquire_lane(btt->nd_region);

		ret = lba_to_arena(btt, sector, &premap, &arena);
		if (ret)
			goto out_lane;
		cur_len = min(btt->sector_size, len);

		if ((arena->flags & IB_FLAG_ERROR_MASK) != 0) {
			ret = -EIO;
			goto out_lane;
		}

		if (btt_is_badblock(btt, arena, arena->freelist[lane].block))
			arena->freelist[lane].has_err = 1;

		if (mutex_is_locked(&arena->err_lock)
				|| arena->freelist[lane].has_err) {
			nd_region_release_lane(btt->nd_region, lane);

			ret = arena_clear_freelist_error(arena, lane);
			if (ret)
				return ret;

			/* OK to acquire a different lane/free block */
			goto retry;
		}

		new_postmap = arena->freelist[lane].block;

		/* Wait if the new block is being read from */
		for (i = 0; i < arena->nfree; i++)
			while (arena->rtt[i] == (RTT_VALID | new_postmap))
				cpu_relax();

		if (new_postmap >= arena->internal_nlba) {
			ret = -EIO;
			goto out_lane;
		}

		ret = btt_data_write(arena, new_postmap, page, off, cur_len);
		if (ret)
			goto out_lane;

		if (bip) {
			ret = btt_rw_integrity(btt, bip, arena, new_postmap,
						WRITE);
			if (ret)
				goto out_lane;
		}

		lock_map(arena, premap);
		ret = btt_map_read(arena, premap, &old_postmap, NULL, &e_flag,
				NVDIMM_IO_ATOMIC);
		if (ret)
			goto out_map;
		if (old_postmap >= arena->internal_nlba) {
			ret = -EIO;
			goto out_map;
		}
		if (e_flag)
			set_e_flag(old_postmap);

		log.lba = cpu_to_le32(premap);
		log.old_map = cpu_to_le32(old_postmap);
		log.new_map = cpu_to_le32(new_postmap);
		log.seq = cpu_to_le32(arena->freelist[lane].seq);
		sub = arena->freelist[lane].sub;
		ret = btt_flog_write(arena, lane, sub, &log);
		if (ret)
			goto out_map;

		ret = btt_map_write(arena, premap, new_postmap, 0, 0,
				NVDIMM_IO_ATOMIC);
		if (ret)
			goto out_map;

		unlock_map(arena, premap);
		nd_region_release_lane(btt->nd_region, lane);

		if (e_flag) {
			ret = arena_clear_freelist_error(arena, lane);
			if (ret)
				return ret;
		}

		len -= cur_len;
		off += cur_len;
		sector += btt->sector_size >> SECTOR_SHIFT;
	}

	return 0;

 out_map:
	unlock_map(arena, premap);
 out_lane:
	nd_region_release_lane(btt->nd_region, lane);
	return ret;
}
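
/*
 * Ordering that makes each sector write atomic: (1) data lands in the
 * lane's free block, which no live map entry references; (2) the flog
 * entry (premap, old, new, seq) is committed; (3) the map entry flips
 * to the new block. A crash before (2) leaves the old data mapped; a
 * crash between (2) and (3) is healed by the map replay in
 * btt_freelist_init(); after (3) the write is fully visible and the
 * old block becomes the lane's next free block.
 */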

static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
			struct page *page, unsigned int len, unsigned int off,
			enum req_op op, sector_t sector)
{
	int ret;

	if (!op_is_write(op)) {
		ret = btt_read_pg(btt, bip, page, off, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		ret = btt_write_pg(btt, bip, sector, page, off, len);
	}

	return ret;
}

static void btt_submit_bio(struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct btt *btt = bio->bi_bdev->bd_disk->private_data;
	struct bvec_iter iter;
	unsigned long start;
	struct bio_vec bvec;
	int err = 0;
	bool do_acct;

	if (!bio_integrity_prep(bio))
		return;

	do_acct = blk_queue_io_stat(bio->bi_bdev->bd_disk->queue);
	if (do_acct)
		start = bio_start_io_acct(bio);
	bio_for_each_segment(bvec, bio, iter) {
		unsigned int len = bvec.bv_len;

		if (len > PAGE_SIZE || len < btt->sector_size ||
				len % btt->sector_size) {
			dev_err_ratelimited(&btt->nd_btt->dev,
				"unaligned bio segment (len: %d)\n", len);
			bio->bi_status = BLK_STS_IOERR;
			break;
		}

		err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset,
				bio_op(bio), iter.bi_sector);
		if (err) {
			dev_err(&btt->nd_btt->dev,
				"io error in %s sector %lld, len %d\n",
				(op_is_write(bio_op(bio))) ? "WRITE" : "READ",
				(unsigned long long) iter.bi_sector, len);
			bio->bi_status = errno_to_blk_status(err);
			break;
		}
	}
	if (do_acct)
		bio_end_io_acct(bio, start);

	bio_endio(bio);
}

static int btt_rw_page(struct block_device *bdev, sector_t sector,
		struct page *page, enum req_op op)
{
	struct btt *btt = bdev->bd_disk->private_data;
	int rc;

	rc = btt_do_bvec(btt, NULL, page, thp_size(page), 0, op, sector);
	if (rc == 0)
		page_endio(page, op_is_write(op), 0);

	return rc;
}

static int btt_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	return 0;
}
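
/*
 * Geometry math: 64 heads * 32 sectors/track = 2048 sectors per
 * cylinder, hence cylinders = capacity >> 11. The values exist only
 * to satisfy legacy HDIO_GETGEO callers; nothing in the BTT depends
 * on them.
 */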

static const struct block_device_operations btt_fops = {
	.owner =		THIS_MODULE,
	.submit_bio =		btt_submit_bio,
	.rw_page =		btt_rw_page,
	.getgeo =		btt_getgeo,
};

static int btt_blk_init(struct btt *btt)
{
	struct nd_btt *nd_btt = btt->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;
	int rc = -ENOMEM;

	btt->btt_disk = blk_alloc_disk(NUMA_NO_NODE);
	if (!btt->btt_disk)
		return -ENOMEM;

	nvdimm_namespace_disk_name(ndns, btt->btt_disk->disk_name);
	btt->btt_disk->first_minor = 0;
	btt->btt_disk->fops = &btt_fops;
	btt->btt_disk->private_data = btt;

	blk_queue_logical_block_size(btt->btt_disk->queue, btt->sector_size);
	blk_queue_max_hw_sectors(btt->btt_disk->queue, UINT_MAX);
	blk_queue_flag_set(QUEUE_FLAG_NONROT, btt->btt_disk->queue);

	if (btt_meta_size(btt)) {
		rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt));
		if (rc)
			goto out_cleanup_disk;
	}

	set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
	rc = device_add_disk(&btt->nd_btt->dev, btt->btt_disk, NULL);
	if (rc)
		goto out_cleanup_disk;

	btt->nd_btt->size = btt->nlba * (u64)btt->sector_size;
	nvdimm_check_and_set_ro(btt->btt_disk);

	return 0;

out_cleanup_disk:
	put_disk(btt->btt_disk);
	return rc;
}

static void btt_blk_cleanup(struct btt *btt)
{
	del_gendisk(btt->btt_disk);
	put_disk(btt->btt_disk);
}

/**
 * btt_init - initialize a block translation table for the given device
 * @nd_btt: device with BTT geometry and backing device info
 * @rawsize: raw size in bytes of the backing device
 * @lbasize: lba size of the backing device
 * @uuid: A uuid for the backing device - this is stored on media
 * @nd_region: parent region of the backing namespace, used for lane
 *		allocation
 *
 * Initialize a Block Translation Table on a backing device to provide
 * single sector power fail atomicity.
 *
 * Context:
 * Might sleep.
 *
 * Returns:
 * Pointer to a new struct btt on success, NULL on failure.
 */
static struct btt *btt_init(struct nd_btt *nd_btt, unsigned long long rawsize,
			u32 lbasize, uuid_t *uuid,
			struct nd_region *nd_region)
{
	int ret;
	struct btt *btt;
	struct nd_namespace_io *nsio;
	struct device *dev = &nd_btt->dev;

	btt = devm_kzalloc(dev, sizeof(struct btt), GFP_KERNEL);
	if (!btt)
		return NULL;

	btt->nd_btt = nd_btt;
	btt->rawsize = rawsize;
	btt->lbasize = lbasize;
	btt->sector_size = ((lbasize >= 4096) ? 4096 : 512);
	INIT_LIST_HEAD(&btt->arena_list);
	mutex_init(&btt->init_lock);
	btt->nd_region = nd_region;
	nsio = to_nd_namespace_io(&nd_btt->ndns->dev);
	btt->phys_bb = &nsio->bb;

	ret = discover_arenas(btt);
	if (ret) {
		dev_err(dev, "init: error in arena_discover: %d\n", ret);
		return NULL;
	}

	if (btt->init_state != INIT_READY && nd_region->ro) {
		dev_warn(dev, "%s is read-only, unable to init btt metadata\n",
				dev_name(&nd_region->dev));
		return NULL;
	} else if (btt->init_state != INIT_READY) {
		btt->num_arenas = (rawsize / ARENA_MAX_SIZE) +
			((rawsize % ARENA_MAX_SIZE) ? 1 : 0);
		dev_dbg(dev, "init: %d arenas for %llu rawsize\n",
				btt->num_arenas, rawsize);

		ret = create_arenas(btt);
		if (ret) {
			dev_info(dev, "init: create_arenas: %d\n", ret);
			return NULL;
		}

		ret = btt_meta_init(btt);
		if (ret) {
			dev_err(dev, "init: error in meta_init: %d\n", ret);
			return NULL;
		}
	}

	ret = btt_blk_init(btt);
	if (ret) {
		dev_err(dev, "init: error in blk_init: %d\n", ret);
		return NULL;
	}

	btt_debugfs_init(btt);

	return btt;
}

/**
 * btt_fini - de-initialize a BTT
 * @btt: the BTT handle that was generated by btt_init
 *
 * De-initialize a Block Translation Table on device removal
 *
 * Context:
 * Might sleep.
 */
static void btt_fini(struct btt *btt)
{
	if (btt) {
		btt_blk_cleanup(btt);
		free_arenas(btt);
		debugfs_remove_recursive(btt->debugfs_dir);
	}
}

int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
{
	struct nd_btt *nd_btt = to_nd_btt(ndns->claim);
	struct nd_region *nd_region;
	struct btt_sb *btt_sb;
	struct btt *btt;
	size_t size, rawsize;
	int rc;

	if (!nd_btt->uuid || !nd_btt->ndns || !nd_btt->lbasize) {
		dev_dbg(&nd_btt->dev, "incomplete btt configuration\n");
		return -ENODEV;
	}

	btt_sb = devm_kzalloc(&nd_btt->dev, sizeof(*btt_sb), GFP_KERNEL);
	if (!btt_sb)
		return -ENOMEM;

	size = nvdimm_namespace_capacity(ndns);
	rc = devm_namespace_enable(&nd_btt->dev, ndns, size);
	if (rc)
		return rc;

	/*
	 * If this returns < 0, that is ok as it just means there wasn't
	 * an existing BTT, and we're creating a new one. We still need to
	 * call this as we need the version dependent fields in nd_btt to be
	 * set correctly based on the holder class
	 */
	nd_btt_version(nd_btt, ndns, btt_sb);

	rawsize = size - nd_btt->initial_offset;
	if (rawsize < ARENA_MIN_SIZE) {
		dev_dbg(&nd_btt->dev, "%s must be at least %ld bytes\n",
				dev_name(&ndns->dev),
				ARENA_MIN_SIZE + nd_btt->initial_offset);
		return -ENXIO;
	}
	nd_region = to_nd_region(nd_btt->dev.parent);
	btt = btt_init(nd_btt, rawsize, nd_btt->lbasize, nd_btt->uuid,
			nd_region);
	if (!btt)
		return -ENOMEM;
	nd_btt->btt = btt;

	return 0;
}
EXPORT_SYMBOL(nvdimm_namespace_attach_btt);

int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt)
{
	struct btt *btt = nd_btt->btt;

	btt_fini(btt);
	nd_btt->btt = NULL;

	return 0;
}
EXPORT_SYMBOL(nvdimm_namespace_detach_btt);

static int __init nd_btt_init(void)
{
	int rc = 0;

	debugfs_root = debugfs_create_dir("btt", NULL);
	if (IS_ERR_OR_NULL(debugfs_root))
		rc = -ENXIO;

	return rc;
}

static void __exit nd_btt_exit(void)
{
	debugfs_remove_recursive(debugfs_root);
}

MODULE_ALIAS_ND_DEVICE(ND_DEVICE_BTT);
MODULE_AUTHOR("Vishal Verma <vishal.l.verma@linux.intel.com>");
MODULE_LICENSE("GPL v2");
module_init(nd_btt_init);
module_exit(nd_btt_exit);