// SPDX-License-Identifier: GPL-2.0-only
/*
 * Block Translation Table
 * Copyright (c) 2014-2015, Intel Corporation.
 */
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/blkdev.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/hdreg.h>
#include <linux/sizes.h>
#include <linux/ndctl.h>
#include <linux/fs.h>
#include <linux/nd.h>
#include <linux/backing-dev.h>
#include <linux/cleanup.h>
#include "btt.h"
#include "nd.h"

enum log_ent_request {
	LOG_NEW_ENT = 0,
	LOG_OLD_ENT
};

static struct device *to_dev(struct arena_info *arena)
{
	return &arena->nd_btt->dev;
}

static u64 adjust_initial_offset(struct nd_btt *nd_btt, u64 offset)
{
	return offset + nd_btt->initial_offset;
}

static int arena_read_bytes(struct arena_info *arena, resource_size_t offset,
		void *buf, size_t n, unsigned long flags)
{
	struct nd_btt *nd_btt = arena->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* arena offsets may be shifted from the base of the device */
	offset = adjust_initial_offset(nd_btt, offset);
	return nvdimm_read_bytes(ndns, offset, buf, n, flags);
}

static int arena_write_bytes(struct arena_info *arena, resource_size_t offset,
		void *buf, size_t n, unsigned long flags)
{
	struct nd_btt *nd_btt = arena->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* arena offsets may be shifted from the base of the device */
	offset = adjust_initial_offset(nd_btt, offset);
	return nvdimm_write_bytes(ndns, offset, buf, n, flags);
}

static int btt_info_write(struct arena_info *arena, struct btt_sb *super)
{
	int ret;

	/*
	 * infooff and info2off should always be at least 512B aligned.
	 * We rely on that to make sure rw_bytes does error clearing
	 * correctly, so make sure that is the case.
	 */
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->infooff, 512),
		"arena->infooff: %#llx is unaligned\n", arena->infooff);
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->info2off, 512),
		"arena->info2off: %#llx is unaligned\n", arena->info2off);

	ret = arena_write_bytes(arena, arena->info2off, super,
			sizeof(struct btt_sb), 0);
	if (ret)
		return ret;

	return arena_write_bytes(arena, arena->infooff, super,
			sizeof(struct btt_sb), 0);
}

static int btt_info_read(struct arena_info *arena, struct btt_sb *super)
{
	return arena_read_bytes(arena, arena->infooff, super,
			sizeof(struct btt_sb), 0);
}

/*
 * 'raw' version of btt_map write
 * Assumptions:
 *    mapping is in little-endian
 *    mapping contains 'E' and 'Z' flags as desired
 */
static int __btt_map_write(struct arena_info *arena, u32 lba, __le32 mapping,
		unsigned long flags)
{
	u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);

	if (unlikely(lba >= arena->external_nlba))
		dev_err_ratelimited(to_dev(arena),
			"%s: lba %#x out of range (max: %#x)\n",
			__func__, lba, arena->external_nlba);
	return arena_write_bytes(arena, ns_off, &mapping, MAP_ENT_SIZE, flags);
}

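/*
 * On-media encoding of the Z (trim) and E (error) flag bits, as implied by
 * the switch below and its counterpart in btt_map_read():
 *
 *	Z E  meaning
 *	0 0  initial state: entry never written, identity-mapped
 *	     (postmap == premap)
 *	0 1  media error recorded for this block
 *	1 0  block was discarded/zeroed
 *	1 1  'normal' entry carrying a real postmap LBA
 *
 * So a caller asking for "no flags" (z_flag == e_flag == 0) is stored with
 * both bits set (MAP_ENT_NORMAL), which keeps the all-zeroes initial state
 * distinguishable from a written mapping.
 */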
static int btt_map_write(struct arena_info *arena, u32 lba, u32 mapping,
			u32 z_flag, u32 e_flag, unsigned long rwb_flags)
{
	u32 ze;
	__le32 mapping_le;

	/*
	 * This 'mapping' is supposed to be just the LBA mapping, without
	 * any flags set, so strip the flag bits.
	 */
	mapping = ent_lba(mapping);

	ze = (z_flag << 1) + e_flag;
	switch (ze) {
	case 0:
		/*
		 * We want to set neither of the Z or E flags, and
		 * in the actual layout, this means setting the bit
		 * positions of both to '1' to indicate a 'normal'
		 * map entry
		 */
		mapping |= MAP_ENT_NORMAL;
		break;
	case 1:
		mapping |= (1 << MAP_ERR_SHIFT);
		break;
	case 2:
		mapping |= (1 << MAP_TRIM_SHIFT);
		break;
	default:
		/*
		 * The case where Z and E are both sent in as '1' could be
		 * construed as a valid 'normal' case, but we decide not to,
		 * to avoid confusion
		 */
		dev_err_ratelimited(to_dev(arena),
			"Invalid use of Z and E flags\n");
		return -EIO;
	}

	mapping_le = cpu_to_le32(mapping);
	return __btt_map_write(arena, lba, mapping_le, rwb_flags);
}

static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping,
			int *trim, int *error, unsigned long rwb_flags)
{
	int ret;
	__le32 in;
	u32 raw_mapping, postmap, ze, z_flag, e_flag;
	u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);

	if (unlikely(lba >= arena->external_nlba))
		dev_err_ratelimited(to_dev(arena),
			"%s: lba %#x out of range (max: %#x)\n",
			__func__, lba, arena->external_nlba);

	ret = arena_read_bytes(arena, ns_off, &in, MAP_ENT_SIZE, rwb_flags);
	if (ret)
		return ret;

	raw_mapping = le32_to_cpu(in);

	z_flag = ent_z_flag(raw_mapping);
	e_flag = ent_e_flag(raw_mapping);
	ze = (z_flag << 1) + e_flag;
	postmap = ent_lba(raw_mapping);

	/* Reuse the {z,e}_flag variables for *trim and *error */
	z_flag = 0;
	e_flag = 0;

	switch (ze) {
	case 0:
		/* Initial state. Return postmap = premap */
		*mapping = lba;
		break;
	case 1:
		*mapping = postmap;
		e_flag = 1;
		break;
	case 2:
		*mapping = postmap;
		z_flag = 1;
		break;
	case 3:
		*mapping = postmap;
		break;
	default:
		return -EIO;
	}

	if (trim)
		*trim = z_flag;
	if (error)
		*error = e_flag;

	return ret;
}

static int btt_log_group_read(struct arena_info *arena, u32 lane,
			struct log_group *log)
{
	return arena_read_bytes(arena,
			arena->logoff + (lane * LOG_GRP_SIZE), log,
			LOG_GRP_SIZE, 0);
}

static struct dentry *debugfs_root;

static void arena_debugfs_init(struct arena_info *a, struct dentry *parent,
				int idx)
{
	char dirname[32];
	struct dentry *d;

	/* If, for some reason, the parent bttN dir was not created, exit */
	if (!parent)
		return;

	snprintf(dirname, 32, "arena%d", idx);
	d = debugfs_create_dir(dirname, parent);
	if (IS_ERR_OR_NULL(d))
		return;
	a->debugfs_dir = d;

	debugfs_create_x64("size", S_IRUGO, d, &a->size);
	debugfs_create_x64("external_lba_start", S_IRUGO, d,
				&a->external_lba_start);
	debugfs_create_x32("internal_nlba", S_IRUGO, d, &a->internal_nlba);
	debugfs_create_u32("internal_lbasize", S_IRUGO, d,
				&a->internal_lbasize);
	debugfs_create_x32("external_nlba", S_IRUGO, d, &a->external_nlba);
	debugfs_create_u32("external_lbasize", S_IRUGO, d,
				&a->external_lbasize);
	debugfs_create_u32("nfree", S_IRUGO, d, &a->nfree);
	debugfs_create_u16("version_major", S_IRUGO, d, &a->version_major);
	debugfs_create_u16("version_minor", S_IRUGO, d, &a->version_minor);
	debugfs_create_x64("nextoff", S_IRUGO, d, &a->nextoff);
	debugfs_create_x64("infooff", S_IRUGO, d, &a->infooff);
	debugfs_create_x64("dataoff", S_IRUGO, d, &a->dataoff);
	debugfs_create_x64("mapoff", S_IRUGO, d, &a->mapoff);
	debugfs_create_x64("logoff", S_IRUGO, d, &a->logoff);
	debugfs_create_x64("info2off", S_IRUGO, d, &a->info2off);
	debugfs_create_x32("flags", S_IRUGO, d, &a->flags);
	debugfs_create_u32("log_index_0", S_IRUGO, d, &a->log_index[0]);
	debugfs_create_u32("log_index_1", S_IRUGO, d, &a->log_index[1]);
}

static void btt_debugfs_init(struct btt *btt)
{
	int i = 0;
	struct arena_info *arena;

	btt->debugfs_dir = debugfs_create_dir(dev_name(&btt->nd_btt->dev),
						debugfs_root);
	if (IS_ERR_OR_NULL(btt->debugfs_dir))
		return;

	list_for_each_entry(arena, &btt->arena_list, list) {
		arena_debugfs_init(arena, btt->debugfs_dir, i);
		i++;
	}
}

static u32 log_seq(struct log_group *log, int log_idx)
{
	return le32_to_cpu(log->ent[log_idx].seq);
}

/*
 * This function accepts two log entries, and uses the
 * sequence number to find the 'older' entry.
 * If the log group has never been used (slot [0]'s sequence number is
 * still 0), it initializes that sequence number to 1 and returns 0.
 * Finally, it returns which of the entries was the older one.
 *
 * TODO: The logic feels a bit kludgy; make it better.
 */
static int btt_log_get_old(struct arena_info *a, struct log_group *log)
{
	int idx0 = a->log_index[0];
	int idx1 = a->log_index[1];
	int old;

	/*
	 * the first ever time this is seen, the entry goes into [0]
	 * the next time, the following logic works out to put this
	 * (next) entry into [1]
	 */
	if (log_seq(log, idx0) == 0) {
		log->ent[idx0].seq = cpu_to_le32(1);
		return 0;
	}

	if (log_seq(log, idx0) == log_seq(log, idx1))
		return -EINVAL;
	if (log_seq(log, idx0) + log_seq(log, idx1) > 5)
		return -EINVAL;

	if (log_seq(log, idx0) < log_seq(log, idx1)) {
		if ((log_seq(log, idx1) - log_seq(log, idx0)) == 1)
			old = 0;
		else
			old = 1;
	} else {
		if ((log_seq(log, idx0) - log_seq(log, idx1)) == 1)
			old = 1;
		else
			old = 0;
	}

	return old;
}
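
/*
 * Worked example of the sequence-number rules used above: valid sequence
 * numbers cycle 1 -> 2 -> 3 -> 1 (0 means "never written"; see
 * btt_flog_write(), which wraps 4 back to 1). The only legal pairs are
 * therefore {1,2}, {2,3} and {3,1}, whose sums are 3, 5 and 4 -- hence
 * equal values or a sum greater than 5 indicate corruption. For the pair
 * (seq[idx0], seq[idx1]) == (3, 1), slot 1 is the newer entry because 1
 * follows 3 across the wraparound, so the function returns 0 (slot 0 is
 * the old one).
 */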

/*
 * This function copies the desired (old/new) log entry into ent if
 * it is not NULL. It returns the sub-slot number (0 or 1)
 * where the desired log entry was found. Negative return values
 * indicate errors.
 */
static int btt_log_read(struct arena_info *arena, u32 lane,
			struct log_entry *ent, int old_flag)
{
	int ret;
	int old_ent, ret_ent;
	struct log_group log;

	ret = btt_log_group_read(arena, lane, &log);
	if (ret)
		return -EIO;

	old_ent = btt_log_get_old(arena, &log);
	if (old_ent < 0 || old_ent > 1) {
		dev_err(to_dev(arena),
			"log corruption (%d): lane %d seq [%d, %d]\n",
			old_ent, lane,
			le32_to_cpu(log.ent[arena->log_index[0]].seq),
			le32_to_cpu(log.ent[arena->log_index[1]].seq));
		/* TODO set error state? */
		return -EIO;
	}

	ret_ent = (old_flag ? old_ent : (1 - old_ent));

	if (ent != NULL)
		memcpy(ent, &log.ent[arena->log_index[ret_ent]], LOG_ENT_SIZE);

	return ret_ent;
}

/*
 * This function commits a log entry to media.
 * It does _not_ prepare the freelist entry for the next write;
 * btt_flog_write() is the wrapper that also updates the freelist elements.
 */
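/*
 * Note on ordering (a reading of the code below, not an extra guarantee):
 * the 16B entry is written as two 8B halves, and the second half carries
 * 'new_map' and 'seq' (per struct log_entry's field order in btt.h). Since
 * 'seq' is what makes a slot the newest entry, a torn update that persists
 * only the first half leaves the old slot's sequence number winning,
 * assuming the 8B half-writes are themselves atomic and durable as the
 * comment below states.
 */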
static int __btt_log_write(struct arena_info *arena, u32 lane,
			u32 sub, struct log_entry *ent, unsigned long flags)
{
	int ret;
	u32 group_slot = arena->log_index[sub];
	unsigned int log_half = LOG_ENT_SIZE / 2;
	void *src = ent;
	u64 ns_off;

	ns_off = arena->logoff + (lane * LOG_GRP_SIZE) +
			(group_slot * LOG_ENT_SIZE);
	/* split the 16B write into atomic, durable halves */
	ret = arena_write_bytes(arena, ns_off, src, log_half, flags);
	if (ret)
		return ret;

	ns_off += log_half;
	src += log_half;
	return arena_write_bytes(arena, ns_off, src, log_half, flags);
}

static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub,
			struct log_entry *ent)
{
	int ret;

	ret = __btt_log_write(arena, lane, sub, ent, NVDIMM_IO_ATOMIC);
	if (ret)
		return ret;

	/* prepare the next free entry */
	arena->freelist[lane].sub = 1 - arena->freelist[lane].sub;
	if (++(arena->freelist[lane].seq) == 4)
		arena->freelist[lane].seq = 1;
	if (ent_e_flag(le32_to_cpu(ent->old_map)))
		arena->freelist[lane].has_err = 1;
	arena->freelist[lane].block = ent_lba(le32_to_cpu(ent->old_map));

	return ret;
}

/*
 * This function initializes the BTT map to the initial state, which is
 * all-zeroes, and indicates an identity mapping
 */
static int btt_map_init(struct arena_info *arena)
{
	int ret = -EINVAL;
	void *zerobuf;
	size_t offset = 0;
	size_t chunk_size = SZ_2M;
	size_t mapsize = arena->logoff - arena->mapoff;

	zerobuf = kzalloc(chunk_size, GFP_KERNEL);
	if (!zerobuf)
		return -ENOMEM;

	/*
	 * mapoff should always be at least 512B aligned. We rely on that to
	 * make sure rw_bytes does error clearing correctly, so make sure that
	 * is the case.
	 */
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->mapoff, 512),
		"arena->mapoff: %#llx is unaligned\n", arena->mapoff);

	while (mapsize) {
		size_t size = min(mapsize, chunk_size);

		dev_WARN_ONCE(to_dev(arena), size < 512,
			"chunk size: %#zx is unaligned\n", size);
		ret = arena_write_bytes(arena, arena->mapoff + offset, zerobuf,
				size, 0);
		if (ret)
			goto free;

		offset += size;
		mapsize -= size;
		cond_resched();
	}

 free:
	kfree(zerobuf);
	return ret;
}

/*
 * This function initializes the BTT log with 'fake' entries pointing
 * to the initial reserved set of blocks as being free
 */
static int btt_log_init(struct arena_info *arena)
{
	size_t logsize = arena->info2off - arena->logoff;
	size_t chunk_size = SZ_4K, offset = 0;
	struct log_entry ent;
	void *zerobuf;
	int ret;
	u32 i;

	zerobuf = kzalloc(chunk_size, GFP_KERNEL);
	if (!zerobuf)
		return -ENOMEM;
	/*
	 * logoff should always be at least 512B aligned. We rely on that to
	 * make sure rw_bytes does error clearing correctly, so make sure that
	 * is the case.
	 */
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->logoff, 512),
		"arena->logoff: %#llx is unaligned\n", arena->logoff);

	while (logsize) {
		size_t size = min(logsize, chunk_size);

		dev_WARN_ONCE(to_dev(arena), size < 512,
			"chunk size: %#zx is unaligned\n", size);
		ret = arena_write_bytes(arena, arena->logoff + offset, zerobuf,
				size, 0);
		if (ret)
			goto free;

		offset += size;
		logsize -= size;
		cond_resched();
	}

	for (i = 0; i < arena->nfree; i++) {
		ent.lba = cpu_to_le32(i);
		ent.old_map = cpu_to_le32(arena->external_nlba + i);
		ent.new_map = cpu_to_le32(arena->external_nlba + i);
		ent.seq = cpu_to_le32(LOG_SEQ_INIT);
		ret = __btt_log_write(arena, i, 0, &ent, 0);
		if (ret)
			goto free;
	}

 free:
	kfree(zerobuf);
	return ret;
}
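
/*
 * The 'fake' entries above hand each lane one of the nfree reserved
 * internal blocks: lane i starts out owning internal block
 * (external_nlba + i). For example, with external_nlba == 1000, lane 0's
 * first write lands in internal block 1000, and whichever block that
 * write displaces becomes lane 0's next free block.
 */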

static u64 to_namespace_offset(struct arena_info *arena, u64 lba)
{
	return arena->dataoff + ((u64)lba * arena->internal_lbasize);
}

static int arena_clear_freelist_error(struct arena_info *arena, u32 lane)
{
	int ret = 0;

	if (arena->freelist[lane].has_err) {
		void *zero_page = page_address(ZERO_PAGE(0));
		u32 lba = arena->freelist[lane].block;
		u64 nsoff = to_namespace_offset(arena, lba);
		unsigned long len = arena->sector_size;

		mutex_lock(&arena->err_lock);

		while (len) {
			unsigned long chunk = min(len, PAGE_SIZE);

			ret = arena_write_bytes(arena, nsoff, zero_page,
				chunk, 0);
			if (ret)
				break;
			len -= chunk;
			nsoff += chunk;
			if (len == 0)
				arena->freelist[lane].has_err = 0;
		}
		mutex_unlock(&arena->err_lock);
	}
	return ret;
}

static int btt_freelist_init(struct arena_info *arena)
{
	int new, ret;
	struct log_entry log_new;
	u32 i, map_entry, log_oldmap, log_newmap;

	arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry),
					GFP_KERNEL);
	if (!arena->freelist)
		return -ENOMEM;

	for (i = 0; i < arena->nfree; i++) {
		new = btt_log_read(arena, i, &log_new, LOG_NEW_ENT);
		if (new < 0)
			return new;

		/* old and new map entries with any flags stripped out */
		log_oldmap = ent_lba(le32_to_cpu(log_new.old_map));
		log_newmap = ent_lba(le32_to_cpu(log_new.new_map));

		/* sub points to the next one to be overwritten */
		arena->freelist[i].sub = 1 - new;
		arena->freelist[i].seq = nd_inc_seq(le32_to_cpu(log_new.seq));
		arena->freelist[i].block = log_oldmap;

		/*
		 * FIXME: if error clearing fails during init, we want to make
		 * the BTT read-only
		 */
		if (ent_e_flag(le32_to_cpu(log_new.old_map)) &&
		    !ent_normal(le32_to_cpu(log_new.old_map))) {
			arena->freelist[i].has_err = 1;
			ret = arena_clear_freelist_error(arena, i);
			if (ret)
				dev_err_ratelimited(to_dev(arena),
					"Unable to clear known errors\n");
		}

		/* This implies a newly created or untouched flog entry */
		if (log_oldmap == log_newmap)
			continue;

		/* Check if map recovery is needed */
		ret = btt_map_read(arena, le32_to_cpu(log_new.lba), &map_entry,
				NULL, NULL, 0);
		if (ret)
			return ret;

		/*
		 * The map_entry from btt_map_read is stripped of any flag bits,
		 * so use the stripped out versions from the log as well for
		 * testing whether recovery is needed. For restoration, use the
		 * 'raw' version of the log entries as that captured what we
		 * were going to write originally.
		 */
		if ((log_newmap != map_entry) && (log_oldmap == map_entry)) {
			/*
			 * Last transaction wrote the flog, but wasn't able
			 * to complete the map write. So fix up the map.
			 */
			ret = btt_map_write(arena, le32_to_cpu(log_new.lba),
					le32_to_cpu(log_new.new_map), 0, 0, 0);
			if (ret)
				return ret;
		}
	}

	return 0;
}
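
/*
 * Example of the recovery above: suppose the newest flog entry for a lane
 * is {lba = 5, old_map = 100, new_map = 200} and map[5] still reads 100.
 * The previous write committed its flog but crashed before the map update,
 * so map[5] is rewritten to 200. If map[5] already reads 200 (or the entry
 * is untouched, old_map == new_map), nothing needs fixing.
 */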

static bool ent_is_padding(struct log_entry *ent)
{
	return (ent->lba == 0) && (ent->old_map == 0) && (ent->new_map == 0)
		&& (ent->seq == 0);
}

/*
 * Detecting valid log indices: We read a log group (see the comments in btt.h
 * for a description of a 'log_group' and its 'slots'), and iterate over its
 * four slots. We expect that a padding slot will be all-zeroes, and use this
 * to detect a padding slot vs. an actual entry.
 *
 * If a log_group is in the initial state, i.e. hasn't been used since the
 * creation of this BTT layout, it will have three of the four slots with
 * zeroes. We skip over these log_groups for the detection of log_index. If
 * all log_groups are in the initial state (i.e. the BTT has never been
 * written to), it is safe to assume the 'new format' of log entries in slots
 * (0, 1).
 */
static int log_set_indices(struct arena_info *arena)
{
	bool idx_set = false, initial_state = true;
	int ret, log_index[2] = {-1, -1};
	u32 i, j, next_idx = 0;
	struct log_group log;
	u32 pad_count = 0;

	for (i = 0; i < arena->nfree; i++) {
		ret = btt_log_group_read(arena, i, &log);
		if (ret < 0)
			return ret;

		for (j = 0; j < 4; j++) {
			if (!idx_set) {
				if (ent_is_padding(&log.ent[j])) {
					pad_count++;
					continue;
				} else {
					/* Skip if index has been recorded */
					if ((next_idx == 1) &&
						(j == log_index[0]))
						continue;
					/* valid entry, record index */
					log_index[next_idx] = j;
					next_idx++;
				}
				if (next_idx == 2) {
					/* two valid entries found */
					idx_set = true;
				} else if (next_idx > 2) {
					/* too many valid indices */
					return -ENXIO;
				}
			} else {
				/*
				 * once the indices have been set, just verify
				 * that all subsequent log groups are either in
				 * their initial state or follow the same
				 * indices.
				 */
				if (j == log_index[0]) {
					/* entry must be 'valid' */
					if (ent_is_padding(&log.ent[j]))
						return -ENXIO;
				} else if (j == log_index[1]) {
					;
					/*
					 * log_index[1] can be padding if the
					 * lane never got used and it is still
					 * in the initial state (three 'padding'
					 * entries)
					 */
				} else {
					/* entry must be invalid (padding) */
					if (!ent_is_padding(&log.ent[j]))
						return -ENXIO;
				}
			}
		}
		/*
		 * If any of the log_groups have more than one valid,
		 * non-padding entry, then we are no longer in the
		 * initial_state
		 */
		if (pad_count < 3)
			initial_state = false;
		pad_count = 0;
	}

	if (!initial_state && !idx_set)
		return -ENXIO;

	/*
	 * If all the entries in the log were in the initial state,
	 * assume new padding scheme
	 */
	if (initial_state)
		log_index[1] = 1;

	/*
	 * Only allow the known permutations of log/padding indices,
	 * i.e. (0, 1), and (0, 2)
	 */
	if ((log_index[0] == 0) && ((log_index[1] == 1) || (log_index[1] == 2)))
		; /* known index possibilities */
	else {
		dev_err(to_dev(arena), "Found an unknown padding scheme\n");
		return -ENXIO;
	}

	arena->log_index[0] = log_index[0];
	arena->log_index[1] = log_index[1];
	dev_dbg(to_dev(arena), "log_index_0 = %d\n", log_index[0]);
	dev_dbg(to_dev(arena), "log_index_1 = %d\n", log_index[1]);
	return 0;
}

static int btt_rtt_init(struct arena_info *arena)
{
	arena->rtt = kcalloc(arena->nfree, sizeof(u32), GFP_KERNEL);
	if (arena->rtt == NULL)
		return -ENOMEM;

	return 0;
}

static int btt_maplocks_init(struct arena_info *arena)
{
	u32 i;

	arena->map_locks = kcalloc(arena->nfree, sizeof(struct aligned_lock),
				GFP_KERNEL);
	if (!arena->map_locks)
		return -ENOMEM;

	for (i = 0; i < arena->nfree; i++)
		spin_lock_init(&arena->map_locks[i].lock);

	return 0;
}

static struct arena_info *alloc_arena(struct btt *btt, size_t size,
				size_t start, size_t arena_off)
{
	struct arena_info *arena;
	u64 logsize, mapsize, datasize;
	u64 available = size;

	arena = kzalloc(sizeof(struct arena_info), GFP_KERNEL);
	if (!arena)
		return NULL;
	arena->nd_btt = btt->nd_btt;
	arena->sector_size = btt->sector_size;
	mutex_init(&arena->err_lock);

	if (!size)
		return arena;

	arena->size = size;
	arena->external_lba_start = start;
	arena->external_lbasize = btt->lbasize;
	arena->internal_lbasize = roundup(arena->external_lbasize,
					INT_LBASIZE_ALIGNMENT);
	arena->nfree = BTT_DEFAULT_NFREE;
	arena->version_major = btt->nd_btt->version_major;
	arena->version_minor = btt->nd_btt->version_minor;

	if (available % BTT_PG_SIZE)
		available -= (available % BTT_PG_SIZE);

	/* Two pages are reserved for the super block and its copy */
	available -= 2 * BTT_PG_SIZE;

	/* The log takes a fixed amount of space based on nfree */
	logsize = roundup(arena->nfree * LOG_GRP_SIZE, BTT_PG_SIZE);
	available -= logsize;

	/* Calculate optimal split between map and data area */
	arena->internal_nlba = div_u64(available - BTT_PG_SIZE,
			arena->internal_lbasize + MAP_ENT_SIZE);
	arena->external_nlba = arena->internal_nlba - arena->nfree;

	mapsize = roundup((arena->external_nlba * MAP_ENT_SIZE), BTT_PG_SIZE);
	datasize = available - mapsize;

	/* 'Absolute' values, relative to start of storage space */
	arena->infooff = arena_off;
	arena->dataoff = arena->infooff + BTT_PG_SIZE;
	arena->mapoff = arena->dataoff + datasize;
	arena->logoff = arena->mapoff + mapsize;
	arena->info2off = arena->logoff + logsize;

	/* Default log indices are (0,1) */
	arena->log_index[0] = 0;
	arena->log_index[1] = 1;
	return arena;
}
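
/*
 * Worked example of the split above (hypothetical numbers): for a 16 MiB
 * arena with 512B internal blocks, nfree == 256, 4K BTT pages and 64B log
 * groups: available = 16 MiB - 2 * 4K (info blocks) - 16K (log) =
 * 16752640; internal_nlba = (16752640 - 4096) / (512 + 4) = 32458;
 * external_nlba = 32458 - 256 = 32202; mapsize rounds 32202 * 4 up to
 * 128K, and the remainder is the data area.
 */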

static void free_arenas(struct btt *btt)
{
	struct arena_info *arena, *next;

	list_for_each_entry_safe(arena, next, &btt->arena_list, list) {
		list_del(&arena->list);
		kfree(arena->rtt);
		kfree(arena->map_locks);
		kfree(arena->freelist);
		debugfs_remove_recursive(arena->debugfs_dir);
		kfree(arena);
	}
}

/*
 * This function parses an existing, valid btt superblock (already read by
 * the caller) and populates the corresponding arena_info struct
 */
static void parse_arena_meta(struct arena_info *arena, struct btt_sb *super,
				u64 arena_off)
{
	arena->internal_nlba = le32_to_cpu(super->internal_nlba);
	arena->internal_lbasize = le32_to_cpu(super->internal_lbasize);
	arena->external_nlba = le32_to_cpu(super->external_nlba);
	arena->external_lbasize = le32_to_cpu(super->external_lbasize);
	arena->nfree = le32_to_cpu(super->nfree);
	arena->version_major = le16_to_cpu(super->version_major);
	arena->version_minor = le16_to_cpu(super->version_minor);

	arena->nextoff = (super->nextoff == 0) ? 0 : (arena_off +
			le64_to_cpu(super->nextoff));
	arena->infooff = arena_off;
	arena->dataoff = arena_off + le64_to_cpu(super->dataoff);
	arena->mapoff = arena_off + le64_to_cpu(super->mapoff);
	arena->logoff = arena_off + le64_to_cpu(super->logoff);
	arena->info2off = arena_off + le64_to_cpu(super->info2off);

	arena->size = (le64_to_cpu(super->nextoff) > 0)
		? (le64_to_cpu(super->nextoff))
		: (arena->info2off - arena->infooff + BTT_PG_SIZE);

	arena->flags = le32_to_cpu(super->flags);
}

static int discover_arenas(struct btt *btt)
{
	int ret = 0;
	struct arena_info *arena;
	size_t remaining = btt->rawsize;
	u64 cur_nlba = 0;
	size_t cur_off = 0;
	int num_arenas = 0;

	struct btt_sb *super __free(kfree) = kzalloc(sizeof(*super), GFP_KERNEL);
	if (!super)
		return -ENOMEM;

	while (remaining) {
		/* Alloc memory for arena */
		arena = alloc_arena(btt, 0, 0, 0);
		if (!arena)
			return -ENOMEM;

		arena->infooff = cur_off;
		ret = btt_info_read(arena, super);
		if (ret)
			goto out;

		if (!nd_btt_arena_is_valid(btt->nd_btt, super)) {
			if (remaining == btt->rawsize) {
				btt->init_state = INIT_NOTFOUND;
				dev_info(to_dev(arena), "No existing arenas\n");
				goto out;
			} else {
				dev_err(to_dev(arena),
						"Found corrupted metadata!\n");
				ret = -ENODEV;
				goto out;
			}
		}

		arena->external_lba_start = cur_nlba;
		parse_arena_meta(arena, super, cur_off);

		ret = log_set_indices(arena);
		if (ret) {
			dev_err(to_dev(arena),
				"Unable to deduce log/padding indices\n");
			goto out;
		}

		ret = btt_freelist_init(arena);
		if (ret)
			goto out;

		ret = btt_rtt_init(arena);
		if (ret)
			goto out;

		ret = btt_maplocks_init(arena);
		if (ret)
			goto out;

		list_add_tail(&arena->list, &btt->arena_list);

		remaining -= arena->size;
		cur_off += arena->size;
		cur_nlba += arena->external_nlba;
		num_arenas++;

		if (arena->nextoff == 0)
			break;
	}
	btt->num_arenas = num_arenas;
	btt->nlba = cur_nlba;
	btt->init_state = INIT_READY;

	return ret;

 out:
	kfree(arena);
	free_arenas(btt);
	return ret;
}

static int create_arenas(struct btt *btt)
{
	size_t remaining = btt->rawsize;
	size_t cur_off = 0;

	while (remaining) {
		struct arena_info *arena;
		size_t arena_size = min_t(u64, ARENA_MAX_SIZE, remaining);

		remaining -= arena_size;
		if (arena_size < ARENA_MIN_SIZE)
			break;

		arena = alloc_arena(btt, arena_size, btt->nlba, cur_off);
		if (!arena) {
			free_arenas(btt);
			return -ENOMEM;
		}
		btt->nlba += arena->external_nlba;
		if (remaining >= ARENA_MIN_SIZE)
			arena->nextoff = arena->size;
		else
			arena->nextoff = 0;
		cur_off += arena_size;
		list_add_tail(&arena->list, &btt->arena_list);
	}

	return 0;
}

/*
 * This function completes arena initialization by writing
 * all the metadata.
 * It is only called for an uninitialized arena when a write
 * to that arena occurs for the first time.
 */
static int btt_arena_write_layout(struct arena_info *arena)
{
	int ret;
	u64 sum;
	struct btt_sb *super;
	struct nd_btt *nd_btt = arena->nd_btt;
	const uuid_t *parent_uuid = nd_dev_to_uuid(&nd_btt->ndns->dev);

	ret = btt_map_init(arena);
	if (ret)
		return ret;

	ret = btt_log_init(arena);
	if (ret)
		return ret;

	super = kzalloc(sizeof(struct btt_sb), GFP_NOIO);
	if (!super)
		return -ENOMEM;

	strscpy(super->signature, BTT_SIG, sizeof(super->signature));
	export_uuid(super->uuid, nd_btt->uuid);
	export_uuid(super->parent_uuid, parent_uuid);
	super->flags = cpu_to_le32(arena->flags);
	super->version_major = cpu_to_le16(arena->version_major);
	super->version_minor = cpu_to_le16(arena->version_minor);
	super->external_lbasize = cpu_to_le32(arena->external_lbasize);
	super->external_nlba = cpu_to_le32(arena->external_nlba);
	super->internal_lbasize = cpu_to_le32(arena->internal_lbasize);
	super->internal_nlba = cpu_to_le32(arena->internal_nlba);
	super->nfree = cpu_to_le32(arena->nfree);
	super->infosize = cpu_to_le32(sizeof(struct btt_sb));
	super->nextoff = cpu_to_le64(arena->nextoff);
	/*
	 * Subtract arena->infooff (arena start) so numbers are relative
	 * to 'this' arena
	 */
	super->dataoff = cpu_to_le64(arena->dataoff - arena->infooff);
	super->mapoff = cpu_to_le64(arena->mapoff - arena->infooff);
	super->logoff = cpu_to_le64(arena->logoff - arena->infooff);
	super->info2off = cpu_to_le64(arena->info2off - arena->infooff);

	super->flags = 0;
	sum = nd_sb_checksum((struct nd_gen_sb *) super);
	super->checksum = cpu_to_le64(sum);

	ret = btt_info_write(arena, super);

	kfree(super);
	return ret;
}

/*
 * This function completes the initialization for the BTT namespace
 * such that it is ready to accept IOs
 */
static int btt_meta_init(struct btt *btt)
{
	int ret = 0;
	struct arena_info *arena;

	mutex_lock(&btt->init_lock);
	list_for_each_entry(arena, &btt->arena_list, list) {
		ret = btt_arena_write_layout(arena);
		if (ret)
			goto unlock;

		ret = btt_freelist_init(arena);
		if (ret)
			goto unlock;

		ret = btt_rtt_init(arena);
		if (ret)
			goto unlock;

		ret = btt_maplocks_init(arena);
		if (ret)
			goto unlock;
	}

	btt->init_state = INIT_READY;

 unlock:
	mutex_unlock(&btt->init_lock);
	return ret;
}

static u32 btt_meta_size(struct btt *btt)
{
	return btt->lbasize - btt->sector_size;
}
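
/*
 * For example, a namespace advertising lbasize == 520 with 512B BTT
 * sectors leaves 8B of per-block metadata, which btt_rw_integrity() below
 * shuttles to and from the bio integrity payload.
 */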

/*
 * This function calculates the arena in which the given LBA lies
 * by doing a linear walk. This is acceptable since we expect only
 * a few arenas. If we have backing devices that get much larger,
 * we can construct a balanced binary tree of arenas at init time
 * so that this range search becomes faster.
 */
static int lba_to_arena(struct btt *btt, sector_t sector, __u32 *premap,
				struct arena_info **arena)
{
	struct arena_info *arena_list;
	__u64 lba = div_u64(sector << SECTOR_SHIFT, btt->sector_size);

	list_for_each_entry(arena_list, &btt->arena_list, list) {
		if (lba < arena_list->external_nlba) {
			*arena = arena_list;
			*premap = lba;
			return 0;
		}
		lba -= arena_list->external_nlba;
	}

	return -EIO;
}
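
/*
 * The conversion above turns a 512B-unit block layer sector into a BTT
 * sector: e.g. with a 4096B BTT sector size, sector 64 is byte offset
 * 32768 and therefore external lba 8 of the first arena.
 */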

/*
 * The following (lock_map, unlock_map) are mostly just to improve
 * readability, since they index into an array of locks
 */
static void lock_map(struct arena_info *arena, u32 premap)
		__acquires(&arena->map_locks[idx].lock)
{
	u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;

	spin_lock(&arena->map_locks[idx].lock);
}

static void unlock_map(struct arena_info *arena, u32 premap)
		__releases(&arena->map_locks[idx].lock)
{
	u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;

	spin_unlock(&arena->map_locks[idx].lock);
}
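
/*
 * Note on the index formula above: all map entries sharing a cacheline
 * hash to the same lock, e.g. with 4B entries and 64B cachelines, premap
 * LBAs 0-15 use lock 0, 16-31 use lock 1, and so on (mod nfree). This
 * serializes the read-check-write sequence for neighboring entries of the
 * on-media map.
 */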

static int btt_data_read(struct arena_info *arena, struct page *page,
			unsigned int off, u32 lba, u32 len)
{
	int ret;
	u64 nsoff = to_namespace_offset(arena, lba);
	void *mem = kmap_atomic(page);

	ret = arena_read_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
	kunmap_atomic(mem);

	return ret;
}

static int btt_data_write(struct arena_info *arena, u32 lba,
			struct page *page, unsigned int off, u32 len)
{
	int ret;
	u64 nsoff = to_namespace_offset(arena, lba);
	void *mem = kmap_atomic(page);

	ret = arena_write_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
	kunmap_atomic(mem);

	return ret;
}

static void zero_fill_data(struct page *page, unsigned int off, u32 len)
{
	void *mem = kmap_atomic(page);

	memset(mem + off, 0, len);
	kunmap_atomic(mem);
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
			struct arena_info *arena, u32 postmap, int rw)
{
	unsigned int len = btt_meta_size(btt);
	u64 meta_nsoff;
	int ret = 0;

	if (bip == NULL)
		return 0;

	meta_nsoff = to_namespace_offset(arena, postmap) + btt->sector_size;

	while (len) {
		unsigned int cur_len;
		struct bio_vec bv;
		void *mem;

		bv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
		/*
		 * The 'bv' obtained from bvec_iter_bvec has its .bv_len and
		 * .bv_offset already adjusted for iter->bi_bvec_done, and we
		 * can use those directly
		 */

		cur_len = min(len, bv.bv_len);
		mem = bvec_kmap_local(&bv);
		if (rw)
			ret = arena_write_bytes(arena, meta_nsoff, mem, cur_len,
					NVDIMM_IO_ATOMIC);
		else
			ret = arena_read_bytes(arena, meta_nsoff, mem, cur_len,
					NVDIMM_IO_ATOMIC);

		kunmap_local(mem);
		if (ret)
			return ret;

		len -= cur_len;
		meta_nsoff += cur_len;
		if (!bvec_iter_advance(bip->bip_vec, &bip->bip_iter, cur_len))
			return -EIO;
	}

	return ret;
}

#else /* CONFIG_BLK_DEV_INTEGRITY */
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
			struct arena_info *arena, u32 postmap, int rw)
{
	return 0;
}
#endif
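
/*
 * Read-side use of the RTT (read tracking table), as implemented below: a
 * reader publishes the postmap block it is about to read in rtt[lane],
 * then re-reads the map entry to make sure the mapping didn't change in
 * the meantime. The write path (btt_write_pg) spins on every RTT slot
 * before reusing a free block, so a block is never overwritten while a
 * reader may still be fetching its old contents.
 */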

static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
			struct page *page, unsigned int off, sector_t sector,
			unsigned int len)
{
	int ret = 0;
	int t_flag, e_flag;
	struct arena_info *arena = NULL;
	u32 lane = 0, premap, postmap;

	while (len) {
		u32 cur_len;

		lane = nd_region_acquire_lane(btt->nd_region);

		ret = lba_to_arena(btt, sector, &premap, &arena);
		if (ret)
			goto out_lane;

		cur_len = min(btt->sector_size, len);

		ret = btt_map_read(arena, premap, &postmap, &t_flag, &e_flag,
				NVDIMM_IO_ATOMIC);
		if (ret)
			goto out_lane;

		/*
		 * We loop to make sure that the post map LBA didn't change
		 * from under us between writing the RTT and doing the actual
		 * read.
		 */
		while (1) {
			u32 new_map;
			int new_t, new_e;

			if (t_flag) {
				zero_fill_data(page, off, cur_len);
				goto out_lane;
			}

			if (e_flag) {
				ret = -EIO;
				goto out_lane;
			}

			arena->rtt[lane] = RTT_VALID | postmap;
			/*
			 * Barrier to make sure this write is not reordered
			 * to do the verification map_read before the RTT store
			 */
			barrier();

			ret = btt_map_read(arena, premap, &new_map, &new_t,
					&new_e, NVDIMM_IO_ATOMIC);
			if (ret)
				goto out_rtt;

			if ((postmap == new_map) && (t_flag == new_t) &&
					(e_flag == new_e))
				break;

			postmap = new_map;
			t_flag = new_t;
			e_flag = new_e;
		}

		ret = btt_data_read(arena, page, off, postmap, cur_len);
		if (ret) {
			/* Media error - set the e_flag */
			if (btt_map_write(arena, premap, postmap, 0, 1, NVDIMM_IO_ATOMIC))
				dev_warn_ratelimited(to_dev(arena),
					"Error persistently tracking bad blocks at %#x\n",
					premap);
			goto out_rtt;
		}

		if (bip) {
			ret = btt_rw_integrity(btt, bip, arena, postmap, READ);
			if (ret)
				goto out_rtt;
		}

		arena->rtt[lane] = RTT_INVALID;
		nd_region_release_lane(btt->nd_region, lane);

		len -= cur_len;
		off += cur_len;
		sector += btt->sector_size >> SECTOR_SHIFT;
	}

	return 0;

 out_rtt:
	arena->rtt[lane] = RTT_INVALID;
 out_lane:
	nd_region_release_lane(btt->nd_region, lane);
	return ret;
}

/*
 * Normally, arena_{read,write}_bytes will take care of the initial offset
 * adjustment, but in the case of btt_is_badblock, where we query is_bad_pmem,
 * we need the final, raw namespace offset here
 */
static bool btt_is_badblock(struct btt *btt, struct arena_info *arena,
		u32 postmap)
{
	u64 nsoff = adjust_initial_offset(arena->nd_btt,
			to_namespace_offset(arena, postmap));
	sector_t phys_sector = nsoff >> 9;

	return is_bad_pmem(btt->phys_bb, phys_sector, arena->internal_lbasize);
}

static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
			sector_t sector, struct page *page, unsigned int off,
			unsigned int len)
{
	int ret = 0;
	struct arena_info *arena = NULL;
	u32 premap = 0, old_postmap, new_postmap, lane = 0, i;
	struct log_entry log;
	int sub;

	while (len) {
		u32 cur_len;
		int e_flag;

 retry:
		lane = nd_region_acquire_lane(btt->nd_region);

		ret = lba_to_arena(btt, sector, &premap, &arena);
		if (ret)
			goto out_lane;
		cur_len = min(btt->sector_size, len);

		if ((arena->flags & IB_FLAG_ERROR_MASK) != 0) {
			ret = -EIO;
			goto out_lane;
		}

		if (btt_is_badblock(btt, arena, arena->freelist[lane].block))
			arena->freelist[lane].has_err = 1;

		if (mutex_is_locked(&arena->err_lock)
				|| arena->freelist[lane].has_err) {
			nd_region_release_lane(btt->nd_region, lane);

			ret = arena_clear_freelist_error(arena, lane);
			if (ret)
				return ret;

			/* OK to acquire a different lane/free block */
			goto retry;
		}

		new_postmap = arena->freelist[lane].block;

		/* Wait if the new block is being read from */
		for (i = 0; i < arena->nfree; i++)
			while (arena->rtt[i] == (RTT_VALID | new_postmap))
				cpu_relax();

		if (new_postmap >= arena->internal_nlba) {
			ret = -EIO;
			goto out_lane;
		}

		ret = btt_data_write(arena, new_postmap, page, off, cur_len);
		if (ret)
			goto out_lane;

		if (bip) {
			ret = btt_rw_integrity(btt, bip, arena, new_postmap,
						WRITE);
			if (ret)
				goto out_lane;
		}

		lock_map(arena, premap);
		ret = btt_map_read(arena, premap, &old_postmap, NULL, &e_flag,
				NVDIMM_IO_ATOMIC);
		if (ret)
			goto out_map;
		if (old_postmap >= arena->internal_nlba) {
			ret = -EIO;
			goto out_map;
		}
		if (e_flag)
			set_e_flag(old_postmap);

		log.lba = cpu_to_le32(premap);
		log.old_map = cpu_to_le32(old_postmap);
		log.new_map = cpu_to_le32(new_postmap);
		log.seq = cpu_to_le32(arena->freelist[lane].seq);
		sub = arena->freelist[lane].sub;
		ret = btt_flog_write(arena, lane, sub, &log);
		if (ret)
			goto out_map;

		ret = btt_map_write(arena, premap, new_postmap, 0, 0,
				NVDIMM_IO_ATOMIC);
		if (ret)
			goto out_map;

		unlock_map(arena, premap);
		nd_region_release_lane(btt->nd_region, lane);

		if (e_flag) {
			ret = arena_clear_freelist_error(arena, lane);
			if (ret)
				return ret;
		}

		len -= cur_len;
		off += cur_len;
		sector += btt->sector_size >> SECTOR_SHIFT;
	}

	return 0;

 out_map:
	unlock_map(arena, premap);
 out_lane:
	nd_region_release_lane(btt->nd_region, lane);
	return ret;
}

static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
			struct page *page, unsigned int len, unsigned int off,
			enum req_op op, sector_t sector)
{
	int ret;

	if (!op_is_write(op)) {
		ret = btt_read_pg(btt, bip, page, off, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		ret = btt_write_pg(btt, bip, sector, page, off, len);
	}

	return ret;
}

static void btt_submit_bio(struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct btt *btt = bio->bi_bdev->bd_disk->private_data;
	struct bvec_iter iter;
	unsigned long start;
	struct bio_vec bvec;
	int err = 0;
	bool do_acct;

	if (!bio_integrity_prep(bio))
		return;

	do_acct = blk_queue_io_stat(bio->bi_bdev->bd_disk->queue);
	if (do_acct)
		start = bio_start_io_acct(bio);
	bio_for_each_segment(bvec, bio, iter) {
		unsigned int len = bvec.bv_len;

		if (len > PAGE_SIZE || len < btt->sector_size ||
				len % btt->sector_size) {
			dev_err_ratelimited(&btt->nd_btt->dev,
				"unaligned bio segment (len: %d)\n", len);
			bio->bi_status = BLK_STS_IOERR;
			break;
		}

		err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset,
				bio_op(bio), iter.bi_sector);
		if (err) {
			dev_err(&btt->nd_btt->dev,
				"io error in %s sector %lld, len %d\n",
				(op_is_write(bio_op(bio))) ? "WRITE" : "READ",
				(unsigned long long) iter.bi_sector, len);
			bio->bi_status = errno_to_blk_status(err);
			break;
		}
	}
	if (do_acct)
		bio_end_io_acct(bio, start);

	bio_endio(bio);
}

static int btt_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	return 0;
}

static const struct block_device_operations btt_fops = {
	.owner =		THIS_MODULE,
	.submit_bio =		btt_submit_bio,
	.getgeo =		btt_getgeo,
};

static int btt_blk_init(struct btt *btt)
{
	struct nd_btt *nd_btt = btt->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;
	int rc = -ENOMEM;

	btt->btt_disk = blk_alloc_disk(NUMA_NO_NODE);
	if (!btt->btt_disk)
		return -ENOMEM;

	nvdimm_namespace_disk_name(ndns, btt->btt_disk->disk_name);
	btt->btt_disk->first_minor = 0;
	btt->btt_disk->fops = &btt_fops;
	btt->btt_disk->private_data = btt;

	blk_queue_logical_block_size(btt->btt_disk->queue, btt->sector_size);
	blk_queue_max_hw_sectors(btt->btt_disk->queue, UINT_MAX);
	blk_queue_flag_set(QUEUE_FLAG_NONROT, btt->btt_disk->queue);
	blk_queue_flag_set(QUEUE_FLAG_SYNCHRONOUS, btt->btt_disk->queue);

	if (btt_meta_size(btt)) {
		rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt));
		if (rc)
			goto out_cleanup_disk;
	}

	set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
	rc = device_add_disk(&btt->nd_btt->dev, btt->btt_disk, NULL);
	if (rc)
		goto out_cleanup_disk;

	btt->nd_btt->size = btt->nlba * (u64)btt->sector_size;
	nvdimm_check_and_set_ro(btt->btt_disk);

	return 0;

out_cleanup_disk:
	put_disk(btt->btt_disk);
	return rc;
}

static void btt_blk_cleanup(struct btt *btt)
{
	del_gendisk(btt->btt_disk);
	put_disk(btt->btt_disk);
}

/**
 * btt_init - initialize a block translation table for the given device
 * @nd_btt:	device with BTT geometry and backing device info
 * @rawsize:	raw size in bytes of the backing device
 * @lbasize:	lba size of the backing device
 * @uuid:	A uuid for the backing device - this is stored on media
 * @nd_region:	&struct nd_region for the REGION device
 *
 * Initialize a Block Translation Table on a backing device to provide
 * single sector power fail atomicity.
 *
 * Context:
 * Might sleep.
 *
 * Returns:
 * Pointer to a new struct btt on success, NULL on failure.
 */
static struct btt *btt_init(struct nd_btt *nd_btt, unsigned long long rawsize,
			u32 lbasize, uuid_t *uuid,
			struct nd_region *nd_region)
{
	int ret;
	struct btt *btt;
	struct nd_namespace_io *nsio;
	struct device *dev = &nd_btt->dev;

	btt = devm_kzalloc(dev, sizeof(struct btt), GFP_KERNEL);
	if (!btt)
		return NULL;

	btt->nd_btt = nd_btt;
	btt->rawsize = rawsize;
	btt->lbasize = lbasize;
	btt->sector_size = ((lbasize >= 4096) ? 4096 : 512);
	INIT_LIST_HEAD(&btt->arena_list);
	mutex_init(&btt->init_lock);
	btt->nd_region = nd_region;
	nsio = to_nd_namespace_io(&nd_btt->ndns->dev);
	btt->phys_bb = &nsio->bb;

	ret = discover_arenas(btt);
	if (ret) {
		dev_err(dev, "init: error in arena_discover: %d\n", ret);
		return NULL;
	}

	if (btt->init_state != INIT_READY && nd_region->ro) {
		dev_warn(dev, "%s is read-only, unable to init btt metadata\n",
				dev_name(&nd_region->dev));
		return NULL;
	} else if (btt->init_state != INIT_READY) {
		btt->num_arenas = (rawsize / ARENA_MAX_SIZE) +
			((rawsize % ARENA_MAX_SIZE) ? 1 : 0);
		dev_dbg(dev, "init: %d arenas for %llu rawsize\n",
				btt->num_arenas, rawsize);

		ret = create_arenas(btt);
		if (ret) {
			dev_info(dev, "init: create_arenas: %d\n", ret);
			return NULL;
		}

		ret = btt_meta_init(btt);
		if (ret) {
			dev_err(dev, "init: error in meta_init: %d\n", ret);
			return NULL;
		}
	}

	ret = btt_blk_init(btt);
	if (ret) {
		dev_err(dev, "init: error in blk_init: %d\n", ret);
		return NULL;
	}

	btt_debugfs_init(btt);

	return btt;
}

/**
 * btt_fini - de-initialize a BTT
 * @btt:	the BTT handle that was generated by btt_init
 *
 * De-initialize a Block Translation Table on device removal
 *
 * Context:
 * Might sleep.
 */
static void btt_fini(struct btt *btt)
{
	if (btt) {
		btt_blk_cleanup(btt);
		free_arenas(btt);
		debugfs_remove_recursive(btt->debugfs_dir);
	}
}

int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
{
	struct nd_btt *nd_btt = to_nd_btt(ndns->claim);
	struct nd_region *nd_region;
	struct btt_sb *btt_sb;
	struct btt *btt;
	size_t size, rawsize;
	int rc;

	if (!nd_btt->uuid || !nd_btt->ndns || !nd_btt->lbasize) {
		dev_dbg(&nd_btt->dev, "incomplete btt configuration\n");
		return -ENODEV;
	}

	btt_sb = devm_kzalloc(&nd_btt->dev, sizeof(*btt_sb), GFP_KERNEL);
	if (!btt_sb)
		return -ENOMEM;

	size = nvdimm_namespace_capacity(ndns);
	rc = devm_namespace_enable(&nd_btt->dev, ndns, size);
	if (rc)
		return rc;

	/*
	 * If this returns < 0, that is ok as it just means there wasn't
	 * an existing BTT, and we're creating a new one. We still need to
	 * call this as we need the version dependent fields in nd_btt to be
	 * set correctly based on the holder class
	 */
	nd_btt_version(nd_btt, ndns, btt_sb);

	rawsize = size - nd_btt->initial_offset;
	if (rawsize < ARENA_MIN_SIZE) {
		dev_dbg(&nd_btt->dev, "%s must be at least %ld bytes\n",
				dev_name(&ndns->dev),
				ARENA_MIN_SIZE + nd_btt->initial_offset);
		return -ENXIO;
	}
	nd_region = to_nd_region(nd_btt->dev.parent);
	btt = btt_init(nd_btt, rawsize, nd_btt->lbasize, nd_btt->uuid,
			nd_region);
	if (!btt)
		return -ENOMEM;
	nd_btt->btt = btt;

	return 0;
}
EXPORT_SYMBOL(nvdimm_namespace_attach_btt);

int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt)
{
	struct btt *btt = nd_btt->btt;

	btt_fini(btt);
	nd_btt->btt = NULL;

	return 0;
}
EXPORT_SYMBOL(nvdimm_namespace_detach_btt);

static int __init nd_btt_init(void)
{
	int rc = 0;

	debugfs_root = debugfs_create_dir("btt", NULL);
	if (IS_ERR_OR_NULL(debugfs_root))
		rc = -ENXIO;

	return rc;
}

static void __exit nd_btt_exit(void)
{
	debugfs_remove_recursive(debugfs_root);
}

MODULE_ALIAS_ND_DEVICE(ND_DEVICE_BTT);
MODULE_AUTHOR("Vishal Verma <vishal.l.verma@linux.intel.com>");
MODULE_LICENSE("GPL v2");
module_init(nd_btt_init);
module_exit(nd_btt_exit);
1/*
2 * Block Translation Table
3 * Copyright (c) 2014-2015, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 */
14#include <linux/highmem.h>
15#include <linux/debugfs.h>
16#include <linux/blkdev.h>
17#include <linux/module.h>
18#include <linux/device.h>
19#include <linux/mutex.h>
20#include <linux/hdreg.h>
21#include <linux/genhd.h>
22#include <linux/sizes.h>
23#include <linux/ndctl.h>
24#include <linux/fs.h>
25#include <linux/nd.h>
26#include <linux/backing-dev.h>
27#include "btt.h"
28#include "nd.h"
29
30enum log_ent_request {
31 LOG_NEW_ENT = 0,
32 LOG_OLD_ENT
33};
34
35static struct device *to_dev(struct arena_info *arena)
36{
37 return &arena->nd_btt->dev;
38}
39
40static u64 adjust_initial_offset(struct nd_btt *nd_btt, u64 offset)
41{
42 return offset + nd_btt->initial_offset;
43}
44
45static int arena_read_bytes(struct arena_info *arena, resource_size_t offset,
46 void *buf, size_t n, unsigned long flags)
47{
48 struct nd_btt *nd_btt = arena->nd_btt;
49 struct nd_namespace_common *ndns = nd_btt->ndns;
50
51 /* arena offsets may be shifted from the base of the device */
52 offset = adjust_initial_offset(nd_btt, offset);
53 return nvdimm_read_bytes(ndns, offset, buf, n, flags);
54}
55
56static int arena_write_bytes(struct arena_info *arena, resource_size_t offset,
57 void *buf, size_t n, unsigned long flags)
58{
59 struct nd_btt *nd_btt = arena->nd_btt;
60 struct nd_namespace_common *ndns = nd_btt->ndns;
61
62 /* arena offsets may be shifted from the base of the device */
63 offset = adjust_initial_offset(nd_btt, offset);
64 return nvdimm_write_bytes(ndns, offset, buf, n, flags);
65}
66
67static int btt_info_write(struct arena_info *arena, struct btt_sb *super)
68{
69 int ret;
70
71 /*
72 * infooff and info2off should always be at least 512B aligned.
73 * We rely on that to make sure rw_bytes does error clearing
74 * correctly, so make sure that is the case.
75 */
76 dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->infooff, 512),
77 "arena->infooff: %#llx is unaligned\n", arena->infooff);
78 dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->info2off, 512),
79 "arena->info2off: %#llx is unaligned\n", arena->info2off);
80
81 ret = arena_write_bytes(arena, arena->info2off, super,
82 sizeof(struct btt_sb), 0);
83 if (ret)
84 return ret;
85
86 return arena_write_bytes(arena, arena->infooff, super,
87 sizeof(struct btt_sb), 0);
88}
89
90static int btt_info_read(struct arena_info *arena, struct btt_sb *super)
91{
92 return arena_read_bytes(arena, arena->infooff, super,
93 sizeof(struct btt_sb), 0);
94}
95
96/*
97 * 'raw' version of btt_map write
98 * Assumptions:
99 * mapping is in little-endian
100 * mapping contains 'E' and 'Z' flags as desired
101 */
102static int __btt_map_write(struct arena_info *arena, u32 lba, __le32 mapping,
103 unsigned long flags)
104{
105 u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);
106
107 if (unlikely(lba >= arena->external_nlba))
108 dev_err_ratelimited(to_dev(arena),
109 "%s: lba %#x out of range (max: %#x)\n",
110 __func__, lba, arena->external_nlba);
111 return arena_write_bytes(arena, ns_off, &mapping, MAP_ENT_SIZE, flags);
112}
113
114static int btt_map_write(struct arena_info *arena, u32 lba, u32 mapping,
115 u32 z_flag, u32 e_flag, unsigned long rwb_flags)
116{
117 u32 ze;
118 __le32 mapping_le;
119
120 /*
121 * This 'mapping' is supposed to be just the LBA mapping, without
122 * any flags set, so strip the flag bits.
123 */
124 mapping = ent_lba(mapping);
125
126 ze = (z_flag << 1) + e_flag;
127 switch (ze) {
128 case 0:
129 /*
130 * We want to set neither of the Z or E flags, and
131 * in the actual layout, this means setting the bit
132 * positions of both to '1' to indicate a 'normal'
133 * map entry
134 */
135 mapping |= MAP_ENT_NORMAL;
136 break;
137 case 1:
138 mapping |= (1 << MAP_ERR_SHIFT);
139 break;
140 case 2:
141 mapping |= (1 << MAP_TRIM_SHIFT);
142 break;
143 default:
144 /*
145 * The case where Z and E are both sent in as '1' could be
146 * construed as a valid 'normal' case, but we decide not to,
147 * to avoid confusion
148 */
149 dev_err_ratelimited(to_dev(arena),
150 "Invalid use of Z and E flags\n");
151 return -EIO;
152 }
153
154 mapping_le = cpu_to_le32(mapping);
155 return __btt_map_write(arena, lba, mapping_le, rwb_flags);
156}
157
158static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping,
159 int *trim, int *error, unsigned long rwb_flags)
160{
161 int ret;
162 __le32 in;
163 u32 raw_mapping, postmap, ze, z_flag, e_flag;
164 u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);
165
166 if (unlikely(lba >= arena->external_nlba))
167 dev_err_ratelimited(to_dev(arena),
168 "%s: lba %#x out of range (max: %#x)\n",
169 __func__, lba, arena->external_nlba);
170
171 ret = arena_read_bytes(arena, ns_off, &in, MAP_ENT_SIZE, rwb_flags);
172 if (ret)
173 return ret;
174
175 raw_mapping = le32_to_cpu(in);
176
177 z_flag = ent_z_flag(raw_mapping);
178 e_flag = ent_e_flag(raw_mapping);
179 ze = (z_flag << 1) + e_flag;
180 postmap = ent_lba(raw_mapping);
181
182 /* Reuse the {z,e}_flag variables for *trim and *error */
183 z_flag = 0;
184 e_flag = 0;
185
186 switch (ze) {
187 case 0:
188 /* Initial state. Return postmap = premap */
189 *mapping = lba;
190 break;
191 case 1:
192 *mapping = postmap;
193 e_flag = 1;
194 break;
195 case 2:
196 *mapping = postmap;
197 z_flag = 1;
198 break;
199 case 3:
200 *mapping = postmap;
201 break;
202 default:
203 return -EIO;
204 }
205
206 if (trim)
207 *trim = z_flag;
208 if (error)
209 *error = e_flag;
210
211 return ret;
212}
213
214static int btt_log_group_read(struct arena_info *arena, u32 lane,
215 struct log_group *log)
216{
217 return arena_read_bytes(arena,
218 arena->logoff + (lane * LOG_GRP_SIZE), log,
219 LOG_GRP_SIZE, 0);
220}
221
222static struct dentry *debugfs_root;
223
224static void arena_debugfs_init(struct arena_info *a, struct dentry *parent,
225 int idx)
226{
227 char dirname[32];
228 struct dentry *d;
229
230 /* If for some reason, parent bttN was not created, exit */
231 if (!parent)
232 return;
233
234 snprintf(dirname, 32, "arena%d", idx);
235 d = debugfs_create_dir(dirname, parent);
236 if (IS_ERR_OR_NULL(d))
237 return;
238 a->debugfs_dir = d;
239
240 debugfs_create_x64("size", S_IRUGO, d, &a->size);
241 debugfs_create_x64("external_lba_start", S_IRUGO, d,
242 &a->external_lba_start);
243 debugfs_create_x32("internal_nlba", S_IRUGO, d, &a->internal_nlba);
244 debugfs_create_u32("internal_lbasize", S_IRUGO, d,
245 &a->internal_lbasize);
246 debugfs_create_x32("external_nlba", S_IRUGO, d, &a->external_nlba);
247 debugfs_create_u32("external_lbasize", S_IRUGO, d,
248 &a->external_lbasize);
249 debugfs_create_u32("nfree", S_IRUGO, d, &a->nfree);
250 debugfs_create_u16("version_major", S_IRUGO, d, &a->version_major);
251 debugfs_create_u16("version_minor", S_IRUGO, d, &a->version_minor);
252 debugfs_create_x64("nextoff", S_IRUGO, d, &a->nextoff);
253 debugfs_create_x64("infooff", S_IRUGO, d, &a->infooff);
254 debugfs_create_x64("dataoff", S_IRUGO, d, &a->dataoff);
255 debugfs_create_x64("mapoff", S_IRUGO, d, &a->mapoff);
256 debugfs_create_x64("logoff", S_IRUGO, d, &a->logoff);
257 debugfs_create_x64("info2off", S_IRUGO, d, &a->info2off);
258 debugfs_create_x32("flags", S_IRUGO, d, &a->flags);
259 debugfs_create_u32("log_index_0", S_IRUGO, d, &a->log_index[0]);
260 debugfs_create_u32("log_index_1", S_IRUGO, d, &a->log_index[1]);
261}
262
263static void btt_debugfs_init(struct btt *btt)
264{
265 int i = 0;
266 struct arena_info *arena;
267
268 btt->debugfs_dir = debugfs_create_dir(dev_name(&btt->nd_btt->dev),
269 debugfs_root);
270 if (IS_ERR_OR_NULL(btt->debugfs_dir))
271 return;
272
273 list_for_each_entry(arena, &btt->arena_list, list) {
274 arena_debugfs_init(arena, btt->debugfs_dir, i);
275 i++;
276 }
277}
278
279static u32 log_seq(struct log_group *log, int log_idx)
280{
281 return le32_to_cpu(log->ent[log_idx].seq);
282}
283
284/*
285 * This function accepts two log entries, and uses the
286 * sequence number to find the 'older' entry.
287 * It also updates the sequence number in this old entry to
288 * make it the 'new' one if the mark_flag is set.
289 * Finally, it returns which of the entries was the older one.
290 *
291 * TODO The logic feels a bit kludge-y. make it better..
292 */
static int btt_log_get_old(struct arena_info *a, struct log_group *log)
{
	int idx0 = a->log_index[0];
	int idx1 = a->log_index[1];
	int old;

	/*
	 * The first time this group is ever seen, the entry goes into [0];
	 * the next time, the logic below works out to put the (next)
	 * entry into [1].
	 */
	if (log_seq(log, idx0) == 0) {
		log->ent[idx0].seq = cpu_to_le32(1);
		return 0;
	}

	if (log_seq(log, idx0) == log_seq(log, idx1))
		return -EINVAL;
	if (log_seq(log, idx0) + log_seq(log, idx1) > 5)
		return -EINVAL;

	if (log_seq(log, idx0) < log_seq(log, idx1)) {
		if ((log_seq(log, idx1) - log_seq(log, idx0)) == 1)
			old = 0;
		else
			old = 1;
	} else {
		if ((log_seq(log, idx0) - log_seq(log, idx1)) == 1)
			old = 1;
		else
			old = 0;
	}

	return old;
}

/*
 * This function copies the desired (old/new) log entry into ent if
 * it is not NULL. It returns the sub-slot number (0 or 1)
 * where the desired log entry was found. Negative return values
 * indicate errors.
 */
static int btt_log_read(struct arena_info *arena, u32 lane,
			struct log_entry *ent, int old_flag)
{
	int ret;
	int old_ent, ret_ent;
	struct log_group log;

	ret = btt_log_group_read(arena, lane, &log);
	if (ret)
		return -EIO;

	old_ent = btt_log_get_old(arena, &log);
	if (old_ent < 0 || old_ent > 1) {
		dev_err(to_dev(arena),
			"log corruption (%d): lane %d seq [%d, %d]\n",
			old_ent, lane, log.ent[arena->log_index[0]].seq,
			log.ent[arena->log_index[1]].seq);
		/* TODO set error state? */
		return -EIO;
	}

	ret_ent = (old_flag ? old_ent : (1 - old_ent));

	if (ent != NULL)
		memcpy(ent, &log.ent[arena->log_index[ret_ent]], LOG_ENT_SIZE);

	return ret_ent;
}

/*
 * This function commits a log entry to media.
 * It does _not_ prepare the freelist entry for the next write;
 * btt_flog_write() is the wrapper that also updates the freelist.
 */
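/*
 * Note on ordering: with the btt.h layout, 'seq' lives in the second
 * 8B half of the entry, so the entry only becomes the valid 'new' one
 * once the second half-write completes. A power failure between the
 * two halves leaves the previous entry as the current one.
 */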
static int __btt_log_write(struct arena_info *arena, u32 lane,
			u32 sub, struct log_entry *ent, unsigned long flags)
{
	int ret;
	u32 group_slot = arena->log_index[sub];
	unsigned int log_half = LOG_ENT_SIZE / 2;
	void *src = ent;
	u64 ns_off;

	ns_off = arena->logoff + (lane * LOG_GRP_SIZE) +
			(group_slot * LOG_ENT_SIZE);
	/* split the 16B write into atomic, durable halves */
	ret = arena_write_bytes(arena, ns_off, src, log_half, flags);
	if (ret)
		return ret;

	ns_off += log_half;
	src += log_half;
	return arena_write_bytes(arena, ns_off, src, log_half, flags);
}

static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub,
			struct log_entry *ent)
{
	int ret;

	ret = __btt_log_write(arena, lane, sub, ent, NVDIMM_IO_ATOMIC);
	if (ret)
		return ret;

	/* prepare the next free entry */
	arena->freelist[lane].sub = 1 - arena->freelist[lane].sub;
	if (++(arena->freelist[lane].seq) == 4)
		arena->freelist[lane].seq = 1;
	if (ent_e_flag(le32_to_cpu(ent->old_map)))
		arena->freelist[lane].has_err = 1;
	arena->freelist[lane].block = ent_lba(le32_to_cpu(ent->old_map));

	return ret;
}

/*
 * This function initializes the BTT map to the initial state, which is
 * all-zeroes, and indicates an identity mapping
 */
static int btt_map_init(struct arena_info *arena)
{
	int ret = -EINVAL;
	void *zerobuf;
	size_t offset = 0;
	size_t chunk_size = SZ_2M;
	size_t mapsize = arena->logoff - arena->mapoff;

	zerobuf = kzalloc(chunk_size, GFP_KERNEL);
	if (!zerobuf)
		return -ENOMEM;

	/*
	 * mapoff should always be at least 512B aligned. We rely on that to
	 * make sure rw_bytes does error clearing correctly, so make sure that
	 * is the case.
	 */
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->mapoff, 512),
		"arena->mapoff: %#llx is unaligned\n", arena->mapoff);

	while (mapsize) {
		size_t size = min(mapsize, chunk_size);

		dev_WARN_ONCE(to_dev(arena), size < 512,
			"chunk size: %#zx is unaligned\n", size);
		ret = arena_write_bytes(arena, arena->mapoff + offset, zerobuf,
				size, 0);
		if (ret)
			goto free;

		offset += size;
		mapsize -= size;
		cond_resched();
	}

 free:
	kfree(zerobuf);
	return ret;
}

/*
 * This function initializes the BTT log with 'fake' entries that mark
 * the initial reserved set of blocks as free
 */
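/*
 * For example, with nfree == 256 (BTT_DEFAULT_NFREE), lanes 0..255 are
 * seeded with the spare blocks external_nlba + 0 .. external_nlba + 255,
 * so every lane's first write has a free block to land in before any
 * map entry changes.
 */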
static int btt_log_init(struct arena_info *arena)
{
	size_t logsize = arena->info2off - arena->logoff;
	size_t chunk_size = SZ_4K, offset = 0;
	struct log_entry ent;
	void *zerobuf;
	int ret;
	u32 i;

	zerobuf = kzalloc(chunk_size, GFP_KERNEL);
	if (!zerobuf)
		return -ENOMEM;
	/*
	 * logoff should always be at least 512B aligned. We rely on that to
	 * make sure rw_bytes does error clearing correctly, so make sure that
	 * is the case.
	 */
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->logoff, 512),
		"arena->logoff: %#llx is unaligned\n", arena->logoff);

	while (logsize) {
		size_t size = min(logsize, chunk_size);

		dev_WARN_ONCE(to_dev(arena), size < 512,
			"chunk size: %#zx is unaligned\n", size);
		ret = arena_write_bytes(arena, arena->logoff + offset, zerobuf,
				size, 0);
		if (ret)
			goto free;

		offset += size;
		logsize -= size;
		cond_resched();
	}

	for (i = 0; i < arena->nfree; i++) {
		ent.lba = cpu_to_le32(i);
		ent.old_map = cpu_to_le32(arena->external_nlba + i);
		ent.new_map = cpu_to_le32(arena->external_nlba + i);
		ent.seq = cpu_to_le32(LOG_SEQ_INIT);
		ret = __btt_log_write(arena, i, 0, &ent, 0);
		if (ret)
			goto free;
	}

 free:
	kfree(zerobuf);
	return ret;
}

static u64 to_namespace_offset(struct arena_info *arena, u64 lba)
{
	return arena->dataoff + ((u64)lba * arena->internal_lbasize);
}

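/*
 * Zeroing the just-freed block through arena_write_bytes() lets the
 * underlying rw_bytes path clear any known poison in it before the
 * block is handed out again, rather than leaving stale bad sectors.
 */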
static int arena_clear_freelist_error(struct arena_info *arena, u32 lane)
{
	int ret = 0;

	if (arena->freelist[lane].has_err) {
		void *zero_page = page_address(ZERO_PAGE(0));
		u32 lba = arena->freelist[lane].block;
		u64 nsoff = to_namespace_offset(arena, lba);
		unsigned long len = arena->sector_size;

		mutex_lock(&arena->err_lock);

		while (len) {
			unsigned long chunk = min(len, PAGE_SIZE);

			ret = arena_write_bytes(arena, nsoff, zero_page,
				chunk, 0);
			if (ret)
				break;
			len -= chunk;
			nsoff += chunk;
			if (len == 0)
				arena->freelist[lane].has_err = 0;
		}
		mutex_unlock(&arena->err_lock);
	}
	return ret;
}

static int btt_freelist_init(struct arena_info *arena)
{
	int old, new, ret;
	u32 i, map_entry;
	struct log_entry log_new, log_old;

	arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry),
					GFP_KERNEL);
	if (!arena->freelist)
		return -ENOMEM;

	for (i = 0; i < arena->nfree; i++) {
		old = btt_log_read(arena, i, &log_old, LOG_OLD_ENT);
		if (old < 0)
			return old;

		new = btt_log_read(arena, i, &log_new, LOG_NEW_ENT);
		if (new < 0)
			return new;

		/* sub points to the next one to be overwritten */
		arena->freelist[i].sub = 1 - new;
		arena->freelist[i].seq = nd_inc_seq(le32_to_cpu(log_new.seq));
		arena->freelist[i].block = ent_lba(le32_to_cpu(log_new.old_map));

		/*
		 * FIXME: if error clearing fails during init, we want to make
		 * the BTT read-only
		 */
		if (ent_e_flag(le32_to_cpu(log_new.old_map))) {
			ret = arena_clear_freelist_error(arena, i);
			if (ret)
				dev_err_ratelimited(to_dev(arena),
					"Unable to clear known errors\n");
		}

		/* This implies a newly created or untouched flog entry */
		if (log_new.old_map == log_new.new_map)
			continue;

		/* Check if map recovery is needed */
		ret = btt_map_read(arena, le32_to_cpu(log_new.lba), &map_entry,
				NULL, NULL, 0);
		if (ret)
			return ret;
		if ((le32_to_cpu(log_new.new_map) != map_entry) &&
				(le32_to_cpu(log_new.old_map) == map_entry)) {
			/*
			 * Last transaction wrote the flog, but wasn't able
			 * to complete the map write. So fix up the map.
			 */
			ret = btt_map_write(arena, le32_to_cpu(log_new.lba),
					le32_to_cpu(log_new.new_map), 0, 0, 0);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static bool ent_is_padding(struct log_entry *ent)
{
	return (ent->lba == 0) && (ent->old_map == 0) && (ent->new_map == 0)
		&& (ent->seq == 0);
}

/*
 * Detecting valid log indices: We read a log group (see the comments in btt.h
 * for a description of a 'log_group' and its 'slots'), and iterate over its
 * four slots. We expect that a padding slot will be all-zeroes, and use this
 * to detect a padding slot vs. an actual entry.
 *
 * If a log_group is in the initial state, i.e. hasn't been used since the
 * creation of this BTT layout, it will have three of the four slots with
 * zeroes. We skip over these log_groups for the detection of log_index. If
 * all log_groups are in the initial state (i.e. the BTT has never been
 * written to), it is safe to assume the 'new format' of log entries in slots
 * (0, 1).
 */
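/*
 * Concretely: the current format keeps its two live entries in slots
 * (0, 1), while the older, padded format used slots (0, 2) with slots
 * 1 and 3 as zero padding. These are the only two layouts accepted
 * below.
 */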
static int log_set_indices(struct arena_info *arena)
{
	bool idx_set = false, initial_state = true;
	int ret, log_index[2] = {-1, -1};
	u32 i, j, next_idx = 0;
	struct log_group log;
	u32 pad_count = 0;

	for (i = 0; i < arena->nfree; i++) {
		ret = btt_log_group_read(arena, i, &log);
		if (ret < 0)
			return ret;

		for (j = 0; j < 4; j++) {
			if (!idx_set) {
				if (ent_is_padding(&log.ent[j])) {
					pad_count++;
					continue;
				} else {
					/* Skip if index has been recorded */
					if ((next_idx == 1) &&
						(j == log_index[0]))
						continue;
					/* valid entry, record index */
					log_index[next_idx] = j;
					next_idx++;
				}
				if (next_idx == 2) {
					/* two valid entries found */
					idx_set = true;
				} else if (next_idx > 2) {
					/* too many valid indices */
					return -ENXIO;
				}
			} else {
				/*
				 * once the indices have been set, just verify
				 * that all subsequent log groups are either in
				 * their initial state or follow the same
				 * indices.
				 */
				if (j == log_index[0]) {
					/* entry must be 'valid' */
					if (ent_is_padding(&log.ent[j]))
						return -ENXIO;
				} else if (j == log_index[1]) {
					/*
					 * log_index[1] can be padding if the
					 * lane never got used and it is still
					 * in the initial state (three 'padding'
					 * entries)
					 */
				} else {
					/* entry must be invalid (padding) */
					if (!ent_is_padding(&log.ent[j]))
						return -ENXIO;
				}
			}
		}
		/*
		 * If any of the log_groups have more than one valid,
		 * non-padding entry, then we are no longer in the
		 * initial state
		 */
		if (pad_count < 3)
			initial_state = false;
		pad_count = 0;
	}

	if (!initial_state && !idx_set)
		return -ENXIO;

	/*
	 * If all the entries in the log were in the initial state,
	 * assume new padding scheme
	 */
	if (initial_state)
		log_index[1] = 1;

	/*
	 * Only allow the known permutations of log/padding indices,
	 * i.e. (0, 1), and (0, 2)
	 */
	if (log_index[0] != 0 ||
			(log_index[1] != 1 && log_index[1] != 2)) {
		dev_err(to_dev(arena), "Found an unknown padding scheme\n");
		return -ENXIO;
	}

	arena->log_index[0] = log_index[0];
	arena->log_index[1] = log_index[1];
	dev_dbg(to_dev(arena), "log_index_0 = %d\n", log_index[0]);
	dev_dbg(to_dev(arena), "log_index_1 = %d\n", log_index[1]);
	return 0;
}
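/*
 * The RTT (read tracking table) has one slot per lane. A reader
 * publishes the postmap block it is about to read; btt_write_pg()
 * spins until no reader holds its candidate free block, keeping reads
 * coherent without a per-block lock.
 */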
static int btt_rtt_init(struct arena_info *arena)
{
	arena->rtt = kcalloc(arena->nfree, sizeof(u32), GFP_KERNEL);
	if (arena->rtt == NULL)
		return -ENOMEM;

	return 0;
}

static int btt_maplocks_init(struct arena_info *arena)
{
	u32 i;

	arena->map_locks = kcalloc(arena->nfree, sizeof(struct aligned_lock),
				GFP_KERNEL);
	if (!arena->map_locks)
		return -ENOMEM;

	for (i = 0; i < arena->nfree; i++)
		spin_lock_init(&arena->map_locks[i].lock);

	return 0;
}
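/*
 * Carve an arena out of 'size' bytes starting at 'arena_off'. The
 * resulting on-media layout, in increasing offset order, is:
 *
 *	info block | data | map | log | info2 block
 *
 * where the info blocks take BTT_PG_SIZE each and the map and log
 * sizes are rounded up to BTT_PG_SIZE.
 */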
static struct arena_info *alloc_arena(struct btt *btt, size_t size,
				size_t start, size_t arena_off)
{
	struct arena_info *arena;
	u64 logsize, mapsize, datasize;
	u64 available = size;

	arena = kzalloc(sizeof(struct arena_info), GFP_KERNEL);
	if (!arena)
		return NULL;
	arena->nd_btt = btt->nd_btt;
	arena->sector_size = btt->sector_size;
	mutex_init(&arena->err_lock);

	if (!size)
		return arena;

	arena->size = size;
	arena->external_lba_start = start;
	arena->external_lbasize = btt->lbasize;
	arena->internal_lbasize = roundup(arena->external_lbasize,
					INT_LBASIZE_ALIGNMENT);
	arena->nfree = BTT_DEFAULT_NFREE;
	arena->version_major = btt->nd_btt->version_major;
	arena->version_minor = btt->nd_btt->version_minor;

	if (available % BTT_PG_SIZE)
		available -= (available % BTT_PG_SIZE);

	/* Two pages are reserved for the super block and its copy */
	available -= 2 * BTT_PG_SIZE;

	/* The log takes a fixed amount of space based on nfree */
	logsize = roundup(arena->nfree * LOG_GRP_SIZE, BTT_PG_SIZE);
	available -= logsize;

	/* Calculate optimal split between map and data area */
	arena->internal_nlba = div_u64(available - BTT_PG_SIZE,
			arena->internal_lbasize + MAP_ENT_SIZE);
	arena->external_nlba = arena->internal_nlba - arena->nfree;

	mapsize = roundup((arena->external_nlba * MAP_ENT_SIZE), BTT_PG_SIZE);
	datasize = available - mapsize;

	/* 'Absolute' values, relative to start of storage space */
	arena->infooff = arena_off;
	arena->dataoff = arena->infooff + BTT_PG_SIZE;
	arena->mapoff = arena->dataoff + datasize;
	arena->logoff = arena->mapoff + mapsize;
	arena->info2off = arena->logoff + logsize;

	/* Default log indices are (0,1) */
	arena->log_index[0] = 0;
	arena->log_index[1] = 1;
	return arena;
}

static void free_arenas(struct btt *btt)
{
	struct arena_info *arena, *next;

	list_for_each_entry_safe(arena, next, &btt->arena_list, list) {
		list_del(&arena->list);
		kfree(arena->rtt);
		kfree(arena->map_locks);
		kfree(arena->freelist);
		debugfs_remove_recursive(arena->debugfs_dir);
		kfree(arena);
	}
}

/*
 * This function parses an existing valid btt superblock and
 * populates the corresponding arena_info struct
 */
static void parse_arena_meta(struct arena_info *arena, struct btt_sb *super,
				u64 arena_off)
{
	arena->internal_nlba = le32_to_cpu(super->internal_nlba);
	arena->internal_lbasize = le32_to_cpu(super->internal_lbasize);
	arena->external_nlba = le32_to_cpu(super->external_nlba);
	arena->external_lbasize = le32_to_cpu(super->external_lbasize);
	arena->nfree = le32_to_cpu(super->nfree);
	arena->version_major = le16_to_cpu(super->version_major);
	arena->version_minor = le16_to_cpu(super->version_minor);

	arena->nextoff = (super->nextoff == 0) ? 0 : (arena_off +
			le64_to_cpu(super->nextoff));
	arena->infooff = arena_off;
	arena->dataoff = arena_off + le64_to_cpu(super->dataoff);
	arena->mapoff = arena_off + le64_to_cpu(super->mapoff);
	arena->logoff = arena_off + le64_to_cpu(super->logoff);
	arena->info2off = arena_off + le64_to_cpu(super->info2off);

	arena->size = (le64_to_cpu(super->nextoff) > 0)
		? (le64_to_cpu(super->nextoff))
		: (arena->info2off - arena->infooff + BTT_PG_SIZE);

	arena->flags = le32_to_cpu(super->flags);
}

static int discover_arenas(struct btt *btt)
{
	int ret = 0;
	struct arena_info *arena;
	struct btt_sb *super;
	size_t remaining = btt->rawsize;
	u64 cur_nlba = 0;
	size_t cur_off = 0;
	int num_arenas = 0;

	super = kzalloc(sizeof(*super), GFP_KERNEL);
	if (!super)
		return -ENOMEM;

	while (remaining) {
		/* Alloc memory for arena */
		arena = alloc_arena(btt, 0, 0, 0);
		if (!arena) {
			ret = -ENOMEM;
			goto out_super;
		}

		arena->infooff = cur_off;
		ret = btt_info_read(arena, super);
		if (ret)
			goto out;

		if (!nd_btt_arena_is_valid(btt->nd_btt, super)) {
			if (remaining == btt->rawsize) {
				btt->init_state = INIT_NOTFOUND;
				dev_info(to_dev(arena), "No existing arenas\n");
				goto out;
			} else {
				dev_err(to_dev(arena),
					"Found corrupted metadata!\n");
				ret = -ENODEV;
				goto out;
			}
		}

		arena->external_lba_start = cur_nlba;
		parse_arena_meta(arena, super, cur_off);

		ret = log_set_indices(arena);
		if (ret) {
			dev_err(to_dev(arena),
				"Unable to deduce log/padding indices\n");
			goto out;
		}

		ret = btt_freelist_init(arena);
		if (ret)
			goto out;

		ret = btt_rtt_init(arena);
		if (ret)
			goto out;

		ret = btt_maplocks_init(arena);
		if (ret)
			goto out;

		list_add_tail(&arena->list, &btt->arena_list);

		remaining -= arena->size;
		cur_off += arena->size;
		cur_nlba += arena->external_nlba;
		num_arenas++;

		if (arena->nextoff == 0)
			break;
	}
	btt->num_arenas = num_arenas;
	btt->nlba = cur_nlba;
	btt->init_state = INIT_READY;

	kfree(super);
	return ret;

 out:
	kfree(arena);
	free_arenas(btt);
 out_super:
	kfree(super);
	return ret;
}
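/*
 * Split the raw namespace into arenas. With the btt.h limits
 * (ARENA_MAX_SIZE = 512G, ARENA_MIN_SIZE = 16M), for example, a 1T
 * namespace becomes two full arenas, and any tail smaller than 16M is
 * left unused.
 */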
static int create_arenas(struct btt *btt)
{
	size_t remaining = btt->rawsize;
	size_t cur_off = 0;

	while (remaining) {
		struct arena_info *arena;
		size_t arena_size = min_t(u64, ARENA_MAX_SIZE, remaining);

		remaining -= arena_size;
		if (arena_size < ARENA_MIN_SIZE)
			break;

		arena = alloc_arena(btt, arena_size, btt->nlba, cur_off);
		if (!arena) {
			free_arenas(btt);
			return -ENOMEM;
		}
		btt->nlba += arena->external_nlba;
		if (remaining >= ARENA_MIN_SIZE)
			arena->nextoff = arena->size;
		else
			arena->nextoff = 0;
		cur_off += arena_size;
		list_add_tail(&arena->list, &btt->arena_list);
	}

	return 0;
}

/*
 * This function completes arena initialization by writing
 * all the metadata.
 * It is only called for an uninitialized arena when a write
 * to that arena occurs for the first time.
 */
static int btt_arena_write_layout(struct arena_info *arena)
{
	int ret;
	u64 sum;
	struct btt_sb *super;
	struct nd_btt *nd_btt = arena->nd_btt;
	const u8 *parent_uuid = nd_dev_to_uuid(&nd_btt->ndns->dev);

	ret = btt_map_init(arena);
	if (ret)
		return ret;

	ret = btt_log_init(arena);
	if (ret)
		return ret;

	super = kzalloc(sizeof(struct btt_sb), GFP_NOIO);
	if (!super)
		return -ENOMEM;

	strncpy(super->signature, BTT_SIG, BTT_SIG_LEN);
	memcpy(super->uuid, nd_btt->uuid, 16);
	memcpy(super->parent_uuid, parent_uuid, 16);
	super->flags = cpu_to_le32(arena->flags);
	super->version_major = cpu_to_le16(arena->version_major);
	super->version_minor = cpu_to_le16(arena->version_minor);
	super->external_lbasize = cpu_to_le32(arena->external_lbasize);
	super->external_nlba = cpu_to_le32(arena->external_nlba);
	super->internal_lbasize = cpu_to_le32(arena->internal_lbasize);
	super->internal_nlba = cpu_to_le32(arena->internal_nlba);
	super->nfree = cpu_to_le32(arena->nfree);
	super->infosize = cpu_to_le32(sizeof(struct btt_sb));
	super->nextoff = cpu_to_le64(arena->nextoff);
	/*
	 * Subtract arena->infooff (arena start) so numbers are relative
	 * to 'this' arena
	 */
	super->dataoff = cpu_to_le64(arena->dataoff - arena->infooff);
	super->mapoff = cpu_to_le64(arena->mapoff - arena->infooff);
	super->logoff = cpu_to_le64(arena->logoff - arena->infooff);
	super->info2off = cpu_to_le64(arena->info2off - arena->infooff);

	super->flags = 0;
	sum = nd_sb_checksum((struct nd_gen_sb *) super);
	super->checksum = cpu_to_le64(sum);

	ret = btt_info_write(arena, super);

	kfree(super);
	return ret;
}

/*
 * This function completes the initialization for the BTT namespace
 * such that it is ready to accept IOs
 */
static int btt_meta_init(struct btt *btt)
{
	int ret = 0;
	struct arena_info *arena;

	mutex_lock(&btt->init_lock);
	list_for_each_entry(arena, &btt->arena_list, list) {
		ret = btt_arena_write_layout(arena);
		if (ret)
			goto unlock;

		ret = btt_freelist_init(arena);
		if (ret)
			goto unlock;

		ret = btt_rtt_init(arena);
		if (ret)
			goto unlock;

		ret = btt_maplocks_init(arena);
		if (ret)
			goto unlock;
	}

	btt->init_state = INIT_READY;

 unlock:
	mutex_unlock(&btt->init_lock);
	return ret;
}

static u32 btt_meta_size(struct btt *btt)
{
	return btt->lbasize - btt->sector_size;
}

/*
 * This function calculates the arena in which the given LBA lies
 * by doing a linear walk. This is acceptable since we expect only
 * a few arenas. If we have backing devices that get much larger,
 * we can construct a balanced binary tree of arenas at init time
 * so that this range search becomes faster.
 */
static int lba_to_arena(struct btt *btt, sector_t sector, __u32 *premap,
				struct arena_info **arena)
{
	struct arena_info *arena_list;
	__u64 lba = div_u64(sector << SECTOR_SHIFT, btt->sector_size);

	list_for_each_entry(arena_list, &btt->arena_list, list) {
		if (lba < arena_list->external_nlba) {
			*arena = arena_list;
			*premap = lba;
			return 0;
		}
		lba -= arena_list->external_nlba;
	}

	return -EIO;
}

/*
 * The following (lock_map, unlock_map) are mostly just to improve
 * readability, since they index into an array of locks
 */
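/*
 * The stripe index is the cacheline that holds a premap LBA's 4B map
 * entry, modulo nfree, so map entries sharing a cacheline always share
 * a lock and no two lanes update the same cacheline concurrently.
 */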
static void lock_map(struct arena_info *arena, u32 premap)
		__acquires(&arena->map_locks[idx].lock)
{
	u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;

	spin_lock(&arena->map_locks[idx].lock);
}

static void unlock_map(struct arena_info *arena, u32 premap)
		__releases(&arena->map_locks[idx].lock)
{
	u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;

	spin_unlock(&arena->map_locks[idx].lock);
}

static int btt_data_read(struct arena_info *arena, struct page *page,
			unsigned int off, u32 lba, u32 len)
{
	int ret;
	u64 nsoff = to_namespace_offset(arena, lba);
	void *mem = kmap_atomic(page);

	ret = arena_read_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
	kunmap_atomic(mem);

	return ret;
}

static int btt_data_write(struct arena_info *arena, u32 lba,
			struct page *page, unsigned int off, u32 len)
{
	int ret;
	u64 nsoff = to_namespace_offset(arena, lba);
	void *mem = kmap_atomic(page);

	ret = arena_write_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
	kunmap_atomic(mem);

	return ret;
}

static void zero_fill_data(struct page *page, unsigned int off, u32 len)
{
	void *mem = kmap_atomic(page);

	memset(mem + off, 0, len);
	kunmap_atomic(mem);
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
			struct arena_info *arena, u32 postmap, int rw)
{
	unsigned int len = btt_meta_size(btt);
	u64 meta_nsoff;
	int ret = 0;

	if (bip == NULL)
		return 0;

	meta_nsoff = to_namespace_offset(arena, postmap) + btt->sector_size;

	while (len) {
		unsigned int cur_len;
		struct bio_vec bv;
		void *mem;

		bv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
		/*
		 * The 'bv' obtained from bvec_iter_bvec has its .bv_len and
		 * .bv_offset already adjusted for iter->bi_bvec_done, and we
		 * can use those directly
		 */

		cur_len = min(len, bv.bv_len);
		mem = kmap_atomic(bv.bv_page);
		if (rw)
			ret = arena_write_bytes(arena, meta_nsoff,
					mem + bv.bv_offset, cur_len,
					NVDIMM_IO_ATOMIC);
		else
			ret = arena_read_bytes(arena, meta_nsoff,
					mem + bv.bv_offset, cur_len,
					NVDIMM_IO_ATOMIC);

		kunmap_atomic(mem);
		if (ret)
			return ret;

		len -= cur_len;
		meta_nsoff += cur_len;
		if (!bvec_iter_advance(bip->bip_vec, &bip->bip_iter, cur_len))
			return -EIO;
	}

	return ret;
}

#else /* CONFIG_BLK_DEV_INTEGRITY */
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
			struct arena_info *arena, u32 postmap, int rw)
{
	return 0;
}
#endif

static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
			struct page *page, unsigned int off, sector_t sector,
			unsigned int len)
{
	int ret = 0;
	int t_flag, e_flag;
	struct arena_info *arena = NULL;
	u32 lane = 0, premap, postmap;

	while (len) {
		u32 cur_len;

		lane = nd_region_acquire_lane(btt->nd_region);

		ret = lba_to_arena(btt, sector, &premap, &arena);
		if (ret)
			goto out_lane;

		cur_len = min(btt->sector_size, len);

		ret = btt_map_read(arena, premap, &postmap, &t_flag, &e_flag,
				NVDIMM_IO_ATOMIC);
		if (ret)
			goto out_lane;

		/*
		 * We loop to make sure that the post map LBA didn't change
		 * from under us between writing the RTT and doing the actual
		 * read.
		 */
		while (1) {
			u32 new_map;
			int new_t, new_e;

			if (t_flag) {
				zero_fill_data(page, off, cur_len);
				goto out_lane;
			}

			if (e_flag) {
				ret = -EIO;
				goto out_lane;
			}

			arena->rtt[lane] = RTT_VALID | postmap;
			/*
			 * Barrier to make sure this write is not reordered
			 * to do the verification map_read before the RTT store
			 */
			barrier();

			ret = btt_map_read(arena, premap, &new_map, &new_t,
					&new_e, NVDIMM_IO_ATOMIC);
			if (ret)
				goto out_rtt;

			if ((postmap == new_map) && (t_flag == new_t) &&
					(e_flag == new_e))
				break;

			postmap = new_map;
			t_flag = new_t;
			e_flag = new_e;
		}

		ret = btt_data_read(arena, page, off, postmap, cur_len);
		if (ret) {
			/* Media error - set the e_flag */
			btt_map_write(arena, premap, postmap, 0, 1,
					NVDIMM_IO_ATOMIC);
			goto out_rtt;
		}

		if (bip) {
			ret = btt_rw_integrity(btt, bip, arena, postmap, READ);
			if (ret)
				goto out_rtt;
		}

		arena->rtt[lane] = RTT_INVALID;
		nd_region_release_lane(btt->nd_region, lane);

		len -= cur_len;
		off += cur_len;
		sector += btt->sector_size >> SECTOR_SHIFT;
	}

	return 0;

 out_rtt:
	arena->rtt[lane] = RTT_INVALID;
 out_lane:
	nd_region_release_lane(btt->nd_region, lane);
	return ret;
}

/*
 * Normally, arena_{read,write}_bytes will take care of the initial offset
 * adjustment, but in the case of btt_is_badblock, where we query is_bad_pmem,
 * we need the final, raw namespace offset here
 */
static bool btt_is_badblock(struct btt *btt, struct arena_info *arena,
		u32 postmap)
{
	u64 nsoff = adjust_initial_offset(arena->nd_btt,
			to_namespace_offset(arena, postmap));
	sector_t phys_sector = nsoff >> 9;

	return is_bad_pmem(btt->phys_bb, phys_sector, arena->internal_lbasize);
}
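/*
 * Write path, per sector: pick the lane's free block, wait for any
 * readers of that block to drain via the RTT, write the new data (and
 * integrity metadata), commit a flog entry recording old -> new, and
 * only then update the map entry under the map lock. A crash at any
 * point leaves either the old or the new block fully mapped.
 */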
static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
			sector_t sector, struct page *page, unsigned int off,
			unsigned int len)
{
	int ret = 0;
	struct arena_info *arena = NULL;
	u32 premap = 0, old_postmap, new_postmap, lane = 0, i;
	struct log_entry log;
	int sub;

	while (len) {
		u32 cur_len;
		int e_flag;

 retry:
		lane = nd_region_acquire_lane(btt->nd_region);

		ret = lba_to_arena(btt, sector, &premap, &arena);
		if (ret)
			goto out_lane;
		cur_len = min(btt->sector_size, len);

		if ((arena->flags & IB_FLAG_ERROR_MASK) != 0) {
			ret = -EIO;
			goto out_lane;
		}

		if (btt_is_badblock(btt, arena, arena->freelist[lane].block))
			arena->freelist[lane].has_err = 1;

		if (mutex_is_locked(&arena->err_lock)
				|| arena->freelist[lane].has_err) {
			nd_region_release_lane(btt->nd_region, lane);

			ret = arena_clear_freelist_error(arena, lane);
			if (ret)
				return ret;

			/* OK to acquire a different lane/free block */
			goto retry;
		}

		new_postmap = arena->freelist[lane].block;

		/* Wait if the new block is being read from */
		for (i = 0; i < arena->nfree; i++)
			while (arena->rtt[i] == (RTT_VALID | new_postmap))
				cpu_relax();

		if (new_postmap >= arena->internal_nlba) {
			ret = -EIO;
			goto out_lane;
		}

		ret = btt_data_write(arena, new_postmap, page, off, cur_len);
		if (ret)
			goto out_lane;

		if (bip) {
			ret = btt_rw_integrity(btt, bip, arena, new_postmap,
						WRITE);
			if (ret)
				goto out_lane;
		}

		lock_map(arena, premap);
		ret = btt_map_read(arena, premap, &old_postmap, NULL, &e_flag,
				NVDIMM_IO_ATOMIC);
		if (ret)
			goto out_map;
		if (old_postmap >= arena->internal_nlba) {
			ret = -EIO;
			goto out_map;
		}
		if (e_flag)
			set_e_flag(old_postmap);

		log.lba = cpu_to_le32(premap);
		log.old_map = cpu_to_le32(old_postmap);
		log.new_map = cpu_to_le32(new_postmap);
		log.seq = cpu_to_le32(arena->freelist[lane].seq);
		sub = arena->freelist[lane].sub;
		ret = btt_flog_write(arena, lane, sub, &log);
		if (ret)
			goto out_map;

		ret = btt_map_write(arena, premap, new_postmap, 0, 0,
				NVDIMM_IO_ATOMIC);
		if (ret)
			goto out_map;

		unlock_map(arena, premap);
		nd_region_release_lane(btt->nd_region, lane);

		if (e_flag) {
			ret = arena_clear_freelist_error(arena, lane);
			if (ret)
				return ret;
		}

		len -= cur_len;
		off += cur_len;
		sector += btt->sector_size >> SECTOR_SHIFT;
	}

	return 0;

 out_map:
	unlock_map(arena, premap);
 out_lane:
	nd_region_release_lane(btt->nd_region, lane);
	return ret;
}

static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
			struct page *page, unsigned int len, unsigned int off,
			bool is_write, sector_t sector)
{
	int ret;

	if (!is_write) {
		ret = btt_read_pg(btt, bip, page, off, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		ret = btt_write_pg(btt, bip, sector, page, off, len);
	}

	return ret;
}

static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct btt *btt = q->queuedata;
	struct bvec_iter iter;
	unsigned long start;
	struct bio_vec bvec;
	int err = 0;
	bool do_acct;

	if (!bio_integrity_prep(bio))
		return BLK_QC_T_NONE;

	do_acct = nd_iostat_start(bio, &start);
	bio_for_each_segment(bvec, bio, iter) {
		unsigned int len = bvec.bv_len;

		if (len > PAGE_SIZE || len < btt->sector_size ||
				len % btt->sector_size) {
			dev_err_ratelimited(&btt->nd_btt->dev,
				"unaligned bio segment (len: %d)\n", len);
			bio->bi_status = BLK_STS_IOERR;
			break;
		}

		err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset,
				op_is_write(bio_op(bio)), iter.bi_sector);
		if (err) {
			dev_err(&btt->nd_btt->dev,
				"io error in %s sector %lld, len %d\n",
				(op_is_write(bio_op(bio))) ? "WRITE" : "READ",
				(unsigned long long) iter.bi_sector, len);
			bio->bi_status = errno_to_blk_status(err);
			break;
		}
	}
	if (do_acct)
		nd_iostat_end(bio, start);

	bio_endio(bio);
	return BLK_QC_T_NONE;
}

static int btt_rw_page(struct block_device *bdev, sector_t sector,
		struct page *page, bool is_write)
{
	struct btt *btt = bdev->bd_disk->private_data;
	int rc;
	unsigned int len;

	len = hpage_nr_pages(page) * PAGE_SIZE;
	rc = btt_do_bvec(btt, NULL, page, len, 0, is_write, sector);
	if (rc == 0)
		page_endio(page, is_write, 0);

	return rc;
}

static int btt_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	return 0;
}

static const struct block_device_operations btt_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		btt_rw_page,
	.getgeo =		btt_getgeo,
	.revalidate_disk =	nvdimm_revalidate_disk,
};

static int btt_blk_init(struct btt *btt)
{
	struct nd_btt *nd_btt = btt->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* create a new disk and request queue for btt */
	btt->btt_queue = blk_alloc_queue(GFP_KERNEL);
	if (!btt->btt_queue)
		return -ENOMEM;

	btt->btt_disk = alloc_disk(0);
	if (!btt->btt_disk) {
		blk_cleanup_queue(btt->btt_queue);
		return -ENOMEM;
	}

	nvdimm_namespace_disk_name(ndns, btt->btt_disk->disk_name);
	btt->btt_disk->first_minor = 0;
	btt->btt_disk->fops = &btt_fops;
	btt->btt_disk->private_data = btt;
	btt->btt_disk->queue = btt->btt_queue;
	btt->btt_disk->flags = GENHD_FL_EXT_DEVT;
	btt->btt_disk->queue->backing_dev_info->capabilities |=
			BDI_CAP_SYNCHRONOUS_IO;

	blk_queue_make_request(btt->btt_queue, btt_make_request);
	blk_queue_logical_block_size(btt->btt_queue, btt->sector_size);
	blk_queue_max_hw_sectors(btt->btt_queue, UINT_MAX);
	blk_queue_flag_set(QUEUE_FLAG_NONROT, btt->btt_queue);
	btt->btt_queue->queuedata = btt;

	if (btt_meta_size(btt)) {
		int rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt));

		if (rc) {
			del_gendisk(btt->btt_disk);
			put_disk(btt->btt_disk);
			blk_cleanup_queue(btt->btt_queue);
			return rc;
		}
	}
	set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
	device_add_disk(&btt->nd_btt->dev, btt->btt_disk);
	btt->nd_btt->size = btt->nlba * (u64)btt->sector_size;
	revalidate_disk(btt->btt_disk);

	return 0;
}

static void btt_blk_cleanup(struct btt *btt)
{
	del_gendisk(btt->btt_disk);
	put_disk(btt->btt_disk);
	blk_cleanup_queue(btt->btt_queue);
}

/**
 * btt_init - initialize a block translation table for the given device
 * @nd_btt:	device with BTT geometry and backing device info
 * @rawsize:	raw size in bytes of the backing device
 * @lbasize:	lba size of the backing device
 * @uuid:	A uuid for the backing device - this is stored on media
 * @nd_region:	parent region, used for acquiring IO lanes
 *
 * Initialize a Block Translation Table on a backing device to provide
 * single sector power fail atomicity.
 *
 * Context:
 * Might sleep.
 *
 * Returns:
 * Pointer to a new struct btt on success, NULL on failure.
 */
static struct btt *btt_init(struct nd_btt *nd_btt, unsigned long long rawsize,
		u32 lbasize, u8 *uuid, struct nd_region *nd_region)
{
	int ret;
	struct btt *btt;
	struct nd_namespace_io *nsio;
	struct device *dev = &nd_btt->dev;

	btt = devm_kzalloc(dev, sizeof(struct btt), GFP_KERNEL);
	if (!btt)
		return NULL;

	btt->nd_btt = nd_btt;
	btt->rawsize = rawsize;
	btt->lbasize = lbasize;
	btt->sector_size = ((lbasize >= 4096) ? 4096 : 512);
	INIT_LIST_HEAD(&btt->arena_list);
	mutex_init(&btt->init_lock);
	btt->nd_region = nd_region;
	nsio = to_nd_namespace_io(&nd_btt->ndns->dev);
	btt->phys_bb = &nsio->bb;

	ret = discover_arenas(btt);
	if (ret) {
		dev_err(dev, "init: error in discover_arenas: %d\n", ret);
		return NULL;
	}

	if (btt->init_state != INIT_READY && nd_region->ro) {
		dev_warn(dev, "%s is read-only, unable to init btt metadata\n",
				dev_name(&nd_region->dev));
		return NULL;
	} else if (btt->init_state != INIT_READY) {
		btt->num_arenas = (rawsize / ARENA_MAX_SIZE) +
			((rawsize % ARENA_MAX_SIZE) ? 1 : 0);
		dev_dbg(dev, "init: %d arenas for %llu rawsize\n",
				btt->num_arenas, rawsize);

		ret = create_arenas(btt);
		if (ret) {
			dev_info(dev, "init: create_arenas: %d\n", ret);
			return NULL;
		}

		ret = btt_meta_init(btt);
		if (ret) {
			dev_err(dev, "init: error in meta_init: %d\n", ret);
			return NULL;
		}
	}

	ret = btt_blk_init(btt);
	if (ret) {
		dev_err(dev, "init: error in blk_init: %d\n", ret);
		return NULL;
	}

	btt_debugfs_init(btt);

	return btt;
}

/**
 * btt_fini - de-initialize a BTT
 * @btt:	the BTT handle that was generated by btt_init
 *
 * De-initialize a Block Translation Table on device removal
 *
 * Context:
 * Might sleep.
 */
static void btt_fini(struct btt *btt)
{
	if (btt) {
		btt_blk_cleanup(btt);
		free_arenas(btt);
		debugfs_remove_recursive(btt->debugfs_dir);
	}
}

int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
{
	struct nd_btt *nd_btt = to_nd_btt(ndns->claim);
	struct nd_region *nd_region;
	struct btt_sb *btt_sb;
	struct btt *btt;
	size_t rawsize;

	if (!nd_btt->uuid || !nd_btt->ndns || !nd_btt->lbasize) {
		dev_dbg(&nd_btt->dev, "incomplete btt configuration\n");
		return -ENODEV;
	}

	btt_sb = devm_kzalloc(&nd_btt->dev, sizeof(*btt_sb), GFP_KERNEL);
	if (!btt_sb)
		return -ENOMEM;

	/*
	 * If this returns < 0, that is ok as it just means there wasn't
	 * an existing BTT, and we're creating a new one. We still need to
	 * call this as we need the version dependent fields in nd_btt to be
	 * set correctly based on the holder class
	 */
	nd_btt_version(nd_btt, ndns, btt_sb);

	rawsize = nvdimm_namespace_capacity(ndns) - nd_btt->initial_offset;
	if (rawsize < ARENA_MIN_SIZE) {
		dev_dbg(&nd_btt->dev, "%s must be at least %ld bytes\n",
				dev_name(&ndns->dev),
				ARENA_MIN_SIZE + nd_btt->initial_offset);
		return -ENXIO;
	}
	nd_region = to_nd_region(nd_btt->dev.parent);
	btt = btt_init(nd_btt, rawsize, nd_btt->lbasize, nd_btt->uuid,
			nd_region);
	if (!btt)
		return -ENOMEM;
	nd_btt->btt = btt;

	return 0;
}
EXPORT_SYMBOL(nvdimm_namespace_attach_btt);

int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt)
{
	struct btt *btt = nd_btt->btt;

	btt_fini(btt);
	nd_btt->btt = NULL;

	return 0;
}
EXPORT_SYMBOL(nvdimm_namespace_detach_btt);

static int __init nd_btt_init(void)
{
	int rc = 0;

	debugfs_root = debugfs_create_dir("btt", NULL);
	if (IS_ERR_OR_NULL(debugfs_root))
		rc = -ENXIO;

	return rc;
}

static void __exit nd_btt_exit(void)
{
	debugfs_remove_recursive(debugfs_root);
}

MODULE_ALIAS_ND_DEVICE(ND_DEVICE_BTT);
MODULE_AUTHOR("Vishal Verma <vishal.l.verma@linux.intel.com>");
MODULE_LICENSE("GPL v2");
module_init(nd_btt_init);
module_exit(nd_btt_exit);