// SPDX-License-Identifier: GPL-2.0-only
/*
 * Block Translation Table
 * Copyright (c) 2014-2015, Intel Corporation.
 */
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/hdreg.h>
#include <linux/sizes.h>
#include <linux/ndctl.h>
#include <linux/fs.h>
#include <linux/nd.h>
#include <linux/backing-dev.h>
#include <linux/cleanup.h>
#include "btt.h"
#include "nd.h"

enum log_ent_request {
	LOG_NEW_ENT = 0,
	LOG_OLD_ENT
};

static struct device *to_dev(struct arena_info *arena)
{
	return &arena->nd_btt->dev;
}

static u64 adjust_initial_offset(struct nd_btt *nd_btt, u64 offset)
{
	return offset + nd_btt->initial_offset;
}

static int arena_read_bytes(struct arena_info *arena, resource_size_t offset,
		void *buf, size_t n, unsigned long flags)
{
	struct nd_btt *nd_btt = arena->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* arena offsets may be shifted from the base of the device */
	offset = adjust_initial_offset(nd_btt, offset);
	return nvdimm_read_bytes(ndns, offset, buf, n, flags);
}

static int arena_write_bytes(struct arena_info *arena, resource_size_t offset,
		void *buf, size_t n, unsigned long flags)
{
	struct nd_btt *nd_btt = arena->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* arena offsets may be shifted from the base of the device */
	offset = adjust_initial_offset(nd_btt, offset);
	return nvdimm_write_bytes(ndns, offset, buf, n, flags);
}

static int btt_info_write(struct arena_info *arena, struct btt_sb *super)
{
	int ret;

	/*
	 * infooff and info2off should always be at least 512B aligned.
	 * We rely on that to make sure rw_bytes does error clearing
	 * correctly, so make sure that is the case.
	 */
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->infooff, 512),
		"arena->infooff: %#llx is unaligned\n", arena->infooff);
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->info2off, 512),
		"arena->info2off: %#llx is unaligned\n", arena->info2off);

	ret = arena_write_bytes(arena, arena->info2off, super,
			sizeof(struct btt_sb), 0);
	if (ret)
		return ret;

	return arena_write_bytes(arena, arena->infooff, super,
			sizeof(struct btt_sb), 0);
}

static int btt_info_read(struct arena_info *arena, struct btt_sb *super)
{
	return arena_read_bytes(arena, arena->infooff, super,
			sizeof(struct btt_sb), 0);
}

/*
 * 'raw' version of btt_map write
 * Assumptions:
 *   mapping is in little-endian
 *   mapping contains 'E' and 'Z' flags as desired
 */
static int __btt_map_write(struct arena_info *arena, u32 lba, __le32 mapping,
		unsigned long flags)
{
	u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);

	if (unlikely(lba >= arena->external_nlba))
		dev_err_ratelimited(to_dev(arena),
			"%s: lba %#x out of range (max: %#x)\n",
			__func__, lba, arena->external_nlba);
	return arena_write_bytes(arena, ns_off, &mapping, MAP_ENT_SIZE, flags);
}

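/*
 * On-media map entry flag encoding (Z = zero/trim, E = error), as
 * reconstructed from the switch statements in btt_map_write() and
 * btt_map_read() below:
 *
 *   Z E  meaning
 *   0 0  initial state: identity mapping, postmap == premap
 *   0 1  error flag set
 *   1 0  zero/trim flag set
 *   1 1  'normal' map entry, both flag bits set on media
 */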
static int btt_map_write(struct arena_info *arena, u32 lba, u32 mapping,
		u32 z_flag, u32 e_flag, unsigned long rwb_flags)
{
	u32 ze;
	__le32 mapping_le;

	/*
	 * This 'mapping' is supposed to be just the LBA mapping, without
	 * any flags set, so strip the flag bits.
	 */
	mapping = ent_lba(mapping);

	ze = (z_flag << 1) + e_flag;
	switch (ze) {
	case 0:
		/*
		 * We want to set neither of the Z or E flags, and
		 * in the actual layout, this means setting the bit
		 * positions of both to '1' to indicate a 'normal'
		 * map entry
		 */
		mapping |= MAP_ENT_NORMAL;
		break;
	case 1:
		mapping |= (1 << MAP_ERR_SHIFT);
		break;
	case 2:
		mapping |= (1 << MAP_TRIM_SHIFT);
		break;
	default:
		/*
		 * The case where Z and E are both sent in as '1' could be
		 * construed as a valid 'normal' case, but we decide not to,
		 * to avoid confusion
		 */
		dev_err_ratelimited(to_dev(arena),
			"Invalid use of Z and E flags\n");
		return -EIO;
	}

	mapping_le = cpu_to_le32(mapping);
	return __btt_map_write(arena, lba, mapping_le, rwb_flags);
}

static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping,
		int *trim, int *error, unsigned long rwb_flags)
{
	int ret;
	__le32 in;
	u32 raw_mapping, postmap, ze, z_flag, e_flag;
	u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);

	if (unlikely(lba >= arena->external_nlba))
		dev_err_ratelimited(to_dev(arena),
			"%s: lba %#x out of range (max: %#x)\n",
			__func__, lba, arena->external_nlba);

	ret = arena_read_bytes(arena, ns_off, &in, MAP_ENT_SIZE, rwb_flags);
	if (ret)
		return ret;

	raw_mapping = le32_to_cpu(in);

	z_flag = ent_z_flag(raw_mapping);
	e_flag = ent_e_flag(raw_mapping);
	ze = (z_flag << 1) + e_flag;
	postmap = ent_lba(raw_mapping);

	/* Reuse the {z,e}_flag variables for *trim and *error */
	z_flag = 0;
	e_flag = 0;

	switch (ze) {
	case 0:
		/* Initial state. Return postmap = premap */
		*mapping = lba;
		break;
	case 1:
		*mapping = postmap;
		e_flag = 1;
		break;
	case 2:
		*mapping = postmap;
		z_flag = 1;
		break;
	case 3:
		*mapping = postmap;
		break;
	default:
		return -EIO;
	}

	if (trim)
		*trim = z_flag;
	if (error)
		*error = e_flag;

	return ret;
}

static int btt_log_group_read(struct arena_info *arena, u32 lane,
		struct log_group *log)
{
	return arena_read_bytes(arena,
			arena->logoff + (lane * LOG_GRP_SIZE), log,
			LOG_GRP_SIZE, 0);
}

static struct dentry *debugfs_root;

static void arena_debugfs_init(struct arena_info *a, struct dentry *parent,
		int idx)
{
	char dirname[32];
	struct dentry *d;

	/* If for some reason, parent bttN was not created, exit */
	if (!parent)
		return;

	snprintf(dirname, 32, "arena%d", idx);
	d = debugfs_create_dir(dirname, parent);
	if (IS_ERR_OR_NULL(d))
		return;
	a->debugfs_dir = d;

	debugfs_create_x64("size", S_IRUGO, d, &a->size);
	debugfs_create_x64("external_lba_start", S_IRUGO, d,
			&a->external_lba_start);
	debugfs_create_x32("internal_nlba", S_IRUGO, d, &a->internal_nlba);
	debugfs_create_u32("internal_lbasize", S_IRUGO, d,
			&a->internal_lbasize);
	debugfs_create_x32("external_nlba", S_IRUGO, d, &a->external_nlba);
	debugfs_create_u32("external_lbasize", S_IRUGO, d,
			&a->external_lbasize);
	debugfs_create_u32("nfree", S_IRUGO, d, &a->nfree);
	debugfs_create_u16("version_major", S_IRUGO, d, &a->version_major);
	debugfs_create_u16("version_minor", S_IRUGO, d, &a->version_minor);
	debugfs_create_x64("nextoff", S_IRUGO, d, &a->nextoff);
	debugfs_create_x64("infooff", S_IRUGO, d, &a->infooff);
	debugfs_create_x64("dataoff", S_IRUGO, d, &a->dataoff);
	debugfs_create_x64("mapoff", S_IRUGO, d, &a->mapoff);
	debugfs_create_x64("logoff", S_IRUGO, d, &a->logoff);
	debugfs_create_x64("info2off", S_IRUGO, d, &a->info2off);
	debugfs_create_x32("flags", S_IRUGO, d, &a->flags);
	debugfs_create_u32("log_index_0", S_IRUGO, d, &a->log_index[0]);
	debugfs_create_u32("log_index_1", S_IRUGO, d, &a->log_index[1]);
}

static void btt_debugfs_init(struct btt *btt)
{
	int i = 0;
	struct arena_info *arena;

	btt->debugfs_dir = debugfs_create_dir(dev_name(&btt->nd_btt->dev),
			debugfs_root);
	if (IS_ERR_OR_NULL(btt->debugfs_dir))
		return;

	list_for_each_entry(arena, &btt->arena_list, list) {
		arena_debugfs_init(arena, btt->debugfs_dir, i);
		i++;
	}
}

static u32 log_seq(struct log_group *log, int log_idx)
{
	return le32_to_cpu(log->ent[log_idx].seq);
}

/*
 * This function accepts two log entries, and uses the
 * sequence number to find the 'older' entry.
 * On the first ever use of a log group, it also initializes the
 * sequence number of slot 0 to make it the 'new' entry.
 * Finally, it returns which of the entries was the older one.
 *
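 * Example: seq cycles 1 -> 2 -> 3 -> 1, with 0 meaning 'uninitialized'.
 * A group with seq values [2, 3] makes slot 0 the older entry, while
 * [1, 3] wraps around, so slot 1 (seq 3) is the older one.
 *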
 * TODO: The logic feels a bit kludge-y. Make it better.
 */
static int btt_log_get_old(struct arena_info *a, struct log_group *log)
{
	int idx0 = a->log_index[0];
	int idx1 = a->log_index[1];
	int old;

	/*
	 * the first ever time this is seen, the entry goes into [0]
	 * the next time, the following logic works out to put this
	 * (next) entry into [1]
	 */
	if (log_seq(log, idx0) == 0) {
		log->ent[idx0].seq = cpu_to_le32(1);
		return 0;
	}

	if (log_seq(log, idx0) == log_seq(log, idx1))
		return -EINVAL;
	if (log_seq(log, idx0) + log_seq(log, idx1) > 5)
		return -EINVAL;

	if (log_seq(log, idx0) < log_seq(log, idx1)) {
		if ((log_seq(log, idx1) - log_seq(log, idx0)) == 1)
			old = 0;
		else
			old = 1;
	} else {
		if ((log_seq(log, idx0) - log_seq(log, idx1)) == 1)
			old = 1;
		else
			old = 0;
	}

	return old;
}

/*
 * This function copies the desired (old/new) log entry into ent if
 * it is not NULL. It returns the sub-slot number (0 or 1)
 * where the desired log entry was found. Negative return values
 * indicate errors.
 */
static int btt_log_read(struct arena_info *arena, u32 lane,
		struct log_entry *ent, int old_flag)
{
	int ret;
	int old_ent, ret_ent;
	struct log_group log;

	ret = btt_log_group_read(arena, lane, &log);
	if (ret)
		return -EIO;

	old_ent = btt_log_get_old(arena, &log);
	if (old_ent < 0 || old_ent > 1) {
		dev_err(to_dev(arena),
			"log corruption (%d): lane %d seq [%d, %d]\n",
			old_ent, lane,
			le32_to_cpu(log.ent[arena->log_index[0]].seq),
			le32_to_cpu(log.ent[arena->log_index[1]].seq));
		/* TODO set error state? */
		return -EIO;
	}

	ret_ent = (old_flag ? old_ent : (1 - old_ent));

	if (ent != NULL)
		memcpy(ent, &log.ent[arena->log_index[ret_ent]], LOG_ENT_SIZE);

	return ret_ent;
}

/*
 * This function commits a log entry to media
 * It does _not_ prepare the freelist entry for the next write
 * btt_flog_write is the wrapper for updating the freelist elements
 */
static int __btt_log_write(struct arena_info *arena, u32 lane,
		u32 sub, struct log_entry *ent, unsigned long flags)
{
	int ret;
	u32 group_slot = arena->log_index[sub];
	unsigned int log_half = LOG_ENT_SIZE / 2;
	void *src = ent;
	u64 ns_off;

	ns_off = arena->logoff + (lane * LOG_GRP_SIZE) +
		(group_slot * LOG_ENT_SIZE);
	/* split the 16B write into atomic, durable halves */
	ret = arena_write_bytes(arena, ns_off, src, log_half, flags);
	if (ret)
		return ret;

	ns_off += log_half;
	src += log_half;
	return arena_write_bytes(arena, ns_off, src, log_half, flags);
}

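/*
 * Note on the split write above: per the log_entry layout in btt.h, the
 * first 8B half holds { lba, old_map } and the second holds
 * { new_map, seq }. If power is lost between the two halves, the slot
 * keeps its previous seq and still reads back as the 'old' entry, so a
 * torn log write is never mistaken for a completed one.
 */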
static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub,
		struct log_entry *ent)
{
	int ret;

	ret = __btt_log_write(arena, lane, sub, ent, NVDIMM_IO_ATOMIC);
	if (ret)
		return ret;

	/* prepare the next free entry */
	arena->freelist[lane].sub = 1 - arena->freelist[lane].sub;
	if (++(arena->freelist[lane].seq) == 4)
		arena->freelist[lane].seq = 1;
	if (ent_e_flag(le32_to_cpu(ent->old_map)))
		arena->freelist[lane].has_err = 1;
	arena->freelist[lane].block = ent_lba(le32_to_cpu(ent->old_map));

	return ret;
}

/*
 * This function initializes the BTT map to the initial state, which is
 * all-zeroes, and indicates an identity mapping
 */
static int btt_map_init(struct arena_info *arena)
{
	int ret = -EINVAL;
	void *zerobuf;
	size_t offset = 0;
	size_t chunk_size = SZ_2M;
	size_t mapsize = arena->logoff - arena->mapoff;

	zerobuf = kzalloc(chunk_size, GFP_KERNEL);
	if (!zerobuf)
		return -ENOMEM;

	/*
	 * mapoff should always be at least 512B aligned. We rely on that to
	 * make sure rw_bytes does error clearing correctly, so make sure that
	 * is the case.
	 */
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->mapoff, 512),
		"arena->mapoff: %#llx is unaligned\n", arena->mapoff);

	while (mapsize) {
		size_t size = min(mapsize, chunk_size);

		dev_WARN_ONCE(to_dev(arena), size < 512,
			"chunk size: %#zx is unaligned\n", size);
		ret = arena_write_bytes(arena, arena->mapoff + offset, zerobuf,
				size, 0);
		if (ret)
			goto free;

		offset += size;
		mapsize -= size;
		cond_resched();
	}

 free:
	kfree(zerobuf);
	return ret;
}

/*
 * This function initializes the BTT log with 'fake' entries pointing
 * to the initial reserved set of blocks as being free
 */
static int btt_log_init(struct arena_info *arena)
{
	size_t logsize = arena->info2off - arena->logoff;
	size_t chunk_size = SZ_4K, offset = 0;
	struct log_entry ent;
	void *zerobuf;
	int ret;
	u32 i;

	zerobuf = kzalloc(chunk_size, GFP_KERNEL);
	if (!zerobuf)
		return -ENOMEM;
	/*
	 * logoff should always be at least 512B aligned. We rely on that to
	 * make sure rw_bytes does error clearing correctly, so make sure that
	 * is the case.
	 */
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->logoff, 512),
		"arena->logoff: %#llx is unaligned\n", arena->logoff);

	while (logsize) {
		size_t size = min(logsize, chunk_size);

		dev_WARN_ONCE(to_dev(arena), size < 512,
			"chunk size: %#zx is unaligned\n", size);
		ret = arena_write_bytes(arena, arena->logoff + offset, zerobuf,
				size, 0);
		if (ret)
			goto free;

		offset += size;
		logsize -= size;
		cond_resched();
	}

	for (i = 0; i < arena->nfree; i++) {
		ent.lba = cpu_to_le32(i);
		ent.old_map = cpu_to_le32(arena->external_nlba + i);
		ent.new_map = cpu_to_le32(arena->external_nlba + i);
		ent.seq = cpu_to_le32(LOG_SEQ_INIT);
		ret = __btt_log_write(arena, i, 0, &ent, 0);
		if (ret)
			goto free;
	}

 free:
	kfree(zerobuf);
	return ret;
}

static u64 to_namespace_offset(struct arena_info *arena, u64 lba)
{
	return arena->dataoff + ((u64)lba * arena->internal_lbasize);
}

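/*
 * Zeroing the stale free block through arena_write_bytes() also gives the
 * nvdimm core the opportunity to clear any known media errors (badblocks)
 * on it before the write path hands the block out again.
 */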
static int arena_clear_freelist_error(struct arena_info *arena, u32 lane)
{
	int ret = 0;

	if (arena->freelist[lane].has_err) {
		void *zero_page = page_address(ZERO_PAGE(0));
		u32 lba = arena->freelist[lane].block;
		u64 nsoff = to_namespace_offset(arena, lba);
		unsigned long len = arena->sector_size;

		mutex_lock(&arena->err_lock);

		while (len) {
			unsigned long chunk = min(len, PAGE_SIZE);

			ret = arena_write_bytes(arena, nsoff, zero_page,
					chunk, 0);
			if (ret)
				break;
			len -= chunk;
			nsoff += chunk;
			if (len == 0)
				arena->freelist[lane].has_err = 0;
		}
		mutex_unlock(&arena->err_lock);
	}
	return ret;
}

static int btt_freelist_init(struct arena_info *arena)
{
	int new, ret;
	struct log_entry log_new;
	u32 i, map_entry, log_oldmap, log_newmap;

	arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry),
			GFP_KERNEL);
	if (!arena->freelist)
		return -ENOMEM;

	for (i = 0; i < arena->nfree; i++) {
		new = btt_log_read(arena, i, &log_new, LOG_NEW_ENT);
		if (new < 0)
			return new;

		/* old and new map entries with any flags stripped out */
		log_oldmap = ent_lba(le32_to_cpu(log_new.old_map));
		log_newmap = ent_lba(le32_to_cpu(log_new.new_map));

		/* sub points to the next one to be overwritten */
		arena->freelist[i].sub = 1 - new;
		arena->freelist[i].seq = nd_inc_seq(le32_to_cpu(log_new.seq));
		arena->freelist[i].block = log_oldmap;

		/*
		 * FIXME: if error clearing fails during init, we want to make
		 * the BTT read-only
		 */
		if (ent_e_flag(le32_to_cpu(log_new.old_map)) &&
		    !ent_normal(le32_to_cpu(log_new.old_map))) {
			arena->freelist[i].has_err = 1;
			ret = arena_clear_freelist_error(arena, i);
			if (ret)
				dev_err_ratelimited(to_dev(arena),
					"Unable to clear known errors\n");
		}

		/* This implies a newly created or untouched flog entry */
		if (log_oldmap == log_newmap)
			continue;

		/* Check if map recovery is needed */
		ret = btt_map_read(arena, le32_to_cpu(log_new.lba), &map_entry,
				NULL, NULL, 0);
		if (ret)
			return ret;

		/*
		 * The map_entry from btt_map_read() is stripped of any flag
		 * bits, so use the stripped-out versions from the log as well
		 * when testing whether recovery is needed. For restoration,
		 * use the 'raw' version of the log entries, as that captures
		 * what we were originally going to write.
		 */
		if ((log_newmap != map_entry) && (log_oldmap == map_entry)) {
			/*
			 * Last transaction wrote the flog, but wasn't able
			 * to complete the map write. So fix up the map.
			 */
			ret = btt_map_write(arena, le32_to_cpu(log_new.lba),
					le32_to_cpu(log_new.new_map), 0, 0, 0);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static bool ent_is_padding(struct log_entry *ent)
{
	return (ent->lba == 0) && (ent->old_map == 0) && (ent->new_map == 0)
		&& (ent->seq == 0);
}

/*
 * Detecting valid log indices: We read a log group (see the comments in btt.h
 * for a description of a 'log_group' and its 'slots'), and iterate over its
 * four slots. We expect that a padding slot will be all-zeroes, and use this
 * to detect a padding slot vs. an actual entry.
 *
 * If a log_group is in the initial state, i.e. hasn't been used since the
 * creation of this BTT layout, it will have three of the four slots with
 * zeroes. We skip over these log_groups for the detection of log_index. If
 * all log_groups are in the initial state (i.e. the BTT has never been
 * written to), it is safe to assume the 'new format' of log entries in slots
 * (0, 1).
 */
static int log_set_indices(struct arena_info *arena)
{
	bool idx_set = false, initial_state = true;
	int ret, log_index[2] = {-1, -1};
	u32 i, j, next_idx = 0;
	struct log_group log;
	u32 pad_count = 0;

	for (i = 0; i < arena->nfree; i++) {
		ret = btt_log_group_read(arena, i, &log);
		if (ret < 0)
			return ret;

		for (j = 0; j < 4; j++) {
			if (!idx_set) {
				if (ent_is_padding(&log.ent[j])) {
					pad_count++;
					continue;
				} else {
					/* Skip if index has been recorded */
					if ((next_idx == 1) &&
							(j == log_index[0]))
						continue;
					/* valid entry, record index */
					log_index[next_idx] = j;
					next_idx++;
				}
				if (next_idx == 2) {
					/* two valid entries found */
					idx_set = true;
				} else if (next_idx > 2) {
					/* too many valid indices */
					return -ENXIO;
				}
			} else {
				/*
				 * once the indices have been set, just verify
				 * that all subsequent log groups are either in
				 * their initial state or follow the same
				 * indices.
				 */
				if (j == log_index[0]) {
					/* entry must be 'valid' */
					if (ent_is_padding(&log.ent[j]))
						return -ENXIO;
				} else if (j == log_index[1]) {
					;
					/*
					 * log_index[1] can be padding if the
					 * lane never got used and it is still
					 * in the initial state (three 'padding'
					 * entries)
					 */
				} else {
					/* entry must be invalid (padding) */
					if (!ent_is_padding(&log.ent[j]))
						return -ENXIO;
				}
			}
		}
		/*
		 * If any of the log_groups have more than one valid,
		 * non-padding entry, then we are no longer in the
		 * initial state.
		 */
		if (pad_count < 3)
			initial_state = false;
		pad_count = 0;
	}

	if (!initial_state && !idx_set)
		return -ENXIO;

	/*
	 * If all the entries in the log were in the initial state,
	 * assume new padding scheme
	 */
	if (initial_state)
		log_index[1] = 1;

	/*
	 * Only allow the known permutations of log/padding indices,
	 * i.e. (0, 1), and (0, 2)
	 */
	if ((log_index[0] == 0) && ((log_index[1] == 1) || (log_index[1] == 2)))
		; /* known index possibilities */
	else {
		dev_err(to_dev(arena), "Found an unknown padding scheme\n");
		return -ENXIO;
	}

	arena->log_index[0] = log_index[0];
	arena->log_index[1] = log_index[1];
	dev_dbg(to_dev(arena), "log_index_0 = %d\n", log_index[0]);
	dev_dbg(to_dev(arena), "log_index_1 = %d\n", log_index[1]);
	return 0;
}

static int btt_rtt_init(struct arena_info *arena)
{
	arena->rtt = kcalloc(arena->nfree, sizeof(u32), GFP_KERNEL);
	if (arena->rtt == NULL)
		return -ENOMEM;

	return 0;
}

static int btt_maplocks_init(struct arena_info *arena)
{
	u32 i;

	arena->map_locks = kcalloc(arena->nfree, sizeof(struct aligned_lock),
			GFP_KERNEL);
	if (!arena->map_locks)
		return -ENOMEM;

	for (i = 0; i < arena->nfree; i++)
		spin_lock_init(&arena->map_locks[i].lock);

	return 0;
}

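/*
 * On-media arena layout, as computed below (offsets relative to the start
 * of the arena):
 *
 *   info block | data blocks | BTT map | BTT log | info block copy
 *   ^infooff     ^dataoff      ^mapoff   ^logoff   ^info2off
 */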
static struct arena_info *alloc_arena(struct btt *btt, size_t size,
		size_t start, size_t arena_off)
{
	struct arena_info *arena;
	u64 logsize, mapsize, datasize;
	u64 available = size;

	arena = kzalloc(sizeof(*arena), GFP_KERNEL);
	if (!arena)
		return NULL;
	arena->nd_btt = btt->nd_btt;
	arena->sector_size = btt->sector_size;
	mutex_init(&arena->err_lock);

	if (!size)
		return arena;

	arena->size = size;
	arena->external_lba_start = start;
	arena->external_lbasize = btt->lbasize;
	arena->internal_lbasize = roundup(arena->external_lbasize,
			INT_LBASIZE_ALIGNMENT);
	arena->nfree = BTT_DEFAULT_NFREE;
	arena->version_major = btt->nd_btt->version_major;
	arena->version_minor = btt->nd_btt->version_minor;

	if (available % BTT_PG_SIZE)
		available -= (available % BTT_PG_SIZE);

	/* Two pages are reserved for the super block and its copy */
	available -= 2 * BTT_PG_SIZE;

	/* The log takes a fixed amount of space based on nfree */
	logsize = roundup(arena->nfree * LOG_GRP_SIZE, BTT_PG_SIZE);
	available -= logsize;

	/* Calculate optimal split between map and data area */
	arena->internal_nlba = div_u64(available - BTT_PG_SIZE,
			arena->internal_lbasize + MAP_ENT_SIZE);
	arena->external_nlba = arena->internal_nlba - arena->nfree;

	mapsize = roundup((arena->external_nlba * MAP_ENT_SIZE), BTT_PG_SIZE);
	datasize = available - mapsize;

	/* 'Absolute' values, relative to start of storage space */
	arena->infooff = arena_off;
	arena->dataoff = arena->infooff + BTT_PG_SIZE;
	arena->mapoff = arena->dataoff + datasize;
	arena->logoff = arena->mapoff + mapsize;
	arena->info2off = arena->logoff + logsize;

	/* Default log indices are (0,1) */
	arena->log_index[0] = 0;
	arena->log_index[1] = 1;
	return arena;
}

static void free_arenas(struct btt *btt)
{
	struct arena_info *arena, *next;

	list_for_each_entry_safe(arena, next, &btt->arena_list, list) {
		list_del(&arena->list);
		kfree(arena->rtt);
		kfree(arena->map_locks);
		kfree(arena->freelist);
		debugfs_remove_recursive(arena->debugfs_dir);
		kfree(arena);
	}
}

/*
 * This function parses an existing, validated btt superblock and
 * populates the corresponding arena_info struct
 */
static void parse_arena_meta(struct arena_info *arena, struct btt_sb *super,
		u64 arena_off)
{
	arena->internal_nlba = le32_to_cpu(super->internal_nlba);
	arena->internal_lbasize = le32_to_cpu(super->internal_lbasize);
	arena->external_nlba = le32_to_cpu(super->external_nlba);
	arena->external_lbasize = le32_to_cpu(super->external_lbasize);
	arena->nfree = le32_to_cpu(super->nfree);
	arena->version_major = le16_to_cpu(super->version_major);
	arena->version_minor = le16_to_cpu(super->version_minor);

	arena->nextoff = (super->nextoff == 0) ? 0 : (arena_off +
			le64_to_cpu(super->nextoff));
	arena->infooff = arena_off;
	arena->dataoff = arena_off + le64_to_cpu(super->dataoff);
	arena->mapoff = arena_off + le64_to_cpu(super->mapoff);
	arena->logoff = arena_off + le64_to_cpu(super->logoff);
	arena->info2off = arena_off + le64_to_cpu(super->info2off);

	arena->size = (le64_to_cpu(super->nextoff) > 0)
		? (le64_to_cpu(super->nextoff))
		: (arena->info2off - arena->infooff + BTT_PG_SIZE);

	arena->flags = le32_to_cpu(super->flags);
}

static int discover_arenas(struct btt *btt)
{
	int ret = 0;
	struct arena_info *arena;
	size_t remaining = btt->rawsize;
	u64 cur_nlba = 0;
	size_t cur_off = 0;
	int num_arenas = 0;

	struct btt_sb *super __free(kfree) = kzalloc(sizeof(*super), GFP_KERNEL);
	if (!super)
		return -ENOMEM;

	while (remaining) {
		/* Alloc memory for arena */
		arena = alloc_arena(btt, 0, 0, 0);
		if (!arena)
			return -ENOMEM;

		arena->infooff = cur_off;
		ret = btt_info_read(arena, super);
		if (ret)
			goto out;

		if (!nd_btt_arena_is_valid(btt->nd_btt, super)) {
			if (remaining == btt->rawsize) {
				btt->init_state = INIT_NOTFOUND;
				dev_info(to_dev(arena), "No existing arenas\n");
				goto out;
			} else {
				dev_err(to_dev(arena),
					"Found corrupted metadata!\n");
				ret = -ENODEV;
				goto out;
			}
		}

		arena->external_lba_start = cur_nlba;
		parse_arena_meta(arena, super, cur_off);

		ret = log_set_indices(arena);
		if (ret) {
			dev_err(to_dev(arena),
				"Unable to deduce log/padding indices\n");
			goto out;
		}

		ret = btt_freelist_init(arena);
		if (ret)
			goto out;

		ret = btt_rtt_init(arena);
		if (ret)
			goto out;

		ret = btt_maplocks_init(arena);
		if (ret)
			goto out;

		list_add_tail(&arena->list, &btt->arena_list);

		remaining -= arena->size;
		cur_off += arena->size;
		cur_nlba += arena->external_nlba;
		num_arenas++;

		if (arena->nextoff == 0)
			break;
	}
	btt->num_arenas = num_arenas;
	btt->nlba = cur_nlba;
	btt->init_state = INIT_READY;

	return ret;

 out:
	kfree(arena);
	free_arenas(btt);
	return ret;
}

static int create_arenas(struct btt *btt)
{
	size_t remaining = btt->rawsize;
	size_t cur_off = 0;

	while (remaining) {
		struct arena_info *arena;
		size_t arena_size = min_t(u64, ARENA_MAX_SIZE, remaining);

		remaining -= arena_size;
		if (arena_size < ARENA_MIN_SIZE)
			break;

		arena = alloc_arena(btt, arena_size, btt->nlba, cur_off);
		if (!arena) {
			free_arenas(btt);
			return -ENOMEM;
		}
		btt->nlba += arena->external_nlba;
		if (remaining >= ARENA_MIN_SIZE)
			arena->nextoff = arena->size;
		else
			arena->nextoff = 0;
		cur_off += arena_size;
		list_add_tail(&arena->list, &btt->arena_list);
	}

	return 0;
}

/*
 * This function completes arena initialization by writing
 * all the metadata.
 * It is only called for an uninitialized arena when a write
 * to that arena occurs for the first time.
 */
static int btt_arena_write_layout(struct arena_info *arena)
{
	int ret;
	u64 sum;
	struct btt_sb *super;
	struct nd_btt *nd_btt = arena->nd_btt;
	const uuid_t *parent_uuid = nd_dev_to_uuid(&nd_btt->ndns->dev);

	ret = btt_map_init(arena);
	if (ret)
		return ret;

	ret = btt_log_init(arena);
	if (ret)
		return ret;

	super = kzalloc(sizeof(*super), GFP_NOIO);
	if (!super)
		return -ENOMEM;

	strscpy(super->signature, BTT_SIG, sizeof(super->signature));
	export_uuid(super->uuid, nd_btt->uuid);
	export_uuid(super->parent_uuid, parent_uuid);
	super->flags = cpu_to_le32(arena->flags);
	super->version_major = cpu_to_le16(arena->version_major);
	super->version_minor = cpu_to_le16(arena->version_minor);
	super->external_lbasize = cpu_to_le32(arena->external_lbasize);
	super->external_nlba = cpu_to_le32(arena->external_nlba);
	super->internal_lbasize = cpu_to_le32(arena->internal_lbasize);
	super->internal_nlba = cpu_to_le32(arena->internal_nlba);
	super->nfree = cpu_to_le32(arena->nfree);
	super->infosize = cpu_to_le32(sizeof(struct btt_sb));
	super->nextoff = cpu_to_le64(arena->nextoff);
	/*
	 * Subtract arena->infooff (arena start) so numbers are relative
	 * to 'this' arena
	 */
	super->dataoff = cpu_to_le64(arena->dataoff - arena->infooff);
	super->mapoff = cpu_to_le64(arena->mapoff - arena->infooff);
	super->logoff = cpu_to_le64(arena->logoff - arena->infooff);
	super->info2off = cpu_to_le64(arena->info2off - arena->infooff);

	super->flags = 0;
	sum = nd_sb_checksum((struct nd_gen_sb *) super);
	super->checksum = cpu_to_le64(sum);

	ret = btt_info_write(arena, super);

	kfree(super);
	return ret;
}

/*
 * This function completes the initialization for the BTT namespace
 * such that it is ready to accept IOs
 */
static int btt_meta_init(struct btt *btt)
{
	int ret = 0;
	struct arena_info *arena;

	mutex_lock(&btt->init_lock);
	list_for_each_entry(arena, &btt->arena_list, list) {
		ret = btt_arena_write_layout(arena);
		if (ret)
			goto unlock;

		ret = btt_freelist_init(arena);
		if (ret)
			goto unlock;

		ret = btt_rtt_init(arena);
		if (ret)
			goto unlock;

		ret = btt_maplocks_init(arena);
		if (ret)
			goto unlock;
	}

	btt->init_state = INIT_READY;

 unlock:
	mutex_unlock(&btt->init_lock);
	return ret;
}

static u32 btt_meta_size(struct btt *btt)
{
	return btt->lbasize - btt->sector_size;
}

/*
 * This function calculates the arena in which the given LBA lies
 * by doing a linear walk. This is acceptable since we expect only
 * a few arenas. If we have backing devices that get much larger,
 * we can construct a balanced binary tree of arenas at init time
 * so that this range search becomes faster.
 */
static int lba_to_arena(struct btt *btt, sector_t sector, __u32 *premap,
		struct arena_info **arena)
{
	struct arena_info *arena_list;
	__u64 lba = div_u64(sector << SECTOR_SHIFT, btt->sector_size);

	list_for_each_entry(arena_list, &btt->arena_list, list) {
		if (lba < arena_list->external_nlba) {
			*arena = arena_list;
			*premap = lba;
			return 0;
		}
		lba -= arena_list->external_nlba;
	}

	return -EIO;
}

/*
 * The following (lock_map, unlock_map) are mostly just to improve
 * readability, since they index into an array of locks
 */
static void lock_map(struct arena_info *arena, u32 premap)
		__acquires(&arena->map_locks[idx].lock)
{
	u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;

	spin_lock(&arena->map_locks[idx].lock);
}

static void unlock_map(struct arena_info *arena, u32 premap)
		__releases(&arena->map_locks[idx].lock)
{
	u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;

	spin_unlock(&arena->map_locks[idx].lock);
}

static int btt_data_read(struct arena_info *arena, struct page *page,
		unsigned int off, u32 lba, u32 len)
{
	int ret;
	u64 nsoff = to_namespace_offset(arena, lba);
	void *mem = kmap_atomic(page);

	ret = arena_read_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
	kunmap_atomic(mem);

	return ret;
}

static int btt_data_write(struct arena_info *arena, u32 lba,
		struct page *page, unsigned int off, u32 len)
{
	int ret;
	u64 nsoff = to_namespace_offset(arena, lba);
	void *mem = kmap_atomic(page);

	ret = arena_write_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
	kunmap_atomic(mem);

	return ret;
}

static void zero_fill_data(struct page *page, unsigned int off, u32 len)
{
	void *mem = kmap_atomic(page);

	memset(mem + off, 0, len);
	kunmap_atomic(mem);
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
		struct arena_info *arena, u32 postmap, int rw)
{
	unsigned int len = btt_meta_size(btt);
	u64 meta_nsoff;
	int ret = 0;

	if (bip == NULL)
		return 0;

	meta_nsoff = to_namespace_offset(arena, postmap) + btt->sector_size;

	while (len) {
		unsigned int cur_len;
		struct bio_vec bv;
		void *mem;

		bv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
		/*
		 * The 'bv' obtained from bvec_iter_bvec has its .bv_len and
		 * .bv_offset already adjusted for iter->bi_bvec_done, and we
		 * can use those directly
		 */

		cur_len = min(len, bv.bv_len);
		mem = bvec_kmap_local(&bv);
		if (rw)
			ret = arena_write_bytes(arena, meta_nsoff, mem, cur_len,
					NVDIMM_IO_ATOMIC);
		else
			ret = arena_read_bytes(arena, meta_nsoff, mem, cur_len,
					NVDIMM_IO_ATOMIC);

		kunmap_local(mem);
		if (ret)
			return ret;

		len -= cur_len;
		meta_nsoff += cur_len;
		if (!bvec_iter_advance(bip->bip_vec, &bip->bip_iter, cur_len))
			return -EIO;
	}

	return ret;
}

#else /* CONFIG_BLK_DEV_INTEGRITY */
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
		struct arena_info *arena, u32 postmap, int rw)
{
	return 0;
}
#endif

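/*
 * Read path overview: a reader publishes the post-map block it is about to
 * read in the Read Tracking Table (rtt[lane]) so that a concurrent writer
 * will not hand that free block out for new data mid-read. The map entry
 * is then re-read to close the race with a writer that updated it between
 * the first map read and the RTT store.
 */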
static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
		struct page *page, unsigned int off, sector_t sector,
		unsigned int len)
{
	int ret = 0;
	int t_flag, e_flag;
	struct arena_info *arena = NULL;
	u32 lane = 0, premap, postmap;

	while (len) {
		u32 cur_len;

		lane = nd_region_acquire_lane(btt->nd_region);

		ret = lba_to_arena(btt, sector, &premap, &arena);
		if (ret)
			goto out_lane;

		cur_len = min(btt->sector_size, len);

		ret = btt_map_read(arena, premap, &postmap, &t_flag, &e_flag,
				NVDIMM_IO_ATOMIC);
		if (ret)
			goto out_lane;

		/*
		 * We loop to make sure that the post map LBA didn't change
		 * from under us between writing the RTT and doing the actual
		 * read.
		 */
		while (1) {
			u32 new_map;
			int new_t, new_e;

			if (t_flag) {
				zero_fill_data(page, off, cur_len);
				goto out_lane;
			}

			if (e_flag) {
				ret = -EIO;
				goto out_lane;
			}

			arena->rtt[lane] = RTT_VALID | postmap;
			/*
			 * Barrier to make sure this write is not reordered
			 * to do the verification map_read before the RTT store
			 */
			barrier();

			ret = btt_map_read(arena, premap, &new_map, &new_t,
					&new_e, NVDIMM_IO_ATOMIC);
			if (ret)
				goto out_rtt;

			if ((postmap == new_map) && (t_flag == new_t) &&
					(e_flag == new_e))
				break;

			postmap = new_map;
			t_flag = new_t;
			e_flag = new_e;
		}

		ret = btt_data_read(arena, page, off, postmap, cur_len);
		if (ret) {
			/* Media error - set the e_flag */
			if (btt_map_write(arena, premap, postmap, 0, 1, NVDIMM_IO_ATOMIC))
				dev_warn_ratelimited(to_dev(arena),
					"Error persistently tracking bad blocks at %#x\n",
					premap);
			goto out_rtt;
		}

		if (bip) {
			ret = btt_rw_integrity(btt, bip, arena, postmap, READ);
			if (ret)
				goto out_rtt;
		}

		arena->rtt[lane] = RTT_INVALID;
		nd_region_release_lane(btt->nd_region, lane);

		len -= cur_len;
		off += cur_len;
		sector += btt->sector_size >> SECTOR_SHIFT;
	}

	return 0;

 out_rtt:
	arena->rtt[lane] = RTT_INVALID;
 out_lane:
	nd_region_release_lane(btt->nd_region, lane);
	return ret;
}

/*
 * Normally, arena_{read,write}_bytes will take care of the initial offset
 * adjustment, but in the case of btt_is_badblock, where we query is_bad_pmem,
 * we need the final, raw namespace offset here
 */
static bool btt_is_badblock(struct btt *btt, struct arena_info *arena,
		u32 postmap)
{
	u64 nsoff = adjust_initial_offset(arena->nd_btt,
			to_namespace_offset(arena, postmap));
	sector_t phys_sector = nsoff >> 9;

	return is_bad_pmem(btt->phys_bb, phys_sector, arena->internal_lbasize);
}

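/*
 * Write path overview: new data always lands in the lane's current free
 * block. The flog entry recording the old->new map transition is committed
 * before the map itself is updated, so a crash between the two steps is
 * detected and repaired by btt_freelist_init() on the next startup.
 */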
static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
		sector_t sector, struct page *page, unsigned int off,
		unsigned int len)
{
	int ret = 0;
	struct arena_info *arena = NULL;
	u32 premap = 0, old_postmap, new_postmap, lane = 0, i;
	struct log_entry log;
	int sub;

	while (len) {
		u32 cur_len;
		int e_flag;

 retry:
		lane = nd_region_acquire_lane(btt->nd_region);

		ret = lba_to_arena(btt, sector, &premap, &arena);
		if (ret)
			goto out_lane;
		cur_len = min(btt->sector_size, len);

		if ((arena->flags & IB_FLAG_ERROR_MASK) != 0) {
			ret = -EIO;
			goto out_lane;
		}

		if (btt_is_badblock(btt, arena, arena->freelist[lane].block))
			arena->freelist[lane].has_err = 1;

		if (mutex_is_locked(&arena->err_lock)
				|| arena->freelist[lane].has_err) {
			nd_region_release_lane(btt->nd_region, lane);

			ret = arena_clear_freelist_error(arena, lane);
			if (ret)
				return ret;

			/* OK to acquire a different lane/free block */
			goto retry;
		}

		new_postmap = arena->freelist[lane].block;

		/* Wait if the new block is being read from */
		for (i = 0; i < arena->nfree; i++)
			while (arena->rtt[i] == (RTT_VALID | new_postmap))
				cpu_relax();

		if (new_postmap >= arena->internal_nlba) {
			ret = -EIO;
			goto out_lane;
		}

		ret = btt_data_write(arena, new_postmap, page, off, cur_len);
		if (ret)
			goto out_lane;

		if (bip) {
			ret = btt_rw_integrity(btt, bip, arena, new_postmap,
					WRITE);
			if (ret)
				goto out_lane;
		}

		lock_map(arena, premap);
		ret = btt_map_read(arena, premap, &old_postmap, NULL, &e_flag,
				NVDIMM_IO_ATOMIC);
		if (ret)
			goto out_map;
		if (old_postmap >= arena->internal_nlba) {
			ret = -EIO;
			goto out_map;
		}
		if (e_flag)
			set_e_flag(old_postmap);

		log.lba = cpu_to_le32(premap);
		log.old_map = cpu_to_le32(old_postmap);
		log.new_map = cpu_to_le32(new_postmap);
		log.seq = cpu_to_le32(arena->freelist[lane].seq);
		sub = arena->freelist[lane].sub;
		ret = btt_flog_write(arena, lane, sub, &log);
		if (ret)
			goto out_map;

		ret = btt_map_write(arena, premap, new_postmap, 0, 0,
				NVDIMM_IO_ATOMIC);
		if (ret)
			goto out_map;

		unlock_map(arena, premap);
		nd_region_release_lane(btt->nd_region, lane);

		if (e_flag) {
			ret = arena_clear_freelist_error(arena, lane);
			if (ret)
				return ret;
		}

		len -= cur_len;
		off += cur_len;
		sector += btt->sector_size >> SECTOR_SHIFT;
	}

	return 0;

 out_map:
	unlock_map(arena, premap);
 out_lane:
	nd_region_release_lane(btt->nd_region, lane);
	return ret;
}

static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
		struct page *page, unsigned int len, unsigned int off,
		enum req_op op, sector_t sector)
{
	int ret;

	if (!op_is_write(op)) {
		ret = btt_read_pg(btt, bip, page, off, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		ret = btt_write_pg(btt, bip, sector, page, off, len);
	}

	return ret;
}

static void btt_submit_bio(struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct btt *btt = bio->bi_bdev->bd_disk->private_data;
	struct bvec_iter iter;
	unsigned long start;
	struct bio_vec bvec;
	int err = 0;
	bool do_acct;

	if (!bio_integrity_prep(bio))
		return;

	do_acct = blk_queue_io_stat(bio->bi_bdev->bd_disk->queue);
	if (do_acct)
		start = bio_start_io_acct(bio);
	bio_for_each_segment(bvec, bio, iter) {
		unsigned int len = bvec.bv_len;

		if (len > PAGE_SIZE || len < btt->sector_size ||
				len % btt->sector_size) {
			dev_err_ratelimited(&btt->nd_btt->dev,
				"unaligned bio segment (len: %d)\n", len);
			bio->bi_status = BLK_STS_IOERR;
			break;
		}

		err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset,
				bio_op(bio), iter.bi_sector);
		if (err) {
			dev_err(&btt->nd_btt->dev,
				"io error in %s sector %lld, len %d\n",
				(op_is_write(bio_op(bio))) ? "WRITE" :
				"READ",
				(unsigned long long) iter.bi_sector, len);
			bio->bi_status = errno_to_blk_status(err);
			break;
		}
	}
	if (do_acct)
		bio_end_io_acct(bio, start);

	bio_endio(bio);
}

static int btt_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	return 0;
}

static const struct block_device_operations btt_fops = {
	.owner = THIS_MODULE,
	.submit_bio = btt_submit_bio,
	.getgeo = btt_getgeo,
};

static int btt_blk_init(struct btt *btt)
{
	struct nd_btt *nd_btt = btt->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;
	struct queue_limits lim = {
		.logical_block_size = btt->sector_size,
		.max_hw_sectors = UINT_MAX,
		.max_integrity_segments = 1,
		.features = BLK_FEAT_SYNCHRONOUS,
	};
	int rc;

	if (btt_meta_size(btt) && IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) {
		lim.integrity.tuple_size = btt_meta_size(btt);
		lim.integrity.tag_size = btt_meta_size(btt);
	}

	btt->btt_disk = blk_alloc_disk(&lim, NUMA_NO_NODE);
	if (IS_ERR(btt->btt_disk))
		return PTR_ERR(btt->btt_disk);

	nvdimm_namespace_disk_name(ndns, btt->btt_disk->disk_name);
	btt->btt_disk->first_minor = 0;
	btt->btt_disk->fops = &btt_fops;
	btt->btt_disk->private_data = btt;

	set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
	rc = device_add_disk(&btt->nd_btt->dev, btt->btt_disk, NULL);
	if (rc)
		goto out_cleanup_disk;

	btt->nd_btt->size = btt->nlba * (u64)btt->sector_size;
	nvdimm_check_and_set_ro(btt->btt_disk);

	return 0;

out_cleanup_disk:
	put_disk(btt->btt_disk);
	return rc;
}

static void btt_blk_cleanup(struct btt *btt)
{
	del_gendisk(btt->btt_disk);
	put_disk(btt->btt_disk);
}

/**
 * btt_init - initialize a block translation table for the given device
 * @nd_btt: device with BTT geometry and backing device info
 * @rawsize: raw size in bytes of the backing device
 * @lbasize: lba size of the backing device
 * @uuid: A uuid for the backing device - this is stored on media
 * @nd_region: &struct nd_region for the REGION device
 *
 * Initialize a Block Translation Table on a backing device to provide
 * single sector power fail atomicity.
 *
 * Context:
 * Might sleep.
 *
 * Returns:
 * Pointer to a new struct btt on success, NULL on failure.
 */
static struct btt *btt_init(struct nd_btt *nd_btt, unsigned long long rawsize,
		u32 lbasize, uuid_t *uuid,
		struct nd_region *nd_region)
{
	int ret;
	struct btt *btt;
	struct nd_namespace_io *nsio;
	struct device *dev = &nd_btt->dev;

	btt = devm_kzalloc(dev, sizeof(struct btt), GFP_KERNEL);
	if (!btt)
		return NULL;

	btt->nd_btt = nd_btt;
	btt->rawsize = rawsize;
	btt->lbasize = lbasize;
	btt->sector_size = ((lbasize >= 4096) ? 4096 : 512);
	INIT_LIST_HEAD(&btt->arena_list);
	mutex_init(&btt->init_lock);
	btt->nd_region = nd_region;
	nsio = to_nd_namespace_io(&nd_btt->ndns->dev);
	btt->phys_bb = &nsio->bb;

	ret = discover_arenas(btt);
	if (ret) {
		dev_err(dev, "init: error in arena_discover: %d\n", ret);
		return NULL;
	}

	if (btt->init_state != INIT_READY && nd_region->ro) {
		dev_warn(dev, "%s is read-only, unable to init btt metadata\n",
				dev_name(&nd_region->dev));
		return NULL;
	} else if (btt->init_state != INIT_READY) {
		btt->num_arenas = (rawsize / ARENA_MAX_SIZE) +
			((rawsize % ARENA_MAX_SIZE) ? 1 : 0);
		dev_dbg(dev, "init: %d arenas for %llu rawsize\n",
				btt->num_arenas, rawsize);

		ret = create_arenas(btt);
		if (ret) {
			dev_info(dev, "init: create_arenas: %d\n", ret);
			return NULL;
		}

		ret = btt_meta_init(btt);
		if (ret) {
			dev_err(dev, "init: error in meta_init: %d\n", ret);
			return NULL;
		}
	}

	ret = btt_blk_init(btt);
	if (ret) {
		dev_err(dev, "init: error in blk_init: %d\n", ret);
		return NULL;
	}

	btt_debugfs_init(btt);

	return btt;
}

/**
 * btt_fini - de-initialize a BTT
 * @btt: the BTT handle that was generated by btt_init
 *
 * De-initialize a Block Translation Table on device removal
 *
 * Context:
 * Might sleep.
 */
static void btt_fini(struct btt *btt)
{
	if (btt) {
		btt_blk_cleanup(btt);
		free_arenas(btt);
		debugfs_remove_recursive(btt->debugfs_dir);
	}
}

int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
{
	struct nd_btt *nd_btt = to_nd_btt(ndns->claim);
	struct nd_region *nd_region;
	struct btt_sb *btt_sb;
	struct btt *btt;
	size_t size, rawsize;
	int rc;

	if (!nd_btt->uuid || !nd_btt->ndns || !nd_btt->lbasize) {
		dev_dbg(&nd_btt->dev, "incomplete btt configuration\n");
		return -ENODEV;
	}

	btt_sb = devm_kzalloc(&nd_btt->dev, sizeof(*btt_sb), GFP_KERNEL);
	if (!btt_sb)
		return -ENOMEM;

	size = nvdimm_namespace_capacity(ndns);
	rc = devm_namespace_enable(&nd_btt->dev, ndns, size);
	if (rc)
		return rc;

	/*
	 * If this returns < 0, that is ok as it just means there wasn't
	 * an existing BTT, and we're creating a new one. We still need to
	 * call this as we need the version dependent fields in nd_btt to be
	 * set correctly based on the holder class
	 */
	nd_btt_version(nd_btt, ndns, btt_sb);

	rawsize = size - nd_btt->initial_offset;
	if (rawsize < ARENA_MIN_SIZE) {
		dev_dbg(&nd_btt->dev, "%s must be at least %ld bytes\n",
				dev_name(&ndns->dev),
				ARENA_MIN_SIZE + nd_btt->initial_offset);
		return -ENXIO;
	}
	nd_region = to_nd_region(nd_btt->dev.parent);
	btt = btt_init(nd_btt, rawsize, nd_btt->lbasize, nd_btt->uuid,
			nd_region);
	if (!btt)
		return -ENOMEM;
	nd_btt->btt = btt;

	return 0;
}
EXPORT_SYMBOL(nvdimm_namespace_attach_btt);

int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt)
{
	struct btt *btt = nd_btt->btt;

	btt_fini(btt);
	nd_btt->btt = NULL;

	return 0;
}
EXPORT_SYMBOL(nvdimm_namespace_detach_btt);

static int __init nd_btt_init(void)
{
	int rc = 0;

	debugfs_root = debugfs_create_dir("btt", NULL);
	if (IS_ERR_OR_NULL(debugfs_root))
		rc = -ENXIO;

	return rc;
}

static void __exit nd_btt_exit(void)
{
	debugfs_remove_recursive(debugfs_root);
}

MODULE_ALIAS_ND_DEVICE(ND_DEVICE_BTT);
MODULE_AUTHOR("Vishal Verma <vishal.l.verma@linux.intel.com>");
MODULE_DESCRIPTION("NVDIMM Block Translation Table");
MODULE_LICENSE("GPL v2");
module_init(nd_btt_init);
module_exit(nd_btt_exit);
1/*
2 * Block Translation Table
3 * Copyright (c) 2014-2015, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 */
14#include <linux/highmem.h>
15#include <linux/debugfs.h>
16#include <linux/blkdev.h>
17#include <linux/module.h>
18#include <linux/device.h>
19#include <linux/mutex.h>
20#include <linux/hdreg.h>
21#include <linux/genhd.h>
22#include <linux/sizes.h>
23#include <linux/ndctl.h>
24#include <linux/fs.h>
25#include <linux/nd.h>
26#include <linux/backing-dev.h>
27#include "btt.h"
28#include "nd.h"
29
30enum log_ent_request {
31 LOG_NEW_ENT = 0,
32 LOG_OLD_ENT
33};
34
35static struct device *to_dev(struct arena_info *arena)
36{
37 return &arena->nd_btt->dev;
38}
39
40static u64 adjust_initial_offset(struct nd_btt *nd_btt, u64 offset)
41{
42 return offset + nd_btt->initial_offset;
43}
44
45static int arena_read_bytes(struct arena_info *arena, resource_size_t offset,
46 void *buf, size_t n, unsigned long flags)
47{
48 struct nd_btt *nd_btt = arena->nd_btt;
49 struct nd_namespace_common *ndns = nd_btt->ndns;
50
51 /* arena offsets may be shifted from the base of the device */
52 offset = adjust_initial_offset(nd_btt, offset);
53 return nvdimm_read_bytes(ndns, offset, buf, n, flags);
54}
55
56static int arena_write_bytes(struct arena_info *arena, resource_size_t offset,
57 void *buf, size_t n, unsigned long flags)
58{
59 struct nd_btt *nd_btt = arena->nd_btt;
60 struct nd_namespace_common *ndns = nd_btt->ndns;
61
62 /* arena offsets may be shifted from the base of the device */
63 offset = adjust_initial_offset(nd_btt, offset);
64 return nvdimm_write_bytes(ndns, offset, buf, n, flags);
65}
66
67static int btt_info_write(struct arena_info *arena, struct btt_sb *super)
68{
69 int ret;
70
71 /*
72 * infooff and info2off should always be at least 512B aligned.
73 * We rely on that to make sure rw_bytes does error clearing
74 * correctly, so make sure that is the case.
75 */
76 dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->infooff, 512),
77 "arena->infooff: %#llx is unaligned\n", arena->infooff);
78 dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->info2off, 512),
79 "arena->info2off: %#llx is unaligned\n", arena->info2off);
80
81 ret = arena_write_bytes(arena, arena->info2off, super,
82 sizeof(struct btt_sb), 0);
83 if (ret)
84 return ret;
85
86 return arena_write_bytes(arena, arena->infooff, super,
87 sizeof(struct btt_sb), 0);
88}
89
90static int btt_info_read(struct arena_info *arena, struct btt_sb *super)
91{
92 return arena_read_bytes(arena, arena->infooff, super,
93 sizeof(struct btt_sb), 0);
94}
95
96/*
97 * 'raw' version of btt_map write
98 * Assumptions:
99 * mapping is in little-endian
100 * mapping contains 'E' and 'Z' flags as desired
101 */
102static int __btt_map_write(struct arena_info *arena, u32 lba, __le32 mapping,
103 unsigned long flags)
104{
105 u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);
106
107 if (unlikely(lba >= arena->external_nlba))
108 dev_err_ratelimited(to_dev(arena),
109 "%s: lba %#x out of range (max: %#x)\n",
110 __func__, lba, arena->external_nlba);
111 return arena_write_bytes(arena, ns_off, &mapping, MAP_ENT_SIZE, flags);
112}
113
114static int btt_map_write(struct arena_info *arena, u32 lba, u32 mapping,
115 u32 z_flag, u32 e_flag, unsigned long rwb_flags)
116{
117 u32 ze;
118 __le32 mapping_le;
119
120 /*
121 * This 'mapping' is supposed to be just the LBA mapping, without
122 * any flags set, so strip the flag bits.
123 */
124 mapping = ent_lba(mapping);
125
126 ze = (z_flag << 1) + e_flag;
127 switch (ze) {
128 case 0:
129 /*
130 * We want to set neither of the Z or E flags, and
131 * in the actual layout, this means setting the bit
132 * positions of both to '1' to indicate a 'normal'
133 * map entry
134 */
135 mapping |= MAP_ENT_NORMAL;
136 break;
137 case 1:
138 mapping |= (1 << MAP_ERR_SHIFT);
139 break;
140 case 2:
141 mapping |= (1 << MAP_TRIM_SHIFT);
142 break;
143 default:
144 /*
145 * The case where Z and E are both sent in as '1' could be
146 * construed as a valid 'normal' case, but we decide not to,
147 * to avoid confusion
148 */
149 dev_err_ratelimited(to_dev(arena),
150 "Invalid use of Z and E flags\n");
151 return -EIO;
152 }
153
154 mapping_le = cpu_to_le32(mapping);
155 return __btt_map_write(arena, lba, mapping_le, rwb_flags);
156}
157
158static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping,
159 int *trim, int *error, unsigned long rwb_flags)
160{
161 int ret;
162 __le32 in;
163 u32 raw_mapping, postmap, ze, z_flag, e_flag;
164 u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);
165
166 if (unlikely(lba >= arena->external_nlba))
167 dev_err_ratelimited(to_dev(arena),
168 "%s: lba %#x out of range (max: %#x)\n",
169 __func__, lba, arena->external_nlba);
170
171 ret = arena_read_bytes(arena, ns_off, &in, MAP_ENT_SIZE, rwb_flags);
172 if (ret)
173 return ret;
174
175 raw_mapping = le32_to_cpu(in);
176
177 z_flag = ent_z_flag(raw_mapping);
178 e_flag = ent_e_flag(raw_mapping);
179 ze = (z_flag << 1) + e_flag;
180 postmap = ent_lba(raw_mapping);
181
182 /* Reuse the {z,e}_flag variables for *trim and *error */
183 z_flag = 0;
184 e_flag = 0;
185
186 switch (ze) {
187 case 0:
188 /* Initial state. Return postmap = premap */
189 *mapping = lba;
190 break;
191 case 1:
192 *mapping = postmap;
193 e_flag = 1;
194 break;
195 case 2:
196 *mapping = postmap;
197 z_flag = 1;
198 break;
199 case 3:
200 *mapping = postmap;
201 break;
202 default:
203 return -EIO;
204 }
205
206 if (trim)
207 *trim = z_flag;
208 if (error)
209 *error = e_flag;
210
211 return ret;
212}
213
214static int btt_log_group_read(struct arena_info *arena, u32 lane,
215 struct log_group *log)
216{
217 return arena_read_bytes(arena,
218 arena->logoff + (lane * LOG_GRP_SIZE), log,
219 LOG_GRP_SIZE, 0);
220}
221
222static struct dentry *debugfs_root;
223
224static void arena_debugfs_init(struct arena_info *a, struct dentry *parent,
225 int idx)
226{
227 char dirname[32];
228 struct dentry *d;
229
230 /* If for some reason, parent bttN was not created, exit */
231 if (!parent)
232 return;
233
234 snprintf(dirname, 32, "arena%d", idx);
235 d = debugfs_create_dir(dirname, parent);
236 if (IS_ERR_OR_NULL(d))
237 return;
238 a->debugfs_dir = d;
239
240 debugfs_create_x64("size", S_IRUGO, d, &a->size);
241 debugfs_create_x64("external_lba_start", S_IRUGO, d,
242 &a->external_lba_start);
243 debugfs_create_x32("internal_nlba", S_IRUGO, d, &a->internal_nlba);
244 debugfs_create_u32("internal_lbasize", S_IRUGO, d,
245 &a->internal_lbasize);
246 debugfs_create_x32("external_nlba", S_IRUGO, d, &a->external_nlba);
247 debugfs_create_u32("external_lbasize", S_IRUGO, d,
248 &a->external_lbasize);
249 debugfs_create_u32("nfree", S_IRUGO, d, &a->nfree);
250 debugfs_create_u16("version_major", S_IRUGO, d, &a->version_major);
251 debugfs_create_u16("version_minor", S_IRUGO, d, &a->version_minor);
252 debugfs_create_x64("nextoff", S_IRUGO, d, &a->nextoff);
253 debugfs_create_x64("infooff", S_IRUGO, d, &a->infooff);
254 debugfs_create_x64("dataoff", S_IRUGO, d, &a->dataoff);
255 debugfs_create_x64("mapoff", S_IRUGO, d, &a->mapoff);
256 debugfs_create_x64("logoff", S_IRUGO, d, &a->logoff);
257 debugfs_create_x64("info2off", S_IRUGO, d, &a->info2off);
258 debugfs_create_x32("flags", S_IRUGO, d, &a->flags);
259 debugfs_create_u32("log_index_0", S_IRUGO, d, &a->log_index[0]);
260 debugfs_create_u32("log_index_1", S_IRUGO, d, &a->log_index[1]);
261}
262
263static void btt_debugfs_init(struct btt *btt)
264{
265 int i = 0;
266 struct arena_info *arena;
267
268 btt->debugfs_dir = debugfs_create_dir(dev_name(&btt->nd_btt->dev),
269 debugfs_root);
270 if (IS_ERR_OR_NULL(btt->debugfs_dir))
271 return;
272
273 list_for_each_entry(arena, &btt->arena_list, list) {
274 arena_debugfs_init(arena, btt->debugfs_dir, i);
275 i++;
276 }
277}
278
279static u32 log_seq(struct log_group *log, int log_idx)
280{
281 return le32_to_cpu(log->ent[log_idx].seq);
282}
283
284/*
285 * This function accepts two log entries, and uses the
286 * sequence number to find the 'older' entry.
287 * It also updates the sequence number in this old entry to
288 * make it the 'new' one if the mark_flag is set.
289 * Finally, it returns which of the entries was the older one.
290 *
291 * TODO The logic feels a bit kludge-y. make it better..
292 */
static int btt_log_get_old(struct arena_info *a, struct log_group *log)
{
	int idx0 = a->log_index[0];
	int idx1 = a->log_index[1];
	int old;

	/*
	 * the first ever time this is seen, the entry goes into [0]
	 * the next time, the following logic works out to put this
	 * (next) entry into [1]
	 */
	if (log_seq(log, idx0) == 0) {
		log->ent[idx0].seq = cpu_to_le32(1);
		return 0;
	}

	if (log_seq(log, idx0) == log_seq(log, idx1))
		return -EINVAL;
	if (log_seq(log, idx0) + log_seq(log, idx1) > 5)
		return -EINVAL;

	if (log_seq(log, idx0) < log_seq(log, idx1)) {
		if ((log_seq(log, idx1) - log_seq(log, idx0)) == 1)
			old = 0;
		else
			old = 1;
	} else {
		if ((log_seq(log, idx0) - log_seq(log, idx1)) == 1)
			old = 1;
		else
			old = 0;
	}

	return old;
}

/*
 * This function copies the desired (old/new) log entry into ent if
 * it is not NULL. It returns the sub-slot number (0 or 1)
 * where the desired log entry was found. Negative return values
 * indicate errors.
 */
static int btt_log_read(struct arena_info *arena, u32 lane,
			struct log_entry *ent, int old_flag)
{
	int ret;
	int old_ent, ret_ent;
	struct log_group log;

	ret = btt_log_group_read(arena, lane, &log);
	if (ret)
		return -EIO;

	old_ent = btt_log_get_old(arena, &log);
	if (old_ent < 0 || old_ent > 1) {
		dev_err(to_dev(arena),
			"log corruption (%d): lane %d seq [%d, %d]\n",
			old_ent, lane,
			le32_to_cpu(log.ent[arena->log_index[0]].seq),
			le32_to_cpu(log.ent[arena->log_index[1]].seq));
		/* TODO set error state? */
		return -EIO;
	}

	ret_ent = (old_flag ? old_ent : (1 - old_ent));

	if (ent != NULL)
		memcpy(ent, &log.ent[arena->log_index[ret_ent]], LOG_ENT_SIZE);

	return ret_ent;
}

/*
 * This function commits a log entry to media
 * It does _not_ prepare the freelist entry for the next write
 * btt_flog_write is the wrapper for updating the freelist elements
 */
static int __btt_log_write(struct arena_info *arena, u32 lane,
			u32 sub, struct log_entry *ent, unsigned long flags)
{
	int ret;
	u32 group_slot = arena->log_index[sub];
	unsigned int log_half = LOG_ENT_SIZE / 2;
	void *src = ent;
	u64 ns_off;

	ns_off = arena->logoff + (lane * LOG_GRP_SIZE) +
			(group_slot * LOG_ENT_SIZE);
	/* split the 16B write into atomic, durable halves */
	ret = arena_write_bytes(arena, ns_off, src, log_half, flags);
	if (ret)
		return ret;

	ns_off += log_half;
	src += log_half;
	return arena_write_bytes(arena, ns_off, src, log_half, flags);
}

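/*
 * Commit a flog entry, then advance this lane's freelist past it: flip
 * 'sub' so the next commit lands in the group's other live slot, step the
 * sequence number through the 1 -> 2 -> 3 -> 1 cycle, and record the
 * just-released (old_map) block as the lane's next free block.
 */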
static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub,
			struct log_entry *ent)
{
	int ret;

	ret = __btt_log_write(arena, lane, sub, ent, NVDIMM_IO_ATOMIC);
	if (ret)
		return ret;

	/* prepare the next free entry */
	arena->freelist[lane].sub = 1 - arena->freelist[lane].sub;
	if (++(arena->freelist[lane].seq) == 4)
		arena->freelist[lane].seq = 1;
	if (ent_e_flag(le32_to_cpu(ent->old_map)))
		arena->freelist[lane].has_err = 1;
	arena->freelist[lane].block = ent_lba(le32_to_cpu(ent->old_map));

	return ret;
}

/*
 * This function initializes the BTT map to the initial state, which is
 * all-zeroes, and indicates an identity mapping
 */
static int btt_map_init(struct arena_info *arena)
{
	int ret = -EINVAL;
	void *zerobuf;
	size_t offset = 0;
	size_t chunk_size = SZ_2M;
	size_t mapsize = arena->logoff - arena->mapoff;

	zerobuf = kzalloc(chunk_size, GFP_KERNEL);
	if (!zerobuf)
		return -ENOMEM;

	/*
	 * mapoff should always be at least 512B aligned. We rely on that to
	 * make sure rw_bytes does error clearing correctly, so make sure that
	 * is the case.
	 */
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->mapoff, 512),
		"arena->mapoff: %#llx is unaligned\n", arena->mapoff);

	while (mapsize) {
		size_t size = min(mapsize, chunk_size);

		dev_WARN_ONCE(to_dev(arena), size < 512,
			"chunk size: %#zx is unaligned\n", size);
		ret = arena_write_bytes(arena, arena->mapoff + offset, zerobuf,
				size, 0);
		if (ret)
			goto free;

		offset += size;
		mapsize -= size;
		cond_resched();
	}

 free:
	kfree(zerobuf);
	return ret;
}

/*
 * This function initializes the BTT log with 'fake' entries pointing
 * to the initial reserved set of blocks as being free
 */
static int btt_log_init(struct arena_info *arena)
{
	size_t logsize = arena->info2off - arena->logoff;
	size_t chunk_size = SZ_4K, offset = 0;
	struct log_entry ent;
	void *zerobuf;
	int ret;
	u32 i;

	zerobuf = kzalloc(chunk_size, GFP_KERNEL);
	if (!zerobuf)
		return -ENOMEM;
	/*
	 * logoff should always be at least 512B aligned. We rely on that to
	 * make sure rw_bytes does error clearing correctly, so make sure that
	 * is the case.
	 */
	dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->logoff, 512),
		"arena->logoff: %#llx is unaligned\n", arena->logoff);

	while (logsize) {
		size_t size = min(logsize, chunk_size);

		dev_WARN_ONCE(to_dev(arena), size < 512,
			"chunk size: %#zx is unaligned\n", size);
		ret = arena_write_bytes(arena, arena->logoff + offset, zerobuf,
				size, 0);
		if (ret)
			goto free;

		offset += size;
		logsize -= size;
		cond_resched();
	}

	for (i = 0; i < arena->nfree; i++) {
		ent.lba = cpu_to_le32(i);
		ent.old_map = cpu_to_le32(arena->external_nlba + i);
		ent.new_map = cpu_to_le32(arena->external_nlba + i);
		ent.seq = cpu_to_le32(LOG_SEQ_INIT);
		ret = __btt_log_write(arena, i, 0, &ent, 0);
		if (ret)
			goto free;
	}

 free:
	kfree(zerobuf);
	return ret;
}

static u64 to_namespace_offset(struct arena_info *arena, u64 lba)
{
	return arena->dataoff + ((u64)lba * arena->internal_lbasize);
}

static int arena_clear_freelist_error(struct arena_info *arena, u32 lane)
{
	int ret = 0;

	if (arena->freelist[lane].has_err) {
		void *zero_page = page_address(ZERO_PAGE(0));
		u32 lba = arena->freelist[lane].block;
		u64 nsoff = to_namespace_offset(arena, lba);
		unsigned long len = arena->sector_size;

		mutex_lock(&arena->err_lock);

		while (len) {
			unsigned long chunk = min(len, PAGE_SIZE);

			ret = arena_write_bytes(arena, nsoff, zero_page,
				chunk, 0);
			if (ret)
				break;
			len -= chunk;
			nsoff += chunk;
			if (len == 0)
				arena->freelist[lane].has_err = 0;
		}
		mutex_unlock(&arena->err_lock);
	}
	return ret;
}

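/*
 * Rebuild the in-memory freelist from the on-media flog. For each lane,
 * the 'new' flog entry names the block released by the last committed
 * write, which becomes the lane's free block. If that transaction managed
 * to commit the flog but not the subsequent map update (a power-fail
 * window), the map is repaired here before the arena accepts new IO.
 */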
static int btt_freelist_init(struct arena_info *arena)
{
	int old, new, ret;
	u32 i, map_entry;
	struct log_entry log_new, log_old;

	arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry),
					GFP_KERNEL);
	if (!arena->freelist)
		return -ENOMEM;

	for (i = 0; i < arena->nfree; i++) {
		old = btt_log_read(arena, i, &log_old, LOG_OLD_ENT);
		if (old < 0)
			return old;

		new = btt_log_read(arena, i, &log_new, LOG_NEW_ENT);
		if (new < 0)
			return new;

		/* sub points to the next one to be overwritten */
		arena->freelist[i].sub = 1 - new;
		arena->freelist[i].seq = nd_inc_seq(le32_to_cpu(log_new.seq));
		arena->freelist[i].block = ent_lba(le32_to_cpu(log_new.old_map));

		/*
		 * FIXME: if error clearing fails during init, we want to make
		 * the BTT read-only
		 */
		if (ent_e_flag(le32_to_cpu(log_new.old_map))) {
			ret = arena_clear_freelist_error(arena, i);
			if (ret)
				dev_err_ratelimited(to_dev(arena),
					"Unable to clear known errors\n");
		}

		/* This implies a newly created or untouched flog entry */
		if (log_new.old_map == log_new.new_map)
			continue;

		/* Check if map recovery is needed */
		ret = btt_map_read(arena, le32_to_cpu(log_new.lba), &map_entry,
				NULL, NULL, 0);
		if (ret)
			return ret;
		if ((le32_to_cpu(log_new.new_map) != map_entry) &&
				(le32_to_cpu(log_new.old_map) == map_entry)) {
			/*
			 * Last transaction wrote the flog, but wasn't able
			 * to complete the map write. So fix up the map.
			 */
			ret = btt_map_write(arena, le32_to_cpu(log_new.lba),
					le32_to_cpu(log_new.new_map), 0, 0, 0);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static bool ent_is_padding(struct log_entry *ent)
{
	return (ent->lba == 0) && (ent->old_map == 0) && (ent->new_map == 0)
		&& (ent->seq == 0);
}

/*
 * Detecting valid log indices: We read a log group (see the comments in btt.h
 * for a description of a 'log_group' and its 'slots'), and iterate over its
 * four slots. We expect that a padding slot will be all-zeroes, and use this
 * to detect a padding slot vs. an actual entry.
 *
 * If a log_group is in the initial state, i.e. hasn't been used since the
 * creation of this BTT layout, it will have three of the four slots with
 * zeroes. We skip over these log_groups for the detection of log_index. If
 * all log_groups are in the initial state (i.e. the BTT has never been
 * written to), it is safe to assume the 'new format' of log entries in slots
 * (0, 1).
 */
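
/*
 * For example, a group laid out as {entry, entry, pad, pad} yields the
 * current format's indices (0, 1), while {entry, pad, entry, pad} yields
 * the pre-4.15 format's indices (0, 2). These are the only two layouts
 * accepted below.
 */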
static int log_set_indices(struct arena_info *arena)
{
	bool idx_set = false, initial_state = true;
	int ret, log_index[2] = {-1, -1};
	u32 i, j, next_idx = 0;
	struct log_group log;
	u32 pad_count = 0;

	for (i = 0; i < arena->nfree; i++) {
		ret = btt_log_group_read(arena, i, &log);
		if (ret < 0)
			return ret;

		for (j = 0; j < 4; j++) {
			if (!idx_set) {
				if (ent_is_padding(&log.ent[j])) {
					pad_count++;
					continue;
				} else {
					/* Skip if index has been recorded */
					if ((next_idx == 1) &&
						(j == log_index[0]))
						continue;
					/* valid entry, record index */
					log_index[next_idx] = j;
					next_idx++;
				}
				if (next_idx == 2) {
					/* two valid entries found */
					idx_set = true;
				} else if (next_idx > 2) {
					/* too many valid indices */
					return -ENXIO;
				}
			} else {
				/*
				 * once the indices have been set, just verify
				 * that all subsequent log groups are either in
				 * their initial state or follow the same
				 * indices.
				 */
				if (j == log_index[0]) {
					/* entry must be 'valid' */
					if (ent_is_padding(&log.ent[j]))
						return -ENXIO;
				} else if (j == log_index[1]) {
					;
					/*
					 * log_index[1] can be padding if the
					 * lane never got used and it is still
					 * in the initial state (three 'padding'
					 * entries)
					 */
				} else {
					/* entry must be invalid (padding) */
					if (!ent_is_padding(&log.ent[j]))
						return -ENXIO;
				}
			}
		}
		/*
		 * If any of the log_groups have more than one valid,
		 * non-padding entry, then we are no longer in the
		 * initial_state
		 */
		if (pad_count < 3)
			initial_state = false;
		pad_count = 0;
	}

	if (!initial_state && !idx_set)
		return -ENXIO;

	/*
	 * If all the entries in the log were in the initial state,
	 * assume new padding scheme
	 */
	if (initial_state)
		log_index[1] = 1;

	/*
	 * Only allow the known permutations of log/padding indices,
	 * i.e. (0, 1), and (0, 2)
	 */
	if (log_index[0] != 0 || (log_index[1] != 1 && log_index[1] != 2)) {
		dev_err(to_dev(arena), "Found an unknown padding scheme\n");
		return -ENXIO;
	}

	arena->log_index[0] = log_index[0];
	arena->log_index[1] = log_index[1];
	dev_dbg(to_dev(arena), "log_index_0 = %d\n", log_index[0]);
	dev_dbg(to_dev(arena), "log_index_1 = %d\n", log_index[1]);
	return 0;
}

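/*
 * The RTT (read tracking table) has one slot per free-list lane. A reader
 * publishes the postmap block it is about to read; a writer that wants to
 * reuse a free block first waits until no RTT slot names that block, so a
 * read never races with a rewrite of the same internal block.
 */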
static int btt_rtt_init(struct arena_info *arena)
{
	arena->rtt = kcalloc(arena->nfree, sizeof(u32), GFP_KERNEL);
	if (arena->rtt == NULL)
		return -ENOMEM;

	return 0;
}

static int btt_maplocks_init(struct arena_info *arena)
{
	u32 i;

	arena->map_locks = kcalloc(arena->nfree, sizeof(struct aligned_lock),
				GFP_KERNEL);
	if (!arena->map_locks)
		return -ENOMEM;

	for (i = 0; i < arena->nfree; i++)
		spin_lock_init(&arena->map_locks[i].lock);

	return 0;
}

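/*
 * On-media layout of an arena, as carved out below ('off' fields end up
 * absolute, relative to the start of the namespace):
 *
 *   infooff  dataoff            mapoff      logoff   info2off
 *   | info  | data blocks ...  | map ...   | log ... | info copy |
 *
 * The log size is fixed by nfree; the remainder is split between data and
 * map such that every internal block gets a map entry.
 */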
static struct arena_info *alloc_arena(struct btt *btt, size_t size,
				size_t start, size_t arena_off)
{
	struct arena_info *arena;
	u64 logsize, mapsize, datasize;
	u64 available = size;

	arena = kzalloc(sizeof(struct arena_info), GFP_KERNEL);
	if (!arena)
		return NULL;
	arena->nd_btt = btt->nd_btt;
	arena->sector_size = btt->sector_size;
	mutex_init(&arena->err_lock);

	if (!size)
		return arena;

	arena->size = size;
	arena->external_lba_start = start;
	arena->external_lbasize = btt->lbasize;
	arena->internal_lbasize = roundup(arena->external_lbasize,
					INT_LBASIZE_ALIGNMENT);
	arena->nfree = BTT_DEFAULT_NFREE;
	arena->version_major = btt->nd_btt->version_major;
	arena->version_minor = btt->nd_btt->version_minor;

	if (available % BTT_PG_SIZE)
		available -= (available % BTT_PG_SIZE);

	/* Two pages are reserved for the super block and its copy */
	available -= 2 * BTT_PG_SIZE;

	/* The log takes a fixed amount of space based on nfree */
	logsize = roundup(arena->nfree * LOG_GRP_SIZE, BTT_PG_SIZE);
	available -= logsize;

	/* Calculate optimal split between map and data area */
	arena->internal_nlba = div_u64(available - BTT_PG_SIZE,
			arena->internal_lbasize + MAP_ENT_SIZE);
	arena->external_nlba = arena->internal_nlba - arena->nfree;

	mapsize = roundup((arena->external_nlba * MAP_ENT_SIZE), BTT_PG_SIZE);
	datasize = available - mapsize;

	/* 'Absolute' values, relative to start of storage space */
	arena->infooff = arena_off;
	arena->dataoff = arena->infooff + BTT_PG_SIZE;
	arena->mapoff = arena->dataoff + datasize;
	arena->logoff = arena->mapoff + mapsize;
	arena->info2off = arena->logoff + logsize;

	/* Default log indices are (0,1) */
	arena->log_index[0] = 0;
	arena->log_index[1] = 1;
	return arena;
}

static void free_arenas(struct btt *btt)
{
	struct arena_info *arena, *next;

	list_for_each_entry_safe(arena, next, &btt->arena_list, list) {
		list_del(&arena->list);
		kfree(arena->rtt);
		kfree(arena->map_locks);
		kfree(arena->freelist);
		debugfs_remove_recursive(arena->debugfs_dir);
		kfree(arena);
	}
}

/*
 * This function reads an existing valid btt superblock and
 * populates the corresponding arena_info struct
 */
static void parse_arena_meta(struct arena_info *arena, struct btt_sb *super,
				u64 arena_off)
{
	arena->internal_nlba = le32_to_cpu(super->internal_nlba);
	arena->internal_lbasize = le32_to_cpu(super->internal_lbasize);
	arena->external_nlba = le32_to_cpu(super->external_nlba);
	arena->external_lbasize = le32_to_cpu(super->external_lbasize);
	arena->nfree = le32_to_cpu(super->nfree);
	arena->version_major = le16_to_cpu(super->version_major);
	arena->version_minor = le16_to_cpu(super->version_minor);

	arena->nextoff = (super->nextoff == 0) ? 0 : (arena_off +
			le64_to_cpu(super->nextoff));
	arena->infooff = arena_off;
	arena->dataoff = arena_off + le64_to_cpu(super->dataoff);
	arena->mapoff = arena_off + le64_to_cpu(super->mapoff);
	arena->logoff = arena_off + le64_to_cpu(super->logoff);
	arena->info2off = arena_off + le64_to_cpu(super->info2off);

	arena->size = (le64_to_cpu(super->nextoff) > 0)
		? (le64_to_cpu(super->nextoff))
		: (arena->info2off - arena->infooff + BTT_PG_SIZE);

	arena->flags = le32_to_cpu(super->flags);
}

static int discover_arenas(struct btt *btt)
{
	int ret = 0;
	struct arena_info *arena;
	struct btt_sb *super;
	size_t remaining = btt->rawsize;
	u64 cur_nlba = 0;
	size_t cur_off = 0;
	int num_arenas = 0;

	super = kzalloc(sizeof(*super), GFP_KERNEL);
	if (!super)
		return -ENOMEM;

	while (remaining) {
		/* Alloc memory for arena */
		arena = alloc_arena(btt, 0, 0, 0);
		if (!arena) {
			ret = -ENOMEM;
			goto out_super;
		}

		arena->infooff = cur_off;
		ret = btt_info_read(arena, super);
		if (ret)
			goto out;

		if (!nd_btt_arena_is_valid(btt->nd_btt, super)) {
			if (remaining == btt->rawsize) {
				btt->init_state = INIT_NOTFOUND;
				dev_info(to_dev(arena), "No existing arenas\n");
				goto out;
			} else {
				dev_err(to_dev(arena),
					"Found corrupted metadata!\n");
				ret = -ENODEV;
				goto out;
			}
		}

		arena->external_lba_start = cur_nlba;
		parse_arena_meta(arena, super, cur_off);

		ret = log_set_indices(arena);
		if (ret) {
			dev_err(to_dev(arena),
				"Unable to deduce log/padding indices\n");
			goto out;
		}

		ret = btt_freelist_init(arena);
		if (ret)
			goto out;

		ret = btt_rtt_init(arena);
		if (ret)
			goto out;

		ret = btt_maplocks_init(arena);
		if (ret)
			goto out;

		list_add_tail(&arena->list, &btt->arena_list);

		remaining -= arena->size;
		cur_off += arena->size;
		cur_nlba += arena->external_nlba;
		num_arenas++;

		if (arena->nextoff == 0)
			break;
	}
	btt->num_arenas = num_arenas;
	btt->nlba = cur_nlba;
	btt->init_state = INIT_READY;

	kfree(super);
	return ret;

 out:
	kfree(arena);
	free_arenas(btt);
 out_super:
	kfree(super);
	return ret;
}

static int create_arenas(struct btt *btt)
{
	size_t remaining = btt->rawsize;
	size_t cur_off = 0;

	while (remaining) {
		struct arena_info *arena;
		size_t arena_size = min_t(u64, ARENA_MAX_SIZE, remaining);

		remaining -= arena_size;
		if (arena_size < ARENA_MIN_SIZE)
			break;

		arena = alloc_arena(btt, arena_size, btt->nlba, cur_off);
		if (!arena) {
			free_arenas(btt);
			return -ENOMEM;
		}
		btt->nlba += arena->external_nlba;
		if (remaining >= ARENA_MIN_SIZE)
			arena->nextoff = arena->size;
		else
			arena->nextoff = 0;
		cur_off += arena_size;
		list_add_tail(&arena->list, &btt->arena_list);
	}

	return 0;
}

/*
 * This function completes arena initialization by writing
 * all the metadata.
 * It is only called for an uninitialized arena when a write
 * to that arena occurs for the first time.
 */
static int btt_arena_write_layout(struct arena_info *arena)
{
	int ret;
	u64 sum;
	struct btt_sb *super;
	struct nd_btt *nd_btt = arena->nd_btt;
	const u8 *parent_uuid = nd_dev_to_uuid(&nd_btt->ndns->dev);

	ret = btt_map_init(arena);
	if (ret)
		return ret;

	ret = btt_log_init(arena);
	if (ret)
		return ret;

	super = kzalloc(sizeof(struct btt_sb), GFP_NOIO);
	if (!super)
		return -ENOMEM;

	strncpy(super->signature, BTT_SIG, BTT_SIG_LEN);
	memcpy(super->uuid, nd_btt->uuid, 16);
	memcpy(super->parent_uuid, parent_uuid, 16);
	super->flags = cpu_to_le32(arena->flags);
	super->version_major = cpu_to_le16(arena->version_major);
	super->version_minor = cpu_to_le16(arena->version_minor);
	super->external_lbasize = cpu_to_le32(arena->external_lbasize);
	super->external_nlba = cpu_to_le32(arena->external_nlba);
	super->internal_lbasize = cpu_to_le32(arena->internal_lbasize);
	super->internal_nlba = cpu_to_le32(arena->internal_nlba);
	super->nfree = cpu_to_le32(arena->nfree);
	super->infosize = cpu_to_le32(sizeof(struct btt_sb));
	super->nextoff = cpu_to_le64(arena->nextoff);
	/*
	 * Subtract arena->infooff (arena start) so numbers are relative
	 * to 'this' arena
	 */
	super->dataoff = cpu_to_le64(arena->dataoff - arena->infooff);
	super->mapoff = cpu_to_le64(arena->mapoff - arena->infooff);
	super->logoff = cpu_to_le64(arena->logoff - arena->infooff);
	super->info2off = cpu_to_le64(arena->info2off - arena->infooff);

	super->flags = 0;
	sum = nd_sb_checksum((struct nd_gen_sb *) super);
	super->checksum = cpu_to_le64(sum);

	ret = btt_info_write(arena, super);

	kfree(super);
	return ret;
}

/*
 * This function completes the initialization for the BTT namespace
 * such that it is ready to accept IOs
 */
static int btt_meta_init(struct btt *btt)
{
	int ret = 0;
	struct arena_info *arena;

	mutex_lock(&btt->init_lock);
	list_for_each_entry(arena, &btt->arena_list, list) {
		ret = btt_arena_write_layout(arena);
		if (ret)
			goto unlock;

		ret = btt_freelist_init(arena);
		if (ret)
			goto unlock;

		ret = btt_rtt_init(arena);
		if (ret)
			goto unlock;

		ret = btt_maplocks_init(arena);
		if (ret)
			goto unlock;
	}

	btt->init_state = INIT_READY;

 unlock:
	mutex_unlock(&btt->init_lock);
	return ret;
}

static u32 btt_meta_size(struct btt *btt)
{
	return btt->lbasize - btt->sector_size;
}

/*
 * This function calculates the arena in which the given LBA lies
 * by doing a linear walk. This is acceptable since we expect only
 * a few arenas. If we have backing devices that get much larger,
 * we can construct a balanced binary tree of arenas at init time
 * so that this range search becomes faster.
 */
static int lba_to_arena(struct btt *btt, sector_t sector, __u32 *premap,
				struct arena_info **arena)
{
	struct arena_info *arena_list;
	__u64 lba = div_u64(sector << SECTOR_SHIFT, btt->sector_size);

	list_for_each_entry(arena_list, &btt->arena_list, list) {
		if (lba < arena_list->external_nlba) {
			*arena = arena_list;
			*premap = lba;
			return 0;
		}
		lba -= arena_list->external_nlba;
	}

	return -EIO;
}

/*
 * The following (lock_map, unlock_map) are mostly just to improve
 * readability, since they index into an array of locks
 */
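
/*
 * Note: the lock index is derived from the map entry's byte offset, so all
 * map entries that share an L1 cacheline hash to the same lock. This keeps
 * the lock array small (nfree entries) while still serializing the
 * read-flog-write transactions that touch a given map entry.
 */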
static void lock_map(struct arena_info *arena, u32 premap)
		__acquires(&arena->map_locks[idx].lock)
{
	u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;

	spin_lock(&arena->map_locks[idx].lock);
}

static void unlock_map(struct arena_info *arena, u32 premap)
		__releases(&arena->map_locks[idx].lock)
{
	u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;

	spin_unlock(&arena->map_locks[idx].lock);
}

static int btt_data_read(struct arena_info *arena, struct page *page,
			unsigned int off, u32 lba, u32 len)
{
	int ret;
	u64 nsoff = to_namespace_offset(arena, lba);
	void *mem = kmap_atomic(page);

	ret = arena_read_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
	kunmap_atomic(mem);

	return ret;
}

static int btt_data_write(struct arena_info *arena, u32 lba,
			struct page *page, unsigned int off, u32 len)
{
	int ret;
	u64 nsoff = to_namespace_offset(arena, lba);
	void *mem = kmap_atomic(page);

	ret = arena_write_bytes(arena, nsoff, mem + off, len, NVDIMM_IO_ATOMIC);
	kunmap_atomic(mem);

	return ret;
}

static void zero_fill_data(struct page *page, unsigned int off, u32 len)
{
	void *mem = kmap_atomic(page);

	memset(mem + off, 0, len);
	kunmap_atomic(mem);
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
			struct arena_info *arena, u32 postmap, int rw)
{
	unsigned int len = btt_meta_size(btt);
	u64 meta_nsoff;
	int ret = 0;

	if (bip == NULL)
		return 0;

	meta_nsoff = to_namespace_offset(arena, postmap) + btt->sector_size;

	while (len) {
		unsigned int cur_len;
		struct bio_vec bv;
		void *mem;

		bv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
		/*
		 * The 'bv' obtained from bvec_iter_bvec has its .bv_len and
		 * .bv_offset already adjusted for iter->bi_bvec_done, and we
		 * can use those directly
		 */

		cur_len = min(len, bv.bv_len);
		mem = kmap_atomic(bv.bv_page);
		if (rw)
			ret = arena_write_bytes(arena, meta_nsoff,
					mem + bv.bv_offset, cur_len,
					NVDIMM_IO_ATOMIC);
		else
			ret = arena_read_bytes(arena, meta_nsoff,
					mem + bv.bv_offset, cur_len,
					NVDIMM_IO_ATOMIC);

		kunmap_atomic(mem);
		if (ret)
			return ret;

		len -= cur_len;
		meta_nsoff += cur_len;
		if (!bvec_iter_advance(bip->bip_vec, &bip->bip_iter, cur_len))
			return -EIO;
	}

	return ret;
}

#else /* CONFIG_BLK_DEV_INTEGRITY */
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
			struct arena_info *arena, u32 postmap, int rw)
{
	return 0;
}
#endif

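/*
 * Read path: resolve the external (premap) LBA to its postmap block,
 * publish that block in this lane's RTT slot, then re-read the map to
 * close the race with a writer that may have retired the block in the
 * meantime. Only once the two map reads agree are the data and any
 * integrity metadata read out.
 */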
static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
			struct page *page, unsigned int off, sector_t sector,
			unsigned int len)
{
	int ret = 0;
	int t_flag, e_flag;
	struct arena_info *arena = NULL;
	u32 lane = 0, premap, postmap;

	while (len) {
		u32 cur_len;

		lane = nd_region_acquire_lane(btt->nd_region);

		ret = lba_to_arena(btt, sector, &premap, &arena);
		if (ret)
			goto out_lane;

		cur_len = min(btt->sector_size, len);

		ret = btt_map_read(arena, premap, &postmap, &t_flag, &e_flag,
				NVDIMM_IO_ATOMIC);
		if (ret)
			goto out_lane;

		/*
		 * We loop to make sure that the post map LBA didn't change
		 * from under us between writing the RTT and doing the actual
		 * read.
		 */
		while (1) {
			u32 new_map;
			int new_t, new_e;

			if (t_flag) {
				zero_fill_data(page, off, cur_len);
				goto out_lane;
			}

			if (e_flag) {
				ret = -EIO;
				goto out_lane;
			}

			arena->rtt[lane] = RTT_VALID | postmap;
			/*
			 * Barrier to make sure this write is not reordered
			 * to do the verification map_read before the RTT store
			 */
			barrier();

			ret = btt_map_read(arena, premap, &new_map, &new_t,
					&new_e, NVDIMM_IO_ATOMIC);
			if (ret)
				goto out_rtt;

			if ((postmap == new_map) && (t_flag == new_t) &&
					(e_flag == new_e))
				break;

			postmap = new_map;
			t_flag = new_t;
			e_flag = new_e;
		}

		ret = btt_data_read(arena, page, off, postmap, cur_len);
		if (ret) {
			/* Media error - set the e_flag */
			if (btt_map_write(arena, premap, postmap, 0, 1,
					NVDIMM_IO_ATOMIC))
				dev_warn_ratelimited(to_dev(arena),
					"Error persistently tracking bad blocks at %#x\n",
					premap);
			goto out_rtt;
		}

		if (bip) {
			ret = btt_rw_integrity(btt, bip, arena, postmap, READ);
			if (ret)
				goto out_rtt;
		}

		arena->rtt[lane] = RTT_INVALID;
		nd_region_release_lane(btt->nd_region, lane);

		len -= cur_len;
		off += cur_len;
		sector += btt->sector_size >> SECTOR_SHIFT;
	}

	return 0;

 out_rtt:
	arena->rtt[lane] = RTT_INVALID;
 out_lane:
	nd_region_release_lane(btt->nd_region, lane);
	return ret;
}

/*
 * Normally, arena_{read,write}_bytes will take care of the initial offset
 * adjustment, but in the case of btt_is_badblock, where we query is_bad_pmem,
 * we need the final, raw namespace offset here
 */
static bool btt_is_badblock(struct btt *btt, struct arena_info *arena,
		u32 postmap)
{
	u64 nsoff = adjust_initial_offset(arena->nd_btt,
			to_namespace_offset(arena, postmap));
	sector_t phys_sector = nsoff >> 9;

	return is_bad_pmem(btt->phys_bb, phys_sector, arena->internal_lbasize);
}

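/*
 * Write path: take the lane's free block, wait until no reader has it
 * published in the RTT, write the new data (and integrity metadata), then,
 * under the map lock, read the old mapping, commit the flog entry, and
 * finally point the map at the new block. A crash between the flog and map
 * writes is detected and repaired by btt_freelist_init() on next init.
 */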
static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
			sector_t sector, struct page *page, unsigned int off,
			unsigned int len)
{
	int ret = 0;
	struct arena_info *arena = NULL;
	u32 premap = 0, old_postmap, new_postmap, lane = 0, i;
	struct log_entry log;
	int sub;

	while (len) {
		u32 cur_len;
		int e_flag;

 retry:
		lane = nd_region_acquire_lane(btt->nd_region);

		ret = lba_to_arena(btt, sector, &premap, &arena);
		if (ret)
			goto out_lane;
		cur_len = min(btt->sector_size, len);

		if ((arena->flags & IB_FLAG_ERROR_MASK) != 0) {
			ret = -EIO;
			goto out_lane;
		}

		if (btt_is_badblock(btt, arena, arena->freelist[lane].block))
			arena->freelist[lane].has_err = 1;

		if (mutex_is_locked(&arena->err_lock)
				|| arena->freelist[lane].has_err) {
			nd_region_release_lane(btt->nd_region, lane);

			ret = arena_clear_freelist_error(arena, lane);
			if (ret)
				return ret;

			/* OK to acquire a different lane/free block */
			goto retry;
		}

		new_postmap = arena->freelist[lane].block;

		/* Wait if the new block is being read from */
		for (i = 0; i < arena->nfree; i++)
			while (arena->rtt[i] == (RTT_VALID | new_postmap))
				cpu_relax();

		if (new_postmap >= arena->internal_nlba) {
			ret = -EIO;
			goto out_lane;
		}

		ret = btt_data_write(arena, new_postmap, page, off, cur_len);
		if (ret)
			goto out_lane;

		if (bip) {
			ret = btt_rw_integrity(btt, bip, arena, new_postmap,
						WRITE);
			if (ret)
				goto out_lane;
		}

		lock_map(arena, premap);
		ret = btt_map_read(arena, premap, &old_postmap, NULL, &e_flag,
				NVDIMM_IO_ATOMIC);
		if (ret)
			goto out_map;
		if (old_postmap >= arena->internal_nlba) {
			ret = -EIO;
			goto out_map;
		}
		if (e_flag)
			set_e_flag(old_postmap);

		log.lba = cpu_to_le32(premap);
		log.old_map = cpu_to_le32(old_postmap);
		log.new_map = cpu_to_le32(new_postmap);
		log.seq = cpu_to_le32(arena->freelist[lane].seq);
		sub = arena->freelist[lane].sub;
		ret = btt_flog_write(arena, lane, sub, &log);
		if (ret)
			goto out_map;

		ret = btt_map_write(arena, premap, new_postmap, 0, 0,
				NVDIMM_IO_ATOMIC);
		if (ret)
			goto out_map;

		unlock_map(arena, premap);
		nd_region_release_lane(btt->nd_region, lane);

		if (e_flag) {
			ret = arena_clear_freelist_error(arena, lane);
			if (ret)
				return ret;
		}

		len -= cur_len;
		off += cur_len;
		sector += btt->sector_size >> SECTOR_SHIFT;
	}

	return 0;

 out_map:
	unlock_map(arena, premap);
 out_lane:
	nd_region_release_lane(btt->nd_region, lane);
	return ret;
}

static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
			struct page *page, unsigned int len, unsigned int off,
			bool is_write, sector_t sector)
{
	int ret;

	if (!is_write) {
		ret = btt_read_pg(btt, bip, page, off, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		ret = btt_write_pg(btt, bip, sector, page, off, len);
	}

	return ret;
}

static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct btt *btt = q->queuedata;
	struct bvec_iter iter;
	unsigned long start;
	struct bio_vec bvec;
	int err = 0;
	bool do_acct;

	if (!bio_integrity_prep(bio))
		return BLK_QC_T_NONE;

	do_acct = nd_iostat_start(bio, &start);
	bio_for_each_segment(bvec, bio, iter) {
		unsigned int len = bvec.bv_len;

		if (len > PAGE_SIZE || len < btt->sector_size ||
				len % btt->sector_size) {
			dev_err_ratelimited(&btt->nd_btt->dev,
				"unaligned bio segment (len: %d)\n", len);
			bio->bi_status = BLK_STS_IOERR;
			break;
		}

		err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset,
				op_is_write(bio_op(bio)), iter.bi_sector);
		if (err) {
			dev_err(&btt->nd_btt->dev,
				"io error in %s sector %lld, len %d\n",
				(op_is_write(bio_op(bio))) ? "WRITE" : "READ",
				(unsigned long long) iter.bi_sector, len);
			bio->bi_status = errno_to_blk_status(err);
			break;
		}
	}
	if (do_acct)
		nd_iostat_end(bio, start);

	bio_endio(bio);
	return BLK_QC_T_NONE;
}

static int btt_rw_page(struct block_device *bdev, sector_t sector,
		struct page *page, bool is_write)
{
	struct btt *btt = bdev->bd_disk->private_data;
	int rc;
	unsigned int len;

	len = hpage_nr_pages(page) * PAGE_SIZE;
	rc = btt_do_bvec(btt, NULL, page, len, 0, is_write, sector);
	if (rc == 0)
		page_endio(page, is_write, 0);

	return rc;
}

static int btt_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	/* some standard values */
	geo->heads = 1 << 6;
	geo->sectors = 1 << 5;
	geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	return 0;
}

static const struct block_device_operations btt_fops = {
	.owner =		THIS_MODULE,
	.rw_page =		btt_rw_page,
	.getgeo =		btt_getgeo,
	.revalidate_disk =	nvdimm_revalidate_disk,
};

static int btt_blk_init(struct btt *btt)
{
	struct nd_btt *nd_btt = btt->nd_btt;
	struct nd_namespace_common *ndns = nd_btt->ndns;

	/* create a new disk and request queue for btt */
	btt->btt_queue = blk_alloc_queue(GFP_KERNEL);
	if (!btt->btt_queue)
		return -ENOMEM;

	btt->btt_disk = alloc_disk(0);
	if (!btt->btt_disk) {
		blk_cleanup_queue(btt->btt_queue);
		return -ENOMEM;
	}

	nvdimm_namespace_disk_name(ndns, btt->btt_disk->disk_name);
	btt->btt_disk->first_minor = 0;
	btt->btt_disk->fops = &btt_fops;
	btt->btt_disk->private_data = btt;
	btt->btt_disk->queue = btt->btt_queue;
	btt->btt_disk->flags = GENHD_FL_EXT_DEVT;
	btt->btt_disk->queue->backing_dev_info->capabilities |=
			BDI_CAP_SYNCHRONOUS_IO;

	blk_queue_make_request(btt->btt_queue, btt_make_request);
	blk_queue_logical_block_size(btt->btt_queue, btt->sector_size);
	blk_queue_max_hw_sectors(btt->btt_queue, UINT_MAX);
	blk_queue_flag_set(QUEUE_FLAG_NONROT, btt->btt_queue);
	btt->btt_queue->queuedata = btt;

	if (btt_meta_size(btt)) {
		int rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt));

		if (rc) {
			/* the disk was never added; only put it */
			put_disk(btt->btt_disk);
			blk_cleanup_queue(btt->btt_queue);
			return rc;
		}
	}
	set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
	device_add_disk(&btt->nd_btt->dev, btt->btt_disk);
	btt->nd_btt->size = btt->nlba * (u64)btt->sector_size;
	revalidate_disk(btt->btt_disk);

	return 0;
}

static void btt_blk_cleanup(struct btt *btt)
{
	del_gendisk(btt->btt_disk);
	put_disk(btt->btt_disk);
	blk_cleanup_queue(btt->btt_queue);
}

/**
 * btt_init - initialize a block translation table for the given device
 * @nd_btt:	device with BTT geometry and backing device info
 * @rawsize:	raw size in bytes of the backing device
 * @lbasize:	lba size of the backing device
 * @uuid:	A uuid for the backing device - this is stored on media
 * @nd_region:	parent region of the backing device, used to acquire and
 *		release lanes for parallel requests
 *
 * Initialize a Block Translation Table on a backing device to provide
 * single sector power fail atomicity.
 *
 * Context:
 * Might sleep.
 *
 * Returns:
 * Pointer to a new struct btt on success, NULL on failure.
 */
static struct btt *btt_init(struct nd_btt *nd_btt, unsigned long long rawsize,
		u32 lbasize, u8 *uuid, struct nd_region *nd_region)
{
	int ret;
	struct btt *btt;
	struct nd_namespace_io *nsio;
	struct device *dev = &nd_btt->dev;

	btt = devm_kzalloc(dev, sizeof(struct btt), GFP_KERNEL);
	if (!btt)
		return NULL;

	btt->nd_btt = nd_btt;
	btt->rawsize = rawsize;
	btt->lbasize = lbasize;
	btt->sector_size = ((lbasize >= 4096) ? 4096 : 512);
	INIT_LIST_HEAD(&btt->arena_list);
	mutex_init(&btt->init_lock);
	btt->nd_region = nd_region;
	nsio = to_nd_namespace_io(&nd_btt->ndns->dev);
	btt->phys_bb = &nsio->bb;

	ret = discover_arenas(btt);
	if (ret) {
		dev_err(dev, "init: error in arena_discover: %d\n", ret);
		return NULL;
	}

	if (btt->init_state != INIT_READY && nd_region->ro) {
		dev_warn(dev, "%s is read-only, unable to init btt metadata\n",
				dev_name(&nd_region->dev));
		return NULL;
	} else if (btt->init_state != INIT_READY) {
		btt->num_arenas = (rawsize / ARENA_MAX_SIZE) +
			((rawsize % ARENA_MAX_SIZE) ? 1 : 0);
		dev_dbg(dev, "init: %d arenas for %llu rawsize\n",
				btt->num_arenas, rawsize);

		ret = create_arenas(btt);
		if (ret) {
			dev_info(dev, "init: create_arenas: %d\n", ret);
			return NULL;
		}

		ret = btt_meta_init(btt);
		if (ret) {
			dev_err(dev, "init: error in meta_init: %d\n", ret);
			return NULL;
		}
	}

	ret = btt_blk_init(btt);
	if (ret) {
		dev_err(dev, "init: error in blk_init: %d\n", ret);
		return NULL;
	}

	btt_debugfs_init(btt);

	return btt;
}

/**
 * btt_fini - de-initialize a BTT
 * @btt:	the BTT handle that was generated by btt_init
 *
 * De-initialize a Block Translation Table on device removal
 *
 * Context:
 * Might sleep.
 */
static void btt_fini(struct btt *btt)
{
	if (btt) {
		btt_blk_cleanup(btt);
		free_arenas(btt);
		debugfs_remove_recursive(btt->debugfs_dir);
	}
}

int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
{
	struct nd_btt *nd_btt = to_nd_btt(ndns->claim);
	struct nd_region *nd_region;
	struct btt_sb *btt_sb;
	struct btt *btt;
	size_t rawsize;

	if (!nd_btt->uuid || !nd_btt->ndns || !nd_btt->lbasize) {
		dev_dbg(&nd_btt->dev, "incomplete btt configuration\n");
		return -ENODEV;
	}

	btt_sb = devm_kzalloc(&nd_btt->dev, sizeof(*btt_sb), GFP_KERNEL);
	if (!btt_sb)
		return -ENOMEM;

	/*
	 * If this returns < 0, that is ok as it just means there wasn't
	 * an existing BTT, and we're creating a new one. We still need to
	 * call this as we need the version dependent fields in nd_btt to be
	 * set correctly based on the holder class
	 */
	nd_btt_version(nd_btt, ndns, btt_sb);

	rawsize = nvdimm_namespace_capacity(ndns) - nd_btt->initial_offset;
	if (rawsize < ARENA_MIN_SIZE) {
		dev_dbg(&nd_btt->dev, "%s must be at least %llu bytes\n",
				dev_name(&ndns->dev), (unsigned long long)
				(ARENA_MIN_SIZE + nd_btt->initial_offset));
		return -ENXIO;
	}
	nd_region = to_nd_region(nd_btt->dev.parent);
	btt = btt_init(nd_btt, rawsize, nd_btt->lbasize, nd_btt->uuid,
			nd_region);
	if (!btt)
		return -ENOMEM;
	nd_btt->btt = btt;

	return 0;
}
EXPORT_SYMBOL(nvdimm_namespace_attach_btt);

int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt)
{
	struct btt *btt = nd_btt->btt;

	btt_fini(btt);
	nd_btt->btt = NULL;

	return 0;
}
EXPORT_SYMBOL(nvdimm_namespace_detach_btt);

static int __init nd_btt_init(void)
{
	int rc = 0;

	debugfs_root = debugfs_create_dir("btt", NULL);
	if (IS_ERR_OR_NULL(debugfs_root))
		rc = -ENXIO;

	return rc;
}

static void __exit nd_btt_exit(void)
{
	debugfs_remove_recursive(debugfs_root);
}

MODULE_ALIAS_ND_DEVICE(ND_DEVICE_BTT);
MODULE_AUTHOR("Vishal Verma <vishal.l.verma@linux.intel.com>");
MODULE_LICENSE("GPL v2");
module_init(nd_btt_init);
module_exit(nd_btt_exit);