// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 */
#include <linux/libnvdimm.h>
#include <linux/badblocks.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/ndctl.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/io.h>
#include "nd-core.h"
#include "nd.h"

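/**
 * badrange_init() - initialize an empty badrange list
 * @badrange: badrange instance to initialize
 */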
void badrange_init(struct badrange *badrange)
{
	INIT_LIST_HEAD(&badrange->list);
	spin_lock_init(&badrange->lock);
}
EXPORT_SYMBOL_GPL(badrange_init);

static void append_badrange_entry(struct badrange *badrange,
		struct badrange_entry *bre, u64 addr, u64 length)
{
	lockdep_assert_held(&badrange->lock);
	bre->start = addr;
	bre->length = length;
	list_add_tail(&bre->list, &badrange->list);
}

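/* Allocate a new entry and append it; the caller must hold badrange->lock */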
static int alloc_and_append_badrange_entry(struct badrange *badrange,
		u64 addr, u64 length, gfp_t flags)
{
	struct badrange_entry *bre;

	bre = kzalloc(sizeof(*bre), flags);
	if (!bre)
		return -ENOMEM;

	append_badrange_entry(badrange, bre, addr, length);
	return 0;
}

static int add_badrange(struct badrange *badrange, u64 addr, u64 length)
{
	struct badrange_entry *bre, *bre_new;

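	/*
	 * This function is entered with badrange->lock held (see
	 * badrange_add()). Drop it across the sleeping GFP_KERNEL
	 * allocation and retake it before touching the list.
	 */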
	spin_unlock(&badrange->lock);
	bre_new = kzalloc(sizeof(*bre_new), GFP_KERNEL);
	spin_lock(&badrange->lock);

	if (list_empty(&badrange->list)) {
		if (!bre_new)
			return -ENOMEM;
		append_badrange_entry(badrange, bre_new, addr, length);
		return 0;
	}

	/*
	 * There is a chance this is a duplicate, check for those first.
	 * This will be the common case, as ARS_STATUS returns all known
	 * errors in the SPA space and we can't query it per region.
	 */
	list_for_each_entry(bre, &badrange->list, list)
		if (bre->start == addr) {
			/* If the length has changed, update this list entry */
			if (bre->length != length)
				bre->length = length;
			kfree(bre_new);
			return 0;
		}

	/*
	 * If this is not a duplicate or a simple length update, add the
	 * entry as is, as any overlapping ranges will get resolved when
	 * the list is consumed and converted to badblocks.
	 */
	if (!bre_new)
		return -ENOMEM;
	append_badrange_entry(badrange, bre_new, addr, length);

	return 0;
}

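/**
 * badrange_add() - add an address range to the badrange list
 * @badrange: badrange list to extend
 * @addr: start of the range, as a system physical address
 * @length: length of the range in bytes
 *
 * A duplicate start address updates the existing entry's length;
 * otherwise the range is appended as-is and any overlap is resolved
 * when the list is converted to badblocks.
 *
 * Return: 0 on success, -ENOMEM if a new entry could not be allocated.
 */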
int badrange_add(struct badrange *badrange, u64 addr, u64 length)
{
	int rc;

	spin_lock(&badrange->lock);
	rc = add_badrange(badrange, addr, length);
	spin_unlock(&badrange->lock);

	return rc;
}
EXPORT_SYMBOL_GPL(badrange_add);

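/**
 * badrange_forget() - clear a span from the badrange list
 * @badrange: badrange list to trim
 * @start: first system physical address to clear
 * @len: number of bytes to clear
 *
 * Entries fully contained in [@start, @start + @len - 1] are deleted;
 * partially overlapping entries are trimmed, and an entry that fully
 * contains the span is split in two.
 */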
void badrange_forget(struct badrange *badrange, phys_addr_t start,
		unsigned int len)
{
	struct list_head *badrange_list = &badrange->list;
	u64 clr_end = start + len - 1;
	struct badrange_entry *bre, *next;

	spin_lock(&badrange->lock);

	/*
	 * [start, clr_end] is the badrange interval being cleared.
	 * [bre->start, bre_end] is the badrange_list entry we're comparing
	 * the above interval against. The badrange list entry may need
	 * to be modified (update either start or length), deleted, or
	 * split into two based on the overlap characteristics.
	 */

	list_for_each_entry_safe(bre, next, badrange_list, list) {
		u64 bre_end = bre->start + bre->length - 1;

		/* Skip intervals with no intersection */
		if (bre_end < start)
			continue;
		if (bre->start > clr_end)
			continue;
		/* Delete completely overlapped badrange entries */
		if ((bre->start >= start) && (bre_end <= clr_end)) {
			list_del(&bre->list);
			kfree(bre);
			continue;
		}
		/* Adjust the start point of partially cleared entries */
		if ((start <= bre->start) && (clr_end > bre->start)) {
			bre->length -= clr_end - bre->start + 1;
			bre->start = clr_end + 1;
			continue;
		}
		/* Adjust bre->length for partial clearing at the tail end */
		if ((bre->start < start) && (bre_end <= clr_end)) {
			/* bre->start remains the same */
			bre->length = start - bre->start;
			continue;
		}
		/*
		 * If clearing in the middle of an entry, we split it into
		 * two by modifying the current entry to represent one half
		 * of the split, and adding a new entry for the second half.
		 */
		if ((bre->start < start) && (bre_end > clr_end)) {
			u64 new_start = clr_end + 1;
			u64 new_len = bre_end - new_start + 1;

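			/*
			 * If the GFP_NOWAIT allocation below fails, the
			 * right half of the split is dropped from the
			 * list; accesses to it will still fault in
			 * hardware.
			 */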
			/* Add new entry covering the right half */
			alloc_and_append_badrange_entry(badrange, new_start,
					new_len, GFP_NOWAIT);
			/* Adjust this entry to cover the left half */
			bre->length = start - bre->start;
			continue;
		}
	}
	spin_unlock(&badrange->lock);
}
EXPORT_SYMBOL_GPL(badrange_forget);

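/* Record @num 512-byte sectors starting at @s in @bb as acknowledged bad blocks */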
static void set_badblock(struct badblocks *bb, sector_t s, int num)
{
	dev_dbg(bb->dev, "Found a bad range (0x%llx, 0x%llx)\n",
			(u64) s * 512, (u64) num * 512);
	/* this isn't an error as the hardware will still throw an exception */
	if (badblocks_set(bb, s, num, 1))
		dev_info_once(bb->dev, "%s: failed for sector %llx\n",
				__func__, (u64) s);
}

/**
 * __add_badblock_range() - Convert a physical address range to bad sectors
 * @bb: badblocks instance to populate
 * @ns_offset: namespace offset where the error range begins (in bytes)
 * @len: number of bytes of badrange to be added
 *
 * This assumes that the range provided with (ns_offset, len) is within
 * the bounds of physical addresses for this namespace, i.e. lies in the
 * interval [ns_start, ns_start + ns_size)
 */
static void __add_badblock_range(struct badblocks *bb, u64 ns_offset, u64 len)
{
	const unsigned int sector_size = 512;
	sector_t start_sector, end_sector;
	u64 num_sectors;
	u32 rem;

	start_sector = div_u64(ns_offset, sector_size);
	end_sector = div_u64_rem(ns_offset + len, sector_size, &rem);
	if (rem)
		end_sector++;
	num_sectors = end_sector - start_sector;

	if (unlikely(num_sectors > (u64)INT_MAX)) {
		u64 remaining = num_sectors;
		sector_t s = start_sector;

		while (remaining) {
			int done = min_t(u64, remaining, INT_MAX);

			set_badblock(bb, s, done);
			remaining -= done;
			s += done;
		}
	} else
		set_badblock(bb, start_sector, num_sectors);
}

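/*
 * Translate each badrange entry that intersects @range into badblocks
 * entries, expressed as 512-byte sectors relative to the start of the
 * range.
 */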
static void badblocks_populate(struct badrange *badrange,
		struct badblocks *bb, const struct range *range)
{
	struct badrange_entry *bre;

	if (list_empty(&badrange->list))
		return;

	list_for_each_entry(bre, &badrange->list, list) {
		u64 bre_end = bre->start + bre->length - 1;

		/* Discard intervals with no intersection */
		if (bre_end < range->start)
			continue;
		if (bre->start > range->end)
			continue;
		/* Deal with any overlap after the start of the namespace */
		if (bre->start >= range->start) {
			u64 start = bre->start;
			u64 len;

			if (bre_end <= range->end)
				len = bre->length;
			else
				len = range->start + range_len(range)
					- bre->start;
			__add_badblock_range(bb, start - range->start, len);
			continue;
		}
		/*
		 * Deal with overlap for a badrange starting before
		 * the namespace.
		 */
		if (bre->start < range->start) {
			u64 len;

			if (bre_end < range->end)
				len = bre->start + bre->length - range->start;
			else
				len = range_len(range);
			__add_badblock_range(bb, 0, len);
		}
	}
}

/**
 * nvdimm_badblocks_populate() - Convert a list of badranges to badblocks
 * @nd_region: parent region of the range to interrogate
 * @bb: badblocks instance to populate
 * @range: resource range to consider
 *
 * The badrange list generated during bus initialization may contain
 * multiple, possibly overlapping physical address ranges. Compare each
 * of these ranges to the resource range currently being initialized,
 * and add badblocks entries for all matching sub-ranges.
 */
void nvdimm_badblocks_populate(struct nd_region *nd_region,
		struct badblocks *bb, const struct range *range)
{
	struct nvdimm_bus *nvdimm_bus;

	if (!is_memory(&nd_region->dev)) {
		dev_WARN_ONCE(&nd_region->dev, 1,
				"%s only valid for pmem regions\n", __func__);
		return;
	}
	nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);

	nvdimm_bus_lock(&nvdimm_bus->dev);
	badblocks_populate(&nvdimm_bus->badrange, bb, range);
	nvdimm_bus_unlock(&nvdimm_bus->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_badblocks_populate);