// SPDX-License-Identifier: GPL-2.0-only
/*
 * MTD Oops/Panic logger
 *
 * Copyright © 2007 Nokia Corporation. All rights reserved.
 *
 * Author: Richard Purdie <rpurdie@openedhand.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/mtd.h>
#include <linux/kmsg_dump.h>

/* Maximum MTD partition size */
#define MTDOOPS_MAX_MTD_SIZE (8 * 1024 * 1024)

#define MTDOOPS_KERNMSG_MAGIC 0x5d005d00
#define MTDOOPS_HEADER_SIZE 8

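/*
 * Each record occupies one record_size-sized "page" on flash and begins
 * with an 8-byte header: a 32-bit sequence counter followed by the
 * MTDOOPS_KERNMSG_MAGIC marker. The rest of the record holds the kmsg dump.
 */
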
static unsigned long record_size = 4096;
module_param(record_size, ulong, 0400);
MODULE_PARM_DESC(record_size,
		"record size for MTD OOPS pages in bytes (default 4096)");

static char mtddev[80];
module_param_string(mtddev, mtddev, 80, 0400);
MODULE_PARM_DESC(mtddev,
		"name or index number of the MTD device to use");

static int dump_oops = 1;
module_param(dump_oops, int, 0600);
MODULE_PARM_DESC(dump_oops,
		"set to 1 to dump oopses, 0 to only dump panics (default 1)");

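/*
 * All driver state lives in this single context: the MTD device we are
 * attached to, the ring position (nextpage/nextcount), deferred erase and
 * write work, a bitmap of used pages, and a record buffer preallocated at
 * init so that no allocation is needed in the panic path.
 */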
static struct mtdoops_context {
	struct kmsg_dumper dump;

	int mtd_index;
	struct work_struct work_erase;
	struct work_struct work_write;
	struct mtd_info *mtd;
	int oops_pages;
	int nextpage;
	int nextcount;
	unsigned long *oops_page_used;

	void *oops_buf;
} oops_cxt;

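/* oops_page_used is a bitmap with one bit per record_size-sized page */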
static void mark_page_used(struct mtdoops_context *cxt, int page)
{
	set_bit(page, cxt->oops_page_used);
}

static void mark_page_unused(struct mtdoops_context *cxt, int page)
{
	clear_bit(page, cxt->oops_page_used);
}

static int page_is_used(struct mtdoops_context *cxt, int page)
{
	return test_bit(page, cxt->oops_page_used);
}

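/*
 * Erase the whole eraseblock containing @offset and clear the used bit
 * for every record page that falls inside it.
 */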
static int mtdoops_erase_block(struct mtdoops_context *cxt, int offset)
{
	struct mtd_info *mtd = cxt->mtd;
	u32 start_page_offset = mtd_div_by_eb(offset, mtd) * mtd->erasesize;
	u32 start_page = start_page_offset / record_size;
	u32 erase_pages = mtd->erasesize / record_size;
	struct erase_info erase;
	int ret;
	int page;

	erase.addr = offset;
	erase.len = mtd->erasesize;

	ret = mtd_erase(mtd, &erase);
	if (ret) {
		printk(KERN_WARNING "mtdoops: erase of region [0x%llx, 0x%llx] on \"%s\" failed\n",
		       (unsigned long long)erase.addr,
		       (unsigned long long)erase.len, mtddev);
		return ret;
	}

	/* Mark pages as unused */
	for (page = start_page; page < start_page + erase_pages; page++)
		mark_page_unused(cxt, page);

	return 0;
}

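/*
 * Advance the ring to the next record page, wrapping both the page index
 * and the 32-bit sequence counter. If the next page still holds an old
 * record, schedule the erase worker to reclaim its eraseblock first.
 */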
static void mtdoops_inc_counter(struct mtdoops_context *cxt)
{
	cxt->nextpage++;
	if (cxt->nextpage >= cxt->oops_pages)
		cxt->nextpage = 0;
	cxt->nextcount++;
	if (cxt->nextcount == 0xffffffff)
		cxt->nextcount = 0;

	if (page_is_used(cxt, cxt->nextpage)) {
		schedule_work(&cxt->work_erase);
		return;
	}

	printk(KERN_DEBUG "mtdoops: ready %d, %d (no erase)\n",
	       cxt->nextpage, cxt->nextcount);
}

/*
 * Scheduled work - when we can't proceed without erasing a block.
 * Skips blocks already marked bad, retries the erase up to three times
 * and, on -EIO, marks the block bad and moves on to the next one.
 */
static void mtdoops_workfunc_erase(struct work_struct *work)
{
	struct mtdoops_context *cxt =
			container_of(work, struct mtdoops_context, work_erase);
	struct mtd_info *mtd = cxt->mtd;
	int i = 0, j, ret, mod;

	/* We were unregistered */
	if (!mtd)
		return;

	mod = (cxt->nextpage * record_size) % mtd->erasesize;
	if (mod != 0) {
		cxt->nextpage = cxt->nextpage + ((mtd->erasesize - mod) / record_size);
		if (cxt->nextpage >= cxt->oops_pages)
			cxt->nextpage = 0;
	}

	while ((ret = mtd_block_isbad(mtd, cxt->nextpage * record_size)) > 0) {
badblock:
		printk(KERN_WARNING "mtdoops: bad block at %08lx\n",
		       cxt->nextpage * record_size);
		i++;
		cxt->nextpage = cxt->nextpage + (mtd->erasesize / record_size);
		if (cxt->nextpage >= cxt->oops_pages)
			cxt->nextpage = 0;
		if (i == cxt->oops_pages / (mtd->erasesize / record_size)) {
			printk(KERN_ERR "mtdoops: all blocks bad!\n");
			return;
		}
	}

	if (ret < 0) {
		printk(KERN_ERR "mtdoops: mtd_block_isbad failed, aborting\n");
		return;
	}

	for (j = 0, ret = -1; (j < 3) && (ret < 0); j++)
		ret = mtdoops_erase_block(cxt, cxt->nextpage * record_size);

	if (ret >= 0) {
		printk(KERN_DEBUG "mtdoops: ready %d, %d\n",
		       cxt->nextpage, cxt->nextcount);
		return;
	}

	if (ret == -EIO) {
		ret = mtd_block_markbad(mtd, cxt->nextpage * record_size);
		if (ret < 0 && ret != -EOPNOTSUPP) {
			printk(KERN_ERR "mtdoops: block_markbad failed, aborting\n");
			return;
		}
	}
	goto badblock;
}

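/*
 * Write one record at the current ring position. In the panic path we
 * must use mtd_panic_write(), which does not take locks or sleep;
 * drivers without panic_write support cannot log panics.
 */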
static void mtdoops_write(struct mtdoops_context *cxt, int panic)
{
	struct mtd_info *mtd = cxt->mtd;
	size_t retlen;
	u32 *hdr;
	int ret;

	/* Add mtdoops header to the buffer */
	hdr = cxt->oops_buf;
	hdr[0] = cxt->nextcount;
	hdr[1] = MTDOOPS_KERNMSG_MAGIC;

	if (panic) {
		ret = mtd_panic_write(mtd, cxt->nextpage * record_size,
				      record_size, &retlen, cxt->oops_buf);
		if (ret == -EOPNOTSUPP) {
			printk(KERN_ERR "mtdoops: Cannot write from panic without panic_write\n");
			return;
		}
	} else
		ret = mtd_write(mtd, cxt->nextpage * record_size,
				record_size, &retlen, cxt->oops_buf);

	if (retlen != record_size || ret < 0)
		printk(KERN_ERR "mtdoops: write failure at %ld (%td of %ld written), error %d\n",
		       cxt->nextpage * record_size, retlen, record_size, ret);
	mark_page_used(cxt, cxt->nextpage);
	memset(cxt->oops_buf, 0xff, record_size);

	mtdoops_inc_counter(cxt);
}

static void mtdoops_workfunc_write(struct work_struct *work)
{
	struct mtdoops_context *cxt =
			container_of(work, struct mtdoops_context, work_write);

	mtdoops_write(cxt, 0);
}

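/*
 * Scan every record header on the device to find the most recent record
 * and hence where the next one should go. The comparisons against
 * 0x40000000/0x80000000/0xc0000000 handle the case where the 32-bit
 * sequence counter has wrapped around.
 */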
static void find_next_position(struct mtdoops_context *cxt)
{
	struct mtd_info *mtd = cxt->mtd;
	int ret, page, maxpos = 0;
	u32 count[2], maxcount = 0xffffffff;
	size_t retlen;

	for (page = 0; page < cxt->oops_pages; page++) {
		if (mtd_block_isbad(mtd, page * record_size))
			continue;
		/* Assume the page is used */
		mark_page_used(cxt, page);
		ret = mtd_read(mtd, page * record_size, MTDOOPS_HEADER_SIZE,
			       &retlen, (u_char *)&count[0]);
		if (retlen != MTDOOPS_HEADER_SIZE ||
				(ret < 0 && !mtd_is_bitflip(ret))) {
			printk(KERN_ERR "mtdoops: read failure at %ld (%td of %d read), err %d\n",
			       page * record_size, retlen,
			       MTDOOPS_HEADER_SIZE, ret);
			continue;
		}

		if (count[0] == 0xffffffff && count[1] == 0xffffffff)
			mark_page_unused(cxt, page);
		if (count[0] == 0xffffffff || count[1] != MTDOOPS_KERNMSG_MAGIC)
			continue;
		if (maxcount == 0xffffffff) {
			maxcount = count[0];
			maxpos = page;
		} else if (count[0] < 0x40000000 && maxcount > 0xc0000000) {
			maxcount = count[0];
			maxpos = page;
		} else if (count[0] > maxcount && count[0] < 0xc0000000) {
			maxcount = count[0];
			maxpos = page;
		} else if (count[0] > maxcount && count[0] > 0xc0000000 &&
			   maxcount > 0x80000000) {
			maxcount = count[0];
			maxpos = page;
		}
	}
	if (maxcount == 0xffffffff) {
		cxt->nextpage = cxt->oops_pages - 1;
		cxt->nextcount = 0;
	} else {
		cxt->nextpage = maxpos;
		cxt->nextcount = maxcount;
	}

	mtdoops_inc_counter(cxt);
}

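/*
 * kmsg_dump callback: copy the tail of the kernel log into oops_buf
 * after the header, then either write it out immediately (panic) or
 * defer to the write worker (oops).
 */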
static void mtdoops_do_dump(struct kmsg_dumper *dumper,
			    enum kmsg_dump_reason reason)
{
	struct mtdoops_context *cxt = container_of(dumper,
			struct mtdoops_context, dump);

	/* Only dump oopses if dump_oops is set */
	if (reason == KMSG_DUMP_OOPS && !dump_oops)
		return;

	kmsg_dump_get_buffer(dumper, true, cxt->oops_buf + MTDOOPS_HEADER_SIZE,
			     record_size - MTDOOPS_HEADER_SIZE, NULL);

	if (reason != KMSG_DUMP_OOPS) {
		/* Panics must be written immediately */
		mtdoops_write(cxt, 1);
	} else {
		/* For other cases, schedule work to write it "nicely" */
		schedule_work(&cxt->work_write);
	}
}

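/*
 * Called for every MTD device as it is registered. Attach if the device
 * matches mtddev by name or index, after checking that the geometry is
 * usable: at least two eraseblocks, eraseblock no smaller than
 * record_size, and total size within MTDOOPS_MAX_MTD_SIZE.
 */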
static void mtdoops_notify_add(struct mtd_info *mtd)
{
	struct mtdoops_context *cxt = &oops_cxt;
	u64 mtdoops_pages = div_u64(mtd->size, record_size);
	int err;

	if (!strcmp(mtd->name, mtddev))
		cxt->mtd_index = mtd->index;

	if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0)
		return;

	if (mtd->size < mtd->erasesize * 2) {
		printk(KERN_ERR "mtdoops: MTD partition %d not big enough for mtdoops\n",
		       mtd->index);
		return;
	}
	if (mtd->erasesize < record_size) {
		printk(KERN_ERR "mtdoops: eraseblock size of MTD partition %d too small\n",
		       mtd->index);
		return;
	}
	if (mtd->size > MTDOOPS_MAX_MTD_SIZE) {
		printk(KERN_ERR "mtdoops: mtd%d is too large (limit is %d MiB)\n",
		       mtd->index, MTDOOPS_MAX_MTD_SIZE / 1024 / 1024);
		return;
	}

	/* oops_page_used is a bit field */
	cxt->oops_page_used =
		vmalloc(array_size(sizeof(unsigned long),
				   DIV_ROUND_UP(mtdoops_pages,
						BITS_PER_LONG)));
	if (!cxt->oops_page_used) {
		printk(KERN_ERR "mtdoops: could not allocate page array\n");
		return;
	}

	cxt->dump.max_reason = KMSG_DUMP_OOPS;
	cxt->dump.dump = mtdoops_do_dump;
	err = kmsg_dump_register(&cxt->dump);
	if (err) {
		printk(KERN_ERR "mtdoops: registering kmsg dumper failed, error %d\n", err);
		vfree(cxt->oops_page_used);
		cxt->oops_page_used = NULL;
		return;
	}

	cxt->mtd = mtd;
	cxt->oops_pages = (int)mtd->size / record_size;
	find_next_position(cxt);
	printk(KERN_INFO "mtdoops: Attached to MTD device %d\n", mtd->index);
}

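/*
 * Called when an MTD device goes away: unregister the dumper, detach,
 * and wait for any pending erase/write work to finish.
 */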
static void mtdoops_notify_remove(struct mtd_info *mtd)
{
	struct mtdoops_context *cxt = &oops_cxt;

	if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0)
		return;

	if (kmsg_dump_unregister(&cxt->dump) < 0)
		printk(KERN_WARNING "mtdoops: could not unregister kmsg_dumper\n");

	cxt->mtd = NULL;
	flush_work(&cxt->work_erase);
	flush_work(&cxt->work_write);
}

static struct mtd_notifier mtdoops_notifier = {
	.add	= mtdoops_notify_add,
	.remove	= mtdoops_notify_remove,
};

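/*
 * Module init: validate record_size (a multiple of 4096 and at least
 * 4096 bytes), resolve mtddev as either an index or a device name,
 * preallocate the record buffer, and register with the MTD layer.
 */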
static int __init mtdoops_init(void)
{
	struct mtdoops_context *cxt = &oops_cxt;
	int mtd_index;
	char *endp;

	if (strlen(mtddev) == 0) {
		printk(KERN_ERR "mtdoops: mtd device (mtddev=name/number) must be supplied\n");
		return -EINVAL;
	}
	if ((record_size & 4095) != 0) {
		printk(KERN_ERR "mtdoops: record_size must be a multiple of 4096\n");
		return -EINVAL;
	}
	if (record_size < 4096) {
		printk(KERN_ERR "mtdoops: record_size must be at least 4096 bytes\n");
		return -EINVAL;
	}

	/* Setup the MTD device to use */
	cxt->mtd_index = -1;
	mtd_index = simple_strtoul(mtddev, &endp, 0);
	if (*endp == '\0')
		cxt->mtd_index = mtd_index;

	cxt->oops_buf = vmalloc(record_size);
	if (!cxt->oops_buf) {
		printk(KERN_ERR "mtdoops: failed to allocate buffer workspace\n");
		return -ENOMEM;
	}
	memset(cxt->oops_buf, 0xff, record_size);

	INIT_WORK(&cxt->work_erase, mtdoops_workfunc_erase);
	INIT_WORK(&cxt->work_write, mtdoops_workfunc_write);

	register_mtd_user(&mtdoops_notifier);
	return 0;
}

static void __exit mtdoops_exit(void)
{
	struct mtdoops_context *cxt = &oops_cxt;

	unregister_mtd_user(&mtdoops_notifier);
	vfree(cxt->oops_buf);
	vfree(cxt->oops_page_used);
}

module_init(mtdoops_init);
module_exit(mtdoops_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Richard Purdie <rpurdie@openedhand.com>");
MODULE_DESCRIPTION("MTD Oops/Panic console logger/driver");