/*
 * block2mtd.c - create an mtd from a block device
 *
 * Copyright (C) 2001,2002 Simon Evans <spse@secret.org.uk>
 * Copyright (C) 2004-2006 Joern Engel <joern@wh.fh-wedel.de>
 *
 * Licence: GPL
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

/*
 * When the first attempt at device initialization fails, we may need to
 * wait a little bit and retry. This timeout, by default 3 seconds, gives
 * the device time to start up. Required on BCM2708 and a few other chipsets.
 */
#define MTD_DEFAULT_TIMEOUT 3

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mutex.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/major.h>

/* Maximum number of comma-separated items in the 'block2mtd=' parameter */
#define BLOCK2MTD_PARAM_MAX_COUNT 3

/* Info for the block device */
struct block2mtd_dev {
	struct list_head list;
	struct bdev_handle *bdev_handle;
	struct mtd_info mtd;
	struct mutex write_mutex;
};


/* Static info about the MTD, used in cleanup_module */
static LIST_HEAD(blkmtd_device_list);

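/* Read (and, if necessary, page in) one page of the backing device's page cache. */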
static struct page *page_read(struct address_space *mapping, pgoff_t index)
{
	return read_mapping_page(mapping, index, NULL);
}

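/*
 * Note: there is no real erase on a block device; an MTD erase is emulated
 * by filling the affected page-cache pages with 0xff. Pages that already
 * read back as all-ones are skipped so they are not needlessly dirtied.
 */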
/* erase a specified part of the device */
static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
{
	struct address_space *mapping =
			dev->bdev_handle->bdev->bd_inode->i_mapping;
	struct page *page;
	pgoff_t index = to >> PAGE_SHIFT;	// page index
	int pages = len >> PAGE_SHIFT;
	u_long *p;
	u_long *max;

	while (pages) {
		page = page_read(mapping, index);
		if (IS_ERR(page))
			return PTR_ERR(page);

		max = page_address(page) + PAGE_SIZE;
		for (p = page_address(page); p < max; p++)
			if (*p != -1UL) {
				lock_page(page);
				memset(page_address(page), 0xff, PAGE_SIZE);
				set_page_dirty(page);
				unlock_page(page);
				balance_dirty_pages_ratelimited(mapping);
				break;
			}

		put_page(page);
		pages--;
		index++;
	}
	return 0;
}

static int block2mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct block2mtd_dev *dev = mtd->priv;
	size_t from = instr->addr;
	size_t len = instr->len;
	int err;

	mutex_lock(&dev->write_mutex);
	err = _block2mtd_erase(dev, from, len);
	mutex_unlock(&dev->write_mutex);
	if (err)
		pr_err("erase failed err = %d\n", err);

	return err;
}


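/*
 * Reads are serviced straight from the page cache of the underlying block
 * device; *retlen accumulates the number of bytes copied out.
 */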
static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, u_char *buf)
{
	struct block2mtd_dev *dev = mtd->priv;
	struct address_space *mapping =
			dev->bdev_handle->bdev->bd_inode->i_mapping;
	struct page *page;
	pgoff_t index = from >> PAGE_SHIFT;
	int offset = from & (PAGE_SIZE-1);
	int cpylen;

	while (len) {
		if ((offset + len) > PAGE_SIZE)
			cpylen = PAGE_SIZE - offset;	// multiple pages
		else
			cpylen = len;	// this page
		len = len - cpylen;

		page = page_read(mapping, index);
		if (IS_ERR(page))
			return PTR_ERR(page);

		memcpy(buf, page_address(page) + offset, cpylen);
		put_page(page);

		if (retlen)
			*retlen += cpylen;
		buf += cpylen;
		offset = 0;
		index++;
	}
	return 0;
}


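/*
 * Writes compare the new data against the cached page first and only dirty
 * pages whose contents actually change, which keeps unnecessary writeback
 * to the underlying device down.
 */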
/* write data to the underlying device */
static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
		loff_t to, size_t len, size_t *retlen)
{
	struct page *page;
	struct address_space *mapping =
			dev->bdev_handle->bdev->bd_inode->i_mapping;
	pgoff_t index = to >> PAGE_SHIFT;	// page index
	int offset = to & ~PAGE_MASK;		// page offset
	int cpylen;

	while (len) {
		if ((offset + len) > PAGE_SIZE)
			cpylen = PAGE_SIZE - offset;	// multiple pages
		else
			cpylen = len;	// this page
		len = len - cpylen;

		page = page_read(mapping, index);
		if (IS_ERR(page))
			return PTR_ERR(page);

		if (memcmp(page_address(page) + offset, buf, cpylen)) {
			lock_page(page);
			memcpy(page_address(page) + offset, buf, cpylen);
			set_page_dirty(page);
			unlock_page(page);
			balance_dirty_pages_ratelimited(mapping);
		}
		put_page(page);

		if (retlen)
			*retlen += cpylen;

		buf += cpylen;
		offset = 0;
		index++;
	}
	return 0;
}


static int block2mtd_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct block2mtd_dev *dev = mtd->priv;
	int err;

	mutex_lock(&dev->write_mutex);
	err = _block2mtd_write(dev, buf, to, len, retlen);
	mutex_unlock(&dev->write_mutex);
	if (err > 0)
		err = 0;
	return err;
}


/* sync the device - wait until the write queue is empty */
static void block2mtd_sync(struct mtd_info *mtd)
{
	struct block2mtd_dev *dev = mtd->priv;

	sync_blockdev(dev->bdev_handle->bdev);
}


static void block2mtd_free_device(struct block2mtd_dev *dev)
{
	if (!dev)
		return;

	kfree(dev->mtd.name);

	if (dev->bdev_handle) {
		invalidate_mapping_pages(
			dev->bdev_handle->bdev->bd_inode->i_mapping, 0, -1);
		bdev_release(dev->bdev_handle);
	}

	kfree(dev);
}

/*
 * This function is marked __ref because it calls the __init-marked
 * early_lookup_bdev when called from the early boot code.
 */
static struct bdev_handle __ref *mdtblock_early_get_bdev(const char *devname,
		blk_mode_t mode, int timeout, struct block2mtd_dev *dev)
{
	struct bdev_handle *bdev_handle = ERR_PTR(-ENODEV);
#ifndef MODULE
	int i;

	/*
	 * We can't use early_lookup_bdev from a running system.
	 */
	if (system_state >= SYSTEM_RUNNING)
		return bdev_handle;

	/*
	 * We might not have the root device mounted at this point.
	 * Try to resolve the device name by other means.
	 */
	for (i = 0; i <= timeout; i++) {
		dev_t devt;

		if (i)
			/*
			 * Calling wait_for_device_probe in the first loop
			 * was not enough; sleep for a bit in subsequent
			 * go-arounds.
			 */
			msleep(1000);
		wait_for_device_probe();

		if (!early_lookup_bdev(devname, &devt)) {
			bdev_handle = bdev_open_by_dev(devt, mode, dev, NULL);
			if (!IS_ERR(bdev_handle))
				break;
		}
	}
#endif
	return bdev_handle;
}

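/*
 * Open the named block device, size-check it against the requested erase
 * size, fill in the mtd_info and register the result as an MTD device.
 */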
static struct block2mtd_dev *add_device(char *devname, int erase_size,
		char *label, int timeout)
{
	const blk_mode_t mode = BLK_OPEN_READ | BLK_OPEN_WRITE;
	struct bdev_handle *bdev_handle;
	struct block_device *bdev;
	struct block2mtd_dev *dev;
	char *name;

	if (!devname)
		return NULL;

	dev = kzalloc(sizeof(struct block2mtd_dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	/* Get a handle on the device */
	bdev_handle = bdev_open_by_path(devname, mode, dev, NULL);
	if (IS_ERR(bdev_handle))
		bdev_handle = mdtblock_early_get_bdev(devname, mode, timeout,
						      dev);
	if (IS_ERR(bdev_handle)) {
		pr_err("error: cannot open device %s\n", devname);
		goto err_free_block2mtd;
	}
	dev->bdev_handle = bdev_handle;
	bdev = bdev_handle->bdev;

	if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
		pr_err("attempting to use an MTD device as a block device\n");
		goto err_free_block2mtd;
	}

	if ((long)bdev->bd_inode->i_size % erase_size) {
		pr_err("erasesize must be a divisor of device size\n");
		goto err_free_block2mtd;
	}

	mutex_init(&dev->write_mutex);

	/* Set up the MTD structure */
	/* make the name contain the block device name */
	if (!label)
		name = kasprintf(GFP_KERNEL, "block2mtd: %s", devname);
	else
		name = kstrdup(label, GFP_KERNEL);
	if (!name)
		goto err_destroy_mutex;

	dev->mtd.name = name;

	dev->mtd.size = bdev->bd_inode->i_size & PAGE_MASK;
	dev->mtd.erasesize = erase_size;
	dev->mtd.writesize = 1;
	dev->mtd.writebufsize = PAGE_SIZE;
	dev->mtd.type = MTD_RAM;
	dev->mtd.flags = MTD_CAP_RAM;
	dev->mtd._erase = block2mtd_erase;
	dev->mtd._write = block2mtd_write;
	dev->mtd._sync = block2mtd_sync;
	dev->mtd._read = block2mtd_read;
	dev->mtd.priv = dev;
	dev->mtd.owner = THIS_MODULE;

	if (mtd_device_register(&dev->mtd, NULL, 0)) {
		/* Device didn't get added, so free the entry */
		goto err_destroy_mutex;
	}

	list_add(&dev->list, &blkmtd_device_list);
	pr_info("mtd%d: [%s] erase_size = %dKiB [%d]\n",
		dev->mtd.index,
		label ? label : dev->mtd.name + strlen("block2mtd: "),
		dev->mtd.erasesize >> 10, dev->mtd.erasesize);
	return dev;

err_destroy_mutex:
	mutex_destroy(&dev->write_mutex);
err_free_block2mtd:
	block2mtd_free_device(dev);
	return NULL;
}


/* This function works like regular strtoul. In addition, it
 * allows some suffixes for a more human-readable number format:
 * ki, Ki, kiB, KiB	- multiply the result by 1024
 * Mi, MiB		- multiply the result by 1024^2
 * Gi, GiB		- multiply the result by 1024^3
 */
static int ustrtoul(const char *cp, char **endp, unsigned int base)
{
	unsigned long result = simple_strtoul(cp, endp, base);

	switch (**endp) {
	case 'G':
		result *= 1024;
		fallthrough;
	case 'M':
		result *= 1024;
		fallthrough;
	case 'K':
	case 'k':
		result *= 1024;
		/* By dwmw2 editorial decree, "ki", "Mi" or "Gi" are to be used. */
		if ((*endp)[1] == 'i') {
			if ((*endp)[2] == 'B')
				(*endp) += 3;
			else
				(*endp) += 2;
		}
	}
	return result;
}


static int parse_num(size_t *num, const char *token)
{
	char *endp;
	size_t n;

	n = (size_t) ustrtoul(token, &endp, 0);
	if (*endp)
		return -EINVAL;

	*num = n;
	return 0;
}


static inline void kill_final_newline(char *str)
{
	char *newline = strrchr(str, '\n');

	if (newline && !newline[1])
		*newline = 0;
}


#ifndef MODULE
static int block2mtd_init_called = 0;
/* 80 for device, 12 for erase size */
static char block2mtd_paramline[80 + 12];
#endif

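/* Parse one "<dev>[,[<erasesize>][,<label>]]" string and register the device. */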
static int block2mtd_setup2(const char *val)
{
	/* 80 for device, 12 for erase size, 80 for name, 8 for timeout */
	char buf[80 + 12 + 80 + 8];
	char *str = buf;
	char *token[BLOCK2MTD_PARAM_MAX_COUNT];
	char *name;
	char *label = NULL;
	size_t erase_size = PAGE_SIZE;
	unsigned long timeout = MTD_DEFAULT_TIMEOUT;
	int i, ret;

	if (strnlen(val, sizeof(buf)) >= sizeof(buf)) {
		pr_err("parameter too long\n");
		return 0;
	}

	strcpy(str, val);
	kill_final_newline(str);

	for (i = 0; i < BLOCK2MTD_PARAM_MAX_COUNT; i++)
		token[i] = strsep(&str, ",");

	if (str) {
		pr_err("too many arguments\n");
		return 0;
	}

	if (!token[0]) {
		pr_err("no argument\n");
		return 0;
	}

	name = token[0];
	if (strlen(name) + 1 > 80) {
		pr_err("device name too long\n");
		return 0;
	}

	/* The erase size is optional and may be left empty when a custom label is used */
	if (token[1] && strlen(token[1])) {
		ret = parse_num(&erase_size, token[1]);
		if (ret) {
			pr_err("illegal erase size\n");
			return 0;
		}
	}

	if (token[2]) {
		label = token[2];
		pr_info("Using custom MTD label '%s' for dev %s\n", label, name);
	}

	add_device(name, erase_size, label, timeout);

	return 0;
}


static int block2mtd_setup(const char *val, const struct kernel_param *kp)
{
#ifdef MODULE
	return block2mtd_setup2(val);
#else
	/* If more parameters are later passed in via
	   /sys/module/block2mtd/parameters/block2mtd
	   and block2mtd_init() has already been called,
	   we can parse the argument now. */

	if (block2mtd_init_called)
		return block2mtd_setup2(val);

	/* During the early boot stage, we only save the parameters
	   here. We must parse them later: if the param is passed
	   on the kernel boot command line, block2mtd_setup() is
	   called so early that it is not possible to resolve
	   the device (even kmalloc() fails). Defer that work to
	   block2mtd_setup2(). */

	strscpy(block2mtd_paramline, val, sizeof(block2mtd_paramline));

	return 0;
#endif
}


module_param_call(block2mtd, block2mtd_setup, NULL, NULL, 0200);
MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,[<erasesize>][,<label>]]\"");
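
/*
 * Illustrative usage (the device path below is just an example):
 *	block2mtd=/dev/sdb1,64KiB,rootfs
 * maps /dev/sdb1 as an MTD device with a 64KiB erase size and the custom
 * label "rootfs"; the erase size and label are both optional.
 */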

static int __init block2mtd_init(void)
{
	int ret = 0;

#ifndef MODULE
	if (strlen(block2mtd_paramline))
		ret = block2mtd_setup2(block2mtd_paramline);
	block2mtd_init_called = 1;
#endif

	return ret;
}


static void block2mtd_exit(void)
{
	struct list_head *pos, *next;

	/* Remove the MTD devices */
	list_for_each_safe(pos, next, &blkmtd_device_list) {
		struct block2mtd_dev *dev = list_entry(pos, typeof(*dev), list);

		block2mtd_sync(&dev->mtd);
		mtd_device_unregister(&dev->mtd);
		mutex_destroy(&dev->write_mutex);
		pr_info("mtd%d: [%s] removed\n",
			dev->mtd.index,
			dev->mtd.name + strlen("block2mtd: "));
		list_del(&dev->list);
		block2mtd_free_device(dev);
	}
}

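/*
 * Registered as a late initcall so that, when built in, the block layer and
 * the underlying device drivers have had a chance to probe before we try to
 * resolve the device named on the command line.
 */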
late_initcall(block2mtd_init);
module_exit(block2mtd_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Joern Engel <joern@lazybastard.org>");
MODULE_DESCRIPTION("Emulate an MTD using a block device");