// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * MTD device concatenation layer
 *
 * Copyright © 2002 Robert Kaiser <rkaiser@sysgo.de>
 * Copyright © 2002-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * NAND support by Christian Gan <cgan@iders.ca>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/backing-dev.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/concat.h>

#include <asm/div64.h>

/*
 * Our storage structure:
 * Subdev points to an array of pointers to struct mtd_info objects
 * which is allocated along with this structure.
 */
struct mtd_concat {
	struct mtd_info mtd;
	int num_subdev;
	struct mtd_info **subdev;
};

/*
 * How to calculate the size required for the above structure,
 * including the pointer array subdev points to:
 */
#define SIZEOF_STRUCT_MTD_CONCAT(num_subdev)	\
	((sizeof(struct mtd_concat) + (num_subdev) * sizeof(struct mtd_info *)))
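
/*
 * For example, SIZEOF_STRUCT_MTD_CONCAT(3) is sizeof(struct mtd_concat)
 * plus room for three struct mtd_info pointers, so a single kzalloc()
 * covers both the super device and its subdev pointer array, which
 * mtd_concat_create() points at the memory immediately behind the struct.
 */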

/*
 * Given a pointer to the MTD object in the mtd_concat structure,
 * we can retrieve the pointer to that structure with this macro.
 */
#define CONCAT(x) ((struct mtd_concat *)(x))

/*
 * MTD methods which look up the relevant subdevice, translate the
 * effective address and pass through to the subdevice.
 */
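
/*
 * Worked example (hypothetical geometry): with two 64MiB subdevices, a
 * request at concat offset 0x4100000 skips subdev[0] (the loops below
 * subtract its 0x4000000-byte size) and is issued to subdev[1] at offset
 * 0x100000. A request that straddles the boundary is split into one
 * chunk per subdevice, with buf/len adjusted between chunks.
 */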

static int
concat_read(struct mtd_info *mtd, loff_t from, size_t len,
	    size_t * retlen, u_char * buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int ret = 0, err;
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (from >= subdev->size) {
			/* Not destined for this subdev */
			size = 0;
			from -= subdev->size;
			continue;
		}
		if (from + len > subdev->size)
			/* First part goes into this subdev */
			size = subdev->size - from;
		else
			/* Entire transaction goes into this subdev */
			size = len;

		err = mtd_read(subdev, from, size, &retsize, buf);

		/* Save information about bitflips! */
		if (unlikely(err)) {
			if (mtd_is_eccerr(err)) {
				mtd->ecc_stats.failed++;
				ret = err;
			} else if (mtd_is_bitflip(err)) {
				mtd->ecc_stats.corrected++;
				/* Do not overwrite -EBADMSG !! */
				if (!ret)
					ret = err;
			} else
				return err;
		}

		*retlen += retsize;
		len -= size;
		if (len == 0)
			return ret;

		buf += size;
		from = 0;
	}
	return -EINVAL;
}

static int
concat_write(struct mtd_info *mtd, loff_t to, size_t len,
	     size_t * retlen, const u_char * buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int err = -EINVAL;
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (to >= subdev->size) {
			size = 0;
			to -= subdev->size;
			continue;
		}
		if (to + len > subdev->size)
			size = subdev->size - to;
		else
			size = len;

		err = mtd_write(subdev, to, size, &retsize, buf);
		if (err)
			break;

		*retlen += retsize;
		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		buf += size;
		to = 0;
	}
	return err;
}

static int
concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
	      unsigned long count, loff_t to, size_t * retlen)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct kvec *vecs_copy;
	unsigned long entry_low, entry_high;
	size_t total_len = 0;
	int i;
	int err = -EINVAL;

	/* Calculate total length of data */
	for (i = 0; i < count; i++)
		total_len += vecs[i].iov_len;

	/* Check alignment */
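	/*
	 * Note: do_div() divides its 64-bit dividend in place and returns
	 * the remainder, so the test below rejects a starting offset or a
	 * total length that is not a multiple of the page write size.
	 */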
	if (mtd->writesize > 1) {
		uint64_t __to = to;
		if (do_div(__to, mtd->writesize) || (total_len % mtd->writesize))
			return -EINVAL;
	}

	/* make a copy of vecs */
	vecs_copy = kmemdup(vecs, sizeof(struct kvec) * count, GFP_KERNEL);
	if (!vecs_copy)
		return -ENOMEM;

	entry_low = 0;
	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, wsize, retsize, old_iov_len;

		if (to >= subdev->size) {
			to -= subdev->size;
			continue;
		}

		size = min_t(uint64_t, total_len, subdev->size - to);
		wsize = size;	/* store for future use */

		entry_high = entry_low;
		while (entry_high < count) {
			if (size <= vecs_copy[entry_high].iov_len)
				break;
			size -= vecs_copy[entry_high++].iov_len;
		}

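		/*
		 * vecs_copy[entry_high] is the vector in which this
		 * subdevice's share of the data ends; "size" is now the
		 * number of its bytes that still belong to this subdevice.
		 * Truncate that vector for the write, then restore the
		 * remainder and advance iov_base so the next iteration
		 * resumes exactly at the split point.
		 */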
		old_iov_len = vecs_copy[entry_high].iov_len;
		vecs_copy[entry_high].iov_len = size;

		err = mtd_writev(subdev, &vecs_copy[entry_low],
				 entry_high - entry_low + 1, to, &retsize);

		vecs_copy[entry_high].iov_len = old_iov_len - size;
		vecs_copy[entry_high].iov_base += size;

		entry_low = entry_high;

		if (err)
			break;

		*retlen += retsize;
		total_len -= wsize;

		if (total_len == 0)
			break;

		err = -EINVAL;
		to = 0;
	}

	kfree(vecs_copy);
	return err;
}

static int
concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_oob_ops devops = *ops;
	int i, err, ret = 0;

	ops->retlen = ops->oobretlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (from >= subdev->size) {
			from -= subdev->size;
			continue;
		}

		/* partial read ? */
		if (from + devops.len > subdev->size)
			devops.len = subdev->size - from;

		err = mtd_read_oob(subdev, from, &devops);
		ops->retlen += devops.retlen;
		ops->oobretlen += devops.oobretlen;

		/* Save information about bitflips! */
		if (unlikely(err)) {
			if (mtd_is_eccerr(err)) {
				mtd->ecc_stats.failed++;
				ret = err;
			} else if (mtd_is_bitflip(err)) {
				mtd->ecc_stats.corrected++;
				/* Do not overwrite -EBADMSG !! */
				if (!ret)
					ret = err;
			} else
				return err;
		}

		if (devops.datbuf) {
			devops.len = ops->len - ops->retlen;
			if (!devops.len)
				return ret;
			devops.datbuf += devops.retlen;
		}
		if (devops.oobbuf) {
			devops.ooblen = ops->ooblen - ops->oobretlen;
			if (!devops.ooblen)
				return ret;
			devops.oobbuf += devops.oobretlen;
		}

		from = 0;
	}
	return -EINVAL;
}
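
/*
 * A minimal sketch of driving the OOB path from a hypothetical caller
 * (field values are illustrative, not taken from this file):
 *
 *	struct mtd_oob_ops ops = {
 *		.mode	= MTD_OPS_PLACE_OOB,
 *		.len	= mtd->writesize,
 *		.ooblen	= mtd->oobsize,
 *		.datbuf	= data_buf,
 *		.oobbuf	= oob_buf,
 *	};
 *	err = mtd_read_oob(concat_mtd, offset, &ops);
 *
 * concat_read_oob() above splits such a request across subdevice
 * boundaries while accumulating retlen/oobretlen in the caller's ops.
 */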

static int
concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_oob_ops devops = *ops;
	int i, err;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	ops->retlen = ops->oobretlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (to >= subdev->size) {
			to -= subdev->size;
			continue;
		}

		/* partial write ? */
		if (to + devops.len > subdev->size)
			devops.len = subdev->size - to;

		err = mtd_write_oob(subdev, to, &devops);
		ops->retlen += devops.retlen;
		ops->oobretlen += devops.oobretlen;
		if (err)
			return err;

		if (devops.datbuf) {
			devops.len = ops->len - ops->retlen;
			if (!devops.len)
				return 0;
			devops.datbuf += devops.retlen;
		}
		if (devops.oobbuf) {
			devops.ooblen = ops->ooblen - ops->oobretlen;
			if (!devops.ooblen)
				return 0;
			devops.oobbuf += devops.oobretlen;
		}
		to = 0;
	}
	return -EINVAL;
}

static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_info *subdev;
	int i, err;
	uint64_t length, offset = 0;
	struct erase_info *erase;

	/*
	 * Check for proper erase block alignment of the to-be-erased area.
	 * It is easier to do this based on the super device's erase
	 * region info rather than looking at each particular sub-device
	 * in turn.
	 */
	if (!concat->mtd.numeraseregions) {
		/* the easy case: device has uniform erase block size */
		if (instr->addr & (concat->mtd.erasesize - 1))
			return -EINVAL;
		if (instr->len & (concat->mtd.erasesize - 1))
			return -EINVAL;
	} else {
		/* device has variable erase size */
		struct mtd_erase_region_info *erase_regions =
		    concat->mtd.eraseregions;

		/*
		 * Find the erase region where the to-be-erased area begins:
		 */
		for (i = 0; i < concat->mtd.numeraseregions &&
		     instr->addr >= erase_regions[i].offset; i++) ;
		--i;

		/*
		 * Now erase_regions[i] is the region in which the
		 * to-be-erased area begins. Verify that the starting
		 * offset is aligned to this region's erase size:
		 */
		if (i < 0 || instr->addr & (erase_regions[i].erasesize - 1))
			return -EINVAL;

		/*
		 * now find the erase region where the to-be-erased area ends:
		 */
		for (; i < concat->mtd.numeraseregions &&
		     (instr->addr + instr->len) >= erase_regions[i].offset;
		     ++i) ;
		--i;
		/*
		 * check if the ending offset is aligned to this region's erase size
		 */
		if (i < 0 || ((instr->addr + instr->len) &
			      (erase_regions[i].erasesize - 1)))
			return -EINVAL;
	}

	/* make a local copy of instr to avoid modifying the caller's struct */
	erase = kmalloc(sizeof(struct erase_info), GFP_KERNEL);
	if (!erase)
		return -ENOMEM;

	*erase = *instr;
	length = instr->len;

	/*
	 * find the subdevice where the to-be-erased area begins, adjust
	 * starting offset to be relative to the subdevice start
	 */
	for (i = 0; i < concat->num_subdev; i++) {
		subdev = concat->subdev[i];
		if (subdev->size <= erase->addr) {
			erase->addr -= subdev->size;
			offset += subdev->size;
		} else {
			break;
		}
	}

	/* must never happen since size limit has been verified above */
	BUG_ON(i >= concat->num_subdev);

	/* now do the erase: */
	err = 0;
	for (; length > 0; i++) {
		/* loop for all subdevices affected by this request */
		subdev = concat->subdev[i];	/* get current subdevice */

		/* limit length to subdevice's size: */
		if (erase->addr + length > subdev->size)
			erase->len = subdev->size - erase->addr;
		else
			erase->len = length;

		length -= erase->len;
		if ((err = mtd_erase(subdev, erase))) {
			/* sanity check: should never happen since
			 * block alignment has been checked above */
			BUG_ON(err == -EINVAL);
			if (erase->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
				instr->fail_addr = erase->fail_addr + offset;
			break;
		}
		/*
		 * erase->addr specifies the offset of the area to be
		 * erased *within the current subdevice*. It can be
		 * non-zero only the first time through this loop, i.e.
		 * for the first subdevice where blocks need to be erased.
		 * All the following erases must begin at the start of the
		 * current subdevice, i.e. at offset zero.
		 */
		erase->addr = 0;
		offset += subdev->size;
	}
	kfree(erase);

	return err;
}
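
/*
 * A minimal sketch of erasing one block through the concatenated device
 * (block_ofs is an illustrative, erase-block-aligned offset):
 *
 *	struct erase_info ei = {
 *		.addr = block_ofs,
 *		.len  = concat_mtd->erasesize,
 *	};
 *	err = mtd_erase(concat_mtd, &ei);
 *
 * concat_erase() validates alignment against the combined erase-region
 * map, then issues one sub-erase per affected subdevice.
 */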

static int concat_xxlock(struct mtd_info *mtd, loff_t ofs, uint64_t len,
			 bool is_lock)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		uint64_t size;

		if (ofs >= subdev->size) {
			size = 0;
			ofs -= subdev->size;
			continue;
		}
		if (ofs + len > subdev->size)
			size = subdev->size - ofs;
		else
			size = len;

		if (is_lock)
			err = mtd_lock(subdev, ofs, size);
		else
			err = mtd_unlock(subdev, ofs, size);
		if (err)
			break;

		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		ofs = 0;
	}

	return err;
}

static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return concat_xxlock(mtd, ofs, len, true);
}

static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return concat_xxlock(mtd, ofs, len, false);
}

static int concat_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		if (ofs + len > subdev->size)
			break;

		return mtd_is_locked(subdev, ofs, len);
	}

	return err;
}

static void concat_sync(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		mtd_sync(subdev);
	}
}

static int concat_suspend(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, rc = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		if ((rc = mtd_suspend(subdev)) < 0)
			return rc;
	}
	return rc;
}

static void concat_resume(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		mtd_resume(subdev);
	}
}

static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, res = 0;

	if (!mtd_can_have_bb(concat->subdev[0]))
		return res;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		res = mtd_block_isbad(subdev, ofs);
		break;
	}

	return res;
}

static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		err = mtd_block_markbad(subdev, ofs);
		if (!err)
			mtd->ecc_stats.badblocks++;
		break;
	}

	return err;
}

/*
 * This function constructs a virtual MTD device by concatenating
 * num_devs MTD devices. A pointer to the new device object is
 * returned on success, or NULL on error. This function does _not_
 * register any devices: this is the caller's responsibility.
 */
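
/*
 * A minimal usage sketch from a hypothetical board/map driver (names
 * such as flash0_mtd/flash1_mtd are illustrative):
 *
 *	struct mtd_info *parts[2] = { flash0_mtd, flash1_mtd };
 *	struct mtd_info *combined;
 *
 *	combined = mtd_concat_create(parts, 2, "board-flash");
 *	if (combined)
 *		mtd_device_register(combined, NULL, 0);
 *
 * On teardown, unregister the device before mtd_concat_destroy().
 */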
struct mtd_info *mtd_concat_create(struct mtd_info *subdev[],	/* subdevices to concatenate */
				   int num_devs,	/* number of subdevices */
				   const char *name)	/* name for the new device */
{
	int i;
	size_t size;
	struct mtd_concat *concat;
	uint32_t max_erasesize, curr_erasesize;
	int num_erase_region;
	int max_writebufsize = 0;

	printk(KERN_NOTICE "Concatenating MTD devices:\n");
	for (i = 0; i < num_devs; i++)
		printk(KERN_NOTICE "(%d): \"%s\"\n", i, subdev[i]->name);
	printk(KERN_NOTICE "into device \"%s\"\n", name);

	/* allocate the device structure */
	size = SIZEOF_STRUCT_MTD_CONCAT(num_devs);
	concat = kzalloc(size, GFP_KERNEL);
	if (!concat) {
		printk("memory allocation error while creating concatenated device \"%s\"\n",
		       name);
		return NULL;
	}
	concat->subdev = (struct mtd_info **) (concat + 1);

	/*
	 * Set up the new "super" device's MTD object structure, check for
	 * incompatibilities between the subdevices.
	 */
	concat->mtd.type = subdev[0]->type;
	concat->mtd.flags = subdev[0]->flags;
	concat->mtd.size = subdev[0]->size;
	concat->mtd.erasesize = subdev[0]->erasesize;
	concat->mtd.writesize = subdev[0]->writesize;

	for (i = 0; i < num_devs; i++)
		if (max_writebufsize < subdev[i]->writebufsize)
			max_writebufsize = subdev[i]->writebufsize;
	concat->mtd.writebufsize = max_writebufsize;

	concat->mtd.subpage_sft = subdev[0]->subpage_sft;
	concat->mtd.oobsize = subdev[0]->oobsize;
	concat->mtd.oobavail = subdev[0]->oobavail;
	if (subdev[0]->_writev)
		concat->mtd._writev = concat_writev;
	if (subdev[0]->_read_oob)
		concat->mtd._read_oob = concat_read_oob;
	if (subdev[0]->_write_oob)
		concat->mtd._write_oob = concat_write_oob;
	if (subdev[0]->_block_isbad)
		concat->mtd._block_isbad = concat_block_isbad;
	if (subdev[0]->_block_markbad)
		concat->mtd._block_markbad = concat_block_markbad;

	concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;

	concat->subdev[0] = subdev[0];

	for (i = 1; i < num_devs; i++) {
		if (concat->mtd.type != subdev[i]->type) {
			kfree(concat);
			printk("Incompatible device type on \"%s\"\n",
			       subdev[i]->name);
			return NULL;
		}
		if (concat->mtd.flags != subdev[i]->flags) {
			/*
			 * Expect all flags except MTD_WRITEABLE to be
			 * equal on all subdevices.
			 */
			if ((concat->mtd.flags ^ subdev[i]->flags) &
			    ~MTD_WRITEABLE) {
				kfree(concat);
				printk("Incompatible device flags on \"%s\"\n",
				       subdev[i]->name);
				return NULL;
			} else
				/* if writeable attribute differs,
				   make super device writeable */
				concat->mtd.flags |=
				    subdev[i]->flags & MTD_WRITEABLE;
		}

		concat->mtd.size += subdev[i]->size;
		concat->mtd.ecc_stats.badblocks +=
		    subdev[i]->ecc_stats.badblocks;
		if (concat->mtd.writesize != subdev[i]->writesize ||
		    concat->mtd.subpage_sft != subdev[i]->subpage_sft ||
		    concat->mtd.oobsize != subdev[i]->oobsize ||
		    !concat->mtd._read_oob != !subdev[i]->_read_oob ||
		    !concat->mtd._write_oob != !subdev[i]->_write_oob) {
			kfree(concat);
			printk("Incompatible OOB or ECC data on \"%s\"\n",
			       subdev[i]->name);
			return NULL;
		}
		concat->subdev[i] = subdev[i];
	}

	mtd_set_ooblayout(&concat->mtd, subdev[0]->ooblayout);

	concat->num_subdev = num_devs;
	concat->mtd.name = name;

	concat->mtd._erase = concat_erase;
	concat->mtd._read = concat_read;
	concat->mtd._write = concat_write;
	concat->mtd._sync = concat_sync;
	concat->mtd._lock = concat_lock;
	concat->mtd._unlock = concat_unlock;
	concat->mtd._is_locked = concat_is_locked;
	concat->mtd._suspend = concat_suspend;
	concat->mtd._resume = concat_resume;

	/*
	 * Combine the erase block size info of the subdevices:
	 *
	 * first, walk the map of the new device and see how
	 * many changes in erase size we have
	 */
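
	/*
	 * Worked example (hypothetical): concatenating a uniform-64KiB
	 * device with a uniform-128KiB device yields num_erase_region == 2,
	 * so the super device gets erasesize = 128KiB (the maximum) plus an
	 * eraseregions[] array describing one 64KiB region followed by one
	 * 128KiB region.
	 */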
	max_erasesize = curr_erasesize = subdev[0]->erasesize;
	num_erase_region = 1;
	for (i = 0; i < num_devs; i++) {
		if (subdev[i]->numeraseregions == 0) {
			/* current subdevice has uniform erase size */
			if (subdev[i]->erasesize != curr_erasesize) {
				/* if it differs from the last subdevice's erase size, count it */
				++num_erase_region;
				curr_erasesize = subdev[i]->erasesize;
				if (curr_erasesize > max_erasesize)
					max_erasesize = curr_erasesize;
			}
		} else {
			/* current subdevice has variable erase size */
			int j;
			for (j = 0; j < subdev[i]->numeraseregions; j++) {
				/* walk the list of erase regions, count any changes */
				if (subdev[i]->eraseregions[j].erasesize !=
				    curr_erasesize) {
					++num_erase_region;
					curr_erasesize =
					    subdev[i]->eraseregions[j].erasesize;
					if (curr_erasesize > max_erasesize)
						max_erasesize = curr_erasesize;
				}
			}
		}
	}

	if (num_erase_region == 1) {
		/*
		 * All subdevices have the same uniform erase size.
		 * This is easy:
		 */
		concat->mtd.erasesize = curr_erasesize;
		concat->mtd.numeraseregions = 0;
	} else {
		uint64_t tmp64;

		/*
		 * erase block size varies across the subdevices: allocate
		 * space to store the data describing the variable erase regions
		 */
		struct mtd_erase_region_info *erase_region_p;
		uint64_t begin, position;

		concat->mtd.erasesize = max_erasesize;
		concat->mtd.numeraseregions = num_erase_region;
		concat->mtd.eraseregions = erase_region_p =
		    kmalloc_array(num_erase_region,
				  sizeof(struct mtd_erase_region_info),
				  GFP_KERNEL);
		if (!erase_region_p) {
			kfree(concat);
			printk("memory allocation error while creating erase region list for device \"%s\"\n",
			       name);
			return NULL;
		}

		/*
		 * walk the map of the new device once more and fill in the
		 * erase region info:
		 */
		curr_erasesize = subdev[0]->erasesize;
		begin = position = 0;
		for (i = 0; i < num_devs; i++) {
			if (subdev[i]->numeraseregions == 0) {
				/* current subdevice has uniform erase size */
				if (subdev[i]->erasesize != curr_erasesize) {
					/*
					 * fill in an mtd_erase_region_info structure for the area
					 * we have walked so far:
					 */
					erase_region_p->offset = begin;
					erase_region_p->erasesize =
					    curr_erasesize;
					tmp64 = position - begin;
					do_div(tmp64, curr_erasesize);
					erase_region_p->numblocks = tmp64;
					begin = position;

					curr_erasesize = subdev[i]->erasesize;
					++erase_region_p;
				}
				position += subdev[i]->size;
			} else {
				/* current subdevice has variable erase size */
				int j;
				for (j = 0; j < subdev[i]->numeraseregions; j++) {
					/* walk the list of erase regions, count any changes */
					if (subdev[i]->eraseregions[j].erasesize !=
					    curr_erasesize) {
						erase_region_p->offset = begin;
						erase_region_p->erasesize =
						    curr_erasesize;
						tmp64 = position - begin;
						do_div(tmp64, curr_erasesize);
						erase_region_p->numblocks = tmp64;
						begin = position;

						curr_erasesize =
						    subdev[i]->eraseregions[j].erasesize;
						++erase_region_p;
					}
					position += subdev[i]->eraseregions[j].numblocks *
					    (uint64_t)curr_erasesize;
				}
			}
		}
		/* Now write the final entry */
		erase_region_p->offset = begin;
		erase_region_p->erasesize = curr_erasesize;
		tmp64 = position - begin;
		do_div(tmp64, curr_erasesize);
		erase_region_p->numblocks = tmp64;
	}

	return &concat->mtd;
}

/* Cleans the context obtained from mtd_concat_create() */
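/*
 * Callers are expected (assumption: standard MTD teardown order) to
 * mtd_device_unregister() the concat device before destroying it here.
 */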
void mtd_concat_destroy(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	if (concat->mtd.numeraseregions)
		kfree(concat->mtd.eraseregions);
	kfree(concat);
}

EXPORT_SYMBOL(mtd_concat_create);
EXPORT_SYMBOL(mtd_concat_destroy);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Robert Kaiser <rkaiser@sysgo.de>");
MODULE_DESCRIPTION("Generic support for concatenating MTD devices");