// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * MTD device concatenation layer
 *
 * Copyright © 2002 Robert Kaiser <rkaiser@sysgo.de>
 * Copyright © 2002-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * NAND support by Christian Gan <cgan@iders.ca>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/backing-dev.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/concat.h>

#include <asm/div64.h>

/*
 * Our storage structure:
 * subdev points to an array of pointers to struct mtd_info objects
 * which is allocated along with this structure.
 */
struct mtd_concat {
	struct mtd_info mtd;
	int num_subdev;
	struct mtd_info **subdev;
};

/*
 * how to calculate the size required for the above structure,
 * including the pointer array subdev points to:
 */
#define SIZEOF_STRUCT_MTD_CONCAT(num_subdev)	\
	((sizeof(struct mtd_concat) + (num_subdev) * sizeof(struct mtd_info *)))
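
/*
 * Illustrative note (not from the original source): for two subdevices the
 * single allocation produced with this size is laid out as
 *
 *   [ struct mtd_concat | subdev[0] pointer | subdev[1] pointer ]
 *
 * and mtd_concat_create() below points concat->subdev at the tail array.
 */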

/*
 * Given a pointer to the MTD object in the mtd_concat structure,
 * we can retrieve the pointer to that structure with this macro.
 */
#define CONCAT(x) ((struct mtd_concat *)(x))
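
/*
 * Descriptive note (added): the bare cast is only valid because 'mtd' is
 * the first member of struct mtd_concat, so CONCAT(x) is equivalent to
 * container_of(x, struct mtd_concat, mtd).
 */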

/*
 * MTD methods which look up the relevant subdevice, translate the
 * effective address and pass through to the subdevice.
 */

static int
concat_read(struct mtd_info *mtd, loff_t from, size_t len,
	    size_t * retlen, u_char * buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int ret = 0, err;
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (from >= subdev->size) {
			/* Not destined for this subdev */
			size = 0;
			from -= subdev->size;
			continue;
		}
		if (from + len > subdev->size)
			/* First part goes into this subdev */
			size = subdev->size - from;
		else
			/* Entire transaction goes into this subdev */
			size = len;

		err = mtd_read(subdev, from, size, &retsize, buf);

		/* Save information about bitflips! */
		if (unlikely(err)) {
			if (mtd_is_eccerr(err)) {
				mtd->ecc_stats.failed++;
				ret = err;
			} else if (mtd_is_bitflip(err)) {
				mtd->ecc_stats.corrected++;
				/* Do not overwrite -EBADMSG !! */
				if (!ret)
					ret = err;
			} else
				return err;
		}

		*retlen += retsize;
		len -= size;
		if (len == 0)
			return ret;

		buf += size;
		from = 0;
	}
	return -EINVAL;
}

static int
concat_write(struct mtd_info *mtd, loff_t to, size_t len,
	     size_t * retlen, const u_char * buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int err = -EINVAL;
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (to >= subdev->size) {
			size = 0;
			to -= subdev->size;
			continue;
		}
		if (to + len > subdev->size)
			size = subdev->size - to;
		else
			size = len;

		err = mtd_write(subdev, to, size, &retsize, buf);
		if (err)
			break;

		*retlen += retsize;
		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		buf += size;
		to = 0;
	}
	return err;
}

static int
concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
	      unsigned long count, loff_t to, size_t * retlen)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct kvec *vecs_copy;
	unsigned long entry_low, entry_high;
	size_t total_len = 0;
	int i;
	int err = -EINVAL;

	/* Calculate total length of data */
	for (i = 0; i < count; i++)
		total_len += vecs[i].iov_len;

	/* Check alignment */
	if (mtd->writesize > 1) {
		uint64_t __to = to;
		if (do_div(__to, mtd->writesize) || (total_len % mtd->writesize))
			return -EINVAL;
	}

	/* make a copy of vecs */
	vecs_copy = kmemdup(vecs, sizeof(struct kvec) * count, GFP_KERNEL);
	if (!vecs_copy)
		return -ENOMEM;

	entry_low = 0;
	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, wsize, retsize, old_iov_len;

		if (to >= subdev->size) {
			to -= subdev->size;
			continue;
		}

		size = min_t(uint64_t, total_len, subdev->size - to);
		wsize = size; /* store for future use */

		entry_high = entry_low;
		while (entry_high < count) {
			if (size <= vecs_copy[entry_high].iov_len)
				break;
			size -= vecs_copy[entry_high++].iov_len;
		}

		old_iov_len = vecs_copy[entry_high].iov_len;
		vecs_copy[entry_high].iov_len = size;

		err = mtd_writev(subdev, &vecs_copy[entry_low],
				 entry_high - entry_low + 1, to, &retsize);

		vecs_copy[entry_high].iov_len = old_iov_len - size;
		vecs_copy[entry_high].iov_base += size;

		entry_low = entry_high;

		if (err)
			break;

		*retlen += retsize;
		total_len -= wsize;

		if (total_len == 0)
			break;

		err = -EINVAL;
		to = 0;
	}

	kfree(vecs_copy);
	return err;
}

static int
concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_oob_ops devops = *ops;
	int i, err, ret = 0;

	ops->retlen = ops->oobretlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (from >= subdev->size) {
			from -= subdev->size;
			continue;
		}

		/* partial read ? */
		if (from + devops.len > subdev->size)
			devops.len = subdev->size - from;

		err = mtd_read_oob(subdev, from, &devops);
		ops->retlen += devops.retlen;
		ops->oobretlen += devops.oobretlen;

		/* Save information about bitflips! */
		if (unlikely(err)) {
			if (mtd_is_eccerr(err)) {
				mtd->ecc_stats.failed++;
				ret = err;
			} else if (mtd_is_bitflip(err)) {
				mtd->ecc_stats.corrected++;
				/* Do not overwrite -EBADMSG !! */
				if (!ret)
					ret = err;
			} else
				return err;
		}

		if (devops.datbuf) {
			devops.len = ops->len - ops->retlen;
			if (!devops.len)
				return ret;
			devops.datbuf += devops.retlen;
		}
		if (devops.oobbuf) {
			devops.ooblen = ops->ooblen - ops->oobretlen;
			if (!devops.ooblen)
				return ret;
			devops.oobbuf += ops->oobretlen;
		}

		from = 0;
	}
	return -EINVAL;
}

static int
concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_oob_ops devops = *ops;
	int i, err;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	ops->retlen = ops->oobretlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (to >= subdev->size) {
			to -= subdev->size;
			continue;
		}

		/* partial write ? */
		if (to + devops.len > subdev->size)
			devops.len = subdev->size - to;

		err = mtd_write_oob(subdev, to, &devops);
		ops->retlen += devops.retlen;
		ops->oobretlen += devops.oobretlen;
		if (err)
			return err;

		if (devops.datbuf) {
			devops.len = ops->len - ops->retlen;
			if (!devops.len)
				return 0;
			devops.datbuf += devops.retlen;
		}
		if (devops.oobbuf) {
			devops.ooblen = ops->ooblen - ops->oobretlen;
			if (!devops.ooblen)
				return 0;
			devops.oobbuf += devops.oobretlen;
		}
		to = 0;
	}
	return -EINVAL;
}

static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_info *subdev;
	int i, err;
	uint64_t length, offset = 0;
	struct erase_info *erase;

	/*
	 * Check for proper erase block alignment of the to-be-erased area.
	 * It is easier to do this based on the super device's erase
	 * region info rather than looking at each particular sub-device
	 * in turn.
	 */
	if (!concat->mtd.numeraseregions) {
		/* the easy case: device has uniform erase block size */
		if (instr->addr & (concat->mtd.erasesize - 1))
			return -EINVAL;
		if (instr->len & (concat->mtd.erasesize - 1))
			return -EINVAL;
	} else {
		/* device has variable erase size */
		struct mtd_erase_region_info *erase_regions =
		    concat->mtd.eraseregions;

		/*
		 * Find the erase region where the to-be-erased area begins:
		 */
		for (i = 0; i < concat->mtd.numeraseregions &&
		     instr->addr >= erase_regions[i].offset; i++) ;
		--i;

		/*
		 * Now erase_regions[i] is the region in which the
		 * to-be-erased area begins. Verify that the starting
		 * offset is aligned to this region's erase size:
		 */
		if (i < 0 || instr->addr & (erase_regions[i].erasesize - 1))
			return -EINVAL;

		/*
		 * now find the erase region where the to-be-erased area ends:
		 */
		for (; i < concat->mtd.numeraseregions &&
		     (instr->addr + instr->len) >= erase_regions[i].offset;
		     ++i) ;
		--i;
		/*
		 * check if the ending offset is aligned to this region's erase size
		 */
		if (i < 0 || ((instr->addr + instr->len) &
			      (erase_regions[i].erasesize - 1)))
			return -EINVAL;
	}

	/* make a local copy of instr to avoid modifying the caller's struct */
	erase = kmalloc(sizeof (struct erase_info), GFP_KERNEL);

	if (!erase)
		return -ENOMEM;

	*erase = *instr;
	length = instr->len;

	/*
	 * find the subdevice where the to-be-erased area begins, adjust
	 * starting offset to be relative to the subdevice start
	 */
	for (i = 0; i < concat->num_subdev; i++) {
		subdev = concat->subdev[i];
		if (subdev->size <= erase->addr) {
			erase->addr -= subdev->size;
			offset += subdev->size;
		} else {
			break;
		}
	}

	/* must never happen since size limit has been verified above */
	BUG_ON(i >= concat->num_subdev);

	/* now do the erase: */
	err = 0;
	for (; length > 0; i++) {
		/* loop for all subdevices affected by this request */
		subdev = concat->subdev[i];	/* get current subdevice */

		/* limit length to subdevice's size: */
		if (erase->addr + length > subdev->size)
			erase->len = subdev->size - erase->addr;
		else
			erase->len = length;

		length -= erase->len;
		if ((err = mtd_erase(subdev, erase))) {
			/* sanity check: should never happen since
			 * block alignment has been checked above */
			BUG_ON(err == -EINVAL);
			if (erase->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
				instr->fail_addr = erase->fail_addr + offset;
			break;
		}
		/*
		 * erase->addr specifies the offset of the area to be
		 * erased *within the current subdevice*. It can be
		 * non-zero only the first time through this loop, i.e.
		 * for the first subdevice where blocks need to be erased.
		 * All the following erases must begin at the start of the
		 * current subdevice, i.e. at offset zero.
		 */
		erase->addr = 0;
		offset += subdev->size;
	}
	kfree(erase);

	return err;
}

static int concat_xxlock(struct mtd_info *mtd, loff_t ofs, uint64_t len,
			 bool is_lock)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		uint64_t size;

		if (ofs >= subdev->size) {
			size = 0;
			ofs -= subdev->size;
			continue;
		}
		if (ofs + len > subdev->size)
			size = subdev->size - ofs;
		else
			size = len;

		if (is_lock)
			err = mtd_lock(subdev, ofs, size);
		else
			err = mtd_unlock(subdev, ofs, size);
		if (err)
			break;

		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		ofs = 0;
	}

	return err;
}

static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return concat_xxlock(mtd, ofs, len, true);
}

static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return concat_xxlock(mtd, ofs, len, false);
}

static int concat_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		if (ofs + len > subdev->size)
			break;

		return mtd_is_locked(subdev, ofs, len);
	}

	return err;
}

static void concat_sync(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		mtd_sync(subdev);
	}
}

static int concat_suspend(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, rc = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		if ((rc = mtd_suspend(subdev)) < 0)
			return rc;
	}
	return rc;
}

static void concat_resume(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		mtd_resume(subdev);
	}
}

static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, res = 0;

	if (!mtd_can_have_bb(concat->subdev[0]))
		return res;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		res = mtd_block_isbad(subdev, ofs);
		break;
	}

	return res;
}

static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		err = mtd_block_markbad(subdev, ofs);
		if (!err)
			mtd->ecc_stats.badblocks++;
		break;
	}

	return err;
}

/*
 * This function constructs a virtual MTD device by concatenating
 * num_devs MTD devices. A pointer to the new device object is
 * returned on success. This function does _not_ register any
 * devices: this is the caller's responsibility.
 */
struct mtd_info *mtd_concat_create(struct mtd_info *subdev[],	/* subdevices to concatenate */
				   int num_devs,	/* number of subdevices      */
				   const char *name)
{				/* name for the new device   */
	int i;
	size_t size;
	struct mtd_concat *concat;
	uint32_t max_erasesize, curr_erasesize;
	int num_erase_region;
	int max_writebufsize = 0;

	printk(KERN_NOTICE "Concatenating MTD devices:\n");
	for (i = 0; i < num_devs; i++)
		printk(KERN_NOTICE "(%d): \"%s\"\n", i, subdev[i]->name);
	printk(KERN_NOTICE "into device \"%s\"\n", name);

	/* allocate the device structure */
	size = SIZEOF_STRUCT_MTD_CONCAT(num_devs);
	concat = kzalloc(size, GFP_KERNEL);
	if (!concat) {
		printk
		    ("memory allocation error while creating concatenated device \"%s\"\n",
		     name);
		return NULL;
	}
	concat->subdev = (struct mtd_info **) (concat + 1);

	/*
	 * Set up the new "super" device's MTD object structure, check for
	 * incompatibilities between the subdevices.
	 */
	concat->mtd.type = subdev[0]->type;
	concat->mtd.flags = subdev[0]->flags;
	concat->mtd.size = subdev[0]->size;
	concat->mtd.erasesize = subdev[0]->erasesize;
	concat->mtd.writesize = subdev[0]->writesize;

	for (i = 0; i < num_devs; i++)
		if (max_writebufsize < subdev[i]->writebufsize)
			max_writebufsize = subdev[i]->writebufsize;
	concat->mtd.writebufsize = max_writebufsize;

	concat->mtd.subpage_sft = subdev[0]->subpage_sft;
	concat->mtd.oobsize = subdev[0]->oobsize;
	concat->mtd.oobavail = subdev[0]->oobavail;
	if (subdev[0]->_writev)
		concat->mtd._writev = concat_writev;
	if (subdev[0]->_read_oob)
		concat->mtd._read_oob = concat_read_oob;
	if (subdev[0]->_write_oob)
		concat->mtd._write_oob = concat_write_oob;
	if (subdev[0]->_block_isbad)
		concat->mtd._block_isbad = concat_block_isbad;
	if (subdev[0]->_block_markbad)
		concat->mtd._block_markbad = concat_block_markbad;

	concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;

	concat->subdev[0] = subdev[0];

	for (i = 1; i < num_devs; i++) {
		if (concat->mtd.type != subdev[i]->type) {
			kfree(concat);
			printk("Incompatible device type on \"%s\"\n",
			       subdev[i]->name);
			return NULL;
		}
		if (concat->mtd.flags != subdev[i]->flags) {
			/*
			 * Expect all flags except MTD_WRITEABLE to be
			 * equal on all subdevices.
			 */
			if ((concat->mtd.flags ^ subdev[i]->
			     flags) & ~MTD_WRITEABLE) {
				kfree(concat);
				printk("Incompatible device flags on \"%s\"\n",
				       subdev[i]->name);
				return NULL;
			} else
				/* if writeable attribute differs,
				   make super device writeable */
				concat->mtd.flags |=
				    subdev[i]->flags & MTD_WRITEABLE;
		}

		concat->mtd.size += subdev[i]->size;
		concat->mtd.ecc_stats.badblocks +=
		    subdev[i]->ecc_stats.badblocks;
		if (concat->mtd.writesize != subdev[i]->writesize ||
		    concat->mtd.subpage_sft != subdev[i]->subpage_sft ||
		    concat->mtd.oobsize != subdev[i]->oobsize ||
		    !concat->mtd._read_oob != !subdev[i]->_read_oob ||
		    !concat->mtd._write_oob != !subdev[i]->_write_oob) {
			kfree(concat);
			printk("Incompatible OOB or ECC data on \"%s\"\n",
			       subdev[i]->name);
			return NULL;
		}
		concat->subdev[i] = subdev[i];
	}

	mtd_set_ooblayout(&concat->mtd, subdev[0]->ooblayout);

	concat->num_subdev = num_devs;
	concat->mtd.name = name;

	concat->mtd._erase = concat_erase;
	concat->mtd._read = concat_read;
	concat->mtd._write = concat_write;
	concat->mtd._sync = concat_sync;
	concat->mtd._lock = concat_lock;
	concat->mtd._unlock = concat_unlock;
	concat->mtd._is_locked = concat_is_locked;
	concat->mtd._suspend = concat_suspend;
	concat->mtd._resume = concat_resume;

	/*
	 * Combine the erase block size info of the subdevices:
	 *
	 * first, walk the map of the new device and see how
	 * many changes in erase size we have
	 */
	max_erasesize = curr_erasesize = subdev[0]->erasesize;
	num_erase_region = 1;
	for (i = 0; i < num_devs; i++) {
		if (subdev[i]->numeraseregions == 0) {
			/* current subdevice has uniform erase size */
			if (subdev[i]->erasesize != curr_erasesize) {
				/* if it differs from the last subdevice's
				   erase size, count it */
				++num_erase_region;
				curr_erasesize = subdev[i]->erasesize;
				if (curr_erasesize > max_erasesize)
					max_erasesize = curr_erasesize;
			}
		} else {
			/* current subdevice has variable erase size */
			int j;
			for (j = 0; j < subdev[i]->numeraseregions; j++) {
				/* walk the list of erase regions, count any changes */
				if (subdev[i]->eraseregions[j].erasesize !=
				    curr_erasesize) {
					++num_erase_region;
					curr_erasesize =
					    subdev[i]->eraseregions[j].
					    erasesize;
					if (curr_erasesize > max_erasesize)
						max_erasesize = curr_erasesize;
				}
			}
		}
	}

	if (num_erase_region == 1) {
		/*
		 * All subdevices have the same uniform erase size.
		 * This is easy:
		 */
		concat->mtd.erasesize = curr_erasesize;
		concat->mtd.numeraseregions = 0;
	} else {
		uint64_t tmp64;

		/*
		 * erase block size varies across the subdevices: allocate
		 * space to store the data describing the variable erase regions
		 */
		struct mtd_erase_region_info *erase_region_p;
		uint64_t begin, position;

		concat->mtd.erasesize = max_erasesize;
		concat->mtd.numeraseregions = num_erase_region;
		concat->mtd.eraseregions = erase_region_p =
		    kmalloc_array(num_erase_region,
				  sizeof(struct mtd_erase_region_info),
				  GFP_KERNEL);
		if (!erase_region_p) {
			kfree(concat);
			printk
			    ("memory allocation error while creating erase region list"
			     " for device \"%s\"\n", name);
			return NULL;
		}

		/*
		 * walk the map of the new device once more and fill in
		 * erase region info:
		 */
		curr_erasesize = subdev[0]->erasesize;
		begin = position = 0;
		for (i = 0; i < num_devs; i++) {
			if (subdev[i]->numeraseregions == 0) {
				/* current subdevice has uniform erase size */
				if (subdev[i]->erasesize != curr_erasesize) {
					/*
					 * fill in an mtd_erase_region_info structure for the area
					 * we have walked so far:
					 */
					erase_region_p->offset = begin;
					erase_region_p->erasesize =
					    curr_erasesize;
					tmp64 = position - begin;
					do_div(tmp64, curr_erasesize);
					erase_region_p->numblocks = tmp64;
					begin = position;

					curr_erasesize = subdev[i]->erasesize;
					++erase_region_p;
				}
				position += subdev[i]->size;
			} else {
				/* current subdevice has variable erase size */
				int j;
				for (j = 0; j < subdev[i]->numeraseregions; j++) {
					/* walk the list of erase regions, count any changes */
					if (subdev[i]->eraseregions[j].
					    erasesize != curr_erasesize) {
						erase_region_p->offset = begin;
						erase_region_p->erasesize =
						    curr_erasesize;
						tmp64 = position - begin;
						do_div(tmp64, curr_erasesize);
						erase_region_p->numblocks = tmp64;
						begin = position;

						curr_erasesize =
						    subdev[i]->eraseregions[j].
						    erasesize;
						++erase_region_p;
					}
					position +=
					    subdev[i]->eraseregions[j].
					    numblocks * (uint64_t)curr_erasesize;
				}
			}
		}
		/* Now write the final entry */
		erase_region_p->offset = begin;
		erase_region_p->erasesize = curr_erasesize;
		tmp64 = position - begin;
		do_div(tmp64, curr_erasesize);
		erase_region_p->numblocks = tmp64;
	}

	return &concat->mtd;
}

/*
 * This function destroys an MTD object obtained from mtd_concat_create()
 */

void mtd_concat_destroy(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	if (concat->mtd.numeraseregions)
		kfree(concat->mtd.eraseregions);
	kfree(concat);
}
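
/*
 * Minimal usage sketch (illustrative only, not part of this driver): a
 * board driver that has already probed two chips into the hypothetical
 * variables mtd0 and mtd1 could combine and register them roughly as:
 *
 *	struct mtd_info *parts[2] = { mtd0, mtd1 };
 *	struct mtd_info *combined;
 *
 *	combined = mtd_concat_create(parts, ARRAY_SIZE(parts), "flash-concat");
 *	if (!combined)
 *		return -ENXIO;
 *	err = mtd_device_register(combined, NULL, 0);
 *
 * and later tear it down with mtd_device_unregister(combined) followed by
 * mtd_concat_destroy(combined).
 */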

EXPORT_SYMBOL(mtd_concat_create);
EXPORT_SYMBOL(mtd_concat_destroy);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Robert Kaiser <rkaiser@sysgo.de>");
MODULE_DESCRIPTION("Generic support for concatenating MTD devices");