v3.1
  1/*
  2 * Simple MTD partitioning layer
  3 *
  4 * Copyright © 2000 Nicolas Pitre <nico@fluxnic.net>
  5 * Copyright © 2002 Thomas Gleixner <gleixner@linutronix.de>
  6 * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
  7 *
  8 * This program is free software; you can redistribute it and/or modify
  9 * it under the terms of the GNU General Public License as published by
 10 * the Free Software Foundation; either version 2 of the License, or
 11 * (at your option) any later version.
 12 *
 13 * This program is distributed in the hope that it will be useful,
 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 16 * GNU General Public License for more details.
 17 *
 18 * You should have received a copy of the GNU General Public License
 19 * along with this program; if not, write to the Free Software
 20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 21 *
 22 */
 23
 24#include <linux/module.h>
 25#include <linux/types.h>
 26#include <linux/kernel.h>
 27#include <linux/slab.h>
 28#include <linux/list.h>
 29#include <linux/kmod.h>
 30#include <linux/mtd/mtd.h>
 31#include <linux/mtd/partitions.h>
 32#include <linux/err.h>
 33
 34#include "mtdcore.h"
 35
 36/* Our partition linked list */
 37static LIST_HEAD(mtd_partitions);
 38static DEFINE_MUTEX(mtd_partitions_mutex);
 39
 40/* Our partition node structure */
 41struct mtd_part {
 42	struct mtd_info mtd;
 43	struct mtd_info *master;
 44	uint64_t offset;
 45	struct list_head list;
 46};
 47
 48/*
 49 * Given a pointer to the MTD object in the mtd_part structure, we can retrieve
 50 * the pointer to that structure with this macro.
 51 */
 52#define PART(x)  ((struct mtd_part *)(x))
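/*
 * Illustrative sketch (not from the original file; variable names are
 * hypothetical): the cast works because the mtd_info is the first member
 * of struct mtd_part, so both pointers share the same address.
 *
 *	struct mtd_info *sub;			// mtd_info embedded in a partition
 *	struct mtd_part *part = PART(sub);	// recover the enclosing mtd_part
 *	// translate a partition-relative offset to a master offset:
 *	// master_off = off + part->offset;
 */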
 53
 54
 55/*
 56 * MTD methods which simply translate the effective address and pass through
 57 * to the _real_ device.
 58 */
 59
 60static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
 61		size_t *retlen, u_char *buf)
 62{
 63	struct mtd_part *part = PART(mtd);
 64	struct mtd_ecc_stats stats;
 65	int res;
 66
 67	stats = part->master->ecc_stats;
 68
 69	if (from >= mtd->size)
 70		len = 0;
 71	else if (from + len > mtd->size)
 72		len = mtd->size - from;
 73	res = part->master->read(part->master, from + part->offset,
 74				   len, retlen, buf);
 75	if (unlikely(res)) {
 76		if (res == -EUCLEAN)
 77			mtd->ecc_stats.corrected += part->master->ecc_stats.corrected - stats.corrected;
 78		if (res == -EBADMSG)
 79			mtd->ecc_stats.failed += part->master->ecc_stats.failed - stats.failed;
 80	}
 81	return res;
 82}
 83
 84static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
 85		size_t *retlen, void **virt, resource_size_t *phys)
 86{
 87	struct mtd_part *part = PART(mtd);
 88	if (from >= mtd->size)
 89		len = 0;
 90	else if (from + len > mtd->size)
 91		len = mtd->size - from;
 92	return part->master->point (part->master, from + part->offset,
 93				    len, retlen, virt, phys);
 94}
 95
 96static void part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
 97{
 98	struct mtd_part *part = PART(mtd);
 99
100	part->master->unpoint(part->master, from + part->offset, len);
101}
102
103static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
104					    unsigned long len,
105					    unsigned long offset,
106					    unsigned long flags)
107{
108	struct mtd_part *part = PART(mtd);
109
110	offset += part->offset;
111	return part->master->get_unmapped_area(part->master, len, offset,
112					       flags);
113}
114
115static int part_read_oob(struct mtd_info *mtd, loff_t from,
116		struct mtd_oob_ops *ops)
117{
118	struct mtd_part *part = PART(mtd);
119	int res;
120
121	if (from >= mtd->size)
122		return -EINVAL;
123	if (ops->datbuf && from + ops->len > mtd->size)
124		return -EINVAL;
125
126	/*
127	 * If OOB is also requested, make sure that we do not read past the end
128	 * of this partition.
129	 */
130	if (ops->oobbuf) {
131		size_t len, pages;
132
133		if (ops->mode == MTD_OOB_AUTO)
134			len = mtd->oobavail;
135		else
136			len = mtd->oobsize;
137		pages = mtd_div_by_ws(mtd->size, mtd);
138		pages -= mtd_div_by_ws(from, mtd);
139		if (ops->ooboffs + ops->ooblen > pages * len)
140			return -EINVAL;
141	}
142
143	res = part->master->read_oob(part->master, from + part->offset, ops);
144	if (unlikely(res)) {
145		if (res == -EUCLEAN)
146			mtd->ecc_stats.corrected++;
147		if (res == -EBADMSG)
148			mtd->ecc_stats.failed++;
149	}
150	return res;
151}
152
153static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
154		size_t len, size_t *retlen, u_char *buf)
155{
156	struct mtd_part *part = PART(mtd);
157	return part->master->read_user_prot_reg(part->master, from,
158					len, retlen, buf);
159}
160
161static int part_get_user_prot_info(struct mtd_info *mtd,
162		struct otp_info *buf, size_t len)
163{
164	struct mtd_part *part = PART(mtd);
165	return part->master->get_user_prot_info(part->master, buf, len);
166}
167
168static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
169		size_t len, size_t *retlen, u_char *buf)
170{
171	struct mtd_part *part = PART(mtd);
172	return part->master->read_fact_prot_reg(part->master, from,
173					len, retlen, buf);
174}
175
176static int part_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf,
177		size_t len)
178{
179	struct mtd_part *part = PART(mtd);
180	return part->master->get_fact_prot_info(part->master, buf, len);
181}
182
183static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
184		size_t *retlen, const u_char *buf)
185{
186	struct mtd_part *part = PART(mtd);
187	if (!(mtd->flags & MTD_WRITEABLE))
188		return -EROFS;
189	if (to >= mtd->size)
190		len = 0;
191	else if (to + len > mtd->size)
192		len = mtd->size - to;
193	return part->master->write(part->master, to + part->offset,
194				    len, retlen, buf);
195}
196
197static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
198		size_t *retlen, const u_char *buf)
199{
200	struct mtd_part *part = PART(mtd);
201	if (!(mtd->flags & MTD_WRITEABLE))
202		return -EROFS;
203	if (to >= mtd->size)
204		len = 0;
205	else if (to + len > mtd->size)
206		len = mtd->size - to;
207	return part->master->panic_write(part->master, to + part->offset,
208				    len, retlen, buf);
209}
210
211static int part_write_oob(struct mtd_info *mtd, loff_t to,
212		struct mtd_oob_ops *ops)
213{
214	struct mtd_part *part = PART(mtd);
215
216	if (!(mtd->flags & MTD_WRITEABLE))
217		return -EROFS;
218
219	if (to >= mtd->size)
220		return -EINVAL;
221	if (ops->datbuf && to + ops->len > mtd->size)
222		return -EINVAL;
223	return part->master->write_oob(part->master, to + part->offset, ops);
224}
225
226static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
227		size_t len, size_t *retlen, u_char *buf)
228{
229	struct mtd_part *part = PART(mtd);
230	return part->master->write_user_prot_reg(part->master, from,
231					len, retlen, buf);
232}
233
234static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
235		size_t len)
236{
237	struct mtd_part *part = PART(mtd);
238	return part->master->lock_user_prot_reg(part->master, from, len);
239}
240
241static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
242		unsigned long count, loff_t to, size_t *retlen)
243{
244	struct mtd_part *part = PART(mtd);
245	if (!(mtd->flags & MTD_WRITEABLE))
246		return -EROFS;
247	return part->master->writev(part->master, vecs, count,
248					to + part->offset, retlen);
249}
250
251static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
252{
253	struct mtd_part *part = PART(mtd);
254	int ret;
255	if (!(mtd->flags & MTD_WRITEABLE))
256		return -EROFS;
257	if (instr->addr >= mtd->size)
258		return -EINVAL;
259	instr->addr += part->offset;
260	ret = part->master->erase(part->master, instr);
261	if (ret) {
262		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
263			instr->fail_addr -= part->offset;
264		instr->addr -= part->offset;
265	}
266	return ret;
267}
268
269void mtd_erase_callback(struct erase_info *instr)
270{
271	if (instr->mtd->erase == part_erase) {
272		struct mtd_part *part = PART(instr->mtd);
273
274		if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
275			instr->fail_addr -= part->offset;
276		instr->addr -= part->offset;
277	}
278	if (instr->callback)
279		instr->callback(instr);
280}
281EXPORT_SYMBOL_GPL(mtd_erase_callback);
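/*
 * Usage sketch (assumed driver-side flow, not part of the original file):
 * a low-level driver finishing an asynchronous erase marks the request
 * done and calls mtd_erase_callback(), which maps instr->addr and
 * instr->fail_addr back into the partition's address space before running
 * the user's callback.
 *
 *	instr->state = MTD_ERASE_DONE;
 *	mtd_erase_callback(instr);
 */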
282
283static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
284{
285	struct mtd_part *part = PART(mtd);
286	if ((len + ofs) > mtd->size)
287		return -EINVAL;
288	return part->master->lock(part->master, ofs + part->offset, len);
289}
290
291static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
292{
293	struct mtd_part *part = PART(mtd);
294	if ((len + ofs) > mtd->size)
295		return -EINVAL;
296	return part->master->unlock(part->master, ofs + part->offset, len);
297}
298
299static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
300{
301	struct mtd_part *part = PART(mtd);
302	if ((len + ofs) > mtd->size)
303		return -EINVAL;
304	return part->master->is_locked(part->master, ofs + part->offset, len);
305}
306
307static void part_sync(struct mtd_info *mtd)
308{
309	struct mtd_part *part = PART(mtd);
310	part->master->sync(part->master);
311}
312
313static int part_suspend(struct mtd_info *mtd)
314{
315	struct mtd_part *part = PART(mtd);
316	return part->master->suspend(part->master);
317}
318
319static void part_resume(struct mtd_info *mtd)
320{
321	struct mtd_part *part = PART(mtd);
322	part->master->resume(part->master);
323}
324
325static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
326{
327	struct mtd_part *part = PART(mtd);
328	if (ofs >= mtd->size)
329		return -EINVAL;
330	ofs += part->offset;
331	return part->master->block_isbad(part->master, ofs);
332}
333
334static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
335{
336	struct mtd_part *part = PART(mtd);
337	int res;
338
339	if (!(mtd->flags & MTD_WRITEABLE))
340		return -EROFS;
341	if (ofs >= mtd->size)
342		return -EINVAL;
343	ofs += part->offset;
344	res = part->master->block_markbad(part->master, ofs);
345	if (!res)
346		mtd->ecc_stats.badblocks++;
347	return res;
348}
349
350static inline void free_partition(struct mtd_part *p)
351{
352	kfree(p->mtd.name);
353	kfree(p);
354}
355
356/*
357 * This function unregisters and destroys all slave MTD objects which are
358 * attached to the given master MTD object.
359 */
360
361int del_mtd_partitions(struct mtd_info *master)
362{
363	struct mtd_part *slave, *next;
364	int ret, err = 0;
365
366	mutex_lock(&mtd_partitions_mutex);
367	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
368		if (slave->master == master) {
369			ret = del_mtd_device(&slave->mtd);
370			if (ret < 0) {
371				err = ret;
372				continue;
373			}
374			list_del(&slave->list);
375			free_partition(slave);
376		}
377	mutex_unlock(&mtd_partitions_mutex);
378
379	return err;
380}
381
382static struct mtd_part *allocate_partition(struct mtd_info *master,
383			const struct mtd_partition *part, int partno,
384			uint64_t cur_offset)
385{
386	struct mtd_part *slave;
387	char *name;
388
389	/* allocate the partition structure */
390	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
391	name = kstrdup(part->name, GFP_KERNEL);
392	if (!name || !slave) {
393		printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n",
394		       master->name);
395		kfree(name);
396		kfree(slave);
397		return ERR_PTR(-ENOMEM);
398	}
399
400	/* set up the MTD object for this partition */
401	slave->mtd.type = master->type;
402	slave->mtd.flags = master->flags & ~part->mask_flags;
403	slave->mtd.size = part->size;
404	slave->mtd.writesize = master->writesize;
405	slave->mtd.writebufsize = master->writebufsize;
406	slave->mtd.oobsize = master->oobsize;
407	slave->mtd.oobavail = master->oobavail;
408	slave->mtd.subpage_sft = master->subpage_sft;
409
410	slave->mtd.name = name;
411	slave->mtd.owner = master->owner;
412	slave->mtd.backing_dev_info = master->backing_dev_info;
413
414	/* NOTE:  we don't arrange MTDs as a tree; it'd be error-prone
415	 * to have the same data be in two different partitions.
416	 */
417	slave->mtd.dev.parent = master->dev.parent;
418
419	slave->mtd.read = part_read;
420	slave->mtd.write = part_write;
421
422	if (master->panic_write)
423		slave->mtd.panic_write = part_panic_write;
424
425	if (master->point && master->unpoint) {
426		slave->mtd.point = part_point;
427		slave->mtd.unpoint = part_unpoint;
428	}
429
430	if (master->get_unmapped_area)
431		slave->mtd.get_unmapped_area = part_get_unmapped_area;
432	if (master->read_oob)
433		slave->mtd.read_oob = part_read_oob;
434	if (master->write_oob)
435		slave->mtd.write_oob = part_write_oob;
436	if (master->read_user_prot_reg)
437		slave->mtd.read_user_prot_reg = part_read_user_prot_reg;
438	if (master->read_fact_prot_reg)
439		slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg;
440	if (master->write_user_prot_reg)
441		slave->mtd.write_user_prot_reg = part_write_user_prot_reg;
442	if (master->lock_user_prot_reg)
443		slave->mtd.lock_user_prot_reg = part_lock_user_prot_reg;
444	if (master->get_user_prot_info)
445		slave->mtd.get_user_prot_info = part_get_user_prot_info;
446	if (master->get_fact_prot_info)
447		slave->mtd.get_fact_prot_info = part_get_fact_prot_info;
448	if (master->sync)
449		slave->mtd.sync = part_sync;
450	if (!partno && !master->dev.class && master->suspend && master->resume) {
451			slave->mtd.suspend = part_suspend;
452			slave->mtd.resume = part_resume;
453	}
454	if (master->writev)
455		slave->mtd.writev = part_writev;
456	if (master->lock)
457		slave->mtd.lock = part_lock;
458	if (master->unlock)
459		slave->mtd.unlock = part_unlock;
460	if (master->is_locked)
461		slave->mtd.is_locked = part_is_locked;
462	if (master->block_isbad)
463		slave->mtd.block_isbad = part_block_isbad;
464	if (master->block_markbad)
465		slave->mtd.block_markbad = part_block_markbad;
466	slave->mtd.erase = part_erase;
467	slave->master = master;
468	slave->offset = part->offset;
469
470	if (slave->offset == MTDPART_OFS_APPEND)
471		slave->offset = cur_offset;
472	if (slave->offset == MTDPART_OFS_NXTBLK) {
473		slave->offset = cur_offset;
474		if (mtd_mod_by_eb(cur_offset, master) != 0) {
475			/* Round up to next erasesize */
476			slave->offset = (mtd_div_by_eb(cur_offset, master) + 1) * master->erasesize;
477			printk(KERN_NOTICE "Moving partition %d: "
478			       "0x%012llx -> 0x%012llx\n", partno,
479			       (unsigned long long)cur_offset, (unsigned long long)slave->offset);
480		}
481	}
482	if (slave->mtd.size == MTDPART_SIZ_FULL)
483		slave->mtd.size = master->size - slave->offset;
484
485	printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset,
486		(unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name);
487
488	/* let's do some sanity checks */
489	if (slave->offset >= master->size) {
490		/* let's register it anyway to preserve ordering */
491		slave->offset = 0;
492		slave->mtd.size = 0;
493		printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
494			part->name);
495		goto out_register;
496	}
497	if (slave->offset + slave->mtd.size > master->size) {
498		slave->mtd.size = master->size - slave->offset;
499		printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
500			part->name, master->name, (unsigned long long)slave->mtd.size);
501	}
502	if (master->numeraseregions > 1) {
503		/* Deal with variable erase size stuff */
504		int i, max = master->numeraseregions;
505		u64 end = slave->offset + slave->mtd.size;
506		struct mtd_erase_region_info *regions = master->eraseregions;
507
508		/* Find the first erase region which is part of this
509		 * partition. */
510		for (i = 0; i < max && regions[i].offset <= slave->offset; i++)
511			;
512		/* The loop searched for the region _behind_ the first one */
513		if (i > 0)
514			i--;
515
516		/* Pick biggest erasesize */
517		for (; i < max && regions[i].offset < end; i++) {
518			if (slave->mtd.erasesize < regions[i].erasesize) {
519				slave->mtd.erasesize = regions[i].erasesize;
520			}
521		}
522		BUG_ON(slave->mtd.erasesize == 0);
523	} else {
524		/* Single erase size */
525		slave->mtd.erasesize = master->erasesize;
526	}
527
528	if ((slave->mtd.flags & MTD_WRITEABLE) &&
529	    mtd_mod_by_eb(slave->offset, &slave->mtd)) {
530		/* Doesn't start on a boundary of major erase size */
531		/* FIXME: Let it be writable if it is on a boundary of
532		 * _minor_ erase size though */
533		slave->mtd.flags &= ~MTD_WRITEABLE;
534		printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
535			part->name);
536	}
537	if ((slave->mtd.flags & MTD_WRITEABLE) &&
538	    mtd_mod_by_eb(slave->mtd.size, &slave->mtd)) {
539		slave->mtd.flags &= ~MTD_WRITEABLE;
540		printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
541			part->name);
542	}
543
544	slave->mtd.ecclayout = master->ecclayout;
545	if (master->block_isbad) {
546		uint64_t offs = 0;
547
548		while (offs < slave->mtd.size) {
549			if (master->block_isbad(master,
550						offs + slave->offset))
551				slave->mtd.ecc_stats.badblocks++;
552			offs += slave->mtd.erasesize;
553		}
554	}
555
556out_register:
557	return slave;
558}
559
560int mtd_add_partition(struct mtd_info *master, char *name,
561		      long long offset, long long length)
562{
563	struct mtd_partition part;
564	struct mtd_part *p, *new;
565	uint64_t start, end;
566	int ret = 0;
567
568	/* the direct offset is expected */
569	if (offset == MTDPART_OFS_APPEND ||
570	    offset == MTDPART_OFS_NXTBLK)
571		return -EINVAL;
572
573	if (length == MTDPART_SIZ_FULL)
574		length = master->size - offset;
575
576	if (length <= 0)
577		return -EINVAL;
578
579	part.name = name;
580	part.size = length;
581	part.offset = offset;
582	part.mask_flags = 0;
583	part.ecclayout = NULL;
584
585	new = allocate_partition(master, &part, -1, offset);
586	if (IS_ERR(new))
587		return PTR_ERR(new);
588
589	start = offset;
590	end = offset + length;
591
592	mutex_lock(&mtd_partitions_mutex);
593	list_for_each_entry(p, &mtd_partitions, list)
594		if (p->master == master) {
595			if ((start >= p->offset) &&
596			    (start < (p->offset + p->mtd.size)))
597				goto err_inv;
598
599			if ((end >= p->offset) &&
600			    (end < (p->offset + p->mtd.size)))
601				goto err_inv;
602		}
603
604	list_add(&new->list, &mtd_partitions);
605	mutex_unlock(&mtd_partitions_mutex);
606
607	add_mtd_device(&new->mtd);
608
609	return ret;
610err_inv:
611	mutex_unlock(&mtd_partitions_mutex);
612	free_partition(new);
613	return -EINVAL;
614}
615EXPORT_SYMBOL_GPL(mtd_add_partition);
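/*
 * Usage sketch (hypothetical offsets/sizes; error handling elided): create
 * a 256KiB partition named "user-data" starting 1MiB into an already
 * registered master device. The name is copied with kstrdup(), so a stack
 * buffer is fine.
 *
 *	char pname[] = "user-data";
 *
 *	if (mtd_add_partition(master, pname, 0x100000, 0x40000))
 *		pr_err("failed to add partition\n");
 */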
616
617int mtd_del_partition(struct mtd_info *master, int partno)
618{
619	struct mtd_part *slave, *next;
620	int ret = -EINVAL;
621
622	mutex_lock(&mtd_partitions_mutex);
623	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
624		if ((slave->master == master) &&
625		    (slave->mtd.index == partno)) {
626			ret = del_mtd_device(&slave->mtd);
627			if (ret < 0)
628				break;
629
630			list_del(&slave->list);
631			free_partition(slave);
632			break;
633		}
634	mutex_unlock(&mtd_partitions_mutex);
635
636	return ret;
637}
638EXPORT_SYMBOL_GPL(mtd_del_partition);
639
640/*
641 * This function, given a master MTD object and a partition table, creates
642 * and registers slave MTD objects which are bound to the master according to
643 * the partition definitions.
644 *
645 * We don't register the master, or expect the caller to have done so,
646 * for reasons of data integrity.
647 */
648
649int add_mtd_partitions(struct mtd_info *master,
650		       const struct mtd_partition *parts,
651		       int nbparts)
652{
653	struct mtd_part *slave;
654	uint64_t cur_offset = 0;
655	int i;
656
657	printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);
658
659	for (i = 0; i < nbparts; i++) {
660		slave = allocate_partition(master, parts + i, i, cur_offset);
661		if (IS_ERR(slave))
662			return PTR_ERR(slave);
663
664		mutex_lock(&mtd_partitions_mutex);
665		list_add(&slave->list, &mtd_partitions);
666		mutex_unlock(&mtd_partitions_mutex);
667
668		add_mtd_device(&slave->mtd);
669
670		cur_offset = slave->offset + slave->mtd.size;
671	}
672
673	return 0;
674}
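/*
 * Example table (hypothetical names/sizes, not from the original file): a
 * board driver typically builds a static table like this and hands it to
 * add_mtd_partitions() once the master chip has been probed. mask_flags
 * clears MTD_WRITEABLE, making the bootloader partition read-only.
 *
 *	static struct mtd_partition board_parts[] = {
 *		{ .name = "bootloader", .offset = 0, .size = 256 * 1024,
 *		  .mask_flags = MTD_WRITEABLE },
 *		{ .name = "kernel", .offset = MTDPART_OFS_NXTBLK,
 *		  .size = 4 * 1024 * 1024 },
 *		{ .name = "rootfs", .offset = MTDPART_OFS_APPEND,
 *		  .size = MTDPART_SIZ_FULL },
 *	};
 *
 *	add_mtd_partitions(master, board_parts, ARRAY_SIZE(board_parts));
 */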
675
676static DEFINE_SPINLOCK(part_parser_lock);
677static LIST_HEAD(part_parsers);
678
679static struct mtd_part_parser *get_partition_parser(const char *name)
680{
681	struct mtd_part_parser *p, *ret = NULL;
682
683	spin_lock(&part_parser_lock);
684
685	list_for_each_entry(p, &part_parsers, list)
686		if (!strcmp(p->name, name) && try_module_get(p->owner)) {
687			ret = p;
688			break;
689		}
690
691	spin_unlock(&part_parser_lock);
692
693	return ret;
694}
695
696int register_mtd_parser(struct mtd_part_parser *p)
697{
698	spin_lock(&part_parser_lock);
699	list_add(&p->list, &part_parsers);
700	spin_unlock(&part_parser_lock);
701
702	return 0;
703}
704EXPORT_SYMBOL_GPL(register_mtd_parser);
705
706int deregister_mtd_parser(struct mtd_part_parser *p)
707{
708	spin_lock(&part_parser_lock);
709	list_del(&p->list);
710	spin_unlock(&part_parser_lock);
711	return 0;
712}
713EXPORT_SYMBOL_GPL(deregister_mtd_parser);
714
715int parse_mtd_partitions(struct mtd_info *master, const char **types,
716			 struct mtd_partition **pparts, unsigned long origin)
717{
718	struct mtd_part_parser *parser;
719	int ret = 0;
720
721	for ( ; ret <= 0 && *types; types++) {
722		parser = get_partition_parser(*types);
723		if (!parser && !request_module("%s", *types))
724				parser = get_partition_parser(*types);
725		if (!parser)
726			continue;
727		ret = (*parser->parse_fn)(master, pparts, origin);
728		if (ret > 0) {
729			printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
730			       ret, parser->name, master->name);
731		}
732		put_partition_parser(parser);
733	}
734	return ret;
735}
736EXPORT_SYMBOL_GPL(parse_mtd_partitions);
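/*
 * Usage sketch (hypothetical caller, not part of the original file): probe
 * code usually tries a list of parser names and falls back to a static
 * table when nothing is found.
 *
 *	static const char *probe_types[] = { "cmdlinepart", NULL };
 *	struct mtd_partition *parts;
 *	int nr;
 *
 *	nr = parse_mtd_partitions(master, probe_types, &parts, 0);
 *	if (nr > 0)
 *		add_mtd_partitions(master, parts, nr);
 */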
737
738int mtd_is_partition(struct mtd_info *mtd)
739{
740	struct mtd_part *part;
741	int ispart = 0;
742
743	mutex_lock(&mtd_partitions_mutex);
744	list_for_each_entry(part, &mtd_partitions, list)
745		if (&part->mtd == mtd) {
746			ispart = 1;
747			break;
748		}
749	mutex_unlock(&mtd_partitions_mutex);
750
751	return ispart;
752}
753EXPORT_SYMBOL_GPL(mtd_is_partition);
v4.17
   1/*
   2 * Simple MTD partitioning layer
   3 *
   4 * Copyright © 2000 Nicolas Pitre <nico@fluxnic.net>
   5 * Copyright © 2002 Thomas Gleixner <gleixner@linutronix.de>
   6 * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org>
   7 *
   8 * This program is free software; you can redistribute it and/or modify
   9 * it under the terms of the GNU General Public License as published by
  10 * the Free Software Foundation; either version 2 of the License, or
  11 * (at your option) any later version.
  12 *
  13 * This program is distributed in the hope that it will be useful,
  14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  16 * GNU General Public License for more details.
  17 *
  18 * You should have received a copy of the GNU General Public License
  19 * along with this program; if not, write to the Free Software
  20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  21 *
  22 */
  23
  24#include <linux/module.h>
  25#include <linux/types.h>
  26#include <linux/kernel.h>
  27#include <linux/slab.h>
  28#include <linux/list.h>
  29#include <linux/kmod.h>
  30#include <linux/mtd/mtd.h>
  31#include <linux/mtd/partitions.h>
  32#include <linux/err.h>
  33#include <linux/of.h>
  34
  35#include "mtdcore.h"
  36
  37/* Our partition linked list */
  38static LIST_HEAD(mtd_partitions);
  39static DEFINE_MUTEX(mtd_partitions_mutex);
  40
  41/**
  42 * struct mtd_part - our partition node structure
  43 *
  44 * @mtd: struct holding partition details
  45 * @parent: parent mtd - flash device or another partition
  46 * @offset: partition offset relative to the *flash device*
  47 */
  48struct mtd_part {
  49	struct mtd_info mtd;
  50	struct mtd_info *parent;
  51	uint64_t offset;
  52	struct list_head list;
  53};
  54
  55/*
  56 * Given a pointer to the MTD object in the mtd_part structure, we can retrieve
  57 * the pointer to that structure.
  58 */
  59static inline struct mtd_part *mtd_to_part(const struct mtd_info *mtd)
  60{
  61	return container_of(mtd, struct mtd_part, mtd);
  62}
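/*
 * Illustrative sketch (hypothetical variables): unlike the old PART()
 * cast, container_of() stays correct even if 'mtd' were not the first
 * member of struct mtd_part.
 *
 *	struct mtd_part *part = mtd_to_part(mtd);
 *
 *	pr_debug("partition at offset %llu\n",
 *		 (unsigned long long)part->offset);
 */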
  63
  64
  65/*
  66 * MTD methods which simply translate the effective address and pass through
  67 * to the _real_ device.
  68 */
  69
  70static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
  71		size_t *retlen, u_char *buf)
  72{
  73	struct mtd_part *part = mtd_to_part(mtd);
  74	struct mtd_ecc_stats stats;
  75	int res;
  76
  77	stats = part->parent->ecc_stats;
  78	res = part->parent->_read(part->parent, from + part->offset, len,
  79				  retlen, buf);
  80	if (unlikely(mtd_is_eccerr(res)))
  81		mtd->ecc_stats.failed +=
  82			part->parent->ecc_stats.failed - stats.failed;
  83	else
  84		mtd->ecc_stats.corrected +=
  85			part->parent->ecc_stats.corrected - stats.corrected;
  86	return res;
  87}
  88
  89static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
  90		size_t *retlen, void **virt, resource_size_t *phys)
  91{
  92	struct mtd_part *part = mtd_to_part(mtd);
  93
  94	return part->parent->_point(part->parent, from + part->offset, len,
  95				    retlen, virt, phys);
  96}
  97
  98static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
  99{
 100	struct mtd_part *part = mtd_to_part(mtd);
 101
 102	return part->parent->_unpoint(part->parent, from + part->offset, len);
 103}
 104
 105static int part_read_oob(struct mtd_info *mtd, loff_t from,
 106		struct mtd_oob_ops *ops)
 107{
 108	struct mtd_part *part = mtd_to_part(mtd);
 109	struct mtd_ecc_stats stats;
 110	int res;
 111
 112	stats = part->parent->ecc_stats;
 113	res = part->parent->_read_oob(part->parent, from + part->offset, ops);
 114	if (unlikely(mtd_is_eccerr(res)))
 115		mtd->ecc_stats.failed +=
 116			part->parent->ecc_stats.failed - stats.failed;
 117	else
 118		mtd->ecc_stats.corrected +=
 119			part->parent->ecc_stats.corrected - stats.corrected;
 120	return res;
 121}
 122
 123static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
 124		size_t len, size_t *retlen, u_char *buf)
 125{
 126	struct mtd_part *part = mtd_to_part(mtd);
 127	return part->parent->_read_user_prot_reg(part->parent, from, len,
 128						 retlen, buf);
 129}
 130
 131static int part_get_user_prot_info(struct mtd_info *mtd, size_t len,
 132				   size_t *retlen, struct otp_info *buf)
 133{
 134	struct mtd_part *part = mtd_to_part(mtd);
 135	return part->parent->_get_user_prot_info(part->parent, len, retlen,
 136						 buf);
 137}
 138
 139static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
 140		size_t len, size_t *retlen, u_char *buf)
 141{
 142	struct mtd_part *part = mtd_to_part(mtd);
 143	return part->parent->_read_fact_prot_reg(part->parent, from, len,
 144						 retlen, buf);
 145}
 146
 147static int part_get_fact_prot_info(struct mtd_info *mtd, size_t len,
 148				   size_t *retlen, struct otp_info *buf)
 149{
 150	struct mtd_part *part = mtd_to_part(mtd);
 151	return part->parent->_get_fact_prot_info(part->parent, len, retlen,
 152						 buf);
 153}
 154
 155static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
 156		size_t *retlen, const u_char *buf)
 157{
 158	struct mtd_part *part = mtd_to_part(mtd);
 159	return part->parent->_write(part->parent, to + part->offset, len,
 160				    retlen, buf);
 161}
 162
 163static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
 164		size_t *retlen, const u_char *buf)
 165{
 166	struct mtd_part *part = mtd_to_part(mtd);
 167	return part->parent->_panic_write(part->parent, to + part->offset, len,
 168					  retlen, buf);
 169}
 170
 171static int part_write_oob(struct mtd_info *mtd, loff_t to,
 172		struct mtd_oob_ops *ops)
 173{
 174	struct mtd_part *part = mtd_to_part(mtd);
 175
 176	return part->parent->_write_oob(part->parent, to + part->offset, ops);
 177}
 178
 179static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
 180		size_t len, size_t *retlen, u_char *buf)
 181{
 182	struct mtd_part *part = mtd_to_part(mtd);
 183	return part->parent->_write_user_prot_reg(part->parent, from, len,
 184						  retlen, buf);
 185}
 186
 187static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
 188		size_t len)
 189{
 190	struct mtd_part *part = mtd_to_part(mtd);
 191	return part->parent->_lock_user_prot_reg(part->parent, from, len);
 192}
 193
 194static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
 195		unsigned long count, loff_t to, size_t *retlen)
 196{
 197	struct mtd_part *part = mtd_to_part(mtd);
 198	return part->parent->_writev(part->parent, vecs, count,
 199				     to + part->offset, retlen);
 200}
 201
 202static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
 203{
 204	struct mtd_part *part = mtd_to_part(mtd);
 205	int ret;
 206
 207	instr->addr += part->offset;
 208	ret = part->parent->_erase(part->parent, instr);
 209	if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
 210		instr->fail_addr -= part->offset;
 211	instr->addr -= part->offset;
 212
 213	return ret;
 214}
 215
 216static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 217{
 218	struct mtd_part *part = mtd_to_part(mtd);
 219	return part->parent->_lock(part->parent, ofs + part->offset, len);
 220}
 221
 222static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 223{
 224	struct mtd_part *part = mtd_to_part(mtd);
 225	return part->parent->_unlock(part->parent, ofs + part->offset, len);
 226}
 227
 228static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 229{
 230	struct mtd_part *part = mtd_to_part(mtd);
 231	return part->parent->_is_locked(part->parent, ofs + part->offset, len);
 232}
 233
 234static void part_sync(struct mtd_info *mtd)
 235{
 236	struct mtd_part *part = mtd_to_part(mtd);
 237	part->parent->_sync(part->parent);
 238}
 239
 240static int part_suspend(struct mtd_info *mtd)
 241{
 242	struct mtd_part *part = mtd_to_part(mtd);
 243	return part->parent->_suspend(part->parent);
 244}
 245
 246static void part_resume(struct mtd_info *mtd)
 247{
 248	struct mtd_part *part = mtd_to_part(mtd);
 249	part->parent->_resume(part->parent);
 250}
 251
 252static int part_block_isreserved(struct mtd_info *mtd, loff_t ofs)
 253{
 254	struct mtd_part *part = mtd_to_part(mtd);
 255	ofs += part->offset;
 256	return part->parent->_block_isreserved(part->parent, ofs);
 257}
 258
 259static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
 260{
 261	struct mtd_part *part = mtd_to_part(mtd);
 262	ofs += part->offset;
 263	return part->parent->_block_isbad(part->parent, ofs);
 264}
 265
 266static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
 267{
 268	struct mtd_part *part = mtd_to_part(mtd);
 269	int res;
 270
 271	ofs += part->offset;
 272	res = part->parent->_block_markbad(part->parent, ofs);
 273	if (!res)
 274		mtd->ecc_stats.badblocks++;
 275	return res;
 276}
 277
 278static int part_get_device(struct mtd_info *mtd)
 279{
 280	struct mtd_part *part = mtd_to_part(mtd);
 281	return part->parent->_get_device(part->parent);
 282}
 283
 284static void part_put_device(struct mtd_info *mtd)
 285{
 286	struct mtd_part *part = mtd_to_part(mtd);
 287	part->parent->_put_device(part->parent);
 288}
 289
 290static int part_ooblayout_ecc(struct mtd_info *mtd, int section,
 291			      struct mtd_oob_region *oobregion)
 292{
 293	struct mtd_part *part = mtd_to_part(mtd);
 294
 295	return mtd_ooblayout_ecc(part->parent, section, oobregion);
 296}
 297
 298static int part_ooblayout_free(struct mtd_info *mtd, int section,
 299			       struct mtd_oob_region *oobregion)
 300{
 301	struct mtd_part *part = mtd_to_part(mtd);
 302
 303	return mtd_ooblayout_free(part->parent, section, oobregion);
 304}
 305
 306static const struct mtd_ooblayout_ops part_ooblayout_ops = {
 307	.ecc = part_ooblayout_ecc,
 308	.free = part_ooblayout_free,
 309};
 310
 311static int part_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len)
 312{
 313	struct mtd_part *part = mtd_to_part(mtd);
 314
 315	return part->parent->_max_bad_blocks(part->parent,
 316					     ofs + part->offset, len);
 317}
 318
 319static inline void free_partition(struct mtd_part *p)
 320{
 321	kfree(p->mtd.name);
 322	kfree(p);
 323}
 324
 325/**
 326 * mtd_parse_part - parse MTD partition looking for subpartitions
 327 *
 328 * @slave: part that is supposed to be a container and should be parsed
 329 * @types: NULL-terminated array with names of partition parsers to try
 330 *
 331 * Some partitions act as containers with extra subpartitions (volumes).
 332 * Such containers come in various formats. This function tries to use the
 333 * specified parsers to analyze the given partition and, on success,
 334 * registers the subpartitions it finds.
 335 */
 336static int mtd_parse_part(struct mtd_part *slave, const char *const *types)
 337{
 338	struct mtd_partitions parsed;
 339	int err;
 340
 341	err = parse_mtd_partitions(&slave->mtd, types, &parsed, NULL);
 342	if (err)
 343		return err;
 344	else if (!parsed.nr_parts)
 345		return -ENOENT;
 346
 347	err = add_mtd_partitions(&slave->mtd, parsed.parts, parsed.nr_parts);
 348
 349	mtd_part_parser_cleanup(&parsed);
 350
 351	return err;
 352}
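/*
 * Sketch (hypothetical parser name, not from the original file): a
 * partition declares itself a container by listing subpartition parsers in
 * its ->types array; add_mtd_partitions() below calls mtd_parse_part() for
 * such entries.
 *
 *	static const char * const fw_parsers[] = { "trx", NULL };
 *	static const struct mtd_partition parts[] = {
 *		{ .name = "firmware", .offset = 0, .size = 8 * 1024 * 1024,
 *		  .types = fw_parsers },
 *	};
 */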
 353
 354static struct mtd_part *allocate_partition(struct mtd_info *parent,
 355			const struct mtd_partition *part, int partno,
 356			uint64_t cur_offset)
 357{
 358	int wr_alignment = (parent->flags & MTD_NO_ERASE) ? parent->writesize :
 359							    parent->erasesize;
 360	struct mtd_part *slave;
 361	u32 remainder;
 362	char *name;
 363	u64 tmp;
 364
 365	/* allocate the partition structure */
 366	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
 367	name = kstrdup(part->name, GFP_KERNEL);
 368	if (!name || !slave) {
 369		printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n",
 370		       parent->name);
 371		kfree(name);
 372		kfree(slave);
 373		return ERR_PTR(-ENOMEM);
 374	}
 375
 376	/* set up the MTD object for this partition */
 377	slave->mtd.type = parent->type;
 378	slave->mtd.flags = parent->flags & ~part->mask_flags;
 379	slave->mtd.size = part->size;
 380	slave->mtd.writesize = parent->writesize;
 381	slave->mtd.writebufsize = parent->writebufsize;
 382	slave->mtd.oobsize = parent->oobsize;
 383	slave->mtd.oobavail = parent->oobavail;
 384	slave->mtd.subpage_sft = parent->subpage_sft;
 385	slave->mtd.pairing = parent->pairing;
 386
 387	slave->mtd.name = name;
 388	slave->mtd.owner = parent->owner;
 389
 390	/* NOTE: Historically, we didn't arrange MTDs as a tree out of
 391	 * concern for showing the same data in multiple partitions.
 392	 * However, it is very useful to have the master node present,
 393	 * so the MTD_PARTITIONED_MASTER option allows that. The master
 394	 * will have device nodes etc only if this is set, so make the
 395	 * parent conditional on that option. Note, this is a way to
 396	 * distinguish between the master and the partition in sysfs.
 397	 */
 398	slave->mtd.dev.parent = IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) || mtd_is_partition(parent) ?
 399				&parent->dev :
 400				parent->dev.parent;
 401	slave->mtd.dev.of_node = part->of_node;
 402
 403	if (parent->_read)
 404		slave->mtd._read = part_read;
 405	if (parent->_write)
 406		slave->mtd._write = part_write;
 407
 408	if (parent->_panic_write)
 409		slave->mtd._panic_write = part_panic_write;
 410
 411	if (parent->_point && parent->_unpoint) {
 412		slave->mtd._point = part_point;
 413		slave->mtd._unpoint = part_unpoint;
 414	}
 415
 416	if (parent->_read_oob)
 417		slave->mtd._read_oob = part_read_oob;
 418	if (parent->_write_oob)
 419		slave->mtd._write_oob = part_write_oob;
 420	if (parent->_read_user_prot_reg)
 421		slave->mtd._read_user_prot_reg = part_read_user_prot_reg;
 422	if (parent->_read_fact_prot_reg)
 423		slave->mtd._read_fact_prot_reg = part_read_fact_prot_reg;
 424	if (parent->_write_user_prot_reg)
 425		slave->mtd._write_user_prot_reg = part_write_user_prot_reg;
 426	if (parent->_lock_user_prot_reg)
 427		slave->mtd._lock_user_prot_reg = part_lock_user_prot_reg;
 428	if (parent->_get_user_prot_info)
 429		slave->mtd._get_user_prot_info = part_get_user_prot_info;
 430	if (parent->_get_fact_prot_info)
 431		slave->mtd._get_fact_prot_info = part_get_fact_prot_info;
 432	if (parent->_sync)
 433		slave->mtd._sync = part_sync;
 434	if (!partno && !parent->dev.class && parent->_suspend &&
 435	    parent->_resume) {
 436		slave->mtd._suspend = part_suspend;
 437		slave->mtd._resume = part_resume;
 438	}
 439	if (parent->_writev)
 440		slave->mtd._writev = part_writev;
 441	if (parent->_lock)
 442		slave->mtd._lock = part_lock;
 443	if (parent->_unlock)
 444		slave->mtd._unlock = part_unlock;
 445	if (parent->_is_locked)
 446		slave->mtd._is_locked = part_is_locked;
 447	if (parent->_block_isreserved)
 448		slave->mtd._block_isreserved = part_block_isreserved;
 449	if (parent->_block_isbad)
 450		slave->mtd._block_isbad = part_block_isbad;
 451	if (parent->_block_markbad)
 452		slave->mtd._block_markbad = part_block_markbad;
 453	if (parent->_max_bad_blocks)
 454		slave->mtd._max_bad_blocks = part_max_bad_blocks;
 455
 456	if (parent->_get_device)
 457		slave->mtd._get_device = part_get_device;
 458	if (parent->_put_device)
 459		slave->mtd._put_device = part_put_device;
 460
 461	slave->mtd._erase = part_erase;
 462	slave->parent = parent;
 463	slave->offset = part->offset;
 464
 465	if (slave->offset == MTDPART_OFS_APPEND)
 466		slave->offset = cur_offset;
 467	if (slave->offset == MTDPART_OFS_NXTBLK) {
 468		tmp = cur_offset;
 469		slave->offset = cur_offset;
 470		remainder = do_div(tmp, wr_alignment);
 471		if (remainder) {
 472			slave->offset += wr_alignment - remainder;
 473			printk(KERN_NOTICE "Moving partition %d: "
 474			       "0x%012llx -> 0x%012llx\n", partno,
 475			       (unsigned long long)cur_offset, (unsigned long long)slave->offset);
 476		}
 477	}
 478	if (slave->offset == MTDPART_OFS_RETAIN) {
 479		slave->offset = cur_offset;
 480		if (parent->size - slave->offset >= slave->mtd.size) {
 481			slave->mtd.size = parent->size - slave->offset
 482							- slave->mtd.size;
 483		} else {
 484			printk(KERN_ERR "mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n",
 485				part->name, parent->size - slave->offset,
 486				slave->mtd.size);
 487			/* register to preserve ordering */
 488			goto out_register;
 489		}
 490	}
 491	if (slave->mtd.size == MTDPART_SIZ_FULL)
 492		slave->mtd.size = parent->size - slave->offset;
 493
 494	printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset,
 495		(unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name);
 496
 497	/* let's do some sanity checks */
 498	if (slave->offset >= parent->size) {
 499		/* let's register it anyway to preserve ordering */
 500		slave->offset = 0;
 501		slave->mtd.size = 0;
 502		printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
 503			part->name);
 504		goto out_register;
 505	}
 506	if (slave->offset + slave->mtd.size > parent->size) {
 507		slave->mtd.size = parent->size - slave->offset;
 508		printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n",
 509			part->name, parent->name, (unsigned long long)slave->mtd.size);
 510	}
 511	if (parent->numeraseregions > 1) {
 512		/* Deal with variable erase size stuff */
 513		int i, max = parent->numeraseregions;
 514		u64 end = slave->offset + slave->mtd.size;
 515		struct mtd_erase_region_info *regions = parent->eraseregions;
 516
 517		/* Find the first erase region which is part of this
 518		 * partition. */
 519		for (i = 0; i < max && regions[i].offset <= slave->offset; i++)
 520			;
 521		/* The loop searched for the region _behind_ the first one */
 522		if (i > 0)
 523			i--;
 524
 525		/* Pick biggest erasesize */
 526		for (; i < max && regions[i].offset < end; i++) {
 527			if (slave->mtd.erasesize < regions[i].erasesize) {
 528				slave->mtd.erasesize = regions[i].erasesize;
 529			}
 530		}
 531		BUG_ON(slave->mtd.erasesize == 0);
 532	} else {
 533		/* Single erase size */
 534		slave->mtd.erasesize = parent->erasesize;
 535	}
 536
 537	/*
 538	 * Slave erasesize might differ from the master one if the master
 539	 * exposes several regions with different erasesize. Adjust
 540	 * wr_alignment accordingly.
 541	 */
 542	if (!(slave->mtd.flags & MTD_NO_ERASE))
 543		wr_alignment = slave->mtd.erasesize;
 544
 545	tmp = slave->offset;
 546	remainder = do_div(tmp, wr_alignment);
 547	if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) {
 548		/* Doesn't start on a boundary of major erase size */
 549		/* FIXME: Let it be writable if it is on a boundary of
 550		 * _minor_ erase size though */
 551		slave->mtd.flags &= ~MTD_WRITEABLE;
 552		printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase/write block boundary -- force read-only\n",
 553			part->name);
 554	}
 555
 556	tmp = slave->mtd.size;
 557	remainder = do_div(tmp, wr_alignment);
 558	if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) {
 559		slave->mtd.flags &= ~MTD_WRITEABLE;
 560		printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase/write block -- force read-only\n",
 561			part->name);
 562	}
 563
 564	mtd_set_ooblayout(&slave->mtd, &part_ooblayout_ops);
 565	slave->mtd.ecc_step_size = parent->ecc_step_size;
 566	slave->mtd.ecc_strength = parent->ecc_strength;
 567	slave->mtd.bitflip_threshold = parent->bitflip_threshold;
 568
 569	if (parent->_block_isbad) {
 570		uint64_t offs = 0;
 571
 572		while (offs < slave->mtd.size) {
 573			if (mtd_block_isreserved(parent, offs + slave->offset))
 574				slave->mtd.ecc_stats.bbtblocks++;
 575			else if (mtd_block_isbad(parent, offs + slave->offset))
 576				slave->mtd.ecc_stats.badblocks++;
 577			offs += slave->mtd.erasesize;
 578		}
 579	}
 580
 581out_register:
 582	return slave;
 583}
 584
 585static ssize_t mtd_partition_offset_show(struct device *dev,
 586		struct device_attribute *attr, char *buf)
 587{
 588	struct mtd_info *mtd = dev_get_drvdata(dev);
 589	struct mtd_part *part = mtd_to_part(mtd);
 590	return snprintf(buf, PAGE_SIZE, "%lld\n", part->offset);
 591}
 592
 593static DEVICE_ATTR(offset, S_IRUGO, mtd_partition_offset_show, NULL);
 594
 595static const struct attribute *mtd_partition_attrs[] = {
 596	&dev_attr_offset.attr,
 597	NULL
 598};
 599
 600static int mtd_add_partition_attrs(struct mtd_part *new)
 601{
 602	int ret = sysfs_create_files(&new->mtd.dev.kobj, mtd_partition_attrs);
 603	if (ret)
 604		printk(KERN_WARNING
 605		       "mtd: failed to create partition attrs, err=%d\n", ret);
 606	return ret;
 607}
 608
 609int mtd_add_partition(struct mtd_info *parent, const char *name,
 610		      long long offset, long long length)
 611{
 612	struct mtd_partition part;
 613	struct mtd_part *new;
 614	int ret = 0;
 615
 616	/* the direct offset is expected */
 617	if (offset == MTDPART_OFS_APPEND ||
 618	    offset == MTDPART_OFS_NXTBLK)
 619		return -EINVAL;
 620
 621	if (length == MTDPART_SIZ_FULL)
 622		length = parent->size - offset;
 623
 624	if (length <= 0)
 625		return -EINVAL;
 626
 627	memset(&part, 0, sizeof(part));
 628	part.name = name;
 629	part.size = length;
 630	part.offset = offset;
 631
 632	new = allocate_partition(parent, &part, -1, offset);
 633	if (IS_ERR(new))
 634		return PTR_ERR(new);
 635
 636	mutex_lock(&mtd_partitions_mutex);
 637	list_add(&new->list, &mtd_partitions);
 638	mutex_unlock(&mtd_partitions_mutex);
 639
 640	add_mtd_device(&new->mtd);
 641
 642	mtd_add_partition_attrs(new);
 643
 644	return ret;
 645}
 646EXPORT_SYMBOL_GPL(mtd_add_partition);
 647
 648/**
 649 * __mtd_del_partition - delete MTD partition
 650 *
 651 * @priv: internal MTD struct for partition to be deleted
 652 *
 653 * This function must be called with the partitions mutex locked.
 654 */
 655static int __mtd_del_partition(struct mtd_part *priv)
 656{
 657	struct mtd_part *child, *next;
 658	int err;
 659
 660	list_for_each_entry_safe(child, next, &mtd_partitions, list) {
 661		if (child->parent == &priv->mtd) {
 662			err = __mtd_del_partition(child);
 663			if (err)
 664				return err;
 665		}
 666	}
 667
 668	sysfs_remove_files(&priv->mtd.dev.kobj, mtd_partition_attrs);
 669
 670	err = del_mtd_device(&priv->mtd);
 671	if (err)
 672		return err;
 673
 674	list_del(&priv->list);
 675	free_partition(priv);
 676
 677	return 0;
 678}
 679
 680/*
 681 * This function unregisters and destroys all slave MTD objects which are
 682 * attached to the given MTD object.
 683 */
 684int del_mtd_partitions(struct mtd_info *mtd)
 685{
 686	struct mtd_part *slave, *next;
 687	int ret, err = 0;
 688
 689	mutex_lock(&mtd_partitions_mutex);
 690	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
 691		if (slave->parent == mtd) {
 692			ret = __mtd_del_partition(slave);
 693			if (ret < 0)
 694				err = ret;
 695		}
 696	mutex_unlock(&mtd_partitions_mutex);
 697
 698	return err;
 699}
 700
 701int mtd_del_partition(struct mtd_info *mtd, int partno)
 702{
 703	struct mtd_part *slave, *next;
 704	int ret = -EINVAL;
 705
 706	mutex_lock(&mtd_partitions_mutex);
 707	list_for_each_entry_safe(slave, next, &mtd_partitions, list)
 708		if ((slave->parent == mtd) &&
 709		    (slave->mtd.index == partno)) {
 710			ret = __mtd_del_partition(slave);
 711			break;
 712		}
 713	mutex_unlock(&mtd_partitions_mutex);
 714
 715	return ret;
 716}
 717EXPORT_SYMBOL_GPL(mtd_del_partition);
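/*
 * Usage sketch (hypothetical values; the "..." stands for intervening
 * code): run-time partitions are removed by MTD index, pairing with
 * mtd_add_partition() above.
 *
 *	err = mtd_add_partition(parent, "scratch", 0x100000, 0x40000);
 *	...
 *	err = mtd_del_partition(parent, partno);
 */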
 718
 719/*
 720 * This function, given a master MTD object and a partition table, creates
 721 * and registers slave MTD objects which are bound to the master according to
 722 * the partition definitions.
 723 *
 724 * For historical reasons, this function's caller only registers the master
 725 * if the MTD_PARTITIONED_MASTER config option is set.
 726 */
 727
 728int add_mtd_partitions(struct mtd_info *master,
 729		       const struct mtd_partition *parts,
 730		       int nbparts)
 731{
 732	struct mtd_part *slave;
 733	uint64_t cur_offset = 0;
 734	int i;
 735
 736	printk(KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);
 737
 738	for (i = 0; i < nbparts; i++) {
 739		slave = allocate_partition(master, parts + i, i, cur_offset);
 740		if (IS_ERR(slave)) {
 741			del_mtd_partitions(master);
 742			return PTR_ERR(slave);
 743		}
 744
 745		mutex_lock(&mtd_partitions_mutex);
 746		list_add(&slave->list, &mtd_partitions);
 747		mutex_unlock(&mtd_partitions_mutex);
 748
 749		add_mtd_device(&slave->mtd);
 750		mtd_add_partition_attrs(slave);
 751		if (parts[i].types)
 752			mtd_parse_part(slave, parts[i].types);
 753
 754		cur_offset = slave->offset + slave->mtd.size;
 755	}
 756
 757	return 0;
 758}
 759
 760static DEFINE_SPINLOCK(part_parser_lock);
 761static LIST_HEAD(part_parsers);
 762
 763static struct mtd_part_parser *mtd_part_parser_get(const char *name)
 764{
 765	struct mtd_part_parser *p, *ret = NULL;
 766
 767	spin_lock(&part_parser_lock);
 768
 769	list_for_each_entry(p, &part_parsers, list)
 770		if (!strcmp(p->name, name) && try_module_get(p->owner)) {
 771			ret = p;
 772			break;
 773		}
 774
 775	spin_unlock(&part_parser_lock);
 776
 777	return ret;
 778}
 779
 780static inline void mtd_part_parser_put(const struct mtd_part_parser *p)
 781{
 782	module_put(p->owner);
 783}
 784
 785/*
 786 * Many partition parsers just expected the core to kfree() all their data in
 787 * one chunk. Do that by default.
 788 */
 789static void mtd_part_parser_cleanup_default(const struct mtd_partition *pparts,
 790					    int nr_parts)
 791{
 792	kfree(pparts);
 793}
 794
 795int __register_mtd_parser(struct mtd_part_parser *p, struct module *owner)
 796{
 797	p->owner = owner;
 798
 799	if (!p->cleanup)
 800		p->cleanup = &mtd_part_parser_cleanup_default;
 801
 802	spin_lock(&part_parser_lock);
 803	list_add(&p->list, &part_parsers);
 804	spin_unlock(&part_parser_lock);
 805
 806	return 0;
 807}
 808EXPORT_SYMBOL_GPL(__register_mtd_parser);
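/*
 * Sketch of a minimal parser (hypothetical name; parse_fn body elided):
 * register_mtd_parser() is a macro around __register_mtd_parser() that
 * passes THIS_MODULE as @owner.
 *
 *	static int myfmt_parse(struct mtd_info *master,
 *			       const struct mtd_partition **pparts,
 *			       struct mtd_part_parser_data *data)
 *	{
 *		...
 *	}
 *
 *	static struct mtd_part_parser myfmt_parser = {
 *		.name = "myfmt",
 *		.parse_fn = myfmt_parse,
 *	};
 *
 *	register_mtd_parser(&myfmt_parser);
 */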
 809
 810void deregister_mtd_parser(struct mtd_part_parser *p)
 811{
 812	spin_lock(&part_parser_lock);
 813	list_del(&p->list);
 814	spin_unlock(&part_parser_lock);
 815}
 816EXPORT_SYMBOL_GPL(deregister_mtd_parser);
 817
 818/*
 819 * Do not forget to update 'parse_mtd_partitions()' kerneldoc comment if you
 820 * are changing this array!
 821 */
 822static const char * const default_mtd_part_types[] = {
 823	"cmdlinepart",
 824	"ofpart",
 825	NULL
 826};
 827
 828static int mtd_part_do_parse(struct mtd_part_parser *parser,
 829			     struct mtd_info *master,
 830			     struct mtd_partitions *pparts,
 831			     struct mtd_part_parser_data *data)
 832{
 833	int ret;
 834
 835	ret = (*parser->parse_fn)(master, &pparts->parts, data);
 836	pr_debug("%s: parser %s: %i\n", master->name, parser->name, ret);
 837	if (ret <= 0)
 838		return ret;
 839
 840	pr_notice("%d %s partitions found on MTD device %s\n", ret,
 841		  parser->name, master->name);
 842
 843	pparts->nr_parts = ret;
 844	pparts->parser = parser;
 845
 846	return ret;
 847}
 848
 849/**
 850 * mtd_part_get_compatible_parser - find MTD parser by a compatible string
 851 *
 852 * @compat: compatible string describing partitions in a device tree
 853 *
 854 * MTD parsers can specify supported partitions by providing a table of
 855 * compatibility strings. This function finds a parser that advertises support
 856 * for a passed value of "compatible".
 857 */
 858static struct mtd_part_parser *mtd_part_get_compatible_parser(const char *compat)
 859{
 860	struct mtd_part_parser *p, *ret = NULL;
 861
 862	spin_lock(&part_parser_lock);
 863
 864	list_for_each_entry(p, &part_parsers, list) {
 865		const struct of_device_id *matches;
 866
 867		matches = p->of_match_table;
 868		if (!matches)
 869			continue;
 870
 871		for (; matches->compatible[0]; matches++) {
 872			if (!strcmp(matches->compatible, compat) &&
 873			    try_module_get(p->owner)) {
 874				ret = p;
 875				break;
 876			}
 877		}
 878
 879		if (ret)
 880			break;
 881	}
 882
 883	spin_unlock(&part_parser_lock);
 884
 885	return ret;
 886}
 887
 888static int mtd_part_of_parse(struct mtd_info *master,
 889			     struct mtd_partitions *pparts)
 890{
 891	struct mtd_part_parser *parser;
 892	struct device_node *np;
 893	struct property *prop;
 894	const char *compat;
 895	const char *fixed = "fixed-partitions";
 896	int ret, err = 0;
 897
 898	np = of_get_child_by_name(mtd_get_of_node(master), "partitions");
 899	of_property_for_each_string(np, "compatible", prop, compat) {
 900		parser = mtd_part_get_compatible_parser(compat);
 901		if (!parser)
 902			continue;
 903		ret = mtd_part_do_parse(parser, master, pparts, NULL);
 904		if (ret > 0) {
 905			of_node_put(np);
 906			return ret;
 907		}
 908		mtd_part_parser_put(parser);
 909		if (ret < 0 && !err)
 910			err = ret;
 911	}
 912	of_node_put(np);
 913
 914	/*
 915	 * For backward compatibility we have to try the "fixed-partitions"
 916 * parser. It supports the old DT format, with partitions specified as
 917 * direct subnodes of a flash device DT node, without any compatible
 918 * string we could match.
 919	 */
 920	parser = mtd_part_parser_get(fixed);
 921	if (!parser && !request_module("%s", fixed))
 922		parser = mtd_part_parser_get(fixed);
 923	if (parser) {
 924		ret = mtd_part_do_parse(parser, master, pparts, NULL);
 925		if (ret > 0)
 926			return ret;
 927		mtd_part_parser_put(parser);
 928		if (ret < 0 && !err)
 929			err = ret;
 930	}
 931
 932	return err;
 933}
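/*
 * Example DT layout matched here (illustrative; see the DT bindings
 * documentation for the authoritative format):
 *
 *	flash@0 {
 *		partitions {
 *			compatible = "fixed-partitions";
 *			#address-cells = <1>;
 *			#size-cells = <1>;
 *
 *			partition@0 {
 *				label = "u-boot";
 *				reg = <0x0 0x40000>;
 *			};
 *		};
 *	};
 */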
 934
 935/**
 936 * parse_mtd_partitions - parse MTD partitions
 937 * @master: the master partition (describes whole MTD device)
 938 * @types: names of partition parsers to try or %NULL
 939 * @pparts: info about partitions found is returned here
 940 * @data: MTD partition parser-specific data
 941 *
 942 * This function tries to find partitions on MTD device @master. It uses MTD
 943 * partition parsers, specified in @types. However, if @types is %NULL, then
 944 * the default list of parsers is used. The default list contains only the
 945 * "cmdlinepart" and "ofpart" parsers at the moment.
 946 * Note: If there is more than one parser in @types, the kernel only takes the
 947 * partitions parsed out by the first parser.
 948 *
 949 * This function may return:
 950 * o a negative error code in case of failure
 951 * o zero otherwise, and @pparts will describe the partitions, number of
 952 *   partitions, and the parser which parsed them. Caller must release
 953 *   resources with mtd_part_parser_cleanup() when finished with the returned
 954 *   data.
 955 */
 956int parse_mtd_partitions(struct mtd_info *master, const char *const *types,
 957			 struct mtd_partitions *pparts,
 958			 struct mtd_part_parser_data *data)
 959{
 960	struct mtd_part_parser *parser;
 961	int ret, err = 0;
 962
 963	if (!types)
 964		types = default_mtd_part_types;
 965
 966	for ( ; *types; types++) {
 967		/*
 968		 * ofpart is a special type that means OF partitioning info
 969		 * should be used. It requires slightly different logic, so it is
 970		 * handled in a separate function.
 971		 */
 972		if (!strcmp(*types, "ofpart")) {
 973			ret = mtd_part_of_parse(master, pparts);
 974		} else {
 975			pr_debug("%s: parsing partitions %s\n", master->name,
 976				 *types);
 977			parser = mtd_part_parser_get(*types);
 978			if (!parser && !request_module("%s", *types))
 979				parser = mtd_part_parser_get(*types);
 980			pr_debug("%s: got parser %s\n", master->name,
 981				parser ? parser->name : NULL);
 982			if (!parser)
 983				continue;
 984			ret = mtd_part_do_parse(parser, master, pparts, data);
 985			if (ret <= 0)
 986				mtd_part_parser_put(parser);
 987		}
 988		/* Found partitions! */
 989		if (ret > 0)
 990			return 0;
 991		/*
 992		 * Stash the first error we see; only report it if no parser
 993		 * succeeds
 994		 */
 995		if (ret < 0 && !err)
 996			err = ret;
 997	}
 998	return err;
 999}
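/*
 * Usage sketch (hypothetical caller, not from the original file): unlike
 * the older API that returned a bare partition array, results now come
 * back in a struct mtd_partitions and must be released with
 * mtd_part_parser_cleanup().
 *
 *	struct mtd_partitions parsed = { };
 *	int err;
 *
 *	err = parse_mtd_partitions(master, NULL, &parsed, NULL);
 *	if (!err && parsed.nr_parts)
 *		add_mtd_partitions(master, parsed.parts, parsed.nr_parts);
 *	mtd_part_parser_cleanup(&parsed);
 */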
1000
1001void mtd_part_parser_cleanup(struct mtd_partitions *parts)
1002{
1003	const struct mtd_part_parser *parser;
1004
1005	if (!parts)
1006		return;
1007
1008	parser = parts->parser;
1009	if (parser) {
1010		if (parser->cleanup)
1011			parser->cleanup(parts->parts, parts->nr_parts);
1012
1013		mtd_part_parser_put(parser);
1014	}
1015}
1016
1017int mtd_is_partition(const struct mtd_info *mtd)
1018{
1019	struct mtd_part *part;
1020	int ispart = 0;
1021
1022	mutex_lock(&mtd_partitions_mutex);
1023	list_for_each_entry(part, &mtd_partitions, list)
1024		if (&part->mtd == mtd) {
1025			ispart = 1;
1026			break;
1027		}
1028	mutex_unlock(&mtd_partitions_mutex);
1029
1030	return ispart;
1031}
1032EXPORT_SYMBOL_GPL(mtd_is_partition);
1033
1034/* Returns the size of the entire flash chip */
1035uint64_t mtd_get_device_size(const struct mtd_info *mtd)
1036{
1037	if (!mtd_is_partition(mtd))
1038		return mtd->size;
1039
1040	return mtd_get_device_size(mtd_to_part(mtd)->parent);
1041}
1042EXPORT_SYMBOL_GPL(mtd_get_device_size);
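/*
 * Note (illustrative, assumed numbers): because ->parent may itself be a
 * partition once subpartitions are in use, mtd_get_device_size() recurses
 * up to the root flash device. For any partition of a 128MiB chip it thus
 * returns the full 128MiB, while mtd->size stays the partition's own size.
 */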