v6.8
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
   4 */
   5
   6#include <linux/device.h>
   7#include <linux/fs.h>
   8#include <linux/mm.h>
   9#include <linux/err.h>
  10#include <linux/init.h>
  11#include <linux/kernel.h>
  12#include <linux/module.h>
  13#include <linux/slab.h>
  14#include <linux/sched.h>
  15#include <linux/mutex.h>
  16#include <linux/backing-dev.h>
  17#include <linux/compat.h>
  18#include <linux/mount.h>
  19#include <linux/blkpg.h>
  20#include <linux/magic.h>
  21#include <linux/major.h>
  22#include <linux/mtd/mtd.h>
  23#include <linux/mtd/partitions.h>
  24#include <linux/mtd/map.h>
  25
  26#include <linux/uaccess.h>
  27
  28#include "mtdcore.h"
  29
  30/*
  31 * Data structure to hold the pointer to the mtd device as well
  32 * as mode information of various use cases.
  33 */
  34struct mtd_file_info {
  35	struct mtd_info *mtd;
  36	enum mtd_file_modes mode;
  37};
  38
  39static loff_t mtdchar_lseek(struct file *file, loff_t offset, int orig)
  40{
  41	struct mtd_file_info *mfi = file->private_data;
  42	return fixed_size_llseek(file, offset, orig, mfi->mtd->size);
  43}
  44
  45static int mtdchar_open(struct inode *inode, struct file *file)
  46{
  47	int minor = iminor(inode);
  48	int devnum = minor >> 1;
  49	int ret = 0;
  50	struct mtd_info *mtd;
  51	struct mtd_file_info *mfi;
  52
  53	pr_debug("MTD_open\n");
  54
  55	/* You can't open the RO devices RW */
  56	if ((file->f_mode & FMODE_WRITE) && (minor & 1))
  57		return -EACCES;
  58
  59	mtd = get_mtd_device(NULL, devnum);
  60
  61	if (IS_ERR(mtd))
  62		return PTR_ERR(mtd);
  63
  64	if (mtd->type == MTD_ABSENT) {
  65		ret = -ENODEV;
  66		goto out1;
  67	}
  68
  69	/* You can't open it RW if it's not a writeable device */
  70	if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
  71		ret = -EACCES;
  72		goto out1;
  73	}
  74
  75	mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
  76	if (!mfi) {
  77		ret = -ENOMEM;
  78		goto out1;
  79	}
  80	mfi->mtd = mtd;
  81	file->private_data = mfi;
  82	return 0;
  83
  84out1:
  85	put_mtd_device(mtd);
  86	return ret;
  87} /* mtdchar_open */
  88
  89/*====================================================================*/
  90
  91static int mtdchar_close(struct inode *inode, struct file *file)
  92{
  93	struct mtd_file_info *mfi = file->private_data;
  94	struct mtd_info *mtd = mfi->mtd;
  95
  96	pr_debug("MTD_close\n");
  97
  98	/* Only sync if opened RW */
  99	if ((file->f_mode & FMODE_WRITE))
 100		mtd_sync(mtd);
 101
 102	put_mtd_device(mtd);
 103	file->private_data = NULL;
 104	kfree(mfi);
 105
 106	return 0;
 107} /* mtdchar_close */
 108
 109/* Back in June 2001, dwmw2 wrote:
 110 *
 111 *   FIXME: This _really_ needs to die. In 2.5, we should lock the
 112 *   userspace buffer down and use it directly with readv/writev.
 113 *
 114 * The implementation below, using mtd_kmalloc_up_to, mitigates
 115 * allocation failures when the system is under low-memory situations
 116 * or if memory is highly fragmented at the cost of reducing the
 117 * performance of the requested transfer due to a smaller buffer size.
 118 *
 119 * A more complex but more memory-efficient implementation based on
 120 * get_user_pages and iovecs to cover extents of those pages is a
 121 * longer-term goal, as intimated by dwmw2 above. However, for the
 122 * write case, this requires yet more complex head and tail transfer
 123 * handling when those head and tail offsets and sizes are such that
 124 * alignment requirements are not met in the NAND subdriver.
 125 */
 126
 127static ssize_t mtdchar_read(struct file *file, char __user *buf, size_t count,
 128			loff_t *ppos)
 129{
 130	struct mtd_file_info *mfi = file->private_data;
 131	struct mtd_info *mtd = mfi->mtd;
 132	size_t retlen;
 133	size_t total_retlen=0;
 134	int ret=0;
 135	int len;
 136	size_t size = count;
 137	char *kbuf;
 138
 139	pr_debug("MTD_read\n");
 140
 141	if (*ppos + count > mtd->size) {
 142		if (*ppos < mtd->size)
 143			count = mtd->size - *ppos;
 144		else
 145			count = 0;
 146	}
 147
 148	if (!count)
 149		return 0;
 150
 151	kbuf = mtd_kmalloc_up_to(mtd, &size);
 152	if (!kbuf)
 153		return -ENOMEM;
 154
 155	while (count) {
 156		len = min_t(size_t, count, size);
 157
 158		switch (mfi->mode) {
 159		case MTD_FILE_MODE_OTP_FACTORY:
 160			ret = mtd_read_fact_prot_reg(mtd, *ppos, len,
 161						     &retlen, kbuf);
 162			break;
 163		case MTD_FILE_MODE_OTP_USER:
 164			ret = mtd_read_user_prot_reg(mtd, *ppos, len,
 165						     &retlen, kbuf);
 166			break;
 167		case MTD_FILE_MODE_RAW:
 168		{
 169			struct mtd_oob_ops ops = {};
 170
 171			ops.mode = MTD_OPS_RAW;
 172			ops.datbuf = kbuf;
 173			ops.oobbuf = NULL;
 174			ops.len = len;
 175
 176			ret = mtd_read_oob(mtd, *ppos, &ops);
 177			retlen = ops.retlen;
 178			break;
 179		}
 180		default:
 181			ret = mtd_read(mtd, *ppos, len, &retlen, kbuf);
 182		}
 183		/* NAND returns -EBADMSG on ECC errors, but it returns
 184		 * the data. For our userspace tools it is important
 185		 * to dump areas with ECC errors!
 186		 * For kernel internal usage it also might return -EUCLEAN
 187		 * to signal the caller that a bitflip has occurred and has
 188		 * been corrected by the ECC algorithm.
 189		 * Userspace software which accesses NAND this way
 190		 * must be aware of the fact that it deals with NAND
 191		 */
 192		if (!ret || mtd_is_bitflip_or_eccerr(ret)) {
 193			*ppos += retlen;
 194			if (copy_to_user(buf, kbuf, retlen)) {
 195				kfree(kbuf);
 196				return -EFAULT;
 197			}
 198			else
 199				total_retlen += retlen;
 200
 201			count -= retlen;
 202			buf += retlen;
 203			if (retlen == 0)
 204				count = 0;
 205		}
 206		else {
 207			kfree(kbuf);
 208			return ret;
 209		}
 210
 211	}
 212
 213	kfree(kbuf);
 214	return total_retlen;
 215} /* mtdchar_read */
 216
 217static ssize_t mtdchar_write(struct file *file, const char __user *buf, size_t count,
 218			loff_t *ppos)
 219{
 220	struct mtd_file_info *mfi = file->private_data;
 221	struct mtd_info *mtd = mfi->mtd;
 222	size_t size = count;
 223	char *kbuf;
 224	size_t retlen;
 225	size_t total_retlen=0;
 226	int ret=0;
 227	int len;
 228
 229	pr_debug("MTD_write\n");
 230
 231	if (*ppos >= mtd->size)
 232		return -ENOSPC;
 233
 234	if (*ppos + count > mtd->size)
 235		count = mtd->size - *ppos;
 236
 237	if (!count)
 238		return 0;
 239
 240	kbuf = mtd_kmalloc_up_to(mtd, &size);
 241	if (!kbuf)
 242		return -ENOMEM;
 243
 244	while (count) {
 245		len = min_t(size_t, count, size);
 246
 247		if (copy_from_user(kbuf, buf, len)) {
 248			kfree(kbuf);
 249			return -EFAULT;
 250		}
 251
 252		switch (mfi->mode) {
 253		case MTD_FILE_MODE_OTP_FACTORY:
 254			ret = -EROFS;
 255			break;
 256		case MTD_FILE_MODE_OTP_USER:
 257			ret = mtd_write_user_prot_reg(mtd, *ppos, len,
 258						      &retlen, kbuf);
 259			break;
 260
 261		case MTD_FILE_MODE_RAW:
 262		{
 263			struct mtd_oob_ops ops = {};
 264
 265			ops.mode = MTD_OPS_RAW;
 266			ops.datbuf = kbuf;
 267			ops.oobbuf = NULL;
 268			ops.ooboffs = 0;
 269			ops.len = len;
 270
 271			ret = mtd_write_oob(mtd, *ppos, &ops);
 272			retlen = ops.retlen;
 273			break;
 274		}
 275
 276		default:
 277			ret = mtd_write(mtd, *ppos, len, &retlen, kbuf);
 278		}
 279
 280		/*
 281		 * Return -ENOSPC only if no data could be written at all.
 282		 * Otherwise just return the number of bytes that actually
 283		 * have been written.
 284		 */
 285		if ((ret == -ENOSPC) && (total_retlen))
 286			break;
 287
 288		if (!ret) {
 289			*ppos += retlen;
 290			total_retlen += retlen;
 291			count -= retlen;
 292			buf += retlen;
 293		}
 294		else {
 295			kfree(kbuf);
 296			return ret;
 297		}
 298	}
 299
 300	kfree(kbuf);
 301	return total_retlen;
 302} /* mtdchar_write */
 303
 304/*======================================================================
 305
 306    IOCTL calls for getting device parameters.
 307
 308======================================================================*/
 309
 310static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
 311{
 312	struct mtd_info *mtd = mfi->mtd;
 313	size_t retlen;
 314
 315	switch (mode) {
 316	case MTD_OTP_FACTORY:
 317		if (mtd_read_fact_prot_reg(mtd, -1, 0, &retlen, NULL) ==
 318				-EOPNOTSUPP)
 319			return -EOPNOTSUPP;
 320
 321		mfi->mode = MTD_FILE_MODE_OTP_FACTORY;
 322		break;
 323	case MTD_OTP_USER:
 324		if (mtd_read_user_prot_reg(mtd, -1, 0, &retlen, NULL) ==
 325				-EOPNOTSUPP)
 326			return -EOPNOTSUPP;
 327
 328		mfi->mode = MTD_FILE_MODE_OTP_USER;
 329		break;
 330	case MTD_OTP_OFF:
 331		mfi->mode = MTD_FILE_MODE_NORMAL;
 332		break;
 333	default:
 334		return -EINVAL;
 335	}
 336
 337	return 0;
 338}
 339
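/*
 * Example (editorial note, not part of the kernel source): userspace picks
 * an OTP mode for this open file descriptor with the OTPSELECT ioctl before
 * reading or writing the protection registers, along these lines:
 *
 *	int mode = MTD_OTP_USER;
 *
 *	if (ioctl(fd, OTPSELECT, &mode) < 0)
 *		perror("OTPSELECT");
 *
 * OTPSELECT and MTD_OTP_USER come from the UAPI header mtd/mtd-abi.h; fd is
 * assumed to be an already-open MTD character device.
 */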
 340static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd,
 341	uint64_t start, uint32_t length, void __user *ptr,
 342	uint32_t __user *retp)
 343{
 344	struct mtd_info *master  = mtd_get_master(mtd);
 345	struct mtd_file_info *mfi = file->private_data;
 346	struct mtd_oob_ops ops = {};
 347	uint32_t retlen;
 348	int ret = 0;
 349
 350	if (length > 4096)
 351		return -EINVAL;
 352
 353	if (!master->_write_oob)
 354		return -EOPNOTSUPP;
 355
 356	ops.ooblen = length;
 357	ops.ooboffs = start & (mtd->writesize - 1);
 358	ops.datbuf = NULL;
 359	ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW :
 360		MTD_OPS_PLACE_OOB;
 361
 362	if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
 363		return -EINVAL;
 364
 365	ops.oobbuf = memdup_user(ptr, length);
 366	if (IS_ERR(ops.oobbuf))
 367		return PTR_ERR(ops.oobbuf);
 368
 369	start &= ~((uint64_t)mtd->writesize - 1);
 370	ret = mtd_write_oob(mtd, start, &ops);
 371
 372	if (ops.oobretlen > 0xFFFFFFFFU)
 373		ret = -EOVERFLOW;
 374	retlen = ops.oobretlen;
 375	if (copy_to_user(retp, &retlen, sizeof(length)))
 376		ret = -EFAULT;
 377
 378	kfree(ops.oobbuf);
 379	return ret;
 380}
 381
 382static int mtdchar_readoob(struct file *file, struct mtd_info *mtd,
 383	uint64_t start, uint32_t length, void __user *ptr,
 384	uint32_t __user *retp)
 385{
 386	struct mtd_file_info *mfi = file->private_data;
 387	struct mtd_oob_ops ops = {};
 388	int ret = 0;
 389
 390	if (length > 4096)
 391		return -EINVAL;
 392
 393	ops.ooblen = length;
 394	ops.ooboffs = start & (mtd->writesize - 1);
 395	ops.datbuf = NULL;
 396	ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW :
 397		MTD_OPS_PLACE_OOB;
 398
 399	if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
 400		return -EINVAL;
 401
 402	ops.oobbuf = kmalloc(length, GFP_KERNEL);
 403	if (!ops.oobbuf)
 404		return -ENOMEM;
 405
 406	start &= ~((uint64_t)mtd->writesize - 1);
 407	ret = mtd_read_oob(mtd, start, &ops);
 408
 409	if (put_user(ops.oobretlen, retp))
 410		ret = -EFAULT;
 411	else if (ops.oobretlen && copy_to_user(ptr, ops.oobbuf,
 412					    ops.oobretlen))
 413		ret = -EFAULT;
 414
 415	kfree(ops.oobbuf);
 416
 417	/*
 418	 * NAND returns -EBADMSG on ECC errors, but it returns the OOB
 419	 * data. For our userspace tools it is important to dump areas
 420	 * with ECC errors!
 421	 * For kernel internal usage it also might return -EUCLEAN
 422	 * to signal the caller that a bitflip has occurred and has
 423	 * been corrected by the ECC algorithm.
 424	 *
 425	 * Note: currently the standard NAND function, nand_read_oob_std,
 426	 * does not calculate ECC for the OOB area, so do not rely on
 427	 * this behavior unless you have replaced it with your own.
 428	 */
 429	if (mtd_is_bitflip_or_eccerr(ret))
 430		return 0;
 431
 432	return ret;
 433}
 434
 435/*
 436 * Copies (and truncates, if necessary) OOB layout information to the
 437 * deprecated layout struct, nand_ecclayout_user. This is necessary only to
 438 * support the deprecated API ioctl ECCGETLAYOUT while allowing all new
 439 * functionality to use mtd_ooblayout_ops flexibly (i.e. mtd_ooblayout_ops
 440 * can describe any kind of OOB layout with almost zero overhead from a
 441 * memory usage point of view).
 442 */
 443static int shrink_ecclayout(struct mtd_info *mtd,
 444			    struct nand_ecclayout_user *to)
 445{
 446	struct mtd_oob_region oobregion;
 447	int i, section = 0, ret;
 448
 449	if (!mtd || !to)
 450		return -EINVAL;
 451
 452	memset(to, 0, sizeof(*to));
 453
 454	to->eccbytes = 0;
 455	for (i = 0; i < MTD_MAX_ECCPOS_ENTRIES;) {
 456		u32 eccpos;
 457
 458		ret = mtd_ooblayout_ecc(mtd, section++, &oobregion);
 459		if (ret < 0) {
 460			if (ret != -ERANGE)
 461				return ret;
 462
 463			break;
 464		}
 465
 466		eccpos = oobregion.offset;
 467		for (; i < MTD_MAX_ECCPOS_ENTRIES &&
 468		       eccpos < oobregion.offset + oobregion.length; i++) {
 469			to->eccpos[i] = eccpos++;
 470			to->eccbytes++;
 471		}
 472	}
 473
 474	for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES; i++) {
 475		ret = mtd_ooblayout_free(mtd, i, &oobregion);
 476		if (ret < 0) {
 477			if (ret != -ERANGE)
 478				return ret;
 479
 480			break;
 481		}
 482
 483		to->oobfree[i].offset = oobregion.offset;
 484		to->oobfree[i].length = oobregion.length;
 485		to->oobavail += to->oobfree[i].length;
 486	}
 487
 488	return 0;
 489}
 490
 491static int get_oobinfo(struct mtd_info *mtd, struct nand_oobinfo *to)
 492{
 493	struct mtd_oob_region oobregion;
 494	int i, section = 0, ret;
 495
 496	if (!mtd || !to)
 497		return -EINVAL;
 498
 499	memset(to, 0, sizeof(*to));
 500
 501	to->eccbytes = 0;
 502	for (i = 0; i < ARRAY_SIZE(to->eccpos);) {
 503		u32 eccpos;
 504
 505		ret = mtd_ooblayout_ecc(mtd, section++, &oobregion);
 506		if (ret < 0) {
 507			if (ret != -ERANGE)
 508				return ret;
 509
 510			break;
 511		}
 512
 513		if (oobregion.length + i > ARRAY_SIZE(to->eccpos))
 514			return -EINVAL;
 515
 516		eccpos = oobregion.offset;
 517		for (; eccpos < oobregion.offset + oobregion.length; i++) {
 518			to->eccpos[i] = eccpos++;
 519			to->eccbytes++;
 520		}
 521	}
 522
 523	for (i = 0; i < 8; i++) {
 524		ret = mtd_ooblayout_free(mtd, i, &oobregion);
 525		if (ret < 0) {
 526			if (ret != -ERANGE)
 527				return ret;
 528
 529			break;
 530		}
 531
 532		to->oobfree[i][0] = oobregion.offset;
 533		to->oobfree[i][1] = oobregion.length;
 534	}
 535
 536	to->useecc = MTD_NANDECC_AUTOPLACE;
 537
 538	return 0;
 539}
 540
 541static int mtdchar_blkpg_ioctl(struct mtd_info *mtd,
 542			       struct blkpg_ioctl_arg *arg)
 543{
 544	struct blkpg_partition p;
 545
 546	if (!capable(CAP_SYS_ADMIN))
 547		return -EPERM;
 548
 549	if (copy_from_user(&p, arg->data, sizeof(p)))
 550		return -EFAULT;
 551
 552	switch (arg->op) {
 553	case BLKPG_ADD_PARTITION:
 554
 555		/* Only the master mtd device may be used to add partitions */
 556		if (mtd_is_partition(mtd))
 557			return -EINVAL;
 558
 559		/* Sanitize user input */
 560		p.devname[BLKPG_DEVNAMELTH - 1] = '\0';
 561
 562		return mtd_add_partition(mtd, p.devname, p.start, p.length);
 563
 564	case BLKPG_DEL_PARTITION:
 565
 566		if (p.pno < 0)
 567			return -EINVAL;
 568
 569		return mtd_del_partition(mtd, p.pno);
 570
 571	default:
 572		return -EINVAL;
 573	}
 574}
 575
 576static void adjust_oob_length(struct mtd_info *mtd, uint64_t start,
 577			      struct mtd_oob_ops *ops)
 578{
 579	uint32_t start_page, end_page;
 580	u32 oob_per_page;
 581
 582	if (ops->len == 0 || ops->ooblen == 0)
 583		return;
 584
 585	start_page = mtd_div_by_ws(start, mtd);
 586	end_page = mtd_div_by_ws(start + ops->len - 1, mtd);
 587	oob_per_page = mtd_oobavail(mtd, ops);
 588
 589	ops->ooblen = min_t(size_t, ops->ooblen,
 590			    (end_page - start_page + 1) * oob_per_page);
 591}
 592
 593static noinline_for_stack int
 594mtdchar_write_ioctl(struct mtd_info *mtd, struct mtd_write_req __user *argp)
 595{
 596	struct mtd_info *master = mtd_get_master(mtd);
 597	struct mtd_write_req req;
 598	const void __user *usr_data, *usr_oob;
 599	uint8_t *datbuf = NULL, *oobbuf = NULL;
 600	size_t datbuf_len, oobbuf_len;
 601	int ret = 0;
 602
 603	if (copy_from_user(&req, argp, sizeof(req)))
 604		return -EFAULT;
 605
 606	usr_data = (const void __user *)(uintptr_t)req.usr_data;
 607	usr_oob = (const void __user *)(uintptr_t)req.usr_oob;
 608
 609	if (!master->_write_oob)
 610		return -EOPNOTSUPP;
 611
 612	if (!usr_data)
 613		req.len = 0;
 614
 615	if (!usr_oob)
 616		req.ooblen = 0;
 617
 618	req.len &= 0xffffffff;
 619	req.ooblen &= 0xffffffff;
 620
 621	if (req.start + req.len > mtd->size)
 622		return -EINVAL;
 623
 624	datbuf_len = min_t(size_t, req.len, mtd->erasesize);
 625	if (datbuf_len > 0) {
 626		datbuf = kvmalloc(datbuf_len, GFP_KERNEL);
 627		if (!datbuf)
 628			return -ENOMEM;
 629	}
 630
 631	oobbuf_len = min_t(size_t, req.ooblen, mtd->erasesize);
 632	if (oobbuf_len > 0) {
 633		oobbuf = kvmalloc(oobbuf_len, GFP_KERNEL);
 634		if (!oobbuf) {
 635			kvfree(datbuf);
 636			return -ENOMEM;
 637		}
 638	}
 639
 640	while (req.len > 0 || (!usr_data && req.ooblen > 0)) {
 641		struct mtd_oob_ops ops = {
 642			.mode = req.mode,
 643			.len = min_t(size_t, req.len, datbuf_len),
 644			.ooblen = min_t(size_t, req.ooblen, oobbuf_len),
 645			.datbuf = datbuf,
 646			.oobbuf = oobbuf,
 647		};
 648
 649		/*
 650		 * Shorten non-page-aligned, eraseblock-sized writes so that
 651		 * the write ends on an eraseblock boundary.  This is necessary
 652		 * for adjust_oob_length() to properly handle non-page-aligned
 653		 * writes.
 654		 */
 655		if (ops.len == mtd->erasesize)
 656			ops.len -= mtd_mod_by_ws(req.start + ops.len, mtd);
 657
 658		/*
 659		 * For writes which are not OOB-only, adjust the amount of OOB
 660		 * data written according to the number of data pages written.
 661		 * This is necessary to prevent OOB data from being skipped
 662		 * over in data+OOB writes requiring multiple mtd_write_oob()
 663		 * calls to be completed.
 664		 */
 665		adjust_oob_length(mtd, req.start, &ops);
 666
 667		if (copy_from_user(datbuf, usr_data, ops.len) ||
 668		    copy_from_user(oobbuf, usr_oob, ops.ooblen)) {
 669			ret = -EFAULT;
 670			break;
 671		}
 672
 673		ret = mtd_write_oob(mtd, req.start, &ops);
 674		if (ret)
 675			break;
 676
 677		req.start += ops.retlen;
 678		req.len -= ops.retlen;
 679		usr_data += ops.retlen;
 680
 681		req.ooblen -= ops.oobretlen;
 682		usr_oob += ops.oobretlen;
 683	}
 684
 685	kvfree(datbuf);
 686	kvfree(oobbuf);
 687
 688	return ret;
 689}
 690
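/*
 * Example (editorial note, not part of the kernel source): a sketch of the
 * MEMWRITE request that mtdchar_write_ioctl() above services. Field names
 * follow struct mtd_write_req in the UAPI header mtd/mtd-abi.h; fd, data_buf,
 * oob_buf and their lengths are assumed to be set up by the caller.
 *
 *	struct mtd_write_req req = {
 *		.start    = 0,			// device offset, page aligned
 *		.len      = data_len,		// bytes of main-area data
 *		.ooblen   = oob_len,		// bytes of OOB data
 *		.usr_data = (uintptr_t)data_buf,
 *		.usr_oob  = (uintptr_t)oob_buf,
 *		.mode     = MTD_OPS_PLACE_OOB,
 *	};
 *
 *	if (ioctl(fd, MEMWRITE, &req) < 0)
 *		perror("MEMWRITE");
 */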
 691static noinline_for_stack int
 692mtdchar_read_ioctl(struct mtd_info *mtd, struct mtd_read_req __user *argp)
 693{
 694	struct mtd_info *master = mtd_get_master(mtd);
 695	struct mtd_read_req req;
 696	void __user *usr_data, *usr_oob;
 697	uint8_t *datbuf = NULL, *oobbuf = NULL;
 698	size_t datbuf_len, oobbuf_len;
 699	size_t orig_len, orig_ooblen;
 700	int ret = 0;
 701
 702	if (copy_from_user(&req, argp, sizeof(req)))
 703		return -EFAULT;
 704
 705	orig_len = req.len;
 706	orig_ooblen = req.ooblen;
 707
 708	usr_data = (void __user *)(uintptr_t)req.usr_data;
 709	usr_oob = (void __user *)(uintptr_t)req.usr_oob;
 710
 711	if (!master->_read_oob)
 712		return -EOPNOTSUPP;
 713
 714	if (!usr_data)
 715		req.len = 0;
 716
 717	if (!usr_oob)
 718		req.ooblen = 0;
 719
 720	req.ecc_stats.uncorrectable_errors = 0;
 721	req.ecc_stats.corrected_bitflips = 0;
 722	req.ecc_stats.max_bitflips = 0;
 723
 724	req.len &= 0xffffffff;
 725	req.ooblen &= 0xffffffff;
 726
 727	if (req.start + req.len > mtd->size) {
 728		ret = -EINVAL;
 729		goto out;
 730	}
 731
 732	datbuf_len = min_t(size_t, req.len, mtd->erasesize);
 733	if (datbuf_len > 0) {
 734		datbuf = kvmalloc(datbuf_len, GFP_KERNEL);
 735		if (!datbuf) {
 736			ret = -ENOMEM;
 737			goto out;
 738		}
 739	}
 740
 741	oobbuf_len = min_t(size_t, req.ooblen, mtd->erasesize);
 742	if (oobbuf_len > 0) {
 743		oobbuf = kvmalloc(oobbuf_len, GFP_KERNEL);
 744		if (!oobbuf) {
 745			ret = -ENOMEM;
 746			goto out;
 747		}
 748	}
 749
 750	while (req.len > 0 || (!usr_data && req.ooblen > 0)) {
 751		struct mtd_req_stats stats;
 752		struct mtd_oob_ops ops = {
 753			.mode = req.mode,
 754			.len = min_t(size_t, req.len, datbuf_len),
 755			.ooblen = min_t(size_t, req.ooblen, oobbuf_len),
 756			.datbuf = datbuf,
 757			.oobbuf = oobbuf,
 758			.stats = &stats,
 759		};
 760
 761		/*
 762		 * Shorten non-page-aligned, eraseblock-sized reads so that the
 763		 * read ends on an eraseblock boundary.  This is necessary in
 764		 * order to prevent OOB data for some pages from being
 765		 * duplicated in the output of non-page-aligned reads requiring
 766		 * multiple mtd_read_oob() calls to be completed.
 767		 */
 768		if (ops.len == mtd->erasesize)
 769			ops.len -= mtd_mod_by_ws(req.start + ops.len, mtd);
 770
 771		ret = mtd_read_oob(mtd, (loff_t)req.start, &ops);
 772
 773		req.ecc_stats.uncorrectable_errors +=
 774			stats.uncorrectable_errors;
 775		req.ecc_stats.corrected_bitflips += stats.corrected_bitflips;
 776		req.ecc_stats.max_bitflips =
 777			max(req.ecc_stats.max_bitflips, stats.max_bitflips);
 778
 779		if (ret && !mtd_is_bitflip_or_eccerr(ret))
 780			break;
 781
 782		if (copy_to_user(usr_data, ops.datbuf, ops.retlen) ||
 783		    copy_to_user(usr_oob, ops.oobbuf, ops.oobretlen)) {
 784			ret = -EFAULT;
 785			break;
 786		}
 787
 788		req.start += ops.retlen;
 789		req.len -= ops.retlen;
 790		usr_data += ops.retlen;
 791
 792		req.ooblen -= ops.oobretlen;
 793		usr_oob += ops.oobretlen;
 794	}
 795
 796	/*
 797	 * As multiple iterations of the above loop (and therefore multiple
 798	 * mtd_read_oob() calls) may be necessary to complete the read request,
 799	 * adjust the final return code to ensure it accounts for all detected
 800	 * ECC errors.
 801	 */
 802	if (!ret || mtd_is_bitflip(ret)) {
 803		if (req.ecc_stats.uncorrectable_errors > 0)
 804			ret = -EBADMSG;
 805		else if (req.ecc_stats.corrected_bitflips > 0)
 806			ret = -EUCLEAN;
 807	}
 808
 809out:
 810	req.len = orig_len - req.len;
 811	req.ooblen = orig_ooblen - req.ooblen;
 812
 813	if (copy_to_user(argp, &req, sizeof(req)))
 814		ret = -EFAULT;
 815
 816	kvfree(datbuf);
 817	kvfree(oobbuf);
 818
 819	return ret;
 820}
 821
 822static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
 823{
 824	struct mtd_file_info *mfi = file->private_data;
 825	struct mtd_info *mtd = mfi->mtd;
 826	struct mtd_info *master = mtd_get_master(mtd);
 827	void __user *argp = (void __user *)arg;
 828	int ret = 0;
 829	struct mtd_info_user info;
 830
 831	pr_debug("MTD_ioctl\n");
 832
 833	/*
 834	 * Check the file mode to require "dangerous" commands to have write
 835	 * permissions.
 836	 */
 837	switch (cmd) {
 838	/* "safe" commands */
 839	case MEMGETREGIONCOUNT:
 840	case MEMGETREGIONINFO:
 841	case MEMGETINFO:
 842	case MEMREADOOB:
 843	case MEMREADOOB64:
 844	case MEMREAD:
 845	case MEMISLOCKED:
 846	case MEMGETOOBSEL:
 847	case MEMGETBADBLOCK:
 848	case OTPSELECT:
 849	case OTPGETREGIONCOUNT:
 850	case OTPGETREGIONINFO:
 851	case ECCGETLAYOUT:
 852	case ECCGETSTATS:
 853	case MTDFILEMODE:
 854	case BLKPG:
 855	case BLKRRPART:
 856		break;
 857
 858	/* "dangerous" commands */
 859	case MEMERASE:
 860	case MEMERASE64:
 861	case MEMLOCK:
 862	case MEMUNLOCK:
 863	case MEMSETBADBLOCK:
 864	case MEMWRITEOOB:
 865	case MEMWRITEOOB64:
 866	case MEMWRITE:
 867	case OTPLOCK:
 868	case OTPERASE:
 869		if (!(file->f_mode & FMODE_WRITE))
 870			return -EPERM;
 871		break;
 872
 873	default:
 874		return -ENOTTY;
 875	}
 876
 877	switch (cmd) {
 878	case MEMGETREGIONCOUNT:
 879		if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
 880			return -EFAULT;
 881		break;
 882
 883	case MEMGETREGIONINFO:
 884	{
 885		uint32_t ur_idx;
 886		struct mtd_erase_region_info *kr;
 887		struct region_info_user __user *ur = argp;
 888
 889		if (get_user(ur_idx, &(ur->regionindex)))
 890			return -EFAULT;
 891
 892		if (ur_idx >= mtd->numeraseregions)
 893			return -EINVAL;
 894
 895		kr = &(mtd->eraseregions[ur_idx]);
 896
 897		if (put_user(kr->offset, &(ur->offset))
 898		    || put_user(kr->erasesize, &(ur->erasesize))
 899		    || put_user(kr->numblocks, &(ur->numblocks)))
 900			return -EFAULT;
 901
 902		break;
 903	}
 904
 905	case MEMGETINFO:
 906		memset(&info, 0, sizeof(info));
 907		info.type	= mtd->type;
 908		info.flags	= mtd->flags;
 909		info.size	= mtd->size;
 910		info.erasesize	= mtd->erasesize;
 911		info.writesize	= mtd->writesize;
 912		info.oobsize	= mtd->oobsize;
 913		/* The below field is obsolete */
 914		info.padding	= 0;
 915		if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
 916			return -EFAULT;
 917		break;
 918
 919	case MEMERASE:
 920	case MEMERASE64:
 921	{
 922		struct erase_info *erase;
 923
 924		erase=kzalloc(sizeof(struct erase_info),GFP_KERNEL);
 925		if (!erase)
 926			ret = -ENOMEM;
 927		else {
 928			if (cmd == MEMERASE64) {
 929				struct erase_info_user64 einfo64;
 930
 931				if (copy_from_user(&einfo64, argp,
 932					    sizeof(struct erase_info_user64))) {
 933					kfree(erase);
 934					return -EFAULT;
 935				}
 936				erase->addr = einfo64.start;
 937				erase->len = einfo64.length;
 938			} else {
 939				struct erase_info_user einfo32;
 940
 941				if (copy_from_user(&einfo32, argp,
 942					    sizeof(struct erase_info_user))) {
 943					kfree(erase);
 944					return -EFAULT;
 945				}
 946				erase->addr = einfo32.start;
 947				erase->len = einfo32.length;
 948			}
 949
 950			ret = mtd_erase(mtd, erase);
 951			kfree(erase);
 952		}
 953		break;
 954	}
 955
 956	case MEMWRITEOOB:
 957	{
 958		struct mtd_oob_buf buf;
 959		struct mtd_oob_buf __user *buf_user = argp;
 960
 961		/* NOTE: writes return length to buf_user->length */
 962		if (copy_from_user(&buf, argp, sizeof(buf)))
 963			ret = -EFAULT;
 964		else
 965			ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
 966				buf.ptr, &buf_user->length);
 967		break;
 968	}
 969
 970	case MEMREADOOB:
 971	{
 972		struct mtd_oob_buf buf;
 973		struct mtd_oob_buf __user *buf_user = argp;
 974
 975		/* NOTE: writes return length to buf_user->start */
 976		if (copy_from_user(&buf, argp, sizeof(buf)))
 977			ret = -EFAULT;
 978		else
 979			ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
 980				buf.ptr, &buf_user->start);
 981		break;
 982	}
 983
 984	case MEMWRITEOOB64:
 985	{
 986		struct mtd_oob_buf64 buf;
 987		struct mtd_oob_buf64 __user *buf_user = argp;
 988
 989		if (copy_from_user(&buf, argp, sizeof(buf)))
 990			ret = -EFAULT;
 991		else
 992			ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
 993				(void __user *)(uintptr_t)buf.usr_ptr,
 994				&buf_user->length);
 995		break;
 996	}
 997
 998	case MEMREADOOB64:
 999	{
1000		struct mtd_oob_buf64 buf;
1001		struct mtd_oob_buf64 __user *buf_user = argp;
1002
1003		if (copy_from_user(&buf, argp, sizeof(buf)))
1004			ret = -EFAULT;
1005		else
1006			ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
1007				(void __user *)(uintptr_t)buf.usr_ptr,
1008				&buf_user->length);
1009		break;
1010	}
1011
1012	case MEMWRITE:
1013	{
1014		ret = mtdchar_write_ioctl(mtd,
1015		      (struct mtd_write_req __user *)arg);
1016		break;
1017	}
1018
1019	case MEMREAD:
1020	{
1021		ret = mtdchar_read_ioctl(mtd,
1022		      (struct mtd_read_req __user *)arg);
1023		break;
1024	}
1025
1026	case MEMLOCK:
1027	{
1028		struct erase_info_user einfo;
1029
1030		if (copy_from_user(&einfo, argp, sizeof(einfo)))
1031			return -EFAULT;
1032
1033		ret = mtd_lock(mtd, einfo.start, einfo.length);
1034		break;
1035	}
1036
1037	case MEMUNLOCK:
1038	{
1039		struct erase_info_user einfo;
1040
1041		if (copy_from_user(&einfo, argp, sizeof(einfo)))
1042			return -EFAULT;
1043
1044		ret = mtd_unlock(mtd, einfo.start, einfo.length);
1045		break;
1046	}
1047
1048	case MEMISLOCKED:
1049	{
1050		struct erase_info_user einfo;
1051
1052		if (copy_from_user(&einfo, argp, sizeof(einfo)))
1053			return -EFAULT;
1054
1055		ret = mtd_is_locked(mtd, einfo.start, einfo.length);
1056		break;
1057	}
1058
1059	/* Legacy interface */
1060	case MEMGETOOBSEL:
1061	{
1062		struct nand_oobinfo oi;
1063
1064		if (!master->ooblayout)
1065			return -EOPNOTSUPP;
1066
1067		ret = get_oobinfo(mtd, &oi);
1068		if (ret)
1069			return ret;
1070
1071		if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
1072			return -EFAULT;
1073		break;
1074	}
1075
1076	case MEMGETBADBLOCK:
1077	{
1078		loff_t offs;
1079
1080		if (copy_from_user(&offs, argp, sizeof(loff_t)))
1081			return -EFAULT;
1082		return mtd_block_isbad(mtd, offs);
1083	}
1084
1085	case MEMSETBADBLOCK:
1086	{
1087		loff_t offs;
1088
1089		if (copy_from_user(&offs, argp, sizeof(loff_t)))
1090			return -EFAULT;
1091		return mtd_block_markbad(mtd, offs);
1092	}
1093
1094	case OTPSELECT:
1095	{
1096		int mode;
1097		if (copy_from_user(&mode, argp, sizeof(int)))
1098			return -EFAULT;
1099
1100		mfi->mode = MTD_FILE_MODE_NORMAL;
1101
1102		ret = otp_select_filemode(mfi, mode);
1103
1104		file->f_pos = 0;
1105		break;
1106	}
1107
1108	case OTPGETREGIONCOUNT:
1109	case OTPGETREGIONINFO:
1110	{
1111		struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
1112		size_t retlen;
1113		if (!buf)
1114			return -ENOMEM;
1115		switch (mfi->mode) {
1116		case MTD_FILE_MODE_OTP_FACTORY:
1117			ret = mtd_get_fact_prot_info(mtd, 4096, &retlen, buf);
1118			break;
1119		case MTD_FILE_MODE_OTP_USER:
1120			ret = mtd_get_user_prot_info(mtd, 4096, &retlen, buf);
1121			break;
1122		default:
1123			ret = -EINVAL;
1124			break;
1125		}
1126		if (!ret) {
1127			if (cmd == OTPGETREGIONCOUNT) {
1128				int nbr = retlen / sizeof(struct otp_info);
1129				ret = copy_to_user(argp, &nbr, sizeof(int));
1130			} else
1131				ret = copy_to_user(argp, buf, retlen);
1132			if (ret)
1133				ret = -EFAULT;
1134		}
1135		kfree(buf);
1136		break;
1137	}
1138
1139	case OTPLOCK:
1140	case OTPERASE:
1141	{
1142		struct otp_info oinfo;
1143
1144		if (mfi->mode != MTD_FILE_MODE_OTP_USER)
1145			return -EINVAL;
1146		if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
1147			return -EFAULT;
1148		if (cmd == OTPLOCK)
1149			ret = mtd_lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
1150		else
1151			ret = mtd_erase_user_prot_reg(mtd, oinfo.start, oinfo.length);
1152		break;
1153	}
1154
1155	/* This ioctl is being deprecated - it truncates the ECC layout */
1156	case ECCGETLAYOUT:
1157	{
1158		struct nand_ecclayout_user *usrlay;
1159
1160		if (!master->ooblayout)
1161			return -EOPNOTSUPP;
1162
1163		usrlay = kmalloc(sizeof(*usrlay), GFP_KERNEL);
1164		if (!usrlay)
1165			return -ENOMEM;
1166
1167		shrink_ecclayout(mtd, usrlay);
1168
1169		if (copy_to_user(argp, usrlay, sizeof(*usrlay)))
1170			ret = -EFAULT;
1171		kfree(usrlay);
1172		break;
1173	}
1174
1175	case ECCGETSTATS:
1176	{
1177		if (copy_to_user(argp, &mtd->ecc_stats,
1178				 sizeof(struct mtd_ecc_stats)))
1179			return -EFAULT;
1180		break;
1181	}
1182
1183	case MTDFILEMODE:
1184	{
1185		mfi->mode = 0;
1186
1187		switch(arg) {
1188		case MTD_FILE_MODE_OTP_FACTORY:
1189		case MTD_FILE_MODE_OTP_USER:
1190			ret = otp_select_filemode(mfi, arg);
1191			break;
1192
1193		case MTD_FILE_MODE_RAW:
1194			if (!mtd_has_oob(mtd))
1195				return -EOPNOTSUPP;
1196			mfi->mode = arg;
1197			break;
1198
1199		case MTD_FILE_MODE_NORMAL:
1200			break;
1201		default:
1202			ret = -EINVAL;
1203		}
1204		file->f_pos = 0;
1205		break;
1206	}
1207
1208	case BLKPG:
1209	{
1210		struct blkpg_ioctl_arg __user *blk_arg = argp;
1211		struct blkpg_ioctl_arg a;
1212
1213		if (copy_from_user(&a, blk_arg, sizeof(a)))
1214			ret = -EFAULT;
1215		else
1216			ret = mtdchar_blkpg_ioctl(mtd, &a);
1217		break;
1218	}
1219
1220	case BLKRRPART:
1221	{
1222		/* No reread partition feature. Just return ok */
1223		ret = 0;
1224		break;
1225	}
1226	}
1227
1228	return ret;
1229} /* memory_ioctl */
1230
1231static long mtdchar_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
1232{
1233	struct mtd_file_info *mfi = file->private_data;
1234	struct mtd_info *mtd = mfi->mtd;
1235	struct mtd_info *master = mtd_get_master(mtd);
1236	int ret;
1237
1238	mutex_lock(&master->master.chrdev_lock);
1239	ret = mtdchar_ioctl(file, cmd, arg);
1240	mutex_unlock(&master->master.chrdev_lock);
1241
1242	return ret;
1243}
1244
1245#ifdef CONFIG_COMPAT
1246
1247struct mtd_oob_buf32 {
1248	u_int32_t start;
1249	u_int32_t length;
1250	compat_caddr_t ptr;	/* unsigned char* */
1251};
1252
1253#define MEMWRITEOOB32		_IOWR('M', 3, struct mtd_oob_buf32)
1254#define MEMREADOOB32		_IOWR('M', 4, struct mtd_oob_buf32)
1255
1256static long mtdchar_compat_ioctl(struct file *file, unsigned int cmd,
1257	unsigned long arg)
1258{
1259	struct mtd_file_info *mfi = file->private_data;
1260	struct mtd_info *mtd = mfi->mtd;
1261	struct mtd_info *master = mtd_get_master(mtd);
1262	void __user *argp = compat_ptr(arg);
1263	int ret = 0;
1264
1265	mutex_lock(&master->master.chrdev_lock);
1266
1267	switch (cmd) {
1268	case MEMWRITEOOB32:
1269	{
1270		struct mtd_oob_buf32 buf;
1271		struct mtd_oob_buf32 __user *buf_user = argp;
1272
1273		if (!(file->f_mode & FMODE_WRITE)) {
1274			ret = -EPERM;
1275			break;
1276		}
1277
1278		if (copy_from_user(&buf, argp, sizeof(buf)))
1279			ret = -EFAULT;
1280		else
1281			ret = mtdchar_writeoob(file, mtd, buf.start,
1282				buf.length, compat_ptr(buf.ptr),
1283				&buf_user->length);
1284		break;
1285	}
1286
1287	case MEMREADOOB32:
1288	{
1289		struct mtd_oob_buf32 buf;
1290		struct mtd_oob_buf32 __user *buf_user = argp;
1291
1292		/* NOTE: writes return length to buf->start */
1293		if (copy_from_user(&buf, argp, sizeof(buf)))
1294			ret = -EFAULT;
1295		else
1296			ret = mtdchar_readoob(file, mtd, buf.start,
1297				buf.length, compat_ptr(buf.ptr),
1298				&buf_user->start);
1299		break;
1300	}
1301
1302	case BLKPG:
1303	{
1304		/* Convert from blkpg_compat_ioctl_arg to blkpg_ioctl_arg */
1305		struct blkpg_compat_ioctl_arg __user *uarg = argp;
1306		struct blkpg_compat_ioctl_arg compat_arg;
1307		struct blkpg_ioctl_arg a;
1308
1309		if (copy_from_user(&compat_arg, uarg, sizeof(compat_arg))) {
1310			ret = -EFAULT;
1311			break;
1312		}
1313
1314		memset(&a, 0, sizeof(a));
1315		a.op = compat_arg.op;
1316		a.flags = compat_arg.flags;
1317		a.datalen = compat_arg.datalen;
1318		a.data = compat_ptr(compat_arg.data);
1319
1320		ret = mtdchar_blkpg_ioctl(mtd, &a);
1321		break;
1322	}
1323
1324	default:
1325		ret = mtdchar_ioctl(file, cmd, (unsigned long)argp);
1326	}
1327
1328	mutex_unlock(&master->master.chrdev_lock);
1329
1330	return ret;
1331}
1332
1333#endif /* CONFIG_COMPAT */
1334
1335/*
1336 * try to determine where a shared mapping can be made
1337 * - only supported for NOMMU at the moment (an MMU kernel can't do this, as
1338 *   it doesn't copy private mappings)
1339 */
1340#ifndef CONFIG_MMU
1341static unsigned long mtdchar_get_unmapped_area(struct file *file,
1342					   unsigned long addr,
1343					   unsigned long len,
1344					   unsigned long pgoff,
1345					   unsigned long flags)
1346{
1347	struct mtd_file_info *mfi = file->private_data;
1348	struct mtd_info *mtd = mfi->mtd;
1349	unsigned long offset;
1350	int ret;
1351
1352	if (addr != 0)
1353		return (unsigned long) -EINVAL;
1354
1355	if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
1356		return (unsigned long) -EINVAL;
1357
1358	offset = pgoff << PAGE_SHIFT;
1359	if (offset > mtd->size - len)
1360		return (unsigned long) -EINVAL;
1361
1362	ret = mtd_get_unmapped_area(mtd, len, offset, flags);
1363	return ret == -EOPNOTSUPP ? -ENODEV : ret;
1364}
1365
1366static unsigned mtdchar_mmap_capabilities(struct file *file)
1367{
1368	struct mtd_file_info *mfi = file->private_data;
1369
1370	return mtd_mmap_capabilities(mfi->mtd);
1371}
1372#endif
1373
1374/*
1375 * set up a mapping for shared memory segments
1376 */
1377static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)
1378{
1379#ifdef CONFIG_MMU
1380	struct mtd_file_info *mfi = file->private_data;
1381	struct mtd_info *mtd = mfi->mtd;
1382	struct map_info *map = mtd->priv;
1383
1384        /* This is broken because it assumes the MTD device is map-based
1385	   and that mtd->priv is a valid struct map_info.  It should be
1386	   replaced with something that uses the mtd_get_unmapped_area()
1387	   operation properly. */
1388	if (0 /*mtd->type == MTD_RAM || mtd->type == MTD_ROM*/) {
1389#ifdef pgprot_noncached
1390		if (file->f_flags & O_DSYNC || map->phys >= __pa(high_memory))
1391			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1392#endif
1393		return vm_iomap_memory(vma, map->phys, map->size);
1394	}
1395	return -ENODEV;
1396#else
1397	return vma->vm_flags & VM_SHARED ? 0 : -EACCES;
1398#endif
1399}
1400
1401static const struct file_operations mtd_fops = {
1402	.owner		= THIS_MODULE,
1403	.llseek		= mtdchar_lseek,
1404	.read		= mtdchar_read,
1405	.write		= mtdchar_write,
1406	.unlocked_ioctl	= mtdchar_unlocked_ioctl,
1407#ifdef CONFIG_COMPAT
1408	.compat_ioctl	= mtdchar_compat_ioctl,
1409#endif
1410	.open		= mtdchar_open,
1411	.release	= mtdchar_close,
1412	.mmap		= mtdchar_mmap,
1413#ifndef CONFIG_MMU
1414	.get_unmapped_area = mtdchar_get_unmapped_area,
1415	.mmap_capabilities = mtdchar_mmap_capabilities,
1416#endif
1417};
1418
1419int __init init_mtdchar(void)
1420{
1421	int ret;
1422
1423	ret = __register_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS,
1424				   "mtd", &mtd_fops);
1425	if (ret < 0) {
1426		pr_err("Can't allocate major number %d for MTD\n",
1427		       MTD_CHAR_MAJOR);
1428		return ret;
1429	}
1430
1431	return ret;
1432}
1433
1434void __exit cleanup_mtdchar(void)
1435{
1436	__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
1437}
1438
1439MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);
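
For reference, a minimal userspace sketch of the character-device interface implemented above: it queries the device geometry with MEMGETINFO and then issues a plain read(), which is serviced by mtdchar_read(). This is editorial material rather than part of the kernel file; it assumes an MTD node exists at /dev/mtd0 and that the kernel UAPI header mtd/mtd-abi.h is installed.

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <mtd/mtd-abi.h>

	int main(void)
	{
		struct mtd_info_user info;
		unsigned char buf[4096];
		int fd = open("/dev/mtd0", O_RDONLY);	/* O_RDONLY avoids the FMODE_WRITE checks */

		if (fd < 0 || ioctl(fd, MEMGETINFO, &info) < 0) {
			perror("mtd");
			return 1;
		}
		printf("size=%llu erasesize=%u writesize=%u oobsize=%u\n",
		       (unsigned long long)info.size, info.erasesize,
		       info.writesize, info.oobsize);

		/* Serviced by mtdchar_read(); short reads near the end of the device are normal. */
		if (read(fd, buf, sizeof(buf)) < 0)
			perror("read");

		close(fd);
		return 0;
	}

Erases, writes and the OOB ioctls follow the same pattern, but the device must be opened O_RDWR to pass the FMODE_WRITE checks in mtdchar_open() and mtdchar_ioctl(). An older version of the same file (v3.5.6) follows.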
v3.5.6
 
   1/*
   2 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
   3 *
   4 * This program is free software; you can redistribute it and/or modify
   5 * it under the terms of the GNU General Public License as published by
   6 * the Free Software Foundation; either version 2 of the License, or
   7 * (at your option) any later version.
   8 *
   9 * This program is distributed in the hope that it will be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12 * GNU General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public License
  15 * along with this program; if not, write to the Free Software
  16 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  17 *
  18 */
  19
  20#include <linux/device.h>
  21#include <linux/fs.h>
  22#include <linux/mm.h>
  23#include <linux/err.h>
  24#include <linux/init.h>
  25#include <linux/kernel.h>
  26#include <linux/module.h>
  27#include <linux/slab.h>
  28#include <linux/sched.h>
  29#include <linux/mutex.h>
  30#include <linux/backing-dev.h>
  31#include <linux/compat.h>
  32#include <linux/mount.h>
  33#include <linux/blkpg.h>
  34#include <linux/magic.h>
  35#include <linux/mtd/mtd.h>
  36#include <linux/mtd/partitions.h>
  37#include <linux/mtd/map.h>
  38
  39#include <asm/uaccess.h>
  40
  41static DEFINE_MUTEX(mtd_mutex);
  42
  43/*
  44 * Data structure to hold the pointer to the mtd device as well
  45 * as mode information of various use cases.
  46 */
  47struct mtd_file_info {
  48	struct mtd_info *mtd;
  49	struct inode *ino;
  50	enum mtd_file_modes mode;
  51};
  52
  53static loff_t mtdchar_lseek(struct file *file, loff_t offset, int orig)
  54{
  55	struct mtd_file_info *mfi = file->private_data;
  56	struct mtd_info *mtd = mfi->mtd;
  57
  58	switch (orig) {
  59	case SEEK_SET:
  60		break;
  61	case SEEK_CUR:
  62		offset += file->f_pos;
  63		break;
  64	case SEEK_END:
  65		offset += mtd->size;
  66		break;
  67	default:
  68		return -EINVAL;
  69	}
  70
  71	if (offset >= 0 && offset <= mtd->size)
  72		return file->f_pos = offset;
  73
  74	return -EINVAL;
  75}
  76
  77static int count;
  78static struct vfsmount *mnt;
  79static struct file_system_type mtd_inodefs_type;
  80
  81static int mtdchar_open(struct inode *inode, struct file *file)
  82{
  83	int minor = iminor(inode);
  84	int devnum = minor >> 1;
  85	int ret = 0;
  86	struct mtd_info *mtd;
  87	struct mtd_file_info *mfi;
  88	struct inode *mtd_ino;
  89
  90	pr_debug("MTD_open\n");
  91
  92	/* You can't open the RO devices RW */
  93	if ((file->f_mode & FMODE_WRITE) && (minor & 1))
  94		return -EACCES;
  95
  96	ret = simple_pin_fs(&mtd_inodefs_type, &mnt, &count);
  97	if (ret)
  98		return ret;
  99
 100	mutex_lock(&mtd_mutex);
 101	mtd = get_mtd_device(NULL, devnum);
 102
 103	if (IS_ERR(mtd)) {
 104		ret = PTR_ERR(mtd);
 105		goto out;
 106	}
 107
 108	if (mtd->type == MTD_ABSENT) {
 109		ret = -ENODEV;
 110		goto out1;
 111	}
 112
 113	mtd_ino = iget_locked(mnt->mnt_sb, devnum);
 114	if (!mtd_ino) {
 115		ret = -ENOMEM;
 116		goto out1;
 117	}
 118	if (mtd_ino->i_state & I_NEW) {
 119		mtd_ino->i_private = mtd;
 120		mtd_ino->i_mode = S_IFCHR;
 121		mtd_ino->i_data.backing_dev_info = mtd->backing_dev_info;
 122		unlock_new_inode(mtd_ino);
 123	}
 124	file->f_mapping = mtd_ino->i_mapping;
 125
 126	/* You can't open it RW if it's not a writeable device */
 127	if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
 128		ret = -EACCES;
 129		goto out2;
 130	}
 131
 132	mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
 133	if (!mfi) {
 134		ret = -ENOMEM;
 135		goto out2;
 136	}
 137	mfi->ino = mtd_ino;
 138	mfi->mtd = mtd;
 139	file->private_data = mfi;
 140	mutex_unlock(&mtd_mutex);
 141	return 0;
 142
 143out2:
 144	iput(mtd_ino);
 145out1:
 146	put_mtd_device(mtd);
 147out:
 148	mutex_unlock(&mtd_mutex);
 149	simple_release_fs(&mnt, &count);
 150	return ret;
 151} /* mtdchar_open */
 152
 153/*====================================================================*/
 154
 155static int mtdchar_close(struct inode *inode, struct file *file)
 156{
 157	struct mtd_file_info *mfi = file->private_data;
 158	struct mtd_info *mtd = mfi->mtd;
 159
 160	pr_debug("MTD_close\n");
 161
 162	/* Only sync if opened RW */
 163	if ((file->f_mode & FMODE_WRITE))
 164		mtd_sync(mtd);
 165
 166	iput(mfi->ino);
 167
 168	put_mtd_device(mtd);
 169	file->private_data = NULL;
 170	kfree(mfi);
 171	simple_release_fs(&mnt, &count);
 172
 173	return 0;
 174} /* mtdchar_close */
 175
 176/* Back in June 2001, dwmw2 wrote:
 177 *
 178 *   FIXME: This _really_ needs to die. In 2.5, we should lock the
 179 *   userspace buffer down and use it directly with readv/writev.
 180 *
 181 * The implementation below, using mtd_kmalloc_up_to, mitigates
 182 * allocation failures when the system is under low-memory situations
 183 * or if memory is highly fragmented at the cost of reducing the
 184 * performance of the requested transfer due to a smaller buffer size.
 185 *
 186 * A more complex but more memory-efficient implementation based on
 187 * get_user_pages and iovecs to cover extents of those pages is a
 188 * longer-term goal, as intimated by dwmw2 above. However, for the
 189 * write case, this requires yet more complex head and tail transfer
 190 * handling when those head and tail offsets and sizes are such that
 191 * alignment requirements are not met in the NAND subdriver.
 192 */
 193
 194static ssize_t mtdchar_read(struct file *file, char __user *buf, size_t count,
 195			loff_t *ppos)
 196{
 197	struct mtd_file_info *mfi = file->private_data;
 198	struct mtd_info *mtd = mfi->mtd;
 199	size_t retlen;
 200	size_t total_retlen=0;
 201	int ret=0;
 202	int len;
 203	size_t size = count;
 204	char *kbuf;
 205
 206	pr_debug("MTD_read\n");
 207
 208	if (*ppos + count > mtd->size)
 209		count = mtd->size - *ppos;
 210
 211	if (!count)
 212		return 0;
 213
 214	kbuf = mtd_kmalloc_up_to(mtd, &size);
 215	if (!kbuf)
 216		return -ENOMEM;
 217
 218	while (count) {
 219		len = min_t(size_t, count, size);
 220
 221		switch (mfi->mode) {
 222		case MTD_FILE_MODE_OTP_FACTORY:
 223			ret = mtd_read_fact_prot_reg(mtd, *ppos, len,
 224						     &retlen, kbuf);
 225			break;
 226		case MTD_FILE_MODE_OTP_USER:
 227			ret = mtd_read_user_prot_reg(mtd, *ppos, len,
 228						     &retlen, kbuf);
 229			break;
 230		case MTD_FILE_MODE_RAW:
 231		{
 232			struct mtd_oob_ops ops;
 233
 234			ops.mode = MTD_OPS_RAW;
 235			ops.datbuf = kbuf;
 236			ops.oobbuf = NULL;
 237			ops.len = len;
 238
 239			ret = mtd_read_oob(mtd, *ppos, &ops);
 240			retlen = ops.retlen;
 241			break;
 242		}
 243		default:
 244			ret = mtd_read(mtd, *ppos, len, &retlen, kbuf);
 245		}
 246		/* NAND returns -EBADMSG on ECC errors, but it returns
 247		 * the data. For our userspace tools it is important
 248		 * to dump areas with ECC errors!
 249		 * For kernel internal usage it also might return -EUCLEAN
 250		 * to signal the caller that a bitflip has occurred and has
 251		 * been corrected by the ECC algorithm.
 252		 * Userspace software which accesses NAND this way
 253		 * must be aware of the fact that it deals with NAND
 254		 */
 255		if (!ret || mtd_is_bitflip_or_eccerr(ret)) {
 256			*ppos += retlen;
 257			if (copy_to_user(buf, kbuf, retlen)) {
 258				kfree(kbuf);
 259				return -EFAULT;
 260			}
 261			else
 262				total_retlen += retlen;
 263
 264			count -= retlen;
 265			buf += retlen;
 266			if (retlen == 0)
 267				count = 0;
 268		}
 269		else {
 270			kfree(kbuf);
 271			return ret;
 272		}
 273
 274	}
 275
 276	kfree(kbuf);
 277	return total_retlen;
 278} /* mtdchar_read */
 279
 280static ssize_t mtdchar_write(struct file *file, const char __user *buf, size_t count,
 281			loff_t *ppos)
 282{
 283	struct mtd_file_info *mfi = file->private_data;
 284	struct mtd_info *mtd = mfi->mtd;
 285	size_t size = count;
 286	char *kbuf;
 287	size_t retlen;
 288	size_t total_retlen=0;
 289	int ret=0;
 290	int len;
 291
 292	pr_debug("MTD_write\n");
 293
 294	if (*ppos == mtd->size)
 295		return -ENOSPC;
 296
 297	if (*ppos + count > mtd->size)
 298		count = mtd->size - *ppos;
 299
 300	if (!count)
 301		return 0;
 302
 303	kbuf = mtd_kmalloc_up_to(mtd, &size);
 304	if (!kbuf)
 305		return -ENOMEM;
 306
 307	while (count) {
 308		len = min_t(size_t, count, size);
 309
 310		if (copy_from_user(kbuf, buf, len)) {
 311			kfree(kbuf);
 312			return -EFAULT;
 313		}
 314
 315		switch (mfi->mode) {
 316		case MTD_FILE_MODE_OTP_FACTORY:
 317			ret = -EROFS;
 318			break;
 319		case MTD_FILE_MODE_OTP_USER:
 320			ret = mtd_write_user_prot_reg(mtd, *ppos, len,
 321						      &retlen, kbuf);
 322			break;
 323
 324		case MTD_FILE_MODE_RAW:
 325		{
 326			struct mtd_oob_ops ops;
 327
 328			ops.mode = MTD_OPS_RAW;
 329			ops.datbuf = kbuf;
 330			ops.oobbuf = NULL;
 331			ops.ooboffs = 0;
 332			ops.len = len;
 333
 334			ret = mtd_write_oob(mtd, *ppos, &ops);
 335			retlen = ops.retlen;
 336			break;
 337		}
 338
 339		default:
 340			ret = mtd_write(mtd, *ppos, len, &retlen, kbuf);
 341		}
 342		if (!ret) {
 343			*ppos += retlen;
 344			total_retlen += retlen;
 345			count -= retlen;
 346			buf += retlen;
 347		}
 348		else {
 349			kfree(kbuf);
 350			return ret;
 351		}
 352	}
 353
 354	kfree(kbuf);
 355	return total_retlen;
 356} /* mtdchar_write */
 357
 358/*======================================================================
 359
 360    IOCTL calls for getting device parameters.
 361
 362======================================================================*/
 363static void mtdchar_erase_callback (struct erase_info *instr)
 364{
 365	wake_up((wait_queue_head_t *)instr->priv);
 366}
 367
 368#ifdef CONFIG_HAVE_MTD_OTP
 369static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
 370{
 371	struct mtd_info *mtd = mfi->mtd;
 372	size_t retlen;
 373	int ret = 0;
 374
 375	/*
 376	 * Make a fake call to mtd_read_fact_prot_reg() to check if OTP
 377	 * operations are supported.
 378	 */
 379	if (mtd_read_fact_prot_reg(mtd, -1, 0, &retlen, NULL) == -EOPNOTSUPP)
 380		return -EOPNOTSUPP;
 381
 382	switch (mode) {
 383	case MTD_OTP_FACTORY:
 384		mfi->mode = MTD_FILE_MODE_OTP_FACTORY;
 385		break;
 386	case MTD_OTP_USER:
 387		mfi->mode = MTD_FILE_MODE_OTP_USER;
 388		break;
 389	default:
 390		ret = -EINVAL;
 391	case MTD_OTP_OFF:
 392		break;
 393	}
 394	return ret;
 395}
 396#else
 397# define otp_select_filemode(f,m)	-EOPNOTSUPP
 398#endif
 399
 400static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd,
 401	uint64_t start, uint32_t length, void __user *ptr,
 402	uint32_t __user *retp)
 403{
 404	struct mtd_file_info *mfi = file->private_data;
 405	struct mtd_oob_ops ops;
 406	uint32_t retlen;
 407	int ret = 0;
 408
 409	if (!(file->f_mode & FMODE_WRITE))
 410		return -EPERM;
 411
 412	if (length > 4096)
 413		return -EINVAL;
 414
 415	if (!mtd->_write_oob)
 416		ret = -EOPNOTSUPP;
 417	else
 418		ret = access_ok(VERIFY_READ, ptr, length) ? 0 : -EFAULT;
 419
 420	if (ret)
 421		return ret;
 422
 423	ops.ooblen = length;
 424	ops.ooboffs = start & (mtd->writesize - 1);
 425	ops.datbuf = NULL;
 426	ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW :
 427		MTD_OPS_PLACE_OOB;
 428
 429	if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
 430		return -EINVAL;
 431
 432	ops.oobbuf = memdup_user(ptr, length);
 433	if (IS_ERR(ops.oobbuf))
 434		return PTR_ERR(ops.oobbuf);
 435
 436	start &= ~((uint64_t)mtd->writesize - 1);
 437	ret = mtd_write_oob(mtd, start, &ops);
 438
 439	if (ops.oobretlen > 0xFFFFFFFFU)
 440		ret = -EOVERFLOW;
 441	retlen = ops.oobretlen;
 442	if (copy_to_user(retp, &retlen, sizeof(length)))
 443		ret = -EFAULT;
 444
 445	kfree(ops.oobbuf);
 446	return ret;
 447}
 448
 449static int mtdchar_readoob(struct file *file, struct mtd_info *mtd,
 450	uint64_t start, uint32_t length, void __user *ptr,
 451	uint32_t __user *retp)
 452{
 453	struct mtd_file_info *mfi = file->private_data;
 454	struct mtd_oob_ops ops;
 455	int ret = 0;
 456
 457	if (length > 4096)
 458		return -EINVAL;
 459
 460	if (!access_ok(VERIFY_WRITE, ptr, length))
 461		return -EFAULT;
 462
 463	ops.ooblen = length;
 464	ops.ooboffs = start & (mtd->writesize - 1);
 465	ops.datbuf = NULL;
 466	ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW :
 467		MTD_OPS_PLACE_OOB;
 468
 469	if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
 470		return -EINVAL;
 471
 472	ops.oobbuf = kmalloc(length, GFP_KERNEL);
 473	if (!ops.oobbuf)
 474		return -ENOMEM;
 475
 476	start &= ~((uint64_t)mtd->writesize - 1);
 477	ret = mtd_read_oob(mtd, start, &ops);
 478
 479	if (put_user(ops.oobretlen, retp))
 480		ret = -EFAULT;
 481	else if (ops.oobretlen && copy_to_user(ptr, ops.oobbuf,
 482					    ops.oobretlen))
 483		ret = -EFAULT;
 484
 485	kfree(ops.oobbuf);
 486
 487	/*
 488	 * NAND returns -EBADMSG on ECC errors, but it returns the OOB
 489	 * data. For our userspace tools it is important to dump areas
 490	 * with ECC errors!
 491	 * For kernel internal usage it also might return -EUCLEAN
 492	 * to signal the caller that a bitflip has occurred and has
 493	 * been corrected by the ECC algorithm.
 494	 *
 495	 * Note: currently the standard NAND function, nand_read_oob_std,
 496	 * does not calculate ECC for the OOB area, so do not rely on
 497	 * this behavior unless you have replaced it with your own.
 498	 */
 499	if (mtd_is_bitflip_or_eccerr(ret))
 500		return 0;
 501
 502	return ret;
 503}
 504
 505/*
 506 * Copies (and truncates, if necessary) data from the larger struct,
 507 * nand_ecclayout, to the smaller, deprecated layout struct,
 508 * nand_ecclayout_user. This is necessary only to support the deprecated
 509 * API ioctl ECCGETLAYOUT while allowing all new functionality to use
 510 * nand_ecclayout flexibly (i.e. the struct may change size in new
 511 * releases without requiring major rewrites).
 512 */
 513static int shrink_ecclayout(const struct nand_ecclayout *from,
 514		struct nand_ecclayout_user *to)
 515{
 516	int i;
 517
 518	if (!from || !to)
 519		return -EINVAL;
 520
 521	memset(to, 0, sizeof(*to));
 522
 523	to->eccbytes = min((int)from->eccbytes, MTD_MAX_ECCPOS_ENTRIES);
 524	for (i = 0; i < to->eccbytes; i++)
 525		to->eccpos[i] = from->eccpos[i];
 526
 527	for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES; i++) {
 528		if (from->oobfree[i].length == 0 &&
 529				from->oobfree[i].offset == 0)
 530			break;
 531		to->oobavail += from->oobfree[i].length;
 532		to->oobfree[i] = from->oobfree[i];
 533	}
 534
 535	return 0;
 536}
 537
 538static int mtdchar_blkpg_ioctl(struct mtd_info *mtd,
 539			   struct blkpg_ioctl_arg __user *arg)
 540{
 541	struct blkpg_ioctl_arg a;
 542	struct blkpg_partition p;
 543
 544	if (!capable(CAP_SYS_ADMIN))
 545		return -EPERM;
 546
 547	if (copy_from_user(&a, arg, sizeof(struct blkpg_ioctl_arg)))
 548		return -EFAULT;
 549
 550	if (copy_from_user(&p, a.data, sizeof(struct blkpg_partition)))
 551		return -EFAULT;
 552
 553	switch (a.op) {
 554	case BLKPG_ADD_PARTITION:
 555
 556		/* Only the master mtd device may be used to add partitions */
 557		if (mtd_is_partition(mtd))
 558			return -EINVAL;
 559
 560		return mtd_add_partition(mtd, p.devname, p.start, p.length);
 561
 562	case BLKPG_DEL_PARTITION:
 563
 564		if (p.pno < 0)
 565			return -EINVAL;
 566
 567		return mtd_del_partition(mtd, p.pno);
 568
 569	default:
 570		return -EINVAL;
 571	}
 572}
 573
 574static int mtdchar_write_ioctl(struct mtd_info *mtd,
 575		struct mtd_write_req __user *argp)
 576{
 577	struct mtd_write_req req;
 578	struct mtd_oob_ops ops;
 579	void __user *usr_data, *usr_oob;
 580	int ret;
 581
 582	if (copy_from_user(&req, argp, sizeof(req)) ||
 583			!access_ok(VERIFY_READ, req.usr_data, req.len) ||
 584			!access_ok(VERIFY_READ, req.usr_oob, req.ooblen))
 585		return -EFAULT;
 586	if (!mtd->_write_oob)
 587		return -EOPNOTSUPP;
 588
 589	ops.mode = req.mode;
 590	ops.len = (size_t)req.len;
 591	ops.ooblen = (size_t)req.ooblen;
 592	ops.ooboffs = 0;
 593
 594	usr_data = (void __user *)(uintptr_t)req.usr_data;
 595	usr_oob = (void __user *)(uintptr_t)req.usr_oob;
 596
 597	if (req.usr_data) {
 598		ops.datbuf = memdup_user(usr_data, ops.len);
 599		if (IS_ERR(ops.datbuf))
 600			return PTR_ERR(ops.datbuf);
 601	} else {
 602		ops.datbuf = NULL;
 603	}
 604
 605	if (req.usr_oob) {
 606		ops.oobbuf = memdup_user(usr_oob, ops.ooblen);
 607		if (IS_ERR(ops.oobbuf)) {
 608			kfree(ops.datbuf);
 609			return PTR_ERR(ops.oobbuf);
 610		}
 611	} else {
 612		ops.oobbuf = NULL;
 613	}
 614
 615	ret = mtd_write_oob(mtd, (loff_t)req.start, &ops);
 616
 617	kfree(ops.datbuf);
 618	kfree(ops.oobbuf);
 619
 620	return ret;
 621}
 622
 623static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
 624{
 625	struct mtd_file_info *mfi = file->private_data;
 626	struct mtd_info *mtd = mfi->mtd;
 627	void __user *argp = (void __user *)arg;
 628	int ret = 0;
 629	u_long size;
 630	struct mtd_info_user info;
 631
 632	pr_debug("MTD_ioctl\n");
 633
 634	size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
 635	if (cmd & IOC_IN) {
 636		if (!access_ok(VERIFY_READ, argp, size))
 637			return -EFAULT;
 638	}
 639	if (cmd & IOC_OUT) {
 640		if (!access_ok(VERIFY_WRITE, argp, size))
 641			return -EFAULT;
 642	}
 643
 644	switch (cmd) {
 645	case MEMGETREGIONCOUNT:
 646		if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
 647			return -EFAULT;
 648		break;
 649
 650	case MEMGETREGIONINFO:
 651	{
 652		uint32_t ur_idx;
 653		struct mtd_erase_region_info *kr;
 654		struct region_info_user __user *ur = argp;
 655
 656		if (get_user(ur_idx, &(ur->regionindex)))
 657			return -EFAULT;
 658
 659		if (ur_idx >= mtd->numeraseregions)
 660			return -EINVAL;
 661
 662		kr = &(mtd->eraseregions[ur_idx]);
 663
 664		if (put_user(kr->offset, &(ur->offset))
 665		    || put_user(kr->erasesize, &(ur->erasesize))
 666		    || put_user(kr->numblocks, &(ur->numblocks)))
 667			return -EFAULT;
 668
 669		break;
 670	}
 671
 672	case MEMGETINFO:
 673		memset(&info, 0, sizeof(info));
 674		info.type	= mtd->type;
 675		info.flags	= mtd->flags;
 676		info.size	= mtd->size;
 677		info.erasesize	= mtd->erasesize;
 678		info.writesize	= mtd->writesize;
 679		info.oobsize	= mtd->oobsize;
 680		/* The field below is obsolete */
 681		info.padding	= 0;
 682		if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
 683			return -EFAULT;
 684		break;
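
	/*
	 * Illustrative userspace sketch (not part of this driver) of the
	 * MEMGETINFO case above, reading the device geometry:
	 *
	 *	#include <stdio.h>
	 *	#include <sys/ioctl.h>
	 *	#include <mtd/mtd-user.h>
	 *
	 *	static int print_geometry(int fd)
	 *	{
	 *		struct mtd_info_user mi;
	 *
	 *		if (ioctl(fd, MEMGETINFO, &mi) < 0)
	 *			return -1;
	 *		printf("size %u erasesize %u writesize %u oobsize %u\n",
	 *		       mi.size, mi.erasesize, mi.writesize, mi.oobsize);
	 *		return 0;
	 *	}
	 */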
 685
 686	case MEMERASE:
 687	case MEMERASE64:
 688	{
 689		struct erase_info *erase;
 690
 691		if (!(file->f_mode & FMODE_WRITE))
 692			return -EPERM;
 693
 694		erase = kzalloc(sizeof(struct erase_info), GFP_KERNEL);
 695		if (!erase)
 696			ret = -ENOMEM;
 697		else {
 698			wait_queue_head_t waitq;
 699			DECLARE_WAITQUEUE(wait, current);
 700
 701			init_waitqueue_head(&waitq);
 702
 703			if (cmd == MEMERASE64) {
 704				struct erase_info_user64 einfo64;
 705
 706				if (copy_from_user(&einfo64, argp,
 707					    sizeof(struct erase_info_user64))) {
 708					kfree(erase);
 709					return -EFAULT;
 710				}
 711				erase->addr = einfo64.start;
 712				erase->len = einfo64.length;
 713			} else {
 714				struct erase_info_user einfo32;
 715
 716				if (copy_from_user(&einfo32, argp,
 717					    sizeof(struct erase_info_user))) {
 718					kfree(erase);
 719					return -EFAULT;
 720				}
 721				erase->addr = einfo32.start;
 722				erase->len = einfo32.length;
 723			}
 724			erase->mtd = mtd;
 725			erase->callback = mtdchar_erase_callback;
 726			erase->priv = (unsigned long)&waitq;
 727
 728			/*
 729			 * FIXME: Allow INTERRUPTIBLE. That means not
 730			 * keeping the wait_queue head on the stack.
 731			 *
 732			 * If the wq_head is on the stack and we leave
 733			 * because we got interrupted, the wq_head is no
 734			 * longer there when the callback routine tries
 735			 * to wake us up.
 736			 */
 737			ret = mtd_erase(mtd, erase);
 738			if (!ret) {
 739				set_current_state(TASK_UNINTERRUPTIBLE);
 740				add_wait_queue(&waitq, &wait);
 741				if (erase->state != MTD_ERASE_DONE &&
 742				    erase->state != MTD_ERASE_FAILED)
 743					schedule();
 744				remove_wait_queue(&waitq, &wait);
 745				set_current_state(TASK_RUNNING);
 746
 747				ret = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0;
 748			}
 749			kfree(erase);
 750		}
 751		break;
 752	}
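
	/*
	 * Illustrative userspace sketch (not part of this driver) of the
	 * erase handling above, erasing a single eraseblock through
	 * MEMERASE64; the 128KiB offset and length are made-up values:
	 *
	 *	#include <sys/ioctl.h>
	 *	#include <mtd/mtd-user.h>
	 *
	 *	static int erase_one_block(int fd)
	 *	{
	 *		struct erase_info_user64 ei = {
	 *			.start  = 0x20000,
	 *			.length = 0x20000,
	 *		};
	 *
	 *		return ioctl(fd, MEMERASE64, &ei);
	 *	}
	 */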
 753
 754	case MEMWRITEOOB:
 755	{
 756		struct mtd_oob_buf buf;
 757		struct mtd_oob_buf __user *buf_user = argp;
 758
 759		/* NOTE: the number of bytes written is passed back in buf_user->length */
 760		if (copy_from_user(&buf, argp, sizeof(buf)))
 761			ret = -EFAULT;
 762		else
 763			ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
 764				buf.ptr, &buf_user->length);
 765		break;
 766	}
 767
 768	case MEMREADOOB:
 769	{
 770		struct mtd_oob_buf buf;
 771		struct mtd_oob_buf __user *buf_user = argp;
 772
 773		/* NOTE: the number of bytes read is passed back in buf_user->start */
 774		if (copy_from_user(&buf, argp, sizeof(buf)))
 775			ret = -EFAULT;
 776		else
 777			ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
 778				buf.ptr, &buf_user->start);
 779		break;
 780	}
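
	/*
	 * Illustrative userspace sketch (not part of this driver) of the
	 * MEMREADOOB case above.  As noted, the number of OOB bytes actually
	 * read comes back in the start field of the user structure; the
	 * 64-byte OOB size is a made-up value:
	 *
	 *	#include <sys/ioctl.h>
	 *	#include <mtd/mtd-user.h>
	 *
	 *	static int read_oob_of_first_page(int fd, unsigned char *oob)
	 *	{
	 *		struct mtd_oob_buf buf = {
	 *			.start  = 0,
	 *			.length = 64,
	 *			.ptr    = oob,
	 *		};
	 *
	 *		return ioctl(fd, MEMREADOOB, &buf);
	 *	}
	 */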
 781
 782	case MEMWRITEOOB64:
 783	{
 784		struct mtd_oob_buf64 buf;
 785		struct mtd_oob_buf64 __user *buf_user = argp;
 786
 787		if (copy_from_user(&buf, argp, sizeof(buf)))
 788			ret = -EFAULT;
 789		else
 790			ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
 791				(void __user *)(uintptr_t)buf.usr_ptr,
 792				&buf_user->length);
 793		break;
 794	}
 795
 796	case MEMREADOOB64:
 797	{
 798		struct mtd_oob_buf64 buf;
 799		struct mtd_oob_buf64 __user *buf_user = argp;
 800
 801		if (copy_from_user(&buf, argp, sizeof(buf)))
 802			ret = -EFAULT;
 803		else
 804			ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
 805				(void __user *)(uintptr_t)buf.usr_ptr,
 806				&buf_user->length);
 807		break;
 808	}
 809
 810	case MEMWRITE:
 811	{
 812		ret = mtdchar_write_ioctl(mtd,
 813		      (struct mtd_write_req __user *)arg);
 814		break;
 815	}
 816
 817	case MEMLOCK:
 818	{
 819		struct erase_info_user einfo;
 820
 821		if (copy_from_user(&einfo, argp, sizeof(einfo)))
 822			return -EFAULT;
 823
 824		ret = mtd_lock(mtd, einfo.start, einfo.length);
 825		break;
 826	}
 827
 828	case MEMUNLOCK:
 829	{
 830		struct erase_info_user einfo;
 831
 832		if (copy_from_user(&einfo, argp, sizeof(einfo)))
 833			return -EFAULT;
 834
 835		ret = mtd_unlock(mtd, einfo.start, einfo.length);
 836		break;
 837	}
 838
 839	case MEMISLOCKED:
 840	{
 841		struct erase_info_user einfo;
 842
 843		if (copy_from_user(&einfo, argp, sizeof(einfo)))
 844			return -EFAULT;
 845
 846		ret = mtd_is_locked(mtd, einfo.start, einfo.length);
 847		break;
 848	}
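
	/*
	 * Illustrative userspace sketch (not part of this driver) for the
	 * three lock cases above: query one eraseblock and unlock it if the
	 * driver reports it locked (a positive MEMISLOCKED return).  The
	 * offset and length are made-up values:
	 *
	 *	#include <sys/ioctl.h>
	 *	#include <mtd/mtd-user.h>
	 *
	 *	static int unlock_if_locked(int fd)
	 *	{
	 *		struct erase_info_user ei = {
	 *			.start  = 0x20000,
	 *			.length = 0x20000,
	 *		};
	 *
	 *		if (ioctl(fd, MEMISLOCKED, &ei) > 0)
	 *			return ioctl(fd, MEMUNLOCK, &ei);
	 *		return 0;
	 *	}
	 */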
 849
 850	/* Legacy interface */
 851	case MEMGETOOBSEL:
 852	{
 853		struct nand_oobinfo oi;
 854
 855		if (!mtd->ecclayout)
 856			return -EOPNOTSUPP;
 857		if (mtd->ecclayout->eccbytes > ARRAY_SIZE(oi.eccpos))
 858			return -EINVAL;
 859
 860		oi.useecc = MTD_NANDECC_AUTOPLACE;
 861		memcpy(&oi.eccpos, mtd->ecclayout->eccpos, sizeof(oi.eccpos));
 862		memcpy(&oi.oobfree, mtd->ecclayout->oobfree,
 863		       sizeof(oi.oobfree));
 864		oi.eccbytes = mtd->ecclayout->eccbytes;
 865
 866		if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
 867			return -EFAULT;
 868		break;
 869	}
 870
 871	case MEMGETBADBLOCK:
 872	{
 873		loff_t offs;
 874
 875		if (copy_from_user(&offs, argp, sizeof(loff_t)))
 876			return -EFAULT;
 877		return mtd_block_isbad(mtd, offs);
 878		break;
 879	}
 880
 881	case MEMSETBADBLOCK:
 882	{
 883		loff_t offs;
 884
 885		if (copy_from_user(&offs, argp, sizeof(loff_t)))
 886			return -EFAULT;
 887		return mtd_block_markbad(mtd, offs);
 888		break;
 889	}
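
	/*
	 * Illustrative userspace sketch (not part of this driver) for the two
	 * bad block cases above, scanning every eraseblock of the device; the
	 * geometry is assumed to have been read with MEMGETINFO first:
	 *
	 *	#include <stdio.h>
	 *	#include <sys/ioctl.h>
	 *	#include <mtd/mtd-user.h>
	 *
	 *	static void list_bad_blocks(int fd, const struct mtd_info_user *mi)
	 *	{
	 *		long long offs;
	 *
	 *		for (offs = 0; offs < mi->size; offs += mi->erasesize)
	 *			if (ioctl(fd, MEMGETBADBLOCK, &offs) > 0)
	 *				printf("bad block at 0x%llx\n",
	 *				       (unsigned long long)offs);
	 *	}
	 */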
 890
 891#ifdef CONFIG_HAVE_MTD_OTP
 892	case OTPSELECT:
 893	{
 894		int mode;
 895		if (copy_from_user(&mode, argp, sizeof(int)))
 896			return -EFAULT;
 897
 898		mfi->mode = MTD_FILE_MODE_NORMAL;
 899
 900		ret = otp_select_filemode(mfi, mode);
 901
 902		file->f_pos = 0;
 903		break;
 904	}
 905
 906	case OTPGETREGIONCOUNT:
 907	case OTPGETREGIONINFO:
 908	{
 909		struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
 910		if (!buf)
 911			return -ENOMEM;
 912		switch (mfi->mode) {
 913		case MTD_FILE_MODE_OTP_FACTORY:
 914			ret = mtd_get_fact_prot_info(mtd, buf, 4096);
 915			break;
 916		case MTD_FILE_MODE_OTP_USER:
 917			ret = mtd_get_user_prot_info(mtd, buf, 4096);
 918			break;
 919		default:
 920			ret = -EINVAL;
 921			break;
 922		}
 923		if (ret >= 0) {
 924			if (cmd == OTPGETREGIONCOUNT) {
 925				int nbr = ret / sizeof(struct otp_info);
 926				ret = copy_to_user(argp, &nbr, sizeof(int));
 927			} else
 928				ret = copy_to_user(argp, buf, ret);
 929			if (ret)
 930				ret = -EFAULT;
 931		}
 932		kfree(buf);
 933		break;
 934	}
 935
 936	case OTPLOCK:
 937	{
 938		struct otp_info oinfo;
 939
 940		if (mfi->mode != MTD_FILE_MODE_OTP_USER)
 941			return -EINVAL;
 942		if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
 943			return -EFAULT;
 944		ret = mtd_lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
 945		break;
 946	}
 947#endif
 948
 949	/* This ioctl is being deprecated - it truncates the ECC layout */
 950	case ECCGETLAYOUT:
 951	{
 952		struct nand_ecclayout_user *usrlay;
 953
 954		if (!mtd->ecclayout)
 955			return -EOPNOTSUPP;
 956
 957		usrlay = kmalloc(sizeof(*usrlay), GFP_KERNEL);
 958		if (!usrlay)
 959			return -ENOMEM;
 960
 961		shrink_ecclayout(mtd->ecclayout, usrlay);
 962
 963		if (copy_to_user(argp, usrlay, sizeof(*usrlay)))
 964			ret = -EFAULT;
 965		kfree(usrlay);
 966		break;
 967	}
 968
 969	case ECCGETSTATS:
 970	{
 971		if (copy_to_user(argp, &mtd->ecc_stats,
 972				 sizeof(struct mtd_ecc_stats)))
 973			return -EFAULT;
 974		break;
 975	}
 976
 977	case MTDFILEMODE:
 978	{
 979		mfi->mode = 0;
 980
 981		switch (arg) {
 982		case MTD_FILE_MODE_OTP_FACTORY:
 983		case MTD_FILE_MODE_OTP_USER:
 984			ret = otp_select_filemode(mfi, arg);
 985			break;
 986
 987		case MTD_FILE_MODE_RAW:
 988			if (!mtd_has_oob(mtd))
 989				return -EOPNOTSUPP;
 990			mfi->mode = arg;
 991
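			/* fall through */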
 992		case MTD_FILE_MODE_NORMAL:
 993			break;
 994		default:
 995			ret = -EINVAL;
 996		}
 997		file->f_pos = 0;
 998		break;
 999	}
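
	/*
	 * Illustrative userspace sketch (not part of this driver) for the
	 * MTDFILEMODE case above, switching this open file into raw mode so
	 * that subsequent reads and writes bypass the normal ECC handling:
	 *
	 *	#include <sys/ioctl.h>
	 *	#include <mtd/mtd-user.h>
	 *
	 *	static int enter_raw_mode(int fd)
	 *	{
	 *		return ioctl(fd, MTDFILEMODE, MTD_FILE_MODE_RAW);
	 *	}
	 */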
1000
1001	case BLKPG:
1002	{
1003		ret = mtdchar_blkpg_ioctl(mtd,
1004		      (struct blkpg_ioctl_arg __user *)arg);
1005		break;
1006	}
1007
1008	case BLKRRPART:
1009	{
1010		/* No reread partition feature. Just return ok */
1011		ret = 0;
1012		break;
1013	}
1014
1015	default:
1016		ret = -ENOTTY;
1017	}
1018
1019	return ret;
1020} /* mtdchar_ioctl */
1021
1022static long mtdchar_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
1023{
1024	int ret;
1025
1026	mutex_lock(&mtd_mutex);
1027	ret = mtdchar_ioctl(file, cmd, arg);
1028	mutex_unlock(&mtd_mutex);
1029
1030	return ret;
1031}
1032
1033#ifdef CONFIG_COMPAT
1034
1035struct mtd_oob_buf32 {
1036	u_int32_t start;
1037	u_int32_t length;
1038	compat_caddr_t ptr;	/* unsigned char* */
1039};
1040
1041#define MEMWRITEOOB32		_IOWR('M', 3, struct mtd_oob_buf32)
1042#define MEMREADOOB32		_IOWR('M', 4, struct mtd_oob_buf32)
1043
1044static long mtdchar_compat_ioctl(struct file *file, unsigned int cmd,
1045	unsigned long arg)
1046{
1047	struct mtd_file_info *mfi = file->private_data;
1048	struct mtd_info *mtd = mfi->mtd;
1049	void __user *argp = compat_ptr(arg);
1050	int ret = 0;
1051
1052	mutex_lock(&mtd_mutex);
1053
1054	switch (cmd) {
1055	case MEMWRITEOOB32:
1056	{
1057		struct mtd_oob_buf32 buf;
1058		struct mtd_oob_buf32 __user *buf_user = argp;
1059
1060		if (copy_from_user(&buf, argp, sizeof(buf)))
1061			ret = -EFAULT;
1062		else
1063			ret = mtdchar_writeoob(file, mtd, buf.start,
1064				buf.length, compat_ptr(buf.ptr),
1065				&buf_user->length);
1066		break;
1067	}
1068
1069	case MEMREADOOB32:
1070	{
1071		struct mtd_oob_buf32 buf;
1072		struct mtd_oob_buf32 __user *buf_user = argp;
1073
1074		/* NOTE: the number of bytes read is passed back in buf_user->start */
1075		if (copy_from_user(&buf, argp, sizeof(buf)))
1076			ret = -EFAULT;
1077		else
1078			ret = mtdchar_readoob(file, mtd, buf.start,
1079				buf.length, compat_ptr(buf.ptr),
1080				&buf_user->start);
1081		break;
1082	}
1083	default:
1084		ret = mtdchar_ioctl(file, cmd, (unsigned long)argp);
1085	}
1086
1087	mutex_unlock(&mtd_mutex);
1088
1089	return ret;
1090}
1091
1092#endif /* CONFIG_COMPAT */
1093
1094/*
1095 * try to determine where a shared mapping can be made
1096 * - only supported for NOMMU at the moment (an MMU kernel doesn't need this
1097 *   hook)
1098 */
1099#ifndef CONFIG_MMU
1100static unsigned long mtdchar_get_unmapped_area(struct file *file,
1101					   unsigned long addr,
1102					   unsigned long len,
1103					   unsigned long pgoff,
1104					   unsigned long flags)
1105{
1106	struct mtd_file_info *mfi = file->private_data;
1107	struct mtd_info *mtd = mfi->mtd;
1108	unsigned long offset;
1109	int ret;
1110
1111	if (addr != 0)
1112		return (unsigned long) -EINVAL;
1113
1114	if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
1115		return (unsigned long) -EINVAL;
1116
1117	offset = pgoff << PAGE_SHIFT;
1118	if (offset > mtd->size - len)
1119		return (unsigned long) -EINVAL;
1120
1121	ret = mtd_get_unmapped_area(mtd, len, offset, flags);
1122	return ret == -EOPNOTSUPP ? -ENOSYS : ret;
1123}
1124#endif
1125
1126/*
1127 * set up a mapping for shared memory segments
1128 */
1129static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)
1130{
1131#ifdef CONFIG_MMU
1132	struct mtd_file_info *mfi = file->private_data;
1133	struct mtd_info *mtd = mfi->mtd;
1134	struct map_info *map = mtd->priv;
1135	unsigned long start;
1136	unsigned long off;
1137	u32 len;
1138
1139	if (mtd->type == MTD_RAM || mtd->type == MTD_ROM) {
1140		off = vma->vm_pgoff << PAGE_SHIFT;
1141		start = map->phys;
1142		len = PAGE_ALIGN((start & ~PAGE_MASK) + map->size);
1143		start &= PAGE_MASK;
1144		if ((vma->vm_end - vma->vm_start + off) > len)
1145			return -EINVAL;
1146
1147		off += start;
1148		vma->vm_pgoff = off >> PAGE_SHIFT;
1149		vma->vm_flags |= VM_IO | VM_RESERVED;
1150
1151#ifdef pgprot_noncached
1152		if (file->f_flags & O_DSYNC || off >= __pa(high_memory))
1153			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1154#endif
1155		if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
1156				       vma->vm_end - vma->vm_start,
1157				       vma->vm_page_prot))
1158			return -EAGAIN;
1159
1160		return 0;
1161	}
1162	return -ENOSYS;
1163#else
1164	return vma->vm_flags & VM_SHARED ? 0 : -ENOSYS;
1165#endif
1166}
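
/*
 * Illustrative userspace sketch (not part of this driver): on an MMU kernel
 * the mapping above is only provided for MTD_RAM/MTD_ROM devices, so a
 * read-only shared mapping of such a device might look like this (the device
 * path and the 1MiB length are made-up values):
 *
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *
 *	static void *map_mtd_rom(void)
 *	{
 *		int fd = open("/dev/mtd0", O_RDONLY);
 *
 *		if (fd < 0)
 *			return MAP_FAILED;
 *		return mmap(NULL, 0x100000, PROT_READ, MAP_SHARED, fd, 0);
 *	}
 */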
1167
1168static const struct file_operations mtd_fops = {
1169	.owner		= THIS_MODULE,
1170	.llseek		= mtdchar_lseek,
1171	.read		= mtdchar_read,
1172	.write		= mtdchar_write,
1173	.unlocked_ioctl	= mtdchar_unlocked_ioctl,
1174#ifdef CONFIG_COMPAT
1175	.compat_ioctl	= mtdchar_compat_ioctl,
1176#endif
1177	.open		= mtdchar_open,
1178	.release	= mtdchar_close,
1179	.mmap		= mtdchar_mmap,
1180#ifndef CONFIG_MMU
1181	.get_unmapped_area = mtdchar_get_unmapped_area,
1182#endif
1183};
1184
1185static const struct super_operations mtd_ops = {
1186	.drop_inode = generic_delete_inode,
1187	.statfs = simple_statfs,
1188};
1189
1190static struct dentry *mtd_inodefs_mount(struct file_system_type *fs_type,
1191				int flags, const char *dev_name, void *data)
1192{
1193	return mount_pseudo(fs_type, "mtd_inode:", &mtd_ops, NULL, MTD_INODE_FS_MAGIC);
1194}
1195
1196static struct file_system_type mtd_inodefs_type = {
1197       .name = "mtd_inodefs",
1198       .mount = mtd_inodefs_mount,
1199       .kill_sb = kill_anon_super,
1200};
1201
1202static int __init init_mtdchar(void)
1203{
1204	int ret;
1205
1206	ret = __register_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS,
1207				   "mtd", &mtd_fops);
1208	if (ret < 0) {
1209		pr_notice("Can't allocate major number %d for Memory Technology Devices.\n",
1210			  MTD_CHAR_MAJOR);
1211		return ret;
1212	}
1213
1214	ret = register_filesystem(&mtd_inodefs_type);
1215	if (ret) {
1216		pr_notice("Can't register mtd_inodefs filesystem: %d\n", ret);
1217		goto err_unregister_chdev;
1218	}
1219	return ret;
1220
1221err_unregister_chdev:
1222	__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
1223	return ret;
1224}
1225
1226static void __exit cleanup_mtdchar(void)
1227{
1228	unregister_filesystem(&mtd_inodefs_type);
1229	__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
1230}
1231
1232module_init(init_mtdchar);
1233module_exit(cleanup_mtdchar);
1234
1235MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);
1236
1237MODULE_LICENSE("GPL");
1238MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
1239MODULE_DESCRIPTION("Direct character-device access to MTD devices");