// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/device.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/compat.h>
#include <linux/mount.h>
#include <linux/blkpg.h>
#include <linux/magic.h>
#include <linux/major.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/map.h>

#include <linux/uaccess.h>

#include "mtdcore.h"

/*
 * Data structure to hold the pointer to the mtd device as well
 * as mode information of various use cases.
 */
struct mtd_file_info {
	struct mtd_info *mtd;
	enum mtd_file_modes mode;
};

static loff_t mtdchar_lseek(struct file *file, loff_t offset, int orig)
{
	struct mtd_file_info *mfi = file->private_data;

	return fixed_size_llseek(file, offset, orig, mfi->mtd->size);
}
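
/*
 * Each MTD device is registered with two character device minors:
 * 2*N for the read-write node /dev/mtdN and 2*N+1 for the read-only
 * node /dev/mtdNro.  Hence the device number below is minor >> 1, and
 * the low minor bit marks a read-only node.
 */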
static int mtdchar_open(struct inode *inode, struct file *file)
{
	int minor = iminor(inode);
	int devnum = minor >> 1;
	int ret = 0;
	struct mtd_info *mtd;
	struct mtd_file_info *mfi;

	pr_debug("MTD_open\n");

	/* You can't open the RO devices RW */
	if ((file->f_mode & FMODE_WRITE) && (minor & 1))
		return -EACCES;

	mtd = get_mtd_device(NULL, devnum);

	if (IS_ERR(mtd))
		return PTR_ERR(mtd);

	if (mtd->type == MTD_ABSENT) {
		ret = -ENODEV;
		goto out1;
	}

	/* You can't open it RW if it's not a writeable device */
	if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
		ret = -EACCES;
		goto out1;
	}

	mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
	if (!mfi) {
		ret = -ENOMEM;
		goto out1;
	}
	mfi->mtd = mtd;
	file->private_data = mfi;
	return 0;

out1:
	put_mtd_device(mtd);
	return ret;
} /* mtdchar_open */

/*====================================================================*/

static int mtdchar_close(struct inode *inode, struct file *file)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;

	pr_debug("MTD_close\n");

	/* Only sync if opened RW */
	if ((file->f_mode & FMODE_WRITE))
		mtd_sync(mtd);

	put_mtd_device(mtd);
	file->private_data = NULL;
	kfree(mfi);

	return 0;
} /* mtdchar_close */

/* Back in June 2001, dwmw2 wrote:
 *
 * FIXME: This _really_ needs to die. In 2.5, we should lock the
 * userspace buffer down and use it directly with readv/writev.
 *
 * The implementation below, using mtd_kmalloc_up_to, mitigates
 * allocation failures when the system is under low-memory situations
 * or if memory is highly fragmented at the cost of reducing the
 * performance of the requested transfer due to a smaller buffer size.
 *
 * A more complex but more memory-efficient implementation based on
 * get_user_pages and iovecs to cover extents of those pages is a
 * longer-term goal, as intimated by dwmw2 above. However, for the
 * write case, this requires yet more complex head and tail transfer
 * handling when those head and tail offsets and sizes are such that
 * alignment requirements are not met in the NAND subdriver.
 */
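
/*
 * Note that mtd_kmalloc_up_to() may return a buffer smaller than
 * requested when memory is tight; it updates 'size' to the actual
 * allocation, and the loops below then transfer at most 'size' bytes
 * of the request per iteration.
 */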
static ssize_t mtdchar_read(struct file *file, char __user *buf, size_t count,
			loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	size_t retlen;
	size_t total_retlen = 0;
	int ret = 0;
	int len;
	size_t size = count;
	char *kbuf;

	pr_debug("MTD_read\n");

	if (*ppos + count > mtd->size) {
		if (*ppos < mtd->size)
			count = mtd->size - *ppos;
		else
			count = 0;
	}

	if (!count)
		return 0;

	kbuf = mtd_kmalloc_up_to(mtd, &size);
	if (!kbuf)
		return -ENOMEM;

	while (count) {
		len = min_t(size_t, count, size);

		switch (mfi->mode) {
		case MTD_FILE_MODE_OTP_FACTORY:
			ret = mtd_read_fact_prot_reg(mtd, *ppos, len,
						     &retlen, kbuf);
			break;
		case MTD_FILE_MODE_OTP_USER:
			ret = mtd_read_user_prot_reg(mtd, *ppos, len,
						     &retlen, kbuf);
			break;
		case MTD_FILE_MODE_RAW:
		{
			struct mtd_oob_ops ops = {};

			ops.mode = MTD_OPS_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.len = len;

			ret = mtd_read_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}
		default:
			ret = mtd_read(mtd, *ppos, len, &retlen, kbuf);
		}
		/* NAND returns -EBADMSG on ECC errors, but it returns
		 * the data. For our userspace tools it is important
		 * to dump areas with ECC errors!
		 * For kernel internal usage it also might return -EUCLEAN
		 * to signal the caller that a bitflip has occurred and has
		 * been corrected by the ECC algorithm.
		 * Userspace software which accesses NAND this way
		 * must be aware of the fact that it deals with NAND.
		 */
		if (!ret || mtd_is_bitflip_or_eccerr(ret)) {
			*ppos += retlen;
			if (copy_to_user(buf, kbuf, retlen)) {
				kfree(kbuf);
				return -EFAULT;
			}
			total_retlen += retlen;

			count -= retlen;
			buf += retlen;
			if (retlen == 0)
				count = 0;
		} else {
			kfree(kbuf);
			return ret;
		}
	}

	kfree(kbuf);
	return total_retlen;
} /* mtdchar_read */

static ssize_t mtdchar_write(struct file *file, const char __user *buf, size_t count,
			loff_t *ppos)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	size_t size = count;
	char *kbuf;
	size_t retlen;
	size_t total_retlen = 0;
	int ret = 0;
	int len;

	pr_debug("MTD_write\n");

	if (*ppos >= mtd->size)
		return -ENOSPC;

	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	kbuf = mtd_kmalloc_up_to(mtd, &size);
	if (!kbuf)
		return -ENOMEM;

	while (count) {
		len = min_t(size_t, count, size);

		if (copy_from_user(kbuf, buf, len)) {
			kfree(kbuf);
			return -EFAULT;
		}

		switch (mfi->mode) {
		case MTD_FILE_MODE_OTP_FACTORY:
			ret = -EROFS;
			break;
		case MTD_FILE_MODE_OTP_USER:
			ret = mtd_write_user_prot_reg(mtd, *ppos, len,
						      &retlen, kbuf);
			break;

		case MTD_FILE_MODE_RAW:
		{
			struct mtd_oob_ops ops = {};

			ops.mode = MTD_OPS_RAW;
			ops.datbuf = kbuf;
			ops.oobbuf = NULL;
			ops.ooboffs = 0;
			ops.len = len;

			ret = mtd_write_oob(mtd, *ppos, &ops);
			retlen = ops.retlen;
			break;
		}

		default:
			ret = mtd_write(mtd, *ppos, len, &retlen, kbuf);
		}

		/*
		 * Return -ENOSPC only if no data could be written at all.
		 * Otherwise just return the number of bytes that actually
		 * have been written.
		 */
		if ((ret == -ENOSPC) && (total_retlen))
			break;

		if (!ret) {
			*ppos += retlen;
			total_retlen += retlen;
			count -= retlen;
			buf += retlen;
		} else {
			kfree(kbuf);
			return ret;
		}
	}

	kfree(kbuf);
	return total_retlen;
} /* mtdchar_write */

/*======================================================================

    IOCTL calls for getting device parameters.

======================================================================*/

static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
{
	struct mtd_info *mtd = mfi->mtd;
	size_t retlen;
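
	/*
	 * A zero-length read at offset -1 transfers no data; it serves
	 * purely as a capability probe and yields -EOPNOTSUPP when the
	 * device has no OTP support in the requested mode.
	 */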
	switch (mode) {
	case MTD_OTP_FACTORY:
		if (mtd_read_fact_prot_reg(mtd, -1, 0, &retlen, NULL) ==
				-EOPNOTSUPP)
			return -EOPNOTSUPP;

		mfi->mode = MTD_FILE_MODE_OTP_FACTORY;
		break;
	case MTD_OTP_USER:
		if (mtd_read_user_prot_reg(mtd, -1, 0, &retlen, NULL) ==
				-EOPNOTSUPP)
			return -EOPNOTSUPP;

		mfi->mode = MTD_FILE_MODE_OTP_USER;
		break;
	case MTD_OTP_OFF:
		mfi->mode = MTD_FILE_MODE_NORMAL;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd,
			uint64_t start, uint32_t length, void __user *ptr,
			uint32_t __user *retp)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_oob_ops ops = {};
	uint32_t retlen;
	int ret = 0;

	if (length > 4096)
		return -EINVAL;

	if (!master->_write_oob)
		return -EOPNOTSUPP;
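
	/*
	 * The low bits of 'start' select the byte offset inside the
	 * page's OOB area; the flash address handed to mtd_write_oob()
	 * below is rounded down to the containing page boundary.
	 */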
	ops.ooblen = length;
	ops.ooboffs = start & (mtd->writesize - 1);
	ops.datbuf = NULL;
	ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW :
		MTD_OPS_PLACE_OOB;

	if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
		return -EINVAL;

	ops.oobbuf = memdup_user(ptr, length);
	if (IS_ERR(ops.oobbuf))
		return PTR_ERR(ops.oobbuf);

	start &= ~((uint64_t)mtd->writesize - 1);
	ret = mtd_write_oob(mtd, start, &ops);

	if (ops.oobretlen > 0xFFFFFFFFU)
		ret = -EOVERFLOW;
	retlen = ops.oobretlen;
	if (copy_to_user(retp, &retlen, sizeof(length)))
		ret = -EFAULT;

	kfree(ops.oobbuf);
	return ret;
}

static int mtdchar_readoob(struct file *file, struct mtd_info *mtd,
			uint64_t start, uint32_t length, void __user *ptr,
			uint32_t __user *retp)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_oob_ops ops = {};
	int ret = 0;

	if (length > 4096)
		return -EINVAL;

	ops.ooblen = length;
	ops.ooboffs = start & (mtd->writesize - 1);
	ops.datbuf = NULL;
	ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW :
		MTD_OPS_PLACE_OOB;

	if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
		return -EINVAL;

	ops.oobbuf = kmalloc(length, GFP_KERNEL);
	if (!ops.oobbuf)
		return -ENOMEM;

	start &= ~((uint64_t)mtd->writesize - 1);
	ret = mtd_read_oob(mtd, start, &ops);

	if (put_user(ops.oobretlen, retp))
		ret = -EFAULT;
	else if (ops.oobretlen && copy_to_user(ptr, ops.oobbuf,
					       ops.oobretlen))
		ret = -EFAULT;

	kfree(ops.oobbuf);

	/*
	 * NAND returns -EBADMSG on ECC errors, but it returns the OOB
	 * data. For our userspace tools it is important to dump areas
	 * with ECC errors!
	 * For kernel internal usage it also might return -EUCLEAN
	 * to signal the caller that a bitflip has occurred and has
	 * been corrected by the ECC algorithm.
	 *
	 * Note: currently the standard NAND function, nand_read_oob_std,
	 * does not calculate ECC for the OOB area, so do not rely on
	 * this behavior unless you have replaced it with your own.
	 */
	if (mtd_is_bitflip_or_eccerr(ret))
		return 0;

	return ret;
}

/*
 * Copies (and truncates, if necessary) OOB layout information to the
 * deprecated layout struct, nand_ecclayout_user. This is necessary only to
 * support the deprecated API ioctl ECCGETLAYOUT while allowing all new
 * functionality to use mtd_ooblayout_ops flexibly (i.e. mtd_ooblayout_ops
 * can describe any kind of OOB layout with almost zero overhead from a
 * memory usage point of view).
 */
static int shrink_ecclayout(struct mtd_info *mtd,
			    struct nand_ecclayout_user *to)
{
	struct mtd_oob_region oobregion;
	int i, section = 0, ret;

	if (!mtd || !to)
		return -EINVAL;

	memset(to, 0, sizeof(*to));

	to->eccbytes = 0;
	for (i = 0; i < MTD_MAX_ECCPOS_ENTRIES;) {
		u32 eccpos;

		ret = mtd_ooblayout_ecc(mtd, section++, &oobregion);
		if (ret < 0) {
			if (ret != -ERANGE)
				return ret;

			break;
		}

		eccpos = oobregion.offset;
		for (; i < MTD_MAX_ECCPOS_ENTRIES &&
		       eccpos < oobregion.offset + oobregion.length; i++) {
			to->eccpos[i] = eccpos++;
			to->eccbytes++;
		}
	}

	for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES; i++) {
		ret = mtd_ooblayout_free(mtd, i, &oobregion);
		if (ret < 0) {
			if (ret != -ERANGE)
				return ret;

			break;
		}

		to->oobfree[i].offset = oobregion.offset;
		to->oobfree[i].length = oobregion.length;
		to->oobavail += to->oobfree[i].length;
	}

	return 0;
}
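
/*
 * Assemble the legacy nand_oobinfo representation (MEMGETOOBSEL) from
 * the mtd_ooblayout callbacks; -ERANGE from a layout callback marks
 * the end of the ECC/free region list.
 */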
static int get_oobinfo(struct mtd_info *mtd, struct nand_oobinfo *to)
{
	struct mtd_oob_region oobregion;
	int i, section = 0, ret;

	if (!mtd || !to)
		return -EINVAL;

	memset(to, 0, sizeof(*to));

	to->eccbytes = 0;
	for (i = 0; i < ARRAY_SIZE(to->eccpos);) {
		u32 eccpos;

		ret = mtd_ooblayout_ecc(mtd, section++, &oobregion);
		if (ret < 0) {
			if (ret != -ERANGE)
				return ret;

			break;
		}

		if (oobregion.length + i > ARRAY_SIZE(to->eccpos))
			return -EINVAL;

		eccpos = oobregion.offset;
		for (; eccpos < oobregion.offset + oobregion.length; i++) {
			to->eccpos[i] = eccpos++;
			to->eccbytes++;
		}
	}

	for (i = 0; i < ARRAY_SIZE(to->oobfree); i++) {
		ret = mtd_ooblayout_free(mtd, i, &oobregion);
		if (ret < 0) {
			if (ret != -ERANGE)
				return ret;

			break;
		}

		to->oobfree[i][0] = oobregion.offset;
		to->oobfree[i][1] = oobregion.length;
	}

	to->useecc = MTD_NANDECC_AUTOPLACE;

	return 0;
}

static int mtdchar_blkpg_ioctl(struct mtd_info *mtd,
			       struct blkpg_ioctl_arg *arg)
{
	struct blkpg_partition p;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (copy_from_user(&p, arg->data, sizeof(p)))
		return -EFAULT;

	switch (arg->op) {
	case BLKPG_ADD_PARTITION:

		/* Only the master mtd device can be used to add partitions */
		if (mtd_is_partition(mtd))
			return -EINVAL;

		/* Sanitize user input */
		p.devname[BLKPG_DEVNAMELTH - 1] = '\0';

		return mtd_add_partition(mtd, p.devname, p.start, p.length);

	case BLKPG_DEL_PARTITION:

		if (p.pno < 0)
			return -EINVAL;

		return mtd_del_partition(mtd, p.pno);

	default:
		return -EINVAL;
	}
}
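
/*
 * Cap the requested OOB length to what the data transfer can actually
 * consume: one page's worth of OOB for every data page touched.  For
 * example, with 2048-byte pages and 64 usable OOB bytes per page, a
 * 4096-byte write starting at offset 1024 touches three pages and can
 * therefore carry at most 3 * 64 OOB bytes.
 */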
static void adjust_oob_length(struct mtd_info *mtd, uint64_t start,
			      struct mtd_oob_ops *ops)
{
	uint32_t start_page, end_page;
	u32 oob_per_page;

	if (ops->len == 0 || ops->ooblen == 0)
		return;

	start_page = mtd_div_by_ws(start, mtd);
	end_page = mtd_div_by_ws(start + ops->len - 1, mtd);
	oob_per_page = mtd_oobavail(mtd, ops);

	ops->ooblen = min_t(size_t, ops->ooblen,
			    (end_page - start_page + 1) * oob_per_page);
}
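
/*
 * Handle the MEMWRITE ioctl.  Data and OOB are staged through kernel
 * buffers of at most one eraseblock each and pushed out with repeated
 * mtd_write_oob() calls, so an arbitrarily large request needs only a
 * bounded amount of kernel memory.
 */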
static noinline_for_stack int
mtdchar_write_ioctl(struct mtd_info *mtd, struct mtd_write_req __user *argp)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct mtd_write_req req;
	const void __user *usr_data, *usr_oob;
	uint8_t *datbuf = NULL, *oobbuf = NULL;
	size_t datbuf_len, oobbuf_len;
	int ret = 0;

	if (copy_from_user(&req, argp, sizeof(req)))
		return -EFAULT;

	usr_data = (const void __user *)(uintptr_t)req.usr_data;
	usr_oob = (const void __user *)(uintptr_t)req.usr_oob;

	if (!master->_write_oob)
		return -EOPNOTSUPP;

	if (!usr_data)
		req.len = 0;

	if (!usr_oob)
		req.ooblen = 0;

	req.len &= 0xffffffff;
	req.ooblen &= 0xffffffff;

	if (req.start + req.len > mtd->size)
		return -EINVAL;

	datbuf_len = min_t(size_t, req.len, mtd->erasesize);
	if (datbuf_len > 0) {
		datbuf = kvmalloc(datbuf_len, GFP_KERNEL);
		if (!datbuf)
			return -ENOMEM;
	}

	oobbuf_len = min_t(size_t, req.ooblen, mtd->erasesize);
	if (oobbuf_len > 0) {
		oobbuf = kvmalloc(oobbuf_len, GFP_KERNEL);
		if (!oobbuf) {
			kvfree(datbuf);
			return -ENOMEM;
		}
	}

	while (req.len > 0 || (!usr_data && req.ooblen > 0)) {
		struct mtd_oob_ops ops = {
			.mode = req.mode,
			.len = min_t(size_t, req.len, datbuf_len),
			.ooblen = min_t(size_t, req.ooblen, oobbuf_len),
			.datbuf = datbuf,
			.oobbuf = oobbuf,
		};

		/*
		 * Shorten non-page-aligned, eraseblock-sized writes so that
		 * the write ends on an eraseblock boundary. This is necessary
		 * for adjust_oob_length() to properly handle non-page-aligned
		 * writes.
		 */
		if (ops.len == mtd->erasesize)
			ops.len -= mtd_mod_by_ws(req.start + ops.len, mtd);

		/*
		 * For writes which are not OOB-only, adjust the amount of OOB
		 * data written according to the number of data pages written.
		 * This is necessary to prevent OOB data from being skipped
		 * over in data+OOB writes requiring multiple mtd_write_oob()
		 * calls to be completed.
		 */
		adjust_oob_length(mtd, req.start, &ops);

		if (copy_from_user(datbuf, usr_data, ops.len) ||
		    copy_from_user(oobbuf, usr_oob, ops.ooblen)) {
			ret = -EFAULT;
			break;
		}

		ret = mtd_write_oob(mtd, req.start, &ops);
		if (ret)
			break;

		req.start += ops.retlen;
		req.len -= ops.retlen;
		usr_data += ops.retlen;

		req.ooblen -= ops.oobretlen;
		usr_oob += ops.oobretlen;
	}

	kvfree(datbuf);
	kvfree(oobbuf);

	return ret;
}
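
/*
 * Handle the MEMREAD ioctl.  The same eraseblock-sized staging scheme
 * as mtdchar_write_ioctl() is used in the read direction; ECC
 * statistics are accumulated across iterations so that the final
 * return code reflects the whole request.
 */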
static noinline_for_stack int
mtdchar_read_ioctl(struct mtd_info *mtd, struct mtd_read_req __user *argp)
{
	struct mtd_info *master = mtd_get_master(mtd);
	struct mtd_read_req req;
	void __user *usr_data, *usr_oob;
	uint8_t *datbuf = NULL, *oobbuf = NULL;
	size_t datbuf_len, oobbuf_len;
	size_t orig_len, orig_ooblen;
	int ret = 0;

	if (copy_from_user(&req, argp, sizeof(req)))
		return -EFAULT;

	orig_len = req.len;
	orig_ooblen = req.ooblen;

	usr_data = (void __user *)(uintptr_t)req.usr_data;
	usr_oob = (void __user *)(uintptr_t)req.usr_oob;

	if (!master->_read_oob)
		return -EOPNOTSUPP;

	if (!usr_data)
		req.len = 0;

	if (!usr_oob)
		req.ooblen = 0;

	req.ecc_stats.uncorrectable_errors = 0;
	req.ecc_stats.corrected_bitflips = 0;
	req.ecc_stats.max_bitflips = 0;

	req.len &= 0xffffffff;
	req.ooblen &= 0xffffffff;

	if (req.start + req.len > mtd->size) {
		ret = -EINVAL;
		goto out;
	}

	datbuf_len = min_t(size_t, req.len, mtd->erasesize);
	if (datbuf_len > 0) {
		datbuf = kvmalloc(datbuf_len, GFP_KERNEL);
		if (!datbuf) {
			ret = -ENOMEM;
			goto out;
		}
	}

	oobbuf_len = min_t(size_t, req.ooblen, mtd->erasesize);
	if (oobbuf_len > 0) {
		oobbuf = kvmalloc(oobbuf_len, GFP_KERNEL);
		if (!oobbuf) {
			ret = -ENOMEM;
			goto out;
		}
	}

	while (req.len > 0 || (!usr_data && req.ooblen > 0)) {
		struct mtd_req_stats stats;
		struct mtd_oob_ops ops = {
			.mode = req.mode,
			.len = min_t(size_t, req.len, datbuf_len),
			.ooblen = min_t(size_t, req.ooblen, oobbuf_len),
			.datbuf = datbuf,
			.oobbuf = oobbuf,
			.stats = &stats,
		};

		/*
		 * Shorten non-page-aligned, eraseblock-sized reads so that the
		 * read ends on an eraseblock boundary. This is necessary in
		 * order to prevent OOB data for some pages from being
		 * duplicated in the output of non-page-aligned reads requiring
		 * multiple mtd_read_oob() calls to be completed.
		 */
		if (ops.len == mtd->erasesize)
			ops.len -= mtd_mod_by_ws(req.start + ops.len, mtd);

		ret = mtd_read_oob(mtd, (loff_t)req.start, &ops);

		req.ecc_stats.uncorrectable_errors +=
			stats.uncorrectable_errors;
		req.ecc_stats.corrected_bitflips += stats.corrected_bitflips;
		req.ecc_stats.max_bitflips =
			max(req.ecc_stats.max_bitflips, stats.max_bitflips);

		if (ret && !mtd_is_bitflip_or_eccerr(ret))
			break;

		if (copy_to_user(usr_data, ops.datbuf, ops.retlen) ||
		    copy_to_user(usr_oob, ops.oobbuf, ops.oobretlen)) {
			ret = -EFAULT;
			break;
		}

		req.start += ops.retlen;
		req.len -= ops.retlen;
		usr_data += ops.retlen;

		req.ooblen -= ops.oobretlen;
		usr_oob += ops.oobretlen;
	}

	/*
	 * As multiple iterations of the above loop (and therefore multiple
	 * mtd_read_oob() calls) may be necessary to complete the read request,
	 * adjust the final return code to ensure it accounts for all detected
	 * ECC errors.
	 */
	if (!ret || mtd_is_bitflip(ret)) {
		if (req.ecc_stats.uncorrectable_errors > 0)
			ret = -EBADMSG;
		else if (req.ecc_stats.corrected_bitflips > 0)
			ret = -EUCLEAN;
	}

out:
	req.len = orig_len - req.len;
	req.ooblen = orig_ooblen - req.ooblen;

	if (copy_to_user(argp, &req, sizeof(req)))
		ret = -EFAULT;

	kvfree(datbuf);
	kvfree(oobbuf);

	return ret;
}
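
/*
 * Typical userspace call sequence (an illustrative sketch only, not
 * part of this file):
 *
 *	int fd = open("/dev/mtd0", O_RDWR);
 *	struct mtd_info_user info;
 *	struct erase_info_user ei;
 *
 *	ioctl(fd, MEMGETINFO, &info);
 *	ei.start = 0;
 *	ei.length = info.erasesize;
 *	ioctl(fd, MEMERASE, &ei);	(erase first block, then write(2))
 */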
static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	struct mtd_info *master = mtd_get_master(mtd);
	void __user *argp = (void __user *)arg;
	int ret = 0;
	struct mtd_info_user info;

	pr_debug("MTD_ioctl\n");

	/*
	 * Check the file mode: "dangerous" commands require the device
	 * to be open for writing.
	 */
	switch (cmd) {
	/* "safe" commands */
	case MEMGETREGIONCOUNT:
	case MEMGETREGIONINFO:
	case MEMGETINFO:
	case MEMREADOOB:
	case MEMREADOOB64:
	case MEMREAD:
	case MEMISLOCKED:
	case MEMGETOOBSEL:
	case MEMGETBADBLOCK:
	case OTPSELECT:
	case OTPGETREGIONCOUNT:
	case OTPGETREGIONINFO:
	case ECCGETLAYOUT:
	case ECCGETSTATS:
	case MTDFILEMODE:
	case BLKPG:
	case BLKRRPART:
		break;

	/* "dangerous" commands */
	case MEMERASE:
	case MEMERASE64:
	case MEMLOCK:
	case MEMUNLOCK:
	case MEMSETBADBLOCK:
	case MEMWRITEOOB:
	case MEMWRITEOOB64:
	case MEMWRITE:
	case OTPLOCK:
	case OTPERASE:
		if (!(file->f_mode & FMODE_WRITE))
			return -EPERM;
		break;

	default:
		return -ENOTTY;
	}

	switch (cmd) {
	case MEMGETREGIONCOUNT:
		if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
			return -EFAULT;
		break;

	case MEMGETREGIONINFO:
	{
		uint32_t ur_idx;
		struct mtd_erase_region_info *kr;
		struct region_info_user __user *ur = argp;

		if (get_user(ur_idx, &(ur->regionindex)))
			return -EFAULT;

		if (ur_idx >= mtd->numeraseregions)
			return -EINVAL;

		kr = &(mtd->eraseregions[ur_idx]);

		if (put_user(kr->offset, &(ur->offset))
		    || put_user(kr->erasesize, &(ur->erasesize))
		    || put_user(kr->numblocks, &(ur->numblocks)))
			return -EFAULT;

		break;
	}

	case MEMGETINFO:
		memset(&info, 0, sizeof(info));
		info.type = mtd->type;
		info.flags = mtd->flags;
		info.size = mtd->size;
		info.erasesize = mtd->erasesize;
		info.writesize = mtd->writesize;
		info.oobsize = mtd->oobsize;
		/* The below field is obsolete */
		info.padding = 0;
		if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
			return -EFAULT;
		break;

	case MEMERASE:
	case MEMERASE64:
	{
		struct erase_info *erase;

		erase = kzalloc(sizeof(struct erase_info), GFP_KERNEL);
		if (!erase)
			ret = -ENOMEM;
		else {
			if (cmd == MEMERASE64) {
				struct erase_info_user64 einfo64;

				if (copy_from_user(&einfo64, argp,
					    sizeof(struct erase_info_user64))) {
					kfree(erase);
					return -EFAULT;
				}
				erase->addr = einfo64.start;
				erase->len = einfo64.length;
			} else {
				struct erase_info_user einfo32;

				if (copy_from_user(&einfo32, argp,
					    sizeof(struct erase_info_user))) {
					kfree(erase);
					return -EFAULT;
				}
				erase->addr = einfo32.start;
				erase->len = einfo32.length;
			}

			ret = mtd_erase(mtd, erase);
			kfree(erase);
		}
		break;
	}

	case MEMWRITEOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_buf __user *buf_user = argp;

		/* NOTE: writes return length to buf_user->length */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
				buf.ptr, &buf_user->length);
		break;
	}

	case MEMREADOOB:
	{
		struct mtd_oob_buf buf;
		struct mtd_oob_buf __user *buf_user = argp;

		/* NOTE: writes return length to buf_user->start */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
				buf.ptr, &buf_user->start);
		break;
	}

	case MEMWRITEOOB64:
	{
		struct mtd_oob_buf64 buf;
		struct mtd_oob_buf64 __user *buf_user = argp;

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
				(void __user *)(uintptr_t)buf.usr_ptr,
				&buf_user->length);
		break;
	}

	case MEMREADOOB64:
	{
		struct mtd_oob_buf64 buf;
		struct mtd_oob_buf64 __user *buf_user = argp;

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
				(void __user *)(uintptr_t)buf.usr_ptr,
				&buf_user->length);
		break;
	}

	case MEMWRITE:
	{
		ret = mtdchar_write_ioctl(mtd,
		      (struct mtd_write_req __user *)arg);
		break;
	}

	case MEMREAD:
	{
		ret = mtdchar_read_ioctl(mtd,
		      (struct mtd_read_req __user *)arg);
		break;
	}

	case MEMLOCK:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		ret = mtd_lock(mtd, einfo.start, einfo.length);
		break;
	}

	case MEMUNLOCK:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		ret = mtd_unlock(mtd, einfo.start, einfo.length);
		break;
	}

	case MEMISLOCKED:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		ret = mtd_is_locked(mtd, einfo.start, einfo.length);
		break;
	}

	/* Legacy interface */
	case MEMGETOOBSEL:
	{
		struct nand_oobinfo oi;

		if (!master->ooblayout)
			return -EOPNOTSUPP;

		ret = get_oobinfo(mtd, &oi);
		if (ret)
			return ret;

		if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
			return -EFAULT;
		break;
	}

	case MEMGETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		return mtd_block_isbad(mtd, offs);
	}

	case MEMSETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		return mtd_block_markbad(mtd, offs);
	}

	case OTPSELECT:
	{
		int mode;

		if (copy_from_user(&mode, argp, sizeof(int)))
			return -EFAULT;

		mfi->mode = MTD_FILE_MODE_NORMAL;

		ret = otp_select_filemode(mfi, mode);

		file->f_pos = 0;
		break;
	}

	case OTPGETREGIONCOUNT:
	case OTPGETREGIONINFO:
	{
		struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
		size_t retlen;

		if (!buf)
			return -ENOMEM;
		switch (mfi->mode) {
		case MTD_FILE_MODE_OTP_FACTORY:
			ret = mtd_get_fact_prot_info(mtd, 4096, &retlen, buf);
			break;
		case MTD_FILE_MODE_OTP_USER:
			ret = mtd_get_user_prot_info(mtd, 4096, &retlen, buf);
			break;
		default:
			ret = -EINVAL;
			break;
		}
		if (!ret) {
			if (cmd == OTPGETREGIONCOUNT) {
				int nbr = retlen / sizeof(struct otp_info);

				ret = copy_to_user(argp, &nbr, sizeof(int));
			} else
				ret = copy_to_user(argp, buf, retlen);
			if (ret)
				ret = -EFAULT;
		}
		kfree(buf);
		break;
	}

	case OTPLOCK:
	case OTPERASE:
	{
		struct otp_info oinfo;

		if (mfi->mode != MTD_FILE_MODE_OTP_USER)
			return -EINVAL;
		if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
			return -EFAULT;
		if (cmd == OTPLOCK)
			ret = mtd_lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
		else
			ret = mtd_erase_user_prot_reg(mtd, oinfo.start, oinfo.length);
		break;
	}

	/* This ioctl is being deprecated - it truncates the ECC layout */
	case ECCGETLAYOUT:
	{
		struct nand_ecclayout_user *usrlay;

		if (!master->ooblayout)
			return -EOPNOTSUPP;

		usrlay = kmalloc(sizeof(*usrlay), GFP_KERNEL);
		if (!usrlay)
			return -ENOMEM;

		shrink_ecclayout(mtd, usrlay);

		if (copy_to_user(argp, usrlay, sizeof(*usrlay)))
			ret = -EFAULT;
		kfree(usrlay);
		break;
	}

	case ECCGETSTATS:
	{
		if (copy_to_user(argp, &mtd->ecc_stats,
				 sizeof(struct mtd_ecc_stats)))
			return -EFAULT;
		break;
	}

	case MTDFILEMODE:
	{
		mfi->mode = 0;

		switch (arg) {
		case MTD_FILE_MODE_OTP_FACTORY:
		case MTD_FILE_MODE_OTP_USER:
			ret = otp_select_filemode(mfi, arg);
			break;

		case MTD_FILE_MODE_RAW:
			if (!mtd_has_oob(mtd))
				return -EOPNOTSUPP;
			mfi->mode = arg;
			break;

		case MTD_FILE_MODE_NORMAL:
			break;
		default:
			ret = -EINVAL;
		}
		file->f_pos = 0;
		break;
	}

	case BLKPG:
	{
		struct blkpg_ioctl_arg __user *blk_arg = argp;
		struct blkpg_ioctl_arg a;

		if (copy_from_user(&a, blk_arg, sizeof(a)))
			ret = -EFAULT;
		else
			ret = mtdchar_blkpg_ioctl(mtd, &a);
		break;
	}

	case BLKRRPART:
	{
		/* No reread partition feature. Just return ok */
		ret = 0;
		break;
	}
	}

	return ret;
} /* memory_ioctl */
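
/*
 * All ioctls on a given device are serialized against each other on
 * the master device's chrdev_lock mutex, so none of the helpers above
 * run concurrently for the same underlying chip.
 */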
static long mtdchar_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	struct mtd_info *master = mtd_get_master(mtd);
	int ret;

	mutex_lock(&master->master.chrdev_lock);
	ret = mtdchar_ioctl(file, cmd, arg);
	mutex_unlock(&master->master.chrdev_lock);

	return ret;
}

#ifdef CONFIG_COMPAT

struct mtd_oob_buf32 {
	u_int32_t start;
	u_int32_t length;
	compat_caddr_t ptr;	/* unsigned char* */
};

#define MEMWRITEOOB32	_IOWR('M', 3, struct mtd_oob_buf32)
#define MEMREADOOB32	_IOWR('M', 4, struct mtd_oob_buf32)

static long mtdchar_compat_ioctl(struct file *file, unsigned int cmd,
	unsigned long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	struct mtd_info *master = mtd_get_master(mtd);
	void __user *argp = compat_ptr(arg);
	int ret = 0;

	mutex_lock(&master->master.chrdev_lock);

	switch (cmd) {
	case MEMWRITEOOB32:
	{
		struct mtd_oob_buf32 buf;
		struct mtd_oob_buf32 __user *buf_user = argp;

		if (!(file->f_mode & FMODE_WRITE)) {
			ret = -EPERM;
			break;
		}

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_writeoob(file, mtd, buf.start,
				buf.length, compat_ptr(buf.ptr),
				&buf_user->length);
		break;
	}

	case MEMREADOOB32:
	{
		struct mtd_oob_buf32 buf;
		struct mtd_oob_buf32 __user *buf_user = argp;

		/* NOTE: writes return length to buf_user->start */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_readoob(file, mtd, buf.start,
				buf.length, compat_ptr(buf.ptr),
				&buf_user->start);
		break;
	}

	case BLKPG:
	{
		/* Convert from blkpg_compat_ioctl_arg to blkpg_ioctl_arg */
		struct blkpg_compat_ioctl_arg __user *uarg = argp;
		struct blkpg_compat_ioctl_arg compat_arg;
		struct blkpg_ioctl_arg a;

		if (copy_from_user(&compat_arg, uarg, sizeof(compat_arg))) {
			ret = -EFAULT;
			break;
		}

		memset(&a, 0, sizeof(a));
		a.op = compat_arg.op;
		a.flags = compat_arg.flags;
		a.datalen = compat_arg.datalen;
		a.data = compat_ptr(compat_arg.data);

		ret = mtdchar_blkpg_ioctl(mtd, &a);
		break;
	}

	default:
		ret = mtdchar_ioctl(file, cmd, (unsigned long)argp);
	}

	mutex_unlock(&master->master.chrdev_lock);

	return ret;
}

#endif /* CONFIG_COMPAT */

/*
 * try to determine where a shared mapping can be made
 * - only supported for NOMMU at the moment (MMU can't/doesn't copy
 *   private mappings)
 */
#ifndef CONFIG_MMU
static unsigned long mtdchar_get_unmapped_area(struct file *file,
					       unsigned long addr,
					       unsigned long len,
					       unsigned long pgoff,
					       unsigned long flags)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	unsigned long offset;
	int ret;

	if (addr != 0)
		return (unsigned long) -EINVAL;

	if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
		return (unsigned long) -EINVAL;

	offset = pgoff << PAGE_SHIFT;
	if (offset > mtd->size - len)
		return (unsigned long) -EINVAL;

	ret = mtd_get_unmapped_area(mtd, len, offset, flags);
	return ret == -EOPNOTSUPP ? -ENODEV : ret;
}

static unsigned mtdchar_mmap_capabilities(struct file *file)
{
	struct mtd_file_info *mfi = file->private_data;

	return mtd_mmap_capabilities(mfi->mtd);
}
#endif

/*
 * set up a mapping for shared memory segments
 */
static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_MMU
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	struct map_info *map = mtd->priv;

	/*
	 * This is broken because it assumes the MTD device is map-based
	 * and that mtd->priv is a valid struct map_info. It should be
	 * replaced with something that uses the mtd_get_unmapped_area()
	 * operation properly.
	 */
	if (0 /*mtd->type == MTD_RAM || mtd->type == MTD_ROM*/) {
#ifdef pgprot_noncached
		if (file->f_flags & O_DSYNC || map->phys >= __pa(high_memory))
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
		return vm_iomap_memory(vma, map->phys, map->size);
	}
	return -ENODEV;
#else
	return vma->vm_flags & VM_SHARED ? 0 : -EACCES;
#endif
}

static const struct file_operations mtd_fops = {
	.owner		= THIS_MODULE,
	.llseek		= mtdchar_lseek,
	.read		= mtdchar_read,
	.write		= mtdchar_write,
	.unlocked_ioctl	= mtdchar_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= mtdchar_compat_ioctl,
#endif
	.open		= mtdchar_open,
	.release	= mtdchar_close,
	.mmap		= mtdchar_mmap,
#ifndef CONFIG_MMU
	.get_unmapped_area = mtdchar_get_unmapped_area,
	.mmap_capabilities = mtdchar_mmap_capabilities,
#endif
};

int __init init_mtdchar(void)
{
	int ret;

	ret = __register_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS,
				"mtd", &mtd_fops);
	if (ret < 0) {
		pr_err("Can't allocate major number %d for MTD\n",
		       MTD_CHAR_MAJOR);
		return ret;
	}

	return ret;
}

void __exit cleanup_mtdchar(void)
{
	__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
}

MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);
1/*
2 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 *
18 */
19
20#include <linux/device.h>
21#include <linux/fs.h>
22#include <linux/mm.h>
23#include <linux/err.h>
24#include <linux/init.h>
25#include <linux/kernel.h>
26#include <linux/module.h>
27#include <linux/slab.h>
28#include <linux/sched.h>
29#include <linux/mutex.h>
30#include <linux/backing-dev.h>
31#include <linux/compat.h>
32#include <linux/mount.h>
33#include <linux/blkpg.h>
34#include <linux/magic.h>
35#include <linux/major.h>
36#include <linux/mtd/mtd.h>
37#include <linux/mtd/partitions.h>
38#include <linux/mtd/map.h>
39
40#include <asm/uaccess.h>
41
42#include "mtdcore.h"
43
44static DEFINE_MUTEX(mtd_mutex);
45
46/*
47 * Data structure to hold the pointer to the mtd device as well
48 * as mode information of various use cases.
49 */
50struct mtd_file_info {
51 struct mtd_info *mtd;
52 struct inode *ino;
53 enum mtd_file_modes mode;
54};
55
56static loff_t mtdchar_lseek(struct file *file, loff_t offset, int orig)
57{
58 struct mtd_file_info *mfi = file->private_data;
59 return fixed_size_llseek(file, offset, orig, mfi->mtd->size);
60}
61
62static int count;
63static struct vfsmount *mnt;
64static struct file_system_type mtd_inodefs_type;
65
66static int mtdchar_open(struct inode *inode, struct file *file)
67{
68 int minor = iminor(inode);
69 int devnum = minor >> 1;
70 int ret = 0;
71 struct mtd_info *mtd;
72 struct mtd_file_info *mfi;
73 struct inode *mtd_ino;
74
75 pr_debug("MTD_open\n");
76
77 /* You can't open the RO devices RW */
78 if ((file->f_mode & FMODE_WRITE) && (minor & 1))
79 return -EACCES;
80
81 ret = simple_pin_fs(&mtd_inodefs_type, &mnt, &count);
82 if (ret)
83 return ret;
84
85 mutex_lock(&mtd_mutex);
86 mtd = get_mtd_device(NULL, devnum);
87
88 if (IS_ERR(mtd)) {
89 ret = PTR_ERR(mtd);
90 goto out;
91 }
92
93 if (mtd->type == MTD_ABSENT) {
94 ret = -ENODEV;
95 goto out1;
96 }
97
98 mtd_ino = iget_locked(mnt->mnt_sb, devnum);
99 if (!mtd_ino) {
100 ret = -ENOMEM;
101 goto out1;
102 }
103 if (mtd_ino->i_state & I_NEW) {
104 mtd_ino->i_private = mtd;
105 mtd_ino->i_mode = S_IFCHR;
106 mtd_ino->i_data.backing_dev_info = mtd->backing_dev_info;
107 unlock_new_inode(mtd_ino);
108 }
109 file->f_mapping = mtd_ino->i_mapping;
110
111 /* You can't open it RW if it's not a writeable device */
112 if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
113 ret = -EACCES;
114 goto out2;
115 }
116
117 mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
118 if (!mfi) {
119 ret = -ENOMEM;
120 goto out2;
121 }
122 mfi->ino = mtd_ino;
123 mfi->mtd = mtd;
124 file->private_data = mfi;
125 mutex_unlock(&mtd_mutex);
126 return 0;
127
128out2:
129 iput(mtd_ino);
130out1:
131 put_mtd_device(mtd);
132out:
133 mutex_unlock(&mtd_mutex);
134 simple_release_fs(&mnt, &count);
135 return ret;
136} /* mtdchar_open */
137
138/*====================================================================*/
139
140static int mtdchar_close(struct inode *inode, struct file *file)
141{
142 struct mtd_file_info *mfi = file->private_data;
143 struct mtd_info *mtd = mfi->mtd;
144
145 pr_debug("MTD_close\n");
146
147 /* Only sync if opened RW */
148 if ((file->f_mode & FMODE_WRITE))
149 mtd_sync(mtd);
150
151 iput(mfi->ino);
152
153 put_mtd_device(mtd);
154 file->private_data = NULL;
155 kfree(mfi);
156 simple_release_fs(&mnt, &count);
157
158 return 0;
159} /* mtdchar_close */
160
161/* Back in June 2001, dwmw2 wrote:
162 *
163 * FIXME: This _really_ needs to die. In 2.5, we should lock the
164 * userspace buffer down and use it directly with readv/writev.
165 *
166 * The implementation below, using mtd_kmalloc_up_to, mitigates
167 * allocation failures when the system is under low-memory situations
168 * or if memory is highly fragmented at the cost of reducing the
169 * performance of the requested transfer due to a smaller buffer size.
170 *
171 * A more complex but more memory-efficient implementation based on
172 * get_user_pages and iovecs to cover extents of those pages is a
173 * longer-term goal, as intimated by dwmw2 above. However, for the
174 * write case, this requires yet more complex head and tail transfer
175 * handling when those head and tail offsets and sizes are such that
176 * alignment requirements are not met in the NAND subdriver.
177 */
178
179static ssize_t mtdchar_read(struct file *file, char __user *buf, size_t count,
180 loff_t *ppos)
181{
182 struct mtd_file_info *mfi = file->private_data;
183 struct mtd_info *mtd = mfi->mtd;
184 size_t retlen;
185 size_t total_retlen=0;
186 int ret=0;
187 int len;
188 size_t size = count;
189 char *kbuf;
190
191 pr_debug("MTD_read\n");
192
193 if (*ppos + count > mtd->size)
194 count = mtd->size - *ppos;
195
196 if (!count)
197 return 0;
198
199 kbuf = mtd_kmalloc_up_to(mtd, &size);
200 if (!kbuf)
201 return -ENOMEM;
202
203 while (count) {
204 len = min_t(size_t, count, size);
205
206 switch (mfi->mode) {
207 case MTD_FILE_MODE_OTP_FACTORY:
208 ret = mtd_read_fact_prot_reg(mtd, *ppos, len,
209 &retlen, kbuf);
210 break;
211 case MTD_FILE_MODE_OTP_USER:
212 ret = mtd_read_user_prot_reg(mtd, *ppos, len,
213 &retlen, kbuf);
214 break;
215 case MTD_FILE_MODE_RAW:
216 {
217 struct mtd_oob_ops ops;
218
219 ops.mode = MTD_OPS_RAW;
220 ops.datbuf = kbuf;
221 ops.oobbuf = NULL;
222 ops.len = len;
223
224 ret = mtd_read_oob(mtd, *ppos, &ops);
225 retlen = ops.retlen;
226 break;
227 }
228 default:
229 ret = mtd_read(mtd, *ppos, len, &retlen, kbuf);
230 }
231 /* Nand returns -EBADMSG on ECC errors, but it returns
232 * the data. For our userspace tools it is important
233 * to dump areas with ECC errors!
234 * For kernel internal usage it also might return -EUCLEAN
235 * to signal the caller that a bitflip has occurred and has
236 * been corrected by the ECC algorithm.
237 * Userspace software which accesses NAND this way
238 * must be aware of the fact that it deals with NAND
239 */
240 if (!ret || mtd_is_bitflip_or_eccerr(ret)) {
241 *ppos += retlen;
242 if (copy_to_user(buf, kbuf, retlen)) {
243 kfree(kbuf);
244 return -EFAULT;
245 }
246 else
247 total_retlen += retlen;
248
249 count -= retlen;
250 buf += retlen;
251 if (retlen == 0)
252 count = 0;
253 }
254 else {
255 kfree(kbuf);
256 return ret;
257 }
258
259 }
260
261 kfree(kbuf);
262 return total_retlen;
263} /* mtdchar_read */
264
265static ssize_t mtdchar_write(struct file *file, const char __user *buf, size_t count,
266 loff_t *ppos)
267{
268 struct mtd_file_info *mfi = file->private_data;
269 struct mtd_info *mtd = mfi->mtd;
270 size_t size = count;
271 char *kbuf;
272 size_t retlen;
273 size_t total_retlen=0;
274 int ret=0;
275 int len;
276
277 pr_debug("MTD_write\n");
278
279 if (*ppos == mtd->size)
280 return -ENOSPC;
281
282 if (*ppos + count > mtd->size)
283 count = mtd->size - *ppos;
284
285 if (!count)
286 return 0;
287
288 kbuf = mtd_kmalloc_up_to(mtd, &size);
289 if (!kbuf)
290 return -ENOMEM;
291
292 while (count) {
293 len = min_t(size_t, count, size);
294
295 if (copy_from_user(kbuf, buf, len)) {
296 kfree(kbuf);
297 return -EFAULT;
298 }
299
300 switch (mfi->mode) {
301 case MTD_FILE_MODE_OTP_FACTORY:
302 ret = -EROFS;
303 break;
304 case MTD_FILE_MODE_OTP_USER:
305 ret = mtd_write_user_prot_reg(mtd, *ppos, len,
306 &retlen, kbuf);
307 break;
308
309 case MTD_FILE_MODE_RAW:
310 {
311 struct mtd_oob_ops ops;
312
313 ops.mode = MTD_OPS_RAW;
314 ops.datbuf = kbuf;
315 ops.oobbuf = NULL;
316 ops.ooboffs = 0;
317 ops.len = len;
318
319 ret = mtd_write_oob(mtd, *ppos, &ops);
320 retlen = ops.retlen;
321 break;
322 }
323
324 default:
325 ret = mtd_write(mtd, *ppos, len, &retlen, kbuf);
326 }
327
328 /*
329 * Return -ENOSPC only if no data could be written at all.
330 * Otherwise just return the number of bytes that actually
331 * have been written.
332 */
333 if ((ret == -ENOSPC) && (total_retlen))
334 break;
335
336 if (!ret) {
337 *ppos += retlen;
338 total_retlen += retlen;
339 count -= retlen;
340 buf += retlen;
341 }
342 else {
343 kfree(kbuf);
344 return ret;
345 }
346 }
347
348 kfree(kbuf);
349 return total_retlen;
350} /* mtdchar_write */
351
352/*======================================================================
353
354 IOCTL calls for getting device parameters.
355
356======================================================================*/
357static void mtdchar_erase_callback (struct erase_info *instr)
358{
359 wake_up((wait_queue_head_t *)instr->priv);
360}
361
362static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
363{
364 struct mtd_info *mtd = mfi->mtd;
365 size_t retlen;
366
367 switch (mode) {
368 case MTD_OTP_FACTORY:
369 if (mtd_read_fact_prot_reg(mtd, -1, 0, &retlen, NULL) ==
370 -EOPNOTSUPP)
371 return -EOPNOTSUPP;
372
373 mfi->mode = MTD_FILE_MODE_OTP_FACTORY;
374 break;
375 case MTD_OTP_USER:
376 if (mtd_read_user_prot_reg(mtd, -1, 0, &retlen, NULL) ==
377 -EOPNOTSUPP)
378 return -EOPNOTSUPP;
379
380 mfi->mode = MTD_FILE_MODE_OTP_USER;
381 break;
382 case MTD_OTP_OFF:
383 mfi->mode = MTD_FILE_MODE_NORMAL;
384 break;
385 default:
386 return -EINVAL;
387 }
388
389 return 0;
390}
391
392static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd,
393 uint64_t start, uint32_t length, void __user *ptr,
394 uint32_t __user *retp)
395{
396 struct mtd_file_info *mfi = file->private_data;
397 struct mtd_oob_ops ops;
398 uint32_t retlen;
399 int ret = 0;
400
401 if (!(file->f_mode & FMODE_WRITE))
402 return -EPERM;
403
404 if (length > 4096)
405 return -EINVAL;
406
407 if (!mtd->_write_oob)
408 ret = -EOPNOTSUPP;
409 else
410 ret = access_ok(VERIFY_READ, ptr, length) ? 0 : -EFAULT;
411
412 if (ret)
413 return ret;
414
415 ops.ooblen = length;
416 ops.ooboffs = start & (mtd->writesize - 1);
417 ops.datbuf = NULL;
418 ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW :
419 MTD_OPS_PLACE_OOB;
420
421 if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
422 return -EINVAL;
423
424 ops.oobbuf = memdup_user(ptr, length);
425 if (IS_ERR(ops.oobbuf))
426 return PTR_ERR(ops.oobbuf);
427
428 start &= ~((uint64_t)mtd->writesize - 1);
429 ret = mtd_write_oob(mtd, start, &ops);
430
431 if (ops.oobretlen > 0xFFFFFFFFU)
432 ret = -EOVERFLOW;
433 retlen = ops.oobretlen;
434 if (copy_to_user(retp, &retlen, sizeof(length)))
435 ret = -EFAULT;
436
437 kfree(ops.oobbuf);
438 return ret;
439}
440
441static int mtdchar_readoob(struct file *file, struct mtd_info *mtd,
442 uint64_t start, uint32_t length, void __user *ptr,
443 uint32_t __user *retp)
444{
445 struct mtd_file_info *mfi = file->private_data;
446 struct mtd_oob_ops ops;
447 int ret = 0;
448
449 if (length > 4096)
450 return -EINVAL;
451
452 if (!access_ok(VERIFY_WRITE, ptr, length))
453 return -EFAULT;
454
455 ops.ooblen = length;
456 ops.ooboffs = start & (mtd->writesize - 1);
457 ops.datbuf = NULL;
458 ops.mode = (mfi->mode == MTD_FILE_MODE_RAW) ? MTD_OPS_RAW :
459 MTD_OPS_PLACE_OOB;
460
461 if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
462 return -EINVAL;
463
464 ops.oobbuf = kmalloc(length, GFP_KERNEL);
465 if (!ops.oobbuf)
466 return -ENOMEM;
467
468 start &= ~((uint64_t)mtd->writesize - 1);
469 ret = mtd_read_oob(mtd, start, &ops);
470
471 if (put_user(ops.oobretlen, retp))
472 ret = -EFAULT;
473 else if (ops.oobretlen && copy_to_user(ptr, ops.oobbuf,
474 ops.oobretlen))
475 ret = -EFAULT;
476
477 kfree(ops.oobbuf);
478
479 /*
480 * NAND returns -EBADMSG on ECC errors, but it returns the OOB
481 * data. For our userspace tools it is important to dump areas
482 * with ECC errors!
483 * For kernel internal usage it also might return -EUCLEAN
484 * to signal the caller that a bitflip has occured and has
485 * been corrected by the ECC algorithm.
486 *
487 * Note: currently the standard NAND function, nand_read_oob_std,
488 * does not calculate ECC for the OOB area, so do not rely on
489 * this behavior unless you have replaced it with your own.
490 */
491 if (mtd_is_bitflip_or_eccerr(ret))
492 return 0;
493
494 return ret;
495}
496
497/*
498 * Copies (and truncates, if necessary) data from the larger struct,
499 * nand_ecclayout, to the smaller, deprecated layout struct,
500 * nand_ecclayout_user. This is necessary only to support the deprecated
501 * API ioctl ECCGETLAYOUT while allowing all new functionality to use
502 * nand_ecclayout flexibly (i.e. the struct may change size in new
503 * releases without requiring major rewrites).
504 */
505static int shrink_ecclayout(const struct nand_ecclayout *from,
506 struct nand_ecclayout_user *to)
507{
508 int i;
509
510 if (!from || !to)
511 return -EINVAL;
512
513 memset(to, 0, sizeof(*to));
514
515 to->eccbytes = min((int)from->eccbytes, MTD_MAX_ECCPOS_ENTRIES);
516 for (i = 0; i < to->eccbytes; i++)
517 to->eccpos[i] = from->eccpos[i];
518
519 for (i = 0; i < MTD_MAX_OOBFREE_ENTRIES; i++) {
520 if (from->oobfree[i].length == 0 &&
521 from->oobfree[i].offset == 0)
522 break;
523 to->oobavail += from->oobfree[i].length;
524 to->oobfree[i] = from->oobfree[i];
525 }
526
527 return 0;
528}
529
530static int mtdchar_blkpg_ioctl(struct mtd_info *mtd,
531 struct blkpg_ioctl_arg __user *arg)
532{
533 struct blkpg_ioctl_arg a;
534 struct blkpg_partition p;
535
536 if (!capable(CAP_SYS_ADMIN))
537 return -EPERM;
538
539 if (copy_from_user(&a, arg, sizeof(struct blkpg_ioctl_arg)))
540 return -EFAULT;
541
542 if (copy_from_user(&p, a.data, sizeof(struct blkpg_partition)))
543 return -EFAULT;
544
545 switch (a.op) {
546 case BLKPG_ADD_PARTITION:
547
548 /* Only master mtd device must be used to add partitions */
549 if (mtd_is_partition(mtd))
550 return -EINVAL;
551
552 return mtd_add_partition(mtd, p.devname, p.start, p.length);
553
554 case BLKPG_DEL_PARTITION:
555
556 if (p.pno < 0)
557 return -EINVAL;
558
559 return mtd_del_partition(mtd, p.pno);
560
561 default:
562 return -EINVAL;
563 }
564}
565
566static int mtdchar_write_ioctl(struct mtd_info *mtd,
567 struct mtd_write_req __user *argp)
568{
569 struct mtd_write_req req;
570 struct mtd_oob_ops ops;
571 void __user *usr_data, *usr_oob;
572 int ret;
573
574 if (copy_from_user(&req, argp, sizeof(req)) ||
575 !access_ok(VERIFY_READ, req.usr_data, req.len) ||
576 !access_ok(VERIFY_READ, req.usr_oob, req.ooblen))
577 return -EFAULT;
578 if (!mtd->_write_oob)
579 return -EOPNOTSUPP;
580
581 ops.mode = req.mode;
582 ops.len = (size_t)req.len;
583 ops.ooblen = (size_t)req.ooblen;
584 ops.ooboffs = 0;
585
586 usr_data = (void __user *)(uintptr_t)req.usr_data;
587 usr_oob = (void __user *)(uintptr_t)req.usr_oob;
588
589 if (req.usr_data) {
590 ops.datbuf = memdup_user(usr_data, ops.len);
591 if (IS_ERR(ops.datbuf))
592 return PTR_ERR(ops.datbuf);
593 } else {
594 ops.datbuf = NULL;
595 }
596
597 if (req.usr_oob) {
598 ops.oobbuf = memdup_user(usr_oob, ops.ooblen);
599 if (IS_ERR(ops.oobbuf)) {
600 kfree(ops.datbuf);
601 return PTR_ERR(ops.oobbuf);
602 }
603 } else {
604 ops.oobbuf = NULL;
605 }
606
607 ret = mtd_write_oob(mtd, (loff_t)req.start, &ops);
608
609 kfree(ops.datbuf);
610 kfree(ops.oobbuf);
611
612 return ret;
613}
614
615static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
616{
617 struct mtd_file_info *mfi = file->private_data;
618 struct mtd_info *mtd = mfi->mtd;
619 void __user *argp = (void __user *)arg;
620 int ret = 0;
621 u_long size;
622 struct mtd_info_user info;
623
624 pr_debug("MTD_ioctl\n");
625
626 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
627 if (cmd & IOC_IN) {
628 if (!access_ok(VERIFY_READ, argp, size))
629 return -EFAULT;
630 }
631 if (cmd & IOC_OUT) {
632 if (!access_ok(VERIFY_WRITE, argp, size))
633 return -EFAULT;
634 }
635
636 switch (cmd) {
637 case MEMGETREGIONCOUNT:
638 if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
639 return -EFAULT;
640 break;
641
642 case MEMGETREGIONINFO:
643 {
644 uint32_t ur_idx;
645 struct mtd_erase_region_info *kr;
646 struct region_info_user __user *ur = argp;
647
648 if (get_user(ur_idx, &(ur->regionindex)))
649 return -EFAULT;
650
651 if (ur_idx >= mtd->numeraseregions)
652 return -EINVAL;
653
654 kr = &(mtd->eraseregions[ur_idx]);
655
656 if (put_user(kr->offset, &(ur->offset))
657 || put_user(kr->erasesize, &(ur->erasesize))
658 || put_user(kr->numblocks, &(ur->numblocks)))
659 return -EFAULT;
660
661 break;
662 }
663
664 case MEMGETINFO:
665 memset(&info, 0, sizeof(info));
666 info.type = mtd->type;
667 info.flags = mtd->flags;
668 info.size = mtd->size;
669 info.erasesize = mtd->erasesize;
670 info.writesize = mtd->writesize;
671 info.oobsize = mtd->oobsize;
672 /* The below field is obsolete */
673 info.padding = 0;
674 if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
675 return -EFAULT;
676 break;
677
678 case MEMERASE:
679 case MEMERASE64:
680 {
681 struct erase_info *erase;
682
683 if(!(file->f_mode & FMODE_WRITE))
684 return -EPERM;
685
686 erase=kzalloc(sizeof(struct erase_info),GFP_KERNEL);
687 if (!erase)
688 ret = -ENOMEM;
689 else {
690 wait_queue_head_t waitq;
691 DECLARE_WAITQUEUE(wait, current);
692
693 init_waitqueue_head(&waitq);
694
695 if (cmd == MEMERASE64) {
696 struct erase_info_user64 einfo64;
697
698 if (copy_from_user(&einfo64, argp,
699 sizeof(struct erase_info_user64))) {
700 kfree(erase);
701 return -EFAULT;
702 }
703 erase->addr = einfo64.start;
704 erase->len = einfo64.length;
705 } else {
706 struct erase_info_user einfo32;
707
708 if (copy_from_user(&einfo32, argp,
709 sizeof(struct erase_info_user))) {
710 kfree(erase);
711 return -EFAULT;
712 }
713 erase->addr = einfo32.start;
714 erase->len = einfo32.length;
715 }
716 erase->mtd = mtd;
717 erase->callback = mtdchar_erase_callback;
718 erase->priv = (unsigned long)&waitq;
719
720 /*
721 FIXME: Allow INTERRUPTIBLE. Which means
722 not having the wait_queue head on the stack.
723
724 If the wq_head is on the stack, and we
725 leave because we got interrupted, then the
726 wq_head is no longer there when the
727 callback routine tries to wake us up.
728 */
729 ret = mtd_erase(mtd, erase);
730 if (!ret) {
731 set_current_state(TASK_UNINTERRUPTIBLE);
732 add_wait_queue(&waitq, &wait);
733 if (erase->state != MTD_ERASE_DONE &&
734 erase->state != MTD_ERASE_FAILED)
735 schedule();
736 remove_wait_queue(&waitq, &wait);
737 set_current_state(TASK_RUNNING);
738
739 ret = (erase->state == MTD_ERASE_FAILED)?-EIO:0;
740 }
741 kfree(erase);
742 }
743 break;
744 }
745
746 case MEMWRITEOOB:
747 {
748 struct mtd_oob_buf buf;
749 struct mtd_oob_buf __user *buf_user = argp;
750
751 /* NOTE: writes return length to buf_user->length */
752 if (copy_from_user(&buf, argp, sizeof(buf)))
753 ret = -EFAULT;
754 else
755 ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
756 buf.ptr, &buf_user->length);
757 break;
758 }
759
760 case MEMREADOOB:
761 {
762 struct mtd_oob_buf buf;
763 struct mtd_oob_buf __user *buf_user = argp;
764
765 /* NOTE: writes return length to buf_user->start */
766 if (copy_from_user(&buf, argp, sizeof(buf)))
767 ret = -EFAULT;
768 else
769 ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
770 buf.ptr, &buf_user->start);
771 break;
772 }
773
774 case MEMWRITEOOB64:
775 {
776 struct mtd_oob_buf64 buf;
777 struct mtd_oob_buf64 __user *buf_user = argp;
778
779 if (copy_from_user(&buf, argp, sizeof(buf)))
780 ret = -EFAULT;
781 else
782 ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
783 (void __user *)(uintptr_t)buf.usr_ptr,
784 &buf_user->length);
785 break;
786 }
787
788 case MEMREADOOB64:
789 {
790 struct mtd_oob_buf64 buf;
791 struct mtd_oob_buf64 __user *buf_user = argp;
792
793 if (copy_from_user(&buf, argp, sizeof(buf)))
794 ret = -EFAULT;
795 else
796 ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
797 (void __user *)(uintptr_t)buf.usr_ptr,
798 &buf_user->length);
799 break;
800 }

	case MEMWRITE:
	{
		ret = mtdchar_write_ioctl(mtd,
		      (struct mtd_write_req __user *)arg);
		break;
	}

	case MEMLOCK:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		ret = mtd_lock(mtd, einfo.start, einfo.length);
		break;
	}

	case MEMUNLOCK:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		ret = mtd_unlock(mtd, einfo.start, einfo.length);
		break;
	}

	case MEMISLOCKED:
	{
		struct erase_info_user einfo;

		if (copy_from_user(&einfo, argp, sizeof(einfo)))
			return -EFAULT;

		ret = mtd_is_locked(mtd, einfo.start, einfo.length);
		break;
	}

	/* Legacy interface */
	case MEMGETOOBSEL:
	{
		struct nand_oobinfo oi;

		if (!mtd->ecclayout)
			return -EOPNOTSUPP;
		if (mtd->ecclayout->eccbytes > ARRAY_SIZE(oi.eccpos))
			return -EINVAL;

		oi.useecc = MTD_NANDECC_AUTOPLACE;
		memcpy(&oi.eccpos, mtd->ecclayout->eccpos, sizeof(oi.eccpos));
		memcpy(&oi.oobfree, mtd->ecclayout->oobfree,
		       sizeof(oi.oobfree));
		oi.eccbytes = mtd->ecclayout->eccbytes;

		if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
			return -EFAULT;
		break;
	}

	case MEMGETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		return mtd_block_isbad(mtd, offs);
	}

	case MEMSETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		return mtd_block_markbad(mtd, offs);
	}
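
	/*
	 * Illustrative userspace use of the two bad-block ioctls above
	 * (hypothetical variables, error handling elided):
	 *
	 *	loff_t offs = (loff_t)block * meminfo.erasesize;
	 *
	 *	if (ioctl(fd, MEMGETBADBLOCK, &offs) > 0)
	 *		skip_block();		// already marked bad
	 *	else if (write_to_block_failed)
	 *		ioctl(fd, MEMSETBADBLOCK, &offs);
	 */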

	case OTPSELECT:
	{
		int mode;

		if (copy_from_user(&mode, argp, sizeof(int)))
			return -EFAULT;

		mfi->mode = MTD_FILE_MODE_NORMAL;

		ret = otp_select_filemode(mfi, mode);

		file->f_pos = 0;
		break;
	}

	case OTPGETREGIONCOUNT:
	case OTPGETREGIONINFO:
	{
		struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
		size_t retlen;

		if (!buf)
			return -ENOMEM;
		switch (mfi->mode) {
		case MTD_FILE_MODE_OTP_FACTORY:
			ret = mtd_get_fact_prot_info(mtd, 4096, &retlen, buf);
			break;
		case MTD_FILE_MODE_OTP_USER:
			ret = mtd_get_user_prot_info(mtd, 4096, &retlen, buf);
			break;
		default:
			ret = -EINVAL;
			break;
		}
		if (!ret) {
			if (cmd == OTPGETREGIONCOUNT) {
				int nbr = retlen / sizeof(struct otp_info);

				ret = copy_to_user(argp, &nbr, sizeof(int));
			} else {
				ret = copy_to_user(argp, buf, retlen);
			}
			if (ret)
				ret = -EFAULT;
		}
		kfree(buf);
		break;
	}

	case OTPLOCK:
	{
		struct otp_info oinfo;

		if (mfi->mode != MTD_FILE_MODE_OTP_USER)
			return -EINVAL;
		if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
			return -EFAULT;
		ret = mtd_lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
		break;
	}
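
	/*
	 * Typical (illustrative) userspace OTP flow against the cases
	 * above; note the region info passes through a fixed 4 KiB kernel
	 * buffer, so very large region lists would not fit:
	 *
	 *	int mode = MTD_OTP_USER;
	 *	int count;
	 *
	 *	ioctl(fd, OTPSELECT, &mode);	// switch fd into OTP mode
	 *	ioctl(fd, OTPGETREGIONCOUNT, &count);
	 *	// read()/write() now act on the user OTP area
	 */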

	/* This ioctl is being deprecated - it truncates the ECC layout */
	case ECCGETLAYOUT:
	{
		struct nand_ecclayout_user *usrlay;

		if (!mtd->ecclayout)
			return -EOPNOTSUPP;

		usrlay = kmalloc(sizeof(*usrlay), GFP_KERNEL);
		if (!usrlay)
			return -ENOMEM;

		shrink_ecclayout(mtd->ecclayout, usrlay);

		if (copy_to_user(argp, usrlay, sizeof(*usrlay)))
			ret = -EFAULT;
		kfree(usrlay);
		break;
	}

	case ECCGETSTATS:
	{
		if (copy_to_user(argp, &mtd->ecc_stats,
				 sizeof(struct mtd_ecc_stats)))
			return -EFAULT;
		break;
	}

	case MTDFILEMODE:
	{
		mfi->mode = MTD_FILE_MODE_NORMAL;

		switch (arg) {
		case MTD_FILE_MODE_OTP_FACTORY:
		case MTD_FILE_MODE_OTP_USER:
			ret = otp_select_filemode(mfi, arg);
			break;

		case MTD_FILE_MODE_RAW:
			if (!mtd_has_oob(mtd))
				return -EOPNOTSUPP;
			mfi->mode = arg;
			/* fall through */

		case MTD_FILE_MODE_NORMAL:
			break;
		default:
			ret = -EINVAL;
		}
		file->f_pos = 0;
		break;
	}
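
	/*
	 * Illustrative MTDFILEMODE use: switch the descriptor into raw mode
	 * so subsequent reads and writes bypass ECC (handy for flash
	 * diagnostics; hypothetical variables, error handling elided):
	 *
	 *	ioctl(fd, MTDFILEMODE, MTD_FILE_MODE_RAW);
	 *	pread(fd, rawbuf, mtd.writesize, page_offset);
	 *	ioctl(fd, MTDFILEMODE, MTD_FILE_MODE_NORMAL);
	 */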

	case BLKPG:
	{
		ret = mtdchar_blkpg_ioctl(mtd,
		      (struct blkpg_ioctl_arg __user *)arg);
		break;
	}

	case BLKRRPART:
	{
		/* No reread partition feature. Just return ok */
		ret = 0;
		break;
	}

	default:
		ret = -ENOTTY;
	}

	return ret;
} /* mtdchar_ioctl */

static long mtdchar_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
{
	int ret;

	mutex_lock(&mtd_mutex);
	ret = mtdchar_ioctl(file, cmd, arg);
	mutex_unlock(&mtd_mutex);

	return ret;
}

#ifdef CONFIG_COMPAT

struct mtd_oob_buf32 {
	u_int32_t start;
	u_int32_t length;
	compat_caddr_t ptr;	/* unsigned char* */
};

#define MEMWRITEOOB32		_IOWR('M', 3, struct mtd_oob_buf32)
#define MEMREADOOB32		_IOWR('M', 4, struct mtd_oob_buf32)
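
/*
 * The 32-bit layout of struct mtd_oob_buf differs from the native 64-bit
 * one (compat_caddr_t keeps the pointer field 4 bytes wide), and because
 * _IOWR() encodes the argument size into the command number, a 32-bit
 * process issues distinct MEMWRITEOOB/MEMREADOOB values. The definitions
 * above reproduce those values so the handler below can translate them.
 */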

static long mtdchar_compat_ioctl(struct file *file, unsigned int cmd,
	unsigned long arg)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	void __user *argp = compat_ptr(arg);
	int ret = 0;

	mutex_lock(&mtd_mutex);

	switch (cmd) {
	case MEMWRITEOOB32:
	{
		struct mtd_oob_buf32 buf;
		struct mtd_oob_buf32 __user *buf_user = argp;

		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_writeoob(file, mtd, buf.start,
				buf.length, compat_ptr(buf.ptr),
				&buf_user->length);
		break;
	}

	case MEMREADOOB32:
	{
		struct mtd_oob_buf32 buf;
		struct mtd_oob_buf32 __user *buf_user = argp;

		/* NOTE: writes return length to buf_user->start */
		if (copy_from_user(&buf, argp, sizeof(buf)))
			ret = -EFAULT;
		else
			ret = mtdchar_readoob(file, mtd, buf.start,
				buf.length, compat_ptr(buf.ptr),
				&buf_user->start);
		break;
	}
	default:
		ret = mtdchar_ioctl(file, cmd, (unsigned long)argp);
	}

	mutex_unlock(&mtd_mutex);

	return ret;
}

#endif /* CONFIG_COMPAT */

/*
 * try to determine where a shared mapping can be made
 * - only supported for NOMMU at the moment (MMU can't copy private
 *   mappings)
 */
#ifndef CONFIG_MMU
static unsigned long mtdchar_get_unmapped_area(struct file *file,
					       unsigned long addr,
					       unsigned long len,
					       unsigned long pgoff,
					       unsigned long flags)
{
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	unsigned long offset;
	int ret;

	if (addr != 0)
		return (unsigned long) -EINVAL;

	if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
		return (unsigned long) -EINVAL;

	offset = pgoff << PAGE_SHIFT;
	if (offset > mtd->size - len)
		return (unsigned long) -EINVAL;

	ret = mtd_get_unmapped_area(mtd, len, offset, flags);
	return ret == -EOPNOTSUPP ? -ENODEV : ret;
}
#endif

/*
 * set up a mapping for shared memory segments
 */
static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_MMU
	struct mtd_file_info *mfi = file->private_data;
	struct mtd_info *mtd = mfi->mtd;
	struct map_info *map = mtd->priv;

	/*
	 * This is broken because it assumes the MTD device is map-based
	 * and that mtd->priv is a valid struct map_info. It should be
	 * replaced with something that uses the mtd_get_unmapped_area()
	 * operation properly.
	 */
	if (0 /*mtd->type == MTD_RAM || mtd->type == MTD_ROM*/) {
#ifdef pgprot_noncached
		if (file->f_flags & O_DSYNC || map->phys >= __pa(high_memory))
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
		return vm_iomap_memory(vma, map->phys, map->size);
	}
	return -ENODEV;
#else
	return vma->vm_flags & VM_SHARED ? 0 : -EACCES;
#endif
}

static const struct file_operations mtd_fops = {
	.owner		= THIS_MODULE,
	.llseek		= mtdchar_lseek,
	.read		= mtdchar_read,
	.write		= mtdchar_write,
	.unlocked_ioctl	= mtdchar_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= mtdchar_compat_ioctl,
#endif
	.open		= mtdchar_open,
	.release	= mtdchar_close,
	.mmap		= mtdchar_mmap,
#ifndef CONFIG_MMU
	.get_unmapped_area = mtdchar_get_unmapped_area,
#endif
};

static const struct super_operations mtd_ops = {
	.drop_inode = generic_delete_inode,
	.statfs = simple_statfs,
};

static struct dentry *mtd_inodefs_mount(struct file_system_type *fs_type,
				int flags, const char *dev_name, void *data)
{
	return mount_pseudo(fs_type, "mtd_inode:", &mtd_ops, NULL,
			    MTD_INODE_FS_MAGIC);
}

static struct file_system_type mtd_inodefs_type = {
	.name = "mtd_inodefs",
	.mount = mtd_inodefs_mount,
	.kill_sb = kill_anon_super,
};
MODULE_ALIAS_FS("mtd_inodefs");

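/*
 * Minor number layout (see mtdchar_open() above): minor 2*N is the
 * read/write node for MTD device N, minor 2*N+1 is its read-only twin.
 * Illustrative static device nodes (MTD_CHAR_MAJOR is 90):
 *
 *	mknod /dev/mtd0   c 90 0	# mtd0, read/write
 *	mknod /dev/mtd0ro c 90 1	# mtd0, read-only
 */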
int __init init_mtdchar(void)
{
	int ret;

	ret = __register_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS,
				"mtd", &mtd_fops);
	if (ret < 0) {
		pr_err("Can't allocate major number %d for MTD\n",
		       MTD_CHAR_MAJOR);
		return ret;
	}

	ret = register_filesystem(&mtd_inodefs_type);
	if (ret) {
		pr_err("Can't register mtd_inodefs filesystem, error %d\n",
		       ret);
		goto err_unregister_chdev;
	}

	return ret;

err_unregister_chdev:
	__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
	return ret;
}

void __exit cleanup_mtdchar(void)
{
	unregister_filesystem(&mtd_inodefs_type);
	__unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd");
}

MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);