Loading...
1/*
2 * linux/drivers/char/mem.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 *
6 * Added devfs support.
7 * Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
8 * Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
9 */
10
11#include <linux/mm.h>
12#include <linux/miscdevice.h>
13#include <linux/slab.h>
14#include <linux/vmalloc.h>
15#include <linux/mman.h>
16#include <linux/random.h>
17#include <linux/init.h>
18#include <linux/raw.h>
19#include <linux/tty.h>
20#include <linux/capability.h>
21#include <linux/ptrace.h>
22#include <linux/device.h>
23#include <linux/highmem.h>
24#include <linux/backing-dev.h>
25#include <linux/shmem_fs.h>
26#include <linux/splice.h>
27#include <linux/pfn.h>
28#include <linux/export.h>
29#include <linux/io.h>
30#include <linux/uio.h>
31
32#include <linux/uaccess.h>
33
34#ifdef CONFIG_IA64
35# include <linux/efi.h>
36#endif
37
38#define DEVPORT_MINOR 4
39
40static inline unsigned long size_inside_page(unsigned long start,
41 unsigned long size)
42{
43 unsigned long sz;
44
45 sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));
46
47 return min(sz, size);
48}
49
50#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
51static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
52{
53 return addr + count <= __pa(high_memory);
54}
55
56static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
57{
58 return 1;
59}
60#endif
61
62#ifdef CONFIG_STRICT_DEVMEM
63static inline int range_is_allowed(unsigned long pfn, unsigned long size)
64{
65 u64 from = ((u64)pfn) << PAGE_SHIFT;
66 u64 to = from + size;
67 u64 cursor = from;
68
69 while (cursor < to) {
70 if (!devmem_is_allowed(pfn))
71 return 0;
72 cursor += PAGE_SIZE;
73 pfn++;
74 }
75 return 1;
76}
77#else
78static inline int range_is_allowed(unsigned long pfn, unsigned long size)
79{
80 return 1;
81}
82#endif
83
84#ifndef unxlate_dev_mem_ptr
85#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
86void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
87{
88}
89#endif
90
91/*
92 * This funcion reads the *physical* memory. The f_pos points directly to the
93 * memory location.
94 */
95static ssize_t read_mem(struct file *file, char __user *buf,
96 size_t count, loff_t *ppos)
97{
98 phys_addr_t p = *ppos;
99 ssize_t read, sz;
100 void *ptr;
101
102 if (p != *ppos)
103 return 0;
104
105 if (!valid_phys_addr_range(p, count))
106 return -EFAULT;
107 read = 0;
108#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
109 /* we don't have page 0 mapped on sparc and m68k.. */
110 if (p < PAGE_SIZE) {
111 sz = size_inside_page(p, count);
112 if (sz > 0) {
113 if (clear_user(buf, sz))
114 return -EFAULT;
115 buf += sz;
116 p += sz;
117 count -= sz;
118 read += sz;
119 }
120 }
121#endif
122
123 while (count > 0) {
124 unsigned long remaining;
125
126 sz = size_inside_page(p, count);
127
128 if (!range_is_allowed(p >> PAGE_SHIFT, count))
129 return -EPERM;
130
131 /*
132 * On ia64 if a page has been mapped somewhere as uncached, then
133 * it must also be accessed uncached by the kernel or data
134 * corruption may occur.
135 */
136 ptr = xlate_dev_mem_ptr(p);
137 if (!ptr)
138 return -EFAULT;
139
140 remaining = copy_to_user(buf, ptr, sz);
141 unxlate_dev_mem_ptr(p, ptr);
142 if (remaining)
143 return -EFAULT;
144
145 buf += sz;
146 p += sz;
147 count -= sz;
148 read += sz;
149 }
150
151 *ppos += read;
152 return read;
153}
154
155static ssize_t write_mem(struct file *file, const char __user *buf,
156 size_t count, loff_t *ppos)
157{
158 phys_addr_t p = *ppos;
159 ssize_t written, sz;
160 unsigned long copied;
161 void *ptr;
162
163 if (p != *ppos)
164 return -EFBIG;
165
166 if (!valid_phys_addr_range(p, count))
167 return -EFAULT;
168
169 written = 0;
170
171#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
172 /* we don't have page 0 mapped on sparc and m68k.. */
173 if (p < PAGE_SIZE) {
174 sz = size_inside_page(p, count);
175 /* Hmm. Do something? */
176 buf += sz;
177 p += sz;
178 count -= sz;
179 written += sz;
180 }
181#endif
182
183 while (count > 0) {
184 sz = size_inside_page(p, count);
185
186 if (!range_is_allowed(p >> PAGE_SHIFT, sz))
187 return -EPERM;
188
189 /*
190 * On ia64 if a page has been mapped somewhere as uncached, then
191 * it must also be accessed uncached by the kernel or data
192 * corruption may occur.
193 */
194 ptr = xlate_dev_mem_ptr(p);
195 if (!ptr) {
196 if (written)
197 break;
198 return -EFAULT;
199 }
200
201 copied = copy_from_user(ptr, buf, sz);
202 unxlate_dev_mem_ptr(p, ptr);
203 if (copied) {
204 written += sz - copied;
205 if (written)
206 break;
207 return -EFAULT;
208 }
209
210 buf += sz;
211 p += sz;
212 count -= sz;
213 written += sz;
214 }
215
216 *ppos += written;
217 return written;
218}
219
220int __weak phys_mem_access_prot_allowed(struct file *file,
221 unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
222{
223 return 1;
224}
225
226#ifndef __HAVE_PHYS_MEM_ACCESS_PROT
227
228/*
229 * Architectures vary in how they handle caching for addresses
230 * outside of main memory.
231 *
232 */
233#ifdef pgprot_noncached
234static int uncached_access(struct file *file, phys_addr_t addr)
235{
236#if defined(CONFIG_IA64)
237 /*
238 * On ia64, we ignore O_DSYNC because we cannot tolerate memory
239 * attribute aliases.
240 */
241 return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
242#elif defined(CONFIG_MIPS)
243 {
244 extern int __uncached_access(struct file *file,
245 unsigned long addr);
246
247 return __uncached_access(file, addr);
248 }
249#else
250 /*
251 * Accessing memory above the top the kernel knows about or through a
252 * file pointer
253 * that was marked O_DSYNC will be done non-cached.
254 */
255 if (file->f_flags & O_DSYNC)
256 return 1;
257 return addr >= __pa(high_memory);
258#endif
259}
260#endif
261
262static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
263 unsigned long size, pgprot_t vma_prot)
264{
265#ifdef pgprot_noncached
266 phys_addr_t offset = pfn << PAGE_SHIFT;
267
268 if (uncached_access(file, offset))
269 return pgprot_noncached(vma_prot);
270#endif
271 return vma_prot;
272}
273#endif
274
275#ifndef CONFIG_MMU
276static unsigned long get_unmapped_area_mem(struct file *file,
277 unsigned long addr,
278 unsigned long len,
279 unsigned long pgoff,
280 unsigned long flags)
281{
282 if (!valid_mmap_phys_addr_range(pgoff, len))
283 return (unsigned long) -EINVAL;
284 return pgoff << PAGE_SHIFT;
285}
286
287/* permit direct mmap, for read, write or exec */
288static unsigned memory_mmap_capabilities(struct file *file)
289{
290 return NOMMU_MAP_DIRECT |
291 NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
292}
293
294static unsigned zero_mmap_capabilities(struct file *file)
295{
296 return NOMMU_MAP_COPY;
297}
298
299/* can't do an in-place private mapping if there's no MMU */
300static inline int private_mapping_ok(struct vm_area_struct *vma)
301{
302 return vma->vm_flags & VM_MAYSHARE;
303}
304#else
305
306static inline int private_mapping_ok(struct vm_area_struct *vma)
307{
308 return 1;
309}
310#endif
311
312static const struct vm_operations_struct mmap_mem_ops = {
313#ifdef CONFIG_HAVE_IOREMAP_PROT
314 .access = generic_access_phys
315#endif
316};
317
318static int mmap_mem(struct file *file, struct vm_area_struct *vma)
319{
320 size_t size = vma->vm_end - vma->vm_start;
321
322 if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
323 return -EINVAL;
324
325 if (!private_mapping_ok(vma))
326 return -ENOSYS;
327
328 if (!range_is_allowed(vma->vm_pgoff, size))
329 return -EPERM;
330
331 if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
332 &vma->vm_page_prot))
333 return -EINVAL;
334
335 vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
336 size,
337 vma->vm_page_prot);
338
339 vma->vm_ops = &mmap_mem_ops;
340
341 /* Remap-pfn-range will mark the range VM_IO */
342 if (remap_pfn_range(vma,
343 vma->vm_start,
344 vma->vm_pgoff,
345 size,
346 vma->vm_page_prot)) {
347 return -EAGAIN;
348 }
349 return 0;
350}
351
352static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
353{
354 unsigned long pfn;
355
356 /* Turn a kernel-virtual address into a physical page frame */
357 pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;
358
359 /*
360 * RED-PEN: on some architectures there is more mapped memory than
361 * available in mem_map which pfn_valid checks for. Perhaps should add a
362 * new macro here.
363 *
364 * RED-PEN: vmalloc is not supported right now.
365 */
366 if (!pfn_valid(pfn))
367 return -EIO;
368
369 vma->vm_pgoff = pfn;
370 return mmap_mem(file, vma);
371}
372
373/*
374 * This function reads the *virtual* memory as seen by the kernel.
375 */
376static ssize_t read_kmem(struct file *file, char __user *buf,
377 size_t count, loff_t *ppos)
378{
379 unsigned long p = *ppos;
380 ssize_t low_count, read, sz;
381 char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
382 int err = 0;
383
384 read = 0;
385 if (p < (unsigned long) high_memory) {
386 low_count = count;
387 if (count > (unsigned long)high_memory - p)
388 low_count = (unsigned long)high_memory - p;
389
390#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
391 /* we don't have page 0 mapped on sparc and m68k.. */
392 if (p < PAGE_SIZE && low_count > 0) {
393 sz = size_inside_page(p, low_count);
394 if (clear_user(buf, sz))
395 return -EFAULT;
396 buf += sz;
397 p += sz;
398 read += sz;
399 low_count -= sz;
400 count -= sz;
401 }
402#endif
403 while (low_count > 0) {
404 sz = size_inside_page(p, low_count);
405
406 /*
407 * On ia64 if a page has been mapped somewhere as
408 * uncached, then it must also be accessed uncached
409 * by the kernel or data corruption may occur
410 */
411 kbuf = xlate_dev_kmem_ptr((void *)p);
412 if (!virt_addr_valid(kbuf))
413 return -ENXIO;
414
415 if (copy_to_user(buf, kbuf, sz))
416 return -EFAULT;
417 buf += sz;
418 p += sz;
419 read += sz;
420 low_count -= sz;
421 count -= sz;
422 }
423 }
424
425 if (count > 0) {
426 kbuf = (char *)__get_free_page(GFP_KERNEL);
427 if (!kbuf)
428 return -ENOMEM;
429 while (count > 0) {
430 sz = size_inside_page(p, count);
431 if (!is_vmalloc_or_module_addr((void *)p)) {
432 err = -ENXIO;
433 break;
434 }
435 sz = vread(kbuf, (char *)p, sz);
436 if (!sz)
437 break;
438 if (copy_to_user(buf, kbuf, sz)) {
439 err = -EFAULT;
440 break;
441 }
442 count -= sz;
443 buf += sz;
444 read += sz;
445 p += sz;
446 }
447 free_page((unsigned long)kbuf);
448 }
449 *ppos = p;
450 return read ? read : err;
451}
452
453
454static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
455 size_t count, loff_t *ppos)
456{
457 ssize_t written, sz;
458 unsigned long copied;
459
460 written = 0;
461#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
462 /* we don't have page 0 mapped on sparc and m68k.. */
463 if (p < PAGE_SIZE) {
464 sz = size_inside_page(p, count);
465 /* Hmm. Do something? */
466 buf += sz;
467 p += sz;
468 count -= sz;
469 written += sz;
470 }
471#endif
472
473 while (count > 0) {
474 void *ptr;
475
476 sz = size_inside_page(p, count);
477
478 /*
479 * On ia64 if a page has been mapped somewhere as uncached, then
480 * it must also be accessed uncached by the kernel or data
481 * corruption may occur.
482 */
483 ptr = xlate_dev_kmem_ptr((void *)p);
484 if (!virt_addr_valid(ptr))
485 return -ENXIO;
486
487 copied = copy_from_user(ptr, buf, sz);
488 if (copied) {
489 written += sz - copied;
490 if (written)
491 break;
492 return -EFAULT;
493 }
494 buf += sz;
495 p += sz;
496 count -= sz;
497 written += sz;
498 }
499
500 *ppos += written;
501 return written;
502}
503
504/*
505 * This function writes to the *virtual* memory as seen by the kernel.
506 */
507static ssize_t write_kmem(struct file *file, const char __user *buf,
508 size_t count, loff_t *ppos)
509{
510 unsigned long p = *ppos;
511 ssize_t wrote = 0;
512 ssize_t virtr = 0;
513 char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
514 int err = 0;
515
516 if (p < (unsigned long) high_memory) {
517 unsigned long to_write = min_t(unsigned long, count,
518 (unsigned long)high_memory - p);
519 wrote = do_write_kmem(p, buf, to_write, ppos);
520 if (wrote != to_write)
521 return wrote;
522 p += wrote;
523 buf += wrote;
524 count -= wrote;
525 }
526
527 if (count > 0) {
528 kbuf = (char *)__get_free_page(GFP_KERNEL);
529 if (!kbuf)
530 return wrote ? wrote : -ENOMEM;
531 while (count > 0) {
532 unsigned long sz = size_inside_page(p, count);
533 unsigned long n;
534
535 if (!is_vmalloc_or_module_addr((void *)p)) {
536 err = -ENXIO;
537 break;
538 }
539 n = copy_from_user(kbuf, buf, sz);
540 if (n) {
541 err = -EFAULT;
542 break;
543 }
544 vwrite(kbuf, (char *)p, sz);
545 count -= sz;
546 buf += sz;
547 virtr += sz;
548 p += sz;
549 }
550 free_page((unsigned long)kbuf);
551 }
552
553 *ppos = p;
554 return virtr + wrote ? : err;
555}
556
557static ssize_t read_port(struct file *file, char __user *buf,
558 size_t count, loff_t *ppos)
559{
560 unsigned long i = *ppos;
561 char __user *tmp = buf;
562
563 if (!access_ok(VERIFY_WRITE, buf, count))
564 return -EFAULT;
565 while (count-- > 0 && i < 65536) {
566 if (__put_user(inb(i), tmp) < 0)
567 return -EFAULT;
568 i++;
569 tmp++;
570 }
571 *ppos = i;
572 return tmp-buf;
573}
574
575static ssize_t write_port(struct file *file, const char __user *buf,
576 size_t count, loff_t *ppos)
577{
578 unsigned long i = *ppos;
579 const char __user *tmp = buf;
580
581 if (!access_ok(VERIFY_READ, buf, count))
582 return -EFAULT;
583 while (count-- > 0 && i < 65536) {
584 char c;
585
586 if (__get_user(c, tmp)) {
587 if (tmp > buf)
588 break;
589 return -EFAULT;
590 }
591 outb(c, i);
592 i++;
593 tmp++;
594 }
595 *ppos = i;
596 return tmp-buf;
597}
598
599static ssize_t read_null(struct file *file, char __user *buf,
600 size_t count, loff_t *ppos)
601{
602 return 0;
603}
604
605static ssize_t write_null(struct file *file, const char __user *buf,
606 size_t count, loff_t *ppos)
607{
608 return count;
609}
610
611static ssize_t read_iter_null(struct kiocb *iocb, struct iov_iter *to)
612{
613 return 0;
614}
615
616static ssize_t write_iter_null(struct kiocb *iocb, struct iov_iter *from)
617{
618 size_t count = iov_iter_count(from);
619 iov_iter_advance(from, count);
620 return count;
621}
622
623static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
624 struct splice_desc *sd)
625{
626 return sd->len;
627}
628
629static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
630 loff_t *ppos, size_t len, unsigned int flags)
631{
632 return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
633}
634
635static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
636{
637 size_t written = 0;
638
639 while (iov_iter_count(iter)) {
640 size_t chunk = iov_iter_count(iter), n;
641
642 if (chunk > PAGE_SIZE)
643 chunk = PAGE_SIZE; /* Just for latency reasons */
644 n = iov_iter_zero(chunk, iter);
645 if (!n && iov_iter_count(iter))
646 return written ? written : -EFAULT;
647 written += n;
648 if (signal_pending(current))
649 return written ? written : -ERESTARTSYS;
650 cond_resched();
651 }
652 return written;
653}
654
655static int mmap_zero(struct file *file, struct vm_area_struct *vma)
656{
657#ifndef CONFIG_MMU
658 return -ENOSYS;
659#endif
660 if (vma->vm_flags & VM_SHARED)
661 return shmem_zero_setup(vma);
662 return 0;
663}
664
665static unsigned long get_unmapped_area_zero(struct file *file,
666 unsigned long addr, unsigned long len,
667 unsigned long pgoff, unsigned long flags)
668{
669#ifdef CONFIG_MMU
670 if (flags & MAP_SHARED) {
671 /*
672 * mmap_zero() will call shmem_zero_setup() to create a file,
673 * so use shmem's get_unmapped_area in case it can be huge;
674 * and pass NULL for file as in mmap.c's get_unmapped_area(),
675 * so as not to confuse shmem with our handle on "/dev/zero".
676 */
677 return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags);
678 }
679
680 /* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */
681 return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
682#else
683 return -ENOSYS;
684#endif
685}
686
687static ssize_t write_full(struct file *file, const char __user *buf,
688 size_t count, loff_t *ppos)
689{
690 return -ENOSPC;
691}
692
693/*
694 * Special lseek() function for /dev/null and /dev/zero. Most notably, you
695 * can fopen() both devices with "a" now. This was previously impossible.
696 * -- SRB.
697 */
698static loff_t null_lseek(struct file *file, loff_t offset, int orig)
699{
700 return file->f_pos = 0;
701}
702
703/*
704 * The memory devices use the full 32/64 bits of the offset, and so we cannot
705 * check against negative addresses: they are ok. The return value is weird,
706 * though, in that case (0).
707 *
708 * also note that seeking relative to the "end of file" isn't supported:
709 * it has no meaning, so it returns -EINVAL.
710 */
711static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
712{
713 loff_t ret;
714
715 inode_lock(file_inode(file));
716 switch (orig) {
717 case SEEK_CUR:
718 offset += file->f_pos;
719 case SEEK_SET:
720 /* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
721 if ((unsigned long long)offset >= -MAX_ERRNO) {
722 ret = -EOVERFLOW;
723 break;
724 }
725 file->f_pos = offset;
726 ret = file->f_pos;
727 force_successful_syscall_return();
728 break;
729 default:
730 ret = -EINVAL;
731 }
732 inode_unlock(file_inode(file));
733 return ret;
734}
735
736static int open_port(struct inode *inode, struct file *filp)
737{
738 return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
739}
740
741#define zero_lseek null_lseek
742#define full_lseek null_lseek
743#define write_zero write_null
744#define write_iter_zero write_iter_null
745#define open_mem open_port
746#define open_kmem open_mem
747
748static const struct file_operations __maybe_unused mem_fops = {
749 .llseek = memory_lseek,
750 .read = read_mem,
751 .write = write_mem,
752 .mmap = mmap_mem,
753 .open = open_mem,
754#ifndef CONFIG_MMU
755 .get_unmapped_area = get_unmapped_area_mem,
756 .mmap_capabilities = memory_mmap_capabilities,
757#endif
758};
759
760static const struct file_operations __maybe_unused kmem_fops = {
761 .llseek = memory_lseek,
762 .read = read_kmem,
763 .write = write_kmem,
764 .mmap = mmap_kmem,
765 .open = open_kmem,
766#ifndef CONFIG_MMU
767 .get_unmapped_area = get_unmapped_area_mem,
768 .mmap_capabilities = memory_mmap_capabilities,
769#endif
770};
771
772static const struct file_operations null_fops = {
773 .llseek = null_lseek,
774 .read = read_null,
775 .write = write_null,
776 .read_iter = read_iter_null,
777 .write_iter = write_iter_null,
778 .splice_write = splice_write_null,
779};
780
781static const struct file_operations __maybe_unused port_fops = {
782 .llseek = memory_lseek,
783 .read = read_port,
784 .write = write_port,
785 .open = open_port,
786};
787
788static const struct file_operations zero_fops = {
789 .llseek = zero_lseek,
790 .write = write_zero,
791 .read_iter = read_iter_zero,
792 .write_iter = write_iter_zero,
793 .mmap = mmap_zero,
794 .get_unmapped_area = get_unmapped_area_zero,
795#ifndef CONFIG_MMU
796 .mmap_capabilities = zero_mmap_capabilities,
797#endif
798};
799
800static const struct file_operations full_fops = {
801 .llseek = full_lseek,
802 .read_iter = read_iter_zero,
803 .write = write_full,
804};
805
806static const struct memdev {
807 const char *name;
808 umode_t mode;
809 const struct file_operations *fops;
810 fmode_t fmode;
811} devlist[] = {
812#ifdef CONFIG_DEVMEM
813 [1] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET },
814#endif
815#ifdef CONFIG_DEVKMEM
816 [2] = { "kmem", 0, &kmem_fops, FMODE_UNSIGNED_OFFSET },
817#endif
818 [3] = { "null", 0666, &null_fops, 0 },
819#ifdef CONFIG_DEVPORT
820 [4] = { "port", 0, &port_fops, 0 },
821#endif
822 [5] = { "zero", 0666, &zero_fops, 0 },
823 [7] = { "full", 0666, &full_fops, 0 },
824 [8] = { "random", 0666, &random_fops, 0 },
825 [9] = { "urandom", 0666, &urandom_fops, 0 },
826#ifdef CONFIG_PRINTK
827 [11] = { "kmsg", 0644, &kmsg_fops, 0 },
828#endif
829};
830
831static int memory_open(struct inode *inode, struct file *filp)
832{
833 int minor;
834 const struct memdev *dev;
835
836 minor = iminor(inode);
837 if (minor >= ARRAY_SIZE(devlist))
838 return -ENXIO;
839
840 dev = &devlist[minor];
841 if (!dev->fops)
842 return -ENXIO;
843
844 filp->f_op = dev->fops;
845 filp->f_mode |= dev->fmode;
846
847 if (dev->fops->open)
848 return dev->fops->open(inode, filp);
849
850 return 0;
851}
852
853static const struct file_operations memory_fops = {
854 .open = memory_open,
855 .llseek = noop_llseek,
856};
857
858static char *mem_devnode(struct device *dev, umode_t *mode)
859{
860 if (mode && devlist[MINOR(dev->devt)].mode)
861 *mode = devlist[MINOR(dev->devt)].mode;
862 return NULL;
863}
864
865static struct class *mem_class;
866
867static int __init chr_dev_init(void)
868{
869 int minor;
870
871 if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
872 printk("unable to get major %d for memory devs\n", MEM_MAJOR);
873
874 mem_class = class_create(THIS_MODULE, "mem");
875 if (IS_ERR(mem_class))
876 return PTR_ERR(mem_class);
877
878 mem_class->devnode = mem_devnode;
879 for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
880 if (!devlist[minor].name)
881 continue;
882
883 /*
884 * Create /dev/port?
885 */
886 if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
887 continue;
888
889 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
890 NULL, devlist[minor].name);
891 }
892
893 return tty_init();
894}
895
896fs_initcall(chr_dev_init);
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * linux/drivers/char/mem.c
4 *
5 * Copyright (C) 1991, 1992 Linus Torvalds
6 *
7 * Added devfs support.
8 * Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
9 * Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
10 */
11
12#include <linux/mm.h>
13#include <linux/miscdevice.h>
14#include <linux/slab.h>
15#include <linux/vmalloc.h>
16#include <linux/mman.h>
17#include <linux/random.h>
18#include <linux/init.h>
19#include <linux/tty.h>
20#include <linux/capability.h>
21#include <linux/ptrace.h>
22#include <linux/device.h>
23#include <linux/highmem.h>
24#include <linux/backing-dev.h>
25#include <linux/shmem_fs.h>
26#include <linux/splice.h>
27#include <linux/pfn.h>
28#include <linux/export.h>
29#include <linux/io.h>
30#include <linux/uio.h>
31#include <linux/uaccess.h>
32#include <linux/security.h>
33
34#define DEVMEM_MINOR 1
35#define DEVPORT_MINOR 4
36
37static inline unsigned long size_inside_page(unsigned long start,
38 unsigned long size)
39{
40 unsigned long sz;
41
42 sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));
43
44 return min(sz, size);
45}
46
47#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
48static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
49{
50 return addr + count <= __pa(high_memory);
51}
52
53static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
54{
55 return 1;
56}
57#endif
58
59#ifdef CONFIG_STRICT_DEVMEM
60static inline int page_is_allowed(unsigned long pfn)
61{
62 return devmem_is_allowed(pfn);
63}
64static inline int range_is_allowed(unsigned long pfn, unsigned long size)
65{
66 u64 from = ((u64)pfn) << PAGE_SHIFT;
67 u64 to = from + size;
68 u64 cursor = from;
69
70 while (cursor < to) {
71 if (!devmem_is_allowed(pfn))
72 return 0;
73 cursor += PAGE_SIZE;
74 pfn++;
75 }
76 return 1;
77}
78#else
79static inline int page_is_allowed(unsigned long pfn)
80{
81 return 1;
82}
83static inline int range_is_allowed(unsigned long pfn, unsigned long size)
84{
85 return 1;
86}
87#endif
88
89static inline bool should_stop_iteration(void)
90{
91 if (need_resched())
92 cond_resched();
93 return signal_pending(current);
94}
95
96/*
97 * This funcion reads the *physical* memory. The f_pos points directly to the
98 * memory location.
99 */
100static ssize_t read_mem(struct file *file, char __user *buf,
101 size_t count, loff_t *ppos)
102{
103 phys_addr_t p = *ppos;
104 ssize_t read, sz;
105 void *ptr;
106 char *bounce;
107 int err;
108
109 if (p != *ppos)
110 return 0;
111
112 if (!valid_phys_addr_range(p, count))
113 return -EFAULT;
114 read = 0;
115#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
116 /* we don't have page 0 mapped on sparc and m68k.. */
117 if (p < PAGE_SIZE) {
118 sz = size_inside_page(p, count);
119 if (sz > 0) {
120 if (clear_user(buf, sz))
121 return -EFAULT;
122 buf += sz;
123 p += sz;
124 count -= sz;
125 read += sz;
126 }
127 }
128#endif
129
130 bounce = kmalloc(PAGE_SIZE, GFP_KERNEL);
131 if (!bounce)
132 return -ENOMEM;
133
134 while (count > 0) {
135 unsigned long remaining;
136 int allowed, probe;
137
138 sz = size_inside_page(p, count);
139
140 err = -EPERM;
141 allowed = page_is_allowed(p >> PAGE_SHIFT);
142 if (!allowed)
143 goto failed;
144
145 err = -EFAULT;
146 if (allowed == 2) {
147 /* Show zeros for restricted memory. */
148 remaining = clear_user(buf, sz);
149 } else {
150 /*
151 * On ia64 if a page has been mapped somewhere as
152 * uncached, then it must also be accessed uncached
153 * by the kernel or data corruption may occur.
154 */
155 ptr = xlate_dev_mem_ptr(p);
156 if (!ptr)
157 goto failed;
158
159 probe = copy_from_kernel_nofault(bounce, ptr, sz);
160 unxlate_dev_mem_ptr(p, ptr);
161 if (probe)
162 goto failed;
163
164 remaining = copy_to_user(buf, bounce, sz);
165 }
166
167 if (remaining)
168 goto failed;
169
170 buf += sz;
171 p += sz;
172 count -= sz;
173 read += sz;
174 if (should_stop_iteration())
175 break;
176 }
177 kfree(bounce);
178
179 *ppos += read;
180 return read;
181
182failed:
183 kfree(bounce);
184 return err;
185}
186
187static ssize_t write_mem(struct file *file, const char __user *buf,
188 size_t count, loff_t *ppos)
189{
190 phys_addr_t p = *ppos;
191 ssize_t written, sz;
192 unsigned long copied;
193 void *ptr;
194
195 if (p != *ppos)
196 return -EFBIG;
197
198 if (!valid_phys_addr_range(p, count))
199 return -EFAULT;
200
201 written = 0;
202
203#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
204 /* we don't have page 0 mapped on sparc and m68k.. */
205 if (p < PAGE_SIZE) {
206 sz = size_inside_page(p, count);
207 /* Hmm. Do something? */
208 buf += sz;
209 p += sz;
210 count -= sz;
211 written += sz;
212 }
213#endif
214
215 while (count > 0) {
216 int allowed;
217
218 sz = size_inside_page(p, count);
219
220 allowed = page_is_allowed(p >> PAGE_SHIFT);
221 if (!allowed)
222 return -EPERM;
223
224 /* Skip actual writing when a page is marked as restricted. */
225 if (allowed == 1) {
226 /*
227 * On ia64 if a page has been mapped somewhere as
228 * uncached, then it must also be accessed uncached
229 * by the kernel or data corruption may occur.
230 */
231 ptr = xlate_dev_mem_ptr(p);
232 if (!ptr) {
233 if (written)
234 break;
235 return -EFAULT;
236 }
237
238 copied = copy_from_user(ptr, buf, sz);
239 unxlate_dev_mem_ptr(p, ptr);
240 if (copied) {
241 written += sz - copied;
242 if (written)
243 break;
244 return -EFAULT;
245 }
246 }
247
248 buf += sz;
249 p += sz;
250 count -= sz;
251 written += sz;
252 if (should_stop_iteration())
253 break;
254 }
255
256 *ppos += written;
257 return written;
258}
259
260int __weak phys_mem_access_prot_allowed(struct file *file,
261 unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
262{
263 return 1;
264}
265
266#ifndef __HAVE_PHYS_MEM_ACCESS_PROT
267
268/*
269 * Architectures vary in how they handle caching for addresses
270 * outside of main memory.
271 *
272 */
273#ifdef pgprot_noncached
274static int uncached_access(struct file *file, phys_addr_t addr)
275{
276 /*
277 * Accessing memory above the top the kernel knows about or through a
278 * file pointer
279 * that was marked O_DSYNC will be done non-cached.
280 */
281 if (file->f_flags & O_DSYNC)
282 return 1;
283 return addr >= __pa(high_memory);
284}
285#endif
286
287static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
288 unsigned long size, pgprot_t vma_prot)
289{
290#ifdef pgprot_noncached
291 phys_addr_t offset = pfn << PAGE_SHIFT;
292
293 if (uncached_access(file, offset))
294 return pgprot_noncached(vma_prot);
295#endif
296 return vma_prot;
297}
298#endif
299
300#ifndef CONFIG_MMU
301static unsigned long get_unmapped_area_mem(struct file *file,
302 unsigned long addr,
303 unsigned long len,
304 unsigned long pgoff,
305 unsigned long flags)
306{
307 if (!valid_mmap_phys_addr_range(pgoff, len))
308 return (unsigned long) -EINVAL;
309 return pgoff << PAGE_SHIFT;
310}
311
312/* permit direct mmap, for read, write or exec */
313static unsigned memory_mmap_capabilities(struct file *file)
314{
315 return NOMMU_MAP_DIRECT |
316 NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
317}
318
319static unsigned zero_mmap_capabilities(struct file *file)
320{
321 return NOMMU_MAP_COPY;
322}
323
324/* can't do an in-place private mapping if there's no MMU */
325static inline int private_mapping_ok(struct vm_area_struct *vma)
326{
327 return is_nommu_shared_mapping(vma->vm_flags);
328}
329#else
330
331static inline int private_mapping_ok(struct vm_area_struct *vma)
332{
333 return 1;
334}
335#endif
336
337static const struct vm_operations_struct mmap_mem_ops = {
338#ifdef CONFIG_HAVE_IOREMAP_PROT
339 .access = generic_access_phys
340#endif
341};
342
343static int mmap_mem(struct file *file, struct vm_area_struct *vma)
344{
345 size_t size = vma->vm_end - vma->vm_start;
346 phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
347
348 /* Does it even fit in phys_addr_t? */
349 if (offset >> PAGE_SHIFT != vma->vm_pgoff)
350 return -EINVAL;
351
352 /* It's illegal to wrap around the end of the physical address space. */
353 if (offset + (phys_addr_t)size - 1 < offset)
354 return -EINVAL;
355
356 if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
357 return -EINVAL;
358
359 if (!private_mapping_ok(vma))
360 return -ENOSYS;
361
362 if (!range_is_allowed(vma->vm_pgoff, size))
363 return -EPERM;
364
365 if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
366 &vma->vm_page_prot))
367 return -EINVAL;
368
369 vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
370 size,
371 vma->vm_page_prot);
372
373 vma->vm_ops = &mmap_mem_ops;
374
375 /* Remap-pfn-range will mark the range VM_IO */
376 if (remap_pfn_range(vma,
377 vma->vm_start,
378 vma->vm_pgoff,
379 size,
380 vma->vm_page_prot)) {
381 return -EAGAIN;
382 }
383 return 0;
384}
385
386static ssize_t read_port(struct file *file, char __user *buf,
387 size_t count, loff_t *ppos)
388{
389 unsigned long i = *ppos;
390 char __user *tmp = buf;
391
392 if (!access_ok(buf, count))
393 return -EFAULT;
394 while (count-- > 0 && i < 65536) {
395 if (__put_user(inb(i), tmp) < 0)
396 return -EFAULT;
397 i++;
398 tmp++;
399 }
400 *ppos = i;
401 return tmp-buf;
402}
403
404static ssize_t write_port(struct file *file, const char __user *buf,
405 size_t count, loff_t *ppos)
406{
407 unsigned long i = *ppos;
408 const char __user *tmp = buf;
409
410 if (!access_ok(buf, count))
411 return -EFAULT;
412 while (count-- > 0 && i < 65536) {
413 char c;
414
415 if (__get_user(c, tmp)) {
416 if (tmp > buf)
417 break;
418 return -EFAULT;
419 }
420 outb(c, i);
421 i++;
422 tmp++;
423 }
424 *ppos = i;
425 return tmp-buf;
426}
427
428static ssize_t read_null(struct file *file, char __user *buf,
429 size_t count, loff_t *ppos)
430{
431 return 0;
432}
433
434static ssize_t write_null(struct file *file, const char __user *buf,
435 size_t count, loff_t *ppos)
436{
437 return count;
438}
439
440static ssize_t read_iter_null(struct kiocb *iocb, struct iov_iter *to)
441{
442 return 0;
443}
444
445static ssize_t write_iter_null(struct kiocb *iocb, struct iov_iter *from)
446{
447 size_t count = iov_iter_count(from);
448 iov_iter_advance(from, count);
449 return count;
450}
451
452static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
453 struct splice_desc *sd)
454{
455 return sd->len;
456}
457
458static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
459 loff_t *ppos, size_t len, unsigned int flags)
460{
461 return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
462}
463
464static int uring_cmd_null(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
465{
466 return 0;
467}
468
469static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
470{
471 size_t written = 0;
472
473 while (iov_iter_count(iter)) {
474 size_t chunk = iov_iter_count(iter), n;
475
476 if (chunk > PAGE_SIZE)
477 chunk = PAGE_SIZE; /* Just for latency reasons */
478 n = iov_iter_zero(chunk, iter);
479 if (!n && iov_iter_count(iter))
480 return written ? written : -EFAULT;
481 written += n;
482 if (signal_pending(current))
483 return written ? written : -ERESTARTSYS;
484 if (!need_resched())
485 continue;
486 if (iocb->ki_flags & IOCB_NOWAIT)
487 return written ? written : -EAGAIN;
488 cond_resched();
489 }
490 return written;
491}
492
493static ssize_t read_zero(struct file *file, char __user *buf,
494 size_t count, loff_t *ppos)
495{
496 size_t cleared = 0;
497
498 while (count) {
499 size_t chunk = min_t(size_t, count, PAGE_SIZE);
500 size_t left;
501
502 left = clear_user(buf + cleared, chunk);
503 if (unlikely(left)) {
504 cleared += (chunk - left);
505 if (!cleared)
506 return -EFAULT;
507 break;
508 }
509 cleared += chunk;
510 count -= chunk;
511
512 if (signal_pending(current))
513 break;
514 cond_resched();
515 }
516
517 return cleared;
518}
519
520static int mmap_zero(struct file *file, struct vm_area_struct *vma)
521{
522#ifndef CONFIG_MMU
523 return -ENOSYS;
524#endif
525 if (vma->vm_flags & VM_SHARED)
526 return shmem_zero_setup(vma);
527 vma_set_anonymous(vma);
528 return 0;
529}
530
531static unsigned long get_unmapped_area_zero(struct file *file,
532 unsigned long addr, unsigned long len,
533 unsigned long pgoff, unsigned long flags)
534{
535#ifdef CONFIG_MMU
536 if (flags & MAP_SHARED) {
537 /*
538 * mmap_zero() will call shmem_zero_setup() to create a file,
539 * so use shmem's get_unmapped_area in case it can be huge;
540 * and pass NULL for file as in mmap.c's get_unmapped_area(),
541 * so as not to confuse shmem with our handle on "/dev/zero".
542 */
543 return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags);
544 }
545
546 /* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */
547 return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
548#else
549 return -ENOSYS;
550#endif
551}
552
553static ssize_t write_full(struct file *file, const char __user *buf,
554 size_t count, loff_t *ppos)
555{
556 return -ENOSPC;
557}
558
559/*
560 * Special lseek() function for /dev/null and /dev/zero. Most notably, you
561 * can fopen() both devices with "a" now. This was previously impossible.
562 * -- SRB.
563 */
564static loff_t null_lseek(struct file *file, loff_t offset, int orig)
565{
566 return file->f_pos = 0;
567}
568
569/*
570 * The memory devices use the full 32/64 bits of the offset, and so we cannot
571 * check against negative addresses: they are ok. The return value is weird,
572 * though, in that case (0).
573 *
574 * also note that seeking relative to the "end of file" isn't supported:
575 * it has no meaning, so it returns -EINVAL.
576 */
577static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
578{
579 loff_t ret;
580
581 inode_lock(file_inode(file));
582 switch (orig) {
583 case SEEK_CUR:
584 offset += file->f_pos;
585 fallthrough;
586 case SEEK_SET:
587 /* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
588 if ((unsigned long long)offset >= -MAX_ERRNO) {
589 ret = -EOVERFLOW;
590 break;
591 }
592 file->f_pos = offset;
593 ret = file->f_pos;
594 force_successful_syscall_return();
595 break;
596 default:
597 ret = -EINVAL;
598 }
599 inode_unlock(file_inode(file));
600 return ret;
601}
602
603static int open_port(struct inode *inode, struct file *filp)
604{
605 int rc;
606
607 if (!capable(CAP_SYS_RAWIO))
608 return -EPERM;
609
610 rc = security_locked_down(LOCKDOWN_DEV_MEM);
611 if (rc)
612 return rc;
613
614 if (iminor(inode) != DEVMEM_MINOR)
615 return 0;
616
617 /*
618 * Use a unified address space to have a single point to manage
619 * revocations when drivers want to take over a /dev/mem mapped
620 * range.
621 */
622 filp->f_mapping = iomem_get_mapping();
623
624 return 0;
625}
626
627#define zero_lseek null_lseek
628#define full_lseek null_lseek
629#define write_zero write_null
630#define write_iter_zero write_iter_null
631#define splice_write_zero splice_write_null
632#define open_mem open_port
633
634static const struct file_operations __maybe_unused mem_fops = {
635 .llseek = memory_lseek,
636 .read = read_mem,
637 .write = write_mem,
638 .mmap = mmap_mem,
639 .open = open_mem,
640#ifndef CONFIG_MMU
641 .get_unmapped_area = get_unmapped_area_mem,
642 .mmap_capabilities = memory_mmap_capabilities,
643#endif
644};
645
646static const struct file_operations null_fops = {
647 .llseek = null_lseek,
648 .read = read_null,
649 .write = write_null,
650 .read_iter = read_iter_null,
651 .write_iter = write_iter_null,
652 .splice_write = splice_write_null,
653 .uring_cmd = uring_cmd_null,
654};
655
656static const struct file_operations __maybe_unused port_fops = {
657 .llseek = memory_lseek,
658 .read = read_port,
659 .write = write_port,
660 .open = open_port,
661};
662
663static const struct file_operations zero_fops = {
664 .llseek = zero_lseek,
665 .write = write_zero,
666 .read_iter = read_iter_zero,
667 .read = read_zero,
668 .write_iter = write_iter_zero,
669 .splice_read = copy_splice_read,
670 .splice_write = splice_write_zero,
671 .mmap = mmap_zero,
672 .get_unmapped_area = get_unmapped_area_zero,
673#ifndef CONFIG_MMU
674 .mmap_capabilities = zero_mmap_capabilities,
675#endif
676};
677
678static const struct file_operations full_fops = {
679 .llseek = full_lseek,
680 .read_iter = read_iter_zero,
681 .write = write_full,
682 .splice_read = copy_splice_read,
683};
684
685static const struct memdev {
686 const char *name;
687 const struct file_operations *fops;
688 fmode_t fmode;
689 umode_t mode;
690} devlist[] = {
691#ifdef CONFIG_DEVMEM
692 [DEVMEM_MINOR] = { "mem", &mem_fops, FMODE_UNSIGNED_OFFSET, 0 },
693#endif
694 [3] = { "null", &null_fops, FMODE_NOWAIT, 0666 },
695#ifdef CONFIG_DEVPORT
696 [4] = { "port", &port_fops, 0, 0 },
697#endif
698 [5] = { "zero", &zero_fops, FMODE_NOWAIT, 0666 },
699 [7] = { "full", &full_fops, 0, 0666 },
700 [8] = { "random", &random_fops, FMODE_NOWAIT, 0666 },
701 [9] = { "urandom", &urandom_fops, FMODE_NOWAIT, 0666 },
702#ifdef CONFIG_PRINTK
703 [11] = { "kmsg", &kmsg_fops, 0, 0644 },
704#endif
705};
706
707static int memory_open(struct inode *inode, struct file *filp)
708{
709 int minor;
710 const struct memdev *dev;
711
712 minor = iminor(inode);
713 if (minor >= ARRAY_SIZE(devlist))
714 return -ENXIO;
715
716 dev = &devlist[minor];
717 if (!dev->fops)
718 return -ENXIO;
719
720 filp->f_op = dev->fops;
721 filp->f_mode |= dev->fmode;
722
723 if (dev->fops->open)
724 return dev->fops->open(inode, filp);
725
726 return 0;
727}
728
729static const struct file_operations memory_fops = {
730 .open = memory_open,
731 .llseek = noop_llseek,
732};
733
734static char *mem_devnode(const struct device *dev, umode_t *mode)
735{
736 if (mode && devlist[MINOR(dev->devt)].mode)
737 *mode = devlist[MINOR(dev->devt)].mode;
738 return NULL;
739}
740
741static const struct class mem_class = {
742 .name = "mem",
743 .devnode = mem_devnode,
744};
745
746static int __init chr_dev_init(void)
747{
748 int retval;
749 int minor;
750
751 if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
752 printk("unable to get major %d for memory devs\n", MEM_MAJOR);
753
754 retval = class_register(&mem_class);
755 if (retval)
756 return retval;
757
758 for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
759 if (!devlist[minor].name)
760 continue;
761
762 /*
763 * Create /dev/port?
764 */
765 if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
766 continue;
767
768 device_create(&mem_class, NULL, MKDEV(MEM_MAJOR, minor),
769 NULL, devlist[minor].name);
770 }
771
772 return tty_init();
773}
774
775fs_initcall(chr_dev_init);