Loading...
1/*
2 * linux/drivers/char/mem.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 *
6 * Added devfs support.
7 * Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
8 * Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
9 */
10
11#include <linux/mm.h>
12#include <linux/miscdevice.h>
13#include <linux/slab.h>
14#include <linux/vmalloc.h>
15#include <linux/mman.h>
16#include <linux/random.h>
17#include <linux/init.h>
18#include <linux/raw.h>
19#include <linux/tty.h>
20#include <linux/capability.h>
21#include <linux/ptrace.h>
22#include <linux/device.h>
23#include <linux/highmem.h>
24#include <linux/crash_dump.h>
25#include <linux/backing-dev.h>
26#include <linux/bootmem.h>
27#include <linux/splice.h>
28#include <linux/pfn.h>
29
30#include <asm/uaccess.h>
31#include <asm/io.h>
32
33#ifdef CONFIG_IA64
34# include <linux/efi.h>
35#endif
36
37static inline unsigned long size_inside_page(unsigned long start,
38 unsigned long size)
39{
40 unsigned long sz;
41
42 sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));
43
44 return min(sz, size);
45}
46
47#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
48static inline int valid_phys_addr_range(unsigned long addr, size_t count)
49{
50 return addr + count <= __pa(high_memory);
51}
52
53static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
54{
55 return 1;
56}
57#endif
58
59#ifdef CONFIG_STRICT_DEVMEM
60static inline int range_is_allowed(unsigned long pfn, unsigned long size)
61{
62 u64 from = ((u64)pfn) << PAGE_SHIFT;
63 u64 to = from + size;
64 u64 cursor = from;
65
66 while (cursor < to) {
67 if (!devmem_is_allowed(pfn)) {
68 printk(KERN_INFO
69 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
70 current->comm, from, to);
71 return 0;
72 }
73 cursor += PAGE_SIZE;
74 pfn++;
75 }
76 return 1;
77}
78#else
79static inline int range_is_allowed(unsigned long pfn, unsigned long size)
80{
81 return 1;
82}
83#endif
84
85void __weak unxlate_dev_mem_ptr(unsigned long phys, void *addr)
86{
87}
88
89/*
90 * This funcion reads the *physical* memory. The f_pos points directly to the
91 * memory location.
92 */
93static ssize_t read_mem(struct file *file, char __user *buf,
94 size_t count, loff_t *ppos)
95{
96 unsigned long p = *ppos;
97 ssize_t read, sz;
98 char *ptr;
99
100 if (!valid_phys_addr_range(p, count))
101 return -EFAULT;
102 read = 0;
103#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
104 /* we don't have page 0 mapped on sparc and m68k.. */
105 if (p < PAGE_SIZE) {
106 sz = size_inside_page(p, count);
107 if (sz > 0) {
108 if (clear_user(buf, sz))
109 return -EFAULT;
110 buf += sz;
111 p += sz;
112 count -= sz;
113 read += sz;
114 }
115 }
116#endif
117
118 while (count > 0) {
119 unsigned long remaining;
120
121 sz = size_inside_page(p, count);
122
123 if (!range_is_allowed(p >> PAGE_SHIFT, count))
124 return -EPERM;
125
126 /*
127 * On ia64 if a page has been mapped somewhere as uncached, then
128 * it must also be accessed uncached by the kernel or data
129 * corruption may occur.
130 */
131 ptr = xlate_dev_mem_ptr(p);
132 if (!ptr)
133 return -EFAULT;
134
135 remaining = copy_to_user(buf, ptr, sz);
136 unxlate_dev_mem_ptr(p, ptr);
137 if (remaining)
138 return -EFAULT;
139
140 buf += sz;
141 p += sz;
142 count -= sz;
143 read += sz;
144 }
145
146 *ppos += read;
147 return read;
148}
149
150static ssize_t write_mem(struct file *file, const char __user *buf,
151 size_t count, loff_t *ppos)
152{
153 unsigned long p = *ppos;
154 ssize_t written, sz;
155 unsigned long copied;
156 void *ptr;
157
158 if (!valid_phys_addr_range(p, count))
159 return -EFAULT;
160
161 written = 0;
162
163#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
164 /* we don't have page 0 mapped on sparc and m68k.. */
165 if (p < PAGE_SIZE) {
166 sz = size_inside_page(p, count);
167 /* Hmm. Do something? */
168 buf += sz;
169 p += sz;
170 count -= sz;
171 written += sz;
172 }
173#endif
174
175 while (count > 0) {
176 sz = size_inside_page(p, count);
177
178 if (!range_is_allowed(p >> PAGE_SHIFT, sz))
179 return -EPERM;
180
181 /*
182 * On ia64 if a page has been mapped somewhere as uncached, then
183 * it must also be accessed uncached by the kernel or data
184 * corruption may occur.
185 */
186 ptr = xlate_dev_mem_ptr(p);
187 if (!ptr) {
188 if (written)
189 break;
190 return -EFAULT;
191 }
192
193 copied = copy_from_user(ptr, buf, sz);
194 unxlate_dev_mem_ptr(p, ptr);
195 if (copied) {
196 written += sz - copied;
197 if (written)
198 break;
199 return -EFAULT;
200 }
201
202 buf += sz;
203 p += sz;
204 count -= sz;
205 written += sz;
206 }
207
208 *ppos += written;
209 return written;
210}
211
212int __weak phys_mem_access_prot_allowed(struct file *file,
213 unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
214{
215 return 1;
216}
217
218#ifndef __HAVE_PHYS_MEM_ACCESS_PROT
219
220/*
221 * Architectures vary in how they handle caching for addresses
222 * outside of main memory.
223 *
224 */
225#ifdef pgprot_noncached
226static int uncached_access(struct file *file, unsigned long addr)
227{
228#if defined(CONFIG_IA64)
229 /*
230 * On ia64, we ignore O_DSYNC because we cannot tolerate memory
231 * attribute aliases.
232 */
233 return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
234#elif defined(CONFIG_MIPS)
235 {
236 extern int __uncached_access(struct file *file,
237 unsigned long addr);
238
239 return __uncached_access(file, addr);
240 }
241#else
242 /*
243 * Accessing memory above the top the kernel knows about or through a
244 * file pointer
245 * that was marked O_DSYNC will be done non-cached.
246 */
247 if (file->f_flags & O_DSYNC)
248 return 1;
249 return addr >= __pa(high_memory);
250#endif
251}
252#endif
253
254static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
255 unsigned long size, pgprot_t vma_prot)
256{
257#ifdef pgprot_noncached
258 unsigned long offset = pfn << PAGE_SHIFT;
259
260 if (uncached_access(file, offset))
261 return pgprot_noncached(vma_prot);
262#endif
263 return vma_prot;
264}
265#endif
266
267#ifndef CONFIG_MMU
268static unsigned long get_unmapped_area_mem(struct file *file,
269 unsigned long addr,
270 unsigned long len,
271 unsigned long pgoff,
272 unsigned long flags)
273{
274 if (!valid_mmap_phys_addr_range(pgoff, len))
275 return (unsigned long) -EINVAL;
276 return pgoff << PAGE_SHIFT;
277}
278
279/* can't do an in-place private mapping if there's no MMU */
280static inline int private_mapping_ok(struct vm_area_struct *vma)
281{
282 return vma->vm_flags & VM_MAYSHARE;
283}
284#else
285#define get_unmapped_area_mem NULL
286
287static inline int private_mapping_ok(struct vm_area_struct *vma)
288{
289 return 1;
290}
291#endif
292
293static const struct vm_operations_struct mmap_mem_ops = {
294#ifdef CONFIG_HAVE_IOREMAP_PROT
295 .access = generic_access_phys
296#endif
297};
298
299static int mmap_mem(struct file *file, struct vm_area_struct *vma)
300{
301 size_t size = vma->vm_end - vma->vm_start;
302
303 if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
304 return -EINVAL;
305
306 if (!private_mapping_ok(vma))
307 return -ENOSYS;
308
309 if (!range_is_allowed(vma->vm_pgoff, size))
310 return -EPERM;
311
312 if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
313 &vma->vm_page_prot))
314 return -EINVAL;
315
316 vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
317 size,
318 vma->vm_page_prot);
319
320 vma->vm_ops = &mmap_mem_ops;
321
322 /* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
323 if (remap_pfn_range(vma,
324 vma->vm_start,
325 vma->vm_pgoff,
326 size,
327 vma->vm_page_prot)) {
328 return -EAGAIN;
329 }
330 return 0;
331}
332
333#ifdef CONFIG_DEVKMEM
334static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
335{
336 unsigned long pfn;
337
338 /* Turn a kernel-virtual address into a physical page frame */
339 pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;
340
341 /*
342 * RED-PEN: on some architectures there is more mapped memory than
343 * available in mem_map which pfn_valid checks for. Perhaps should add a
344 * new macro here.
345 *
346 * RED-PEN: vmalloc is not supported right now.
347 */
348 if (!pfn_valid(pfn))
349 return -EIO;
350
351 vma->vm_pgoff = pfn;
352 return mmap_mem(file, vma);
353}
354#endif
355
356#ifdef CONFIG_CRASH_DUMP
357/*
358 * Read memory corresponding to the old kernel.
359 */
360static ssize_t read_oldmem(struct file *file, char __user *buf,
361 size_t count, loff_t *ppos)
362{
363 unsigned long pfn, offset;
364 size_t read = 0, csize;
365 int rc = 0;
366
367 while (count) {
368 pfn = *ppos / PAGE_SIZE;
369 if (pfn > saved_max_pfn)
370 return read;
371
372 offset = (unsigned long)(*ppos % PAGE_SIZE);
373 if (count > PAGE_SIZE - offset)
374 csize = PAGE_SIZE - offset;
375 else
376 csize = count;
377
378 rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
379 if (rc < 0)
380 return rc;
381 buf += csize;
382 *ppos += csize;
383 read += csize;
384 count -= csize;
385 }
386 return read;
387}
388#endif
389
390#ifdef CONFIG_DEVKMEM
391/*
392 * This function reads the *virtual* memory as seen by the kernel.
393 */
394static ssize_t read_kmem(struct file *file, char __user *buf,
395 size_t count, loff_t *ppos)
396{
397 unsigned long p = *ppos;
398 ssize_t low_count, read, sz;
399 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
400 int err = 0;
401
402 read = 0;
403 if (p < (unsigned long) high_memory) {
404 low_count = count;
405 if (count > (unsigned long)high_memory - p)
406 low_count = (unsigned long)high_memory - p;
407
408#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
409 /* we don't have page 0 mapped on sparc and m68k.. */
410 if (p < PAGE_SIZE && low_count > 0) {
411 sz = size_inside_page(p, low_count);
412 if (clear_user(buf, sz))
413 return -EFAULT;
414 buf += sz;
415 p += sz;
416 read += sz;
417 low_count -= sz;
418 count -= sz;
419 }
420#endif
421 while (low_count > 0) {
422 sz = size_inside_page(p, low_count);
423
424 /*
425 * On ia64 if a page has been mapped somewhere as
426 * uncached, then it must also be accessed uncached
427 * by the kernel or data corruption may occur
428 */
429 kbuf = xlate_dev_kmem_ptr((char *)p);
430
431 if (copy_to_user(buf, kbuf, sz))
432 return -EFAULT;
433 buf += sz;
434 p += sz;
435 read += sz;
436 low_count -= sz;
437 count -= sz;
438 }
439 }
440
441 if (count > 0) {
442 kbuf = (char *)__get_free_page(GFP_KERNEL);
443 if (!kbuf)
444 return -ENOMEM;
445 while (count > 0) {
446 sz = size_inside_page(p, count);
447 if (!is_vmalloc_or_module_addr((void *)p)) {
448 err = -ENXIO;
449 break;
450 }
451 sz = vread(kbuf, (char *)p, sz);
452 if (!sz)
453 break;
454 if (copy_to_user(buf, kbuf, sz)) {
455 err = -EFAULT;
456 break;
457 }
458 count -= sz;
459 buf += sz;
460 read += sz;
461 p += sz;
462 }
463 free_page((unsigned long)kbuf);
464 }
465 *ppos = p;
466 return read ? read : err;
467}
468
469
470static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
471 size_t count, loff_t *ppos)
472{
473 ssize_t written, sz;
474 unsigned long copied;
475
476 written = 0;
477#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
478 /* we don't have page 0 mapped on sparc and m68k.. */
479 if (p < PAGE_SIZE) {
480 sz = size_inside_page(p, count);
481 /* Hmm. Do something? */
482 buf += sz;
483 p += sz;
484 count -= sz;
485 written += sz;
486 }
487#endif
488
489 while (count > 0) {
490 char *ptr;
491
492 sz = size_inside_page(p, count);
493
494 /*
495 * On ia64 if a page has been mapped somewhere as uncached, then
496 * it must also be accessed uncached by the kernel or data
497 * corruption may occur.
498 */
499 ptr = xlate_dev_kmem_ptr((char *)p);
500
501 copied = copy_from_user(ptr, buf, sz);
502 if (copied) {
503 written += sz - copied;
504 if (written)
505 break;
506 return -EFAULT;
507 }
508 buf += sz;
509 p += sz;
510 count -= sz;
511 written += sz;
512 }
513
514 *ppos += written;
515 return written;
516}
517
518/*
519 * This function writes to the *virtual* memory as seen by the kernel.
520 */
521static ssize_t write_kmem(struct file *file, const char __user *buf,
522 size_t count, loff_t *ppos)
523{
524 unsigned long p = *ppos;
525 ssize_t wrote = 0;
526 ssize_t virtr = 0;
527 char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
528 int err = 0;
529
530 if (p < (unsigned long) high_memory) {
531 unsigned long to_write = min_t(unsigned long, count,
532 (unsigned long)high_memory - p);
533 wrote = do_write_kmem(p, buf, to_write, ppos);
534 if (wrote != to_write)
535 return wrote;
536 p += wrote;
537 buf += wrote;
538 count -= wrote;
539 }
540
541 if (count > 0) {
542 kbuf = (char *)__get_free_page(GFP_KERNEL);
543 if (!kbuf)
544 return wrote ? wrote : -ENOMEM;
545 while (count > 0) {
546 unsigned long sz = size_inside_page(p, count);
547 unsigned long n;
548
549 if (!is_vmalloc_or_module_addr((void *)p)) {
550 err = -ENXIO;
551 break;
552 }
553 n = copy_from_user(kbuf, buf, sz);
554 if (n) {
555 err = -EFAULT;
556 break;
557 }
558 vwrite(kbuf, (char *)p, sz);
559 count -= sz;
560 buf += sz;
561 virtr += sz;
562 p += sz;
563 }
564 free_page((unsigned long)kbuf);
565 }
566
567 *ppos = p;
568 return virtr + wrote ? : err;
569}
570#endif
571
572#ifdef CONFIG_DEVPORT
573static ssize_t read_port(struct file *file, char __user *buf,
574 size_t count, loff_t *ppos)
575{
576 unsigned long i = *ppos;
577 char __user *tmp = buf;
578
579 if (!access_ok(VERIFY_WRITE, buf, count))
580 return -EFAULT;
581 while (count-- > 0 && i < 65536) {
582 if (__put_user(inb(i), tmp) < 0)
583 return -EFAULT;
584 i++;
585 tmp++;
586 }
587 *ppos = i;
588 return tmp-buf;
589}
590
591static ssize_t write_port(struct file *file, const char __user *buf,
592 size_t count, loff_t *ppos)
593{
594 unsigned long i = *ppos;
595 const char __user * tmp = buf;
596
597 if (!access_ok(VERIFY_READ, buf, count))
598 return -EFAULT;
599 while (count-- > 0 && i < 65536) {
600 char c;
601 if (__get_user(c, tmp)) {
602 if (tmp > buf)
603 break;
604 return -EFAULT;
605 }
606 outb(c, i);
607 i++;
608 tmp++;
609 }
610 *ppos = i;
611 return tmp-buf;
612}
613#endif
614
615static ssize_t read_null(struct file *file, char __user *buf,
616 size_t count, loff_t *ppos)
617{
618 return 0;
619}
620
621static ssize_t write_null(struct file *file, const char __user *buf,
622 size_t count, loff_t *ppos)
623{
624 return count;
625}
626
627static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
628 struct splice_desc *sd)
629{
630 return sd->len;
631}
632
633static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
634 loff_t *ppos, size_t len, unsigned int flags)
635{
636 return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
637}
638
639static ssize_t read_zero(struct file *file, char __user *buf,
640 size_t count, loff_t *ppos)
641{
642 size_t written;
643
644 if (!count)
645 return 0;
646
647 if (!access_ok(VERIFY_WRITE, buf, count))
648 return -EFAULT;
649
650 written = 0;
651 while (count) {
652 unsigned long unwritten;
653 size_t chunk = count;
654
655 if (chunk > PAGE_SIZE)
656 chunk = PAGE_SIZE; /* Just for latency reasons */
657 unwritten = __clear_user(buf, chunk);
658 written += chunk - unwritten;
659 if (unwritten)
660 break;
661 if (signal_pending(current))
662 return written ? written : -ERESTARTSYS;
663 buf += chunk;
664 count -= chunk;
665 cond_resched();
666 }
667 return written ? written : -EFAULT;
668}
669
670static int mmap_zero(struct file *file, struct vm_area_struct *vma)
671{
672#ifndef CONFIG_MMU
673 return -ENOSYS;
674#endif
675 if (vma->vm_flags & VM_SHARED)
676 return shmem_zero_setup(vma);
677 return 0;
678}
679
680static ssize_t write_full(struct file *file, const char __user *buf,
681 size_t count, loff_t *ppos)
682{
683 return -ENOSPC;
684}
685
686/*
687 * Special lseek() function for /dev/null and /dev/zero. Most notably, you
688 * can fopen() both devices with "a" now. This was previously impossible.
689 * -- SRB.
690 */
691static loff_t null_lseek(struct file *file, loff_t offset, int orig)
692{
693 return file->f_pos = 0;
694}
695
696/*
697 * The memory devices use the full 32/64 bits of the offset, and so we cannot
698 * check against negative addresses: they are ok. The return value is weird,
699 * though, in that case (0).
700 *
701 * also note that seeking relative to the "end of file" isn't supported:
702 * it has no meaning, so it returns -EINVAL.
703 */
704static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
705{
706 loff_t ret;
707
708 mutex_lock(&file->f_path.dentry->d_inode->i_mutex);
709 switch (orig) {
710 case SEEK_CUR:
711 offset += file->f_pos;
712 case SEEK_SET:
713 /* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
714 if ((unsigned long long)offset >= ~0xFFFULL) {
715 ret = -EOVERFLOW;
716 break;
717 }
718 file->f_pos = offset;
719 ret = file->f_pos;
720 force_successful_syscall_return();
721 break;
722 default:
723 ret = -EINVAL;
724 }
725 mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
726 return ret;
727}
728
729static int open_port(struct inode * inode, struct file * filp)
730{
731 return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
732}
733
734#define zero_lseek null_lseek
735#define full_lseek null_lseek
736#define write_zero write_null
737#define read_full read_zero
738#define open_mem open_port
739#define open_kmem open_mem
740#define open_oldmem open_mem
741
742static const struct file_operations mem_fops = {
743 .llseek = memory_lseek,
744 .read = read_mem,
745 .write = write_mem,
746 .mmap = mmap_mem,
747 .open = open_mem,
748 .get_unmapped_area = get_unmapped_area_mem,
749};
750
751#ifdef CONFIG_DEVKMEM
752static const struct file_operations kmem_fops = {
753 .llseek = memory_lseek,
754 .read = read_kmem,
755 .write = write_kmem,
756 .mmap = mmap_kmem,
757 .open = open_kmem,
758 .get_unmapped_area = get_unmapped_area_mem,
759};
760#endif
761
762static const struct file_operations null_fops = {
763 .llseek = null_lseek,
764 .read = read_null,
765 .write = write_null,
766 .splice_write = splice_write_null,
767};
768
769#ifdef CONFIG_DEVPORT
770static const struct file_operations port_fops = {
771 .llseek = memory_lseek,
772 .read = read_port,
773 .write = write_port,
774 .open = open_port,
775};
776#endif
777
778static const struct file_operations zero_fops = {
779 .llseek = zero_lseek,
780 .read = read_zero,
781 .write = write_zero,
782 .mmap = mmap_zero,
783};
784
785/*
786 * capabilities for /dev/zero
787 * - permits private mappings, "copies" are taken of the source of zeros
788 * - no writeback happens
789 */
790static struct backing_dev_info zero_bdi = {
791 .name = "char/mem",
792 .capabilities = BDI_CAP_MAP_COPY | BDI_CAP_NO_ACCT_AND_WRITEBACK,
793};
794
795static const struct file_operations full_fops = {
796 .llseek = full_lseek,
797 .read = read_full,
798 .write = write_full,
799};
800
801#ifdef CONFIG_CRASH_DUMP
802static const struct file_operations oldmem_fops = {
803 .read = read_oldmem,
804 .open = open_oldmem,
805 .llseek = default_llseek,
806};
807#endif
808
809static ssize_t kmsg_writev(struct kiocb *iocb, const struct iovec *iv,
810 unsigned long count, loff_t pos)
811{
812 char *line, *p;
813 int i;
814 ssize_t ret = -EFAULT;
815 size_t len = iov_length(iv, count);
816
817 line = kmalloc(len + 1, GFP_KERNEL);
818 if (line == NULL)
819 return -ENOMEM;
820
821 /*
822 * copy all vectors into a single string, to ensure we do
823 * not interleave our log line with other printk calls
824 */
825 p = line;
826 for (i = 0; i < count; i++) {
827 if (copy_from_user(p, iv[i].iov_base, iv[i].iov_len))
828 goto out;
829 p += iv[i].iov_len;
830 }
831 p[0] = '\0';
832
833 ret = printk("%s", line);
834 /* printk can add a prefix */
835 if (ret > len)
836 ret = len;
837out:
838 kfree(line);
839 return ret;
840}
841
842static const struct file_operations kmsg_fops = {
843 .aio_write = kmsg_writev,
844 .llseek = noop_llseek,
845};
846
847static const struct memdev {
848 const char *name;
849 mode_t mode;
850 const struct file_operations *fops;
851 struct backing_dev_info *dev_info;
852} devlist[] = {
853 [1] = { "mem", 0, &mem_fops, &directly_mappable_cdev_bdi },
854#ifdef CONFIG_DEVKMEM
855 [2] = { "kmem", 0, &kmem_fops, &directly_mappable_cdev_bdi },
856#endif
857 [3] = { "null", 0666, &null_fops, NULL },
858#ifdef CONFIG_DEVPORT
859 [4] = { "port", 0, &port_fops, NULL },
860#endif
861 [5] = { "zero", 0666, &zero_fops, &zero_bdi },
862 [7] = { "full", 0666, &full_fops, NULL },
863 [8] = { "random", 0666, &random_fops, NULL },
864 [9] = { "urandom", 0666, &urandom_fops, NULL },
865 [11] = { "kmsg", 0, &kmsg_fops, NULL },
866#ifdef CONFIG_CRASH_DUMP
867 [12] = { "oldmem", 0, &oldmem_fops, NULL },
868#endif
869};
870
871static int memory_open(struct inode *inode, struct file *filp)
872{
873 int minor;
874 const struct memdev *dev;
875
876 minor = iminor(inode);
877 if (minor >= ARRAY_SIZE(devlist))
878 return -ENXIO;
879
880 dev = &devlist[minor];
881 if (!dev->fops)
882 return -ENXIO;
883
884 filp->f_op = dev->fops;
885 if (dev->dev_info)
886 filp->f_mapping->backing_dev_info = dev->dev_info;
887
888 /* Is /dev/mem or /dev/kmem ? */
889 if (dev->dev_info == &directly_mappable_cdev_bdi)
890 filp->f_mode |= FMODE_UNSIGNED_OFFSET;
891
892 if (dev->fops->open)
893 return dev->fops->open(inode, filp);
894
895 return 0;
896}
897
898static const struct file_operations memory_fops = {
899 .open = memory_open,
900 .llseek = noop_llseek,
901};
902
903static char *mem_devnode(struct device *dev, mode_t *mode)
904{
905 if (mode && devlist[MINOR(dev->devt)].mode)
906 *mode = devlist[MINOR(dev->devt)].mode;
907 return NULL;
908}
909
910static struct class *mem_class;
911
912static int __init chr_dev_init(void)
913{
914 int minor;
915 int err;
916
917 err = bdi_init(&zero_bdi);
918 if (err)
919 return err;
920
921 if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
922 printk("unable to get major %d for memory devs\n", MEM_MAJOR);
923
924 mem_class = class_create(THIS_MODULE, "mem");
925 if (IS_ERR(mem_class))
926 return PTR_ERR(mem_class);
927
928 mem_class->devnode = mem_devnode;
929 for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
930 if (!devlist[minor].name)
931 continue;
932 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
933 NULL, devlist[minor].name);
934 }
935
936 return tty_init();
937}
938
939fs_initcall(chr_dev_init);
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * linux/drivers/char/mem.c
4 *
5 * Copyright (C) 1991, 1992 Linus Torvalds
6 *
7 * Added devfs support.
8 * Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
9 * Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
10 */
11
12#include <linux/mm.h>
13#include <linux/miscdevice.h>
14#include <linux/slab.h>
15#include <linux/vmalloc.h>
16#include <linux/mman.h>
17#include <linux/random.h>
18#include <linux/init.h>
19#include <linux/raw.h>
20#include <linux/tty.h>
21#include <linux/capability.h>
22#include <linux/ptrace.h>
23#include <linux/device.h>
24#include <linux/highmem.h>
25#include <linux/backing-dev.h>
26#include <linux/shmem_fs.h>
27#include <linux/splice.h>
28#include <linux/pfn.h>
29#include <linux/export.h>
30#include <linux/io.h>
31#include <linux/uio.h>
32#include <linux/uaccess.h>
33#include <linux/security.h>
34
35#ifdef CONFIG_IA64
36# include <linux/efi.h>
37#endif
38
39#define DEVPORT_MINOR 4
40
41static inline unsigned long size_inside_page(unsigned long start,
42 unsigned long size)
43{
44 unsigned long sz;
45
46 sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));
47
48 return min(sz, size);
49}
50
51#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
52static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
53{
54 return addr + count <= __pa(high_memory);
55}
56
57static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
58{
59 return 1;
60}
61#endif
62
63#ifdef CONFIG_STRICT_DEVMEM
64static inline int page_is_allowed(unsigned long pfn)
65{
66 return devmem_is_allowed(pfn);
67}
68static inline int range_is_allowed(unsigned long pfn, unsigned long size)
69{
70 u64 from = ((u64)pfn) << PAGE_SHIFT;
71 u64 to = from + size;
72 u64 cursor = from;
73
74 while (cursor < to) {
75 if (!devmem_is_allowed(pfn))
76 return 0;
77 cursor += PAGE_SIZE;
78 pfn++;
79 }
80 return 1;
81}
82#else
83static inline int page_is_allowed(unsigned long pfn)
84{
85 return 1;
86}
87static inline int range_is_allowed(unsigned long pfn, unsigned long size)
88{
89 return 1;
90}
91#endif
92
93#ifndef unxlate_dev_mem_ptr
94#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
95void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
96{
97}
98#endif
99
100static inline bool should_stop_iteration(void)
101{
102 if (need_resched())
103 cond_resched();
104 return fatal_signal_pending(current);
105}
106
107/*
108 * This funcion reads the *physical* memory. The f_pos points directly to the
109 * memory location.
110 */
111static ssize_t read_mem(struct file *file, char __user *buf,
112 size_t count, loff_t *ppos)
113{
114 phys_addr_t p = *ppos;
115 ssize_t read, sz;
116 void *ptr;
117 char *bounce;
118 int err;
119
120 if (p != *ppos)
121 return 0;
122
123 if (!valid_phys_addr_range(p, count))
124 return -EFAULT;
125 read = 0;
126#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
127 /* we don't have page 0 mapped on sparc and m68k.. */
128 if (p < PAGE_SIZE) {
129 sz = size_inside_page(p, count);
130 if (sz > 0) {
131 if (clear_user(buf, sz))
132 return -EFAULT;
133 buf += sz;
134 p += sz;
135 count -= sz;
136 read += sz;
137 }
138 }
139#endif
140
141 bounce = kmalloc(PAGE_SIZE, GFP_KERNEL);
142 if (!bounce)
143 return -ENOMEM;
144
145 while (count > 0) {
146 unsigned long remaining;
147 int allowed, probe;
148
149 sz = size_inside_page(p, count);
150
151 err = -EPERM;
152 allowed = page_is_allowed(p >> PAGE_SHIFT);
153 if (!allowed)
154 goto failed;
155
156 err = -EFAULT;
157 if (allowed == 2) {
158 /* Show zeros for restricted memory. */
159 remaining = clear_user(buf, sz);
160 } else {
161 /*
162 * On ia64 if a page has been mapped somewhere as
163 * uncached, then it must also be accessed uncached
164 * by the kernel or data corruption may occur.
165 */
166 ptr = xlate_dev_mem_ptr(p);
167 if (!ptr)
168 goto failed;
169
170 probe = probe_kernel_read(bounce, ptr, sz);
171 unxlate_dev_mem_ptr(p, ptr);
172 if (probe)
173 goto failed;
174
175 remaining = copy_to_user(buf, bounce, sz);
176 }
177
178 if (remaining)
179 goto failed;
180
181 buf += sz;
182 p += sz;
183 count -= sz;
184 read += sz;
185 if (should_stop_iteration())
186 break;
187 }
188 kfree(bounce);
189
190 *ppos += read;
191 return read;
192
193failed:
194 kfree(bounce);
195 return err;
196}
197
198static ssize_t write_mem(struct file *file, const char __user *buf,
199 size_t count, loff_t *ppos)
200{
201 phys_addr_t p = *ppos;
202 ssize_t written, sz;
203 unsigned long copied;
204 void *ptr;
205
206 if (p != *ppos)
207 return -EFBIG;
208
209 if (!valid_phys_addr_range(p, count))
210 return -EFAULT;
211
212 written = 0;
213
214#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
215 /* we don't have page 0 mapped on sparc and m68k.. */
216 if (p < PAGE_SIZE) {
217 sz = size_inside_page(p, count);
218 /* Hmm. Do something? */
219 buf += sz;
220 p += sz;
221 count -= sz;
222 written += sz;
223 }
224#endif
225
226 while (count > 0) {
227 int allowed;
228
229 sz = size_inside_page(p, count);
230
231 allowed = page_is_allowed(p >> PAGE_SHIFT);
232 if (!allowed)
233 return -EPERM;
234
235 /* Skip actual writing when a page is marked as restricted. */
236 if (allowed == 1) {
237 /*
238 * On ia64 if a page has been mapped somewhere as
239 * uncached, then it must also be accessed uncached
240 * by the kernel or data corruption may occur.
241 */
242 ptr = xlate_dev_mem_ptr(p);
243 if (!ptr) {
244 if (written)
245 break;
246 return -EFAULT;
247 }
248
249 copied = copy_from_user(ptr, buf, sz);
250 unxlate_dev_mem_ptr(p, ptr);
251 if (copied) {
252 written += sz - copied;
253 if (written)
254 break;
255 return -EFAULT;
256 }
257 }
258
259 buf += sz;
260 p += sz;
261 count -= sz;
262 written += sz;
263 if (should_stop_iteration())
264 break;
265 }
266
267 *ppos += written;
268 return written;
269}
270
271int __weak phys_mem_access_prot_allowed(struct file *file,
272 unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
273{
274 return 1;
275}
276
277#ifndef __HAVE_PHYS_MEM_ACCESS_PROT
278
279/*
280 * Architectures vary in how they handle caching for addresses
281 * outside of main memory.
282 *
283 */
284#ifdef pgprot_noncached
285static int uncached_access(struct file *file, phys_addr_t addr)
286{
287#if defined(CONFIG_IA64)
288 /*
289 * On ia64, we ignore O_DSYNC because we cannot tolerate memory
290 * attribute aliases.
291 */
292 return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
293#elif defined(CONFIG_MIPS)
294 {
295 extern int __uncached_access(struct file *file,
296 unsigned long addr);
297
298 return __uncached_access(file, addr);
299 }
300#else
301 /*
302 * Accessing memory above the top the kernel knows about or through a
303 * file pointer
304 * that was marked O_DSYNC will be done non-cached.
305 */
306 if (file->f_flags & O_DSYNC)
307 return 1;
308 return addr >= __pa(high_memory);
309#endif
310}
311#endif
312
313static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
314 unsigned long size, pgprot_t vma_prot)
315{
316#ifdef pgprot_noncached
317 phys_addr_t offset = pfn << PAGE_SHIFT;
318
319 if (uncached_access(file, offset))
320 return pgprot_noncached(vma_prot);
321#endif
322 return vma_prot;
323}
324#endif
325
326#ifndef CONFIG_MMU
327static unsigned long get_unmapped_area_mem(struct file *file,
328 unsigned long addr,
329 unsigned long len,
330 unsigned long pgoff,
331 unsigned long flags)
332{
333 if (!valid_mmap_phys_addr_range(pgoff, len))
334 return (unsigned long) -EINVAL;
335 return pgoff << PAGE_SHIFT;
336}
337
338/* permit direct mmap, for read, write or exec */
339static unsigned memory_mmap_capabilities(struct file *file)
340{
341 return NOMMU_MAP_DIRECT |
342 NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
343}
344
345static unsigned zero_mmap_capabilities(struct file *file)
346{
347 return NOMMU_MAP_COPY;
348}
349
350/* can't do an in-place private mapping if there's no MMU */
351static inline int private_mapping_ok(struct vm_area_struct *vma)
352{
353 return vma->vm_flags & VM_MAYSHARE;
354}
355#else
356
357static inline int private_mapping_ok(struct vm_area_struct *vma)
358{
359 return 1;
360}
361#endif
362
363static const struct vm_operations_struct mmap_mem_ops = {
364#ifdef CONFIG_HAVE_IOREMAP_PROT
365 .access = generic_access_phys
366#endif
367};
368
369static int mmap_mem(struct file *file, struct vm_area_struct *vma)
370{
371 size_t size = vma->vm_end - vma->vm_start;
372 phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
373
374 /* Does it even fit in phys_addr_t? */
375 if (offset >> PAGE_SHIFT != vma->vm_pgoff)
376 return -EINVAL;
377
378 /* It's illegal to wrap around the end of the physical address space. */
379 if (offset + (phys_addr_t)size - 1 < offset)
380 return -EINVAL;
381
382 if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
383 return -EINVAL;
384
385 if (!private_mapping_ok(vma))
386 return -ENOSYS;
387
388 if (!range_is_allowed(vma->vm_pgoff, size))
389 return -EPERM;
390
391 if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
392 &vma->vm_page_prot))
393 return -EINVAL;
394
395 vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
396 size,
397 vma->vm_page_prot);
398
399 vma->vm_ops = &mmap_mem_ops;
400
401 /* Remap-pfn-range will mark the range VM_IO */
402 if (remap_pfn_range(vma,
403 vma->vm_start,
404 vma->vm_pgoff,
405 size,
406 vma->vm_page_prot)) {
407 return -EAGAIN;
408 }
409 return 0;
410}
411
412static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
413{
414 unsigned long pfn;
415
416 /* Turn a kernel-virtual address into a physical page frame */
417 pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;
418
419 /*
420 * RED-PEN: on some architectures there is more mapped memory than
421 * available in mem_map which pfn_valid checks for. Perhaps should add a
422 * new macro here.
423 *
424 * RED-PEN: vmalloc is not supported right now.
425 */
426 if (!pfn_valid(pfn))
427 return -EIO;
428
429 vma->vm_pgoff = pfn;
430 return mmap_mem(file, vma);
431}
432
433/*
434 * This function reads the *virtual* memory as seen by the kernel.
435 */
436static ssize_t read_kmem(struct file *file, char __user *buf,
437 size_t count, loff_t *ppos)
438{
439 unsigned long p = *ppos;
440 ssize_t low_count, read, sz;
441 char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
442 int err = 0;
443
444 read = 0;
445 if (p < (unsigned long) high_memory) {
446 low_count = count;
447 if (count > (unsigned long)high_memory - p)
448 low_count = (unsigned long)high_memory - p;
449
450#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
451 /* we don't have page 0 mapped on sparc and m68k.. */
452 if (p < PAGE_SIZE && low_count > 0) {
453 sz = size_inside_page(p, low_count);
454 if (clear_user(buf, sz))
455 return -EFAULT;
456 buf += sz;
457 p += sz;
458 read += sz;
459 low_count -= sz;
460 count -= sz;
461 }
462#endif
463 while (low_count > 0) {
464 sz = size_inside_page(p, low_count);
465
466 /*
467 * On ia64 if a page has been mapped somewhere as
468 * uncached, then it must also be accessed uncached
469 * by the kernel or data corruption may occur
470 */
471 kbuf = xlate_dev_kmem_ptr((void *)p);
472 if (!virt_addr_valid(kbuf))
473 return -ENXIO;
474
475 if (copy_to_user(buf, kbuf, sz))
476 return -EFAULT;
477 buf += sz;
478 p += sz;
479 read += sz;
480 low_count -= sz;
481 count -= sz;
482 if (should_stop_iteration()) {
483 count = 0;
484 break;
485 }
486 }
487 }
488
489 if (count > 0) {
490 kbuf = (char *)__get_free_page(GFP_KERNEL);
491 if (!kbuf)
492 return -ENOMEM;
493 while (count > 0) {
494 sz = size_inside_page(p, count);
495 if (!is_vmalloc_or_module_addr((void *)p)) {
496 err = -ENXIO;
497 break;
498 }
499 sz = vread(kbuf, (char *)p, sz);
500 if (!sz)
501 break;
502 if (copy_to_user(buf, kbuf, sz)) {
503 err = -EFAULT;
504 break;
505 }
506 count -= sz;
507 buf += sz;
508 read += sz;
509 p += sz;
510 if (should_stop_iteration())
511 break;
512 }
513 free_page((unsigned long)kbuf);
514 }
515 *ppos = p;
516 return read ? read : err;
517}
518
519
520static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
521 size_t count, loff_t *ppos)
522{
523 ssize_t written, sz;
524 unsigned long copied;
525
526 written = 0;
527#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
528 /* we don't have page 0 mapped on sparc and m68k.. */
529 if (p < PAGE_SIZE) {
530 sz = size_inside_page(p, count);
531 /* Hmm. Do something? */
532 buf += sz;
533 p += sz;
534 count -= sz;
535 written += sz;
536 }
537#endif
538
539 while (count > 0) {
540 void *ptr;
541
542 sz = size_inside_page(p, count);
543
544 /*
545 * On ia64 if a page has been mapped somewhere as uncached, then
546 * it must also be accessed uncached by the kernel or data
547 * corruption may occur.
548 */
549 ptr = xlate_dev_kmem_ptr((void *)p);
550 if (!virt_addr_valid(ptr))
551 return -ENXIO;
552
553 copied = copy_from_user(ptr, buf, sz);
554 if (copied) {
555 written += sz - copied;
556 if (written)
557 break;
558 return -EFAULT;
559 }
560 buf += sz;
561 p += sz;
562 count -= sz;
563 written += sz;
564 if (should_stop_iteration())
565 break;
566 }
567
568 *ppos += written;
569 return written;
570}
571
572/*
573 * This function writes to the *virtual* memory as seen by the kernel.
574 */
575static ssize_t write_kmem(struct file *file, const char __user *buf,
576 size_t count, loff_t *ppos)
577{
578 unsigned long p = *ppos;
579 ssize_t wrote = 0;
580 ssize_t virtr = 0;
581 char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
582 int err = 0;
583
584 if (p < (unsigned long) high_memory) {
585 unsigned long to_write = min_t(unsigned long, count,
586 (unsigned long)high_memory - p);
587 wrote = do_write_kmem(p, buf, to_write, ppos);
588 if (wrote != to_write)
589 return wrote;
590 p += wrote;
591 buf += wrote;
592 count -= wrote;
593 }
594
595 if (count > 0) {
596 kbuf = (char *)__get_free_page(GFP_KERNEL);
597 if (!kbuf)
598 return wrote ? wrote : -ENOMEM;
599 while (count > 0) {
600 unsigned long sz = size_inside_page(p, count);
601 unsigned long n;
602
603 if (!is_vmalloc_or_module_addr((void *)p)) {
604 err = -ENXIO;
605 break;
606 }
607 n = copy_from_user(kbuf, buf, sz);
608 if (n) {
609 err = -EFAULT;
610 break;
611 }
612 vwrite(kbuf, (char *)p, sz);
613 count -= sz;
614 buf += sz;
615 virtr += sz;
616 p += sz;
617 if (should_stop_iteration())
618 break;
619 }
620 free_page((unsigned long)kbuf);
621 }
622
623 *ppos = p;
624 return virtr + wrote ? : err;
625}
626
627static ssize_t read_port(struct file *file, char __user *buf,
628 size_t count, loff_t *ppos)
629{
630 unsigned long i = *ppos;
631 char __user *tmp = buf;
632
633 if (!access_ok(buf, count))
634 return -EFAULT;
635 while (count-- > 0 && i < 65536) {
636 if (__put_user(inb(i), tmp) < 0)
637 return -EFAULT;
638 i++;
639 tmp++;
640 }
641 *ppos = i;
642 return tmp-buf;
643}
644
645static ssize_t write_port(struct file *file, const char __user *buf,
646 size_t count, loff_t *ppos)
647{
648 unsigned long i = *ppos;
649 const char __user *tmp = buf;
650
651 if (!access_ok(buf, count))
652 return -EFAULT;
653 while (count-- > 0 && i < 65536) {
654 char c;
655
656 if (__get_user(c, tmp)) {
657 if (tmp > buf)
658 break;
659 return -EFAULT;
660 }
661 outb(c, i);
662 i++;
663 tmp++;
664 }
665 *ppos = i;
666 return tmp-buf;
667}
668
669static ssize_t read_null(struct file *file, char __user *buf,
670 size_t count, loff_t *ppos)
671{
672 return 0;
673}
674
675static ssize_t write_null(struct file *file, const char __user *buf,
676 size_t count, loff_t *ppos)
677{
678 return count;
679}
680
681static ssize_t read_iter_null(struct kiocb *iocb, struct iov_iter *to)
682{
683 return 0;
684}
685
686static ssize_t write_iter_null(struct kiocb *iocb, struct iov_iter *from)
687{
688 size_t count = iov_iter_count(from);
689 iov_iter_advance(from, count);
690 return count;
691}
692
693static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
694 struct splice_desc *sd)
695{
696 return sd->len;
697}
698
699static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
700 loff_t *ppos, size_t len, unsigned int flags)
701{
702 return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
703}
704
705static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
706{
707 size_t written = 0;
708
709 while (iov_iter_count(iter)) {
710 size_t chunk = iov_iter_count(iter), n;
711
712 if (chunk > PAGE_SIZE)
713 chunk = PAGE_SIZE; /* Just for latency reasons */
714 n = iov_iter_zero(chunk, iter);
715 if (!n && iov_iter_count(iter))
716 return written ? written : -EFAULT;
717 written += n;
718 if (signal_pending(current))
719 return written ? written : -ERESTARTSYS;
720 cond_resched();
721 }
722 return written;
723}
724
725static int mmap_zero(struct file *file, struct vm_area_struct *vma)
726{
727#ifndef CONFIG_MMU
728 return -ENOSYS;
729#endif
730 if (vma->vm_flags & VM_SHARED)
731 return shmem_zero_setup(vma);
732 vma_set_anonymous(vma);
733 return 0;
734}
735
736static unsigned long get_unmapped_area_zero(struct file *file,
737 unsigned long addr, unsigned long len,
738 unsigned long pgoff, unsigned long flags)
739{
740#ifdef CONFIG_MMU
741 if (flags & MAP_SHARED) {
742 /*
743 * mmap_zero() will call shmem_zero_setup() to create a file,
744 * so use shmem's get_unmapped_area in case it can be huge;
745 * and pass NULL for file as in mmap.c's get_unmapped_area(),
746 * so as not to confuse shmem with our handle on "/dev/zero".
747 */
748 return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags);
749 }
750
751 /* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */
752 return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
753#else
754 return -ENOSYS;
755#endif
756}
757
758static ssize_t write_full(struct file *file, const char __user *buf,
759 size_t count, loff_t *ppos)
760{
761 return -ENOSPC;
762}
763
764/*
765 * Special lseek() function for /dev/null and /dev/zero. Most notably, you
766 * can fopen() both devices with "a" now. This was previously impossible.
767 * -- SRB.
768 */
769static loff_t null_lseek(struct file *file, loff_t offset, int orig)
770{
771 return file->f_pos = 0;
772}
773
774/*
775 * The memory devices use the full 32/64 bits of the offset, and so we cannot
776 * check against negative addresses: they are ok. The return value is weird,
777 * though, in that case (0).
778 *
779 * also note that seeking relative to the "end of file" isn't supported:
780 * it has no meaning, so it returns -EINVAL.
781 */
782static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
783{
784 loff_t ret;
785
786 inode_lock(file_inode(file));
787 switch (orig) {
788 case SEEK_CUR:
789 offset += file->f_pos;
790 /* fall through */
791 case SEEK_SET:
792 /* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
793 if ((unsigned long long)offset >= -MAX_ERRNO) {
794 ret = -EOVERFLOW;
795 break;
796 }
797 file->f_pos = offset;
798 ret = file->f_pos;
799 force_successful_syscall_return();
800 break;
801 default:
802 ret = -EINVAL;
803 }
804 inode_unlock(file_inode(file));
805 return ret;
806}
807
808static int open_port(struct inode *inode, struct file *filp)
809{
810 if (!capable(CAP_SYS_RAWIO))
811 return -EPERM;
812
813 return security_locked_down(LOCKDOWN_DEV_MEM);
814}
815
816#define zero_lseek null_lseek
817#define full_lseek null_lseek
818#define write_zero write_null
819#define write_iter_zero write_iter_null
820#define open_mem open_port
821#define open_kmem open_mem
822
823static const struct file_operations __maybe_unused mem_fops = {
824 .llseek = memory_lseek,
825 .read = read_mem,
826 .write = write_mem,
827 .mmap = mmap_mem,
828 .open = open_mem,
829#ifndef CONFIG_MMU
830 .get_unmapped_area = get_unmapped_area_mem,
831 .mmap_capabilities = memory_mmap_capabilities,
832#endif
833};
834
835static const struct file_operations __maybe_unused kmem_fops = {
836 .llseek = memory_lseek,
837 .read = read_kmem,
838 .write = write_kmem,
839 .mmap = mmap_kmem,
840 .open = open_kmem,
841#ifndef CONFIG_MMU
842 .get_unmapped_area = get_unmapped_area_mem,
843 .mmap_capabilities = memory_mmap_capabilities,
844#endif
845};
846
847static const struct file_operations null_fops = {
848 .llseek = null_lseek,
849 .read = read_null,
850 .write = write_null,
851 .read_iter = read_iter_null,
852 .write_iter = write_iter_null,
853 .splice_write = splice_write_null,
854};
855
856static const struct file_operations __maybe_unused port_fops = {
857 .llseek = memory_lseek,
858 .read = read_port,
859 .write = write_port,
860 .open = open_port,
861};
862
863static const struct file_operations zero_fops = {
864 .llseek = zero_lseek,
865 .write = write_zero,
866 .read_iter = read_iter_zero,
867 .write_iter = write_iter_zero,
868 .mmap = mmap_zero,
869 .get_unmapped_area = get_unmapped_area_zero,
870#ifndef CONFIG_MMU
871 .mmap_capabilities = zero_mmap_capabilities,
872#endif
873};
874
875static const struct file_operations full_fops = {
876 .llseek = full_lseek,
877 .read_iter = read_iter_zero,
878 .write = write_full,
879};
880
881static const struct memdev {
882 const char *name;
883 umode_t mode;
884 const struct file_operations *fops;
885 fmode_t fmode;
886} devlist[] = {
887#ifdef CONFIG_DEVMEM
888 [1] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET },
889#endif
890#ifdef CONFIG_DEVKMEM
891 [2] = { "kmem", 0, &kmem_fops, FMODE_UNSIGNED_OFFSET },
892#endif
893 [3] = { "null", 0666, &null_fops, 0 },
894#ifdef CONFIG_DEVPORT
895 [4] = { "port", 0, &port_fops, 0 },
896#endif
897 [5] = { "zero", 0666, &zero_fops, 0 },
898 [7] = { "full", 0666, &full_fops, 0 },
899 [8] = { "random", 0666, &random_fops, 0 },
900 [9] = { "urandom", 0666, &urandom_fops, 0 },
901#ifdef CONFIG_PRINTK
902 [11] = { "kmsg", 0644, &kmsg_fops, 0 },
903#endif
904};
905
906static int memory_open(struct inode *inode, struct file *filp)
907{
908 int minor;
909 const struct memdev *dev;
910
911 minor = iminor(inode);
912 if (minor >= ARRAY_SIZE(devlist))
913 return -ENXIO;
914
915 dev = &devlist[minor];
916 if (!dev->fops)
917 return -ENXIO;
918
919 filp->f_op = dev->fops;
920 filp->f_mode |= dev->fmode;
921
922 if (dev->fops->open)
923 return dev->fops->open(inode, filp);
924
925 return 0;
926}
927
928static const struct file_operations memory_fops = {
929 .open = memory_open,
930 .llseek = noop_llseek,
931};
932
933static char *mem_devnode(struct device *dev, umode_t *mode)
934{
935 if (mode && devlist[MINOR(dev->devt)].mode)
936 *mode = devlist[MINOR(dev->devt)].mode;
937 return NULL;
938}
939
940static struct class *mem_class;
941
942static int __init chr_dev_init(void)
943{
944 int minor;
945
946 if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
947 printk("unable to get major %d for memory devs\n", MEM_MAJOR);
948
949 mem_class = class_create(THIS_MODULE, "mem");
950 if (IS_ERR(mem_class))
951 return PTR_ERR(mem_class);
952
953 mem_class->devnode = mem_devnode;
954 for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
955 if (!devlist[minor].name)
956 continue;
957
958 /*
959 * Create /dev/port?
960 */
961 if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
962 continue;
963
964 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
965 NULL, devlist[minor].name);
966 }
967
968 return tty_init();
969}
970
971fs_initcall(chr_dev_init);