v3.1
 
  1/*
  2 *  linux/drivers/char/mem.c
  3 *
  4 *  Copyright (C) 1991, 1992  Linus Torvalds
  5 *
  6 *  Added devfs support.
  7 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
  8 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
  9 */
 10
 11#include <linux/mm.h>
 12#include <linux/miscdevice.h>
 13#include <linux/slab.h>
 14#include <linux/vmalloc.h>
 15#include <linux/mman.h>
 16#include <linux/random.h>
 17#include <linux/init.h>
 18#include <linux/raw.h>
 19#include <linux/tty.h>
 20#include <linux/capability.h>
 21#include <linux/ptrace.h>
 22#include <linux/device.h>
 23#include <linux/highmem.h>
 24#include <linux/crash_dump.h>
 25#include <linux/backing-dev.h>
 26#include <linux/bootmem.h>
 27#include <linux/splice.h>
 28#include <linux/pfn.h>
 29
 30#include <asm/uaccess.h>
 31#include <asm/io.h>
 32
 33#ifdef CONFIG_IA64
 34# include <linux/efi.h>
 35#endif
 36
 37static inline unsigned long size_inside_page(unsigned long start,
 38					     unsigned long size)
 39{
 40	unsigned long sz;
 41
 42	sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));
 43
 44	return min(sz, size);
 45}
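
An aside on the helper above: it returns how many of the requested bytes fit before the next page boundary. Below is a minimal stand-alone sketch of the same arithmetic, for illustration only (it assumes 4 KiB pages; the real PAGE_SIZE is per-architecture):

#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumption: 4 KiB pages */

static unsigned long size_inside_page(unsigned long start, unsigned long size)
{
	unsigned long sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));
	return sz < size ? sz : size;		/* min(sz, size) */
}

int main(void)
{
	/* 100 bytes into a page, 8000 requested: only 3996 fit in this page */
	printf("%lu\n", size_inside_page(100, 8000));	/* prints 3996 */
	/* page-aligned start, small request: the request wins */
	printf("%lu\n", size_inside_page(8192, 512));	/* prints 512 */
	return 0;
}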
 46
 47#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
 48static inline int valid_phys_addr_range(unsigned long addr, size_t count)
 49{
 50	return addr + count <= __pa(high_memory);
 51}
 52
 53static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
 54{
 55	return 1;
 56}
 57#endif
 58
 59#ifdef CONFIG_STRICT_DEVMEM
 60static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 61{
 62	u64 from = ((u64)pfn) << PAGE_SHIFT;
 63	u64 to = from + size;
 64	u64 cursor = from;
 65
 66	while (cursor < to) {
 67		if (!devmem_is_allowed(pfn)) {
 68			printk(KERN_INFO
 69		"Program %s tried to access /dev/mem between %Lx->%Lx.\n",
 70				current->comm, from, to);
 71			return 0;
 72		}
 73		cursor += PAGE_SIZE;
 74		pfn++;
 75	}
 76	return 1;
 77}
 78#else
 79static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 80{
 81	return 1;
 82}
 83#endif
 84
 85void __weak unxlate_dev_mem_ptr(unsigned long phys, void *addr)
 86{
 87}
 88
 89/*
 90 * This function reads the *physical* memory. The f_pos points directly to the
 91 * memory location.
 92 */
 93static ssize_t read_mem(struct file *file, char __user *buf,
 94			size_t count, loff_t *ppos)
 95{
 96	unsigned long p = *ppos;
 97	ssize_t read, sz;
 98	char *ptr;
 99
100	if (!valid_phys_addr_range(p, count))
101		return -EFAULT;
102	read = 0;
103#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
104	/* we don't have page 0 mapped on sparc and m68k.. */
105	if (p < PAGE_SIZE) {
106		sz = size_inside_page(p, count);
107		if (sz > 0) {
108			if (clear_user(buf, sz))
109				return -EFAULT;
110			buf += sz;
111			p += sz;
112			count -= sz;
113			read += sz;
114		}
115	}
116#endif
117
118	while (count > 0) {
119		unsigned long remaining;
120
121		sz = size_inside_page(p, count);
122
123		if (!range_is_allowed(p >> PAGE_SHIFT, count))
124			return -EPERM;
125
126		/*
127		 * On ia64 if a page has been mapped somewhere as uncached, then
128		 * it must also be accessed uncached by the kernel or data
129		 * corruption may occur.
130		 */
131		ptr = xlate_dev_mem_ptr(p);
132		if (!ptr)
133			return -EFAULT;
134
135		remaining = copy_to_user(buf, ptr, sz);
136		unxlate_dev_mem_ptr(p, ptr);
137		if (remaining)
138			return -EFAULT;
139
140		buf += sz;
141		p += sz;
142		count -= sz;
143		read += sz;
144	}
145
146	*ppos += read;
147	return read;
148}
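
Since the f_pos of /dev/mem is the physical address itself, a plain pread() at an offset reads that physical location. A minimal user-space sketch follows (an illustration, not part of the driver: it assumes root, and with CONFIG_STRICT_DEVMEM only ranges that devmem_is_allowed() permits, such as the legacy BIOS area on x86, are readable):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[16];
	off_t phys = 0xF0000;		/* example target: PC BIOS shadow area */
	int fd = open("/dev/mem", O_RDONLY);

	if (fd < 0) { perror("open"); return 1; }
	if (pread(fd, buf, sizeof(buf), phys) != (ssize_t)sizeof(buf)) {
		perror("pread");	/* EPERM if the range is restricted */
		return 1;
	}
	for (size_t i = 0; i < sizeof(buf); i++)
		printf("%02x ", buf[i]);
	putchar('\n');
	close(fd);
	return 0;
}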
149
150static ssize_t write_mem(struct file *file, const char __user *buf,
151			 size_t count, loff_t *ppos)
152{
153	unsigned long p = *ppos;
154	ssize_t written, sz;
155	unsigned long copied;
156	void *ptr;
157
158	if (!valid_phys_addr_range(p, count))
159		return -EFAULT;
160
161	written = 0;
162
163#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
164	/* we don't have page 0 mapped on sparc and m68k.. */
165	if (p < PAGE_SIZE) {
166		sz = size_inside_page(p, count);
167		/* Hmm. Do something? */
168		buf += sz;
169		p += sz;
170		count -= sz;
171		written += sz;
172	}
173#endif
174
175	while (count > 0) {
176		sz = size_inside_page(p, count);
177
178		if (!range_is_allowed(p >> PAGE_SHIFT, sz))
179			return -EPERM;
180
181		/*
182		 * On ia64 if a page has been mapped somewhere as uncached, then
183		 * it must also be accessed uncached by the kernel or data
184		 * corruption may occur.
185		 */
186		ptr = xlate_dev_mem_ptr(p);
187		if (!ptr) {
188			if (written)
189				break;
190			return -EFAULT;
191		}
192
193		copied = copy_from_user(ptr, buf, sz);
194		unxlate_dev_mem_ptr(p, ptr);
195		if (copied) {
196			written += sz - copied;
197			if (written)
198				break;
199			return -EFAULT;
200		}
201
202		buf += sz;
203		p += sz;
204		count -= sz;
205		written += sz;
206	}
207
208	*ppos += written;
209	return written;
210}
211
212int __weak phys_mem_access_prot_allowed(struct file *file,
213	unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
214{
215	return 1;
216}
217
218#ifndef __HAVE_PHYS_MEM_ACCESS_PROT
219
220/*
221 * Architectures vary in how they handle caching for addresses
222 * outside of main memory.
223 *
224 */
225#ifdef pgprot_noncached
226static int uncached_access(struct file *file, unsigned long addr)
227{
228#if defined(CONFIG_IA64)
229	/*
230	 * On ia64, we ignore O_DSYNC because we cannot tolerate memory
231	 * attribute aliases.
232	 */
233	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
234#elif defined(CONFIG_MIPS)
235	{
236		extern int __uncached_access(struct file *file,
237					     unsigned long addr);
238
239		return __uncached_access(file, addr);
240	}
241#else
242	/*
243	 * Accessing memory above the top the kernel knows about or through a
244	 * file pointer
245	 * that was marked O_DSYNC will be done non-cached.
246	 */
247	if (file->f_flags & O_DSYNC)
248		return 1;
249	return addr >= __pa(high_memory);
250#endif
251}
252#endif
253
254static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
255				     unsigned long size, pgprot_t vma_prot)
256{
257#ifdef pgprot_noncached
258	unsigned long offset = pfn << PAGE_SHIFT;
259
260	if (uncached_access(file, offset))
261		return pgprot_noncached(vma_prot);
262#endif
263	return vma_prot;
264}
265#endif
266
267#ifndef CONFIG_MMU
268static unsigned long get_unmapped_area_mem(struct file *file,
269					   unsigned long addr,
270					   unsigned long len,
271					   unsigned long pgoff,
272					   unsigned long flags)
273{
274	if (!valid_mmap_phys_addr_range(pgoff, len))
275		return (unsigned long) -EINVAL;
276	return pgoff << PAGE_SHIFT;
277}
278
279/* can't do an in-place private mapping if there's no MMU */
280static inline int private_mapping_ok(struct vm_area_struct *vma)
281{
282	return vma->vm_flags & VM_MAYSHARE;
283}
284#else
285#define get_unmapped_area_mem	NULL
286
287static inline int private_mapping_ok(struct vm_area_struct *vma)
288{
289	return 1;
290}
291#endif
292
293static const struct vm_operations_struct mmap_mem_ops = {
294#ifdef CONFIG_HAVE_IOREMAP_PROT
295	.access = generic_access_phys
296#endif
297};
298
299static int mmap_mem(struct file *file, struct vm_area_struct *vma)
300{
301	size_t size = vma->vm_end - vma->vm_start;
302
303	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
304		return -EINVAL;
305
306	if (!private_mapping_ok(vma))
307		return -ENOSYS;
308
309	if (!range_is_allowed(vma->vm_pgoff, size))
310		return -EPERM;
311
312	if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
313						&vma->vm_page_prot))
314		return -EINVAL;
315
316	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
317						 size,
318						 vma->vm_page_prot);
319
320	vma->vm_ops = &mmap_mem_ops;
321
322	/* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
323	if (remap_pfn_range(vma,
324			    vma->vm_start,
325			    vma->vm_pgoff,
326			    size,
327			    vma->vm_page_prot)) {
328		return -EAGAIN;
329	}
330	return 0;
331}
332
333#ifdef CONFIG_DEVKMEM
334static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
335{
336	unsigned long pfn;
337
338	/* Turn a kernel-virtual address into a physical page frame */
339	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;
340
341	/*
342	 * RED-PEN: on some architectures there is more mapped memory than
343	 * available in mem_map which pfn_valid checks for. Perhaps should add a
344	 * new macro here.
345	 *
346	 * RED-PEN: vmalloc is not supported right now.
347	 */
348	if (!pfn_valid(pfn))
349		return -EIO;
350
351	vma->vm_pgoff = pfn;
352	return mmap_mem(file, vma);
353}
354#endif
355
356#ifdef CONFIG_CRASH_DUMP
357/*
358 * Read memory corresponding to the old kernel.
359 */
360static ssize_t read_oldmem(struct file *file, char __user *buf,
361				size_t count, loff_t *ppos)
362{
363	unsigned long pfn, offset;
364	size_t read = 0, csize;
365	int rc = 0;
366
367	while (count) {
368		pfn = *ppos / PAGE_SIZE;
369		if (pfn > saved_max_pfn)
370			return read;
371
372		offset = (unsigned long)(*ppos % PAGE_SIZE);
373		if (count > PAGE_SIZE - offset)
374			csize = PAGE_SIZE - offset;
375		else
376			csize = count;
377
378		rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
379		if (rc < 0)
380			return rc;
381		buf += csize;
382		*ppos += csize;
383		read += csize;
384		count -= csize;
385	}
386	return read;
387}
388#endif
389
390#ifdef CONFIG_DEVKMEM
391/*
392 * This function reads the *virtual* memory as seen by the kernel.
393 */
394static ssize_t read_kmem(struct file *file, char __user *buf,
395			 size_t count, loff_t *ppos)
396{
397	unsigned long p = *ppos;
398	ssize_t low_count, read, sz;
399	char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
400	int err = 0;
401
402	read = 0;
403	if (p < (unsigned long) high_memory) {
404		low_count = count;
405		if (count > (unsigned long)high_memory - p)
406			low_count = (unsigned long)high_memory - p;
407
408#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
409		/* we don't have page 0 mapped on sparc and m68k.. */
410		if (p < PAGE_SIZE && low_count > 0) {
411			sz = size_inside_page(p, low_count);
412			if (clear_user(buf, sz))
413				return -EFAULT;
414			buf += sz;
415			p += sz;
416			read += sz;
417			low_count -= sz;
418			count -= sz;
419		}
420#endif
421		while (low_count > 0) {
422			sz = size_inside_page(p, low_count);
423
424			/*
425			 * On ia64 if a page has been mapped somewhere as
426			 * uncached, then it must also be accessed uncached
427			 * by the kernel or data corruption may occur
428			 */
429			kbuf = xlate_dev_kmem_ptr((char *)p);
430
431			if (copy_to_user(buf, kbuf, sz))
432				return -EFAULT;
433			buf += sz;
434			p += sz;
435			read += sz;
436			low_count -= sz;
437			count -= sz;
438		}
439	}
440
441	if (count > 0) {
442		kbuf = (char *)__get_free_page(GFP_KERNEL);
443		if (!kbuf)
444			return -ENOMEM;
445		while (count > 0) {
446			sz = size_inside_page(p, count);
447			if (!is_vmalloc_or_module_addr((void *)p)) {
448				err = -ENXIO;
449				break;
450			}
451			sz = vread(kbuf, (char *)p, sz);
452			if (!sz)
453				break;
454			if (copy_to_user(buf, kbuf, sz)) {
455				err = -EFAULT;
456				break;
457			}
458			count -= sz;
459			buf += sz;
460			read += sz;
461			p += sz;
462		}
463		free_page((unsigned long)kbuf);
464	}
465	*ppos = p;
466	return read ? read : err;
467}
468
469
470static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
471				size_t count, loff_t *ppos)
472{
473	ssize_t written, sz;
474	unsigned long copied;
475
476	written = 0;
477#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
478	/* we don't have page 0 mapped on sparc and m68k.. */
479	if (p < PAGE_SIZE) {
480		sz = size_inside_page(p, count);
481		/* Hmm. Do something? */
482		buf += sz;
483		p += sz;
484		count -= sz;
485		written += sz;
486	}
487#endif
488
489	while (count > 0) {
490		char *ptr;
491
492		sz = size_inside_page(p, count);
493
494		/*
495		 * On ia64 if a page has been mapped somewhere as uncached, then
496		 * it must also be accessed uncached by the kernel or data
497		 * corruption may occur.
498		 */
499		ptr = xlate_dev_kmem_ptr((char *)p);
500
501		copied = copy_from_user(ptr, buf, sz);
502		if (copied) {
503			written += sz - copied;
504			if (written)
505				break;
506			return -EFAULT;
507		}
508		buf += sz;
509		p += sz;
510		count -= sz;
511		written += sz;
512	}
513
514	*ppos += written;
515	return written;
516}
517
518/*
519 * This function writes to the *virtual* memory as seen by the kernel.
520 */
521static ssize_t write_kmem(struct file *file, const char __user *buf,
522			  size_t count, loff_t *ppos)
523{
524	unsigned long p = *ppos;
525	ssize_t wrote = 0;
526	ssize_t virtr = 0;
527	char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
528	int err = 0;
529
530	if (p < (unsigned long) high_memory) {
531		unsigned long to_write = min_t(unsigned long, count,
532					       (unsigned long)high_memory - p);
533		wrote = do_write_kmem(p, buf, to_write, ppos);
534		if (wrote != to_write)
535			return wrote;
536		p += wrote;
537		buf += wrote;
538		count -= wrote;
539	}
540
541	if (count > 0) {
542		kbuf = (char *)__get_free_page(GFP_KERNEL);
543		if (!kbuf)
544			return wrote ? wrote : -ENOMEM;
545		while (count > 0) {
546			unsigned long sz = size_inside_page(p, count);
547			unsigned long n;
548
549			if (!is_vmalloc_or_module_addr((void *)p)) {
550				err = -ENXIO;
551				break;
552			}
553			n = copy_from_user(kbuf, buf, sz);
554			if (n) {
555				err = -EFAULT;
556				break;
557			}
558			vwrite(kbuf, (char *)p, sz);
559			count -= sz;
560			buf += sz;
561			virtr += sz;
562			p += sz;
563		}
564		free_page((unsigned long)kbuf);
565	}
566
567	*ppos = p;
568	return virtr + wrote ? : err;
569}
570#endif
571
572#ifdef CONFIG_DEVPORT
573static ssize_t read_port(struct file *file, char __user *buf,
574			 size_t count, loff_t *ppos)
575{
576	unsigned long i = *ppos;
577	char __user *tmp = buf;
578
579	if (!access_ok(VERIFY_WRITE, buf, count))
580		return -EFAULT;
581	while (count-- > 0 && i < 65536) {
582		if (__put_user(inb(i), tmp) < 0)
583			return -EFAULT;
584		i++;
585		tmp++;
586	}
587	*ppos = i;
588	return tmp-buf;
589}
590
591static ssize_t write_port(struct file *file, const char __user *buf,
592			  size_t count, loff_t *ppos)
593{
594	unsigned long i = *ppos;
595	const char __user * tmp = buf;
596
597	if (!access_ok(VERIFY_READ, buf, count))
598		return -EFAULT;
599	while (count-- > 0 && i < 65536) {
600		char c;
601		if (__get_user(c, tmp)) {
602			if (tmp > buf)
603				break;
604			return -EFAULT;
605		}
606		outb(c, i);
607		i++;
608		tmp++;
609	}
610	*ppos = i;
611	return tmp-buf;
612}
613#endif
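
read_port() and write_port() above treat the file offset as an I/O port number and move one byte per inb()/outb(). A minimal user-space sketch (an x86-specific illustration, assuming CAP_SYS_RAWIO; the CMOS index/data ports 0x70/0x71 are just a familiar example pair):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char reg = 0x00, val;		/* CMOS register 0: RTC seconds */
	int fd = open("/dev/port", O_RDWR);

	if (fd < 0) { perror("open"); return 1; }
	if (pwrite(fd, &reg, 1, 0x70) != 1 ||	/* like outb(0x00, 0x70) */
	    pread(fd, &val, 1, 0x71) != 1) {	/* like val = inb(0x71) */
		perror("port I/O");
		return 1;
	}
	printf("RTC seconds (BCD): %02x\n", val);
	close(fd);
	return 0;
}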
614
615static ssize_t read_null(struct file *file, char __user *buf,
616			 size_t count, loff_t *ppos)
617{
618	return 0;
619}
620
621static ssize_t write_null(struct file *file, const char __user *buf,
622			  size_t count, loff_t *ppos)
623{
624	return count;
625}
626
627static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
628			struct splice_desc *sd)
629{
630	return sd->len;
631}
632
633static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
634				 loff_t *ppos, size_t len, unsigned int flags)
635{
636	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
637}
638
639static ssize_t read_zero(struct file *file, char __user *buf,
640			 size_t count, loff_t *ppos)
641{
642	size_t written;
643
644	if (!count)
645		return 0;
646
647	if (!access_ok(VERIFY_WRITE, buf, count))
648		return -EFAULT;
649
650	written = 0;
651	while (count) {
652		unsigned long unwritten;
653		size_t chunk = count;
654
655		if (chunk > PAGE_SIZE)
656			chunk = PAGE_SIZE;	/* Just for latency reasons */
657		unwritten = __clear_user(buf, chunk);
658		written += chunk - unwritten;
659		if (unwritten)
660			break;
661		if (signal_pending(current))
662			return written ? written : -ERESTARTSYS;
663		buf += chunk;
664		count -= chunk;
665		cond_resched();
666	}
667	return written ? written : -EFAULT;
668}
669
670static int mmap_zero(struct file *file, struct vm_area_struct *vma)
671{
672#ifndef CONFIG_MMU
673	return -ENOSYS;
674#endif
675	if (vma->vm_flags & VM_SHARED)
676		return shmem_zero_setup(vma);
677	return 0;
678}
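
mmap_zero() only sets up a shmem object for MAP_SHARED; a MAP_PRIVATE mapping falls through and behaves like anonymous memory. That is the classic pre-MAP_ANONYMOUS idiom, sketched below from user space (illustration only):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/zero", O_RDWR);
	if (fd < 0) { perror("open"); return 1; }

	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE, fd, 0);
	if (p == MAP_FAILED) { perror("mmap"); return 1; }
	close(fd);				/* the mapping survives the close */

	printf("first byte: %d\n", p[0]);	/* demand-zero page: prints 0 */
	strcpy(p, "hello");			/* private copy-on-write page */
	printf("%s\n", p);
	munmap(p, 4096);
	return 0;
}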
679
680static ssize_t write_full(struct file *file, const char __user *buf,
681			  size_t count, loff_t *ppos)
682{
683	return -ENOSPC;
684}
685
686/*
687 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
688 * can fopen() both devices with "a" now.  This was previously impossible.
689 * -- SRB.
690 */
691static loff_t null_lseek(struct file *file, loff_t offset, int orig)
692{
693	return file->f_pos = 0;
694}
695
696/*
697 * The memory devices use the full 32/64 bits of the offset, and so we cannot
698 * check against negative addresses: they are ok. The return value is weird,
699 * though, in that case (0).
700 *
701 * also note that seeking relative to the "end of file" isn't supported:
702 * it has no meaning, so it returns -EINVAL.
703 */
704static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
705{
706	loff_t ret;
707
708	mutex_lock(&file->f_path.dentry->d_inode->i_mutex);
709	switch (orig) {
710	case SEEK_CUR:
711		offset += file->f_pos;
712	case SEEK_SET:
713		/* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
714		if ((unsigned long long)offset >= ~0xFFFULL) {
715			ret = -EOVERFLOW;
716			break;
717		}
718		file->f_pos = offset;
719		ret = file->f_pos;
720		force_successful_syscall_return();
721		break;
722	default:
723		ret = -EINVAL;
724	}
725	mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
726	return ret;
727}
728
729static int open_port(struct inode * inode, struct file * filp)
730{
731	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
732}
733
734#define zero_lseek	null_lseek
735#define full_lseek      null_lseek
736#define write_zero	write_null
737#define read_full       read_zero
738#define open_mem	open_port
739#define open_kmem	open_mem
740#define open_oldmem	open_mem
741
742static const struct file_operations mem_fops = {
743	.llseek		= memory_lseek,
744	.read		= read_mem,
745	.write		= write_mem,
746	.mmap		= mmap_mem,
747	.open		= open_mem,
748	.get_unmapped_area = get_unmapped_area_mem,
749};
750
751#ifdef CONFIG_DEVKMEM
752static const struct file_operations kmem_fops = {
753	.llseek		= memory_lseek,
754	.read		= read_kmem,
755	.write		= write_kmem,
756	.mmap		= mmap_kmem,
757	.open		= open_kmem,
758	.get_unmapped_area = get_unmapped_area_mem,
759};
760#endif
761
762static const struct file_operations null_fops = {
763	.llseek		= null_lseek,
764	.read		= read_null,
765	.write		= write_null,
766	.splice_write	= splice_write_null,
767};
768
769#ifdef CONFIG_DEVPORT
770static const struct file_operations port_fops = {
771	.llseek		= memory_lseek,
772	.read		= read_port,
773	.write		= write_port,
774	.open		= open_port,
775};
776#endif
777
778static const struct file_operations zero_fops = {
779	.llseek		= zero_lseek,
780	.read		= read_zero,
781	.write		= write_zero,
782	.mmap		= mmap_zero,
783};
784
785/*
786 * capabilities for /dev/zero
787 * - permits private mappings, "copies" are taken of the source of zeros
788 * - no writeback happens
789 */
790static struct backing_dev_info zero_bdi = {
791	.name		= "char/mem",
792	.capabilities	= BDI_CAP_MAP_COPY | BDI_CAP_NO_ACCT_AND_WRITEBACK,
793};
794
795static const struct file_operations full_fops = {
796	.llseek		= full_lseek,
797	.read		= read_full,
798	.write		= write_full,
799};
800
801#ifdef CONFIG_CRASH_DUMP
802static const struct file_operations oldmem_fops = {
803	.read	= read_oldmem,
804	.open	= open_oldmem,
805	.llseek = default_llseek,
806};
807#endif
808
809static ssize_t kmsg_writev(struct kiocb *iocb, const struct iovec *iv,
810			   unsigned long count, loff_t pos)
811{
812	char *line, *p;
813	int i;
814	ssize_t ret = -EFAULT;
815	size_t len = iov_length(iv, count);
816
817	line = kmalloc(len + 1, GFP_KERNEL);
818	if (line == NULL)
819		return -ENOMEM;
820
821	/*
822	 * copy all vectors into a single string, to ensure we do
823	 * not interleave our log line with other printk calls
824	 */
825	p = line;
826	for (i = 0; i < count; i++) {
827		if (copy_from_user(p, iv[i].iov_base, iv[i].iov_len))
828			goto out;
829		p += iv[i].iov_len;
830	}
831	p[0] = '\0';
832
833	ret = printk("%s", line);
834	/* printk can add a prefix */
835	if (ret > len)
836		ret = len;
837out:
838	kfree(line);
839	return ret;
840}
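
Because kmsg_writev() gathers the whole iovec into one buffer before calling printk(), a single write from user space comes out as one uninterleaved log line. A minimal sketch (assuming the usual "<level>" prefix convention; check the result with dmesg):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *msg = "<6>hello from user space\n";	/* <6> = KERN_INFO */
	int fd = open("/dev/kmsg", O_WRONLY);

	if (fd < 0) { perror("open"); return 1; }
	if (write(fd, msg, strlen(msg)) < 0) {
		perror("write");
		return 1;
	}
	close(fd);
	return 0;
}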
841
842static const struct file_operations kmsg_fops = {
843	.aio_write = kmsg_writev,
844	.llseek = noop_llseek,
845};
846
847static const struct memdev {
848	const char *name;
849	mode_t mode;
850	const struct file_operations *fops;
851	struct backing_dev_info *dev_info;
852} devlist[] = {
853	 [1] = { "mem", 0, &mem_fops, &directly_mappable_cdev_bdi },
854#ifdef CONFIG_DEVKMEM
855	 [2] = { "kmem", 0, &kmem_fops, &directly_mappable_cdev_bdi },
856#endif
857	 [3] = { "null", 0666, &null_fops, NULL },
858#ifdef CONFIG_DEVPORT
859	 [4] = { "port", 0, &port_fops, NULL },
860#endif
861	 [5] = { "zero", 0666, &zero_fops, &zero_bdi },
862	 [7] = { "full", 0666, &full_fops, NULL },
863	 [8] = { "random", 0666, &random_fops, NULL },
864	 [9] = { "urandom", 0666, &urandom_fops, NULL },
865	[11] = { "kmsg", 0, &kmsg_fops, NULL },
866#ifdef CONFIG_CRASH_DUMP
867	[12] = { "oldmem", 0, &oldmem_fops, NULL },
868#endif
869};
870
871static int memory_open(struct inode *inode, struct file *filp)
872{
873	int minor;
874	const struct memdev *dev;
875
876	minor = iminor(inode);
877	if (minor >= ARRAY_SIZE(devlist))
878		return -ENXIO;
879
880	dev = &devlist[minor];
881	if (!dev->fops)
882		return -ENXIO;
883
884	filp->f_op = dev->fops;
885	if (dev->dev_info)
886		filp->f_mapping->backing_dev_info = dev->dev_info;
887
888	/* Is /dev/mem or /dev/kmem ? */
889	if (dev->dev_info == &directly_mappable_cdev_bdi)
890		filp->f_mode |= FMODE_UNSIGNED_OFFSET;
891
892	if (dev->fops->open)
893		return dev->fops->open(inode, filp);
894
895	return 0;
896}
897
898static const struct file_operations memory_fops = {
899	.open = memory_open,
900	.llseek = noop_llseek,
901};
902
903static char *mem_devnode(struct device *dev, mode_t *mode)
904{
905	if (mode && devlist[MINOR(dev->devt)].mode)
906		*mode = devlist[MINOR(dev->devt)].mode;
907	return NULL;
908}
909
910static struct class *mem_class;
911
912static int __init chr_dev_init(void)
913{
914	int minor;
915	int err;
916
917	err = bdi_init(&zero_bdi);
918	if (err)
919		return err;
920
921	if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
922		printk("unable to get major %d for memory devs\n", MEM_MAJOR);
923
924	mem_class = class_create(THIS_MODULE, "mem");
925	if (IS_ERR(mem_class))
926		return PTR_ERR(mem_class);
927
928	mem_class->devnode = mem_devnode;
929	for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
930		if (!devlist[minor].name)
931			continue;
932		device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
933			      NULL, devlist[minor].name);
934	}
935
936	return tty_init();
937}
938
939fs_initcall(chr_dev_init);
v4.17
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 *  linux/drivers/char/mem.c
  4 *
  5 *  Copyright (C) 1991, 1992  Linus Torvalds
  6 *
  7 *  Added devfs support.
  8 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
  9 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 10 */
 11
 12#include <linux/mm.h>
 13#include <linux/miscdevice.h>
 14#include <linux/slab.h>
 15#include <linux/vmalloc.h>
 16#include <linux/mman.h>
 17#include <linux/random.h>
 18#include <linux/init.h>
 19#include <linux/raw.h>
 20#include <linux/tty.h>
 21#include <linux/capability.h>
 22#include <linux/ptrace.h>
 23#include <linux/device.h>
 24#include <linux/highmem.h>
 25#include <linux/backing-dev.h>
 26#include <linux/shmem_fs.h>
 27#include <linux/splice.h>
 28#include <linux/pfn.h>
 29#include <linux/export.h>
 30#include <linux/io.h>
 31#include <linux/uio.h>
 32
 33#include <linux/uaccess.h>
 34
 35#ifdef CONFIG_IA64
 36# include <linux/efi.h>
 37#endif
 38
 39#define DEVPORT_MINOR	4
 40
 41static inline unsigned long size_inside_page(unsigned long start,
 42					     unsigned long size)
 43{
 44	unsigned long sz;
 45
 46	sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));
 47
 48	return min(sz, size);
 49}
 50
 51#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
 52static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
 53{
 54	return addr + count <= __pa(high_memory);
 55}
 56
 57static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
 58{
 59	return 1;
 60}
 61#endif
 62
 63#ifdef CONFIG_STRICT_DEVMEM
 64static inline int page_is_allowed(unsigned long pfn)
 65{
 66	return devmem_is_allowed(pfn);
 67}
 68static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 69{
 70	u64 from = ((u64)pfn) << PAGE_SHIFT;
 71	u64 to = from + size;
 72	u64 cursor = from;
 73
 74	while (cursor < to) {
 75		if (!devmem_is_allowed(pfn))
 76			return 0;
 77		cursor += PAGE_SIZE;
 78		pfn++;
 79	}
 80	return 1;
 81}
 82#else
 83static inline int page_is_allowed(unsigned long pfn)
 84{
 85	return 1;
 86}
 87static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 88{
 89	return 1;
 90}
 91#endif
 92
 93#ifndef unxlate_dev_mem_ptr
 94#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
 95void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
 96{
 97}
 98#endif
 99
100/*
101 * This function reads the *physical* memory. The f_pos points directly to the
102 * memory location.
103 */
104static ssize_t read_mem(struct file *file, char __user *buf,
105			size_t count, loff_t *ppos)
106{
107	phys_addr_t p = *ppos;
108	ssize_t read, sz;
109	void *ptr;
110	char *bounce;
111	int err;
112
113	if (p != *ppos)
114		return 0;
115
116	if (!valid_phys_addr_range(p, count))
117		return -EFAULT;
118	read = 0;
119#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
120	/* we don't have page 0 mapped on sparc and m68k.. */
121	if (p < PAGE_SIZE) {
122		sz = size_inside_page(p, count);
123		if (sz > 0) {
124			if (clear_user(buf, sz))
125				return -EFAULT;
126			buf += sz;
127			p += sz;
128			count -= sz;
129			read += sz;
130		}
131	}
132#endif
133
134	bounce = kmalloc(PAGE_SIZE, GFP_KERNEL);
135	if (!bounce)
136		return -ENOMEM;
137
138	while (count > 0) {
139		unsigned long remaining;
140		int allowed, probe;
141
142		sz = size_inside_page(p, count);
143
144		err = -EPERM;
145		allowed = page_is_allowed(p >> PAGE_SHIFT);
146		if (!allowed)
147			goto failed;
148
149		err = -EFAULT;
150		if (allowed == 2) {
151			/* Show zeros for restricted memory. */
152			remaining = clear_user(buf, sz);
153		} else {
154			/*
155			 * On ia64 if a page has been mapped somewhere as
156			 * uncached, then it must also be accessed uncached
157			 * by the kernel or data corruption may occur.
158			 */
159			ptr = xlate_dev_mem_ptr(p);
160			if (!ptr)
161				goto failed;
162
163			probe = probe_kernel_read(bounce, ptr, sz);
164			unxlate_dev_mem_ptr(p, ptr);
165			if (probe)
166				goto failed;
167
168			remaining = copy_to_user(buf, bounce, sz);
169		}
170
171		if (remaining)
172			goto failed;
173
174		buf += sz;
175		p += sz;
176		count -= sz;
177		read += sz;
178	}
179	kfree(bounce);
180
181	*ppos += read;
182	return read;
183
184failed:
185	kfree(bounce);
186	return err;
187}
188
189static ssize_t write_mem(struct file *file, const char __user *buf,
190			 size_t count, loff_t *ppos)
191{
192	phys_addr_t p = *ppos;
193	ssize_t written, sz;
194	unsigned long copied;
195	void *ptr;
196
197	if (p != *ppos)
198		return -EFBIG;
199
200	if (!valid_phys_addr_range(p, count))
201		return -EFAULT;
202
203	written = 0;
204
205#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
206	/* we don't have page 0 mapped on sparc and m68k.. */
207	if (p < PAGE_SIZE) {
208		sz = size_inside_page(p, count);
209		/* Hmm. Do something? */
210		buf += sz;
211		p += sz;
212		count -= sz;
213		written += sz;
214	}
215#endif
216
217	while (count > 0) {
218		int allowed;
219
220		sz = size_inside_page(p, count);
221
222		allowed = page_is_allowed(p >> PAGE_SHIFT);
223		if (!allowed)
224			return -EPERM;
225
226		/* Skip actual writing when a page is marked as restricted. */
227		if (allowed == 1) {
228			/*
229			 * On ia64 if a page has been mapped somewhere as
230			 * uncached, then it must also be accessed uncached
231			 * by the kernel or data corruption may occur.
232			 */
233			ptr = xlate_dev_mem_ptr(p);
234			if (!ptr) {
235				if (written)
236					break;
237				return -EFAULT;
238			}
239
240			copied = copy_from_user(ptr, buf, sz);
241			unxlate_dev_mem_ptr(p, ptr);
242			if (copied) {
243				written += sz - copied;
244				if (written)
245					break;
246				return -EFAULT;
247			}
248		}
249
250		buf += sz;
251		p += sz;
252		count -= sz;
253		written += sz;
254	}
255
256	*ppos += written;
257	return written;
258}
259
260int __weak phys_mem_access_prot_allowed(struct file *file,
261	unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
262{
263	return 1;
264}
265
266#ifndef __HAVE_PHYS_MEM_ACCESS_PROT
267
268/*
269 * Architectures vary in how they handle caching for addresses
270 * outside of main memory.
271 *
272 */
273#ifdef pgprot_noncached
274static int uncached_access(struct file *file, phys_addr_t addr)
275{
276#if defined(CONFIG_IA64)
277	/*
278	 * On ia64, we ignore O_DSYNC because we cannot tolerate memory
279	 * attribute aliases.
280	 */
281	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
282#elif defined(CONFIG_MIPS)
283	{
284		extern int __uncached_access(struct file *file,
285					     unsigned long addr);
286
287		return __uncached_access(file, addr);
288	}
289#else
290	/*
291	 * Accessing memory above the top the kernel knows about or through a
292	 * file pointer
293	 * that was marked O_DSYNC will be done non-cached.
294	 */
295	if (file->f_flags & O_DSYNC)
296		return 1;
297	return addr >= __pa(high_memory);
298#endif
299}
300#endif
301
302static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
303				     unsigned long size, pgprot_t vma_prot)
304{
305#ifdef pgprot_noncached
306	phys_addr_t offset = pfn << PAGE_SHIFT;
307
308	if (uncached_access(file, offset))
309		return pgprot_noncached(vma_prot);
310#endif
311	return vma_prot;
312}
313#endif
314
315#ifndef CONFIG_MMU
316static unsigned long get_unmapped_area_mem(struct file *file,
317					   unsigned long addr,
318					   unsigned long len,
319					   unsigned long pgoff,
320					   unsigned long flags)
321{
322	if (!valid_mmap_phys_addr_range(pgoff, len))
323		return (unsigned long) -EINVAL;
324	return pgoff << PAGE_SHIFT;
325}
326
327/* permit direct mmap, for read, write or exec */
328static unsigned memory_mmap_capabilities(struct file *file)
329{
330	return NOMMU_MAP_DIRECT |
331		NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
332}
333
334static unsigned zero_mmap_capabilities(struct file *file)
335{
336	return NOMMU_MAP_COPY;
337}
338
339/* can't do an in-place private mapping if there's no MMU */
340static inline int private_mapping_ok(struct vm_area_struct *vma)
341{
342	return vma->vm_flags & VM_MAYSHARE;
343}
344#else
345
346static inline int private_mapping_ok(struct vm_area_struct *vma)
347{
348	return 1;
349}
350#endif
351
352static const struct vm_operations_struct mmap_mem_ops = {
353#ifdef CONFIG_HAVE_IOREMAP_PROT
354	.access = generic_access_phys
355#endif
356};
357
358static int mmap_mem(struct file *file, struct vm_area_struct *vma)
359{
360	size_t size = vma->vm_end - vma->vm_start;
361	phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
362
363	/* Does it even fit in phys_addr_t? */
364	if (offset >> PAGE_SHIFT != vma->vm_pgoff)
365		return -EINVAL;
366
367	/* It's illegal to wrap around the end of the physical address space. */
368	if (offset + (phys_addr_t)size - 1 < offset)
369		return -EINVAL;
370
371	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
372		return -EINVAL;
373
374	if (!private_mapping_ok(vma))
375		return -ENOSYS;
376
377	if (!range_is_allowed(vma->vm_pgoff, size))
378		return -EPERM;
379
380	if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
381						&vma->vm_page_prot))
382		return -EINVAL;
383
384	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
385						 size,
386						 vma->vm_page_prot);
387
388	vma->vm_ops = &mmap_mem_ops;
389
390	/* Remap-pfn-range will mark the range VM_IO */
391	if (remap_pfn_range(vma,
392			    vma->vm_start,
393			    vma->vm_pgoff,
394			    size,
395			    vma->vm_page_prot)) {
396		return -EAGAIN;
397	}
398	return 0;
399}
400
401static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
402{
403	unsigned long pfn;
404
405	/* Turn a kernel-virtual address into a physical page frame */
406	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;
407
408	/*
409	 * RED-PEN: on some architectures there is more mapped memory than
410	 * available in mem_map which pfn_valid checks for. Perhaps should add a
411	 * new macro here.
412	 *
413	 * RED-PEN: vmalloc is not supported right now.
414	 */
415	if (!pfn_valid(pfn))
416		return -EIO;
417
418	vma->vm_pgoff = pfn;
419	return mmap_mem(file, vma);
420}
421
422/*
423 * This function reads the *virtual* memory as seen by the kernel.
424 */
425static ssize_t read_kmem(struct file *file, char __user *buf,
426			 size_t count, loff_t *ppos)
427{
428	unsigned long p = *ppos;
429	ssize_t low_count, read, sz;
430	char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
431	int err = 0;
432
433	read = 0;
434	if (p < (unsigned long) high_memory) {
435		low_count = count;
436		if (count > (unsigned long)high_memory - p)
437			low_count = (unsigned long)high_memory - p;
438
439#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
440		/* we don't have page 0 mapped on sparc and m68k.. */
441		if (p < PAGE_SIZE && low_count > 0) {
442			sz = size_inside_page(p, low_count);
443			if (clear_user(buf, sz))
444				return -EFAULT;
445			buf += sz;
446			p += sz;
447			read += sz;
448			low_count -= sz;
449			count -= sz;
450		}
451#endif
452		while (low_count > 0) {
453			sz = size_inside_page(p, low_count);
454
455			/*
456			 * On ia64 if a page has been mapped somewhere as
457			 * uncached, then it must also be accessed uncached
458			 * by the kernel or data corruption may occur
459			 */
460			kbuf = xlate_dev_kmem_ptr((void *)p);
461			if (!virt_addr_valid(kbuf))
462				return -ENXIO;
463
464			if (copy_to_user(buf, kbuf, sz))
465				return -EFAULT;
466			buf += sz;
467			p += sz;
468			read += sz;
469			low_count -= sz;
470			count -= sz;
471		}
472	}
473
474	if (count > 0) {
475		kbuf = (char *)__get_free_page(GFP_KERNEL);
476		if (!kbuf)
477			return -ENOMEM;
478		while (count > 0) {
479			sz = size_inside_page(p, count);
480			if (!is_vmalloc_or_module_addr((void *)p)) {
481				err = -ENXIO;
482				break;
483			}
484			sz = vread(kbuf, (char *)p, sz);
485			if (!sz)
486				break;
487			if (copy_to_user(buf, kbuf, sz)) {
488				err = -EFAULT;
489				break;
490			}
491			count -= sz;
492			buf += sz;
493			read += sz;
494			p += sz;
495		}
496		free_page((unsigned long)kbuf);
497	}
498	*ppos = p;
499	return read ? read : err;
500}
501
502
503static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
504				size_t count, loff_t *ppos)
505{
506	ssize_t written, sz;
507	unsigned long copied;
508
509	written = 0;
510#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
511	/* we don't have page 0 mapped on sparc and m68k.. */
512	if (p < PAGE_SIZE) {
513		sz = size_inside_page(p, count);
514		/* Hmm. Do something? */
515		buf += sz;
516		p += sz;
517		count -= sz;
518		written += sz;
519	}
520#endif
521
522	while (count > 0) {
523		void *ptr;
524
525		sz = size_inside_page(p, count);
526
527		/*
528		 * On ia64 if a page has been mapped somewhere as uncached, then
529		 * it must also be accessed uncached by the kernel or data
530		 * corruption may occur.
531		 */
532		ptr = xlate_dev_kmem_ptr((void *)p);
533		if (!virt_addr_valid(ptr))
534			return -ENXIO;
535
536		copied = copy_from_user(ptr, buf, sz);
537		if (copied) {
538			written += sz - copied;
539			if (written)
540				break;
541			return -EFAULT;
542		}
543		buf += sz;
544		p += sz;
545		count -= sz;
546		written += sz;
547	}
548
549	*ppos += written;
550	return written;
551}
552
553/*
554 * This function writes to the *virtual* memory as seen by the kernel.
555 */
556static ssize_t write_kmem(struct file *file, const char __user *buf,
557			  size_t count, loff_t *ppos)
558{
559	unsigned long p = *ppos;
560	ssize_t wrote = 0;
561	ssize_t virtr = 0;
562	char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
563	int err = 0;
564
565	if (p < (unsigned long) high_memory) {
566		unsigned long to_write = min_t(unsigned long, count,
567					       (unsigned long)high_memory - p);
568		wrote = do_write_kmem(p, buf, to_write, ppos);
569		if (wrote != to_write)
570			return wrote;
571		p += wrote;
572		buf += wrote;
573		count -= wrote;
574	}
575
576	if (count > 0) {
577		kbuf = (char *)__get_free_page(GFP_KERNEL);
578		if (!kbuf)
579			return wrote ? wrote : -ENOMEM;
580		while (count > 0) {
581			unsigned long sz = size_inside_page(p, count);
582			unsigned long n;
583
584			if (!is_vmalloc_or_module_addr((void *)p)) {
585				err = -ENXIO;
586				break;
587			}
588			n = copy_from_user(kbuf, buf, sz);
589			if (n) {
590				err = -EFAULT;
591				break;
592			}
593			vwrite(kbuf, (char *)p, sz);
594			count -= sz;
595			buf += sz;
596			virtr += sz;
597			p += sz;
598		}
599		free_page((unsigned long)kbuf);
600	}
601
602	*ppos = p;
603	return virtr + wrote ? : err;
604}
605
606static ssize_t read_port(struct file *file, char __user *buf,
607			 size_t count, loff_t *ppos)
608{
609	unsigned long i = *ppos;
610	char __user *tmp = buf;
611
612	if (!access_ok(VERIFY_WRITE, buf, count))
613		return -EFAULT;
614	while (count-- > 0 && i < 65536) {
615		if (__put_user(inb(i), tmp) < 0)
616			return -EFAULT;
617		i++;
618		tmp++;
619	}
620	*ppos = i;
621	return tmp-buf;
622}
623
624static ssize_t write_port(struct file *file, const char __user *buf,
625			  size_t count, loff_t *ppos)
626{
627	unsigned long i = *ppos;
628	const char __user *tmp = buf;
629
630	if (!access_ok(VERIFY_READ, buf, count))
631		return -EFAULT;
632	while (count-- > 0 && i < 65536) {
633		char c;
634
635		if (__get_user(c, tmp)) {
636			if (tmp > buf)
637				break;
638			return -EFAULT;
639		}
640		outb(c, i);
641		i++;
642		tmp++;
643	}
644	*ppos = i;
645	return tmp-buf;
646}
647
648static ssize_t read_null(struct file *file, char __user *buf,
649			 size_t count, loff_t *ppos)
650{
651	return 0;
652}
653
654static ssize_t write_null(struct file *file, const char __user *buf,
655			  size_t count, loff_t *ppos)
656{
657	return count;
658}
659
660static ssize_t read_iter_null(struct kiocb *iocb, struct iov_iter *to)
661{
662	return 0;
663}
664
665static ssize_t write_iter_null(struct kiocb *iocb, struct iov_iter *from)
666{
667	size_t count = iov_iter_count(from);
668	iov_iter_advance(from, count);
669	return count;
670}
671
672static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
673			struct splice_desc *sd)
674{
675	return sd->len;
676}
677
678static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
679				 loff_t *ppos, size_t len, unsigned int flags)
680{
681	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
682}
683
684static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
685{
686	size_t written = 0;
687
688	while (iov_iter_count(iter)) {
689		size_t chunk = iov_iter_count(iter), n;
690
691		if (chunk > PAGE_SIZE)
692			chunk = PAGE_SIZE;	/* Just for latency reasons */
693		n = iov_iter_zero(chunk, iter);
694		if (!n && iov_iter_count(iter))
695			return written ? written : -EFAULT;
696		written += n;
697		if (signal_pending(current))
698			return written ? written : -ERESTARTSYS;
699		cond_resched();
700	}
701	return written;
702}
703
704static int mmap_zero(struct file *file, struct vm_area_struct *vma)
705{
706#ifndef CONFIG_MMU
707	return -ENOSYS;
708#endif
709	if (vma->vm_flags & VM_SHARED)
710		return shmem_zero_setup(vma);
711	return 0;
712}
713
714static unsigned long get_unmapped_area_zero(struct file *file,
715				unsigned long addr, unsigned long len,
716				unsigned long pgoff, unsigned long flags)
717{
718#ifdef CONFIG_MMU
719	if (flags & MAP_SHARED) {
720		/*
721		 * mmap_zero() will call shmem_zero_setup() to create a file,
722		 * so use shmem's get_unmapped_area in case it can be huge;
723		 * and pass NULL for file as in mmap.c's get_unmapped_area(),
724		 * so as not to confuse shmem with our handle on "/dev/zero".
725		 */
726		return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags);
727	}
728
729	/* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */
730	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
731#else
732	return -ENOSYS;
733#endif
734}
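
For the MAP_SHARED case routed through shmem above, the pages really are shared: a value written by a child is visible in the parent. A minimal user-space sketch (illustration only):

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/zero", O_RDWR);
	if (fd < 0) { perror("open"); return 1; }

	int *shared = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			   MAP_SHARED, fd, 0);
	if (shared == MAP_FAILED) { perror("mmap"); return 1; }
	close(fd);

	if (fork() == 0) {		/* child writes through the mapping */
		*shared = 42;
		_exit(0);
	}
	wait(NULL);
	printf("child wrote: %d\n", *shared);	/* prints 42 */
	munmap(shared, 4096);
	return 0;
}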
735
736static ssize_t write_full(struct file *file, const char __user *buf,
737			  size_t count, loff_t *ppos)
738{
739	return -ENOSPC;
740}
741
742/*
743 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
744 * can fopen() both devices with "a" now.  This was previously impossible.
745 * -- SRB.
746 */
747static loff_t null_lseek(struct file *file, loff_t offset, int orig)
748{
749	return file->f_pos = 0;
750}
751
752/*
753 * The memory devices use the full 32/64 bits of the offset, and so we cannot
754 * check against negative addresses: they are ok. The return value is weird,
755 * though, in that case (0).
756 *
757 * also note that seeking relative to the "end of file" isn't supported:
758 * it has no meaning, so it returns -EINVAL.
759 */
760static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
761{
762	loff_t ret;
763
764	inode_lock(file_inode(file));
765	switch (orig) {
766	case SEEK_CUR:
767		offset += file->f_pos;
768	case SEEK_SET:
769		/* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
770		if ((unsigned long long)offset >= -MAX_ERRNO) {
771			ret = -EOVERFLOW;
772			break;
773		}
774		file->f_pos = offset;
775		ret = file->f_pos;
776		force_successful_syscall_return();
777		break;
778	default:
779		ret = -EINVAL;
780	}
781	inode_unlock(file_inode(file));
782	return ret;
783}
784
785static int open_port(struct inode *inode, struct file *filp)
786{
787	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
788}
789
790#define zero_lseek	null_lseek
791#define full_lseek      null_lseek
792#define write_zero	write_null
793#define write_iter_zero	write_iter_null
794#define open_mem	open_port
795#define open_kmem	open_mem
796
797static const struct file_operations __maybe_unused mem_fops = {
798	.llseek		= memory_lseek,
799	.read		= read_mem,
800	.write		= write_mem,
801	.mmap		= mmap_mem,
802	.open		= open_mem,
803#ifndef CONFIG_MMU
804	.get_unmapped_area = get_unmapped_area_mem,
805	.mmap_capabilities = memory_mmap_capabilities,
806#endif
807};
808
809static const struct file_operations __maybe_unused kmem_fops = {
810	.llseek		= memory_lseek,
811	.read		= read_kmem,
812	.write		= write_kmem,
813	.mmap		= mmap_kmem,
814	.open		= open_kmem,
815#ifndef CONFIG_MMU
816	.get_unmapped_area = get_unmapped_area_mem,
817	.mmap_capabilities = memory_mmap_capabilities,
818#endif
819};
820
821static const struct file_operations null_fops = {
822	.llseek		= null_lseek,
823	.read		= read_null,
824	.write		= write_null,
825	.read_iter	= read_iter_null,
826	.write_iter	= write_iter_null,
827	.splice_write	= splice_write_null,
828};
829
830static const struct file_operations __maybe_unused port_fops = {
831	.llseek		= memory_lseek,
832	.read		= read_port,
833	.write		= write_port,
834	.open		= open_port,
835};
836
837static const struct file_operations zero_fops = {
838	.llseek		= zero_lseek,
839	.write		= write_zero,
840	.read_iter	= read_iter_zero,
841	.write_iter	= write_iter_zero,
842	.mmap		= mmap_zero,
843	.get_unmapped_area = get_unmapped_area_zero,
844#ifndef CONFIG_MMU
845	.mmap_capabilities = zero_mmap_capabilities,
846#endif
847};
848
849static const struct file_operations full_fops = {
850	.llseek		= full_lseek,
851	.read_iter	= read_iter_zero,
852	.write		= write_full,
853};
854
855static const struct memdev {
856	const char *name;
857	umode_t mode;
858	const struct file_operations *fops;
859	fmode_t fmode;
860} devlist[] = {
861#ifdef CONFIG_DEVMEM
862	 [1] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET },
863#endif
864#ifdef CONFIG_DEVKMEM
865	 [2] = { "kmem", 0, &kmem_fops, FMODE_UNSIGNED_OFFSET },
866#endif
867	 [3] = { "null", 0666, &null_fops, 0 },
868#ifdef CONFIG_DEVPORT
869	 [4] = { "port", 0, &port_fops, 0 },
870#endif
871	 [5] = { "zero", 0666, &zero_fops, 0 },
872	 [7] = { "full", 0666, &full_fops, 0 },
873	 [8] = { "random", 0666, &random_fops, 0 },
874	 [9] = { "urandom", 0666, &urandom_fops, 0 },
875#ifdef CONFIG_PRINTK
876	[11] = { "kmsg", 0644, &kmsg_fops, 0 },
877#endif
878};
879
880static int memory_open(struct inode *inode, struct file *filp)
881{
882	int minor;
883	const struct memdev *dev;
884
885	minor = iminor(inode);
886	if (minor >= ARRAY_SIZE(devlist))
887		return -ENXIO;
888
889	dev = &devlist[minor];
890	if (!dev->fops)
891		return -ENXIO;
892
893	filp->f_op = dev->fops;
894	filp->f_mode |= dev->fmode;
895
896	if (dev->fops->open)
897		return dev->fops->open(inode, filp);
898
899	return 0;
900}
901
902static const struct file_operations memory_fops = {
903	.open = memory_open,
904	.llseek = noop_llseek,
905};
906
907static char *mem_devnode(struct device *dev, umode_t *mode)
908{
909	if (mode && devlist[MINOR(dev->devt)].mode)
910		*mode = devlist[MINOR(dev->devt)].mode;
911	return NULL;
912}
913
914static struct class *mem_class;
915
916static int __init chr_dev_init(void)
917{
918	int minor;
919
920	if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
921		printk("unable to get major %d for memory devs\n", MEM_MAJOR);
922
923	mem_class = class_create(THIS_MODULE, "mem");
924	if (IS_ERR(mem_class))
925		return PTR_ERR(mem_class);
926
927	mem_class->devnode = mem_devnode;
928	for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
929		if (!devlist[minor].name)
930			continue;
931
932		/*
933		 * Create /dev/port?
934		 */
935		if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
936			continue;
937
938		device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
939			      NULL, devlist[minor].name);
940	}
941
942	return tty_init();
943}
944
945fs_initcall(chr_dev_init);
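
The indices of devlist[] above are the minor numbers under MEM_MAJOR (1), which is easy to confirm from user space with stat(). A minimal sketch (assumes a glibc system providing sys/sysmacros.h):

#include <stdio.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>

int main(void)
{
	const char *nodes[] = { "/dev/mem", "/dev/null", "/dev/zero",
				"/dev/full", "/dev/urandom" };
	struct stat st;

	for (unsigned i = 0; i < sizeof(nodes) / sizeof(nodes[0]); i++) {
		if (stat(nodes[i], &st) == 0)
			printf("%-12s %u:%u\n", nodes[i],
			       major(st.st_rdev), minor(st.st_rdev));
	}
	return 0;	/* expect e.g. /dev/null 1:3 and /dev/zero 1:5 */
}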