v4.10.11
  1/*
  2 *  linux/drivers/char/mem.c
  3 *
  4 *  Copyright (C) 1991, 1992  Linus Torvalds
  5 *
  6 *  Added devfs support.
  7 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
  8 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
  9 */
 10
 11#include <linux/mm.h>
 12#include <linux/miscdevice.h>
 13#include <linux/slab.h>
 14#include <linux/vmalloc.h>
 15#include <linux/mman.h>
 16#include <linux/random.h>
 17#include <linux/init.h>
 18#include <linux/raw.h>
 19#include <linux/tty.h>
 20#include <linux/capability.h>
 21#include <linux/ptrace.h>
 22#include <linux/device.h>
 23#include <linux/highmem.h>
 24#include <linux/backing-dev.h>
 25#include <linux/shmem_fs.h>
 26#include <linux/splice.h>
 27#include <linux/pfn.h>
 28#include <linux/export.h>
 29#include <linux/io.h>
 30#include <linux/uio.h>
 31
 32#include <linux/uaccess.h>
 33
 34#ifdef CONFIG_IA64
 35# include <linux/efi.h>
 36#endif
 37
 38#define DEVPORT_MINOR	4
 39
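/*
 * size_inside_page() returns how many of the requested bytes fit before
 * the next page boundary: with PAGE_SIZE == 4096, start == 0x1ffc and
 * size == 16 it returns 4, so the copy loops below never cross a page
 * in a single iteration.
 */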
 40static inline unsigned long size_inside_page(unsigned long start,
 41					     unsigned long size)
 42{
 43	unsigned long sz;
 44
 45	sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));
 46
 47	return min(sz, size);
 48}
 49
 50#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
 51static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
 52{
 53	return addr + count <= __pa(high_memory);
 54}
 55
 56static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
 57{
 58	return 1;
 59}
 60#endif
 61
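/*
 * With CONFIG_STRICT_DEVMEM the architecture can veto /dev/mem access
 * one page frame at a time through devmem_is_allowed(); without it,
 * any physical address that passes valid_phys_addr_range() is fair
 * game.
 */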
 62#ifdef CONFIG_STRICT_DEVMEM
 63static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 64{
 65	u64 from = ((u64)pfn) << PAGE_SHIFT;
 66	u64 to = from + size;
 67	u64 cursor = from;
 68
 69	while (cursor < to) {
 70		if (!devmem_is_allowed(pfn))
 71			return 0;
 72		cursor += PAGE_SIZE;
 73		pfn++;
 74	}
 75	return 1;
 76}
 77#else
 78static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 79{
 80	return 1;
 81}
 82#endif
 83
 84#ifndef unxlate_dev_mem_ptr
 85#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
 86void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
 87{
 88}
 89#endif
 90
 91/*
  92 * This function reads the *physical* memory. The f_pos points directly to the
 93 * memory location.
 94 */
 95static ssize_t read_mem(struct file *file, char __user *buf,
 96			size_t count, loff_t *ppos)
 97{
 98	phys_addr_t p = *ppos;
 99	ssize_t read, sz;
100	void *ptr;
101
102	if (p != *ppos)
103		return 0;
104
105	if (!valid_phys_addr_range(p, count))
106		return -EFAULT;
107	read = 0;
108#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
109	/* we don't have page 0 mapped on sparc and m68k.. */
110	if (p < PAGE_SIZE) {
111		sz = size_inside_page(p, count);
112		if (sz > 0) {
113			if (clear_user(buf, sz))
114				return -EFAULT;
115			buf += sz;
116			p += sz;
117			count -= sz;
118			read += sz;
119		}
120	}
121#endif
122
123	while (count > 0) {
124		unsigned long remaining;
125
126		sz = size_inside_page(p, count);
127
128		if (!range_is_allowed(p >> PAGE_SHIFT, count))
129			return -EPERM;
130
131		/*
132		 * On ia64 if a page has been mapped somewhere as uncached, then
133		 * it must also be accessed uncached by the kernel or data
134		 * corruption may occur.
135		 */
136		ptr = xlate_dev_mem_ptr(p);
137		if (!ptr)
138			return -EFAULT;
139
140		remaining = copy_to_user(buf, ptr, sz);
141		unxlate_dev_mem_ptr(p, ptr);
142		if (remaining)
143			return -EFAULT;
144
145		buf += sz;
146		p += sz;
147		count -= sz;
148		read += sz;
149	}
150
151	*ppos += read;
152	return read;
153}
154
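/*
 * Unlike read_mem(), write_mem() reports partial progress: once at
 * least one byte has been written, a later translation failure or
 * user-copy fault ends the write early rather than returning -EFAULT.
 */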
155static ssize_t write_mem(struct file *file, const char __user *buf,
156			 size_t count, loff_t *ppos)
157{
158	phys_addr_t p = *ppos;
159	ssize_t written, sz;
160	unsigned long copied;
161	void *ptr;
162
163	if (p != *ppos)
164		return -EFBIG;
165
166	if (!valid_phys_addr_range(p, count))
167		return -EFAULT;
168
169	written = 0;
170
171#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
172	/* we don't have page 0 mapped on sparc and m68k.. */
173	if (p < PAGE_SIZE) {
174		sz = size_inside_page(p, count);
175		/* Hmm. Do something? */
176		buf += sz;
177		p += sz;
178		count -= sz;
179		written += sz;
180	}
181#endif
182
183	while (count > 0) {
184		sz = size_inside_page(p, count);
185
186		if (!range_is_allowed(p >> PAGE_SHIFT, sz))
187			return -EPERM;
188
189		/*
190		 * On ia64 if a page has been mapped somewhere as uncached, then
191		 * it must also be accessed uncached by the kernel or data
192		 * corruption may occur.
193		 */
194		ptr = xlate_dev_mem_ptr(p);
195		if (!ptr) {
196			if (written)
197				break;
198			return -EFAULT;
199		}
200
201		copied = copy_from_user(ptr, buf, sz);
202		unxlate_dev_mem_ptr(p, ptr);
203		if (copied) {
204			written += sz - copied;
205			if (written)
206				break;
207			return -EFAULT;
208		}
209
210		buf += sz;
211		p += sz;
212		count -= sz;
213		written += sz;
214	}
215
216	*ppos += written;
217	return written;
218}
219
220int __weak phys_mem_access_prot_allowed(struct file *file,
221	unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
222{
223	return 1;
224}
225
226#ifndef __HAVE_PHYS_MEM_ACCESS_PROT
227
228/*
229 * Architectures vary in how they handle caching for addresses
230 * outside of main memory.
231 *
232 */
233#ifdef pgprot_noncached
234static int uncached_access(struct file *file, phys_addr_t addr)
235{
236#if defined(CONFIG_IA64)
237	/*
238	 * On ia64, we ignore O_DSYNC because we cannot tolerate memory
239	 * attribute aliases.
240	 */
241	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
242#elif defined(CONFIG_MIPS)
243	{
244		extern int __uncached_access(struct file *file,
245					     unsigned long addr);
246
247		return __uncached_access(file, addr);
248	}
249#else
250	/*
 251	 * Accessing memory above the top of memory the kernel knows about,
 252	 * or through a file pointer that was marked O_DSYNC, will be done
 253	 * non-cached.
254	 */
255	if (file->f_flags & O_DSYNC)
256		return 1;
257	return addr >= __pa(high_memory);
258#endif
259}
260#endif
261
262static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
263				     unsigned long size, pgprot_t vma_prot)
264{
265#ifdef pgprot_noncached
266	phys_addr_t offset = pfn << PAGE_SHIFT;
267
268	if (uncached_access(file, offset))
269		return pgprot_noncached(vma_prot);
270#endif
271	return vma_prot;
272}
273#endif
274
275#ifndef CONFIG_MMU
276static unsigned long get_unmapped_area_mem(struct file *file,
277					   unsigned long addr,
278					   unsigned long len,
279					   unsigned long pgoff,
280					   unsigned long flags)
281{
282	if (!valid_mmap_phys_addr_range(pgoff, len))
283		return (unsigned long) -EINVAL;
284	return pgoff << PAGE_SHIFT;
285}
286
287/* permit direct mmap, for read, write or exec */
288static unsigned memory_mmap_capabilities(struct file *file)
289{
290	return NOMMU_MAP_DIRECT |
291		NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
292}
293
294static unsigned zero_mmap_capabilities(struct file *file)
295{
296	return NOMMU_MAP_COPY;
297}
298
299/* can't do an in-place private mapping if there's no MMU */
300static inline int private_mapping_ok(struct vm_area_struct *vma)
301{
302	return vma->vm_flags & VM_MAYSHARE;
303}
304#else
305
306static inline int private_mapping_ok(struct vm_area_struct *vma)
307{
308	return 1;
309}
310#endif
311
312static const struct vm_operations_struct mmap_mem_ops = {
313#ifdef CONFIG_HAVE_IOREMAP_PROT
314	.access = generic_access_phys
315#endif
316};
317
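/*
 * For /dev/mem, the mmap() file offset is the physical address:
 * vm_pgoff arrives as a page frame number, so mapping offset 0x10000
 * with 4 KiB pages targets pfn 0x10.
 */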
318static int mmap_mem(struct file *file, struct vm_area_struct *vma)
319{
320	size_t size = vma->vm_end - vma->vm_start;
321
322	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
323		return -EINVAL;
324
325	if (!private_mapping_ok(vma))
326		return -ENOSYS;
327
328	if (!range_is_allowed(vma->vm_pgoff, size))
329		return -EPERM;
330
331	if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
332						&vma->vm_page_prot))
333		return -EINVAL;
334
335	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
336						 size,
337						 vma->vm_page_prot);
338
339	vma->vm_ops = &mmap_mem_ops;
340
341	/* Remap-pfn-range will mark the range VM_IO */
342	if (remap_pfn_range(vma,
343			    vma->vm_start,
344			    vma->vm_pgoff,
345			    size,
346			    vma->vm_page_prot)) {
347		return -EAGAIN;
348	}
349	return 0;
350}
351
352static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
353{
354	unsigned long pfn;
355
356	/* Turn a kernel-virtual address into a physical page frame */
357	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;
358
359	/*
360	 * RED-PEN: on some architectures there is more mapped memory than
361	 * available in mem_map which pfn_valid checks for. Perhaps should add a
362	 * new macro here.
363	 *
364	 * RED-PEN: vmalloc is not supported right now.
365	 */
366	if (!pfn_valid(pfn))
367		return -EIO;
368
369	vma->vm_pgoff = pfn;
370	return mmap_mem(file, vma);
371}
372
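/*
 * read_kmem() below covers two regions: lowmem under high_memory is
 * copied out directly, while vmalloc/module space is staged through a
 * bounce page with vread(), which zero-fills any unmapped holes.
 */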
373/*
374 * This function reads the *virtual* memory as seen by the kernel.
375 */
376static ssize_t read_kmem(struct file *file, char __user *buf,
377			 size_t count, loff_t *ppos)
378{
379	unsigned long p = *ppos;
380	ssize_t low_count, read, sz;
381	char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
382	int err = 0;
383
384	read = 0;
385	if (p < (unsigned long) high_memory) {
386		low_count = count;
387		if (count > (unsigned long)high_memory - p)
388			low_count = (unsigned long)high_memory - p;
389
390#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
391		/* we don't have page 0 mapped on sparc and m68k.. */
392		if (p < PAGE_SIZE && low_count > 0) {
393			sz = size_inside_page(p, low_count);
394			if (clear_user(buf, sz))
395				return -EFAULT;
396			buf += sz;
397			p += sz;
398			read += sz;
399			low_count -= sz;
400			count -= sz;
401		}
402#endif
403		while (low_count > 0) {
404			sz = size_inside_page(p, low_count);
405
406			/*
407			 * On ia64 if a page has been mapped somewhere as
408			 * uncached, then it must also be accessed uncached
 409			 * by the kernel or data corruption may occur.
410			 */
411			kbuf = xlate_dev_kmem_ptr((void *)p);
412			if (!virt_addr_valid(kbuf))
413				return -ENXIO;
414
415			if (copy_to_user(buf, kbuf, sz))
416				return -EFAULT;
417			buf += sz;
418			p += sz;
419			read += sz;
420			low_count -= sz;
421			count -= sz;
422		}
423	}
424
425	if (count > 0) {
426		kbuf = (char *)__get_free_page(GFP_KERNEL);
427		if (!kbuf)
428			return -ENOMEM;
429		while (count > 0) {
430			sz = size_inside_page(p, count);
431			if (!is_vmalloc_or_module_addr((void *)p)) {
432				err = -ENXIO;
433				break;
434			}
435			sz = vread(kbuf, (char *)p, sz);
436			if (!sz)
437				break;
438			if (copy_to_user(buf, kbuf, sz)) {
439				err = -EFAULT;
440				break;
441			}
442			count -= sz;
443			buf += sz;
444			read += sz;
445			p += sz;
446		}
447		free_page((unsigned long)kbuf);
448	}
449	*ppos = p;
450	return read ? read : err;
451}
452
453
454static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
455				size_t count, loff_t *ppos)
456{
457	ssize_t written, sz;
458	unsigned long copied;
459
460	written = 0;
461#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
462	/* we don't have page 0 mapped on sparc and m68k.. */
463	if (p < PAGE_SIZE) {
464		sz = size_inside_page(p, count);
465		/* Hmm. Do something? */
466		buf += sz;
467		p += sz;
468		count -= sz;
469		written += sz;
470	}
471#endif
472
473	while (count > 0) {
474		void *ptr;
475
476		sz = size_inside_page(p, count);
477
478		/*
479		 * On ia64 if a page has been mapped somewhere as uncached, then
480		 * it must also be accessed uncached by the kernel or data
481		 * corruption may occur.
482		 */
483		ptr = xlate_dev_kmem_ptr((void *)p);
484		if (!virt_addr_valid(ptr))
485			return -ENXIO;
486
487		copied = copy_from_user(ptr, buf, sz);
488		if (copied) {
489			written += sz - copied;
490			if (written)
491				break;
492			return -EFAULT;
493		}
494		buf += sz;
495		p += sz;
496		count -= sz;
497		written += sz;
498	}
499
500	*ppos += written;
501	return written;
502}
503
504/*
505 * This function writes to the *virtual* memory as seen by the kernel.
506 */
507static ssize_t write_kmem(struct file *file, const char __user *buf,
508			  size_t count, loff_t *ppos)
509{
510	unsigned long p = *ppos;
511	ssize_t wrote = 0;
512	ssize_t virtr = 0;
513	char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
514	int err = 0;
515
516	if (p < (unsigned long) high_memory) {
517		unsigned long to_write = min_t(unsigned long, count,
518					       (unsigned long)high_memory - p);
519		wrote = do_write_kmem(p, buf, to_write, ppos);
520		if (wrote != to_write)
521			return wrote;
522		p += wrote;
523		buf += wrote;
524		count -= wrote;
525	}
526
527	if (count > 0) {
528		kbuf = (char *)__get_free_page(GFP_KERNEL);
529		if (!kbuf)
530			return wrote ? wrote : -ENOMEM;
531		while (count > 0) {
532			unsigned long sz = size_inside_page(p, count);
533			unsigned long n;
534
535			if (!is_vmalloc_or_module_addr((void *)p)) {
536				err = -ENXIO;
537				break;
538			}
539			n = copy_from_user(kbuf, buf, sz);
540			if (n) {
541				err = -EFAULT;
542				break;
543			}
544			vwrite(kbuf, (char *)p, sz);
545			count -= sz;
546			buf += sz;
547			virtr += sz;
548			p += sz;
549		}
550		free_page((unsigned long)kbuf);
551	}
552
553	*ppos = p;
554	return virtr + wrote ? : err;
555}
556
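/*
 * /dev/port drives x86-style I/O ports one byte at a time with
 * inb()/outb(); the file offset selects the port, so only ports
 * 0-65535 are addressable.
 */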
557static ssize_t read_port(struct file *file, char __user *buf,
558			 size_t count, loff_t *ppos)
559{
560	unsigned long i = *ppos;
561	char __user *tmp = buf;
562
563	if (!access_ok(VERIFY_WRITE, buf, count))
564		return -EFAULT;
565	while (count-- > 0 && i < 65536) {
566		if (__put_user(inb(i), tmp) < 0)
567			return -EFAULT;
568		i++;
569		tmp++;
570	}
571	*ppos = i;
572	return tmp-buf;
573}
574
575static ssize_t write_port(struct file *file, const char __user *buf,
576			  size_t count, loff_t *ppos)
577{
578	unsigned long i = *ppos;
579	const char __user *tmp = buf;
580
581	if (!access_ok(VERIFY_READ, buf, count))
582		return -EFAULT;
583	while (count-- > 0 && i < 65536) {
584		char c;
585
586		if (__get_user(c, tmp)) {
587			if (tmp > buf)
588				break;
589			return -EFAULT;
590		}
591		outb(c, i);
592		i++;
593		tmp++;
594	}
595	*ppos = i;
596	return tmp-buf;
597}
598
599static ssize_t read_null(struct file *file, char __user *buf,
600			 size_t count, loff_t *ppos)
601{
602	return 0;
603}
604
605static ssize_t write_null(struct file *file, const char __user *buf,
606			  size_t count, loff_t *ppos)
607{
608	return count;
609}
610
611static ssize_t read_iter_null(struct kiocb *iocb, struct iov_iter *to)
612{
613	return 0;
614}
615
616static ssize_t write_iter_null(struct kiocb *iocb, struct iov_iter *from)
617{
618	size_t count = iov_iter_count(from);
619	iov_iter_advance(from, count);
620	return count;
621}
622
623static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
624			struct splice_desc *sd)
625{
626	return sd->len;
627}
628
629static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
630				 loff_t *ppos, size_t len, unsigned int flags)
631{
632	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
633}
634
635static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
636{
637	size_t written = 0;
638
639	while (iov_iter_count(iter)) {
640		size_t chunk = iov_iter_count(iter), n;
641
642		if (chunk > PAGE_SIZE)
643			chunk = PAGE_SIZE;	/* Just for latency reasons */
644		n = iov_iter_zero(chunk, iter);
645		if (!n && iov_iter_count(iter))
646			return written ? written : -EFAULT;
647		written += n;
648		if (signal_pending(current))
649			return written ? written : -ERESTARTSYS;
650		cond_resched();
651	}
652	return written;
653}
654
655static int mmap_zero(struct file *file, struct vm_area_struct *vma)
656{
657#ifndef CONFIG_MMU
658	return -ENOSYS;
659#endif
660	if (vma->vm_flags & VM_SHARED)
661		return shmem_zero_setup(vma);
662	return 0;
663}
664
665static unsigned long get_unmapped_area_zero(struct file *file,
666				unsigned long addr, unsigned long len,
667				unsigned long pgoff, unsigned long flags)
668{
669#ifdef CONFIG_MMU
670	if (flags & MAP_SHARED) {
671		/*
672		 * mmap_zero() will call shmem_zero_setup() to create a file,
673		 * so use shmem's get_unmapped_area in case it can be huge;
674		 * and pass NULL for file as in mmap.c's get_unmapped_area(),
675		 * so as not to confuse shmem with our handle on "/dev/zero".
676		 */
677		return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags);
678	}
679
680	/* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */
681	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
682#else
683	return -ENOSYS;
684#endif
685}
686
687static ssize_t write_full(struct file *file, const char __user *buf,
688			  size_t count, loff_t *ppos)
689{
690	return -ENOSPC;
691}
692
693/*
694 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
695 * can fopen() both devices with "a" now.  This was previously impossible.
696 * -- SRB.
697 */
698static loff_t null_lseek(struct file *file, loff_t offset, int orig)
699{
700	return file->f_pos = 0;
701}
702
703/*
704 * The memory devices use the full 32/64 bits of the offset, and so we cannot
705 * check against negative addresses: they are ok. The return value is weird,
706 * though, in that case (0).
707 *
708 * also note that seeking relative to the "end of file" isn't supported:
709 * it has no meaning, so it returns -EINVAL.
710 */
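/*
 * The -MAX_ERRNO check below keeps the topmost 4095 offsets out of
 * reach so a successful lseek() return can never collide with the
 * errno range userspace checks for.
 */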
711static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
712{
713	loff_t ret;
714
715	inode_lock(file_inode(file));
716	switch (orig) {
717	case SEEK_CUR:
718		offset += file->f_pos;
719	case SEEK_SET:
720		/* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
721		if ((unsigned long long)offset >= -MAX_ERRNO) {
722			ret = -EOVERFLOW;
723			break;
724		}
725		file->f_pos = offset;
726		ret = file->f_pos;
727		force_successful_syscall_return();
728		break;
729	default:
730		ret = -EINVAL;
731	}
732	inode_unlock(file_inode(file));
733	return ret;
734}
735
736static int open_port(struct inode *inode, struct file *filp)
737{
738	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
739}
740
741#define zero_lseek	null_lseek
742#define full_lseek      null_lseek
743#define write_zero	write_null
744#define write_iter_zero	write_iter_null
745#define open_mem	open_port
746#define open_kmem	open_mem
747
748static const struct file_operations __maybe_unused mem_fops = {
749	.llseek		= memory_lseek,
750	.read		= read_mem,
751	.write		= write_mem,
752	.mmap		= mmap_mem,
753	.open		= open_mem,
754#ifndef CONFIG_MMU
755	.get_unmapped_area = get_unmapped_area_mem,
756	.mmap_capabilities = memory_mmap_capabilities,
757#endif
758};
759
760static const struct file_operations __maybe_unused kmem_fops = {
761	.llseek		= memory_lseek,
762	.read		= read_kmem,
763	.write		= write_kmem,
764	.mmap		= mmap_kmem,
765	.open		= open_kmem,
766#ifndef CONFIG_MMU
767	.get_unmapped_area = get_unmapped_area_mem,
768	.mmap_capabilities = memory_mmap_capabilities,
769#endif
770};
771
772static const struct file_operations null_fops = {
773	.llseek		= null_lseek,
774	.read		= read_null,
775	.write		= write_null,
776	.read_iter	= read_iter_null,
777	.write_iter	= write_iter_null,
778	.splice_write	= splice_write_null,
779};
780
781static const struct file_operations __maybe_unused port_fops = {
782	.llseek		= memory_lseek,
783	.read		= read_port,
784	.write		= write_port,
785	.open		= open_port,
786};
787
788static const struct file_operations zero_fops = {
789	.llseek		= zero_lseek,
790	.write		= write_zero,
791	.read_iter	= read_iter_zero,
792	.write_iter	= write_iter_zero,
793	.mmap		= mmap_zero,
794	.get_unmapped_area = get_unmapped_area_zero,
795#ifndef CONFIG_MMU
796	.mmap_capabilities = zero_mmap_capabilities,
797#endif
798};
799
800static const struct file_operations full_fops = {
801	.llseek		= full_lseek,
802	.read_iter	= read_iter_zero,
803	.write		= write_full,
804};
805
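/*
 * The devlist[] index doubles as the minor number under MEM_MAJOR (1):
 * /dev/null is char 1:3, /dev/zero is 1:5, /dev/urandom is 1:9, and so
 * on.
 */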
806static const struct memdev {
807	const char *name;
808	umode_t mode;
809	const struct file_operations *fops;
810	fmode_t fmode;
811} devlist[] = {
812#ifdef CONFIG_DEVMEM
813	 [1] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET },
814#endif
815#ifdef CONFIG_DEVKMEM
816	 [2] = { "kmem", 0, &kmem_fops, FMODE_UNSIGNED_OFFSET },
817#endif
818	 [3] = { "null", 0666, &null_fops, 0 },
819#ifdef CONFIG_DEVPORT
820	 [4] = { "port", 0, &port_fops, 0 },
821#endif
822	 [5] = { "zero", 0666, &zero_fops, 0 },
823	 [7] = { "full", 0666, &full_fops, 0 },
824	 [8] = { "random", 0666, &random_fops, 0 },
825	 [9] = { "urandom", 0666, &urandom_fops, 0 },
826#ifdef CONFIG_PRINTK
827	[11] = { "kmsg", 0644, &kmsg_fops, 0 },
828#endif
829};
830
831static int memory_open(struct inode *inode, struct file *filp)
832{
833	int minor;
834	const struct memdev *dev;
835
836	minor = iminor(inode);
837	if (minor >= ARRAY_SIZE(devlist))
838		return -ENXIO;
839
840	dev = &devlist[minor];
841	if (!dev->fops)
842		return -ENXIO;
843
844	filp->f_op = dev->fops;
845	filp->f_mode |= dev->fmode;
846
847	if (dev->fops->open)
848		return dev->fops->open(inode, filp);
849
850	return 0;
851}
852
853static const struct file_operations memory_fops = {
854	.open = memory_open,
855	.llseek = noop_llseek,
856};
857
858static char *mem_devnode(struct device *dev, umode_t *mode)
859{
860	if (mode && devlist[MINOR(dev->devt)].mode)
861		*mode = devlist[MINOR(dev->devt)].mode;
862	return NULL;
863}
864
865static struct class *mem_class;
866
867static int __init chr_dev_init(void)
868{
869	int minor;
870
871	if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
872		printk("unable to get major %d for memory devs\n", MEM_MAJOR);
873
874	mem_class = class_create(THIS_MODULE, "mem");
875	if (IS_ERR(mem_class))
876		return PTR_ERR(mem_class);
877
878	mem_class->devnode = mem_devnode;
879	for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
880		if (!devlist[minor].name)
881			continue;
882
883		/*
884		 * Create /dev/port?
885		 */
886		if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
887			continue;
888
889		device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
890			      NULL, devlist[minor].name);
891	}
892
893	return tty_init();
894}
895
896fs_initcall(chr_dev_init);
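
A small userspace sketch of the semantics implemented above, assuming
only the standard /dev/zero and /dev/null device nodes: read_iter_zero()
hands back zero-filled buffers and write_null() reports every byte as
consumed.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[16];
	int zfd = open("/dev/zero", O_RDONLY);
	int nfd = open("/dev/null", O_WRONLY);

	if (zfd < 0 || nfd < 0)
		return 1;

	memset(buf, 0xff, sizeof(buf));
	/* read_iter_zero() zero-fills the user buffer */
	if (read(zfd, buf, sizeof(buf)) == (ssize_t)sizeof(buf))
		printf("first byte after read: %d\n", buf[0]);	/* 0 */

	/* write_null() claims every byte without storing anything */
	printf("bytes \"written\": %zd\n", write(nfd, buf, sizeof(buf)));

	close(zfd);
	close(nfd);
	return 0;
}
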
v3.5.6
  1/*
  2 *  linux/drivers/char/mem.c
  3 *
  4 *  Copyright (C) 1991, 1992  Linus Torvalds
  5 *
  6 *  Added devfs support.
  7 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
  8 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
  9 */
 10
 11#include <linux/mm.h>
 12#include <linux/miscdevice.h>
 13#include <linux/slab.h>
 14#include <linux/vmalloc.h>
 15#include <linux/mman.h>
 16#include <linux/random.h>
 17#include <linux/init.h>
 18#include <linux/raw.h>
 19#include <linux/tty.h>
 20#include <linux/capability.h>
 21#include <linux/ptrace.h>
 22#include <linux/device.h>
 23#include <linux/highmem.h>
 24#include <linux/crash_dump.h>
 25#include <linux/backing-dev.h>
 26#include <linux/bootmem.h>
 27#include <linux/splice.h>
 28#include <linux/pfn.h>
 29#include <linux/export.h>
 30
 31#include <asm/uaccess.h>
 32#include <asm/io.h>
 33
 34#ifdef CONFIG_IA64
 35# include <linux/efi.h>
 36#endif
 37
 38static inline unsigned long size_inside_page(unsigned long start,
 39					     unsigned long size)
 40{
 41	unsigned long sz;
 42
 43	sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));
 44
 45	return min(sz, size);
 46}
 47
 48#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
 49static inline int valid_phys_addr_range(unsigned long addr, size_t count)
 50{
 51	return addr + count <= __pa(high_memory);
 52}
 53
 54static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
 55{
 56	return 1;
 57}
 58#endif
 59
 60#ifdef CONFIG_STRICT_DEVMEM
 61static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 62{
 63	u64 from = ((u64)pfn) << PAGE_SHIFT;
 64	u64 to = from + size;
 65	u64 cursor = from;
 66
 67	while (cursor < to) {
 68		if (!devmem_is_allowed(pfn)) {
 69			printk(KERN_INFO
 70		"Program %s tried to access /dev/mem between %Lx->%Lx.\n",
 71				current->comm, from, to);
 72			return 0;
 73		}
 74		cursor += PAGE_SIZE;
 75		pfn++;
 76	}
 77	return 1;
 78}
 79#else
 80static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 81{
 82	return 1;
 83}
 84#endif
 85
 86void __weak unxlate_dev_mem_ptr(unsigned long phys, void *addr)
 87{
 88}
 89
 90/*
  91 * This function reads the *physical* memory. The f_pos points directly to the
 92 * memory location.
 93 */
 94static ssize_t read_mem(struct file *file, char __user *buf,
 95			size_t count, loff_t *ppos)
 96{
 97	unsigned long p = *ppos;
 98	ssize_t read, sz;
 99	char *ptr;
100
101	if (!valid_phys_addr_range(p, count))
102		return -EFAULT;
103	read = 0;
104#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
105	/* we don't have page 0 mapped on sparc and m68k.. */
106	if (p < PAGE_SIZE) {
107		sz = size_inside_page(p, count);
108		if (sz > 0) {
109			if (clear_user(buf, sz))
110				return -EFAULT;
111			buf += sz;
112			p += sz;
113			count -= sz;
114			read += sz;
115		}
116	}
117#endif
118
119	while (count > 0) {
120		unsigned long remaining;
121
122		sz = size_inside_page(p, count);
123
124		if (!range_is_allowed(p >> PAGE_SHIFT, count))
125			return -EPERM;
126
127		/*
128		 * On ia64 if a page has been mapped somewhere as uncached, then
129		 * it must also be accessed uncached by the kernel or data
130		 * corruption may occur.
131		 */
132		ptr = xlate_dev_mem_ptr(p);
133		if (!ptr)
134			return -EFAULT;
135
136		remaining = copy_to_user(buf, ptr, sz);
137		unxlate_dev_mem_ptr(p, ptr);
138		if (remaining)
139			return -EFAULT;
140
141		buf += sz;
142		p += sz;
143		count -= sz;
144		read += sz;
145	}
146
147	*ppos += read;
148	return read;
149}
150
151static ssize_t write_mem(struct file *file, const char __user *buf,
152			 size_t count, loff_t *ppos)
153{
154	unsigned long p = *ppos;
155	ssize_t written, sz;
156	unsigned long copied;
157	void *ptr;
158
159	if (!valid_phys_addr_range(p, count))
160		return -EFAULT;
161
162	written = 0;
163
164#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
165	/* we don't have page 0 mapped on sparc and m68k.. */
166	if (p < PAGE_SIZE) {
167		sz = size_inside_page(p, count);
168		/* Hmm. Do something? */
169		buf += sz;
170		p += sz;
171		count -= sz;
172		written += sz;
173	}
174#endif
175
176	while (count > 0) {
177		sz = size_inside_page(p, count);
178
179		if (!range_is_allowed(p >> PAGE_SHIFT, sz))
180			return -EPERM;
181
182		/*
183		 * On ia64 if a page has been mapped somewhere as uncached, then
184		 * it must also be accessed uncached by the kernel or data
185		 * corruption may occur.
186		 */
187		ptr = xlate_dev_mem_ptr(p);
188		if (!ptr) {
189			if (written)
190				break;
191			return -EFAULT;
192		}
193
194		copied = copy_from_user(ptr, buf, sz);
195		unxlate_dev_mem_ptr(p, ptr);
196		if (copied) {
197			written += sz - copied;
198			if (written)
199				break;
200			return -EFAULT;
201		}
202
203		buf += sz;
204		p += sz;
205		count -= sz;
206		written += sz;
207	}
208
209	*ppos += written;
210	return written;
211}
212
213int __weak phys_mem_access_prot_allowed(struct file *file,
214	unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
215{
216	return 1;
217}
218
219#ifndef __HAVE_PHYS_MEM_ACCESS_PROT
220
221/*
222 * Architectures vary in how they handle caching for addresses
223 * outside of main memory.
224 *
225 */
226#ifdef pgprot_noncached
227static int uncached_access(struct file *file, unsigned long addr)
228{
229#if defined(CONFIG_IA64)
230	/*
231	 * On ia64, we ignore O_DSYNC because we cannot tolerate memory
232	 * attribute aliases.
233	 */
234	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
235#elif defined(CONFIG_MIPS)
236	{
237		extern int __uncached_access(struct file *file,
238					     unsigned long addr);
239
240		return __uncached_access(file, addr);
241	}
242#else
243	/*
 244	 * Accessing memory above the top of memory the kernel knows about,
 245	 * or through a file pointer that was marked O_DSYNC, will be done
 246	 * non-cached.
247	 */
248	if (file->f_flags & O_DSYNC)
249		return 1;
250	return addr >= __pa(high_memory);
251#endif
252}
253#endif
254
255static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
256				     unsigned long size, pgprot_t vma_prot)
257{
258#ifdef pgprot_noncached
259	unsigned long offset = pfn << PAGE_SHIFT;
260
261	if (uncached_access(file, offset))
262		return pgprot_noncached(vma_prot);
263#endif
264	return vma_prot;
265}
266#endif
267
268#ifndef CONFIG_MMU
269static unsigned long get_unmapped_area_mem(struct file *file,
270					   unsigned long addr,
271					   unsigned long len,
272					   unsigned long pgoff,
273					   unsigned long flags)
274{
275	if (!valid_mmap_phys_addr_range(pgoff, len))
276		return (unsigned long) -EINVAL;
277	return pgoff << PAGE_SHIFT;
278}
279
280/* can't do an in-place private mapping if there's no MMU */
281static inline int private_mapping_ok(struct vm_area_struct *vma)
282{
283	return vma->vm_flags & VM_MAYSHARE;
284}
285#else
286#define get_unmapped_area_mem	NULL
287
288static inline int private_mapping_ok(struct vm_area_struct *vma)
289{
290	return 1;
291}
292#endif
293
294static const struct vm_operations_struct mmap_mem_ops = {
295#ifdef CONFIG_HAVE_IOREMAP_PROT
296	.access = generic_access_phys
297#endif
298};
299
300static int mmap_mem(struct file *file, struct vm_area_struct *vma)
301{
302	size_t size = vma->vm_end - vma->vm_start;
303
304	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
305		return -EINVAL;
306
307	if (!private_mapping_ok(vma))
308		return -ENOSYS;
309
310	if (!range_is_allowed(vma->vm_pgoff, size))
311		return -EPERM;
312
313	if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
314						&vma->vm_page_prot))
315		return -EINVAL;
316
317	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
318						 size,
319						 vma->vm_page_prot);
320
321	vma->vm_ops = &mmap_mem_ops;
322
323	/* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
324	if (remap_pfn_range(vma,
325			    vma->vm_start,
326			    vma->vm_pgoff,
327			    size,
328			    vma->vm_page_prot)) {
329		return -EAGAIN;
330	}
331	return 0;
332}
333
334#ifdef CONFIG_DEVKMEM
335static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
336{
337	unsigned long pfn;
338
339	/* Turn a kernel-virtual address into a physical page frame */
340	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;
341
342	/*
343	 * RED-PEN: on some architectures there is more mapped memory than
344	 * available in mem_map which pfn_valid checks for. Perhaps should add a
345	 * new macro here.
346	 *
347	 * RED-PEN: vmalloc is not supported right now.
348	 */
349	if (!pfn_valid(pfn))
350		return -EIO;
351
352	vma->vm_pgoff = pfn;
353	return mmap_mem(file, vma);
354}
355#endif
356
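/*
 * /dev/oldmem is specific to older kernels (it is gone from the
 * v4.10.11 listing above): it let a kdump capture kernel read the
 * crashed kernel's memory page by page via copy_oldmem_page().
 */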
357#ifdef CONFIG_CRASH_DUMP
358/*
359 * Read memory corresponding to the old kernel.
360 */
361static ssize_t read_oldmem(struct file *file, char __user *buf,
362				size_t count, loff_t *ppos)
363{
364	unsigned long pfn, offset;
365	size_t read = 0, csize;
366	int rc = 0;
367
368	while (count) {
369		pfn = *ppos / PAGE_SIZE;
370		if (pfn > saved_max_pfn)
371			return read;
372
373		offset = (unsigned long)(*ppos % PAGE_SIZE);
374		if (count > PAGE_SIZE - offset)
375			csize = PAGE_SIZE - offset;
376		else
377			csize = count;
378
379		rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
380		if (rc < 0)
381			return rc;
382		buf += csize;
383		*ppos += csize;
384		read += csize;
385		count -= csize;
386	}
387	return read;
388}
389#endif
390
391#ifdef CONFIG_DEVKMEM
392/*
393 * This function reads the *virtual* memory as seen by the kernel.
394 */
395static ssize_t read_kmem(struct file *file, char __user *buf,
396			 size_t count, loff_t *ppos)
397{
398	unsigned long p = *ppos;
399	ssize_t low_count, read, sz;
400	char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
401	int err = 0;
402
403	read = 0;
404	if (p < (unsigned long) high_memory) {
405		low_count = count;
406		if (count > (unsigned long)high_memory - p)
407			low_count = (unsigned long)high_memory - p;
408
409#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
410		/* we don't have page 0 mapped on sparc and m68k.. */
411		if (p < PAGE_SIZE && low_count > 0) {
412			sz = size_inside_page(p, low_count);
413			if (clear_user(buf, sz))
414				return -EFAULT;
415			buf += sz;
416			p += sz;
417			read += sz;
418			low_count -= sz;
419			count -= sz;
420		}
421#endif
422		while (low_count > 0) {
423			sz = size_inside_page(p, low_count);
424
425			/*
426			 * On ia64 if a page has been mapped somewhere as
427			 * uncached, then it must also be accessed uncached
 428			 * by the kernel or data corruption may occur.
429			 */
430			kbuf = xlate_dev_kmem_ptr((char *)p);
431
432			if (copy_to_user(buf, kbuf, sz))
433				return -EFAULT;
434			buf += sz;
435			p += sz;
436			read += sz;
437			low_count -= sz;
438			count -= sz;
439		}
440	}
441
442	if (count > 0) {
443		kbuf = (char *)__get_free_page(GFP_KERNEL);
444		if (!kbuf)
445			return -ENOMEM;
446		while (count > 0) {
447			sz = size_inside_page(p, count);
448			if (!is_vmalloc_or_module_addr((void *)p)) {
449				err = -ENXIO;
450				break;
451			}
452			sz = vread(kbuf, (char *)p, sz);
453			if (!sz)
454				break;
455			if (copy_to_user(buf, kbuf, sz)) {
456				err = -EFAULT;
457				break;
458			}
459			count -= sz;
460			buf += sz;
461			read += sz;
462			p += sz;
463		}
464		free_page((unsigned long)kbuf);
465	}
466	*ppos = p;
467	return read ? read : err;
468}
469
470
471static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
472				size_t count, loff_t *ppos)
473{
474	ssize_t written, sz;
475	unsigned long copied;
476
477	written = 0;
478#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
479	/* we don't have page 0 mapped on sparc and m68k.. */
480	if (p < PAGE_SIZE) {
481		sz = size_inside_page(p, count);
482		/* Hmm. Do something? */
483		buf += sz;
484		p += sz;
485		count -= sz;
486		written += sz;
487	}
488#endif
489
490	while (count > 0) {
491		char *ptr;
492
493		sz = size_inside_page(p, count);
494
495		/*
496		 * On ia64 if a page has been mapped somewhere as uncached, then
497		 * it must also be accessed uncached by the kernel or data
498		 * corruption may occur.
499		 */
500		ptr = xlate_dev_kmem_ptr((char *)p);
501
502		copied = copy_from_user(ptr, buf, sz);
503		if (copied) {
504			written += sz - copied;
505			if (written)
506				break;
507			return -EFAULT;
508		}
509		buf += sz;
510		p += sz;
511		count -= sz;
512		written += sz;
513	}
514
515	*ppos += written;
516	return written;
517}
518
519/*
520 * This function writes to the *virtual* memory as seen by the kernel.
521 */
522static ssize_t write_kmem(struct file *file, const char __user *buf,
523			  size_t count, loff_t *ppos)
524{
525	unsigned long p = *ppos;
526	ssize_t wrote = 0;
527	ssize_t virtr = 0;
528	char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
529	int err = 0;
530
531	if (p < (unsigned long) high_memory) {
532		unsigned long to_write = min_t(unsigned long, count,
533					       (unsigned long)high_memory - p);
534		wrote = do_write_kmem(p, buf, to_write, ppos);
535		if (wrote != to_write)
536			return wrote;
537		p += wrote;
538		buf += wrote;
539		count -= wrote;
540	}
541
542	if (count > 0) {
543		kbuf = (char *)__get_free_page(GFP_KERNEL);
544		if (!kbuf)
545			return wrote ? wrote : -ENOMEM;
546		while (count > 0) {
547			unsigned long sz = size_inside_page(p, count);
548			unsigned long n;
549
550			if (!is_vmalloc_or_module_addr((void *)p)) {
551				err = -ENXIO;
552				break;
553			}
554			n = copy_from_user(kbuf, buf, sz);
555			if (n) {
556				err = -EFAULT;
557				break;
558			}
559			vwrite(kbuf, (char *)p, sz);
560			count -= sz;
561			buf += sz;
562			virtr += sz;
563			p += sz;
564		}
565		free_page((unsigned long)kbuf);
566	}
567
568	*ppos = p;
569	return virtr + wrote ? : err;
570}
571#endif
572
573#ifdef CONFIG_DEVPORT
574static ssize_t read_port(struct file *file, char __user *buf,
575			 size_t count, loff_t *ppos)
576{
577	unsigned long i = *ppos;
578	char __user *tmp = buf;
579
580	if (!access_ok(VERIFY_WRITE, buf, count))
581		return -EFAULT;
582	while (count-- > 0 && i < 65536) {
583		if (__put_user(inb(i), tmp) < 0)
584			return -EFAULT;
585		i++;
586		tmp++;
587	}
588	*ppos = i;
589	return tmp-buf;
590}
591
592static ssize_t write_port(struct file *file, const char __user *buf,
593			  size_t count, loff_t *ppos)
594{
595	unsigned long i = *ppos;
596	const char __user * tmp = buf;
597
598	if (!access_ok(VERIFY_READ, buf, count))
599		return -EFAULT;
600	while (count-- > 0 && i < 65536) {
601		char c;
602		if (__get_user(c, tmp)) {
603			if (tmp > buf)
604				break;
605			return -EFAULT;
606		}
607		outb(c, i);
608		i++;
609		tmp++;
610	}
611	*ppos = i;
612	return tmp-buf;
613}
614#endif
615
616static ssize_t read_null(struct file *file, char __user *buf,
617			 size_t count, loff_t *ppos)
618{
619	return 0;
620}
621
622static ssize_t write_null(struct file *file, const char __user *buf,
623			  size_t count, loff_t *ppos)
624{
625	return count;
626}
627
628static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
629			struct splice_desc *sd)
630{
631	return sd->len;
632}
633
634static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
635				 loff_t *ppos, size_t len, unsigned int flags)
636{
637	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
638}
639
640static ssize_t read_zero(struct file *file, char __user *buf,
641			 size_t count, loff_t *ppos)
642{
643	size_t written;
644
645	if (!count)
646		return 0;
647
648	if (!access_ok(VERIFY_WRITE, buf, count))
649		return -EFAULT;
650
651	written = 0;
652	while (count) {
653		unsigned long unwritten;
654		size_t chunk = count;
655
656		if (chunk > PAGE_SIZE)
657			chunk = PAGE_SIZE;	/* Just for latency reasons */
658		unwritten = __clear_user(buf, chunk);
659		written += chunk - unwritten;
660		if (unwritten)
661			break;
662		if (signal_pending(current))
663			return written ? written : -ERESTARTSYS;
664		buf += chunk;
665		count -= chunk;
666		cond_resched();
667	}
668	return written ? written : -EFAULT;
669}
670
671static int mmap_zero(struct file *file, struct vm_area_struct *vma)
672{
673#ifndef CONFIG_MMU
674	return -ENOSYS;
675#endif
676	if (vma->vm_flags & VM_SHARED)
677		return shmem_zero_setup(vma);
678	return 0;
679}
680
681static ssize_t write_full(struct file *file, const char __user *buf,
682			  size_t count, loff_t *ppos)
683{
684	return -ENOSPC;
685}
686
687/*
688 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
689 * can fopen() both devices with "a" now.  This was previously impossible.
690 * -- SRB.
691 */
692static loff_t null_lseek(struct file *file, loff_t offset, int orig)
693{
694	return file->f_pos = 0;
695}
696
697/*
698 * The memory devices use the full 32/64 bits of the offset, and so we cannot
699 * check against negative addresses: they are ok. The return value is weird,
700 * though, in that case (0).
701 *
702 * also note that seeking relative to the "end of file" isn't supported:
703 * it has no meaning, so it returns -EINVAL.
704 */
705static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
706{
707	loff_t ret;
708
709	mutex_lock(&file->f_path.dentry->d_inode->i_mutex);
710	switch (orig) {
711	case SEEK_CUR:
712		offset += file->f_pos;
713	case SEEK_SET:
714		/* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
715		if ((unsigned long long)offset >= ~0xFFFULL) {
716			ret = -EOVERFLOW;
717			break;
718		}
719		file->f_pos = offset;
720		ret = file->f_pos;
721		force_successful_syscall_return();
722		break;
723	default:
724		ret = -EINVAL;
725	}
726	mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
727	return ret;
728}
729
730static int open_port(struct inode * inode, struct file * filp)
731{
732	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
733}
734
735#define zero_lseek	null_lseek
736#define full_lseek      null_lseek
737#define write_zero	write_null
738#define read_full       read_zero
739#define open_mem	open_port
740#define open_kmem	open_mem
741#define open_oldmem	open_mem
742
743static const struct file_operations mem_fops = {
744	.llseek		= memory_lseek,
745	.read		= read_mem,
746	.write		= write_mem,
747	.mmap		= mmap_mem,
748	.open		= open_mem,
749	.get_unmapped_area = get_unmapped_area_mem,
750};
751
752#ifdef CONFIG_DEVKMEM
753static const struct file_operations kmem_fops = {
754	.llseek		= memory_lseek,
755	.read		= read_kmem,
756	.write		= write_kmem,
757	.mmap		= mmap_kmem,
758	.open		= open_kmem,
759	.get_unmapped_area = get_unmapped_area_mem,
760};
761#endif
762
763static const struct file_operations null_fops = {
764	.llseek		= null_lseek,
765	.read		= read_null,
766	.write		= write_null,
767	.splice_write	= splice_write_null,
768};
769
770#ifdef CONFIG_DEVPORT
771static const struct file_operations port_fops = {
772	.llseek		= memory_lseek,
773	.read		= read_port,
774	.write		= write_port,
775	.open		= open_port,
776};
777#endif
778
779static const struct file_operations zero_fops = {
780	.llseek		= zero_lseek,
781	.read		= read_zero,
782	.write		= write_zero,
783	.mmap		= mmap_zero,
784};
785
786/*
787 * capabilities for /dev/zero
788 * - permits private mappings, "copies" are taken of the source of zeros
789 * - no writeback happens
790 */
791static struct backing_dev_info zero_bdi = {
792	.name		= "char/mem",
793	.capabilities	= BDI_CAP_MAP_COPY | BDI_CAP_NO_ACCT_AND_WRITEBACK,
794};
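/*
 * In v3.5 the mapping capabilities of /dev/zero hang off this
 * backing_dev_info; the v4.10.11 code above expresses the same idea
 * with per-file_operations mmap_capabilities() callbacks instead.
 */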
795
796static const struct file_operations full_fops = {
797	.llseek		= full_lseek,
798	.read		= read_full,
799	.write		= write_full,
800};
801
802#ifdef CONFIG_CRASH_DUMP
803static const struct file_operations oldmem_fops = {
804	.read	= read_oldmem,
805	.open	= open_oldmem,
806	.llseek = default_llseek,
807};
808#endif
809
810static const struct memdev {
811	const char *name;
812	umode_t mode;
813	const struct file_operations *fops;
814	struct backing_dev_info *dev_info;
815} devlist[] = {
816	 [1] = { "mem", 0, &mem_fops, &directly_mappable_cdev_bdi },
817#ifdef CONFIG_DEVKMEM
818	 [2] = { "kmem", 0, &kmem_fops, &directly_mappable_cdev_bdi },
819#endif
820	 [3] = { "null", 0666, &null_fops, NULL },
821#ifdef CONFIG_DEVPORT
822	 [4] = { "port", 0, &port_fops, NULL },
823#endif
824	 [5] = { "zero", 0666, &zero_fops, &zero_bdi },
825	 [7] = { "full", 0666, &full_fops, NULL },
826	 [8] = { "random", 0666, &random_fops, NULL },
827	 [9] = { "urandom", 0666, &urandom_fops, NULL },
828#ifdef CONFIG_PRINTK
829	[11] = { "kmsg", 0644, &kmsg_fops, NULL },
830#endif
831#ifdef CONFIG_CRASH_DUMP
832	[12] = { "oldmem", 0, &oldmem_fops, NULL },
833#endif
834};
835
836static int memory_open(struct inode *inode, struct file *filp)
837{
838	int minor;
839	const struct memdev *dev;
840
841	minor = iminor(inode);
842	if (minor >= ARRAY_SIZE(devlist))
843		return -ENXIO;
844
845	dev = &devlist[minor];
846	if (!dev->fops)
847		return -ENXIO;
848
849	filp->f_op = dev->fops;
850	if (dev->dev_info)
851		filp->f_mapping->backing_dev_info = dev->dev_info;
852
853	/* Is /dev/mem or /dev/kmem ? */
854	if (dev->dev_info == &directly_mappable_cdev_bdi)
855		filp->f_mode |= FMODE_UNSIGNED_OFFSET;
856
857	if (dev->fops->open)
858		return dev->fops->open(inode, filp);
859
860	return 0;
861}
862
863static const struct file_operations memory_fops = {
864	.open = memory_open,
865	.llseek = noop_llseek,
866};
867
868static char *mem_devnode(struct device *dev, umode_t *mode)
869{
870	if (mode && devlist[MINOR(dev->devt)].mode)
871		*mode = devlist[MINOR(dev->devt)].mode;
872	return NULL;
873}
874
875static struct class *mem_class;
876
877static int __init chr_dev_init(void)
878{
879	int minor;
880	int err;
881
882	err = bdi_init(&zero_bdi);
883	if (err)
884		return err;
885
886	if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
887		printk("unable to get major %d for memory devs\n", MEM_MAJOR);
888
889	mem_class = class_create(THIS_MODULE, "mem");
890	if (IS_ERR(mem_class))
891		return PTR_ERR(mem_class);
892
893	mem_class->devnode = mem_devnode;
894	for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
895		if (!devlist[minor].name)
896			continue;
897		device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
898			      NULL, devlist[minor].name);
899	}
900
901	return tty_init();
902}
903
904fs_initcall(chr_dev_init);
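
In both versions, mmap_zero() hands MAP_SHARED mappings to shmem via
shmem_zero_setup() and leaves MAP_PRIVATE ones to the zero-page fault
path. A sketch of the classic idiom this enables (illustrative only):

#include <stdio.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/zero", O_RDWR);
	if (fd < 0)
		return 1;

	/* private mapping: no shmem object, pages come from the zero page */
	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_PRIVATE,
		       fd, 0);
	close(fd);			/* the mapping outlives the fd */
	if (p == MAP_FAILED)
		return 1;

	p[0] = 42;			/* copy-on-write of a zero page */
	printf("%d %d\n", p[0], p[1]);	/* prints "42 0" */
	munmap(p, 4096);
	return 0;
}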