v3.1
/*
 *  linux/arch/arm/lib/uaccess_with_memcpy.c
 *
 *  Written by: Lennert Buytenhek and Nicolas Pitre
 *  Copyright (C) 2009 Marvell Semiconductor
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/uaccess.h>
#include <linux/rwsem.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/hardirq.h> /* for in_atomic() */
#include <linux/gfp.h>

#include <asm/current.h>
#include <asm/page.h>

static int
pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
{
	unsigned long addr = (unsigned long)_addr;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	pud_t *pud;
	spinlock_t *ptl;

	/* Walk the current task's page tables down to the pte. */
	pgd = pgd_offset(current->mm, addr);
	if (unlikely(pgd_none(*pgd) || pgd_bad(*pgd)))
		return 0;

	pud = pud_offset(pgd, addr);
	if (unlikely(pud_none(*pud) || pud_bad(*pud)))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (unlikely(pmd_none(*pmd) || pmd_bad(*pmd)))
		return 0;

	/*
	 * The page must be present, young, writable and dirty so that a
	 * plain memcpy() to it cannot fault or lose accessed/dirty state;
	 * holding the pte lock keeps it that way until the caller does
	 * pte_unmap_unlock().
	 */
	pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);
	if (unlikely(!pte_present(*pte) || !pte_young(*pte) ||
	    !pte_write(*pte) || !pte_dirty(*pte))) {
		pte_unmap_unlock(pte, ptl);
		return 0;
	}

	*ptep = pte;
	*ptlp = ptl;

	return 1;
}
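
/*
 * [Editor's sketch, not part of this file] The same "resident and
 * writable" guarantee could also be obtained with get_user_pages(),
 * at the cost of taking (and later dropping, via put_page()) a
 * reference on the page; the pte-lock walk above avoids that
 * overhead.  The signature below is the one from this kernel era,
 * and pin_page_alternative() is a hypothetical name.
 */
static int pin_page_alternative(unsigned long addr, struct page **page)
{
	int ret;

	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(current, current->mm, addr, 1,
			     1 /* write */, 0 /* force */, page, NULL);
	up_read(&current->mm->mmap_sem);

	return ret == 1;	/* caller must put_page(*page) when done */
}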

static unsigned long noinline
__copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
{
	int atomic;

	if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
		memcpy((void *)to, from, n);
		return 0;
	}

	/* the mmap semaphore is taken only if not in an atomic context */
	atomic = in_atomic();

	if (!atomic)
		down_read(&current->mm->mmap_sem);
	while (n) {
		pte_t *pte;
		spinlock_t *ptl;
		int tocopy;

		/*
		 * If the destination page isn't resident and writable
		 * yet, fault it in by writing a zero byte to it, then
		 * try to pin it again.
		 */
		while (!pin_page_for_write(to, &pte, &ptl)) {
			if (!atomic)
				up_read(&current->mm->mmap_sem);
			if (__put_user(0, (char __user *)to))
				goto out;
			if (!atomic)
				down_read(&current->mm->mmap_sem);
		}

		/* number of bytes from 'to' up to the end of its page */
		tocopy = (~(unsigned long)to & ~PAGE_MASK) + 1;
		if (tocopy > n)
			tocopy = n;

		memcpy((void *)to, from, tocopy);
		to += tocopy;
		from += tocopy;
		n -= tocopy;

		pte_unmap_unlock(pte, ptl);
	}
	if (!atomic)
		up_read(&current->mm->mmap_sem);

out:
	return n;
}

unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	/*
	 * This test is stubbed out of the main function above to keep
	 * the overhead for small copies low by avoiding a large
	 * register dump on the stack just to reload the registers right
	 * away.  With the frame pointer disabled, tail call optimization
	 * kicks in as well, making this test almost invisible.
	 */
	if (n < 64)
		return __copy_to_user_std(to, from, n);
	return __copy_to_user_memcpy(to, from, n);
}
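
/*
 * [Editor's sketch, not part of this file] A typical caller: a
 * character-device read() method (it would live in a driver with
 * <linux/fs.h> included) going through copy_to_user(), which ends up
 * in __copy_to_user() above.  The uaccess helpers return the number
 * of bytes left uncopied, so any nonzero result maps to -EFAULT.
 * demo_buf and demo_read are hypothetical names.
 */
static const char demo_buf[] = "hello from the kernel\n";

static ssize_t demo_read(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	size_t len = sizeof(demo_buf) - 1;

	if (*ppos >= len)
		return 0;		/* EOF */
	if (count > len - *ppos)
		count = len - *ppos;
	if (copy_to_user(buf, demo_buf + *ppos, count))
		return -EFAULT;
	*ppos += count;

	return count;
}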

static unsigned long noinline
__clear_user_memset(void __user *addr, unsigned long n)
{
	if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
		memset((void *)addr, 0, n);
		return 0;
	}

	down_read(&current->mm->mmap_sem);
	while (n) {
		pte_t *pte;
		spinlock_t *ptl;
		int tocopy;

		while (!pin_page_for_write(addr, &pte, &ptl)) {
			up_read(&current->mm->mmap_sem);
			if (__put_user(0, (char __user *)addr))
				goto out;
			down_read(&current->mm->mmap_sem);
		}

		/* number of bytes from 'addr' up to the end of its page */
		tocopy = (~(unsigned long)addr & ~PAGE_MASK) + 1;
		if (tocopy > n)
			tocopy = n;

		memset((void *)addr, 0, tocopy);
		addr += tocopy;
		n -= tocopy;

		pte_unmap_unlock(pte, ptl);
	}
	up_read(&current->mm->mmap_sem);

out:
	return n;
}

unsigned long __clear_user(void __user *addr, unsigned long n)
{
	/* See the rationale for this in __copy_to_user() above. */
	if (n < 64)
		return __clear_user_std(addr, n);
	return __clear_user_memset(addr, n);
}
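
/*
 * [Editor's sketch, not part of this file] clear_user(), which lands
 * in __clear_user() above, is typically used to zero-fill the tail of
 * a user buffer, e.g. to pad a short read out to a fixed record size.
 * Like the copy helpers, it returns the number of bytes left
 * unhandled.  zero_pad_user is a hypothetical name.
 */
static int zero_pad_user(char __user *buf, size_t copied, size_t total)
{
	if (copied < total && clear_user(buf + copied, total - copied))
		return -EFAULT;

	return 0;
}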

#if 0

/*
 * This code is disabled by default, but kept around in case the chosen
 * thresholds ever need to be revalidated.  A runtime-determined
 * threshold would imply some (small but nonzero) overhead, and so far
 * measurements on the targets concerned didn't show a worthwhile
 * variation.
 *
 * Note that a fairly precise sched_clock() implementation is needed
 * for the results to make sense.
 */
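
/*
 * [Editor's note, hypothetical usage] To re-run the benchmark, flip
 * the "#if 0" above to "#if 1" and rebuild; the timings are then
 * printed at boot via the subsys_initcall() registration below.
 */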

#include <linux/vmalloc.h>

static int __init test_size_threshold(void)
{
	struct page *src_page, *dst_page;
	void *user_ptr, *kernel_ptr;
	unsigned long long t0, t1, t2;
	int size, ret;

	ret = -ENOMEM;
	src_page = alloc_page(GFP_KERNEL);
	if (!src_page)
		goto no_src;
	dst_page = alloc_page(GFP_KERNEL);
	if (!dst_page)
		goto no_dst;
	kernel_ptr = page_address(src_page);
	user_ptr = vmap(&dst_page, 1, VM_IOREMAP, __pgprot(__P010));
	if (!user_ptr)
		goto no_vmap;

	/* warm up the src page dcache */
	ret = __copy_to_user_memcpy(user_ptr, kernel_ptr, PAGE_SIZE);

	for (size = PAGE_SIZE; size >= 4; size /= 2) {
		t0 = sched_clock();
		ret |= __copy_to_user_memcpy(user_ptr, kernel_ptr, size);
		t1 = sched_clock();
		ret |= __copy_to_user_std(user_ptr, kernel_ptr, size);
		t2 = sched_clock();
		printk("copy_to_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
	}

	for (size = PAGE_SIZE; size >= 4; size /= 2) {
		t0 = sched_clock();
		ret |= __clear_user_memset(user_ptr, size);
		t1 = sched_clock();
		ret |= __clear_user_std(user_ptr, size);
		t2 = sched_clock();
		printk("clear_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
	}

	if (ret)
		ret = -EFAULT;

	vunmap(user_ptr);
no_vmap:
	put_page(dst_page);
no_dst:
	put_page(src_page);
no_src:
	return ret;
}

subsys_initcall(test_size_threshold);

#endif
v3.5.6
The v3.5.6 version of this file is identical to the v3.1 listing above
except for one change in the include block, which gains
<linux/highmem.h>:

#include <linux/hardirq.h> /* for in_atomic() */
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <asm/current.h>
#include <asm/page.h>