Scraped source listing — apparently arch/arc/include/asm/uaccess.h (Linux v6.8).
Web-page navigation text removed; the per-listing line numbers embedded at the
start of each code line are an artifact of the source viewer, not of the file.
  1/* SPDX-License-Identifier: GPL-2.0-only */
  2/*
  3 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
  4 *
 
 
 
 
  5 * vineetg: June 2010
  6 *    -__clear_user( ) called multiple times during elf load was byte loop
  7 *    converted to do as much word clear as possible.
  8 *
  9 * vineetg: Dec 2009
 10 *    -Hand crafted constant propagation for "constant" copy sizes
 11 *    -stock kernel shrunk by 33K at -O3
 12 *
 13 * vineetg: Sept 2009
 14 *    -Added option to (UN)inline copy_(to|from)_user to reduce code sz
 15 *    -kernel shrunk by 200K even at -O3 (gcc 4.2.1)
 16 *    -Enabled when doing -Os
 17 *
 18 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
 19 */
 20
 21#ifndef _ASM_ARC_UACCESS_H
 22#define _ASM_ARC_UACCESS_H
 23
 
 
 24#include <linux/string.h>	/* for generic string functions */
 25
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 26/*********** Single byte/hword/word copies ******************/
 27
/*
 * __get_user_fn() - arch hook used by the generic get_user() machinery.
 * @sz: access size in bytes (1/2/4/8)
 * @u:  user-space source pointer
 * @k:  kernel-space destination pointer
 *
 * Dispatches to the single-access helper with the matching ARC load
 * mnemonic ("ldb"/"ldw"/"ld"); 8-byte accesses use the 64-bit variant.
 * Evaluates to 0 on success, -EFAULT on fault (set by the helper's
 * .fixup code).  An unsupported @sz performs no access and yields 0.
 */
 28#define __get_user_fn(sz, u, k)					\
 29({								\
 30	long __ret = 0;	/* success by default */	\
 31	switch (sz) {						\
 32	case 1: __arc_get_user_one(*(k), u, "ldb", __ret); break;	\
 33	case 2: __arc_get_user_one(*(k), u, "ldw", __ret); break;	\
 34	case 4: __arc_get_user_one(*(k), u, "ld", __ret);  break;	\
 35	case 8: __arc_get_user_one_64(*(k), u, __ret);     break;	\
 36	}							\
 37	__ret;							\
 38})
 39
 40/*
 41 * Returns 0 on success, -EFAULT if not.
 42 * @ret already contains 0 - given that errors will be less likely
 43 * (hence +r asm constraint below).
 44 * In case of error, fixup code will make it -EFAULT
 45 */
/*
 * Single user-space load with exception-table fixup.
 * "1:" is the faulting load; the __ex_table entry maps it to the
 * .fixup stub at "3:", which sets @ret = -EFAULT, zeroes @dst, and
 * resumes execution at "2:".  @ret is "+r" because it already holds 0
 * (the likely case) and is only written on the fault path.
 */
 46#define __arc_get_user_one(dst, src, op, ret)	\
 47	__asm__ __volatile__(                   \
 48	"1:	"op"    %1,[%2]\n"		\
 49	"2:	;nop\n"				\
 50	"	.section .fixup, \"ax\"\n"	\
 51	"	.align 4\n"			\
 52	"3:	# return -EFAULT\n"		\
 53	"	mov %0, %3\n"			\
 54	"	# zero out dst ptr\n"		\
 55	"	mov %1,  0\n"			\
 56	"	j   2b\n"			\
 57	"	.previous\n"			\
 58	"	.section __ex_table, \"a\"\n"	\
 59	"	.align 4\n"			\
 60	"	.word 1b,3b\n"			\
 61	"	.previous\n"			\
 62						\
 63	: "+r" (ret), "=r" (dst)		\
 64	: "r" (src), "ir" (-EFAULT))
 65
/*
 * 64-bit user-space load as two 32-bit loads ("1:" low word, "4:" high
 * word at offset 4), each with its own __ex_table entry pointing at the
 * shared fixup "3:" that sets @ret = -EFAULT and zeroes both halves.
 * %R1 selects the other register of the 64-bit pair backing operand 1
 * (presumably the high word — GCC ARC operand modifier; confirm against
 * the backend docs).
 */
 66#define __arc_get_user_one_64(dst, src, ret)	\
 67	__asm__ __volatile__(                   \
 68	"1:	ld   %1,[%2]\n"			\
 69	"4:	ld  %R1,[%2, 4]\n"		\
 70	"2:	;nop\n"				\
 71	"	.section .fixup, \"ax\"\n"	\
 72	"	.align 4\n"			\
 73	"3:	# return -EFAULT\n"		\
 74	"	mov %0, %3\n"			\
 75	"	# zero out dst ptr\n"		\
 76	"	mov %1,  0\n"			\
 77	"	mov %R1, 0\n"			\
 78	"	j   2b\n"			\
 79	"	.previous\n"			\
 80	"	.section __ex_table, \"a\"\n"	\
 81	"	.align 4\n"			\
 82	"	.word 1b,3b\n"			\
 83	"	.word 4b,3b\n"			\
 84	"	.previous\n"			\
 85						\
 86	: "+r" (ret), "=r" (dst)		\
 87	: "r" (src), "ir" (-EFAULT))
 88
/*
 * __put_user_fn() - arch hook used by the generic put_user() machinery.
 * @sz: access size in bytes (1/2/4/8)
 * @u:  user-space destination pointer
 * @k:  kernel-space source pointer
 *
 * Mirror of __get_user_fn(): dispatches to the single-store helper with
 * the matching ARC store mnemonic ("stb"/"stw"/"st").  Evaluates to 0
 * on success, -EFAULT on fault.
 */
 89#define __put_user_fn(sz, u, k)					\
 90({								\
 91	long __ret = 0;	/* success by default */	\
 92	switch (sz) {						\
 93	case 1: __arc_put_user_one(*(k), u, "stb", __ret); break;	\
 94	case 2: __arc_put_user_one(*(k), u, "stw", __ret); break;	\
 95	case 4: __arc_put_user_one(*(k), u, "st", __ret);  break;	\
 96	case 8: __arc_put_user_one_64(*(k), u, __ret);     break;	\
 97	}							\
 98	__ret;							\
 99})
100
/*
 * Single user-space store with exception-table fixup.
 * Unlike the load variant there is nothing to zero on fault: the fixup
 * at "3:" only sets @ret = -EFAULT, so @src and @dst are plain inputs
 * and @ret is the sole output.
 */
101#define __arc_put_user_one(src, dst, op, ret)	\
102	__asm__ __volatile__(                   \
103	"1:	"op"    %1,[%2]\n"		\
104	"2:	;nop\n"				\
105	"	.section .fixup, \"ax\"\n"	\
106	"	.align 4\n"			\
107	"3:	mov %0, %3\n"			\
108	"	j   2b\n"			\
109	"	.previous\n"			\
110	"	.section __ex_table, \"a\"\n"	\
111	"	.align 4\n"			\
112	"	.word 1b,3b\n"			\
113	"	.previous\n"			\
114						\
115	: "+r" (ret)				\
116	: "r" (src), "r" (dst), "ir" (-EFAULT))
117
/*
 * 64-bit user-space store as two 32-bit stores ("1:" low word, "4:"
 * high word at offset 4).  Either store may fault; both __ex_table
 * entries route to the shared fixup "3:" which sets @ret = -EFAULT.
 * %R1 names the other register of the pair backing operand 1 (see note
 * on __arc_get_user_one_64).
 */
118#define __arc_put_user_one_64(src, dst, ret)	\
119	__asm__ __volatile__(                   \
120	"1:	st   %1,[%2]\n"			\
121	"4:	st  %R1,[%2, 4]\n"		\
122	"2:	;nop\n"				\
123	"	.section .fixup, \"ax\"\n"	\
124	"	.align 4\n"			\
125	"3:	mov %0, %3\n"			\
126	"	j   2b\n"			\
127	"	.previous\n"			\
128	"	.section __ex_table, \"a\"\n"	\
129	"	.align 4\n"			\
130	"	.word 1b,3b\n"			\
131	"	.word 4b,3b\n"			\
132	"	.previous\n"			\
133						\
134	: "+r" (ret)				\
135	: "r" (src), "r" (dst), "ir" (-EFAULT))
136
137
/*
 * raw_copy_from_user() - copy @n bytes from user @from to kernel @to.
 * Returns the number of bytes NOT copied (0 on complete success).
 *
 * Three strategies:
 *  1. If hardware unaligned access is disabled and either pointer is
 *     misaligned, a byte-at-a-time zero-overhead loop (lp_count/lpnz).
 *  2. If @n is a compile-time constant, a "ladder" of fixed 16x/8/4/2/1
 *     byte stages guarded by constant conditions, so the compiler emits
 *     only the rungs that apply (the hand-crafted constant propagation
 *     described in the header changelog).
 *  3. Otherwise one asm ladder: 16-byte chunks in a zero-overhead loop,
 *     then 8/4/2/1-byte stragglers selected with bbit0 tests on n&0xf.
 *
 * Every faulting load carries an __ex_table entry whose fixup simply
 * jumps past the remaining copy, leaving the not-yet-copied count in
 * the result operand (%0).  ld.ab/st.ab are post-increment accesses,
 * advancing @from/@to as they go.
 *
 * NOTE(review): the final 1-byte rung of the constant ladder uses a
 * post-increment of 2 ("ldb.ab %3, [%2,2]"); harmless, as the pointers
 * are dead after that access — but worth confirming it is intentional.
 */
138static inline unsigned long
139raw_copy_from_user(void *to, const void __user *from, unsigned long n)
140{
141	long res = 0;
142	char val;
143	unsigned long tmp1, tmp2, tmp3, tmp4;
144	unsigned long orig_n = n;
145
146	if (n == 0)
147		return 0;
148
149	/* fallback for unaligned access when hardware doesn't support */
150	if (!IS_ENABLED(CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS) &&
151	     (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3))) {
152
153		unsigned char tmp;
154
155		__asm__ __volatile__ (
156		"	mov.f   lp_count, %0		\n"
157		"	lpnz 2f				\n"
158		"1:	ldb.ab  %1, [%3, 1]		\n"
159		"	stb.ab  %1, [%2, 1]		\n"
160		"	sub     %0,%0,1			\n"
161		"2:	;nop				\n"
162		"	.section .fixup, \"ax\"		\n"
163		"	.align 4			\n"
164		"3:	j   2b				\n"
165		"	.previous			\n"
166		"	.section __ex_table, \"a\"	\n"
167		"	.align 4			\n"
168		"	.word   1b, 3b			\n"
169		"	.previous			\n"
170
171		: "+r" (n),
172		/*
173		 * Note as an '&' earlyclobber operand to make sure the
174		 * temporary register inside the loop is not the same as
175		 *  FROM or TO.
176		*/
177		  "=&r" (tmp), "+r" (to), "+r" (from)
178		:
179		: "lp_count", "memory");
180
181		return n;
182	}
183
184	/*
185	 * Hand-crafted constant propagation to reduce code sz of the
186	 * laddered copy 16x,8,4,2,1
187	 */
188	if (__builtin_constant_p(orig_n)) {
189		res = orig_n;
190
191		if (orig_n / 16) {
192			orig_n = orig_n % 16;
193
194			__asm__ __volatile__(
195			"	lsr   lp_count, %7,4		\n"
196			"	lp    3f			\n"
197			"1:	ld.ab   %3, [%2, 4]		\n"
198			"11:	ld.ab   %4, [%2, 4]		\n"
199			"12:	ld.ab   %5, [%2, 4]		\n"
200			"13:	ld.ab   %6, [%2, 4]		\n"
201			"	st.ab   %3, [%1, 4]		\n"
202			"	st.ab   %4, [%1, 4]		\n"
203			"	st.ab   %5, [%1, 4]		\n"
204			"	st.ab   %6, [%1, 4]		\n"
205			"	sub     %0,%0,16		\n"
206			"3:	;nop				\n"
207			"	.section .fixup, \"ax\"		\n"
208			"	.align 4			\n"
209			"4:	j   3b				\n"
210			"	.previous			\n"
211			"	.section __ex_table, \"a\"	\n"
212			"	.align 4			\n"
213			"	.word   1b, 4b			\n"
214			"	.word   11b,4b			\n"
215			"	.word   12b,4b			\n"
216			"	.word   13b,4b			\n"
217			"	.previous			\n"
218			: "+r" (res), "+r"(to), "+r"(from),
219			  "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
220			: "ir"(n)
221			: "lp_count", "memory");
222		}
223		if (orig_n / 8) {
224			orig_n = orig_n % 8;
225
226			__asm__ __volatile__(
227			"14:	ld.ab   %3, [%2,4]		\n"
228			"15:	ld.ab   %4, [%2,4]		\n"
229			"	st.ab   %3, [%1,4]		\n"
230			"	st.ab   %4, [%1,4]		\n"
231			"	sub     %0,%0,8			\n"
232			"31:	;nop				\n"
233			"	.section .fixup, \"ax\"		\n"
234			"	.align 4			\n"
235			"4:	j   31b				\n"
236			"	.previous			\n"
237			"	.section __ex_table, \"a\"	\n"
238			"	.align 4			\n"
239			"	.word   14b,4b			\n"
240			"	.word   15b,4b			\n"
241			"	.previous			\n"
242			: "+r" (res), "+r"(to), "+r"(from),
243			  "=r"(tmp1), "=r"(tmp2)
244			:
245			: "memory");
246		}
247		if (orig_n / 4) {
248			orig_n = orig_n % 4;
249
250			__asm__ __volatile__(
251			"16:	ld.ab   %3, [%2,4]		\n"
252			"	st.ab   %3, [%1,4]		\n"
253			"	sub     %0,%0,4			\n"
254			"32:	;nop				\n"
255			"	.section .fixup, \"ax\"		\n"
256			"	.align 4			\n"
257			"4:	j   32b				\n"
258			"	.previous			\n"
259			"	.section __ex_table, \"a\"	\n"
260			"	.align 4			\n"
261			"	.word   16b,4b			\n"
262			"	.previous			\n"
263			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
264			:
265			: "memory");
266		}
267		if (orig_n / 2) {
268			orig_n = orig_n % 2;
269
270			__asm__ __volatile__(
271			"17:	ldw.ab   %3, [%2,2]		\n"
272			"	stw.ab   %3, [%1,2]		\n"
273			"	sub      %0,%0,2		\n"
274			"33:	;nop				\n"
275			"	.section .fixup, \"ax\"		\n"
276			"	.align 4			\n"
277			"4:	j   33b				\n"
278			"	.previous			\n"
279			"	.section __ex_table, \"a\"	\n"
280			"	.align 4			\n"
281			"	.word   17b,4b			\n"
282			"	.previous			\n"
283			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
284			:
285			: "memory");
286		}
287		if (orig_n & 1) {
288			__asm__ __volatile__(
289			"18:	ldb.ab   %3, [%2,2]		\n"
290			"	stb.ab   %3, [%1,2]		\n"
291			"	sub      %0,%0,1		\n"
292			"34:	; nop				\n"
293			"	.section .fixup, \"ax\"		\n"
294			"	.align 4			\n"
295			"4:	j   34b				\n"
296			"	.previous			\n"
297			"	.section __ex_table, \"a\"	\n"
298			"	.align 4			\n"
299			"	.word   18b,4b			\n"
300			"	.previous			\n"
301			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
302			:
303			: "memory");
304		}
305	} else {  /* n is NOT constant, so laddered copy of 16x,8,4,2,1  */
306
307		__asm__ __volatile__(
308		"	mov %0,%3			\n"
309		"	lsr.f   lp_count, %3,4		\n"  /* 16x bytes */
310		"	lpnz    3f			\n"
311		"1:	ld.ab   %5, [%2, 4]		\n"
312		"11:	ld.ab   %6, [%2, 4]		\n"
313		"12:	ld.ab   %7, [%2, 4]		\n"
314		"13:	ld.ab   %8, [%2, 4]		\n"
315		"	st.ab   %5, [%1, 4]		\n"
316		"	st.ab   %6, [%1, 4]		\n"
317		"	st.ab   %7, [%1, 4]		\n"
318		"	st.ab   %8, [%1, 4]		\n"
319		"	sub     %0,%0,16		\n"
320		"3:	and.f   %3,%3,0xf		\n"  /* stragglers */
321		"	bz      34f			\n"
322		"	bbit0   %3,3,31f		\n"  /* 8 bytes left */
323		"14:	ld.ab   %5, [%2,4]		\n"
324		"15:	ld.ab   %6, [%2,4]		\n"
325		"	st.ab   %5, [%1,4]		\n"
326		"	st.ab   %6, [%1,4]		\n"
327		"	sub.f   %0,%0,8			\n"
328		"31:	bbit0   %3,2,32f		\n"  /* 4 bytes left */
329		"16:	ld.ab   %5, [%2,4]		\n"
330		"	st.ab   %5, [%1,4]		\n"
331		"	sub.f   %0,%0,4			\n"
332		"32:	bbit0   %3,1,33f		\n"  /* 2 bytes left */
333		"17:	ldw.ab  %5, [%2,2]		\n"
334		"	stw.ab  %5, [%1,2]		\n"
335		"	sub.f   %0,%0,2			\n"
336		"33:	bbit0   %3,0,34f		\n"
337		"18:	ldb.ab  %5, [%2,1]		\n"  /* 1 byte left */
338		"	stb.ab  %5, [%1,1]		\n"
339		"	sub.f   %0,%0,1			\n"
340		"34:	;nop				\n"
341		"	.section .fixup, \"ax\"		\n"
342		"	.align 4			\n"
343		"4:	j   34b				\n"
344		"	.previous			\n"
345		"	.section __ex_table, \"a\"	\n"
346		"	.align 4			\n"
347		"	.word   1b, 4b			\n"
348		"	.word   11b,4b			\n"
349		"	.word   12b,4b			\n"
350		"	.word   13b,4b			\n"
351		"	.word   14b,4b			\n"
352		"	.word   15b,4b			\n"
353		"	.word   16b,4b			\n"
354		"	.word   17b,4b			\n"
355		"	.word   18b,4b			\n"
356		"	.previous			\n"
357		: "=r" (res), "+r"(to), "+r"(from), "+r"(n), "=r"(val),
358		  "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
359		:
360		: "lp_count", "memory");
361	}
362
363	return res;
364}
365
 
 
 
/*
 * raw_copy_to_user() - copy @n bytes from kernel @from to user @to.
 * Returns the number of bytes NOT copied (0 on complete success).
 *
 * Structurally the mirror of raw_copy_from_user(): a byte loop for the
 * software-unaligned case, a constant-propagated 16x/8/4/2/1 ladder for
 * compile-time-constant @n, and a run-time ladder otherwise.  The only
 * semantic difference is which side can fault: here the numbered labels
 * and __ex_table entries are on the user-space STORES (st/stw/stb),
 * while the kernel-side loads are unlabelled.  Each fixup jumps past
 * the rest of the copy, leaving the remaining count in %0.
 */
366static inline unsigned long
367raw_copy_to_user(void __user *to, const void *from, unsigned long n)
368{
369	long res = 0;
370	char val;
371	unsigned long tmp1, tmp2, tmp3, tmp4;
372	unsigned long orig_n = n;
373
374	if (n == 0)
375		return 0;
376
377	/* fallback for unaligned access when hardware doesn't support */
378	if (!IS_ENABLED(CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS) &&
379	     (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3))) {
380
381		unsigned char tmp;
382
383		__asm__ __volatile__(
384		"	mov.f   lp_count, %0		\n"
385		"	lpnz 3f				\n"
386		"	ldb.ab  %1, [%3, 1]		\n"
387		"1:	stb.ab  %1, [%2, 1]		\n"
388		"	sub     %0, %0, 1		\n"
389		"3:	;nop				\n"
390		"	.section .fixup, \"ax\"		\n"
391		"	.align 4			\n"
392		"4:	j   3b				\n"
393		"	.previous			\n"
394		"	.section __ex_table, \"a\"	\n"
395		"	.align 4			\n"
396		"	.word   1b, 4b			\n"
397		"	.previous			\n"
398
399		: "+r" (n),
400		/* Note as an '&' earlyclobber operand to make sure the
401		 * temporary register inside the loop is not the same as
402		 * FROM or TO.
403		 */
404		  "=&r" (tmp), "+r" (to), "+r" (from)
405		:
406		: "lp_count", "memory");
407
408		return n;
409	}
410
411	if (__builtin_constant_p(orig_n)) {
412		res = orig_n;
413
414		if (orig_n / 16) {
415			orig_n = orig_n % 16;
416
417			__asm__ __volatile__(
418			"	lsr lp_count, %7,4		\n"
419			"	lp  3f				\n"
420			"	ld.ab %3, [%2, 4]		\n"
421			"	ld.ab %4, [%2, 4]		\n"
422			"	ld.ab %5, [%2, 4]		\n"
423			"	ld.ab %6, [%2, 4]		\n"
424			"1:	st.ab %3, [%1, 4]		\n"
425			"11:	st.ab %4, [%1, 4]		\n"
426			"12:	st.ab %5, [%1, 4]		\n"
427			"13:	st.ab %6, [%1, 4]		\n"
428			"	sub   %0, %0, 16		\n"
429			"3:;nop					\n"
430			"	.section .fixup, \"ax\"		\n"
431			"	.align 4			\n"
432			"4:	j   3b				\n"
433			"	.previous			\n"
434			"	.section __ex_table, \"a\"	\n"
435			"	.align 4			\n"
436			"	.word   1b, 4b			\n"
437			"	.word   11b,4b			\n"
438			"	.word   12b,4b			\n"
439			"	.word   13b,4b			\n"
440			"	.previous			\n"
441			: "+r" (res), "+r"(to), "+r"(from),
442			  "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
443			: "ir"(n)
444			: "lp_count", "memory");
445		}
446		if (orig_n / 8) {
447			orig_n = orig_n % 8;
448
449			__asm__ __volatile__(
450			"	ld.ab   %3, [%2,4]		\n"
451			"	ld.ab   %4, [%2,4]		\n"
452			"14:	st.ab   %3, [%1,4]		\n"
453			"15:	st.ab   %4, [%1,4]		\n"
454			"	sub     %0, %0, 8		\n"
455			"31:;nop				\n"
456			"	.section .fixup, \"ax\"		\n"
457			"	.align 4			\n"
458			"4:	j   31b				\n"
459			"	.previous			\n"
460			"	.section __ex_table, \"a\"	\n"
461			"	.align 4			\n"
462			"	.word   14b,4b			\n"
463			"	.word   15b,4b			\n"
464			"	.previous			\n"
465			: "+r" (res), "+r"(to), "+r"(from),
466			  "=r"(tmp1), "=r"(tmp2)
467			:
468			: "memory");
469		}
470		if (orig_n / 4) {
471			orig_n = orig_n % 4;
472
473			__asm__ __volatile__(
474			"	ld.ab   %3, [%2,4]		\n"
475			"16:	st.ab   %3, [%1,4]		\n"
476			"	sub     %0, %0, 4		\n"
477			"32:;nop				\n"
478			"	.section .fixup, \"ax\"		\n"
479			"	.align 4			\n"
480			"4:	j   32b				\n"
481			"	.previous			\n"
482			"	.section __ex_table, \"a\"	\n"
483			"	.align 4			\n"
484			"	.word   16b,4b			\n"
485			"	.previous			\n"
486			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
487			:
488			: "memory");
489		}
490		if (orig_n / 2) {
491			orig_n = orig_n % 2;
492
493			__asm__ __volatile__(
494			"	ldw.ab    %3, [%2,2]		\n"
495			"17:	stw.ab    %3, [%1,2]		\n"
496			"	sub       %0, %0, 2		\n"
497			"33:;nop				\n"
498			"	.section .fixup, \"ax\"		\n"
499			"	.align 4			\n"
500			"4:	j   33b				\n"
501			"	.previous			\n"
502			"	.section __ex_table, \"a\"	\n"
503			"	.align 4			\n"
504			"	.word   17b,4b			\n"
505			"	.previous			\n"
506			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
507			:
508			: "memory");
509		}
510		if (orig_n & 1) {
511			__asm__ __volatile__(
512			"	ldb.ab  %3, [%2,1]		\n"
513			"18:	stb.ab  %3, [%1,1]		\n"
514			"	sub     %0, %0, 1		\n"
515			"34:	;nop				\n"
516			"	.section .fixup, \"ax\"		\n"
517			"	.align 4			\n"
518			"4:	j   34b				\n"
519			"	.previous			\n"
520			"	.section __ex_table, \"a\"	\n"
521			"	.align 4			\n"
522			"	.word   18b,4b			\n"
523			"	.previous			\n"
524			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
525			:
526			: "memory");
527		}
528	} else {  /* n is NOT constant, so laddered copy of 16x,8,4,2,1  */
529
530		__asm__ __volatile__(
531		"	mov   %0,%3			\n"
532		"	lsr.f lp_count, %3,4		\n"  /* 16x bytes */
533		"	lpnz  3f			\n"
534		"	ld.ab %5, [%2, 4]		\n"
535		"	ld.ab %6, [%2, 4]		\n"
536		"	ld.ab %7, [%2, 4]		\n"
537		"	ld.ab %8, [%2, 4]		\n"
538		"1:	st.ab %5, [%1, 4]		\n"
539		"11:	st.ab %6, [%1, 4]		\n"
540		"12:	st.ab %7, [%1, 4]		\n"
541		"13:	st.ab %8, [%1, 4]		\n"
542		"	sub   %0, %0, 16		\n"
543		"3:	and.f %3,%3,0xf			\n" /* stragglers */
544		"	bz 34f				\n"
545		"	bbit0   %3,3,31f		\n" /* 8 bytes left */
546		"	ld.ab   %5, [%2,4]		\n"
547		"	ld.ab   %6, [%2,4]		\n"
548		"14:	st.ab   %5, [%1,4]		\n"
549		"15:	st.ab   %6, [%1,4]		\n"
550		"	sub.f   %0, %0, 8		\n"
551		"31:	bbit0   %3,2,32f		\n"  /* 4 bytes left */
552		"	ld.ab   %5, [%2,4]		\n"
553		"16:	st.ab   %5, [%1,4]		\n"
554		"	sub.f   %0, %0, 4		\n"
555		"32:	bbit0 %3,1,33f			\n"  /* 2 bytes left */
556		"	ldw.ab    %5, [%2,2]		\n"
557		"17:	stw.ab    %5, [%1,2]		\n"
558		"	sub.f %0, %0, 2			\n"
559		"33:	bbit0 %3,0,34f			\n"
560		"	ldb.ab    %5, [%2,1]		\n"  /* 1 byte left */
561		"18:	stb.ab  %5, [%1,1]		\n"
562		"	sub.f %0, %0, 1			\n"
563		"34:	;nop				\n"
564		"	.section .fixup, \"ax\"		\n"
565		"	.align 4			\n"
566		"4:	j   34b				\n"
567		"	.previous			\n"
568		"	.section __ex_table, \"a\"	\n"
569		"	.align 4			\n"
570		"	.word   1b, 4b			\n"
571		"	.word   11b,4b			\n"
572		"	.word   12b,4b			\n"
573		"	.word   13b,4b			\n"
574		"	.word   14b,4b			\n"
575		"	.word   15b,4b			\n"
576		"	.word   16b,4b			\n"
577		"	.word   17b,4b			\n"
578		"	.word   18b,4b			\n"
579		"	.previous			\n"
580		: "=r" (res), "+r"(to), "+r"(from), "+r"(n), "=r"(val),
581		  "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
582		:
583		: "lp_count", "memory");
584	}
585
586	return res;
587}
588
/*
 * __clear_user() - zero @n bytes of user memory at @to.
 * Returns the number of bytes NOT cleared (0 on success).
 *
 * Aligns the pointer with an optional leading byte (75:) and halfword
 * (76:) store, clears word-at-a-time in a zero-overhead loop (77:),
 * then handles a trailing halfword (78:) and byte (79:).  The "i"(0)
 * input (%2) is the constant zero being stored.  All five faulting
 * stores share one fixup that bails straight to label 5, leaving the
 * remaining count in @res.
 *
 * NOTE(review): local label "3" is defined twice — once as the loop
 * exit ("3: bbit0 ...") and again as the fixup stub ("3: j 5b").  GAS
 * numeric local labels permit redefinition, and the "3b" references in
 * __ex_table bind to the nearest preceding definition, i.e. the fixup
 * stub — intentional, but worth knowing before touching the labels.
 */
589static inline unsigned long __clear_user(void __user *to, unsigned long n)
590{
591	long res = n;
592	unsigned char *d_char = to;
593
594	__asm__ __volatile__(
595	"	bbit0   %0, 0, 1f		\n"
596	"75:	stb.ab  %2, [%0,1]		\n"
597	"	sub %1, %1, 1			\n"
598	"1:	bbit0   %0, 1, 2f		\n"
599	"76:	stw.ab  %2, [%0,2]		\n"
600	"	sub %1, %1, 2			\n"
601	"2:	asr.f   lp_count, %1, 2		\n"
602	"	lpnz    3f			\n"
603	"77:	st.ab   %2, [%0,4]		\n"
604	"	sub %1, %1, 4			\n"
605	"3:	bbit0   %1, 1, 4f		\n"
606	"78:	stw.ab  %2, [%0,2]		\n"
607	"	sub %1, %1, 2			\n"
608	"4:	bbit0   %1, 0, 5f		\n"
609	"79:	stb.ab  %2, [%0,1]		\n"
610	"	sub %1, %1, 1			\n"
611	"5:					\n"
612	"	.section .fixup, \"ax\"		\n"
613	"	.align 4			\n"
614	"3:	j   5b				\n"
615	"	.previous			\n"
616	"	.section __ex_table, \"a\"	\n"
617	"	.align 4			\n"
618	"	.word   75b, 3b			\n"
619	"	.word   76b, 3b			\n"
620	"	.word   77b, 3b			\n"
621	"	.word   78b, 3b			\n"
622	"	.word   79b, 3b			\n"
623	"	.previous			\n"
624	: "+r"(d_char), "+r"(res)
625	: "i"(0)
626	: "lp_count", "memory");
































627
628	return res;
629}
630
631#define INLINE_COPY_TO_USER
632#define INLINE_COPY_FROM_USER
 
 
633
634#define __clear_user			__clear_user
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
635
636#include <asm-generic/uaccess.h>
 
 
637
638#endif
--- Second scraped listing: the same header as of Linux v4.10.11 (older duplicate kept from the source web page; its final function is cut off at the end of the capture). ---
  1/*
  2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
  3 *
  4 * This program is free software; you can redistribute it and/or modify
  5 * it under the terms of the GNU General Public License version 2 as
  6 * published by the Free Software Foundation.
  7 *
  8 * vineetg: June 2010
  9 *    -__clear_user( ) called multiple times during elf load was byte loop
 10 *    converted to do as much word clear as possible.
 11 *
 12 * vineetg: Dec 2009
 13 *    -Hand crafted constant propagation for "constant" copy sizes
 14 *    -stock kernel shrunk by 33K at -O3
 15 *
 16 * vineetg: Sept 2009
 17 *    -Added option to (UN)inline copy_(to|from)_user to reduce code sz
 18 *    -kernel shrunk by 200K even at -O3 (gcc 4.2.1)
 19 *    -Enabled when doing -Os
 20 *
 21 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
 22 */
 23
 24#ifndef _ASM_ARC_UACCESS_H
 25#define _ASM_ARC_UACCESS_H
 26
 27#include <linux/sched.h>
 28#include <asm/errno.h>
 29#include <linux/string.h>	/* for generic string functions */
 30
 31
/*
 * Address-range checking for user accesses (pre-set_fs-removal kernels).
 * __kernel_ok  - true when the current addr_limit is KERNEL_DS, i.e. the
 *                caller may access kernel addresses (no range check).
 * __user_ok    - range check rewritten for constant-folding; see below.
 * __access_ok  - fast path: either kernel context, or the user range ok.
 */
 32#define __kernel_ok		(segment_eq(get_fs(), KERNEL_DS))
 33
 34/*
 35 * Algorithmically, for __user_ok() we want to do:
 36 * 	(start < TASK_SIZE) && (start+len < TASK_SIZE)
 37 * where TASK_SIZE could either be retrieved from thread_info->addr_limit or
 38 * emitted directly in code.
 39 *
 40 * This can however be rewritten as follows:
 41 *	(len <= TASK_SIZE) && (start+len < TASK_SIZE)
 42 *
 43 * Because it essentially checks if buffer end is within limit and @len is
 44 * non-negative, which implies that buffer start will be within limit too.
 45 *
 46 * The reason for rewriting being, for majority of cases, @len is generally
 47 * compile time constant, causing first sub-expression to be compile time
 48 * subsumed.
 49 *
 50 * The second part would generate weird large LIMMs e.g. (0x6000_0000 - 0x10),
 51 * so we check for TASK_SIZE using get_fs() since the addr_limit load from mem
 52 * would already have been done at this call site for __kernel_ok()
 53 *
 54 */
 55#define __user_ok(addr, sz)	(((sz) <= TASK_SIZE) && \
 56				 ((addr) <= (get_fs() - (sz))))
 57#define __access_ok(addr, sz)	(unlikely(__kernel_ok) || \
 58				 likely(__user_ok((addr), (sz))))
 59
 60/*********** Single byte/hword/word copies ******************/
 61
/*
 * __get_user_fn() - arch hook for generic get_user(); dispatches to the
 * single-access helper by size (1/2/4/8 -> ldb/ldw/ld/64-bit pair).
 * Evaluates to 0 on success, -EFAULT on fault.
 * (v4.10.11 duplicate listing of the same helper as in the v6.8 copy.)
 */
 62#define __get_user_fn(sz, u, k)					\
 63({								\
 64	long __ret = 0;	/* success by default */	\
 65	switch (sz) {						\
 66	case 1: __arc_get_user_one(*(k), u, "ldb", __ret); break;	\
 67	case 2: __arc_get_user_one(*(k), u, "ldw", __ret); break;	\
 68	case 4: __arc_get_user_one(*(k), u, "ld", __ret);  break;	\
 69	case 8: __arc_get_user_one_64(*(k), u, __ret);     break;	\
 70	}							\
 71	__ret;							\
 72})
 73
 74/*
 75 * Returns 0 on success, -EFAULT if not.
 76 * @ret already contains 0 - given that errors will be less likely
 77 * (hence +r asm constraint below).
 78 * In case of error, fixup code will make it -EFAULT
 79 */
/*
 * Single user-space load with __ex_table fixup: on a fault at "1:" the
 * stub at "3:" sets @ret = -EFAULT, zeroes @dst, and resumes at "2:".
 * (v4.10.11 duplicate listing.)
 */
 80#define __arc_get_user_one(dst, src, op, ret)	\
 81	__asm__ __volatile__(                   \
 82	"1:	"op"    %1,[%2]\n"		\
 83	"2:	;nop\n"				\
 84	"	.section .fixup, \"ax\"\n"	\
 85	"	.align 4\n"			\
 86	"3:	# return -EFAULT\n"		\
 87	"	mov %0, %3\n"			\
 88	"	# zero out dst ptr\n"		\
 89	"	mov %1,  0\n"			\
 90	"	j   2b\n"			\
 91	"	.previous\n"			\
 92	"	.section __ex_table, \"a\"\n"	\
 93	"	.align 4\n"			\
 94	"	.word 1b,3b\n"			\
 95	"	.previous\n"			\
 96						\
 97	: "+r" (ret), "=r" (dst)		\
 98	: "r" (src), "ir" (-EFAULT))
 99
/*
 * 64-bit user load as two word loads ("1:" low, "4:" high at offset 4);
 * both map to the shared fixup "3:" which sets -EFAULT and zeroes both
 * halves (%R1 = other register of the 64-bit pair).
 * (v4.10.11 duplicate listing.)
 */
100#define __arc_get_user_one_64(dst, src, ret)	\
101	__asm__ __volatile__(                   \
102	"1:	ld   %1,[%2]\n"			\
103	"4:	ld  %R1,[%2, 4]\n"		\
104	"2:	;nop\n"				\
105	"	.section .fixup, \"ax\"\n"	\
106	"	.align 4\n"			\
107	"3:	# return -EFAULT\n"		\
108	"	mov %0, %3\n"			\
109	"	# zero out dst ptr\n"		\
110	"	mov %1,  0\n"			\
111	"	mov %R1, 0\n"			\
112	"	j   2b\n"			\
113	"	.previous\n"			\
114	"	.section __ex_table, \"a\"\n"	\
115	"	.align 4\n"			\
116	"	.word 1b,3b\n"			\
117	"	.word 4b,3b\n"			\
118	"	.previous\n"			\
119						\
120	: "+r" (ret), "=r" (dst)		\
121	: "r" (src), "ir" (-EFAULT))
122
/*
 * __put_user_fn() - arch hook for generic put_user(); mirror of
 * __get_user_fn() using the store mnemonics (stb/stw/st/64-bit pair).
 * Evaluates to 0 on success, -EFAULT on fault.
 * (v4.10.11 duplicate listing.)
 */
123#define __put_user_fn(sz, u, k)					\
124({								\
125	long __ret = 0;	/* success by default */	\
126	switch (sz) {						\
127	case 1: __arc_put_user_one(*(k), u, "stb", __ret); break;	\
128	case 2: __arc_put_user_one(*(k), u, "stw", __ret); break;	\
129	case 4: __arc_put_user_one(*(k), u, "st", __ret);  break;	\
130	case 8: __arc_put_user_one_64(*(k), u, __ret);     break;	\
131	}							\
132	__ret;							\
133})
134
/*
 * Single user-space store with __ex_table fixup; on fault the stub at
 * "3:" only sets @ret = -EFAULT (nothing to zero for a store).
 * (v4.10.11 duplicate listing.)
 */
135#define __arc_put_user_one(src, dst, op, ret)	\
136	__asm__ __volatile__(                   \
137	"1:	"op"    %1,[%2]\n"		\
138	"2:	;nop\n"				\
139	"	.section .fixup, \"ax\"\n"	\
140	"	.align 4\n"			\
141	"3:	mov %0, %3\n"			\
142	"	j   2b\n"			\
143	"	.previous\n"			\
144	"	.section __ex_table, \"a\"\n"	\
145	"	.align 4\n"			\
146	"	.word 1b,3b\n"			\
147	"	.previous\n"			\
148						\
149	: "+r" (ret)				\
150	: "r" (src), "r" (dst), "ir" (-EFAULT))
151
/*
 * 64-bit user store as two word stores ("1:" low, "4:" high at offset
 * 4); either may fault, both routed to fixup "3:" (@ret = -EFAULT).
 * (v4.10.11 duplicate listing.)
 */
152#define __arc_put_user_one_64(src, dst, ret)	\
153	__asm__ __volatile__(                   \
154	"1:	st   %1,[%2]\n"			\
155	"4:	st  %R1,[%2, 4]\n"		\
156	"2:	;nop\n"				\
157	"	.section .fixup, \"ax\"\n"	\
158	"	.align 4\n"			\
159	"3:	mov %0, %3\n"			\
160	"	j   2b\n"			\
161	"	.previous\n"			\
162	"	.section __ex_table, \"a\"\n"	\
163	"	.align 4\n"			\
164	"	.word 1b,3b\n"			\
165	"	.word 4b,3b\n"			\
166	"	.previous\n"			\
167						\
168	: "+r" (ret)				\
169	: "r" (src), "r" (dst), "ir" (-EFAULT))
170
171
/*
 * __arc_copy_from_user() - copy @n bytes from user @from to kernel @to;
 * returns bytes NOT copied.  v4.10.11 predecessor of the v6.8
 * raw_copy_from_user() listed earlier on this page.  Differences from
 * that version: the byte-loop fallback triggers on ANY misalignment
 * (no CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS gate), and the fallback's
 * clobber list also names "lp_start"/"lp_end" (the ZOL boundary regs).
 * Structure is otherwise identical: byte loop, constant-propagated
 * 16x/8/4/2/1 ladder, or run-time ladder, with __ex_table fixups that
 * jump past the copy leaving the remaining count in %0.
 */
172static inline unsigned long
173__arc_copy_from_user(void *to, const void __user *from, unsigned long n)
174{
175	long res = 0;
176	char val;
177	unsigned long tmp1, tmp2, tmp3, tmp4;
178	unsigned long orig_n = n;
179
180	if (n == 0)
181		return 0;
182
183	/* unaligned */
184	if (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3)) {

185
186		unsigned char tmp;
187
188		__asm__ __volatile__ (
189		"	mov.f   lp_count, %0		\n"
190		"	lpnz 2f				\n"
191		"1:	ldb.ab  %1, [%3, 1]		\n"
192		"	stb.ab  %1, [%2, 1]		\n"
193		"	sub     %0,%0,1			\n"
194		"2:	;nop				\n"
195		"	.section .fixup, \"ax\"		\n"
196		"	.align 4			\n"
197		"3:	j   2b				\n"
198		"	.previous			\n"
199		"	.section __ex_table, \"a\"	\n"
200		"	.align 4			\n"
201		"	.word   1b, 3b			\n"
202		"	.previous			\n"
203
204		: "+r" (n),
205		/*
206		 * Note as an '&' earlyclobber operand to make sure the
207		 * temporary register inside the loop is not the same as
208		 *  FROM or TO.
209		*/
210		  "=&r" (tmp), "+r" (to), "+r" (from)
211		:
212		: "lp_count", "lp_start", "lp_end", "memory");
213
214		return n;
215	}
216
217	/*
218	 * Hand-crafted constant propagation to reduce code sz of the
219	 * laddered copy 16x,8,4,2,1
220	 */
221	if (__builtin_constant_p(orig_n)) {
222		res = orig_n;
223
224		if (orig_n / 16) {
225			orig_n = orig_n % 16;
226
227			__asm__ __volatile__(
228			"	lsr   lp_count, %7,4		\n"
229			"	lp    3f			\n"
230			"1:	ld.ab   %3, [%2, 4]		\n"
231			"11:	ld.ab   %4, [%2, 4]		\n"
232			"12:	ld.ab   %5, [%2, 4]		\n"
233			"13:	ld.ab   %6, [%2, 4]		\n"
234			"	st.ab   %3, [%1, 4]		\n"
235			"	st.ab   %4, [%1, 4]		\n"
236			"	st.ab   %5, [%1, 4]		\n"
237			"	st.ab   %6, [%1, 4]		\n"
238			"	sub     %0,%0,16		\n"
239			"3:	;nop				\n"
240			"	.section .fixup, \"ax\"		\n"
241			"	.align 4			\n"
242			"4:	j   3b				\n"
243			"	.previous			\n"
244			"	.section __ex_table, \"a\"	\n"
245			"	.align 4			\n"
246			"	.word   1b, 4b			\n"
247			"	.word   11b,4b			\n"
248			"	.word   12b,4b			\n"
249			"	.word   13b,4b			\n"
250			"	.previous			\n"
251			: "+r" (res), "+r"(to), "+r"(from),
252			  "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
253			: "ir"(n)
254			: "lp_count", "memory");
255		}
256		if (orig_n / 8) {
257			orig_n = orig_n % 8;
258
259			__asm__ __volatile__(
260			"14:	ld.ab   %3, [%2,4]		\n"
261			"15:	ld.ab   %4, [%2,4]		\n"
262			"	st.ab   %3, [%1,4]		\n"
263			"	st.ab   %4, [%1,4]		\n"
264			"	sub     %0,%0,8			\n"
265			"31:	;nop				\n"
266			"	.section .fixup, \"ax\"		\n"
267			"	.align 4			\n"
268			"4:	j   31b				\n"
269			"	.previous			\n"
270			"	.section __ex_table, \"a\"	\n"
271			"	.align 4			\n"
272			"	.word   14b,4b			\n"
273			"	.word   15b,4b			\n"
274			"	.previous			\n"
275			: "+r" (res), "+r"(to), "+r"(from),
276			  "=r"(tmp1), "=r"(tmp2)
277			:
278			: "memory");
279		}
280		if (orig_n / 4) {
281			orig_n = orig_n % 4;
282
283			__asm__ __volatile__(
284			"16:	ld.ab   %3, [%2,4]		\n"
285			"	st.ab   %3, [%1,4]		\n"
286			"	sub     %0,%0,4			\n"
287			"32:	;nop				\n"
288			"	.section .fixup, \"ax\"		\n"
289			"	.align 4			\n"
290			"4:	j   32b				\n"
291			"	.previous			\n"
292			"	.section __ex_table, \"a\"	\n"
293			"	.align 4			\n"
294			"	.word   16b,4b			\n"
295			"	.previous			\n"
296			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
297			:
298			: "memory");
299		}
300		if (orig_n / 2) {
301			orig_n = orig_n % 2;
302
303			__asm__ __volatile__(
304			"17:	ldw.ab   %3, [%2,2]		\n"
305			"	stw.ab   %3, [%1,2]		\n"
306			"	sub      %0,%0,2		\n"
307			"33:	;nop				\n"
308			"	.section .fixup, \"ax\"		\n"
309			"	.align 4			\n"
310			"4:	j   33b				\n"
311			"	.previous			\n"
312			"	.section __ex_table, \"a\"	\n"
313			"	.align 4			\n"
314			"	.word   17b,4b			\n"
315			"	.previous			\n"
316			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
317			:
318			: "memory");
319		}
320		if (orig_n & 1) {
321			__asm__ __volatile__(
322			"18:	ldb.ab   %3, [%2,2]		\n"
323			"	stb.ab   %3, [%1,2]		\n"
324			"	sub      %0,%0,1		\n"
325			"34:	; nop				\n"
326			"	.section .fixup, \"ax\"		\n"
327			"	.align 4			\n"
328			"4:	j   34b				\n"
329			"	.previous			\n"
330			"	.section __ex_table, \"a\"	\n"
331			"	.align 4			\n"
332			"	.word   18b,4b			\n"
333			"	.previous			\n"
334			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
335			:
336			: "memory");
337		}
338	} else {  /* n is NOT constant, so laddered copy of 16x,8,4,2,1  */
339
340		__asm__ __volatile__(
341		"	mov %0,%3			\n"
342		"	lsr.f   lp_count, %3,4		\n"  /* 16x bytes */
343		"	lpnz    3f			\n"
344		"1:	ld.ab   %5, [%2, 4]		\n"
345		"11:	ld.ab   %6, [%2, 4]		\n"
346		"12:	ld.ab   %7, [%2, 4]		\n"
347		"13:	ld.ab   %8, [%2, 4]		\n"
348		"	st.ab   %5, [%1, 4]		\n"
349		"	st.ab   %6, [%1, 4]		\n"
350		"	st.ab   %7, [%1, 4]		\n"
351		"	st.ab   %8, [%1, 4]		\n"
352		"	sub     %0,%0,16		\n"
353		"3:	and.f   %3,%3,0xf		\n"  /* stragglers */
354		"	bz      34f			\n"
355		"	bbit0   %3,3,31f		\n"  /* 8 bytes left */
356		"14:	ld.ab   %5, [%2,4]		\n"
357		"15:	ld.ab   %6, [%2,4]		\n"
358		"	st.ab   %5, [%1,4]		\n"
359		"	st.ab   %6, [%1,4]		\n"
360		"	sub.f   %0,%0,8			\n"
361		"31:	bbit0   %3,2,32f		\n"  /* 4 bytes left */
362		"16:	ld.ab   %5, [%2,4]		\n"
363		"	st.ab   %5, [%1,4]		\n"
364		"	sub.f   %0,%0,4			\n"
365		"32:	bbit0   %3,1,33f		\n"  /* 2 bytes left */
366		"17:	ldw.ab  %5, [%2,2]		\n"
367		"	stw.ab  %5, [%1,2]		\n"
368		"	sub.f   %0,%0,2			\n"
369		"33:	bbit0   %3,0,34f		\n"
370		"18:	ldb.ab  %5, [%2,1]		\n"  /* 1 byte left */
371		"	stb.ab  %5, [%1,1]		\n"
372		"	sub.f   %0,%0,1			\n"
373		"34:	;nop				\n"
374		"	.section .fixup, \"ax\"		\n"
375		"	.align 4			\n"
376		"4:	j   34b				\n"
377		"	.previous			\n"
378		"	.section __ex_table, \"a\"	\n"
379		"	.align 4			\n"
380		"	.word   1b, 4b			\n"
381		"	.word   11b,4b			\n"
382		"	.word   12b,4b			\n"
383		"	.word   13b,4b			\n"
384		"	.word   14b,4b			\n"
385		"	.word   15b,4b			\n"
386		"	.word   16b,4b			\n"
387		"	.word   17b,4b			\n"
388		"	.word   18b,4b			\n"
389		"	.previous			\n"
390		: "=r" (res), "+r"(to), "+r"(from), "+r"(n), "=r"(val),
391		  "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
392		:
393		: "lp_count", "memory");
394	}
395
396	return res;
397}
398
399extern unsigned long slowpath_copy_to_user(void __user *to, const void *from,
400					   unsigned long n);
401
/*
 * Copy @n bytes from kernel memory @from to user memory @to.
 *
 * Returns the number of bytes that could NOT be copied (0 on complete
 * success), per the raw copy_to_user() contract.  A faulting user-space
 * store is caught via the __ex_table/.fixup machinery below, which jumps
 * past the rest of the copy so the residual byte count is returned.
 *
 * Three paths:
 *  - unaligned src or dst: simple byte-copy loop;
 *  - aligned with compile-time-constant @n: the if-ladder below is
 *    constant-folded by the compiler so only the needed chunk copies
 *    survive (the "constant propagation" noted in the file header);
 *  - aligned with runtime @n: one asm blob doing a 16-bytes-per-iteration
 *    loop followed by 8/4/2/1-byte straggler handling.
 */
static inline unsigned long
__arc_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	long res = 0;
	/*
	 * NOTE(review): @val is listed as asm output %4 in the non-constant
	 * path but never referenced in that template — apparently a scratch
	 * placeholder kept so operand numbering %5..%8 lines up.
	 */
	char val;
	unsigned long tmp1, tmp2, tmp3, tmp4;
	unsigned long orig_n = n;

	if (n == 0)
		return 0;

	/* unaligned */
	if (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3)) {

		unsigned char tmp;

		/*
		 * Byte loop using the hardware loop mechanism (lp_count).
		 * Only the user-space store at local label 1 gets an
		 * exception-table entry; the kernel-space load cannot fault.
		 * On a fault, fixup label 4 jumps to 3 (loop exit) and the
		 * not-yet-decremented %0 (= remaining bytes) is returned.
		 */
		__asm__ __volatile__(
		"	mov.f   lp_count, %0		\n"
		"	lpnz 3f				\n"
		"	ldb.ab  %1, [%3, 1]		\n"
		"1:	stb.ab  %1, [%2, 1]		\n"
		"	sub     %0, %0, 1		\n"
		"3:	;nop				\n"
		"	.section .fixup, \"ax\"		\n"
		"	.align 4			\n"
		"4:	j   3b				\n"
		"	.previous			\n"
		"	.section __ex_table, \"a\"	\n"
		"	.align 4			\n"
		"	.word   1b, 4b			\n"
		"	.previous			\n"

		: "+r" (n),
		/* Note as an '&' earlyclobber operand to make sure the
		 * temporary register inside the loop is not the same as
		 * FROM or TO.
		 */
		  "=&r" (tmp), "+r" (to), "+r" (from)
		:
		: "lp_count", "lp_start", "lp_end", "memory");

		return n;
	}

	if (__builtin_constant_p(orig_n)) {
		/* start pessimistic: everything uncopied until proven done */
		res = orig_n;

		if (orig_n / 16) {
			/* 16-byte chunks: 4 word loads + 4 word stores/iter */
			orig_n = orig_n % 16;

			__asm__ __volatile__(
			"	lsr lp_count, %7,4		\n"
			"	lp  3f				\n"
			"	ld.ab %3, [%2, 4]		\n"
			"	ld.ab %4, [%2, 4]		\n"
			"	ld.ab %5, [%2, 4]		\n"
			"	ld.ab %6, [%2, 4]		\n"
			"1:	st.ab %3, [%1, 4]		\n"
			"11:	st.ab %4, [%1, 4]		\n"
			"12:	st.ab %5, [%1, 4]		\n"
			"13:	st.ab %6, [%1, 4]		\n"
			"	sub   %0, %0, 16		\n"
			"3:;nop					\n"
			"	.section .fixup, \"ax\"		\n"
			"	.align 4			\n"
			"4:	j   3b				\n"
			"	.previous			\n"
			"	.section __ex_table, \"a\"	\n"
			"	.align 4			\n"
			"	.word   1b, 4b			\n"
			"	.word   11b,4b			\n"
			"	.word   12b,4b			\n"
			"	.word   13b,4b			\n"
			"	.previous			\n"
			: "+r" (res), "+r"(to), "+r"(from),
			  "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
			: "ir"(n)
			: "lp_count", "memory");
		}
		/* remaining chunk sizes: at most one pass through each */
		if (orig_n / 8) {
			orig_n = orig_n % 8;

			__asm__ __volatile__(
			"	ld.ab   %3, [%2,4]		\n"
			"	ld.ab   %4, [%2,4]		\n"
			"14:	st.ab   %3, [%1,4]		\n"
			"15:	st.ab   %4, [%1,4]		\n"
			"	sub     %0, %0, 8		\n"
			"31:;nop				\n"
			"	.section .fixup, \"ax\"		\n"
			"	.align 4			\n"
			"4:	j   31b				\n"
			"	.previous			\n"
			"	.section __ex_table, \"a\"	\n"
			"	.align 4			\n"
			"	.word   14b,4b			\n"
			"	.word   15b,4b			\n"
			"	.previous			\n"
			: "+r" (res), "+r"(to), "+r"(from),
			  "=r"(tmp1), "=r"(tmp2)
			:
			: "memory");
		}
		if (orig_n / 4) {
			orig_n = orig_n % 4;

			__asm__ __volatile__(
			"	ld.ab   %3, [%2,4]		\n"
			"16:	st.ab   %3, [%1,4]		\n"
			"	sub     %0, %0, 4		\n"
			"32:;nop				\n"
			"	.section .fixup, \"ax\"		\n"
			"	.align 4			\n"
			"4:	j   32b				\n"
			"	.previous			\n"
			"	.section __ex_table, \"a\"	\n"
			"	.align 4			\n"
			"	.word   16b,4b			\n"
			"	.previous			\n"
			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
			:
			: "memory");
		}
		if (orig_n / 2) {
			orig_n = orig_n % 2;

			__asm__ __volatile__(
			"	ldw.ab    %3, [%2,2]		\n"
			"17:	stw.ab    %3, [%1,2]		\n"
			"	sub       %0, %0, 2		\n"
			"33:;nop				\n"
			"	.section .fixup, \"ax\"		\n"
			"	.align 4			\n"
			"4:	j   33b				\n"
			"	.previous			\n"
			"	.section __ex_table, \"a\"	\n"
			"	.align 4			\n"
			"	.word   17b,4b			\n"
			"	.previous			\n"
			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
			:
			: "memory");
		}
		if (orig_n & 1) {
			__asm__ __volatile__(
			"	ldb.ab  %3, [%2,1]		\n"
			"18:	stb.ab  %3, [%1,1]		\n"
			"	sub     %0, %0, 1		\n"
			"34:	;nop				\n"
			"	.section .fixup, \"ax\"		\n"
			"	.align 4			\n"
			"4:	j   34b				\n"
			"	.previous			\n"
			"	.section __ex_table, \"a\"	\n"
			"	.align 4			\n"
			"	.word   18b,4b			\n"
			"	.previous			\n"
			: "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
			:
			: "memory");
		}
	} else {  /* n is NOT constant, so laddered copy of 16x,8,4,2,1  */

		/*
		 * Single asm blob: a 16-bytes-per-iteration hardware loop,
		 * then bbit0 tests on the low 4 bits of @n select the
		 * 8/4/2/1-byte tail copies.  Every user-space store label
		 * (1,11..18) has an __ex_table entry pointing at fixup 4,
		 * which bails to 34 (the end) leaving the residue in %0.
		 */
		__asm__ __volatile__(
		"	mov   %0,%3			\n"
		"	lsr.f lp_count, %3,4		\n"  /* 16x bytes */
		"	lpnz  3f			\n"
		"	ld.ab %5, [%2, 4]		\n"
		"	ld.ab %6, [%2, 4]		\n"
		"	ld.ab %7, [%2, 4]		\n"
		"	ld.ab %8, [%2, 4]		\n"
		"1:	st.ab %5, [%1, 4]		\n"
		"11:	st.ab %6, [%1, 4]		\n"
		"12:	st.ab %7, [%1, 4]		\n"
		"13:	st.ab %8, [%1, 4]		\n"
		"	sub   %0, %0, 16		\n"
		"3:	and.f %3,%3,0xf			\n" /* stragglers */
		"	bz 34f				\n"
		"	bbit0   %3,3,31f		\n" /* 8 bytes left */
		"	ld.ab   %5, [%2,4]		\n"
		"	ld.ab   %6, [%2,4]		\n"
		"14:	st.ab   %5, [%1,4]		\n"
		"15:	st.ab   %6, [%1,4]		\n"
		"	sub.f   %0, %0, 8		\n"
		"31:	bbit0   %3,2,32f		\n"  /* 4 bytes left */
		"	ld.ab   %5, [%2,4]		\n"
		"16:	st.ab   %5, [%1,4]		\n"
		"	sub.f   %0, %0, 4		\n"
		"32:	bbit0 %3,1,33f			\n"  /* 2 bytes left */
		"	ldw.ab    %5, [%2,2]		\n"
		"17:	stw.ab    %5, [%1,2]		\n"
		"	sub.f %0, %0, 2			\n"
		"33:	bbit0 %3,0,34f			\n"
		"	ldb.ab    %5, [%2,1]		\n"  /* 1 byte left */
		"18:	stb.ab  %5, [%1,1]		\n"
		"	sub.f %0, %0, 1			\n"
		"34:	;nop				\n"
		"	.section .fixup, \"ax\"		\n"
		"	.align 4			\n"
		"4:	j   34b				\n"
		"	.previous			\n"
		"	.section __ex_table, \"a\"	\n"
		"	.align 4			\n"
		"	.word   1b, 4b			\n"
		"	.word   11b,4b			\n"
		"	.word   12b,4b			\n"
		"	.word   13b,4b			\n"
		"	.word   14b,4b			\n"
		"	.word   15b,4b			\n"
		"	.word   16b,4b			\n"
		"	.word   17b,4b			\n"
		"	.word   18b,4b			\n"
		"	.previous			\n"
		: "=r" (res), "+r"(to), "+r"(from), "+r"(n), "=r"(val),
		  "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
		:
		: "lp_count", "memory");
	}

	return res;
}
623
/*
 * Zero @n bytes of user memory at @to.
 *
 * Returns the number of bytes NOT cleared (0 on success); on a faulting
 * user store the fixup jumps to the end and the remaining count in @res
 * is returned.
 *
 * Strategy (the word-loop conversion mentioned in the file header):
 * byte then halfword stores until the destination pointer is 4-byte
 * aligned, a hardware loop of word stores for the bulk, then halfword
 * and byte stores for the tail selected by the low bits of the count.
 * The source value for every store is asm input %2 = "i"(0).
 *
 * NOTE(review): local label "3:" appears twice on purpose — once as the
 * loop-exit label and once inside .fixup.  The "3b" references in the
 * __ex_table entries resolve backward to the nearer .fixup copy, as GAS
 * numeric local labels always bind to the closest definition.
 */
static inline unsigned long __arc_clear_user(void __user *to, unsigned long n)
{
	long res = n;
	unsigned char *d_char = to;

	__asm__ __volatile__(
	"	bbit0   %0, 0, 1f		\n"
	"75:	stb.ab  %2, [%0,1]		\n"
	"	sub %1, %1, 1			\n"
	"1:	bbit0   %0, 1, 2f		\n"
	"76:	stw.ab  %2, [%0,2]		\n"
	"	sub %1, %1, 2			\n"
	"2:	asr.f   lp_count, %1, 2		\n"
	"	lpnz    3f			\n"
	"77:	st.ab   %2, [%0,4]		\n"
	"	sub %1, %1, 4			\n"
	"3:	bbit0   %1, 1, 4f		\n"
	"78:	stw.ab  %2, [%0,2]		\n"
	"	sub %1, %1, 2			\n"
	"4:	bbit0   %1, 0, 5f		\n"
	"79:	stb.ab  %2, [%0,1]		\n"
	"	sub %1, %1, 1			\n"
	"5:					\n"
	"	.section .fixup, \"ax\"		\n"
	"	.align 4			\n"
	"3:	j   5b				\n"
	"	.previous			\n"
	"	.section __ex_table, \"a\"	\n"
	"	.align 4			\n"
	"	.word   75b, 3b			\n"
	"	.word   76b, 3b			\n"
	"	.word   77b, 3b			\n"
	"	.word   78b, 3b			\n"
	"	.word   79b, 3b			\n"
	"	.previous			\n"
	: "+r"(d_char), "+r"(res)
	: "i"(0)
	: "lp_count", "lp_start", "lp_end", "memory");

	return res;
}
665
/*
 * Copy a NUL-terminated string from user memory @src to kernel buffer
 * @dst, copying at most @count bytes.
 *
 * Returns the number of non-NUL bytes copied, or -EFAULT if a user load
 * faulted (set by the fixup at label 4).  The loop bound @count is bound
 * directly to the hardware loop counter via the "l" (lp_count register)
 * constraint, so the `lp 3f` loop runs at most @count iterations; a NUL
 * byte exits early via breq.d, whose delay slot still stores the NUL to
 * @dst.
 */
static inline long
__arc_strncpy_from_user(char *dst, const char __user *src, long count)
{
	long res = 0;
	char val;

	if (count == 0)
		return 0;

	__asm__ __volatile__(
	"	lp	3f			\n"
	"1:	ldb.ab  %3, [%2, 1]		\n"
	"	breq.d	%3, 0, 3f               \n"
	"	stb.ab  %3, [%1, 1]		\n"
	"	add	%0, %0, 1	# Num of NON NULL bytes copied	\n"
	"3:								\n"
	"	.section .fixup, \"ax\"		\n"
	"	.align 4			\n"
	"4:	mov %0, %4		# sets @res as -EFAULT	\n"
	"	j   3b				\n"
	"	.previous			\n"
	"	.section __ex_table, \"a\"	\n"
	"	.align 4			\n"
	"	.word   1b, 4b			\n"
	"	.previous			\n"
	: "+r"(res), "+r"(dst), "+r"(src), "=r"(val)
	: "g"(-EFAULT), "l"(count)
	: "memory");

	return res;
}
697
/*
 * Return the length of a user-space string at @s, scanning at most @n
 * bytes.
 *
 * Return value (from tracing the asm):
 *  - NUL found within @n bytes: string length INCLUDING the NUL
 *    (the breq.d delay slot decrements @cnt once more before the
 *    `res = n - cnt` at label 2);
 *  - no NUL within @n bytes: n + 1, via the extra `sub %2, %2, 1`
 *    on loop exhaustion — the legacy strnlen_user() "not terminated"
 *    signal;
 *  - faulting user load: 0, set by the fixup at label 4.
 */
static inline long __arc_strnlen_user(const char __user *s, long n)
{
	long res, tmp1, cnt;
	char val;

	__asm__ __volatile__(
	"	mov %2, %1			\n"
	"1:	ldb.ab  %3, [%0, 1]		\n"
	"	breq.d  %3, 0, 2f		\n"
	"	sub.f   %2, %2, 1		\n"
	"	bnz 1b				\n"
	"	sub %2, %2, 1			\n"
	"2:	sub %0, %1, %2			\n"
	"3:	;nop				\n"
	"	.section .fixup, \"ax\"		\n"
	"	.align 4			\n"
	"4:	mov %0, 0			\n"
	"	j   3b				\n"
	"	.previous			\n"
	"	.section __ex_table, \"a\"	\n"
	"	.align 4			\n"
	"	.word 1b, 4b			\n"
	"	.previous			\n"
	: "=r"(res), "=r"(tmp1), "=r"(cnt), "=r"(val)
	: "0"(s), "1"(n)
	: "memory");

	return res;
}
727
/*
 * Route the __copy_*/__clear/__str* entry points either to the inline
 * implementations above (optimizing for speed) or, under -Os, to single
 * out-of-line copies — the "(UN)inline" option from the file header that
 * saved ~200K of kernel text.
 */
#ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
#define __copy_from_user(t, f, n)	__arc_copy_from_user(t, f, n)
#define __copy_to_user(t, f, n)		__arc_copy_to_user(t, f, n)
#define __clear_user(d, n)		__arc_clear_user(d, n)
#define __strncpy_from_user(d, s, n)	__arc_strncpy_from_user(d, s, n)
#define __strnlen_user(s, n)		__arc_strnlen_user(s, n)
#else
/* Out-of-line wrappers; defined once in a .c file elsewhere in arch/arc */
extern long arc_copy_from_user_noinline(void *to, const void __user * from,
		unsigned long n);
extern long arc_copy_to_user_noinline(void __user *to, const void *from,
		unsigned long n);
extern unsigned long arc_clear_user_noinline(void __user *to,
		unsigned long n);
extern long arc_strncpy_from_user_noinline (char *dst, const char __user *src,
		long count);
extern long arc_strnlen_user_noinline(const char __user *src, long n);

#define __copy_from_user(t, f, n)	arc_copy_from_user_noinline(t, f, n)
#define __copy_to_user(t, f, n)		arc_copy_to_user_noinline(t, f, n)
#define __clear_user(d, n)		arc_clear_user_noinline(d, n)
#define __strncpy_from_user(d, s, n)	arc_strncpy_from_user_noinline(d, s, n)
#define __strnlen_user(s, n)		arc_strnlen_user_noinline(s, n)

#endif
752
753#include <asm-generic/uaccess.h>
754
755extern int fixup_exception(struct pt_regs *regs);
756
757#endif