v3.1
  1/*
  2 * Handle unaligned accesses by emulation.
  3 *
  4 * This file is subject to the terms and conditions of the GNU General Public
  5 * License.  See the file "COPYING" in the main directory of this archive
  6 * for more details.
  7 *
  8 * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
  9 * Copyright (C) 1999 Silicon Graphics, Inc.
 10 *
 11 * This file contains exception handler for address error exception with the
 12 * special capability to execute faulting instructions in software.  The
 13 * handler does not try to handle the case when the program counter points
 14 * to an address not aligned to a word boundary.
 15 *
 16 * Putting data to unaligned addresses is a bad practice even on Intel where
 17 * only the performance is affected.  Much worse is that such code is non-
 18 * portable.  Due to several programs that die on MIPS due to alignment
 19 * problems I decided to implement this handler anyway though I originally
 20 * didn't intend to do this at all for user code.
 21 *
 22 * For now I enable fixing of address errors by default to make life easier.
 23 * I however intend to disable this somewhen in the future when the alignment
 24 * problems with user programs have been fixed.  For programmers this is the
 25 * right way to go.
 26 *
 27 * Fixing address errors is a per process option.  The option is inherited
 28 * across fork(2) and execve(2) calls.  If you really want to use the
 29 * option in your user programs - I discourage the use of the software
 30 * emulation strongly - use the following code in your userland stuff:
 31 *
 32 * #include <sys/sysmips.h>
 33 *
 34 * ...
 35 * sysmips(MIPS_FIXADE, x);
 36 * ...
 37 *
 38 * The argument x is 0 for disabling software emulation, enabled otherwise.
 39 *
 40 * Below a little program to play around with this feature.
 41 *
 42 * #include <stdio.h>
 43 * #include <sys/sysmips.h>
 44 *
 45 * struct foo {
 46 *         unsigned char bar[8];
 47 * };
 48 *
 49 * main(int argc, char *argv[])
 50 * {
 51 *         struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
 52 *         unsigned int *p = (unsigned int *) (x.bar + 3);
 53 *         int i;
 54 *
 55 *         if (argc > 1)
 56 *                 sysmips(MIPS_FIXADE, atoi(argv[1]));
 57 *
 58 *         printf("*p = %08lx\n", *p);
 59 *
 60 *         *p = 0xdeadface;
 61 *
 62 *         for(i = 0; i <= 7; i++)
 63 *         printf("%02x ", x.bar[i]);
 64 *         printf("\n");
 65 * }
 66 *
 67 * Coprocessor loads are not supported; I think this case is unimportant
 68 * in the practice.
 69 *
 70 * TODO: Handle ndc (attempted store to doubleword in uncached memory)
 71 *       exception for the R6000.
 72 *       A store crossing a page boundary might be executed only partially.
 73 *       Undo the partial store in this case.
 74 */
 75#include <linux/mm.h>
 76#include <linux/module.h>
 77#include <linux/signal.h>
 78#include <linux/smp.h>
 79#include <linux/sched.h>
 80#include <linux/debugfs.h>
 81#include <linux/perf_event.h>
 82
 83#include <asm/asm.h>
 84#include <asm/branch.h>
 85#include <asm/byteorder.h>
 86#include <asm/cop2.h>
 87#include <asm/inst.h>
 88#include <asm/uaccess.h>
 89#include <asm/system.h>
 90
 91#define STR(x)  __STR(x)
 92#define __STR(x)  #x
 93
 94enum {
 95	UNALIGNED_ACTION_QUIET,
 96	UNALIGNED_ACTION_SIGNAL,
 97	UNALIGNED_ACTION_SHOW,
 98};
 99#ifdef CONFIG_DEBUG_FS
100static u32 unaligned_instructions;
101static u32 unaligned_action;
102#else
103#define unaligned_action UNALIGNED_ACTION_QUIET
104#endif
105extern void show_registers(struct pt_regs *regs);
106
107static void emulate_load_store_insn(struct pt_regs *regs,
108	void __user *addr, unsigned int __user *pc)
109{
110	union mips_instruction insn;
111	unsigned long value;
112	unsigned int res;
113
114	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
115
116	/*
117	 * This load never faults.
118	 */
119	__get_user(insn.word, pc);
120
121	switch (insn.i_format.opcode) {
122	/*
123	 * These are instructions that a compiler doesn't generate.  We
124	 * can assume therefore that the code is MIPS-aware and
125	 * really buggy.  Emulating these instructions would break the
126	 * semantics anyway.
127	 */
128	case ll_op:
129	case lld_op:
130	case sc_op:
131	case scd_op:
132
133	/*
134	 * For these instructions the only way to create an address
135	 * error is an attempted access to kernel/supervisor address
136	 * space.
137	 */
138	case ldl_op:
139	case ldr_op:
140	case lwl_op:
141	case lwr_op:
142	case sdl_op:
143	case sdr_op:
144	case swl_op:
145	case swr_op:
146	case lb_op:
147	case lbu_op:
148	case sb_op:
149		goto sigbus;
150
151	/*
152	 * The remaining opcodes are the ones that are really of interest.
153	 */
154	case lh_op:
155		if (!access_ok(VERIFY_READ, addr, 2))
156			goto sigbus;
157
158		__asm__ __volatile__ (".set\tnoat\n"
159#ifdef __BIG_ENDIAN
160			"1:\tlb\t%0, 0(%2)\n"
161			"2:\tlbu\t$1, 1(%2)\n\t"
162#endif
163#ifdef __LITTLE_ENDIAN
164			"1:\tlb\t%0, 1(%2)\n"
165			"2:\tlbu\t$1, 0(%2)\n\t"
166#endif
167			"sll\t%0, 0x8\n\t"
168			"or\t%0, $1\n\t"
169			"li\t%1, 0\n"
170			"3:\t.set\tat\n\t"
171			".section\t.fixup,\"ax\"\n\t"
172			"4:\tli\t%1, %3\n\t"
173			"j\t3b\n\t"
174			".previous\n\t"
175			".section\t__ex_table,\"a\"\n\t"
176			STR(PTR)"\t1b, 4b\n\t"
177			STR(PTR)"\t2b, 4b\n\t"
178			".previous"
179			: "=&r" (value), "=r" (res)
180			: "r" (addr), "i" (-EFAULT));
181		if (res)
182			goto fault;
183		compute_return_epc(regs);
184		regs->regs[insn.i_format.rt] = value;
185		break;
186
187	case lw_op:
188		if (!access_ok(VERIFY_READ, addr, 4))
189			goto sigbus;
190
191		__asm__ __volatile__ (
192#ifdef __BIG_ENDIAN
193			"1:\tlwl\t%0, (%2)\n"
194			"2:\tlwr\t%0, 3(%2)\n\t"
195#endif
196#ifdef __LITTLE_ENDIAN
197			"1:\tlwl\t%0, 3(%2)\n"
198			"2:\tlwr\t%0, (%2)\n\t"
199#endif
200			"li\t%1, 0\n"
201			"3:\t.section\t.fixup,\"ax\"\n\t"
202			"4:\tli\t%1, %3\n\t"
203			"j\t3b\n\t"
204			".previous\n\t"
205			".section\t__ex_table,\"a\"\n\t"
206			STR(PTR)"\t1b, 4b\n\t"
207			STR(PTR)"\t2b, 4b\n\t"
208			".previous"
209			: "=&r" (value), "=r" (res)
210			: "r" (addr), "i" (-EFAULT));
211		if (res)
212			goto fault;
213		compute_return_epc(regs);
214		regs->regs[insn.i_format.rt] = value;
215		break;
216
217	case lhu_op:
218		if (!access_ok(VERIFY_READ, addr, 2))
219			goto sigbus;
220
221		__asm__ __volatile__ (
222			".set\tnoat\n"
223#ifdef __BIG_ENDIAN
224			"1:\tlbu\t%0, 0(%2)\n"
225			"2:\tlbu\t$1, 1(%2)\n\t"
226#endif
227#ifdef __LITTLE_ENDIAN
228			"1:\tlbu\t%0, 1(%2)\n"
229			"2:\tlbu\t$1, 0(%2)\n\t"
230#endif
231			"sll\t%0, 0x8\n\t"
232			"or\t%0, $1\n\t"
233			"li\t%1, 0\n"
234			"3:\t.set\tat\n\t"
235			".section\t.fixup,\"ax\"\n\t"
236			"4:\tli\t%1, %3\n\t"
237			"j\t3b\n\t"
238			".previous\n\t"
239			".section\t__ex_table,\"a\"\n\t"
240			STR(PTR)"\t1b, 4b\n\t"
241			STR(PTR)"\t2b, 4b\n\t"
242			".previous"
243			: "=&r" (value), "=r" (res)
244			: "r" (addr), "i" (-EFAULT));
245		if (res)
246			goto fault;
247		compute_return_epc(regs);
248		regs->regs[insn.i_format.rt] = value;
249		break;
250
251	case lwu_op:
252#ifdef CONFIG_64BIT
253		/*
254		 * A 32-bit kernel might be running on a 64-bit processor.  But
255		 * if we're on a 32-bit processor and an i-cache incoherency
256		 * or race makes us see a 64-bit instruction here the sdl/sdr
257		 * would blow up, so for now we don't handle unaligned 64-bit
258		 * instructions on 32-bit kernels.
259		 */
260		if (!access_ok(VERIFY_READ, addr, 4))
261			goto sigbus;
262
263		__asm__ __volatile__ (
264#ifdef __BIG_ENDIAN
265			"1:\tlwl\t%0, (%2)\n"
266			"2:\tlwr\t%0, 3(%2)\n\t"
267#endif
268#ifdef __LITTLE_ENDIAN
269			"1:\tlwl\t%0, 3(%2)\n"
270			"2:\tlwr\t%0, (%2)\n\t"
271#endif
272			"dsll\t%0, %0, 32\n\t"
273			"dsrl\t%0, %0, 32\n\t"
274			"li\t%1, 0\n"
275			"3:\t.section\t.fixup,\"ax\"\n\t"
276			"4:\tli\t%1, %3\n\t"
277			"j\t3b\n\t"
278			".previous\n\t"
279			".section\t__ex_table,\"a\"\n\t"
280			STR(PTR)"\t1b, 4b\n\t"
281			STR(PTR)"\t2b, 4b\n\t"
282			".previous"
283			: "=&r" (value), "=r" (res)
284			: "r" (addr), "i" (-EFAULT));
285		if (res)
286			goto fault;
287		compute_return_epc(regs);
288		regs->regs[insn.i_format.rt] = value;
289		break;
290#endif /* CONFIG_64BIT */
291
292		/* Cannot handle 64-bit instructions in 32-bit kernel */
293		goto sigill;
294
295	case ld_op:
296#ifdef CONFIG_64BIT
297		/*
298		 * A 32-bit kernel might be running on a 64-bit processor.  But
299		 * if we're on a 32-bit processor and an i-cache incoherency
300		 * or race makes us see a 64-bit instruction here the sdl/sdr
301		 * would blow up, so for now we don't handle unaligned 64-bit
302		 * instructions on 32-bit kernels.
303		 */
304		if (!access_ok(VERIFY_READ, addr, 8))
305			goto sigbus;
306
307		__asm__ __volatile__ (
308#ifdef __BIG_ENDIAN
309			"1:\tldl\t%0, (%2)\n"
310			"2:\tldr\t%0, 7(%2)\n\t"
311#endif
312#ifdef __LITTLE_ENDIAN
313			"1:\tldl\t%0, 7(%2)\n"
314			"2:\tldr\t%0, (%2)\n\t"
315#endif
316			"li\t%1, 0\n"
317			"3:\t.section\t.fixup,\"ax\"\n\t"
318			"4:\tli\t%1, %3\n\t"
319			"j\t3b\n\t"
320			".previous\n\t"
321			".section\t__ex_table,\"a\"\n\t"
322			STR(PTR)"\t1b, 4b\n\t"
323			STR(PTR)"\t2b, 4b\n\t"
324			".previous"
325			: "=&r" (value), "=r" (res)
326			: "r" (addr), "i" (-EFAULT));
327		if (res)
328			goto fault;
329		compute_return_epc(regs);
330		regs->regs[insn.i_format.rt] = value;
331		break;
332#endif /* CONFIG_64BIT */
333
334		/* Cannot handle 64-bit instructions in 32-bit kernel */
335		goto sigill;
336
337	case sh_op:
338		if (!access_ok(VERIFY_WRITE, addr, 2))
339			goto sigbus;
340
341		value = regs->regs[insn.i_format.rt];
342		__asm__ __volatile__ (
343#ifdef __BIG_ENDIAN
344			".set\tnoat\n"
345			"1:\tsb\t%1, 1(%2)\n\t"
346			"srl\t$1, %1, 0x8\n"
347			"2:\tsb\t$1, 0(%2)\n\t"
348			".set\tat\n\t"
349#endif
350#ifdef __LITTLE_ENDIAN
351			".set\tnoat\n"
352			"1:\tsb\t%1, 0(%2)\n\t"
353			"srl\t$1,%1, 0x8\n"
354			"2:\tsb\t$1, 1(%2)\n\t"
355			".set\tat\n\t"
356#endif
357			"li\t%0, 0\n"
358			"3:\n\t"
359			".section\t.fixup,\"ax\"\n\t"
360			"4:\tli\t%0, %3\n\t"
361			"j\t3b\n\t"
362			".previous\n\t"
363			".section\t__ex_table,\"a\"\n\t"
364			STR(PTR)"\t1b, 4b\n\t"
365			STR(PTR)"\t2b, 4b\n\t"
366			".previous"
367			: "=r" (res)
368			: "r" (value), "r" (addr), "i" (-EFAULT));
369		if (res)
370			goto fault;
371		compute_return_epc(regs);
372		break;
373
374	case sw_op:
375		if (!access_ok(VERIFY_WRITE, addr, 4))
376			goto sigbus;
377
378		value = regs->regs[insn.i_format.rt];
379		__asm__ __volatile__ (
380#ifdef __BIG_ENDIAN
381			"1:\tswl\t%1,(%2)\n"
382			"2:\tswr\t%1, 3(%2)\n\t"
383#endif
384#ifdef __LITTLE_ENDIAN
385			"1:\tswl\t%1, 3(%2)\n"
386			"2:\tswr\t%1, (%2)\n\t"
387#endif
388			"li\t%0, 0\n"
389			"3:\n\t"
390			".section\t.fixup,\"ax\"\n\t"
391			"4:\tli\t%0, %3\n\t"
392			"j\t3b\n\t"
393			".previous\n\t"
394			".section\t__ex_table,\"a\"\n\t"
395			STR(PTR)"\t1b, 4b\n\t"
396			STR(PTR)"\t2b, 4b\n\t"
397			".previous"
398		: "=r" (res)
399		: "r" (value), "r" (addr), "i" (-EFAULT));
400		if (res)
401			goto fault;
402		compute_return_epc(regs);
403		break;
404
405	case sd_op:
406#ifdef CONFIG_64BIT
407		/*
408		 * A 32-bit kernel might be running on a 64-bit processor.  But
409		 * if we're on a 32-bit processor and an i-cache incoherency
410		 * or race makes us see a 64-bit instruction here the sdl/sdr
411		 * would blow up, so for now we don't handle unaligned 64-bit
412		 * instructions on 32-bit kernels.
413		 */
414		if (!access_ok(VERIFY_WRITE, addr, 8))
415			goto sigbus;
416
417		value = regs->regs[insn.i_format.rt];
418		__asm__ __volatile__ (
419#ifdef __BIG_ENDIAN
420			"1:\tsdl\t%1,(%2)\n"
421			"2:\tsdr\t%1, 7(%2)\n\t"
422#endif
423#ifdef __LITTLE_ENDIAN
424			"1:\tsdl\t%1, 7(%2)\n"
425			"2:\tsdr\t%1, (%2)\n\t"
426#endif
427			"li\t%0, 0\n"
428			"3:\n\t"
429			".section\t.fixup,\"ax\"\n\t"
430			"4:\tli\t%0, %3\n\t"
431			"j\t3b\n\t"
432			".previous\n\t"
433			".section\t__ex_table,\"a\"\n\t"
434			STR(PTR)"\t1b, 4b\n\t"
435			STR(PTR)"\t2b, 4b\n\t"
436			".previous"
437		: "=r" (res)
438		: "r" (value), "r" (addr), "i" (-EFAULT));
439		if (res)
440			goto fault;
441		compute_return_epc(regs);
442		break;
443#endif /* CONFIG_64BIT */
444
445		/* Cannot handle 64-bit instructions in 32-bit kernel */
446		goto sigill;
447
448	case lwc1_op:
449	case ldc1_op:
450	case swc1_op:
451	case sdc1_op:
452		/*
453		 * I herewith declare: this does not happen.  So send SIGBUS.
454		 */
455		goto sigbus;
456
457	/*
458	 * COP2 is available to implementor for application specific use.
459	 * It's up to applications to register a notifier chain and do
460	 * whatever they have to do, including possible sending of signals.
461	 */
462	case lwc2_op:
463		cu2_notifier_call_chain(CU2_LWC2_OP, regs);
464		break;
465
466	case ldc2_op:
467		cu2_notifier_call_chain(CU2_LDC2_OP, regs);
468		break;
469
470	case swc2_op:
471		cu2_notifier_call_chain(CU2_SWC2_OP, regs);
472		break;
473
474	case sdc2_op:
475		cu2_notifier_call_chain(CU2_SDC2_OP, regs);
476		break;
477
478	default:
479		/*
480		 * Pheeee...  We encountered an yet unknown instruction or
481		 * cache coherence problem.  Die sucker, die ...
482		 */
483		goto sigill;
484	}
485
486#ifdef CONFIG_DEBUG_FS
487	unaligned_instructions++;
488#endif
489
490	return;
491
492fault:
493	/* Did we have an exception handler installed? */
494	if (fixup_exception(regs))
495		return;
496
497	die_if_kernel("Unhandled kernel unaligned access", regs);
498	force_sig(SIGSEGV, current);
499
500	return;
501
502sigbus:
503	die_if_kernel("Unhandled kernel unaligned access", regs);
504	force_sig(SIGBUS, current);
505
506	return;
507
508sigill:
509	die_if_kernel("Unhandled kernel unaligned access or invalid instruction", regs);
510	force_sig(SIGILL, current);
511}
512
513asmlinkage void do_ade(struct pt_regs *regs)
514{
515	unsigned int __user *pc;
516	mm_segment_t seg;
517
518	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
519			1, regs, regs->cp0_badvaddr);
520	/*
521	 * Did we catch a fault trying to load an instruction?
522	 * Or are we running in MIPS16 mode?
523	 */
524	if ((regs->cp0_badvaddr == regs->cp0_epc) || (regs->cp0_epc & 0x1))
525		goto sigbus;
526
527	pc = (unsigned int __user *) exception_epc(regs);
528	if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
529		goto sigbus;
530	if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
531		goto sigbus;
532	else if (unaligned_action == UNALIGNED_ACTION_SHOW)
533		show_registers(regs);
534
535	/*
536	 * Do branch emulation only if we didn't forward the exception.
537	 * This is all so but ugly ...
538	 */
539	seg = get_fs();
540	if (!user_mode(regs))
541		set_fs(KERNEL_DS);
542	emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);
543	set_fs(seg);
544
545	return;
546
547sigbus:
548	die_if_kernel("Kernel unaligned instruction access", regs);
549	force_sig(SIGBUS, current);
550
551	/*
552	 * XXX On return from the signal handler we should advance the epc
553	 */
554}
555
556#ifdef CONFIG_DEBUG_FS
557extern struct dentry *mips_debugfs_dir;
558static int __init debugfs_unaligned(void)
559{
560	struct dentry *d;
561
562	if (!mips_debugfs_dir)
563		return -ENODEV;
564	d = debugfs_create_u32("unaligned_instructions", S_IRUGO,
565			       mips_debugfs_dir, &unaligned_instructions);
566	if (!d)
567		return -ENOMEM;
568	d = debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
569			       mips_debugfs_dir, &unaligned_action);
570	if (!d)
571		return -ENOMEM;
572	return 0;
573}
574__initcall(debugfs_unaligned);
575#endif
v5.9
   1/*
   2 * Handle unaligned accesses by emulation.
   3 *
   4 * This file is subject to the terms and conditions of the GNU General Public
   5 * License.  See the file "COPYING" in the main directory of this archive
   6 * for more details.
   7 *
   8 * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
   9 * Copyright (C) 1999 Silicon Graphics, Inc.
  10 * Copyright (C) 2014 Imagination Technologies Ltd.
  11 *
  12 * This file contains exception handler for address error exception with the
  13 * special capability to execute faulting instructions in software.  The
  14 * handler does not try to handle the case when the program counter points
  15 * to an address not aligned to a word boundary.
  16 *
  17 * Putting data to unaligned addresses is a bad practice even on Intel where
  18 * only the performance is affected.  Much worse is that such code is non-
  19 * portable.  Due to several programs that die on MIPS due to alignment
  20 * problems I decided to implement this handler anyway though I originally
  21 * didn't intend to do this at all for user code.
  22 *
  23 * For now I enable fixing of address errors by default to make life easier.
  24 * I however intend to disable this somewhen in the future when the alignment
  25 * problems with user programs have been fixed.	 For programmers this is the
  26 * right way to go.
  27 *
  28 * Fixing address errors is a per process option.  The option is inherited
  29 * across fork(2) and execve(2) calls.	If you really want to use the
  30 * option in your user programs - I discourage the use of the software
  31 * emulation strongly - use the following code in your userland stuff:
  32 *
  33 * #include <sys/sysmips.h>
  34 *
  35 * ...
  36 * sysmips(MIPS_FIXADE, x);
  37 * ...
  38 *
  39 * The argument x is 0 for disabling software emulation, enabled otherwise.
  40 *
  41 * Below a little program to play around with this feature.
  42 *
  43 * #include <stdio.h>
  44 * #include <sys/sysmips.h>
  45 *
  46 * struct foo {
  47 *	   unsigned char bar[8];
  48 * };
  49 *
  50 * main(int argc, char *argv[])
  51 * {
  52 *	   struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
  53 *	   unsigned int *p = (unsigned int *) (x.bar + 3);
  54 *	   int i;
  55 *
  56 *	   if (argc > 1)
  57 *		   sysmips(MIPS_FIXADE, atoi(argv[1]));
  58 *
  59 *	   printf("*p = %08lx\n", *p);
  60 *
  61 *	   *p = 0xdeadface;
  62 *
  63 *	   for(i = 0; i <= 7; i++)
  64 *	   printf("%02x ", x.bar[i]);
  65 *	   printf("\n");
  66 * }
  67 *
  68 * Coprocessor loads are not supported; I think this case is unimportant
  69 * in the practice.
  70 *
  71 * TODO: Handle ndc (attempted store to doubleword in uncached memory)
  72 *	 exception for the R6000.
  73 *	 A store crossing a page boundary might be executed only partially.
  74 *	 Undo the partial store in this case.
  75 */
  76#include <linux/context_tracking.h>
  77#include <linux/mm.h>
  78#include <linux/signal.h>
  79#include <linux/smp.h>
  80#include <linux/sched.h>
  81#include <linux/debugfs.h>
  82#include <linux/perf_event.h>
  83
  84#include <asm/asm.h>
  85#include <asm/branch.h>
  86#include <asm/byteorder.h>
  87#include <asm/cop2.h>
  88#include <asm/debug.h>
  89#include <asm/fpu.h>
  90#include <asm/fpu_emulator.h>
  91#include <asm/inst.h>
  92#include <asm/unaligned-emul.h>
  93#include <asm/mmu_context.h>
  94#include <linux/uaccess.h>
  95
  96enum {
  97	UNALIGNED_ACTION_QUIET,
  98	UNALIGNED_ACTION_SIGNAL,
  99	UNALIGNED_ACTION_SHOW,
 100};
 101#ifdef CONFIG_DEBUG_FS
 102static u32 unaligned_instructions;
 103static u32 unaligned_action;
 104#else
 105#define unaligned_action UNALIGNED_ACTION_QUIET
 106#endif
 107extern void show_registers(struct pt_regs *regs);
 108
 109static void emulate_load_store_insn(struct pt_regs *regs,
 110	void __user *addr, unsigned int __user *pc)
 111{
 112	unsigned long origpc, orig31, value;
 113	union mips_instruction insn;
 114	unsigned int res;
 115#ifdef	CONFIG_EVA
 116	mm_segment_t seg;
 117#endif
 118	origpc = (unsigned long)pc;
 119	orig31 = regs->regs[31];
 120
 121	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
 122
 123	/*
 124	 * This load never faults.
 125	 */
 126	__get_user(insn.word, pc);
 127
 128	switch (insn.i_format.opcode) {
 129		/*
 130		 * These are instructions that a compiler doesn't generate.  We
 131		 * can assume therefore that the code is MIPS-aware and
 132		 * really buggy.  Emulating these instructions would break the
 133		 * semantics anyway.
 134		 */
 135	case ll_op:
 136	case lld_op:
 137	case sc_op:
 138	case scd_op:
 139
 140		/*
 141		 * For these instructions the only way to create an address
 142		 * error is an attempted access to kernel/supervisor address
 143		 * space.
 144		 */
 145	case ldl_op:
 146	case ldr_op:
 147	case lwl_op:
 148	case lwr_op:
 149	case sdl_op:
 150	case sdr_op:
 151	case swl_op:
 152	case swr_op:
 153	case lb_op:
 154	case lbu_op:
 155	case sb_op:
 156		goto sigbus;
 157
 158		/*
 159		 * The remaining opcodes are the ones that are really of
 160		 * interest.
 161		 */
 162	case spec3_op:
 163		if (insn.dsp_format.func == lx_op) {
 164			switch (insn.dsp_format.op) {
 165			case lwx_op:
 166				if (!access_ok(addr, 4))
 167					goto sigbus;
 168				LoadW(addr, value, res);
 169				if (res)
 170					goto fault;
 171				compute_return_epc(regs);
 172				regs->regs[insn.dsp_format.rd] = value;
 173				break;
 174			case lhx_op:
 175				if (!access_ok(addr, 2))
 176					goto sigbus;
 177				LoadHW(addr, value, res);
 178				if (res)
 179					goto fault;
 180				compute_return_epc(regs);
 181				regs->regs[insn.dsp_format.rd] = value;
 182				break;
 183			default:
 184				goto sigill;
 185			}
 186		}
 187#ifdef CONFIG_EVA
 188		else {
 189			/*
 190			 * we can land here only from kernel accessing user
 191			 * memory, so we need to "switch" the address limit to
 192			 * user space, so that address check can work properly.
 193			 */
 194			seg = force_uaccess_begin();
 195			switch (insn.spec3_format.func) {
 196			case lhe_op:
 197				if (!access_ok(addr, 2)) {
 198					force_uaccess_end(seg);
 199					goto sigbus;
 200				}
 201				LoadHWE(addr, value, res);
 202				if (res) {
 203					force_uaccess_end(seg);
 204					goto fault;
 205				}
 206				compute_return_epc(regs);
 207				regs->regs[insn.spec3_format.rt] = value;
 208				break;
 209			case lwe_op:
 210				if (!access_ok(addr, 4)) {
 211					force_uaccess_end(seg);
 212					goto sigbus;
 213				}
 214				LoadWE(addr, value, res);
 215				if (res) {
 216					force_uaccess_end(seg);
 217					goto fault;
 218				}
 219				compute_return_epc(regs);
 220				regs->regs[insn.spec3_format.rt] = value;
 221				break;
 222			case lhue_op:
 223				if (!access_ok(addr, 2)) {
 224					force_uaccess_end(seg);
 225					goto sigbus;
 226				}
 227				LoadHWUE(addr, value, res);
 228				if (res) {
 229					force_uaccess_end(seg);
 230					goto fault;
 231				}
 232				compute_return_epc(regs);
 233				regs->regs[insn.spec3_format.rt] = value;
 234				break;
 235			case she_op:
 236				if (!access_ok(addr, 2)) {
 237					force_uaccess_end(seg);
 238					goto sigbus;
 239				}
 240				compute_return_epc(regs);
 241				value = regs->regs[insn.spec3_format.rt];
 242				StoreHWE(addr, value, res);
 243				if (res) {
 244					force_uaccess_end(seg);
 245					goto fault;
 246				}
 247				break;
 248			case swe_op:
 249				if (!access_ok(addr, 4)) {
 250					force_uaccess_end(seg);
 251					goto sigbus;
 252				}
 253				compute_return_epc(regs);
 254				value = regs->regs[insn.spec3_format.rt];
 255				StoreWE(addr, value, res);
 256				if (res) {
 257					force_uaccess_end(seg);
 258					goto fault;
 259				}
 260				break;
 261			default:
 262				force_uaccess_end(seg);
 263				goto sigill;
 264			}
 265			force_uaccess_end(seg);
 266		}
 267#endif
 268		break;
 269	case lh_op:
 270		if (!access_ok(addr, 2))
 271			goto sigbus;
 272
 273		if (IS_ENABLED(CONFIG_EVA)) {
 274			if (uaccess_kernel())
 275				LoadHW(addr, value, res);
 276			else
 277				LoadHWE(addr, value, res);
 278		} else {
 279			LoadHW(addr, value, res);
 280		}
 281
 282		if (res)
 283			goto fault;
 284		compute_return_epc(regs);
 285		regs->regs[insn.i_format.rt] = value;
 286		break;
 287
 288	case lw_op:
 289		if (!access_ok(addr, 4))
 290			goto sigbus;
 291
 292		if (IS_ENABLED(CONFIG_EVA)) {
 293			if (uaccess_kernel())
 294				LoadW(addr, value, res);
 295			else
 296				LoadWE(addr, value, res);
 297		} else {
 298			LoadW(addr, value, res);
 299		}
 300
 301		if (res)
 302			goto fault;
 303		compute_return_epc(regs);
 304		regs->regs[insn.i_format.rt] = value;
 305		break;
 306
 307	case lhu_op:
 308		if (!access_ok(addr, 2))
 309			goto sigbus;
 310
 311		if (IS_ENABLED(CONFIG_EVA)) {
 312			if (uaccess_kernel())
 313				LoadHWU(addr, value, res);
 314			else
 315				LoadHWUE(addr, value, res);
 316		} else {
 317			LoadHWU(addr, value, res);
 318		}
 319
 320		if (res)
 321			goto fault;
 322		compute_return_epc(regs);
 323		regs->regs[insn.i_format.rt] = value;
 324		break;
 325
 326	case lwu_op:
 327#ifdef CONFIG_64BIT
 328		/*
 329		 * A 32-bit kernel might be running on a 64-bit processor.  But
 330		 * if we're on a 32-bit processor and an i-cache incoherency
 331		 * or race makes us see a 64-bit instruction here the sdl/sdr
 332		 * would blow up, so for now we don't handle unaligned 64-bit
 333		 * instructions on 32-bit kernels.
 334		 */
 335		if (!access_ok(addr, 4))
 336			goto sigbus;
 337
 338		LoadWU(addr, value, res);
 339		if (res)
 340			goto fault;
 341		compute_return_epc(regs);
 342		regs->regs[insn.i_format.rt] = value;
 343		break;
 344#endif /* CONFIG_64BIT */
 345
 346		/* Cannot handle 64-bit instructions in 32-bit kernel */
 347		goto sigill;
 348
 349	case ld_op:
 350#ifdef CONFIG_64BIT
 351		/*
 352		 * A 32-bit kernel might be running on a 64-bit processor.  But
 353		 * if we're on a 32-bit processor and an i-cache incoherency
 354		 * or race makes us see a 64-bit instruction here the sdl/sdr
 355		 * would blow up, so for now we don't handle unaligned 64-bit
 356		 * instructions on 32-bit kernels.
 357		 */
 358		if (!access_ok(addr, 8))
 359			goto sigbus;
 360
 361		LoadDW(addr, value, res);
 362		if (res)
 363			goto fault;
 364		compute_return_epc(regs);
 365		regs->regs[insn.i_format.rt] = value;
 366		break;
 367#endif /* CONFIG_64BIT */
 368
 369		/* Cannot handle 64-bit instructions in 32-bit kernel */
 370		goto sigill;
 371
 372	case sh_op:
 373		if (!access_ok(addr, 2))
 374			goto sigbus;
 375
 376		compute_return_epc(regs);
 377		value = regs->regs[insn.i_format.rt];
 378
 379		if (IS_ENABLED(CONFIG_EVA)) {
 380			if (uaccess_kernel())
 381				StoreHW(addr, value, res);
 382			else
 383				StoreHWE(addr, value, res);
 384		} else {
 385			StoreHW(addr, value, res);
 386		}
 387
 388		if (res)
 389			goto fault;
 390		break;
 391
 392	case sw_op:
 393		if (!access_ok(addr, 4))
 394			goto sigbus;
 395
 396		compute_return_epc(regs);
 397		value = regs->regs[insn.i_format.rt];
 398
 399		if (IS_ENABLED(CONFIG_EVA)) {
 400			if (uaccess_kernel())
 401				StoreW(addr, value, res);
 402			else
 403				StoreWE(addr, value, res);
 404		} else {
 405			StoreW(addr, value, res);
 406		}
 407
 408		if (res)
 409			goto fault;
 410		break;
 411
 412	case sd_op:
 413#ifdef CONFIG_64BIT
 414		/*
 415		 * A 32-bit kernel might be running on a 64-bit processor.  But
 416		 * if we're on a 32-bit processor and an i-cache incoherency
 417		 * or race makes us see a 64-bit instruction here the sdl/sdr
 418		 * would blow up, so for now we don't handle unaligned 64-bit
 419		 * instructions on 32-bit kernels.
 420		 */
 421		if (!access_ok(addr, 8))
 422			goto sigbus;
 423
 424		compute_return_epc(regs);
 425		value = regs->regs[insn.i_format.rt];
 426		StoreDW(addr, value, res);
 427		if (res)
 428			goto fault;
 429		break;
 430#endif /* CONFIG_64BIT */
 431
 432		/* Cannot handle 64-bit instructions in 32-bit kernel */
 433		goto sigill;
 434
 435#ifdef CONFIG_MIPS_FP_SUPPORT
 436
 437	case lwc1_op:
 438	case ldc1_op:
 439	case swc1_op:
 440	case sdc1_op:
 441	case cop1x_op: {
 442		void __user *fault_addr = NULL;
 443
 444		die_if_kernel("Unaligned FP access in kernel code", regs);
 445		BUG_ON(!used_math());
 446
 447		res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
 448					       &fault_addr);
 449		own_fpu(1);	/* Restore FPU state. */
 450
 451		/* Signal if something went wrong. */
 452		process_fpemu_return(res, fault_addr, 0);
 453
 454		if (res == 0)
 455			break;
 456		return;
 457	}
 458#endif /* CONFIG_MIPS_FP_SUPPORT */
 459
 460#ifdef CONFIG_CPU_HAS_MSA
 461
 462	case msa_op: {
 463		unsigned int wd, preempted;
 464		enum msa_2b_fmt df;
 465		union fpureg *fpr;
 466
 467		if (!cpu_has_msa)
 468			goto sigill;
 469
 470		/*
 471		 * If we've reached this point then userland should have taken
 472		 * the MSA disabled exception & initialised vector context at
 473		 * some point in the past.
 474		 */
 475		BUG_ON(!thread_msa_context_live());
 476
 477		df = insn.msa_mi10_format.df;
 478		wd = insn.msa_mi10_format.wd;
 479		fpr = &current->thread.fpu.fpr[wd];
 480
 481		switch (insn.msa_mi10_format.func) {
 482		case msa_ld_op:
 483			if (!access_ok(addr, sizeof(*fpr)))
 484				goto sigbus;
 485
 486			do {
 487				/*
 488				 * If we have live MSA context keep track of
 489				 * whether we get preempted in order to avoid
 490				 * the register context we load being clobbered
 491				 * by the live context as it's saved during
 492				 * preemption. If we don't have live context
 493				 * then it can't be saved to clobber the value
 494				 * we load.
 495				 */
 496				preempted = test_thread_flag(TIF_USEDMSA);
 497
 498				res = __copy_from_user_inatomic(fpr, addr,
 499								sizeof(*fpr));
 500				if (res)
 501					goto fault;
 502
 503				/*
 504				 * Update the hardware register if it is in use
 505				 * by the task in this quantum, in order to
 506				 * avoid having to save & restore the whole
 507				 * vector context.
 508				 */
 509				preempt_disable();
 510				if (test_thread_flag(TIF_USEDMSA)) {
 511					write_msa_wr(wd, fpr, df);
 512					preempted = 0;
 513				}
 514				preempt_enable();
 515			} while (preempted);
 516			break;
 517
 518		case msa_st_op:
 519			if (!access_ok(addr, sizeof(*fpr)))
 520				goto sigbus;
 521
 522			/*
 523			 * Update from the hardware register if it is in use by
 524			 * the task in this quantum, in order to avoid having to
 525			 * save & restore the whole vector context.
 526			 */
 527			preempt_disable();
 528			if (test_thread_flag(TIF_USEDMSA))
 529				read_msa_wr(wd, fpr, df);
 530			preempt_enable();
 531
 532			res = __copy_to_user_inatomic(addr, fpr, sizeof(*fpr));
 533			if (res)
 534				goto fault;
 535			break;
 536
 537		default:
 538			goto sigbus;
 539		}
 540
 541		compute_return_epc(regs);
 542		break;
 543	}
 544#endif /* CONFIG_CPU_HAS_MSA */
 545
 546#ifndef CONFIG_CPU_MIPSR6
 547	/*
 548	 * COP2 is available to implementor for application specific use.
 549	 * It's up to applications to register a notifier chain and do
 550	 * whatever they have to do, including possible sending of signals.
 551	 *
 552	 * This instruction has been reallocated in Release 6
 553	 */
 554	case lwc2_op:
 555		cu2_notifier_call_chain(CU2_LWC2_OP, regs);
 556		break;
 557
 558	case ldc2_op:
 559		cu2_notifier_call_chain(CU2_LDC2_OP, regs);
 560		break;
 561
 562	case swc2_op:
 563		cu2_notifier_call_chain(CU2_SWC2_OP, regs);
 564		break;
 565
 566	case sdc2_op:
 567		cu2_notifier_call_chain(CU2_SDC2_OP, regs);
 568		break;
 569#endif
 570	default:
 571		/*
 572		 * Pheeee...  We encountered an yet unknown instruction or
 573		 * cache coherence problem.  Die sucker, die ...
 574		 */
 575		goto sigill;
 576	}
 577
 578#ifdef CONFIG_DEBUG_FS
 579	unaligned_instructions++;
 580#endif
 581
 582	return;
 583
 584fault:
 585	/* roll back jump/branch */
 586	regs->cp0_epc = origpc;
 587	regs->regs[31] = orig31;
 588	/* Did we have an exception handler installed? */
 589	if (fixup_exception(regs))
 590		return;
 591
 592	die_if_kernel("Unhandled kernel unaligned access", regs);
 593	force_sig(SIGSEGV);
 594
 595	return;
 596
 597sigbus:
 598	die_if_kernel("Unhandled kernel unaligned access", regs);
 599	force_sig(SIGBUS);
 600
 601	return;
 602
 603sigill:
 604	die_if_kernel
 605	    ("Unhandled kernel unaligned access or invalid instruction", regs);
 606	force_sig(SIGILL);
 607}
 608
 609/* Recode table from 16-bit register notation to 32-bit GPR. */
 610const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 };
 611
 612/* Recode table from 16-bit STORE register notation to 32-bit GPR. */
 613static const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 };
 614
 615static void emulate_load_store_microMIPS(struct pt_regs *regs,
 616					 void __user *addr)
 617{
 618	unsigned long value;
 619	unsigned int res;
 620	int i;
 621	unsigned int reg = 0, rvar;
 622	unsigned long orig31;
 623	u16 __user *pc16;
 624	u16 halfword;
 625	unsigned int word;
 626	unsigned long origpc, contpc;
 627	union mips_instruction insn;
 628	struct mm_decoded_insn mminsn;
 629
 630	origpc = regs->cp0_epc;
 631	orig31 = regs->regs[31];
 632
 633	mminsn.micro_mips_mode = 1;
 634
 635	/*
 636	 * This load never faults.
 637	 */
 638	pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
 639	__get_user(halfword, pc16);
 640	pc16++;
 641	contpc = regs->cp0_epc + 2;
 642	word = ((unsigned int)halfword << 16);
 643	mminsn.pc_inc = 2;
 644
 645	if (!mm_insn_16bit(halfword)) {
 646		__get_user(halfword, pc16);
 647		pc16++;
 648		contpc = regs->cp0_epc + 4;
 649		mminsn.pc_inc = 4;
 650		word |= halfword;
 651	}
 652	mminsn.insn = word;
 653
 654	if (get_user(halfword, pc16))
 655		goto fault;
 656	mminsn.next_pc_inc = 2;
 657	word = ((unsigned int)halfword << 16);
 658
 659	if (!mm_insn_16bit(halfword)) {
 660		pc16++;
 661		if (get_user(halfword, pc16))
 662			goto fault;
 663		mminsn.next_pc_inc = 4;
 664		word |= halfword;
 665	}
 666	mminsn.next_insn = word;
 667
 668	insn = (union mips_instruction)(mminsn.insn);
 669	if (mm_isBranchInstr(regs, mminsn, &contpc))
 670		insn = (union mips_instruction)(mminsn.next_insn);
 671
 672	/*  Parse instruction to find what to do */
 673
 674	switch (insn.mm_i_format.opcode) {
 675
 676	case mm_pool32a_op:
 677		switch (insn.mm_x_format.func) {
 678		case mm_lwxs_op:
 679			reg = insn.mm_x_format.rd;
 680			goto loadW;
 681		}
 682
 683		goto sigbus;
 684
 685	case mm_pool32b_op:
 686		switch (insn.mm_m_format.func) {
 687		case mm_lwp_func:
 688			reg = insn.mm_m_format.rd;
 689			if (reg == 31)
 690				goto sigbus;
 691
 692			if (!access_ok(addr, 8))
 693				goto sigbus;
 694
 695			LoadW(addr, value, res);
 696			if (res)
 697				goto fault;
 698			regs->regs[reg] = value;
 699			addr += 4;
 700			LoadW(addr, value, res);
 701			if (res)
 702				goto fault;
 703			regs->regs[reg + 1] = value;
 704			goto success;
 705
 706		case mm_swp_func:
 707			reg = insn.mm_m_format.rd;
 708			if (reg == 31)
 709				goto sigbus;
 710
 711			if (!access_ok(addr, 8))
 712				goto sigbus;
 713
 714			value = regs->regs[reg];
 715			StoreW(addr, value, res);
 716			if (res)
 717				goto fault;
 718			addr += 4;
 719			value = regs->regs[reg + 1];
 720			StoreW(addr, value, res);
 721			if (res)
 722				goto fault;
 723			goto success;
 724
 725		case mm_ldp_func:
 726#ifdef CONFIG_64BIT
 727			reg = insn.mm_m_format.rd;
 728			if (reg == 31)
 729				goto sigbus;
 730
 731			if (!access_ok(addr, 16))
 732				goto sigbus;
 733
 734			LoadDW(addr, value, res);
 735			if (res)
 736				goto fault;
 737			regs->regs[reg] = value;
 738			addr += 8;
 739			LoadDW(addr, value, res);
 740			if (res)
 741				goto fault;
 742			regs->regs[reg + 1] = value;
 743			goto success;
 744#endif /* CONFIG_64BIT */
 745
 746			goto sigill;
 747
 748		case mm_sdp_func:
 749#ifdef CONFIG_64BIT
 750			reg = insn.mm_m_format.rd;
 751			if (reg == 31)
 752				goto sigbus;
 753
 754			if (!access_ok(addr, 16))
 755				goto sigbus;
 756
 757			value = regs->regs[reg];
 758			StoreDW(addr, value, res);
 759			if (res)
 760				goto fault;
 761			addr += 8;
 762			value = regs->regs[reg + 1];
 763			StoreDW(addr, value, res);
 764			if (res)
 765				goto fault;
 766			goto success;
 767#endif /* CONFIG_64BIT */
 768
 769			goto sigill;
 770
 771		case mm_lwm32_func:
 772			reg = insn.mm_m_format.rd;
 773			rvar = reg & 0xf;
 774			if ((rvar > 9) || !reg)
 775				goto sigill;
 776			if (reg & 0x10) {
 777				if (!access_ok(addr, 4 * (rvar + 1)))
 778					goto sigbus;
 779			} else {
 780				if (!access_ok(addr, 4 * rvar))
 781					goto sigbus;
 782			}
 783			if (rvar == 9)
 784				rvar = 8;
 785			for (i = 16; rvar; rvar--, i++) {
 786				LoadW(addr, value, res);
 787				if (res)
 788					goto fault;
 789				addr += 4;
 790				regs->regs[i] = value;
 791			}
 792			if ((reg & 0xf) == 9) {
 793				LoadW(addr, value, res);
 794				if (res)
 795					goto fault;
 796				addr += 4;
 797				regs->regs[30] = value;
 798			}
 799			if (reg & 0x10) {
 800				LoadW(addr, value, res);
 801				if (res)
 802					goto fault;
 803				regs->regs[31] = value;
 804			}
 805			goto success;
 806
 807		case mm_swm32_func:
 808			reg = insn.mm_m_format.rd;
 809			rvar = reg & 0xf;
 810			if ((rvar > 9) || !reg)
 811				goto sigill;
 812			if (reg & 0x10) {
 813				if (!access_ok(addr, 4 * (rvar + 1)))
 814					goto sigbus;
 815			} else {
 816				if (!access_ok(addr, 4 * rvar))
 817					goto sigbus;
 818			}
 819			if (rvar == 9)
 820				rvar = 8;
 821			for (i = 16; rvar; rvar--, i++) {
 822				value = regs->regs[i];
 823				StoreW(addr, value, res);
 824				if (res)
 825					goto fault;
 826				addr += 4;
 827			}
 828			if ((reg & 0xf) == 9) {
 829				value = regs->regs[30];
 830				StoreW(addr, value, res);
 831				if (res)
 832					goto fault;
 833				addr += 4;
 834			}
 835			if (reg & 0x10) {
 836				value = regs->regs[31];
 837				StoreW(addr, value, res);
 838				if (res)
 839					goto fault;
 840			}
 841			goto success;
 842
 843		case mm_ldm_func:
 844#ifdef CONFIG_64BIT
 845			reg = insn.mm_m_format.rd;
 846			rvar = reg & 0xf;
 847			if ((rvar > 9) || !reg)
 848				goto sigill;
 849			if (reg & 0x10) {
 850				if (!access_ok(addr, 8 * (rvar + 1)))
 851					goto sigbus;
 852			} else {
 853				if (!access_ok(addr, 8 * rvar))
 854					goto sigbus;
 855			}
 856			if (rvar == 9)
 857				rvar = 8;
 858
 859			for (i = 16; rvar; rvar--, i++) {
 860				LoadDW(addr, value, res);
 861				if (res)
 862					goto fault;
 863				addr += 4;
 864				regs->regs[i] = value;
 865			}
 866			if ((reg & 0xf) == 9) {
 867				LoadDW(addr, value, res);
 868				if (res)
 869					goto fault;
 870				addr += 8;
 871				regs->regs[30] = value;
 872			}
 873			if (reg & 0x10) {
 874				LoadDW(addr, value, res);
 875				if (res)
 876					goto fault;
 877				regs->regs[31] = value;
 878			}
 879			goto success;
 880#endif /* CONFIG_64BIT */
 881
 882			goto sigill;
 883
 884		case mm_sdm_func:
 885#ifdef CONFIG_64BIT
 886			reg = insn.mm_m_format.rd;
 887			rvar = reg & 0xf;
 888			if ((rvar > 9) || !reg)
 889				goto sigill;
 890			if (reg & 0x10) {
 891				if (!access_ok(addr, 8 * (rvar + 1)))
 892					goto sigbus;
 893			} else {
 894				if (!access_ok(addr, 8 * rvar))
 895					goto sigbus;
 896			}
 897			if (rvar == 9)
 898				rvar = 8;
 899
 900			for (i = 16; rvar; rvar--, i++) {
 901				value = regs->regs[i];
 902				StoreDW(addr, value, res);
 903				if (res)
 904					goto fault;
 905				addr += 8;
 906			}
 907			if ((reg & 0xf) == 9) {
 908				value = regs->regs[30];
 909				StoreDW(addr, value, res);
 910				if (res)
 911					goto fault;
 912				addr += 8;
 913			}
 914			if (reg & 0x10) {
 915				value = regs->regs[31];
 916				StoreDW(addr, value, res);
 917				if (res)
 918					goto fault;
 919			}
 920			goto success;
 921#endif /* CONFIG_64BIT */
 922
 923			goto sigill;
 924
 925			/*  LWC2, SWC2, LDC2, SDC2 are not serviced */
 926		}
 927
 928		goto sigbus;
 929
 930	case mm_pool32c_op:
 931		switch (insn.mm_m_format.func) {
 932		case mm_lwu_func:
 933			reg = insn.mm_m_format.rd;
 934			goto loadWU;
 935		}
 936
 937		/*  LL,SC,LLD,SCD are not serviced */
 938		goto sigbus;
 939
 940#ifdef CONFIG_MIPS_FP_SUPPORT
 941	case mm_pool32f_op:
 942		switch (insn.mm_x_format.func) {
 943		case mm_lwxc1_func:
 944		case mm_swxc1_func:
 945		case mm_ldxc1_func:
 946		case mm_sdxc1_func:
 947			goto fpu_emul;
 948		}
 949
 950		goto sigbus;
 951
 952	case mm_ldc132_op:
 953	case mm_sdc132_op:
 954	case mm_lwc132_op:
 955	case mm_swc132_op: {
 956		void __user *fault_addr = NULL;
 957
 958fpu_emul:
 959		/* roll back jump/branch */
 960		regs->cp0_epc = origpc;
 961		regs->regs[31] = orig31;
 962
 963		die_if_kernel("Unaligned FP access in kernel code", regs);
 964		BUG_ON(!used_math());
 965		BUG_ON(!is_fpu_owner());
 966
 967		res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
 968					       &fault_addr);
 969		own_fpu(1);	/* restore FPU state */
 970
 971		/* If something went wrong, signal */
 972		process_fpemu_return(res, fault_addr, 0);
 973
 974		if (res == 0)
 975			goto success;
 976		return;
 977	}
 978#endif /* CONFIG_MIPS_FP_SUPPORT */
 979
 980	case mm_lh32_op:
 981		reg = insn.mm_i_format.rt;
 982		goto loadHW;
 983
 984	case mm_lhu32_op:
 985		reg = insn.mm_i_format.rt;
 986		goto loadHWU;
 987
 988	case mm_lw32_op:
 989		reg = insn.mm_i_format.rt;
 990		goto loadW;
 991
 992	case mm_sh32_op:
 993		reg = insn.mm_i_format.rt;
 994		goto storeHW;
 995
 996	case mm_sw32_op:
 997		reg = insn.mm_i_format.rt;
 998		goto storeW;
 999
1000	case mm_ld32_op:
1001		reg = insn.mm_i_format.rt;
1002		goto loadDW;
1003
1004	case mm_sd32_op:
1005		reg = insn.mm_i_format.rt;
1006		goto storeDW;
1007
1008	case mm_pool16c_op:
1009		switch (insn.mm16_m_format.func) {
1010		case mm_lwm16_op:
1011			reg = insn.mm16_m_format.rlist;
1012			rvar = reg + 1;
1013			if (!access_ok(addr, 4 * rvar))
1014				goto sigbus;
1015
1016			for (i = 16; rvar; rvar--, i++) {
1017				LoadW(addr, value, res);
1018				if (res)
1019					goto fault;
1020				addr += 4;
1021				regs->regs[i] = value;
1022			}
1023			LoadW(addr, value, res);
1024			if (res)
1025				goto fault;
1026			regs->regs[31] = value;
1027
1028			goto success;
1029
1030		case mm_swm16_op:
1031			reg = insn.mm16_m_format.rlist;
1032			rvar = reg + 1;
1033			if (!access_ok(addr, 4 * rvar))
1034				goto sigbus;
1035
1036			for (i = 16; rvar; rvar--, i++) {
1037				value = regs->regs[i];
1038				StoreW(addr, value, res);
1039				if (res)
1040					goto fault;
1041				addr += 4;
1042			}
1043			value = regs->regs[31];
1044			StoreW(addr, value, res);
1045			if (res)
1046				goto fault;
1047
1048			goto success;
1049
1050		}
1051
1052		goto sigbus;
1053
1054	case mm_lhu16_op:
1055		reg = reg16to32[insn.mm16_rb_format.rt];
1056		goto loadHWU;
1057
1058	case mm_lw16_op:
1059		reg = reg16to32[insn.mm16_rb_format.rt];
1060		goto loadW;
1061
1062	case mm_sh16_op:
1063		reg = reg16to32st[insn.mm16_rb_format.rt];
1064		goto storeHW;
1065
1066	case mm_sw16_op:
1067		reg = reg16to32st[insn.mm16_rb_format.rt];
1068		goto storeW;
1069
1070	case mm_lwsp16_op:
1071		reg = insn.mm16_r5_format.rt;
1072		goto loadW;
1073
1074	case mm_swsp16_op:
1075		reg = insn.mm16_r5_format.rt;
1076		goto storeW;
1077
1078	case mm_lwgp16_op:
1079		reg = reg16to32[insn.mm16_r3_format.rt];
1080		goto loadW;
1081
1082	default:
1083		goto sigill;
1084	}
1085
1086loadHW:
1087	if (!access_ok(addr, 2))
1088		goto sigbus;
1089
1090	LoadHW(addr, value, res);
1091	if (res)
1092		goto fault;
1093	regs->regs[reg] = value;
1094	goto success;
1095
1096loadHWU:
1097	if (!access_ok(addr, 2))
1098		goto sigbus;
1099
1100	LoadHWU(addr, value, res);
1101	if (res)
1102		goto fault;
1103	regs->regs[reg] = value;
1104	goto success;
1105
1106loadW:
1107	if (!access_ok(addr, 4))
1108		goto sigbus;
1109
1110	LoadW(addr, value, res);
1111	if (res)
1112		goto fault;
1113	regs->regs[reg] = value;
1114	goto success;
1115
1116loadWU:
1117#ifdef CONFIG_64BIT
1118	/*
1119	 * A 32-bit kernel might be running on a 64-bit processor.  But
1120	 * if we're on a 32-bit processor and an i-cache incoherency
1121	 * or race makes us see a 64-bit instruction here the sdl/sdr
1122	 * would blow up, so for now we don't handle unaligned 64-bit
1123	 * instructions on 32-bit kernels.
1124	 */
1125	if (!access_ok(addr, 4))
1126		goto sigbus;
1127
1128	LoadWU(addr, value, res);
1129	if (res)
1130		goto fault;
1131	regs->regs[reg] = value;
1132	goto success;
1133#endif /* CONFIG_64BIT */
1134
1135	/* Cannot handle 64-bit instructions in 32-bit kernel */
1136	goto sigill;
1137
1138loadDW:
1139#ifdef CONFIG_64BIT
1140	/*
1141	 * A 32-bit kernel might be running on a 64-bit processor.  But
1142	 * if we're on a 32-bit processor and an i-cache incoherency
1143	 * or race makes us see a 64-bit instruction here the sdl/sdr
1144	 * would blow up, so for now we don't handle unaligned 64-bit
1145	 * instructions on 32-bit kernels.
1146	 */
1147	if (!access_ok(addr, 8))
1148		goto sigbus;
1149
1150	LoadDW(addr, value, res);
1151	if (res)
1152		goto fault;
1153	regs->regs[reg] = value;
1154	goto success;
1155#endif /* CONFIG_64BIT */
1156
1157	/* Cannot handle 64-bit instructions in 32-bit kernel */
1158	goto sigill;
1159
1160storeHW:
1161	if (!access_ok(addr, 2))
1162		goto sigbus;
1163
1164	value = regs->regs[reg];
1165	StoreHW(addr, value, res);
1166	if (res)
1167		goto fault;
1168	goto success;
1169
1170storeW:
1171	if (!access_ok(addr, 4))
1172		goto sigbus;
1173
1174	value = regs->regs[reg];
1175	StoreW(addr, value, res);
1176	if (res)
1177		goto fault;
1178	goto success;
1179
1180storeDW:
1181#ifdef CONFIG_64BIT
1182	/*
1183	 * A 32-bit kernel might be running on a 64-bit processor.  But
1184	 * if we're on a 32-bit processor and an i-cache incoherency
1185	 * or race makes us see a 64-bit instruction here the sdl/sdr
1186	 * would blow up, so for now we don't handle unaligned 64-bit
1187	 * instructions on 32-bit kernels.
1188	 */
1189	if (!access_ok(addr, 8))
1190		goto sigbus;
1191
1192	value = regs->regs[reg];
1193	StoreDW(addr, value, res);
1194	if (res)
1195		goto fault;
1196	goto success;
1197#endif /* CONFIG_64BIT */
1198
1199	/* Cannot handle 64-bit instructions in 32-bit kernel */
1200	goto sigill;
1201
1202success:
1203	regs->cp0_epc = contpc;	/* advance or branch */
1204
1205#ifdef CONFIG_DEBUG_FS
1206	unaligned_instructions++;
1207#endif
1208	return;
1209
1210fault:
1211	/* roll back jump/branch */
1212	regs->cp0_epc = origpc;
1213	regs->regs[31] = orig31;
1214	/* Did we have an exception handler installed? */
1215	if (fixup_exception(regs))
1216		return;
1217
1218	die_if_kernel("Unhandled kernel unaligned access", regs);
1219	force_sig(SIGSEGV);
1220
1221	return;
1222
1223sigbus:
1224	die_if_kernel("Unhandled kernel unaligned access", regs);
1225	force_sig(SIGBUS);
1226
1227	return;
1228
1229sigill:
1230	die_if_kernel
1231	    ("Unhandled kernel unaligned access or invalid instruction", regs);
1232	force_sig(SIGILL);
1233}
1234
1235static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr)
1236{
1237	unsigned long value;
1238	unsigned int res;
1239	int reg;
1240	unsigned long orig31;
1241	u16 __user *pc16;
1242	unsigned long origpc;
1243	union mips16e_instruction mips16inst, oldinst;
1244	unsigned int opcode;
1245	int extended = 0;
1246
1247	origpc = regs->cp0_epc;
1248	orig31 = regs->regs[31];
1249	pc16 = (unsigned short __user *)msk_isa16_mode(origpc);
1250	/*
1251	 * This load never faults.
1252	 */
1253	__get_user(mips16inst.full, pc16);
1254	oldinst = mips16inst;
1255
1256	/* skip EXTEND instruction */
1257	if (mips16inst.ri.opcode == MIPS16e_extend_op) {
1258		extended = 1;
1259		pc16++;
1260		__get_user(mips16inst.full, pc16);
1261	} else if (delay_slot(regs)) {
1262		/*  skip jump instructions */
1263		/*  JAL/JALX are 32 bits but have OPCODE in first short int */
1264		if (mips16inst.ri.opcode == MIPS16e_jal_op)
1265			pc16++;
1266		pc16++;
1267		if (get_user(mips16inst.full, pc16))
1268			goto sigbus;
1269	}
1270
1271	opcode = mips16inst.ri.opcode;
1272	switch (opcode) {
1273	case MIPS16e_i64_op:	/* I64 or RI64 instruction */
1274		switch (mips16inst.i64.func) {	/* I64/RI64 func field check */
1275		case MIPS16e_ldpc_func:
1276		case MIPS16e_ldsp_func:
1277			reg = reg16to32[mips16inst.ri64.ry];
1278			goto loadDW;
1279
1280		case MIPS16e_sdsp_func:
1281			reg = reg16to32[mips16inst.ri64.ry];
1282			goto writeDW;
1283
1284		case MIPS16e_sdrasp_func:
1285			reg = 29;	/* GPRSP */
1286			goto writeDW;
1287		}
1288
1289		goto sigbus;
1290
1291	case MIPS16e_swsp_op:
1292		reg = reg16to32[mips16inst.ri.rx];
1293		if (extended && cpu_has_mips16e2)
1294			switch (mips16inst.ri.imm >> 5) {
1295			case 0:		/* SWSP */
1296			case 1:		/* SWGP */
1297				break;
1298			case 2:		/* SHGP */
1299				opcode = MIPS16e_sh_op;
1300				break;
1301			default:
1302				goto sigbus;
1303			}
1304		break;
1305
1306	case MIPS16e_lwpc_op:
1307		reg = reg16to32[mips16inst.ri.rx];
1308		break;
1309
1310	case MIPS16e_lwsp_op:
1311		reg = reg16to32[mips16inst.ri.rx];
1312		if (extended && cpu_has_mips16e2)
1313			switch (mips16inst.ri.imm >> 5) {
1314			case 0:		/* LWSP */
1315			case 1:		/* LWGP */
1316				break;
1317			case 2:		/* LHGP */
1318				opcode = MIPS16e_lh_op;
1319				break;
1320			case 4:		/* LHUGP */
1321				opcode = MIPS16e_lhu_op;
1322				break;
1323			default:
1324				goto sigbus;
1325			}
1326		break;
1327
1328	case MIPS16e_i8_op:
1329		if (mips16inst.i8.func != MIPS16e_swrasp_func)
1330			goto sigbus;
1331		reg = 29;	/* GPRSP */
1332		break;
1333
1334	default:
1335		reg = reg16to32[mips16inst.rri.ry];
1336		break;
1337	}
1338
1339	switch (opcode) {
1340
1341	case MIPS16e_lb_op:
1342	case MIPS16e_lbu_op:
1343	case MIPS16e_sb_op:
1344		goto sigbus;
1345
1346	case MIPS16e_lh_op:
1347		if (!access_ok(addr, 2))
1348			goto sigbus;
1349
1350		LoadHW(addr, value, res);
1351		if (res)
1352			goto fault;
1353		MIPS16e_compute_return_epc(regs, &oldinst);
1354		regs->regs[reg] = value;
1355		break;
1356
1357	case MIPS16e_lhu_op:
1358		if (!access_ok(addr, 2))
1359			goto sigbus;
1360
1361		LoadHWU(addr, value, res);
1362		if (res)
1363			goto fault;
1364		MIPS16e_compute_return_epc(regs, &oldinst);
1365		regs->regs[reg] = value;
1366		break;
1367
1368	case MIPS16e_lw_op:
1369	case MIPS16e_lwpc_op:
1370	case MIPS16e_lwsp_op:
1371		if (!access_ok(addr, 4))
1372			goto sigbus;
1373
1374		LoadW(addr, value, res);
1375		if (res)
1376			goto fault;
1377		MIPS16e_compute_return_epc(regs, &oldinst);
1378		regs->regs[reg] = value;
1379		break;
1380
1381	case MIPS16e_lwu_op:
1382#ifdef CONFIG_64BIT
1383		/*
1384		 * A 32-bit kernel might be running on a 64-bit processor.  But
1385		 * if we're on a 32-bit processor and an i-cache incoherency
1386		 * or race makes us see a 64-bit instruction here the sdl/sdr
1387		 * would blow up, so for now we don't handle unaligned 64-bit
1388		 * instructions on 32-bit kernels.
1389		 */
1390		if (!access_ok(addr, 4))
1391			goto sigbus;
1392
1393		LoadWU(addr, value, res);
1394		if (res)
1395			goto fault;
1396		MIPS16e_compute_return_epc(regs, &oldinst);
1397		regs->regs[reg] = value;
1398		break;
1399#endif /* CONFIG_64BIT */
1400
1401		/* Cannot handle 64-bit instructions in 32-bit kernel */
1402		goto sigill;
1403
1404	case MIPS16e_ld_op:
1405loadDW:
1406#ifdef CONFIG_64BIT
1407		/*
1408		 * A 32-bit kernel might be running on a 64-bit processor.  But
1409		 * if we're on a 32-bit processor and an i-cache incoherency
1410		 * or race makes us see a 64-bit instruction here the sdl/sdr
1411		 * would blow up, so for now we don't handle unaligned 64-bit
1412		 * instructions on 32-bit kernels.
1413		 */
1414		if (!access_ok(addr, 8))
1415			goto sigbus;
1416
1417		LoadDW(addr, value, res);
1418		if (res)
1419			goto fault;
1420		MIPS16e_compute_return_epc(regs, &oldinst);
1421		regs->regs[reg] = value;
1422		break;
1423#endif /* CONFIG_64BIT */
1424
1425		/* Cannot handle 64-bit instructions in 32-bit kernel */
1426		goto sigill;
1427
1428	case MIPS16e_sh_op:
1429		if (!access_ok(addr, 2))
1430			goto sigbus;
1431
1432		MIPS16e_compute_return_epc(regs, &oldinst);
1433		value = regs->regs[reg];
1434		StoreHW(addr, value, res);
1435		if (res)
1436			goto fault;
1437		break;
1438
1439	case MIPS16e_sw_op:
1440	case MIPS16e_swsp_op:
1441	case MIPS16e_i8_op:	/* actually - MIPS16e_swrasp_func */
1442		if (!access_ok(addr, 4))
1443			goto sigbus;
1444
1445		MIPS16e_compute_return_epc(regs, &oldinst);
1446		value = regs->regs[reg];
1447		StoreW(addr, value, res);
1448		if (res)
1449			goto fault;
1450		break;
1451
1452	case MIPS16e_sd_op:
1453writeDW:
1454#ifdef CONFIG_64BIT
1455		/*
1456		 * A 32-bit kernel might be running on a 64-bit processor.  But
1457		 * if we're on a 32-bit processor and an i-cache incoherency
1458		 * or race makes us see a 64-bit instruction here the sdl/sdr
1459		 * would blow up, so for now we don't handle unaligned 64-bit
1460		 * instructions on 32-bit kernels.
1461		 */
1462		if (!access_ok(addr, 8))
1463			goto sigbus;
1464
1465		MIPS16e_compute_return_epc(regs, &oldinst);
1466		value = regs->regs[reg];
1467		StoreDW(addr, value, res);
1468		if (res)
1469			goto fault;
1470		break;
1471#endif /* CONFIG_64BIT */
1472
1473		/* Cannot handle 64-bit instructions in 32-bit kernel */
1474		goto sigill;
1475
1476	default:
1477		/*
1478		 * Pheeee...  We encountered an yet unknown instruction or
1479		 * cache coherence problem.  Die sucker, die ...
1480		 */
1481		goto sigill;
1482	}
1483
1484#ifdef CONFIG_DEBUG_FS
1485	unaligned_instructions++;
1486#endif
1487
1488	return;
1489
1490fault:
1491	/* roll back jump/branch */
1492	regs->cp0_epc = origpc;
1493	regs->regs[31] = orig31;
1494	/* Did we have an exception handler installed? */
1495	if (fixup_exception(regs))
1496		return;
1497
1498	die_if_kernel("Unhandled kernel unaligned access", regs);
1499	force_sig(SIGSEGV);
1500
1501	return;
1502
1503sigbus:
1504	die_if_kernel("Unhandled kernel unaligned access", regs);
1505	force_sig(SIGBUS);
1506
1507	return;
1508
1509sigill:
1510	die_if_kernel
1511	    ("Unhandled kernel unaligned access or invalid instruction", regs);
1512	force_sig(SIGILL);
1513}
1514
1515asmlinkage void do_ade(struct pt_regs *regs)
1516{
1517	enum ctx_state prev_state;
1518	unsigned int __user *pc;
1519	mm_segment_t seg;
1520
1521	prev_state = exception_enter();
1522	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
1523			1, regs, regs->cp0_badvaddr);
1524	/*
1525	 * Did we catch a fault trying to load an instruction?
1526	 */
1527	if (regs->cp0_badvaddr == regs->cp0_epc)
1528		goto sigbus;
1529
1530	if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
1531		goto sigbus;
1532	if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
1533		goto sigbus;
1534
1535	/*
1536	 * Do branch emulation only if we didn't forward the exception.
1537	 * This is all so but ugly ...
1538	 */
1539
1540	/*
1541	 * Are we running in microMIPS mode?
1542	 */
1543	if (get_isa16_mode(regs->cp0_epc)) {
1544		/*
1545		 * Did we catch a fault trying to load an instruction in
1546		 * 16-bit mode?
1547		 */
1548		if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc))
1549			goto sigbus;
1550		if (unaligned_action == UNALIGNED_ACTION_SHOW)
1551			show_registers(regs);
1552
1553		if (cpu_has_mmips) {
1554			seg = get_fs();
1555			if (!user_mode(regs))
1556				set_fs(KERNEL_DS);
1557			emulate_load_store_microMIPS(regs,
1558				(void __user *)regs->cp0_badvaddr);
1559			set_fs(seg);
1560
1561			return;
1562		}
1563
1564		if (cpu_has_mips16) {
1565			seg = get_fs();
1566			if (!user_mode(regs))
1567				set_fs(KERNEL_DS);
1568			emulate_load_store_MIPS16e(regs,
1569				(void __user *)regs->cp0_badvaddr);
1570			set_fs(seg);
1571
1572			return;
1573		}
1574
1575		goto sigbus;
1576	}
1577
1578	if (unaligned_action == UNALIGNED_ACTION_SHOW)
1579		show_registers(regs);
1580	pc = (unsigned int __user *)exception_epc(regs);
1581
1582	seg = get_fs();
1583	if (!user_mode(regs))
1584		set_fs(KERNEL_DS);
1585	emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);
1586	set_fs(seg);
1587
1588	return;
1589
1590sigbus:
1591	die_if_kernel("Kernel unaligned instruction access", regs);
1592	force_sig(SIGBUS);
1593
1594	/*
1595	 * XXX On return from the signal handler we should advance the epc
1596	 */
1597	exception_exit(prev_state);
1598}
1599
1600#ifdef CONFIG_DEBUG_FS
1601static int __init debugfs_unaligned(void)
1602{
1603	debugfs_create_u32("unaligned_instructions", S_IRUGO, mips_debugfs_dir,
1604			   &unaligned_instructions);
1605	debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
1606			   mips_debugfs_dir, &unaligned_action);
1607	return 0;
1608}
1609arch_initcall(debugfs_unaligned);
1610#endif
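
A note on the helpers used in the v5.9 listing above: where the v3.1 version open-codes the lwl/lwr, swl/swr and related sequences as inline assembly directly inside emulate_load_store_insn(), v5.9 calls macro helpers such as LoadW(), LoadHW(), StoreW() and StoreDW() pulled in via <asm/unaligned-emul.h>. The sketch below is illustrative only: it simply wraps the big-endian lw_op sequence from the v3.1 listing in macro form to show the shape of such a helper; the real header also carries little-endian, EVA and MIPS R6 variants, so the name and exact layout here are an assumption, not the upstream definition.

/*
 * Illustrative sketch only -- not the upstream <asm/unaligned-emul.h>.
 * It wraps the v3.1 big-endian lw_op inline assembly shown above: load the
 * unaligned word with lwl/lwr, set res to 0 on success, or to -EFAULT via
 * the .fixup/__ex_table handler if either access faults.  STR() and PTR
 * are the helpers visible in the v3.1 listing (<asm/asm.h>).
 */
#define LoadW_sketch(addr, value, res)				\
	__asm__ __volatile__ (					\
		"1:\tlwl\t%0, (%2)\n"				\
		"2:\tlwr\t%0, 3(%2)\n\t"			\
		"li\t%1, 0\n"					\
		"3:\n\t"					\
		".section\t.fixup,\"ax\"\n\t"			\
		"4:\tli\t%1, %3\n\t"				\
		"j\t3b\n\t"					\
		".previous\n\t"					\
		".section\t__ex_table,\"a\"\n\t"		\
		STR(PTR)"\t1b, 4b\n\t"				\
		STR(PTR)"\t2b, 4b\n\t"				\
		".previous"					\
		: "=&r" (value), "=r" (res)			\
		: "r" (addr), "i" (-EFAULT))

Callers then follow the same pattern seen throughout both listings:

	LoadW_sketch(addr, value, res);
	if (res)
		goto fault;
	compute_return_epc(regs);
	regs->regs[insn.i_format.rt] = value;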