/*
 * Handle unaligned accesses by emulation.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 *
 * This file contains the exception handler for address error exceptions,
 * with the special capability to execute faulting instructions in software.
 * The handler does not try to handle the case when the program counter
 * points to an address not aligned to a word boundary.
 *
 * Putting data at unaligned addresses is bad practice even on Intel, where
 * only performance suffers. Much worse is that such code is not portable.
 * Because several programs die on MIPS due to alignment problems, I decided
 * to implement this handler anyway, though I originally didn't intend to do
 * this at all for user code.
 *
 * For now I enable fixing of address errors by default to make life easier.
 * However, I intend to disable this at some point in the future, once the
 * alignment problems in user programs have been fixed. For programmers this
 * is the right way to go.
 *
 * Fixing address errors is a per-process option. The option is inherited
 * across fork(2) and execve(2) calls. If you really want to use the
 * option in your user programs - I strongly discourage the use of the
 * software emulation - use the following code in your userland stuff:
 *
 * #include <sys/sysmips.h>
 *
 * ...
 * sysmips(MIPS_FIXADE, x);
 * ...
 *
 * The argument x is 0 to disable software emulation; any other value
 * enables it.
 *
 * Below is a little program to play around with this feature.
 *
 * #include <stdio.h>
 * #include <stdlib.h>
 * #include <sys/sysmips.h>
 *
 * struct foo {
 *         unsigned char bar[8];
 * };
 *
 * int main(int argc, char *argv[])
 * {
 *         struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
 *         unsigned int *p = (unsigned int *) (x.bar + 3);
 *         int i;
 *
 *         if (argc > 1)
 *                 sysmips(MIPS_FIXADE, atoi(argv[1]));
 *
 *         printf("*p = %08x\n", *p);
 *
 *         *p = 0xdeadface;
 *
 *         for (i = 0; i <= 7; i++)
 *                 printf("%02x ", x.bar[i]);
 *         printf("\n");
 *
 *         return 0;
 * }
 *
 * Coprocessor loads are not supported; I think this case is unimportant
 * in practice.
 *
 * TODO: Handle ndc (attempted store to doubleword in uncached memory)
 *       exception for the R6000.
 *       A store crossing a page boundary might be executed only partially.
 *       Undo the partial store in this case.
 */
#include <linux/mm.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <linux/perf_event.h>

#include <asm/asm.h>
#include <asm/branch.h>
#include <asm/byteorder.h>
#include <asm/cop2.h>
#include <asm/inst.h>
#include <asm/uaccess.h>

#define STR(x) __STR(x)
#define __STR(x) #x

enum {
        UNALIGNED_ACTION_QUIET,
        UNALIGNED_ACTION_SIGNAL,
        UNALIGNED_ACTION_SHOW,
};
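
/*
 * How each action is applied (see do_ade() below): QUIET silently fixes up
 * the access, SIGNAL forwards it as a SIGBUS instead of emulating it, and
 * SHOW dumps the registers before fixing it up. When CONFIG_DEBUG_FS is
 * enabled, unaligned_action can be changed at run time through the
 * "unaligned_action" file created at the bottom of this file in the MIPS
 * debugfs directory (typically /sys/kernel/debug/mips), and
 * "unaligned_instructions" counts the accesses that were emulated.
 */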
#ifdef CONFIG_DEBUG_FS
static u32 unaligned_instructions;
static u32 unaligned_action;
#else
#define unaligned_action UNALIGNED_ACTION_QUIET
#endif
extern void show_registers(struct pt_regs *regs);

static void emulate_load_store_insn(struct pt_regs *regs,
        void __user *addr, unsigned int __user *pc)
{
        union mips_instruction insn;
        unsigned long value;
        unsigned int res;

        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);

        /*
         * This load never faults.
         */
        __get_user(insn.word, pc);

        switch (insn.i_format.opcode) {
        /*
         * These are instructions that a compiler doesn't generate. We
         * can therefore assume that the code is MIPS-aware and really
         * buggy. Emulating these instructions would break the semantics
         * anyway.
         */
        case ll_op:
        case lld_op:
        case sc_op:
        case scd_op:

        /*
         * For these instructions the only way to create an address
         * error is an attempted access to kernel/supervisor address
         * space.
         */
        case ldl_op:
        case ldr_op:
        case lwl_op:
        case lwr_op:
        case sdl_op:
        case sdr_op:
        case swl_op:
        case swr_op:
        case lb_op:
        case lbu_op:
        case sb_op:
                goto sigbus;

        /*
         * The remaining opcodes are the ones that are really of interest.
         */
        case lh_op:
                if (!access_ok(VERIFY_READ, addr, 2))
                        goto sigbus;

                __asm__ __volatile__ (".set\tnoat\n"
#ifdef __BIG_ENDIAN
                        "1:\tlb\t%0, 0(%2)\n"
                        "2:\tlbu\t$1, 1(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
                        "1:\tlb\t%0, 1(%2)\n"
                        "2:\tlbu\t$1, 0(%2)\n\t"
#endif
                        "sll\t%0, 0x8\n\t"
                        "or\t%0, $1\n\t"
                        "li\t%1, 0\n"
                        "3:\t.set\tat\n\t"
                        ".section\t.fixup,\"ax\"\n\t"
                        "4:\tli\t%1, %3\n\t"
                        "j\t3b\n\t"
                        ".previous\n\t"
                        ".section\t__ex_table,\"a\"\n\t"
                        STR(PTR)"\t1b, 4b\n\t"
                        STR(PTR)"\t2b, 4b\n\t"
                        ".previous"
                        : "=&r" (value), "=r" (res)
                        : "r" (addr), "i" (-EFAULT));
                if (res)
                        goto fault;
                compute_return_epc(regs);
                regs->regs[insn.i_format.rt] = value;
                break;

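        /*
         * A note on the pattern shared by all the emulation blocks in this
         * function: labels 1: and 2: mark the two partial accesses, the
         * .fixup stub at 4: loads -EFAULT into 'res' and jumps back to 3:,
         * and the __ex_table entries tell the page fault handler to resume
         * at 4: if either access faults. In plain C, what the lh emulation
         * above computes is roughly the following (sketch only; the real
         * code has to take the exception-table path on a fault):
         *
         *      unsigned char *p = addr;
         *      signed char hi;         // high byte, sign-extended by lb
         *      unsigned char lo;       // low byte, zero-extended by lbu
         *
         *      hi = ((signed char *)p)[MSB];
         *      lo = p[LSB];
         *      value = ((long)hi << 8) | lo;
         *
         * where MSB/LSB are 0/1 on big-endian and 1/0 on little-endian.
         */
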
        case lw_op:
                if (!access_ok(VERIFY_READ, addr, 4))
                        goto sigbus;

                __asm__ __volatile__ (
#ifdef __BIG_ENDIAN
                        "1:\tlwl\t%0, (%2)\n"
                        "2:\tlwr\t%0, 3(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
                        "1:\tlwl\t%0, 3(%2)\n"
                        "2:\tlwr\t%0, (%2)\n\t"
#endif
                        "li\t%1, 0\n"
                        "3:\t.section\t.fixup,\"ax\"\n\t"
                        "4:\tli\t%1, %3\n\t"
                        "j\t3b\n\t"
                        ".previous\n\t"
                        ".section\t__ex_table,\"a\"\n\t"
                        STR(PTR)"\t1b, 4b\n\t"
                        STR(PTR)"\t2b, 4b\n\t"
                        ".previous"
                        : "=&r" (value), "=r" (res)
                        : "r" (addr), "i" (-EFAULT));
                if (res)
                        goto fault;
                compute_return_epc(regs);
                regs->regs[insn.i_format.rt] = value;
                break;

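        /*
         * The lwl/lwr pair above loads the unaligned word in two accesses:
         * lwl fetches the bytes from the aligned word containing the start
         * of the operand and merges them into the most significant end of
         * the register, and lwr does the same for the remaining bytes from
         * the other end. Each of the two instructions only touches bytes
         * within a single aligned word, so neither can itself take an
         * address error; a page fault on either one is caught through the
         * __ex_table entries and reported in 'res'.
         */
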
        case lhu_op:
                if (!access_ok(VERIFY_READ, addr, 2))
                        goto sigbus;

                __asm__ __volatile__ (
                        ".set\tnoat\n"
#ifdef __BIG_ENDIAN
                        "1:\tlbu\t%0, 0(%2)\n"
                        "2:\tlbu\t$1, 1(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
                        "1:\tlbu\t%0, 1(%2)\n"
                        "2:\tlbu\t$1, 0(%2)\n\t"
#endif
                        "sll\t%0, 0x8\n\t"
                        "or\t%0, $1\n\t"
                        "li\t%1, 0\n"
                        "3:\t.set\tat\n\t"
                        ".section\t.fixup,\"ax\"\n\t"
                        "4:\tli\t%1, %3\n\t"
                        "j\t3b\n\t"
                        ".previous\n\t"
                        ".section\t__ex_table,\"a\"\n\t"
                        STR(PTR)"\t1b, 4b\n\t"
                        STR(PTR)"\t2b, 4b\n\t"
                        ".previous"
                        : "=&r" (value), "=r" (res)
                        : "r" (addr), "i" (-EFAULT));
                if (res)
                        goto fault;
                compute_return_epc(regs);
                regs->regs[insn.i_format.rt] = value;
                break;

        case lwu_op:
#ifdef CONFIG_64BIT
                /*
                 * A 32-bit kernel might be running on a 64-bit processor.  But
                 * if we're on a 32-bit processor and an i-cache incoherency
                 * or race makes us see a 64-bit instruction here the sdl/sdr
                 * would blow up, so for now we don't handle unaligned 64-bit
                 * instructions on 32-bit kernels.
                 */
                if (!access_ok(VERIFY_READ, addr, 4))
                        goto sigbus;

                __asm__ __volatile__ (
#ifdef __BIG_ENDIAN
                        "1:\tlwl\t%0, (%2)\n"
                        "2:\tlwr\t%0, 3(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
                        "1:\tlwl\t%0, 3(%2)\n"
                        "2:\tlwr\t%0, (%2)\n\t"
#endif
                        "dsll\t%0, %0, 32\n\t"
                        "dsrl\t%0, %0, 32\n\t"
                        "li\t%1, 0\n"
                        "3:\t.section\t.fixup,\"ax\"\n\t"
                        "4:\tli\t%1, %3\n\t"
                        "j\t3b\n\t"
                        ".previous\n\t"
                        ".section\t__ex_table,\"a\"\n\t"
                        STR(PTR)"\t1b, 4b\n\t"
                        STR(PTR)"\t2b, 4b\n\t"
                        ".previous"
                        : "=&r" (value), "=r" (res)
                        : "r" (addr), "i" (-EFAULT));
                if (res)
                        goto fault;
                compute_return_epc(regs);
                regs->regs[insn.i_format.rt] = value;
                break;
#endif /* CONFIG_64BIT */

                /* Cannot handle 64-bit instructions in 32-bit kernel */
                goto sigill;

        case ld_op:
#ifdef CONFIG_64BIT
                /*
                 * A 32-bit kernel might be running on a 64-bit processor.  But
                 * if we're on a 32-bit processor and an i-cache incoherency
                 * or race makes us see a 64-bit instruction here the sdl/sdr
                 * would blow up, so for now we don't handle unaligned 64-bit
                 * instructions on 32-bit kernels.
                 */
                if (!access_ok(VERIFY_READ, addr, 8))
                        goto sigbus;

                __asm__ __volatile__ (
#ifdef __BIG_ENDIAN
                        "1:\tldl\t%0, (%2)\n"
                        "2:\tldr\t%0, 7(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
                        "1:\tldl\t%0, 7(%2)\n"
                        "2:\tldr\t%0, (%2)\n\t"
#endif
                        "li\t%1, 0\n"
                        "3:\t.section\t.fixup,\"ax\"\n\t"
                        "4:\tli\t%1, %3\n\t"
                        "j\t3b\n\t"
                        ".previous\n\t"
                        ".section\t__ex_table,\"a\"\n\t"
                        STR(PTR)"\t1b, 4b\n\t"
                        STR(PTR)"\t2b, 4b\n\t"
                        ".previous"
                        : "=&r" (value), "=r" (res)
                        : "r" (addr), "i" (-EFAULT));
                if (res)
                        goto fault;
                compute_return_epc(regs);
                regs->regs[insn.i_format.rt] = value;
                break;
#endif /* CONFIG_64BIT */

                /* Cannot handle 64-bit instructions in 32-bit kernel */
                goto sigill;

        case sh_op:
                if (!access_ok(VERIFY_WRITE, addr, 2))
                        goto sigbus;

                value = regs->regs[insn.i_format.rt];
                __asm__ __volatile__ (
#ifdef __BIG_ENDIAN
                        ".set\tnoat\n"
                        "1:\tsb\t%1, 1(%2)\n\t"
                        "srl\t$1, %1, 0x8\n"
                        "2:\tsb\t$1, 0(%2)\n\t"
                        ".set\tat\n\t"
#endif
#ifdef __LITTLE_ENDIAN
                        ".set\tnoat\n"
                        "1:\tsb\t%1, 0(%2)\n\t"
                        "srl\t$1, %1, 0x8\n"
                        "2:\tsb\t$1, 1(%2)\n\t"
                        ".set\tat\n\t"
#endif
                        "li\t%0, 0\n"
                        "3:\n\t"
                        ".section\t.fixup,\"ax\"\n\t"
                        "4:\tli\t%0, %3\n\t"
                        "j\t3b\n\t"
                        ".previous\n\t"
                        ".section\t__ex_table,\"a\"\n\t"
                        STR(PTR)"\t1b, 4b\n\t"
                        STR(PTR)"\t2b, 4b\n\t"
                        ".previous"
                        : "=r" (res)
                        : "r" (value), "r" (addr), "i" (-EFAULT));
                if (res)
                        goto fault;
                compute_return_epc(regs);
                break;

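        /*
         * The halfword store above is done as two independent byte stores,
         * so it works for any alignment. Note the caveat from the TODO at
         * the top of this file: if the two bytes straddle a page boundary
         * and the second store faults, the first byte has already been
         * written and is not undone here.
         */
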
        case sw_op:
                if (!access_ok(VERIFY_WRITE, addr, 4))
                        goto sigbus;

                value = regs->regs[insn.i_format.rt];
                __asm__ __volatile__ (
#ifdef __BIG_ENDIAN
                        "1:\tswl\t%1, (%2)\n"
                        "2:\tswr\t%1, 3(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
                        "1:\tswl\t%1, 3(%2)\n"
                        "2:\tswr\t%1, (%2)\n\t"
#endif
                        "li\t%0, 0\n"
                        "3:\n\t"
                        ".section\t.fixup,\"ax\"\n\t"
                        "4:\tli\t%0, %3\n\t"
                        "j\t3b\n\t"
                        ".previous\n\t"
                        ".section\t__ex_table,\"a\"\n\t"
                        STR(PTR)"\t1b, 4b\n\t"
                        STR(PTR)"\t2b, 4b\n\t"
                        ".previous"
                        : "=r" (res)
                        : "r" (value), "r" (addr), "i" (-EFAULT));
                if (res)
                        goto fault;
                compute_return_epc(regs);
                break;

        case sd_op:
#ifdef CONFIG_64BIT
                /*
                 * A 32-bit kernel might be running on a 64-bit processor.  But
                 * if we're on a 32-bit processor and an i-cache incoherency
                 * or race makes us see a 64-bit instruction here the sdl/sdr
                 * would blow up, so for now we don't handle unaligned 64-bit
                 * instructions on 32-bit kernels.
                 */
                if (!access_ok(VERIFY_WRITE, addr, 8))
                        goto sigbus;

                value = regs->regs[insn.i_format.rt];
                __asm__ __volatile__ (
#ifdef __BIG_ENDIAN
                        "1:\tsdl\t%1, (%2)\n"
                        "2:\tsdr\t%1, 7(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
                        "1:\tsdl\t%1, 7(%2)\n"
                        "2:\tsdr\t%1, (%2)\n\t"
#endif
                        "li\t%0, 0\n"
                        "3:\n\t"
                        ".section\t.fixup,\"ax\"\n\t"
                        "4:\tli\t%0, %3\n\t"
                        "j\t3b\n\t"
                        ".previous\n\t"
                        ".section\t__ex_table,\"a\"\n\t"
                        STR(PTR)"\t1b, 4b\n\t"
                        STR(PTR)"\t2b, 4b\n\t"
                        ".previous"
                        : "=r" (res)
                        : "r" (value), "r" (addr), "i" (-EFAULT));
                if (res)
                        goto fault;
                compute_return_epc(regs);
                break;
#endif /* CONFIG_64BIT */

                /* Cannot handle 64-bit instructions in 32-bit kernel */
                goto sigill;

        case lwc1_op:
        case ldc1_op:
        case swc1_op:
        case sdc1_op:
                /*
                 * I herewith declare: this does not happen.  So send SIGBUS.
                 */
                goto sigbus;

        /*
         * COP2 is available to the implementor for application-specific use.
         * It's up to applications to register a notifier chain and do
         * whatever they have to do, including possible sending of signals.
         */
        case lwc2_op:
                cu2_notifier_call_chain(CU2_LWC2_OP, regs);
                break;

        case ldc2_op:
                cu2_notifier_call_chain(CU2_LDC2_OP, regs);
                break;

        case swc2_op:
                cu2_notifier_call_chain(CU2_SWC2_OP, regs);
                break;

        case sdc2_op:
                cu2_notifier_call_chain(CU2_SDC2_OP, regs);
                break;
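
        /*
         * A platform that actually owns COP2 would register a handler for
         * these events, roughly like the sketch below (see asm/cop2.h for
         * the notifier interface this assumes; the faulting pt_regs are
         * passed as the notifier data). my_cu2_call and its registration
         * are only an illustration, not code from this file:
         *
         *      static int my_cu2_call(struct notifier_block *nb,
         *                             unsigned long action, void *data)
         *      {
         *              struct pt_regs *regs = data;
         *
         *              switch (action) {
         *              case CU2_LWC2_OP:
         *              case CU2_LDC2_OP:
         *              case CU2_SWC2_OP:
         *              case CU2_SDC2_OP:
         *                      // decode the instruction from 'regs' and
         *                      // emulate or reject the unaligned access
         *                      return NOTIFY_STOP;
         *              }
         *              return NOTIFY_OK;
         *      }
         *
         * and, from the platform setup code:
         *
         *      cu2_notifier(my_cu2_call, 0);
         */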

        default:
                /*
                 * Pheeee...  We encountered a yet unknown instruction or
                 * cache coherence problem.  Die sucker, die ...
                 */
                goto sigill;
        }

#ifdef CONFIG_DEBUG_FS
        unaligned_instructions++;
#endif

        return;

fault:
        /* Did we have an exception handler installed? */
        if (fixup_exception(regs))
                return;

        die_if_kernel("Unhandled kernel unaligned access", regs);
        force_sig(SIGSEGV, current);

        return;

sigbus:
        die_if_kernel("Unhandled kernel unaligned access", regs);
        force_sig(SIGBUS, current);

        return;

sigill:
        die_if_kernel("Unhandled kernel unaligned access or invalid instruction", regs);
        force_sig(SIGILL, current);
}

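/*
 * do_ade() is the entry point for address error exceptions. It decides
 * whether the faulting access should be emulated at all (not an instruction
 * fetch, not MIPS16, fixups enabled for the task or the fault taken in
 * kernel mode, and not overridden by unaligned_action) and then hands the
 * access to emulate_load_store_insn(), with the address limit temporarily
 * raised to KERNEL_DS for kernel-mode faults so that the emulator's
 * user-access helpers accept kernel addresses.
 */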
asmlinkage void do_ade(struct pt_regs *regs)
{
        unsigned int __user *pc;
        mm_segment_t seg;

        perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
                        1, regs, regs->cp0_badvaddr);
        /*
         * Did we catch a fault trying to load an instruction?
         * Or are we running in MIPS16 mode?
         */
        if ((regs->cp0_badvaddr == regs->cp0_epc) || (regs->cp0_epc & 0x1))
                goto sigbus;

        pc = (unsigned int __user *) exception_epc(regs);
        if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
                goto sigbus;
        if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
                goto sigbus;
        else if (unaligned_action == UNALIGNED_ACTION_SHOW)
                show_registers(regs);

        /*
         * Do branch emulation only if we didn't forward the exception.
         * This is all so very ugly ...
         */
        seg = get_fs();
        if (!user_mode(regs))
                set_fs(KERNEL_DS);
        emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);
        set_fs(seg);

        return;

sigbus:
        die_if_kernel("Kernel unaligned instruction access", regs);
        force_sig(SIGBUS, current);

        /*
         * XXX On return from the signal handler we should advance the epc
         */
}

#ifdef CONFIG_DEBUG_FS
extern struct dentry *mips_debugfs_dir;
static int __init debugfs_unaligned(void)
{
        struct dentry *d;

        if (!mips_debugfs_dir)
                return -ENODEV;
        d = debugfs_create_u32("unaligned_instructions", S_IRUGO,
                               mips_debugfs_dir, &unaligned_instructions);
        if (!d)
                return -ENOMEM;
        d = debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
                               mips_debugfs_dir, &unaligned_action);
        if (!d)
                return -ENOMEM;
        return 0;
}
__initcall(debugfs_unaligned);
#endif