1/*
2 * Copyright (C) 2011-2012 Synopsys (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * vineetg : May 2011
9 * -Adapted (from .26 to .35)
10 * -original contribution by Tim.yao@amlogic.com
11 *
12 */
13
14#include <linux/types.h>
15#include <linux/perf_event.h>
16#include <linux/ptrace.h>
17#include <linux/uaccess.h>
18#include <asm/disasm.h>
19
/*
 * The byte-at-a-time store loops below emit the value LSB-first.  On a
 * big-endian CPU the value is byte-swapped up front (swap/swape insns)
 * so the bytes still land in memory order; the loads instead pick
 * BE-dependent shift amounts in get{16,32}_unaligned_check().
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define BE 1
#define FIRST_BYTE_16 "swap %1, %1\n swape %1, %1\n"
#define FIRST_BYTE_32 "swape %1, %1\n"
#else
#define BE 0
#define FIRST_BYTE_16
#define FIRST_BYTE_32
#endif
29
/*
 * Load one byte from @addr into @val, post-incrementing @addr (ldb.ab).
 * A faulting user access is caught via the kernel exception table: the
 * .fixup stub sets @err to 1 and jumps back to label 2:, so the caller
 * can bail out gracefully instead of taking an unhandled fault.
 */
#define __get8_unaligned_check(val, addr, err)		\
	__asm__(					\
	"1: ldb.ab %1, [%2, 1]\n"			\
	"2:\n"						\
	" .section .fixup,\"ax\"\n"			\
	" .align 4\n"					\
	"3: mov %0, 1\n"				\
	" j 2b\n"					\
	" .previous\n"					\
	" .section __ex_table,\"a\"\n"			\
	" .align 4\n"					\
	" .long 1b, 3b\n"				\
	" .previous\n"					\
	: "=r" (err), "=&r" (val), "=r" (addr)		\
	: "0" (err), "2" (addr))
45
/*
 * Read a 16-bit value from a possibly-unaligned @addr one byte at a
 * time; the BE-conditional shifts place each byte in the correct half
 * for either endianness.  On any faulting byte access, jumps to the
 * enclosing function's 'fault' label.
 */
#define get16_unaligned_check(val, addr)		\
	do {						\
		unsigned int err = 0, v, a = addr;	\
		__get8_unaligned_check(v, a, err);	\
		val = v << ((BE) ? 8 : 0);		\
		__get8_unaligned_check(v, a, err);	\
		val |= v << ((BE) ? 0 : 8);		\
		if (err)				\
			goto fault;			\
	} while (0)
56
/*
 * Read a 32-bit value from a possibly-unaligned @addr one byte at a
 * time; BE-conditional shifts assemble the bytes for either
 * endianness.  On any faulting byte access, jumps to the enclosing
 * function's 'fault' label.
 */
#define get32_unaligned_check(val, addr)		\
	do {						\
		unsigned int err = 0, v, a = addr;	\
		__get8_unaligned_check(v, a, err);	\
		val = v << ((BE) ? 24 : 0);		\
		__get8_unaligned_check(v, a, err);	\
		val |= v << ((BE) ? 16 : 8);		\
		__get8_unaligned_check(v, a, err);	\
		val |= v << ((BE) ? 8 : 16);		\
		__get8_unaligned_check(v, a, err);	\
		val |= v << ((BE) ? 0 : 24);		\
		if (err)				\
			goto fault;			\
	} while (0)
71
/*
 * Store the low 16 bits of @val to a possibly-unaligned @addr, byte by
 * byte (stb).  On big-endian, FIRST_BYTE_16 swaps @val first so the
 * bytes come out in memory order.  A faulting store runs the .fixup
 * stub (err = 1); the macro then jumps to the caller's 'fault' label.
 */
#define put16_unaligned_check(val, addr)		\
	do {						\
		unsigned int err = 0, v = val, a = addr;\
							\
		__asm__(				\
		FIRST_BYTE_16				\
		"1: stb.ab %1, [%2, 1]\n"		\
		" lsr %1, %1, 8\n"			\
		"2: stb %1, [%2]\n"			\
		"3:\n"					\
		" .section .fixup,\"ax\"\n"		\
		" .align 4\n"				\
		"4: mov %0, 1\n"			\
		" j 3b\n"				\
		" .previous\n"				\
		" .section __ex_table,\"a\"\n"		\
		" .align 4\n"				\
		" .long 1b, 4b\n"			\
		" .long 2b, 4b\n"			\
		" .previous\n"				\
		: "=r" (err), "=&r" (v), "=&r" (a)	\
		: "0" (err), "1" (v), "2" (a));		\
							\
		if (err)				\
			goto fault;			\
	} while (0)
98
/*
 * Store the 32-bit @val to a possibly-unaligned @addr, byte by byte
 * (stb), shifting the value right after each store.  On big-endian,
 * FIRST_BYTE_32 swaps @val first so the bytes come out in memory
 * order.  Any faulting store runs the .fixup stub (err = 1); the macro
 * then jumps to the caller's 'fault' label.
 */
#define put32_unaligned_check(val, addr)		\
	do {						\
		unsigned int err = 0, v = val, a = addr;\
							\
		__asm__(				\
		FIRST_BYTE_32				\
		"1: stb.ab %1, [%2, 1]\n"		\
		" lsr %1, %1, 8\n"			\
		"2: stb.ab %1, [%2, 1]\n"		\
		" lsr %1, %1, 8\n"			\
		"3: stb.ab %1, [%2, 1]\n"		\
		" lsr %1, %1, 8\n"			\
		"4: stb %1, [%2]\n"			\
		"5:\n"					\
		" .section .fixup,\"ax\"\n"		\
		" .align 4\n"				\
		"6: mov %0, 1\n"			\
		" j 5b\n"				\
		" .previous\n"				\
		" .section __ex_table,\"a\"\n"		\
		" .align 4\n"				\
		" .long 1b, 6b\n"			\
		" .long 2b, 6b\n"			\
		" .long 3b, 6b\n"			\
		" .long 4b, 6b\n"			\
		" .previous\n"				\
		: "=r" (err), "=&r" (v), "=&r" (a)	\
		: "0" (err), "1" (v), "2" (a));		\
							\
		if (err)				\
			goto fault;			\
	} while (0)
131
/* sysctl hooks */
int unaligned_enabled __read_mostly = 1;	/* Enabled by default */
/* 1 => warn once only (toggled via /proc/sys/kernel/ignore-unaligned-usertrap) */
int no_unaligned_warning __read_mostly = 1;	/* Only 1 warning by default */
135
/*
 * Emulate a user-mode unaligned load described by @state.
 *
 * Performs the register write-back the addressing mode requires, reads
 * the 16- or 32-bit value byte-wise, optionally sign-extends a 16-bit
 * load (state->x), and commits the result to the destination register.
 * A faulting user access sets state->fault (the 'fault' label is the
 * jump target of the get*_unaligned_check macros).
 */
static void fixup_load(struct disasm_state *state, struct pt_regs *regs,
			struct callee_regs *cregs)
{
	int val;

	/* register write back */
	if ((state->aa == 1) || (state->aa == 2)) {
		set_reg(state->wb_reg, state->src1 + state->src2, regs, cregs);

		/* aa == 2 presumably .ab: access uses base only — TODO confirm */
		if (state->aa == 2)
			state->src2 = 0;
	}

	if (state->zz == 0) {
		get32_unaligned_check(val, state->src1 + state->src2);
	} else {
		get16_unaligned_check(val, state->src1 + state->src2);

		/* sign-extend the 16-bit value (ldw.x) */
		if (state->x)
			val = (val << 16) >> 16;
	}

	/* prefetch encodings have no destination register to update */
	if (state->pref == 0)
		set_reg(state->dest, val, regs, cregs);

	return;

fault:	state->fault = 1;
}
165
/*
 * Emulate a user-mode unaligned store described by @state.
 *
 * Performs the register write-back the addressing mode requires, then
 * stores the 16- or 32-bit source value byte-wise.  A faulting user
 * access, or an unsupported encoding, sets state->fault.
 */
static void fixup_store(struct disasm_state *state, struct pt_regs *regs,
			struct callee_regs *cregs)
{
	/* register write back */
	if ((state->aa == 1) || (state->aa == 2)) {
		set_reg(state->wb_reg, state->src2 + state->src3, regs, cregs);

		/*
		 * NOTE(review): this condition can never be true here (the
		 * outer branch requires aa == 1 or 2).  fixup_load() zeroes
		 * its offset for aa == 2, so aa == 2 may have been intended.
		 * Upstream carries the same code — confirm before changing.
		 */
		if (state->aa == 3)
			state->src3 = 0;
	} else if (state->aa == 3) {
		/* scaled-index write back: offset shifted by access size */
		if (state->zz == 2) {
			set_reg(state->wb_reg, state->src2 + (state->src3 << 1),
				regs, cregs);
		} else if (!state->zz) {
			set_reg(state->wb_reg, state->src2 + (state->src3 << 2),
				regs, cregs);
		} else {
			goto fault;	/* other sizes unsupported here */
		}
	}

	/* write fix-up */
	if (!state->zz)
		put32_unaligned_check(state->src1, state->src2 + state->src3);
	else
		put16_unaligned_check(state->src1, state->src2 + state->src3);

	return;

fault:	state->fault = 1;
}
197
198/*
199 * Handle an unaligned access
200 * Returns 0 if successfully handled, 1 if some error happened
201 */
202int misaligned_fixup(unsigned long address, struct pt_regs *regs,
203 struct callee_regs *cregs)
204{
205 struct disasm_state state;
206 char buf[TASK_COMM_LEN];
207
208 /* handle user mode only and only if enabled by sysadmin */
209 if (!user_mode(regs) || !unaligned_enabled)
210 return 1;
211
212 if (no_unaligned_warning) {
213 pr_warn_once("%s(%d) made unaligned access which was emulated"
214 " by kernel assist\n. This can degrade application"
215 " performance significantly\n. To enable further"
216 " logging of such instances, please \n"
217 " echo 0 > /proc/sys/kernel/ignore-unaligned-usertrap\n",
218 get_task_comm(buf, current), task_pid_nr(current));
219 } else {
220 /* Add rate limiting if it gets down to it */
221 pr_warn("%s(%d): unaligned access to/from 0x%lx by PC: 0x%lx\n",
222 get_task_comm(buf, current), task_pid_nr(current),
223 address, regs->ret);
224
225 }
226
227 disasm_instr(regs->ret, &state, 1, regs, cregs);
228
229 if (state.fault)
230 goto fault;
231
232 /* ldb/stb should not have unaligned exception */
233 if ((state.zz == 1) || (state.di))
234 goto fault;
235
236 if (!state.write)
237 fixup_load(&state, regs, cregs);
238 else
239 fixup_store(&state, regs, cregs);
240
241 if (state.fault)
242 goto fault;
243
244 /* clear any remanants of delay slot */
245 if (delay_mode(regs)) {
246 regs->ret = regs->bta & ~1U;
247 regs->status32 &= ~STATUS_DE_MASK;
248 } else {
249 regs->ret += state.instr_len;
250
251 /* handle zero-overhead-loop */
252 if ((regs->ret == regs->lp_end) && (regs->lp_count)) {
253 regs->ret = regs->lp_start;
254 regs->lp_count--;
255 }
256 }
257
258 perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address);
259 return 0;
260
261fault:
262 pr_err("Alignment trap: fault in fix-up %08lx at [<%08lx>]\n",
263 state.words[0], address);
264
265 return 1;
266}
1/*
2 * Copyright (C) 2011-2012 Synopsys (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * vineetg : May 2011
9 * -Adapted (from .26 to .35)
10 * -original contribution by Tim.yao@amlogic.com
11 *
12 */
13
#include <linux/types.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <asm/disasm.h>
18
/*
 * The byte-at-a-time store loops below emit the value LSB-first.  On a
 * big-endian CPU the value is byte-swapped up front (swap/swape insns)
 * so the bytes still land in memory order; the loads instead pick
 * BE-dependent shift amounts in get{16,32}_unaligned_check().
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define BE 1
#define FIRST_BYTE_16 "swap %1, %1\n swape %1, %1\n"
#define FIRST_BYTE_32 "swape %1, %1\n"
#else
#define BE 0
#define FIRST_BYTE_16
#define FIRST_BYTE_32
#endif
28
/*
 * Load one byte from @addr into @val, post-incrementing @addr (ldb.ab).
 * A faulting user access is caught via the kernel exception table: the
 * .fixup stub sets @err to 1 and jumps back to label 2:.
 *
 * Fix: return from the .fixup stub with "j" (jump) rather than "b" --
 * the .fixup section may be linked beyond a pc-relative branch's reach,
 * and the other copy of this macro in this file already uses "j 2b".
 */
#define __get8_unaligned_check(val, addr, err)		\
	__asm__(					\
	"1: ldb.ab %1, [%2, 1]\n"			\
	"2:\n"						\
	" .section .fixup,\"ax\"\n"			\
	" .align 4\n"					\
	"3: mov %0, 1\n"				\
	" j 2b\n"					\
	" .previous\n"					\
	" .section __ex_table,\"a\"\n"			\
	" .align 4\n"					\
	" .long 1b, 3b\n"				\
	" .previous\n"					\
	: "=r" (err), "=&r" (val), "=r" (addr)		\
	: "0" (err), "2" (addr))
44
/*
 * Read a 16-bit value from a possibly-unaligned @addr one byte at a
 * time; the BE-conditional shifts place each byte in the correct half
 * for either endianness.  On any faulting byte access, jumps to the
 * enclosing function's 'fault' label.
 */
#define get16_unaligned_check(val, addr)		\
	do {						\
		unsigned int err = 0, v, a = addr;	\
		__get8_unaligned_check(v, a, err);	\
		val = v << ((BE) ? 8 : 0);		\
		__get8_unaligned_check(v, a, err);	\
		val |= v << ((BE) ? 0 : 8);		\
		if (err)				\
			goto fault;			\
	} while (0)
55
/*
 * Read a 32-bit value from a possibly-unaligned @addr one byte at a
 * time; BE-conditional shifts assemble the bytes for either
 * endianness.  On any faulting byte access, jumps to the enclosing
 * function's 'fault' label.
 */
#define get32_unaligned_check(val, addr)		\
	do {						\
		unsigned int err = 0, v, a = addr;	\
		__get8_unaligned_check(v, a, err);	\
		val = v << ((BE) ? 24 : 0);		\
		__get8_unaligned_check(v, a, err);	\
		val |= v << ((BE) ? 16 : 8);		\
		__get8_unaligned_check(v, a, err);	\
		val |= v << ((BE) ? 8 : 16);		\
		__get8_unaligned_check(v, a, err);	\
		val |= v << ((BE) ? 0 : 24);		\
		if (err)				\
			goto fault;			\
	} while (0)
70
/*
 * Store the low 16 bits of @val to a possibly-unaligned @addr, byte by
 * byte (stb).  On big-endian, FIRST_BYTE_16 swaps @val first so the
 * bytes come out in memory order.  A faulting store runs the .fixup
 * stub (err = 1); the macro then jumps to the caller's 'fault' label.
 *
 * Fix: return from the .fixup stub with "j" (jump) rather than "b" --
 * .fixup may be out of branch range; matches the sibling macro copy.
 */
#define put16_unaligned_check(val, addr)		\
	do {						\
		unsigned int err = 0, v = val, a = addr;\
							\
		__asm__(				\
		FIRST_BYTE_16				\
		"1: stb.ab %1, [%2, 1]\n"		\
		" lsr %1, %1, 8\n"			\
		"2: stb %1, [%2]\n"			\
		"3:\n"					\
		" .section .fixup,\"ax\"\n"		\
		" .align 4\n"				\
		"4: mov %0, 1\n"			\
		" j 3b\n"				\
		" .previous\n"				\
		" .section __ex_table,\"a\"\n"		\
		" .align 4\n"				\
		" .long 1b, 4b\n"			\
		" .long 2b, 4b\n"			\
		" .previous\n"				\
		: "=r" (err), "=&r" (v), "=&r" (a)	\
		: "0" (err), "1" (v), "2" (a));		\
							\
		if (err)				\
			goto fault;			\
	} while (0)
97
/*
 * Store the 32-bit @val to a possibly-unaligned @addr, byte by byte
 * (stb), shifting the value right after each store.  On big-endian,
 * FIRST_BYTE_32 swaps @val first so the bytes come out in memory
 * order.  Any faulting store runs the .fixup stub (err = 1); the macro
 * then jumps to the caller's 'fault' label.
 *
 * Fix: return from the .fixup stub with "j" (jump) rather than "b" --
 * .fixup may be out of branch range; matches the sibling macro copy.
 */
#define put32_unaligned_check(val, addr)		\
	do {						\
		unsigned int err = 0, v = val, a = addr;\
							\
		__asm__(				\
		FIRST_BYTE_32				\
		"1: stb.ab %1, [%2, 1]\n"		\
		" lsr %1, %1, 8\n"			\
		"2: stb.ab %1, [%2, 1]\n"		\
		" lsr %1, %1, 8\n"			\
		"3: stb.ab %1, [%2, 1]\n"		\
		" lsr %1, %1, 8\n"			\
		"4: stb %1, [%2]\n"			\
		"5:\n"					\
		" .section .fixup,\"ax\"\n"		\
		" .align 4\n"				\
		"6: mov %0, 1\n"			\
		" j 5b\n"				\
		" .previous\n"				\
		" .section __ex_table,\"a\"\n"		\
		" .align 4\n"				\
		" .long 1b, 6b\n"			\
		" .long 2b, 6b\n"			\
		" .long 3b, 6b\n"			\
		" .long 4b, 6b\n"			\
		" .previous\n"				\
		: "=r" (err), "=&r" (v), "=&r" (a)	\
		: "0" (err), "1" (v), "2" (a));		\
							\
		if (err)				\
			goto fault;			\
	} while (0)
130
/* sysctl hooks */
int unaligned_enabled __read_mostly = 1;	/* Enabled by default */
/* 1 => warn once only (toggled via /proc/sys/kernel/ignore-unaligned-usertrap) */
int no_unaligned_warning __read_mostly = 1;	/* Only 1 warning by default */
134
/*
 * Emulate a user-mode unaligned load described by @state.
 *
 * Performs the register write-back the addressing mode requires, reads
 * the 16- or 32-bit value byte-wise, optionally sign-extends a 16-bit
 * load (state->x), and commits the result to the destination register.
 * A faulting user access sets state->fault (the 'fault' label is the
 * jump target of the get*_unaligned_check macros).
 */
static void fixup_load(struct disasm_state *state, struct pt_regs *regs,
			struct callee_regs *cregs)
{
	int val;

	/* register write back */
	if ((state->aa == 1) || (state->aa == 2)) {
		set_reg(state->wb_reg, state->src1 + state->src2, regs, cregs);

		/* aa == 2 presumably .ab: access uses base only — TODO confirm */
		if (state->aa == 2)
			state->src2 = 0;
	}

	if (state->zz == 0) {
		get32_unaligned_check(val, state->src1 + state->src2);
	} else {
		get16_unaligned_check(val, state->src1 + state->src2);

		/* sign-extend the 16-bit value (ldw.x) */
		if (state->x)
			val = (val << 16) >> 16;
	}

	/* prefetch encodings have no destination register to update */
	if (state->pref == 0)
		set_reg(state->dest, val, regs, cregs);

	return;

fault:	state->fault = 1;
}
164
/*
 * Emulate a user-mode unaligned store described by @state.
 *
 * Performs the register write-back the addressing mode requires, then
 * stores the 16- or 32-bit source value byte-wise.  A faulting user
 * access, or an unsupported encoding, sets state->fault.
 */
static void fixup_store(struct disasm_state *state, struct pt_regs *regs,
			struct callee_regs *cregs)
{
	/* register write back */
	if ((state->aa == 1) || (state->aa == 2)) {
		set_reg(state->wb_reg, state->src2 + state->src3, regs, cregs);

		/*
		 * NOTE(review): this condition can never be true here (the
		 * outer branch requires aa == 1 or 2).  fixup_load() zeroes
		 * its offset for aa == 2, so aa == 2 may have been intended.
		 * Upstream carries the same code — confirm before changing.
		 */
		if (state->aa == 3)
			state->src3 = 0;
	} else if (state->aa == 3) {
		/* scaled-index write back: offset shifted by access size */
		if (state->zz == 2) {
			set_reg(state->wb_reg, state->src2 + (state->src3 << 1),
				regs, cregs);
		} else if (!state->zz) {
			set_reg(state->wb_reg, state->src2 + (state->src3 << 2),
				regs, cregs);
		} else {
			goto fault;	/* other sizes unsupported here */
		}
	}

	/* write fix-up */
	if (!state->zz)
		put32_unaligned_check(state->src1, state->src2 + state->src3);
	else
		put16_unaligned_check(state->src1, state->src2 + state->src3);

	return;

fault:	state->fault = 1;
}
196
197/*
198 * Handle an unaligned access
199 * Returns 0 if successfully handled, 1 if some error happened
200 */
201int misaligned_fixup(unsigned long address, struct pt_regs *regs,
202 struct callee_regs *cregs)
203{
204 struct disasm_state state;
205 char buf[TASK_COMM_LEN];
206
207 /* handle user mode only and only if enabled by sysadmin */
208 if (!user_mode(regs) || !unaligned_enabled)
209 return 1;
210
211 if (no_unaligned_warning) {
212 pr_warn_once("%s(%d) made unaligned access which was emulated"
213 " by kernel assist\n. This can degrade application"
214 " performance significantly\n. To enable further"
215 " logging of such instances, please \n"
216 " echo 0 > /proc/sys/kernel/ignore-unaligned-usertrap\n",
217 get_task_comm(buf, current), task_pid_nr(current));
218 } else {
219 /* Add rate limiting if it gets down to it */
220 pr_warn("%s(%d): unaligned access to/from 0x%lx by PC: 0x%lx\n",
221 get_task_comm(buf, current), task_pid_nr(current),
222 address, regs->ret);
223
224 }
225
226 disasm_instr(regs->ret, &state, 1, regs, cregs);
227
228 if (state.fault)
229 goto fault;
230
231 /* ldb/stb should not have unaligned exception */
232 if ((state.zz == 1) || (state.di))
233 goto fault;
234
235 if (!state.write)
236 fixup_load(&state, regs, cregs);
237 else
238 fixup_store(&state, regs, cregs);
239
240 if (state.fault)
241 goto fault;
242
243 if (delay_mode(regs)) {
244 regs->ret = regs->bta;
245 regs->status32 &= ~STATUS_DE_MASK;
246 } else {
247 regs->ret += state.instr_len;
248
249 /* handle zero-overhead-loop */
250 if ((regs->ret == regs->lp_end) && (regs->lp_count)) {
251 regs->ret = regs->lp_start;
252 regs->lp_count--;
253 }
254 }
255
256 return 0;
257
258fault:
259 pr_err("Alignment trap: fault in fix-up %08lx at [<%08lx>]\n",
260 state.words[0], address);
261
262 return 1;
263}