// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011-2012 Synopsys (www.synopsys.com)
 *
 * vineetg : May 2011
 *  -Adapted (from .26 to .35)
 *  -original contribution by Tim.yao@amlogic.com
 */

#include <linux/types.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <asm/disasm.h>
#include "unaligned.h"

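/*
 * The byte-wise stores in put16/put32 below always emit the low byte of
 * the register first, at the lowest address, i.e. little-endian order.
 * On big-endian, FIRST_BYTE_16/32 pre-shuffle the value ("swape" reverses
 * the byte order of a word, "swap" exchanges its 16-bit halves) so the
 * same store sequence produces big-endian memory order.
 */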
#ifdef CONFIG_CPU_BIG_ENDIAN
#define BE		1
#define FIRST_BYTE_16	"swap %1, %1\n swape %1, %1\n"
#define FIRST_BYTE_32	"swape %1, %1\n"
#else
#define BE		0
#define FIRST_BYTE_16
#define FIRST_BYTE_32
#endif

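/*
 * All user-space byte accesses below are covered by exception-table
 * entries: if a ldb/stb faults, the fault handler transfers control to
 * the matching .fixup stub, which sets err to 1 and resumes execution
 * just past the access.
 */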
#define __get8_unaligned_check(val, addr, err)		\
	__asm__(					\
	"1:	ldb.ab  %1, [%2, 1]\n"			\
	"2:\n"						\
	"	.section .fixup,\"ax\"\n"		\
	"	.align 4\n"				\
	"3:	mov %0, 1\n"				\
	"	j   2b\n"				\
	"	.previous\n"				\
	"	.section __ex_table,\"a\"\n"		\
	"	.align 4\n"				\
	"	.long 1b, 3b\n"				\
	"	.previous\n"				\
	: "=r" (err), "=&r" (val), "=r" (addr)		\
	: "0" (err), "2" (addr))

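/*
 * get16/get32 assemble the value a byte at a time, picking the shift for
 * each byte based on BE so the result is host-endian either way.  Note
 * the hidden control flow: on a faulting access these macros (and the
 * put variants) do "goto fault", so every user must define a local
 * fault: label.
 */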
#define get16_unaligned_check(val, addr)		\
	do {						\
		unsigned int err = 0, v, a = addr;	\
		__get8_unaligned_check(v, a, err);	\
		val =  v << ((BE) ? 8 : 0);		\
		__get8_unaligned_check(v, a, err);	\
		val |= v << ((BE) ? 0 : 8);		\
		if (err)				\
			goto fault;			\
	} while (0)

#define get32_unaligned_check(val, addr)		\
	do {						\
		unsigned int err = 0, v, a = addr;	\
		__get8_unaligned_check(v, a, err);	\
		val =  v << ((BE) ? 24 : 0);		\
		__get8_unaligned_check(v, a, err);	\
		val |= v << ((BE) ? 16 : 8);		\
		__get8_unaligned_check(v, a, err);	\
		val |= v << ((BE) ? 8 : 16);		\
		__get8_unaligned_check(v, a, err);	\
		val |= v << ((BE) ? 0 : 24);		\
		if (err)				\
			goto fault;			\
	} while (0)

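/*
 * The put helpers mirror the getters: the value is written out one byte
 * at a time, shifted right by 8 between stores, and every stb gets its
 * own __ex_table entry so a fault on any byte reaches the common .fixup
 * stub.
 */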
#define put16_unaligned_check(val, addr)		\
	do {						\
		unsigned int err = 0, v = val, a = addr;\
							\
		__asm__(				\
		FIRST_BYTE_16				\
		"1:	stb.ab  %1, [%2, 1]\n"		\
		"	lsr %1, %1, 8\n"		\
		"2:	stb %1, [%2]\n"			\
		"3:\n"					\
		"	.section .fixup,\"ax\"\n"	\
		"	.align 4\n"			\
		"4:	mov %0, 1\n"			\
		"	j   3b\n"			\
		"	.previous\n"			\
		"	.section __ex_table,\"a\"\n"	\
		"	.align 4\n"			\
		"	.long 1b, 4b\n"			\
		"	.long 2b, 4b\n"			\
		"	.previous\n"			\
		: "=r" (err), "=&r" (v), "=&r" (a)	\
		: "0" (err), "1" (v), "2" (a));		\
							\
		if (err)				\
			goto fault;			\
	} while (0)

#define put32_unaligned_check(val, addr)		\
	do {						\
		unsigned int err = 0, v = val, a = addr;\
							\
		__asm__(				\
		FIRST_BYTE_32				\
		"1:	stb.ab  %1, [%2, 1]\n"		\
		"	lsr %1, %1, 8\n"		\
		"2:	stb.ab  %1, [%2, 1]\n"		\
		"	lsr %1, %1, 8\n"		\
		"3:	stb.ab  %1, [%2, 1]\n"		\
		"	lsr %1, %1, 8\n"		\
		"4:	stb %1, [%2]\n"			\
		"5:\n"					\
		"	.section .fixup,\"ax\"\n"	\
		"	.align 4\n"			\
		"6:	mov %0, 1\n"			\
		"	j   5b\n"			\
		"	.previous\n"			\
		"	.section __ex_table,\"a\"\n"	\
		"	.align 4\n"			\
		"	.long 1b, 6b\n"			\
		"	.long 2b, 6b\n"			\
		"	.long 3b, 6b\n"			\
		"	.long 4b, 6b\n"			\
		"	.previous\n"			\
		: "=r" (err), "=&r" (v), "=&r" (a)	\
		: "0" (err), "1" (v), "2" (a));		\
							\
		if (err)				\
			goto fault;			\
	} while (0)

/* sysctl hooks, exposed via kernel/sysctl.c */
int unaligned_enabled __read_mostly = 1;	/* enabled by default; /proc/sys/kernel/unaligned-trap */
int no_unaligned_warning __read_mostly = 1;	/* one warning by default; /proc/sys/kernel/ignore-unaligned-usertrap */

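/*
 * struct disasm_state fields consumed below (set up by disasm_instr()):
 *   zz   - access size: 0 = word, 2 = halfword (1 = byte, filtered out
 *          by the caller since byte accesses cannot be misaligned)
 *   aa   - address-writeback mode: 1 = .aw pre-update, 2 = .ab post-update,
 *          3 = .as scaled
 *   x    - sign-extend flag for halfword loads
 *   pref - prefetch encoding: no destination register to update
 */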
static void fixup_load(struct disasm_state *state, struct pt_regs *regs,
			struct callee_regs *cregs)
{
	int val;

	/* register write back */
	if ((state->aa == 1) || (state->aa == 2)) {
		set_reg(state->wb_reg, state->src1 + state->src2, regs, cregs);

		if (state->aa == 2)
			state->src2 = 0;
	}

	if (state->zz == 0) {
		get32_unaligned_check(val, state->src1 + state->src2);
	} else {
		get16_unaligned_check(val, state->src1 + state->src2);

		if (state->x)	/* sign-extend the halfword */
			val = (val << 16) >> 16;
	}

	if (state->pref == 0)
		set_reg(state->dest, val, regs, cregs);

	return;

fault:	state->fault = 1;
}

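/*
 * Stores also have to cope with .as scaled addressing (aa == 3): the
 * writeback register is updated with the offset scaled by the access
 * size (x2 for halfword, x4 for word).
 */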
static void fixup_store(struct disasm_state *state, struct pt_regs *regs,
			struct callee_regs *cregs)
{
	/* register write back */
	if ((state->aa == 1) || (state->aa == 2)) {
		set_reg(state->wb_reg, state->src2 + state->src3, regs, cregs);

		if (state->aa == 2)	/* post-update: address excludes offset */
			state->src3 = 0;
	} else if (state->aa == 3) {
		if (state->zz == 2) {
			set_reg(state->wb_reg, state->src2 + (state->src3 << 1),
				regs, cregs);
		} else if (!state->zz) {
			set_reg(state->wb_reg, state->src2 + (state->src3 << 2),
				regs, cregs);
		} else {
			goto fault;
		}
	}

	/* write fix-up */
	if (!state->zz)
		put32_unaligned_check(state->src1, state->src2 + state->src3);
	else
		put16_unaligned_check(state->src1, state->src2 + state->src3);

	return;

fault:	state->fault = 1;
}

/*
 * Handle an unaligned access; returns 0 if successfully handled, 1 on
 * error.  Called from the misaligned-access trap handler
 * (do_misaligned_access() in arch/arc/kernel/traps.c).
 */
int misaligned_fixup(unsigned long address, struct pt_regs *regs,
		     struct callee_regs *cregs)
{
	struct disasm_state state;
	char buf[TASK_COMM_LEN];

	/* handle user mode only and only if enabled by sysadmin */
	if (!user_mode(regs) || !unaligned_enabled)
		return 1;

	if (no_unaligned_warning) {
		pr_warn_once("%s(%d) made unaligned access which was emulated"
			     " by kernel assist.\nThis can degrade application"
			     " performance significantly.\nTo enable further"
			     " logging of such instances, please\n"
			     " echo 0 > /proc/sys/kernel/ignore-unaligned-usertrap\n",
			     get_task_comm(buf, current), task_pid_nr(current));
	} else {
		/* Add rate limiting if it gets down to it */
		pr_warn("%s(%d): unaligned access to/from 0x%lx by PC: 0x%lx\n",
			get_task_comm(buf, current), task_pid_nr(current),
			address, regs->ret);
	}

	disasm_instr(regs->ret, &state, 1, regs, cregs);

	if (state.fault)
		goto fault;

	/* ldb/stb should not have unaligned exception; also bail out on
	 * .di (cache-bypass) accesses rather than emulate them */
	if ((state.zz == 1) || (state.di))
		goto fault;

	if (!state.write)
		fixup_load(&state, regs, cregs);
	else
		fixup_store(&state, regs, cregs);

	if (state.fault)
		goto fault;

	/* clear any remnants of delay slot */
	if (delay_mode(regs)) {
		regs->ret = regs->bta & ~1U;
		regs->status32 &= ~STATUS_DE_MASK;
	} else {
		regs->ret += state.instr_len;

		/* handle zero-overhead-loop: wrap back to loop start and
		 * decrement the count, as the hardware would have done */
		if ((regs->ret == regs->lp_end) && (regs->lp_count)) {
			regs->ret = regs->lp_start;
			regs->lp_count--;
		}
	}

	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address);
	return 0;

fault:
	pr_err("Alignment trap: fault in fix-up %08lx at [<%08lx>]\n",
	       state.words[0], address);

	return 1;
}