/* SPARC BPF JIT load helpers — note: the file content below appears twice
 * (an original and a revised copy of the same assembly source).
 */
/* Helper routines called by SPARC BPF JIT compiled programs.
 * Register names (r_OFF, r_HEADLEN, r_SKB_DATA, r_TMP, r_TMP2,
 * r_A, r_X, r_saved_O7) come from "bpf_jit.h".
 */
#include <asm/ptrace.h>

#include "bpf_jit.h"

#ifdef CONFIG_SPARC64
/* 64-bit: larger register-window save area, biased stack
 * addressing, branch on the 64-bit condition codes.
 */
#define SAVE_SZ		176
#define SCRATCH_OFF	STACK_BIAS + 128
#define BE_PTR(label)	be,pn %xcc, label
#else
#define SAVE_SZ		96
#define SCRATCH_OFF	72
#define BE_PTR(label)	be label
#endif

/* Most negative packet offset accepted before bailing out;
 * mirrors SKF_LL_OFF from include/linux/filter.h.
 */
#define SKF_MAX_NEG_OFF	(-0x200000) /* SKF_LL_OFF from filter.h */
16
	.text
	.globl	bpf_jit_load_word
bpf_jit_load_word:
	/* Load a 32-bit word from packet data at offset r_OFF into r_A.
	 * Negative offsets go to the special slow path.
	 */
	cmp	r_OFF, 0
	bl	bpf_slow_path_word_neg
	 nop				/* branch delay slot */
	.globl	bpf_jit_load_word_positive_offset
bpf_jit_load_word_positive_offset:
	/* Fewer than 4 bytes left in the linear header
	 * (r_HEADLEN - r_OFF <= 3)?  Then use skb_copy_bits().
	 */
	sub	r_HEADLEN, r_OFF, r_TMP
	cmp	r_TMP, 3
	ble	bpf_slow_path_word
	 add	r_SKB_DATA, r_OFF, r_TMP	/* delay slot: r_TMP = &data[off] */
	andcc	r_TMP, 3, %g0			/* 4-byte aligned pointer? */
	bne	load_word_unaligned
	 nop
	retl
	 ld	[r_TMP], r_A			/* aligned load in delay slot */
load_word_unaligned:
	/* Assemble the word byte-by-byte (network/big-endian order,
	 * matching the aligned ld on this big-endian CPU); r_OFF is
	 * reused as the accumulator, the final OR lands in r_A.
	 */
	ldub	[r_TMP + 0x0], r_OFF
	ldub	[r_TMP + 0x1], r_TMP2
	sll	r_OFF, 8, r_OFF
	or	r_OFF, r_TMP2, r_OFF
	ldub	[r_TMP + 0x2], r_TMP2
	sll	r_OFF, 8, r_OFF
	or	r_OFF, r_TMP2, r_OFF
	ldub	[r_TMP + 0x3], r_TMP2
	sll	r_OFF, 8, r_OFF
	retl
	 or	r_OFF, r_TMP2, r_A
46
	.globl	bpf_jit_load_half
bpf_jit_load_half:
	/* Load a 16-bit halfword from packet data at r_OFF into r_A. */
	cmp	r_OFF, 0
	bl	bpf_slow_path_half_neg
	 nop
	.globl	bpf_jit_load_half_positive_offset
bpf_jit_load_half_positive_offset:
	/* Need 2 bytes in the linear header, i.e. r_HEADLEN - r_OFF > 1. */
	sub	r_HEADLEN, r_OFF, r_TMP
	cmp	r_TMP, 1
	ble	bpf_slow_path_half
	 add	r_SKB_DATA, r_OFF, r_TMP	/* delay slot: r_TMP = &data[off] */
	andcc	r_TMP, 1, %g0			/* 2-byte aligned? */
	bne	load_half_unaligned
	 nop
	retl
	 lduh	[r_TMP], r_A
load_half_unaligned:
	/* Two byte loads merged in big-endian order; result in r_A. */
	ldub	[r_TMP + 0x0], r_OFF
	ldub	[r_TMP + 0x1], r_TMP2
	sll	r_OFF, 8, r_OFF
	retl
	 or	r_OFF, r_TMP2, r_A
69
	.globl	bpf_jit_load_byte
bpf_jit_load_byte:
	/* Load one byte from packet data at r_OFF into r_A. */
	cmp	r_OFF, 0
	bl	bpf_slow_path_byte_neg
	 nop
	.globl	bpf_jit_load_byte_positive_offset
bpf_jit_load_byte_positive_offset:
	/* A single byte only needs r_OFF < r_HEADLEN; no alignment issue. */
	cmp	r_OFF, r_HEADLEN
	bge	bpf_slow_path_byte
	 nop
	retl
	 ldub	[r_SKB_DATA + r_OFF], r_A
82
	.globl	bpf_jit_load_byte_msh
bpf_jit_load_byte_msh:
	/* BPF "msh" load: X = (data[r_OFF] & 0xf) << 2 (IP header
	 * length extraction).  r_A is untouched; r_OFF is clobbered.
	 */
	cmp	r_OFF, 0
	bl	bpf_slow_path_byte_msh_neg
	 nop
	.globl	bpf_jit_load_byte_msh_positive_offset
bpf_jit_load_byte_msh_positive_offset:
	cmp	r_OFF, r_HEADLEN
	bge	bpf_slow_path_byte_msh
	 nop
	ldub	[r_SKB_DATA + r_OFF], r_OFF	/* reuse r_OFF as scratch */
	and	r_OFF, 0xf, r_OFF
	retl
	 sll	r_OFF, 2, r_X
97
/* Out-of-line fallback: from a fresh register window, call
 * skb_copy_bits(skb, r_OFF, scratch, LEN).  %i0 is the caller's
 * %o0 and is presumably the skb pointer — the JIT keeps it there
 * (TODO confirm against bpf_jit.h register map).  On return the
 * condition codes reflect "cmp %o0, 0" (they survive restore), so
 * the invoker can branch to bpf_error on a negative result; the
 * copied bytes are at [%sp + SCRATCH_OFF] in the invoker's frame.
 */
#define bpf_slow_path_common(LEN)	\
	save	%sp, -SAVE_SZ, %sp;	\
	mov	%i0, %o0;		\
	mov	r_OFF, %o1;		\
	add	%fp, SCRATCH_OFF, %o2;	\
	call	skb_copy_bits;		\
	 mov	(LEN), %o3;		\
	cmp	%o0, 0;			\
	restore;
107
/* skb_copy_bits() fallbacks for positive offsets that do not fit
 * entirely in the linear header.  Each copies LEN bytes into the
 * scratch slot, branches to bpf_error on a negative return, and
 * otherwise reloads the result from the scratch slot.
 */
bpf_slow_path_word:
	bpf_slow_path_common(4)
	bl	bpf_error
	/* delay slot executes either way; load is harmless on error */
	 ld	[%sp + SCRATCH_OFF], r_A
	retl
	 nop
bpf_slow_path_half:
	bpf_slow_path_common(2)
	bl	bpf_error
	 lduh	[%sp + SCRATCH_OFF], r_A
	retl
	 nop
bpf_slow_path_byte:
	bpf_slow_path_common(1)
	bl	bpf_error
	 ldub	[%sp + SCRATCH_OFF], r_A
	retl
	 nop
bpf_slow_path_byte_msh:
	/* msh variant: X = (byte & 0xf) << 2.  Load the copied byte
	 * into r_OFF (scratch) — NOT r_A — matching the positive- and
	 * negative-offset msh paths; previously this loaded into r_A
	 * and then computed X from the stale packet offset in r_OFF,
	 * clobbering A with the byte as well.
	 */
	bpf_slow_path_common(1)
	bl	bpf_error
	 ldub	[%sp + SCRATCH_OFF], r_OFF
	and	r_OFF, 0xf, r_OFF
	retl
	 sll	r_OFF, 2, r_X
133
#ifdef CONFIG_SPARC64
/* Sign-extend a 32-bit value in a 64-bit register: the JIT keeps
 * r_OFF as a 32-bit quantity, so a negative offset has zero upper
 * bits and would be seen by C code as a huge positive offset.
 */
#define SIGN_EXTEND(reg)	sra reg, 0, reg
#else
#define SIGN_EXTEND(reg)
#endif

/* Call bpf_internal_load_pointer_neg_helper(skb, off, LEN).
 * The offset in %o1 must be sign-extended on sparc64 before the
 * call (see above).  A NULL return means the negative offset is
 * invalid -> branch to bpf_error (restore sits in the branch
 * delay slot, so the window is unwound either way); otherwise
 * r_TMP points at the requested bytes.
 */
#define bpf_negative_common(LEN)	\
	save	%sp, -SAVE_SZ, %sp;	\
	mov	%i0, %o0;		\
	mov	r_OFF, %o1;		\
	SIGN_EXTEND(%o1);		\
	call	bpf_internal_load_pointer_neg_helper; \
	 mov	(LEN), %o2;		\
	mov	%o0, r_TMP;		\
	cmp	%o0, 0;			\
	BE_PTR(bpf_error);		\
	restore;
144
bpf_slow_path_word_neg:
	/* Offsets below SKF_MAX_NEG_OFF are invalid outright.
	 * (-0x200000 has zero low bits, so sethi %hi() alone
	 * materializes the full constant.)
	 */
	sethi	%hi(SKF_MAX_NEG_OFF), r_TMP
	cmp	r_OFF, r_TMP
	bl	bpf_error
	 nop
	.globl	bpf_jit_load_word_negative_offset
bpf_jit_load_word_negative_offset:
	/* Resolve the negative offset to a pointer, then reuse the
	 * aligned/unaligned word-load tail from the positive path.
	 */
	bpf_negative_common(4)
	andcc	r_TMP, 3, %g0
	bne	load_word_unaligned
	 nop
	retl
	 ld	[r_TMP], r_A
158
bpf_slow_path_half_neg:
	/* Reject offsets below SKF_MAX_NEG_OFF. */
	sethi	%hi(SKF_MAX_NEG_OFF), r_TMP
	cmp	r_OFF, r_TMP
	bl	bpf_error
	 nop
	.globl	bpf_jit_load_half_negative_offset
bpf_jit_load_half_negative_offset:
	/* Resolve pointer, then aligned lduh or byte-merge tail. */
	bpf_negative_common(2)
	andcc	r_TMP, 1, %g0
	bne	load_half_unaligned
	 nop
	retl
	 lduh	[r_TMP], r_A
172
bpf_slow_path_byte_neg:
	/* Reject offsets below SKF_MAX_NEG_OFF. */
	sethi	%hi(SKF_MAX_NEG_OFF), r_TMP
	cmp	r_OFF, r_TMP
	bl	bpf_error
	 nop
	.globl	bpf_jit_load_byte_negative_offset
bpf_jit_load_byte_negative_offset:
	/* Single byte: no alignment concern after pointer resolution. */
	bpf_negative_common(1)
	retl
	 ldub	[r_TMP], r_A
183
bpf_slow_path_byte_msh_neg:
	/* Reject offsets below SKF_MAX_NEG_OFF. */
	sethi	%hi(SKF_MAX_NEG_OFF), r_TMP
	cmp	r_OFF, r_TMP
	bl	bpf_error
	 nop
	.globl	bpf_jit_load_byte_msh_negative_offset
bpf_jit_load_byte_msh_negative_offset:
	/* msh: X = (byte & 0xf) << 2; r_OFF reused as scratch. */
	bpf_negative_common(1)
	ldub	[r_TMP], r_OFF
	and	r_OFF, 0xf, r_OFF
	retl
	 sll	r_OFF, 2, r_X
196
bpf_error:
	/* Make the JIT program return zero. The JIT epilogue
	 * stores away the original %o7 into r_saved_O7. The
	 * normal leaf function return is to use "retl" which
	 * would evalute to "jmpl %o7 + 8, %g0" but we want to
	 * use the saved value thus the sequence you see here.
	 */
	jmpl	r_saved_O7 + 8, %g0
	 clr	%o0			/* delay slot: return value 0 */
/* Helper routines for SPARC BPF JIT compiled programs; register
 * aliases (r_OFF, r_HEADLEN, r_SKB_DATA, r_TMP, r_TMP2, r_A, r_X,
 * r_saved_O7) are defined in "bpf_jit.h".
 */
#include <asm/ptrace.h>

#include "bpf_jit.h"

#ifdef CONFIG_SPARC64
/* 64-bit: larger save area, biased stack addressing, branches on
 * the 64-bit condition codes.
 */
#define SAVE_SZ		176
#define SCRATCH_OFF	STACK_BIAS + 128
#define BE_PTR(label)	be,pn %xcc, label
/* Sign-extend a 32-bit value held in a 64-bit register; needed
 * before handing a negative offset to C code.
 */
#define SIGN_EXTEND(reg)	sra reg, 0, reg
#else
#define SAVE_SZ		96
#define SCRATCH_OFF	72
#define BE_PTR(label)	be label
#define SIGN_EXTEND(reg)
#endif

/* Most negative packet offset accepted; mirrors SKF_LL_OFF from
 * include/linux/filter.h.
 */
#define SKF_MAX_NEG_OFF	(-0x200000) /* SKF_LL_OFF from filter.h */
18
	.text
	.globl	bpf_jit_load_word
bpf_jit_load_word:
	/* Load a 32-bit word from packet data at r_OFF into r_A;
	 * negative offsets take the special slow path.
	 */
	cmp	r_OFF, 0
	bl	bpf_slow_path_word_neg
	 nop				/* branch delay slot */
	.globl	bpf_jit_load_word_positive_offset
bpf_jit_load_word_positive_offset:
	/* Less than 4 bytes left in the linear header? Slow path. */
	sub	r_HEADLEN, r_OFF, r_TMP
	cmp	r_TMP, 3
	ble	bpf_slow_path_word
	 add	r_SKB_DATA, r_OFF, r_TMP	/* delay slot: r_TMP = &data[off] */
	andcc	r_TMP, 3, %g0			/* 4-byte aligned? */
	bne	load_word_unaligned
	 nop
	retl
	 ld	[r_TMP], r_A
load_word_unaligned:
	/* Byte-at-a-time big-endian assembly; r_OFF is the
	 * accumulator, final OR writes r_A.
	 */
	ldub	[r_TMP + 0x0], r_OFF
	ldub	[r_TMP + 0x1], r_TMP2
	sll	r_OFF, 8, r_OFF
	or	r_OFF, r_TMP2, r_OFF
	ldub	[r_TMP + 0x2], r_TMP2
	sll	r_OFF, 8, r_OFF
	or	r_OFF, r_TMP2, r_OFF
	ldub	[r_TMP + 0x3], r_TMP2
	sll	r_OFF, 8, r_OFF
	retl
	 or	r_OFF, r_TMP2, r_A
48
	.globl	bpf_jit_load_half
bpf_jit_load_half:
	/* Load a 16-bit halfword from packet data at r_OFF into r_A. */
	cmp	r_OFF, 0
	bl	bpf_slow_path_half_neg
	 nop
	.globl	bpf_jit_load_half_positive_offset
bpf_jit_load_half_positive_offset:
	/* Need r_HEADLEN - r_OFF > 1 to read 2 bytes inline. */
	sub	r_HEADLEN, r_OFF, r_TMP
	cmp	r_TMP, 1
	ble	bpf_slow_path_half
	 add	r_SKB_DATA, r_OFF, r_TMP	/* delay slot: r_TMP = &data[off] */
	andcc	r_TMP, 1, %g0			/* 2-byte aligned? */
	bne	load_half_unaligned
	 nop
	retl
	 lduh	[r_TMP], r_A
load_half_unaligned:
	/* Merge two bytes in big-endian order into r_A. */
	ldub	[r_TMP + 0x0], r_OFF
	ldub	[r_TMP + 0x1], r_TMP2
	sll	r_OFF, 8, r_OFF
	retl
	 or	r_OFF, r_TMP2, r_A
71
	.globl	bpf_jit_load_byte
bpf_jit_load_byte:
	/* Load one byte from packet data at r_OFF into r_A. */
	cmp	r_OFF, 0
	bl	bpf_slow_path_byte_neg
	 nop
	.globl	bpf_jit_load_byte_positive_offset
bpf_jit_load_byte_positive_offset:
	/* No alignment concern for a single byte. */
	cmp	r_OFF, r_HEADLEN
	bge	bpf_slow_path_byte
	 nop
	retl
	 ldub	[r_SKB_DATA + r_OFF], r_A
84
	.globl	bpf_jit_load_byte_msh
bpf_jit_load_byte_msh:
	/* BPF "msh" load: X = (data[r_OFF] & 0xf) << 2; r_A is
	 * untouched, r_OFF is clobbered as scratch.
	 */
	cmp	r_OFF, 0
	bl	bpf_slow_path_byte_msh_neg
	 nop
	.globl	bpf_jit_load_byte_msh_positive_offset
bpf_jit_load_byte_msh_positive_offset:
	cmp	r_OFF, r_HEADLEN
	bge	bpf_slow_path_byte_msh
	 nop
	ldub	[r_SKB_DATA + r_OFF], r_OFF
	and	r_OFF, 0xf, r_OFF
	retl
	 sll	r_OFF, 2, r_X
99
/* Out-of-line fallback: from a fresh register window, call
 * skb_copy_bits(skb, r_OFF, scratch, LEN).  %i0 (the caller's %o0)
 * is presumably the skb pointer per the JIT's register layout —
 * TODO confirm against bpf_jit.h.  Condition codes from
 * "cmp %o0, 0" survive restore, so the invoker branches to
 * bpf_error on a negative return; the bytes land at
 * [%sp + SCRATCH_OFF] in the invoker's frame.
 */
#define bpf_slow_path_common(LEN)	\
	save	%sp, -SAVE_SZ, %sp;	\
	mov	%i0, %o0;		\
	mov	r_OFF, %o1;		\
	add	%fp, SCRATCH_OFF, %o2;	\
	call	skb_copy_bits;		\
	 mov	(LEN), %o3;		\
	cmp	%o0, 0;			\
	restore;
109
/* skb_copy_bits() fallbacks for positive offsets outside the
 * linear header.  Copy LEN bytes to the scratch slot, bail to
 * bpf_error on a negative return, else reload from scratch.
 */
bpf_slow_path_word:
	bpf_slow_path_common(4)
	bl	bpf_error
	/* delay slot executes either way; load is harmless on error */
	 ld	[%sp + SCRATCH_OFF], r_A
	retl
	 nop
bpf_slow_path_half:
	bpf_slow_path_common(2)
	bl	bpf_error
	 lduh	[%sp + SCRATCH_OFF], r_A
	retl
	 nop
bpf_slow_path_byte:
	bpf_slow_path_common(1)
	bl	bpf_error
	 ldub	[%sp + SCRATCH_OFF], r_A
	retl
	 nop
bpf_slow_path_byte_msh:
	/* msh variant: X = (byte & 0xf) << 2.  Load the copied byte
	 * into r_OFF (scratch) — NOT r_A — matching the positive- and
	 * negative-offset msh paths; previously this loaded into r_A
	 * and then computed X from the stale packet offset in r_OFF,
	 * clobbering A with the byte as well.
	 */
	bpf_slow_path_common(1)
	bl	bpf_error
	 ldub	[%sp + SCRATCH_OFF], r_OFF
	and	r_OFF, 0xf, r_OFF
	retl
	 sll	r_OFF, 2, r_X
135
/* Call bpf_internal_load_pointer_neg_helper(skb, off, LEN).
 * %o1 must be sign-extended on sparc64: the JIT keeps r_OFF as a
 * 32-bit value, so without SIGN_EXTEND a negative offset would be
 * seen by the C helper as a huge positive one.  A NULL return
 * means the offset is invalid -> branch to bpf_error (restore is
 * in the branch delay slot, so the window unwinds either way);
 * otherwise r_TMP points at the requested bytes.
 */
#define bpf_negative_common(LEN)	\
	save	%sp, -SAVE_SZ, %sp;	\
	mov	%i0, %o0;		\
	mov	r_OFF, %o1;		\
	SIGN_EXTEND(%o1);		\
	call	bpf_internal_load_pointer_neg_helper; \
	 mov	(LEN), %o2;		\
	mov	%o0, r_TMP;		\
	cmp	%o0, 0;			\
	BE_PTR(bpf_error);		\
	restore;
147
bpf_slow_path_word_neg:
	/* Offsets below SKF_MAX_NEG_OFF are invalid outright.
	 * (-0x200000 has zero low bits, so sethi %hi() alone
	 * materializes the constant.)
	 */
	sethi	%hi(SKF_MAX_NEG_OFF), r_TMP
	cmp	r_OFF, r_TMP
	bl	bpf_error
	 nop
	.globl	bpf_jit_load_word_negative_offset
bpf_jit_load_word_negative_offset:
	/* Resolve pointer, then reuse the word-load tail. */
	bpf_negative_common(4)
	andcc	r_TMP, 3, %g0
	bne	load_word_unaligned
	 nop
	retl
	 ld	[r_TMP], r_A
161
bpf_slow_path_half_neg:
	/* Reject offsets below SKF_MAX_NEG_OFF. */
	sethi	%hi(SKF_MAX_NEG_OFF), r_TMP
	cmp	r_OFF, r_TMP
	bl	bpf_error
	 nop
	.globl	bpf_jit_load_half_negative_offset
bpf_jit_load_half_negative_offset:
	/* Resolve pointer, then aligned lduh or byte-merge tail. */
	bpf_negative_common(2)
	andcc	r_TMP, 1, %g0
	bne	load_half_unaligned
	 nop
	retl
	 lduh	[r_TMP], r_A
175
bpf_slow_path_byte_neg:
	/* Reject offsets below SKF_MAX_NEG_OFF. */
	sethi	%hi(SKF_MAX_NEG_OFF), r_TMP
	cmp	r_OFF, r_TMP
	bl	bpf_error
	 nop
	.globl	bpf_jit_load_byte_negative_offset
bpf_jit_load_byte_negative_offset:
	/* Single byte: no alignment concern. */
	bpf_negative_common(1)
	retl
	 ldub	[r_TMP], r_A
186
bpf_slow_path_byte_msh_neg:
	/* Reject offsets below SKF_MAX_NEG_OFF. */
	sethi	%hi(SKF_MAX_NEG_OFF), r_TMP
	cmp	r_OFF, r_TMP
	bl	bpf_error
	 nop
	.globl	bpf_jit_load_byte_msh_negative_offset
bpf_jit_load_byte_msh_negative_offset:
	/* msh: X = (byte & 0xf) << 2; r_OFF reused as scratch. */
	bpf_negative_common(1)
	ldub	[r_TMP], r_OFF
	and	r_OFF, 0xf, r_OFF
	retl
	 sll	r_OFF, 2, r_X
199
bpf_error:
	/* Make the JIT program return zero. The JIT epilogue
	 * stores away the original %o7 into r_saved_O7. The
	 * normal leaf function return is to use "retl" which
	 * would evalute to "jmpl %o7 + 8, %g0" but we want to
	 * use the saved value thus the sequence you see here.
	 */
	jmpl	r_saved_O7 + 8, %g0
	 clr	%o0			/* delay slot: return value 0 */