1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 *
4 * Copyright (C) IBM Corporation, 2012
5 *
6 * Author: Anton Blanchard <anton@au.ibm.com>
7 */
8
9/*
10 * Sparse (as at v0.5.0) gets very, very confused by this file.
11 * Make it a bit simpler for it.
12 */
13#if !defined(__CHECKER__)
14#include <altivec.h>
15#else
16#define vec_xor(a, b) a ^ b
17#define vector __attribute__((vector_size(16)))
18#endif
19
20#include "xor_vmx.h"
21
/*
 * All work is done on 16-byte VMX vectors; "signed char" is an arbitrary
 * element choice — only bitwise XOR is applied, so signedness is moot.
 */
typedef vector signed char unative_t;

/*
 * DEFINE(V): reinterpret the V_in argument as a vector pointer and declare
 * four vector temporaries, so each loop pass covers 4 * sizeof(unative_t)
 * == 64 bytes per source.  Note the cast also drops const from the
 * read-only source arguments.
 */
#define DEFINE(V)				\
	unative_t *V = (unative_t *)V##_in;	\
	unative_t V##_0, V##_1, V##_2, V##_3

/* LOAD(V): fill V's four temporaries from the next 64 bytes. */
#define LOAD(V)			\
	do {			\
		V##_0 = V[0];	\
		V##_1 = V[1];	\
		V##_2 = V[2];	\
		V##_3 = V[3];	\
	} while (0)

/* STORE(V): write V's four temporaries back to memory. */
#define STORE(V)		\
	do {			\
		V[0] = V##_0;	\
		V[1] = V##_1;	\
		V[2] = V##_2;	\
		V[3] = V##_3;	\
	} while (0)

/* XOR(V1, V2): V1's four temporaries ^= V2's, element-wise. */
#define XOR(V1, V2)					\
	do {						\
		V1##_0 = vec_xor(V1##_0, V2##_0);	\
		V1##_1 = vec_xor(V1##_1, V2##_1);	\
		V1##_2 = vec_xor(V1##_2, V2##_2);	\
		V1##_3 = vec_xor(V1##_3, V2##_3);	\
	} while (0)
51
52void __xor_altivec_2(unsigned long bytes,
53 unsigned long * __restrict v1_in,
54 const unsigned long * __restrict v2_in)
55{
56 DEFINE(v1);
57 DEFINE(v2);
58 unsigned long lines = bytes / (sizeof(unative_t)) / 4;
59
60 do {
61 LOAD(v1);
62 LOAD(v2);
63 XOR(v1, v2);
64 STORE(v1);
65
66 v1 += 4;
67 v2 += 4;
68 } while (--lines > 0);
69}
70
71void __xor_altivec_3(unsigned long bytes,
72 unsigned long * __restrict v1_in,
73 const unsigned long * __restrict v2_in,
74 const unsigned long * __restrict v3_in)
75{
76 DEFINE(v1);
77 DEFINE(v2);
78 DEFINE(v3);
79 unsigned long lines = bytes / (sizeof(unative_t)) / 4;
80
81 do {
82 LOAD(v1);
83 LOAD(v2);
84 LOAD(v3);
85 XOR(v1, v2);
86 XOR(v1, v3);
87 STORE(v1);
88
89 v1 += 4;
90 v2 += 4;
91 v3 += 4;
92 } while (--lines > 0);
93}
94
95void __xor_altivec_4(unsigned long bytes,
96 unsigned long * __restrict v1_in,
97 const unsigned long * __restrict v2_in,
98 const unsigned long * __restrict v3_in,
99 const unsigned long * __restrict v4_in)
100{
101 DEFINE(v1);
102 DEFINE(v2);
103 DEFINE(v3);
104 DEFINE(v4);
105 unsigned long lines = bytes / (sizeof(unative_t)) / 4;
106
107 do {
108 LOAD(v1);
109 LOAD(v2);
110 LOAD(v3);
111 LOAD(v4);
112 XOR(v1, v2);
113 XOR(v3, v4);
114 XOR(v1, v3);
115 STORE(v1);
116
117 v1 += 4;
118 v2 += 4;
119 v3 += 4;
120 v4 += 4;
121 } while (--lines > 0);
122}
123
124void __xor_altivec_5(unsigned long bytes,
125 unsigned long * __restrict v1_in,
126 const unsigned long * __restrict v2_in,
127 const unsigned long * __restrict v3_in,
128 const unsigned long * __restrict v4_in,
129 const unsigned long * __restrict v5_in)
130{
131 DEFINE(v1);
132 DEFINE(v2);
133 DEFINE(v3);
134 DEFINE(v4);
135 DEFINE(v5);
136 unsigned long lines = bytes / (sizeof(unative_t)) / 4;
137
138 do {
139 LOAD(v1);
140 LOAD(v2);
141 LOAD(v3);
142 LOAD(v4);
143 LOAD(v5);
144 XOR(v1, v2);
145 XOR(v3, v4);
146 XOR(v1, v5);
147 XOR(v1, v3);
148 STORE(v1);
149
150 v1 += 4;
151 v2 += 4;
152 v3 += 4;
153 v4 += 4;
154 v5 += 4;
155 } while (--lines > 0);
156}
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; either version 2 of the License, or
5 * (at your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
15 *
16 * Copyright (C) IBM Corporation, 2012
17 *
18 * Author: Anton Blanchard <anton@au.ibm.com>
19 */
20
21/*
22 * Sparse (as at v0.5.0) gets very, very confused by this file.
23 * Make it a bit simpler for it.
24 */
25#if !defined(__CHECKER__)
26#include <altivec.h>
27#else
28#define vec_xor(a, b) a ^ b
29#define vector __attribute__((vector_size(16)))
30#endif
31
32#include <linux/preempt.h>
33#include <linux/export.h>
34#include <linux/sched.h>
35#include <asm/switch_to.h>
36
/*
 * All work is done on 16-byte VMX vectors; "signed char" is an arbitrary
 * element choice — only bitwise XOR is applied, so signedness is moot.
 */
typedef vector signed char unative_t;

/*
 * DEFINE(V): reinterpret the V_in argument as a vector pointer and declare
 * four vector temporaries, so each loop pass covers 4 * sizeof(unative_t)
 * == 64 bytes per source.
 */
#define DEFINE(V)				\
	unative_t *V = (unative_t *)V##_in;	\
	unative_t V##_0, V##_1, V##_2, V##_3

/* LOAD(V): fill V's four temporaries from the next 64 bytes. */
#define LOAD(V)			\
	do {			\
		V##_0 = V[0];	\
		V##_1 = V[1];	\
		V##_2 = V[2];	\
		V##_3 = V[3];	\
	} while (0)

/* STORE(V): write V's four temporaries back to memory. */
#define STORE(V)		\
	do {			\
		V[0] = V##_0;	\
		V[1] = V##_1;	\
		V[2] = V##_2;	\
		V[3] = V##_3;	\
	} while (0)

/* XOR(V1, V2): V1's four temporaries ^= V2's, element-wise. */
#define XOR(V1, V2)					\
	do {						\
		V1##_0 = vec_xor(V1##_0, V2##_0);	\
		V1##_1 = vec_xor(V1##_1, V2##_1);	\
		V1##_2 = vec_xor(V1##_2, V2##_2);	\
		V1##_3 = vec_xor(V1##_3, V2##_3);	\
	} while (0)
66
/*
 * xor_altivec_2() - exported kernel-context wrapper for the two-source
 * VMX XOR.  Duplicated the whole inner loop of __xor_altivec_2(); now
 * delegates to it, keeping only the preempt/AltiVec bracketing here
 * (enable_kernel_altivec() requires preemption to be disabled).
 *
 * bytes: length of each buffer, a non-zero multiple of 64.
 * v1_in: destination, XORed in place.
 * v2_in: source (not modified).
 */
void xor_altivec_2(unsigned long bytes, unsigned long *v1_in,
		   unsigned long *v2_in)
{
	preempt_disable();
	enable_kernel_altivec();

	__xor_altivec_2(bytes, v1_in, v2_in);

	disable_kernel_altivec();
	preempt_enable();
}
EXPORT_SYMBOL(xor_altivec_2);
91
/*
 * xor_altivec_3() - exported kernel-context wrapper for the three-source
 * VMX XOR.  Duplicated the whole inner loop of __xor_altivec_3(); now
 * delegates to it, keeping only the preempt/AltiVec bracketing here
 * (enable_kernel_altivec() requires preemption to be disabled).
 *
 * bytes:        length of each buffer, a non-zero multiple of 64.
 * v1_in:        destination, XORed in place.
 * v2_in, v3_in: sources (not modified).
 */
void xor_altivec_3(unsigned long bytes, unsigned long *v1_in,
		   unsigned long *v2_in, unsigned long *v3_in)
{
	preempt_disable();
	enable_kernel_altivec();

	__xor_altivec_3(bytes, v1_in, v2_in, v3_in);

	disable_kernel_altivec();
	preempt_enable();
}
EXPORT_SYMBOL(xor_altivec_3);
120
/*
 * xor_altivec_4() - exported kernel-context wrapper for the four-source
 * VMX XOR.  Duplicated the whole inner loop of __xor_altivec_4(); now
 * delegates to it, keeping only the preempt/AltiVec bracketing here
 * (enable_kernel_altivec() requires preemption to be disabled).
 *
 * bytes:         length of each buffer, a non-zero multiple of 64.
 * v1_in:         destination, XORed in place.
 * v2_in..v4_in:  sources (not modified).
 */
void xor_altivec_4(unsigned long bytes, unsigned long *v1_in,
		   unsigned long *v2_in, unsigned long *v3_in,
		   unsigned long *v4_in)
{
	preempt_disable();
	enable_kernel_altivec();

	__xor_altivec_4(bytes, v1_in, v2_in, v3_in, v4_in);

	disable_kernel_altivec();
	preempt_enable();
}
EXPORT_SYMBOL(xor_altivec_4);
154
/*
 * xor_altivec_5() - exported kernel-context wrapper for the five-source
 * VMX XOR.  Duplicated the whole inner loop of __xor_altivec_5(); now
 * delegates to it, keeping only the preempt/AltiVec bracketing here
 * (enable_kernel_altivec() requires preemption to be disabled).
 *
 * bytes:         length of each buffer, a non-zero multiple of 64.
 * v1_in:         destination, XORed in place.
 * v2_in..v5_in:  sources (not modified).
 */
void xor_altivec_5(unsigned long bytes, unsigned long *v1_in,
		   unsigned long *v2_in, unsigned long *v3_in,
		   unsigned long *v4_in, unsigned long *v5_in)
{
	preempt_disable();
	enable_kernel_altivec();

	__xor_altivec_5(bytes, v1_in, v2_in, v3_in, v4_in, v5_in);

	disable_kernel_altivec();
	preempt_enable();
}
EXPORT_SYMBOL(xor_altivec_5);