/* -*- linux-c -*- ------------------------------------------------------- *
 *
 * Copyright 2002 H. Peter Anvin - All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, Inc., 53 Temple Place Ste 330,
 * Boston MA 02111-1307, USA; either version 2 of the License, or
 * (at your option) any later version; incorporated herein by reference.
 *
 * ----------------------------------------------------------------------- */

/*
 * raid6/algos.c
 *
 * Algorithm list and algorithm selection for RAID-6
 */

#include <linux/raid/pq.h>
#ifndef __KERNEL__
#include <sys/mman.h>
#include <stdio.h>
#else
#include <linux/module.h>
#include <linux/gfp.h>
#if !RAID6_USE_EMPTY_ZERO_PAGE
/* In .bss so it's zeroed */
const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256)));
EXPORT_SYMBOL(raid6_empty_zero_page);
#endif
#endif

struct raid6_calls raid6_call;
EXPORT_SYMBOL_GPL(raid6_call);

const struct raid6_calls * const raid6_algos[] = {
#if defined(__ia64__)
        &raid6_intx16,
        &raid6_intx32,
#endif
#if defined(__i386__) && !defined(__arch_um__)
        &raid6_mmxx1,
        &raid6_mmxx2,
        &raid6_sse1x1,
        &raid6_sse1x2,
        &raid6_sse2x1,
        &raid6_sse2x2,
#endif
#if defined(__x86_64__) && !defined(__arch_um__)
        &raid6_sse2x1,
        &raid6_sse2x2,
        &raid6_sse2x4,
#endif
#ifdef CONFIG_ALTIVEC
        &raid6_altivec1,
        &raid6_altivec2,
        &raid6_altivec4,
        &raid6_altivec8,
#endif
        &raid6_intx1,
        &raid6_intx2,
        &raid6_intx4,
        &raid6_intx8,
        NULL
};

void (*raid6_2data_recov)(int, size_t, int, int, void **);
EXPORT_SYMBOL_GPL(raid6_2data_recov);

void (*raid6_datap_recov)(int, size_t, int, void **);
EXPORT_SYMBOL_GPL(raid6_datap_recov);

const struct raid6_recov_calls *const raid6_recov_algos[] = {
#if (defined(__i386__) || defined(__x86_64__)) && !defined(__arch_um__)
        &raid6_recov_ssse3,
#endif
        &raid6_recov_intx1,
        NULL
};

#ifdef __KERNEL__
#define RAID6_TIME_JIFFIES_LG2  4
#else
/* Need more time to be stable in userspace */
#define RAID6_TIME_JIFFIES_LG2  9
#define time_before(x, y) ((x) < (y))
#endif

static inline const struct raid6_recov_calls *raid6_choose_recov(void)
{
        const struct raid6_recov_calls *const *algo;
        const struct raid6_recov_calls *best;

        for (best = NULL, algo = raid6_recov_algos; *algo; algo++)
                if (!best || (*algo)->priority > best->priority)
                        if (!(*algo)->valid || (*algo)->valid())
                                best = *algo;

        if (best) {
                raid6_2data_recov = best->data2;
                raid6_datap_recov = best->datap;

                printk("raid6: using %s recovery algorithm\n", best->name);
        } else
                printk("raid6: Yikes! No recovery algorithm found!\n");

        return best;
}

static inline const struct raid6_calls *raid6_choose_gen(
        void *(*const dptrs)[(65536/PAGE_SIZE)+2], const int disks)
{
        unsigned long perf, bestperf, j0, j1;
        const struct raid6_calls *const *algo;
        const struct raid6_calls *best;

        for (bestperf = 0, best = NULL, algo = raid6_algos; *algo; algo++) {
                if (!best || (*algo)->prefer >= best->prefer) {
                        if ((*algo)->valid && !(*algo)->valid())
                                continue;

                        perf = 0;

                        preempt_disable();
                        j0 = jiffies;
                        while ((j1 = jiffies) == j0)
                                cpu_relax();
                        while (time_before(jiffies,
                                           j1 + (1<<RAID6_TIME_JIFFIES_LG2))) {
                                (*algo)->gen_syndrome(disks, PAGE_SIZE, *dptrs);
                                perf++;
                        }
                        preempt_enable();

                        if (perf > bestperf) {
                                bestperf = perf;
                                best = *algo;
                        }
                        printk("raid6: %-8s %5ld MB/s\n", (*algo)->name,
                               (perf*HZ) >> (20-16+RAID6_TIME_JIFFIES_LG2));
                }
        }

        if (best) {
                printk("raid6: using algorithm %s (%ld MB/s)\n",
                       best->name,
                       (bestperf*HZ) >> (20-16+RAID6_TIME_JIFFIES_LG2));
                raid6_call = *best;
        } else
                printk("raid6: Yikes! No algorithm found!\n");

        return best;
}

/* Try to pick the best algorithm */
/* This code uses the gfmul table as a convenient data set to abuse */

int __init raid6_select_algo(void)
{
        const int disks = (65536/PAGE_SIZE)+2;

        const struct raid6_calls *gen_best;
        const struct raid6_recov_calls *rec_best;
        char *syndromes;
        void *dptrs[(65536/PAGE_SIZE)+2];
        int i;

        for (i = 0; i < disks-2; i++)
                dptrs[i] = ((char *)raid6_gfmul) + PAGE_SIZE*i;
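        /*
         * The first disks-2 "data disk" pointers reference successive
         * PAGE_SIZE chunks of the static 64 KiB raid6_gfmul table, so no
         * data buffer needs to be allocated for them; only the two
         * destination pages for P and Q are allocated below.
         */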

        /* Normal code - use a 2-page allocation to avoid D$ conflict */
        syndromes = (void *) __get_free_pages(GFP_KERNEL, 1);

        if (!syndromes) {
                printk("raid6: Yikes! No memory available.\n");
                return -ENOMEM;
        }

        dptrs[disks-2] = syndromes;
        dptrs[disks-1] = syndromes + PAGE_SIZE;

        /* select raid gen_syndrome function */
        gen_best = raid6_choose_gen(&dptrs, disks);

        /* select raid recover functions */
        rec_best = raid6_choose_recov();

        free_pages((unsigned long)syndromes, 1);

        return gen_best && rec_best ? 0 : -EINVAL;
}

static void raid6_exit(void)
{
        do { } while (0);
}

subsys_initcall(raid6_select_algo);
module_exit(raid6_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID6 Q-syndrome calculations");
// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- linux-c -*- ------------------------------------------------------- *
 *
 * Copyright 2002 H. Peter Anvin - All Rights Reserved
 *
 * ----------------------------------------------------------------------- */

/*
 * raid6/algos.c
 *
 * Algorithm list and algorithm selection for RAID-6
 */

#include <linux/raid/pq.h>
#ifndef __KERNEL__
#include <sys/mman.h>
#include <stdio.h>
#else
#include <linux/module.h>
#include <linux/gfp.h>
/* In .bss so it's zeroed */
const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256)));
EXPORT_SYMBOL(raid6_empty_zero_page);
#endif

struct raid6_calls raid6_call;
EXPORT_SYMBOL_GPL(raid6_call);

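/*
 * Within each architecture block the entries are listed roughly from most
 * to least preferred.  raid6_choose_gen() walks this table, skips entries
 * whose valid() check fails, and (when CONFIG_RAID6_PQ_BENCHMARK is
 * enabled) benchmarks the remaining candidates whose priority is at least
 * that of the current best, keeping the fastest gen_syndrome().  With the
 * benchmark disabled, the first usable entry is selected.
 */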
const struct raid6_calls * const raid6_algos[] = {
#if defined(__i386__) && !defined(__arch_um__)
#ifdef CONFIG_AS_AVX512
        &raid6_avx512x2,
        &raid6_avx512x1,
#endif
        &raid6_avx2x2,
        &raid6_avx2x1,
        &raid6_sse2x2,
        &raid6_sse2x1,
        &raid6_sse1x2,
        &raid6_sse1x1,
        &raid6_mmxx2,
        &raid6_mmxx1,
#endif
#if defined(__x86_64__) && !defined(__arch_um__)
#ifdef CONFIG_AS_AVX512
        &raid6_avx512x4,
        &raid6_avx512x2,
        &raid6_avx512x1,
#endif
        &raid6_avx2x4,
        &raid6_avx2x2,
        &raid6_avx2x1,
        &raid6_sse2x4,
        &raid6_sse2x2,
        &raid6_sse2x1,
#endif
#ifdef CONFIG_ALTIVEC
        &raid6_vpermxor8,
        &raid6_vpermxor4,
        &raid6_vpermxor2,
        &raid6_vpermxor1,
        &raid6_altivec8,
        &raid6_altivec4,
        &raid6_altivec2,
        &raid6_altivec1,
#endif
#if defined(CONFIG_S390)
        &raid6_s390vx8,
#endif
#ifdef CONFIG_KERNEL_MODE_NEON
        &raid6_neonx8,
        &raid6_neonx4,
        &raid6_neonx2,
        &raid6_neonx1,
#endif
#if defined(__ia64__)
        &raid6_intx32,
        &raid6_intx16,
#endif
        &raid6_intx8,
        &raid6_intx4,
        &raid6_intx2,
        &raid6_intx1,
        NULL
};

void (*raid6_2data_recov)(int, size_t, int, int, void **);
EXPORT_SYMBOL_GPL(raid6_2data_recov);

void (*raid6_datap_recov)(int, size_t, int, void **);
EXPORT_SYMBOL_GPL(raid6_datap_recov);
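
/*
 * Consumers reach the selected routines through these hooks: e.g. the
 * md/raid456 stack (via the async_tx layer) calls
 * raid6_call.gen_syndrome()/xor_syndrome() for P/Q generation and
 * raid6_2data_recov()/raid6_datap_recov() for reconstruction.
 * raid6_select_algo() fills them in at init time.
 */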

const struct raid6_recov_calls *const raid6_recov_algos[] = {
#ifdef CONFIG_X86
#ifdef CONFIG_AS_AVX512
        &raid6_recov_avx512,
#endif
        &raid6_recov_avx2,
        &raid6_recov_ssse3,
#endif
#ifdef CONFIG_S390
        &raid6_recov_s390xc,
#endif
#if defined(CONFIG_KERNEL_MODE_NEON)
        &raid6_recov_neon,
#endif
        &raid6_recov_intx1,
        NULL
};

#ifdef __KERNEL__
#define RAID6_TIME_JIFFIES_LG2  4
#else
/* Need more time to be stable in userspace */
#define RAID6_TIME_JIFFIES_LG2  9
#define time_before(x, y) ((x) < (y))
#endif

#define RAID6_TEST_DISKS        8
#define RAID6_TEST_DISKS_ORDER  3
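/*
 * The benchmark runs over a synthetic RAID6_TEST_DISKS-disk array with one
 * page per disk, so an order-RAID6_TEST_DISKS_ORDER (2^3 = 8 page)
 * allocation holds the whole test set; the last two pages serve as the
 * P and Q destinations.
 */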

static inline const struct raid6_recov_calls *raid6_choose_recov(void)
{
        const struct raid6_recov_calls *const *algo;
        const struct raid6_recov_calls *best;

        for (best = NULL, algo = raid6_recov_algos; *algo; algo++)
                if (!best || (*algo)->priority > best->priority)
                        if (!(*algo)->valid || (*algo)->valid())
                                best = *algo;

        if (best) {
                raid6_2data_recov = best->data2;
                raid6_datap_recov = best->datap;

                pr_info("raid6: using %s recovery algorithm\n", best->name);
        } else
                pr_err("raid6: Yikes! No recovery algorithm found!\n");

        return best;
}

static inline const struct raid6_calls *raid6_choose_gen(
        void *(*const dptrs)[RAID6_TEST_DISKS], const int disks)
{
        unsigned long perf, bestgenperf, j0, j1;
        int start = (disks>>1)-1, stop = disks-3;       /* work on the second half of the disks */
        const struct raid6_calls *const *algo;
        const struct raid6_calls *best;

        for (bestgenperf = 0, best = NULL, algo = raid6_algos; *algo; algo++) {
                if (!best || (*algo)->priority >= best->priority) {
                        if ((*algo)->valid && !(*algo)->valid())
                                continue;

                        if (!IS_ENABLED(CONFIG_RAID6_PQ_BENCHMARK)) {
                                best = *algo;
                                break;
                        }

                        perf = 0;

                        preempt_disable();
                        j0 = jiffies;
                        while ((j1 = jiffies) == j0)
                                cpu_relax();
                        while (time_before(jiffies,
                                           j1 + (1<<RAID6_TIME_JIFFIES_LG2))) {
                                (*algo)->gen_syndrome(disks, PAGE_SIZE, *dptrs);
                                perf++;
                        }
                        preempt_enable();

                        if (perf > bestgenperf) {
                                bestgenperf = perf;
                                best = *algo;
                        }
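                        /*
                         * Each pass processes (disks-2) data pages of
                         * PAGE_SIZE bytes and the timed window is
                         * 2^RAID6_TIME_JIFFIES_LG2 jiffies, i.e.
                         * 2^RAID6_TIME_JIFFIES_LG2 / HZ seconds, so the
                         * rate is perf * (disks-2) * PAGE_SIZE * HZ /
                         * 2^RAID6_TIME_JIFFIES_LG2 bytes/s; the shift
                         * below folds PAGE_SIZE (2^PAGE_SHIFT) and the
                         * 2^20 bytes-per-MB conversion into one step.
                         */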
                        pr_info("raid6: %-8s gen() %5ld MB/s\n", (*algo)->name,
                                (perf * HZ * (disks-2)) >>
                                (20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2));
                }
        }

        if (!best) {
                pr_err("raid6: Yikes! No algorithm found!\n");
                goto out;
        }

        raid6_call = *best;

        if (!IS_ENABLED(CONFIG_RAID6_PQ_BENCHMARK)) {
                pr_info("raid6: skipped pq benchmark and selected %s\n",
                        best->name);
                goto out;
        }

        pr_info("raid6: using algorithm %s gen() %ld MB/s\n",
                best->name,
                (bestgenperf * HZ * (disks - 2)) >>
                (20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2));

        if (best->xor_syndrome) {
                perf = 0;

                preempt_disable();
                j0 = jiffies;
                while ((j1 = jiffies) == j0)
                        cpu_relax();
                while (time_before(jiffies,
                                   j1 + (1 << RAID6_TIME_JIFFIES_LG2))) {
                        best->xor_syndrome(disks, start, stop,
                                           PAGE_SIZE, *dptrs);
                        perf++;
                }
                preempt_enable();

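                /*
                 * The rmw benchmark only runs over the second half of the
                 * data disks (start..stop, i.e. (disks-2)/2 pages per
                 * call), hence the extra "+ 1" in the shift compared with
                 * the gen() figure above.
                 */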
                pr_info("raid6: .... xor() %ld MB/s, rmw enabled\n",
                        (perf * HZ * (disks - 2)) >>
                        (20 - PAGE_SHIFT + RAID6_TIME_JIFFIES_LG2 + 1));
        }

out:
        return best;
}

/* Try to pick the best algorithm */
/* This code uses the gfmul table as a convenient data set to abuse */

int __init raid6_select_algo(void)
{
        const int disks = RAID6_TEST_DISKS;

        const struct raid6_calls *gen_best;
        const struct raid6_recov_calls *rec_best;
        char *disk_ptr, *p;
        void *dptrs[RAID6_TEST_DISKS];
        int i, cycle;

        /* prepare the buffer and fill it circularly with gfmul table */
        disk_ptr = (char *)__get_free_pages(GFP_KERNEL, RAID6_TEST_DISKS_ORDER);
        if (!disk_ptr) {
                pr_err("raid6: Yikes! No memory available.\n");
                return -ENOMEM;
        }

        p = disk_ptr;
        for (i = 0; i < disks; i++)
                dptrs[i] = p + PAGE_SIZE * i;

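        /*
         * Fill the (disks - 2) data pages by copying the 64 KiB raid6_gfmul
         * table end to end: "cycle" whole copies plus a partial tail copy
         * for any remainder.  With 8 test disks and e.g. 4 KiB pages the
         * data area is 24 KiB, so only the tail copy runs; the last two
         * pages are left as the P and Q destinations.
         */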
        cycle = ((disks - 2) * PAGE_SIZE) / 65536;
        for (i = 0; i < cycle; i++) {
                memcpy(p, raid6_gfmul, 65536);
                p += 65536;
        }

        if ((disks - 2) * PAGE_SIZE % 65536)
                memcpy(p, raid6_gfmul, (disks - 2) * PAGE_SIZE % 65536);

        /* select raid gen_syndrome function */
        gen_best = raid6_choose_gen(&dptrs, disks);

        /* select raid recover functions */
        rec_best = raid6_choose_recov();

        free_pages((unsigned long)disk_ptr, RAID6_TEST_DISKS_ORDER);

        return gen_best && rec_best ? 0 : -EINVAL;
}

static void raid6_exit(void)
{
        do { } while (0);
}

subsys_initcall(raid6_select_algo);
module_exit(raid6_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID6 Q-syndrome calculations");