#ifndef _ASM_X86_PERCPU_H
#define _ASM_X86_PERCPU_H

#ifdef CONFIG_X86_64
#define __percpu_seg		gs
#define __percpu_mov_op		movq
#else
#define __percpu_seg		fs
#define __percpu_mov_op		movl
#endif

#ifdef __ASSEMBLY__

/*
 * PER_CPU finds the address of a per-cpu variable.
 *
 * Args:
 *	var - variable name
 *	reg - 32bit register (a 64bit register on x86_64, where the
 *	      mov is a movq)
 *
 * The resulting address is stored in the "reg" argument.
 *
 * Example:
 *	PER_CPU(cpu_gdt_descr, %ebx)
 */
#ifdef CONFIG_SMP
#define PER_CPU(var, reg)					\
	__percpu_mov_op %__percpu_seg:this_cpu_off, reg;	\
	lea var(reg), reg
#define PER_CPU_VAR(var)	%__percpu_seg:var
#else /* ! SMP */
#define PER_CPU(var, reg)	__percpu_mov_op $var, reg
#define PER_CPU_VAR(var)	var
#endif	/* SMP */

#ifdef CONFIG_X86_64_SMP
#define INIT_PER_CPU_VAR(var)	init_per_cpu__##var
#else
#define INIT_PER_CPU_VAR(var)	var
#endif

#else /* ...!ASSEMBLY */

#include <linux/kernel.h>
#include <linux/stringify.h>

#ifdef CONFIG_SMP
#define __percpu_prefix		"%%"__stringify(__percpu_seg)":"
#define __my_cpu_offset		this_cpu_read(this_cpu_off)

/*
 * Compared to the generic __my_cpu_offset version, the following
 * saves one instruction and avoids clobbering a temp register.
 */
#define arch_raw_cpu_ptr(ptr)				\
({							\
	unsigned long tcp_ptr__;			\
	asm volatile("add " __percpu_arg(1) ", %0"	\
		     : "=r" (tcp_ptr__)			\
		     : "m" (this_cpu_off), "0" (ptr));	\
	(typeof(*(ptr)) __kernel __force *)tcp_ptr__;	\
})
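
/*
 * Illustrative sketch (not part of the original header; "foo" is a
 * made-up per-cpu variable): with the generic __my_cpu_offset, code
 * like this_cpu_ptr(&foo) first loads the per-cpu offset into a
 * scratch register and then adds the variable address, roughly
 *
 *	movq %gs:this_cpu_off, %rax
 *	addq $foo, %rax
 *
 * arch_raw_cpu_ptr() starts from the address instead and uses the
 * offset directly as a memory operand,
 *
 *	addq %gs:this_cpu_off, %rax
 *
 * saving one instruction and one clobbered temporary.
 */
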
#else
#define __percpu_prefix		""
#endif

#define __percpu_arg(x)		__percpu_prefix "%" #x

/*
 * Initialized pointers to per-cpu variables needed for the boot
 * processor need to use these macros to get the proper address
 * offset from __per_cpu_load on SMP.
 *
 * There also must be an entry in vmlinux_64.lds.S
 */
#define DECLARE_INIT_PER_CPU(var) \
	extern typeof(var) init_per_cpu_var(var)

#ifdef CONFIG_X86_64_SMP
#define init_per_cpu_var(var)	init_per_cpu__##var
#else
#define init_per_cpu_var(var)	var
#endif
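
/*
 * Illustrative sketch (not part of the original header; "foo" is a
 * made-up per-cpu variable): a boot-time user pairs the declaration
 * with a reference to the init copy of the variable, e.g.
 *
 *	DECLARE_INIT_PER_CPU(foo);
 *
 *	some_descriptor.address = (unsigned long)&init_per_cpu_var(foo);
 *
 * On CONFIG_X86_64_SMP this resolves to the init_per_cpu__foo symbol
 * that the linker script places relative to __per_cpu_load; otherwise
 * it is simply the variable itself.
 */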

/* For arch-specific code, we can use direct single-insn ops (they
 * don't give an lvalue though). */
extern void __bad_percpu_size(void);

#define percpu_to_op(op, var, val)			\
do {							\
	typedef typeof(var) pto_T__;			\
	if (0) {					\
		pto_T__ pto_tmp__;			\
		pto_tmp__ = (val);			\
		(void)pto_tmp__;			\
	}						\
	switch (sizeof(var)) {				\
	case 1:						\
		asm(op "b %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "qi" ((pto_T__)(val)));		\
		break;					\
	case 2:						\
		asm(op "w %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "ri" ((pto_T__)(val)));		\
		break;					\
	case 4:						\
		asm(op "l %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "ri" ((pto_T__)(val)));		\
		break;					\
	case 8:						\
		asm(op "q %1,"__percpu_arg(0)		\
		    : "+m" (var)			\
		    : "re" ((pto_T__)(val)));		\
		break;					\
	default: __bad_percpu_size();			\
	}						\
} while (0)

/*
 * Generate a percpu add to memory instruction and optimize code
 * if one is added or subtracted.
 */
#define percpu_add_op(var, val)						\
do {									\
	typedef typeof(var) pao_T__;					\
	const int pao_ID__ = (__builtin_constant_p(val) &&		\
			      ((val) == 1 || (val) == -1)) ?		\
				(int)(val) : 0;				\
	if (0) {							\
		pao_T__ pao_tmp__;					\
		pao_tmp__ = (val);					\
		(void)pao_tmp__;					\
	}								\
	switch (sizeof(var)) {						\
	case 1:								\
		if (pao_ID__ == 1)					\
			asm("incb "__percpu_arg(0) : "+m" (var));	\
		else if (pao_ID__ == -1)				\
			asm("decb "__percpu_arg(0) : "+m" (var));	\
		else							\
			asm("addb %1, "__percpu_arg(0)			\
			    : "+m" (var)				\
			    : "qi" ((pao_T__)(val)));			\
		break;							\
	case 2:								\
		if (pao_ID__ == 1)					\
			asm("incw "__percpu_arg(0) : "+m" (var));	\
		else if (pao_ID__ == -1)				\
			asm("decw "__percpu_arg(0) : "+m" (var));	\
		else							\
			asm("addw %1, "__percpu_arg(0)			\
			    : "+m" (var)				\
			    : "ri" ((pao_T__)(val)));			\
		break;							\
	case 4:								\
		if (pao_ID__ == 1)					\
			asm("incl "__percpu_arg(0) : "+m" (var));	\
		else if (pao_ID__ == -1)				\
			asm("decl "__percpu_arg(0) : "+m" (var));	\
		else							\
			asm("addl %1, "__percpu_arg(0)			\
			    : "+m" (var)				\
			    : "ri" ((pao_T__)(val)));			\
		break;							\
	case 8:								\
		if (pao_ID__ == 1)					\
			asm("incq "__percpu_arg(0) : "+m" (var));	\
		else if (pao_ID__ == -1)				\
			asm("decq "__percpu_arg(0) : "+m" (var));	\
		else							\
			asm("addq %1, "__percpu_arg(0)			\
			    : "+m" (var)				\
			    : "re" ((pao_T__)(val)));			\
		break;							\
	default: __bad_percpu_size();					\
	}								\
} while (0)
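
/*
 * Illustrative sketch (not part of the original header; "foo" is a
 * made-up 4-byte per-cpu counter): with the constant folding above,
 *
 *	this_cpu_add(foo, 1);	 ->  incl %gs:foo
 *	this_cpu_add(foo, -1);	 ->  decl %gs:foo
 *	this_cpu_add(foo, 16);	 ->  addl $16, %gs:foo
 *
 * (the segment prefix is %fs on 32-bit kernels).
 */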

#define percpu_from_op(op, var)			\
({						\
	typeof(var) pfo_ret__;			\
	switch (sizeof(var)) {			\
	case 1:					\
		asm(op "b "__percpu_arg(1)",%0"	\
		    : "=q" (pfo_ret__)		\
		    : "m" (var));		\
		break;				\
	case 2:					\
		asm(op "w "__percpu_arg(1)",%0"	\
		    : "=r" (pfo_ret__)		\
		    : "m" (var));		\
		break;				\
	case 4:					\
		asm(op "l "__percpu_arg(1)",%0"	\
		    : "=r" (pfo_ret__)		\
		    : "m" (var));		\
		break;				\
	case 8:					\
		asm(op "q "__percpu_arg(1)",%0"	\
		    : "=r" (pfo_ret__)		\
		    : "m" (var));		\
		break;				\
	default: __bad_percpu_size();		\
	}					\
	pfo_ret__;				\
})

#define percpu_stable_op(op, var)			\
({							\
	typeof(var) pfo_ret__;				\
	switch (sizeof(var)) {				\
	case 1:						\
		asm(op "b "__percpu_arg(P1)",%0"	\
		    : "=q" (pfo_ret__)			\
		    : "p" (&(var)));			\
		break;					\
	case 2:						\
		asm(op "w "__percpu_arg(P1)",%0"	\
		    : "=r" (pfo_ret__)			\
		    : "p" (&(var)));			\
		break;					\
	case 4:						\
		asm(op "l "__percpu_arg(P1)",%0"	\
		    : "=r" (pfo_ret__)			\
		    : "p" (&(var)));			\
		break;					\
	case 8:						\
		asm(op "q "__percpu_arg(P1)",%0"	\
		    : "=r" (pfo_ret__)			\
		    : "p" (&(var)));			\
		break;					\
	default: __bad_percpu_size();			\
	}						\
	pfo_ret__;					\
})

#define percpu_unary_op(op, var)		\
({						\
	switch (sizeof(var)) {			\
	case 1:					\
		asm(op "b "__percpu_arg(0)	\
		    : "+m" (var));		\
		break;				\
	case 2:					\
		asm(op "w "__percpu_arg(0)	\
		    : "+m" (var));		\
		break;				\
	case 4:					\
		asm(op "l "__percpu_arg(0)	\
		    : "+m" (var));		\
		break;				\
	case 8:					\
		asm(op "q "__percpu_arg(0)	\
		    : "+m" (var));		\
		break;				\
	default: __bad_percpu_size();		\
	}					\
})

/*
 * Add return operation
 */
#define percpu_add_return_op(var, val)					\
({									\
	typeof(var) paro_ret__ = val;					\
	switch (sizeof(var)) {						\
	case 1:								\
		asm("xaddb %0, "__percpu_arg(1)				\
		    : "+q" (paro_ret__), "+m" (var)			\
		    : : "memory");					\
		break;							\
	case 2:								\
		asm("xaddw %0, "__percpu_arg(1)				\
		    : "+r" (paro_ret__), "+m" (var)			\
		    : : "memory");					\
		break;							\
	case 4:								\
		asm("xaddl %0, "__percpu_arg(1)				\
		    : "+r" (paro_ret__), "+m" (var)			\
		    : : "memory");					\
		break;							\
	case 8:								\
		asm("xaddq %0, "__percpu_arg(1)				\
		    : "+re" (paro_ret__), "+m" (var)			\
		    : : "memory");					\
		break;							\
	default: __bad_percpu_size();					\
	}								\
	paro_ret__ += val;						\
	paro_ret__;							\
})
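
/*
 * Illustrative sketch (not part of the original header; "foo" is a
 * made-up 4-byte per-cpu counter): xadd leaves the *old* value in the
 * register operand, so the macro adds "val" once more to return the
 * new value, e.g.
 *
 *	int new = this_cpu_add_return(foo, 1);
 *
 * emits "xaddl %eax, %gs:foo" with %eax preloaded with 1 and then
 * yields %eax + 1, i.e. the post-increment value of foo.
 */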

/*
 * xchg is implemented using cmpxchg without a lock prefix. xchg is
 * expensive due to the implied lock prefix. The processor cannot prefetch
 * cachelines if xchg is used.
 */
#define percpu_xchg_op(var, nval)					\
({									\
	typeof(var) pxo_ret__;						\
	typeof(var) pxo_new__ = (nval);					\
	switch (sizeof(var)) {						\
	case 1:								\
		asm("\n\tmov "__percpu_arg(1)",%%al"			\
		    "\n1:\tcmpxchgb %2, "__percpu_arg(1)		\
		    "\n\tjnz 1b"					\
		    : "=&a" (pxo_ret__), "+m" (var)			\
		    : "q" (pxo_new__)					\
		    : "memory");					\
		break;							\
	case 2:								\
		asm("\n\tmov "__percpu_arg(1)",%%ax"			\
		    "\n1:\tcmpxchgw %2, "__percpu_arg(1)		\
		    "\n\tjnz 1b"					\
		    : "=&a" (pxo_ret__), "+m" (var)			\
		    : "r" (pxo_new__)					\
		    : "memory");					\
		break;							\
	case 4:								\
		asm("\n\tmov "__percpu_arg(1)",%%eax"			\
		    "\n1:\tcmpxchgl %2, "__percpu_arg(1)		\
		    "\n\tjnz 1b"					\
		    : "=&a" (pxo_ret__), "+m" (var)			\
		    : "r" (pxo_new__)					\
		    : "memory");					\
		break;							\
	case 8:								\
		asm("\n\tmov "__percpu_arg(1)",%%rax"			\
		    "\n1:\tcmpxchgq %2, "__percpu_arg(1)		\
		    "\n\tjnz 1b"					\
		    : "=&a" (pxo_ret__), "+m" (var)			\
		    : "r" (pxo_new__)					\
		    : "memory");					\
		break;							\
	default: __bad_percpu_size();					\
	}								\
	pxo_ret__;							\
})
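
/*
 * Illustrative sketch (not part of the original header; "foo" is a
 * made-up 4-byte per-cpu variable): this_cpu_xchg(foo, new) therefore
 * expands to roughly
 *
 *	movl %gs:foo, %eax
 * 1:	cmpxchgl %edx, %gs:foo
 *	jnz 1b
 *
 * i.e. a plain load followed by a cmpxchg retry loop instead of a
 * single, implicitly locked xchg instruction.
 */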

/*
 * cmpxchg has no such implied lock semantics, and as a result it is
 * much more efficient for cpu local operations.
 */
#define percpu_cmpxchg_op(var, oval, nval)				\
({									\
	typeof(var) pco_ret__;						\
	typeof(var) pco_old__ = (oval);					\
	typeof(var) pco_new__ = (nval);					\
	switch (sizeof(var)) {						\
	case 1:								\
		asm("cmpxchgb %2, "__percpu_arg(1)			\
		    : "=a" (pco_ret__), "+m" (var)			\
		    : "q" (pco_new__), "0" (pco_old__)			\
		    : "memory");					\
		break;							\
	case 2:								\
		asm("cmpxchgw %2, "__percpu_arg(1)			\
		    : "=a" (pco_ret__), "+m" (var)			\
		    : "r" (pco_new__), "0" (pco_old__)			\
		    : "memory");					\
		break;							\
	case 4:								\
		asm("cmpxchgl %2, "__percpu_arg(1)			\
		    : "=a" (pco_ret__), "+m" (var)			\
		    : "r" (pco_new__), "0" (pco_old__)			\
		    : "memory");					\
		break;							\
	case 8:								\
		asm("cmpxchgq %2, "__percpu_arg(1)			\
		    : "=a" (pco_ret__), "+m" (var)			\
		    : "r" (pco_new__), "0" (pco_old__)			\
		    : "memory");					\
		break;							\
	default: __bad_percpu_size();					\
	}								\
	pco_ret__;							\
})
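
/*
 * Illustrative sketch (not part of the original header; "foo" is a
 * made-up per-cpu variable): a typical optimistic update loop on top
 * of the cmpxchg form:
 *
 *	do {
 *		old = this_cpu_read(foo);
 *		new = compute(old);
 *	} while (this_cpu_cmpxchg(foo, old, new) != old);
 *
 * No lock prefix is emitted; that is sufficient because the variable
 * is only ever modified from its owning CPU.
 */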

/*
 * this_cpu_read() makes gcc load the percpu variable every time it is
 * accessed while this_cpu_read_stable() allows the value to be cached.
 * this_cpu_read_stable() is more efficient and can be used if its value
 * is guaranteed to be valid across cpus.  The current users include
 * get_current() and get_thread_info() both of which are actually
 * per-thread variables implemented as per-cpu variables and thus
 * stable for the duration of the respective task.
 */
#define this_cpu_read_stable(var)	percpu_stable_op("mov", var)
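
/*
 * Illustrative sketch (not part of the original header): this is the
 * pattern behind get_current().  Roughly:
 *
 *	DECLARE_PER_CPU(struct task_struct *, current_task);
 *
 *	static __always_inline struct task_struct *get_current(void)
 *	{
 *		return this_cpu_read_stable(current_task);
 *	}
 *
 * Caching the value across the function is safe because, even though
 * the per-cpu slot differs between CPUs, the current task observes a
 * stable value wherever it happens to run.
 */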

#define raw_cpu_read_1(pcp)		percpu_from_op("mov", pcp)
#define raw_cpu_read_2(pcp)		percpu_from_op("mov", pcp)
#define raw_cpu_read_4(pcp)		percpu_from_op("mov", pcp)

#define raw_cpu_write_1(pcp, val)	percpu_to_op("mov", (pcp), val)
#define raw_cpu_write_2(pcp, val)	percpu_to_op("mov", (pcp), val)
#define raw_cpu_write_4(pcp, val)	percpu_to_op("mov", (pcp), val)
#define raw_cpu_add_1(pcp, val)		percpu_add_op((pcp), val)
#define raw_cpu_add_2(pcp, val)		percpu_add_op((pcp), val)
#define raw_cpu_add_4(pcp, val)		percpu_add_op((pcp), val)
#define raw_cpu_and_1(pcp, val)		percpu_to_op("and", (pcp), val)
#define raw_cpu_and_2(pcp, val)		percpu_to_op("and", (pcp), val)
#define raw_cpu_and_4(pcp, val)		percpu_to_op("and", (pcp), val)
#define raw_cpu_or_1(pcp, val)		percpu_to_op("or", (pcp), val)
#define raw_cpu_or_2(pcp, val)		percpu_to_op("or", (pcp), val)
#define raw_cpu_or_4(pcp, val)		percpu_to_op("or", (pcp), val)
#define raw_cpu_xchg_1(pcp, val)	percpu_xchg_op(pcp, val)
#define raw_cpu_xchg_2(pcp, val)	percpu_xchg_op(pcp, val)
#define raw_cpu_xchg_4(pcp, val)	percpu_xchg_op(pcp, val)

#define this_cpu_read_1(pcp)		percpu_from_op("mov", pcp)
#define this_cpu_read_2(pcp)		percpu_from_op("mov", pcp)
#define this_cpu_read_4(pcp)		percpu_from_op("mov", pcp)
#define this_cpu_write_1(pcp, val)	percpu_to_op("mov", (pcp), val)
#define this_cpu_write_2(pcp, val)	percpu_to_op("mov", (pcp), val)
#define this_cpu_write_4(pcp, val)	percpu_to_op("mov", (pcp), val)
#define this_cpu_add_1(pcp, val)	percpu_add_op((pcp), val)
#define this_cpu_add_2(pcp, val)	percpu_add_op((pcp), val)
#define this_cpu_add_4(pcp, val)	percpu_add_op((pcp), val)
#define this_cpu_and_1(pcp, val)	percpu_to_op("and", (pcp), val)
#define this_cpu_and_2(pcp, val)	percpu_to_op("and", (pcp), val)
#define this_cpu_and_4(pcp, val)	percpu_to_op("and", (pcp), val)
#define this_cpu_or_1(pcp, val)		percpu_to_op("or", (pcp), val)
#define this_cpu_or_2(pcp, val)		percpu_to_op("or", (pcp), val)
#define this_cpu_or_4(pcp, val)		percpu_to_op("or", (pcp), val)
#define this_cpu_xchg_1(pcp, nval)	percpu_xchg_op(pcp, nval)
#define this_cpu_xchg_2(pcp, nval)	percpu_xchg_op(pcp, nval)
#define this_cpu_xchg_4(pcp, nval)	percpu_xchg_op(pcp, nval)

#define raw_cpu_add_return_1(pcp, val)		percpu_add_return_op(pcp, val)
#define raw_cpu_add_return_2(pcp, val)		percpu_add_return_op(pcp, val)
#define raw_cpu_add_return_4(pcp, val)		percpu_add_return_op(pcp, val)
#define raw_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
#define raw_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
#define raw_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)

#define this_cpu_add_return_1(pcp, val)		percpu_add_return_op(pcp, val)
#define this_cpu_add_return_2(pcp, val)		percpu_add_return_op(pcp, val)
#define this_cpu_add_return_4(pcp, val)		percpu_add_return_op(pcp, val)
#define this_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
#define this_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
#define this_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)

#ifdef CONFIG_X86_CMPXCHG64
#define percpu_cmpxchg8b_double(pcp1, pcp2, o1, o2, n1, n2)		\
({									\
	bool __ret;							\
	typeof(pcp1) __o1 = (o1), __n1 = (n1);				\
	typeof(pcp2) __o2 = (o2), __n2 = (n2);				\
	asm volatile("cmpxchg8b "__percpu_arg(1)"\n\tsetz %0\n\t"	\
		     : "=a" (__ret), "+m" (pcp1), "+m" (pcp2), "+d" (__o2) \
		     : "b" (__n1), "c" (__n2), "a" (__o1));		\
	__ret;								\
})

#define raw_cpu_cmpxchg_double_4	percpu_cmpxchg8b_double
#define this_cpu_cmpxchg_double_4	percpu_cmpxchg8b_double
#endif /* CONFIG_X86_CMPXCHG64 */
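
/*
 * Illustrative sketch (not part of the original header; "foo" is a
 * made-up per-cpu structure with two naturally ordered 32-bit members
 * "lo" and "hi"): the double form is reached via the generic
 * this_cpu_cmpxchg_double() wrapper,
 *
 *	bool ok = this_cpu_cmpxchg_double(foo.lo, foo.hi,
 *					  old_lo, old_hi,
 *					  new_lo, new_hi);
 *
 * and cmpxchg8b compares/replaces both 4-byte halves as one 8-byte
 * unit, atomically with respect to the owning CPU.
 */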

/*
 * Per cpu atomic 64 bit operations are only available under 64 bit.
 * 32 bit must fall back to generic operations.
 */
#ifdef CONFIG_X86_64
#define raw_cpu_read_8(pcp)			percpu_from_op("mov", pcp)
#define raw_cpu_write_8(pcp, val)		percpu_to_op("mov", (pcp), val)
#define raw_cpu_add_8(pcp, val)			percpu_add_op((pcp), val)
#define raw_cpu_and_8(pcp, val)			percpu_to_op("and", (pcp), val)
#define raw_cpu_or_8(pcp, val)			percpu_to_op("or", (pcp), val)
#define raw_cpu_add_return_8(pcp, val)		percpu_add_return_op(pcp, val)
#define raw_cpu_xchg_8(pcp, nval)		percpu_xchg_op(pcp, nval)
#define raw_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)

#define this_cpu_read_8(pcp)			percpu_from_op("mov", pcp)
#define this_cpu_write_8(pcp, val)		percpu_to_op("mov", (pcp), val)
#define this_cpu_add_8(pcp, val)		percpu_add_op((pcp), val)
#define this_cpu_and_8(pcp, val)		percpu_to_op("and", (pcp), val)
#define this_cpu_or_8(pcp, val)			percpu_to_op("or", (pcp), val)
#define this_cpu_add_return_8(pcp, val)		percpu_add_return_op(pcp, val)
#define this_cpu_xchg_8(pcp, nval)		percpu_xchg_op(pcp, nval)
#define this_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)

/*
 * Pretty complex macro to generate the cmpxchg16b instruction.  The
 * instruction is not supported on early AMD64 processors so we must be
 * able to emulate it in software.  The address used in the cmpxchg16b
 * instruction must be aligned to a 16 byte boundary.
 */
#define percpu_cmpxchg16b_double(pcp1, pcp2, o1, o2, n1, n2)		\
({									\
	bool __ret;							\
	typeof(pcp1) __o1 = (o1), __n1 = (n1);				\
	typeof(pcp2) __o2 = (o2), __n2 = (n2);				\
	alternative_io("leaq %P1,%%rsi\n\tcall this_cpu_cmpxchg16b_emu\n\t", \
		       "cmpxchg16b " __percpu_arg(1) "\n\tsetz %0\n\t",	\
		       X86_FEATURE_CX16,				\
		       ASM_OUTPUT2("=a" (__ret), "+m" (pcp1),		\
				   "+m" (pcp2), "+d" (__o2)),		\
		       "b" (__n1), "c" (__n2), "a" (__o1) : "rsi");	\
	__ret;								\
})

#define raw_cpu_cmpxchg_double_8	percpu_cmpxchg16b_double
#define this_cpu_cmpxchg_double_8	percpu_cmpxchg16b_double

#endif

/* This is not atomic against other CPUs -- CPU preemption needs to be off */
#define x86_test_and_clear_bit_percpu(bit, var)				\
({									\
	int old__;							\
	asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0"		\
		     : "=r" (old__), "+m" (var)				\
		     : "dIr" (bit));					\
	old__;								\
})
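
/*
 * Illustrative sketch (not part of the original header; "pending_work"
 * and MY_WORK_BIT are made up): a per-cpu bitmap drained from its
 * owning CPU with preemption disabled:
 *
 *	DEFINE_PER_CPU(unsigned long, pending_work);
 *
 *	if (x86_test_and_clear_bit_percpu(MY_WORK_BIT, pending_work))
 *		do_my_work();
 *
 * The btr/sbb pair returns the old bit value without a lock prefix,
 * hence the preemption requirement noted above.
 */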

static __always_inline int x86_this_cpu_constant_test_bit(unsigned int nr,
			const unsigned long __percpu *addr)
{
	unsigned long __percpu *a = (unsigned long *)addr + nr / BITS_PER_LONG;

#ifdef CONFIG_X86_64
	return ((1UL << (nr % BITS_PER_LONG)) & raw_cpu_read_8(*a)) != 0;
#else
	return ((1UL << (nr % BITS_PER_LONG)) & raw_cpu_read_4(*a)) != 0;
#endif
}

static inline int x86_this_cpu_variable_test_bit(int nr,
			const unsigned long __percpu *addr)
{
	int oldbit;

	asm volatile("bt "__percpu_arg(2)",%1\n\t"
			"sbb %0,%0"
			: "=r" (oldbit)
			: "m" (*(unsigned long *)addr), "Ir" (nr));

	return oldbit;
}

#define x86_this_cpu_test_bit(nr, addr)			\
	(__builtin_constant_p((nr))			\
	 ? x86_this_cpu_constant_test_bit((nr), (addr))	\
	 : x86_this_cpu_variable_test_bit((nr), (addr)))


#include <asm-generic/percpu.h>

/* We can use this directly for local CPU (faster). */
DECLARE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off);

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_SMP

/*
 * Define the "EARLY_PER_CPU" macros.  These are used for some per_cpu
 * variables that are initialized and accessed before there are per_cpu
 * areas allocated.
 */

#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)			\
	DEFINE_PER_CPU(_type, _name) = _initvalue;			\
	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata =	\
				{ [0 ... NR_CPUS-1] = _initvalue };	\
	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue)	\
	DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue;		\
	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata =	\
				{ [0 ... NR_CPUS-1] = _initvalue };	\
	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)			\
	EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name)			\
	DECLARE_PER_CPU(_type, _name);				\
	extern __typeof__(_type) *_name##_early_ptr;		\
	extern __typeof__(_type) _name##_early_map[]

#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name)		\
	DECLARE_PER_CPU_READ_MOSTLY(_type, _name);		\
	extern __typeof__(_type) *_name##_early_ptr;		\
	extern __typeof__(_type) _name##_early_map[]

#define early_per_cpu_ptr(_name) (_name##_early_ptr)
#define early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
#define early_per_cpu(_name, _cpu)				\
	*(early_per_cpu_ptr(_name) ?				\
		&early_per_cpu_ptr(_name)[_cpu] :		\
		&per_cpu(_name, _cpu))
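
/*
 * Illustrative sketch (not part of the original header): typical use of
 * the early per-cpu machinery; "foo_id" and BAD_ID are made up here,
 * modelled on the x86_cpu_to_apicid-style variables:
 *
 *	DEFINE_EARLY_PER_CPU(u16, foo_id, BAD_ID);
 *	EXPORT_EARLY_PER_CPU_SYMBOL(foo_id);
 *
 *	early_per_cpu(foo_id, cpu) = value;
 *
 * works both before and after the per-cpu areas exist: once the early
 * map has been copied into the real per-cpu area during setup, the
 * _early_ptr is cleared and early_per_cpu() falls through to per_cpu().
 */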

#else /* !CONFIG_SMP */
#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)		\
	DEFINE_PER_CPU(_type, _name) = _initvalue

#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue)	\
	DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)			\
	EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name)			\
	DECLARE_PER_CPU(_type, _name)

#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name)		\
	DECLARE_PER_CPU_READ_MOSTLY(_type, _name)

#define early_per_cpu(_name, _cpu) per_cpu(_name, _cpu)
#define early_per_cpu_ptr(_name) NULL
/* no early_per_cpu_map() */

#endif /* !CONFIG_SMP */

#endif /* _ASM_X86_PERCPU_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PERCPU_H
#define _ASM_X86_PERCPU_H

#ifdef CONFIG_X86_64
#define __percpu_seg		gs
#else
#define __percpu_seg		fs
#endif

#ifdef __ASSEMBLY__

#ifdef CONFIG_SMP
#define PER_CPU_VAR(var)	%__percpu_seg:var
#else /* ! SMP */
#define PER_CPU_VAR(var)	var
#endif	/* SMP */

#ifdef CONFIG_X86_64_SMP
#define INIT_PER_CPU_VAR(var)	init_per_cpu__##var
#else
#define INIT_PER_CPU_VAR(var)	var
#endif

#else /* ...!ASSEMBLY */

#include <linux/stringify.h>
#include <asm/asm.h>

#ifdef CONFIG_SMP
#define __percpu_prefix		"%%"__stringify(__percpu_seg)":"
#define __my_cpu_offset		this_cpu_read(this_cpu_off)

/*
 * Compared to the generic __my_cpu_offset version, the following
 * saves one instruction and avoids clobbering a temp register.
 */
#define arch_raw_cpu_ptr(ptr)				\
({							\
	unsigned long tcp_ptr__;			\
	asm ("add " __percpu_arg(1) ", %0"		\
	     : "=r" (tcp_ptr__)				\
	     : "m" (this_cpu_off), "0" (ptr));		\
	(typeof(*(ptr)) __kernel __force *)tcp_ptr__;	\
})
#else
#define __percpu_prefix		""
#endif

#define __percpu_arg(x)		__percpu_prefix "%" #x

/*
 * Initialized pointers to per-cpu variables needed for the boot
 * processor need to use these macros to get the proper address
 * offset from __per_cpu_load on SMP.
 *
 * There also must be an entry in vmlinux_64.lds.S
 */
#define DECLARE_INIT_PER_CPU(var) \
	extern typeof(var) init_per_cpu_var(var)

#ifdef CONFIG_X86_64_SMP
#define init_per_cpu_var(var)	init_per_cpu__##var
#else
#define init_per_cpu_var(var)	var
#endif

/* For arch-specific code, we can use direct single-insn ops (they
 * don't give an lvalue though). */

#define __pcpu_type_1 u8
#define __pcpu_type_2 u16
#define __pcpu_type_4 u32
#define __pcpu_type_8 u64

#define __pcpu_cast_1(val) ((u8)(((unsigned long) val) & 0xff))
#define __pcpu_cast_2(val) ((u16)(((unsigned long) val) & 0xffff))
#define __pcpu_cast_4(val) ((u32)(((unsigned long) val) & 0xffffffff))
#define __pcpu_cast_8(val) ((u64)(val))

#define __pcpu_op1_1(op, dst) op "b " dst
#define __pcpu_op1_2(op, dst) op "w " dst
#define __pcpu_op1_4(op, dst) op "l " dst
#define __pcpu_op1_8(op, dst) op "q " dst

#define __pcpu_op2_1(op, src, dst) op "b " src ", " dst
#define __pcpu_op2_2(op, src, dst) op "w " src ", " dst
#define __pcpu_op2_4(op, src, dst) op "l " src ", " dst
#define __pcpu_op2_8(op, src, dst) op "q " src ", " dst

#define __pcpu_reg_1(mod, x) mod "q" (x)
#define __pcpu_reg_2(mod, x) mod "r" (x)
#define __pcpu_reg_4(mod, x) mod "r" (x)
#define __pcpu_reg_8(mod, x) mod "r" (x)

#define __pcpu_reg_imm_1(x) "qi" (x)
#define __pcpu_reg_imm_2(x) "ri" (x)
#define __pcpu_reg_imm_4(x) "ri" (x)
#define __pcpu_reg_imm_8(x) "re" (x)
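
/*
 * Illustrative sketch (not part of the original header; "foo" is a
 * made-up 4-byte per-cpu variable): how the size helpers compose.
 *
 *	percpu_from_op(4, volatile, "mov", foo)
 *
 * expands roughly to
 *
 *	asm volatile("movl %%gs:%[var], %[val]"
 *		     : [val] "=r" (pfo_val__)
 *		     : [var] "m" (foo));
 *
 * __pcpu_op2_4() supplies the "l" suffix and operand order,
 * __pcpu_reg_4() the register constraint, and __pcpu_type_4 the u32
 * temporary that is cast back to typeof(foo) at the end.
 */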

#define percpu_to_op(size, qual, op, _var, _val)			\
do {									\
	__pcpu_type_##size pto_val__ = __pcpu_cast_##size(_val);	\
	if (0) {							\
		typeof(_var) pto_tmp__;					\
		pto_tmp__ = (_val);					\
		(void)pto_tmp__;					\
	}								\
	asm qual(__pcpu_op2_##size(op, "%[val]", __percpu_arg([var]))	\
	    : [var] "+m" (_var)						\
	    : [val] __pcpu_reg_imm_##size(pto_val__));			\
} while (0)

#define percpu_unary_op(size, qual, op, _var)				\
({									\
	asm qual (__pcpu_op1_##size(op, __percpu_arg([var]))		\
	    : [var] "+m" (_var));					\
})

/*
 * Generate a percpu add to memory instruction and optimize code
 * if one is added or subtracted.
 */
#define percpu_add_op(size, qual, var, val)				\
do {									\
	const int pao_ID__ = (__builtin_constant_p(val) &&		\
			      ((val) == 1 || (val) == -1)) ?		\
				(int)(val) : 0;				\
	if (0) {							\
		typeof(var) pao_tmp__;					\
		pao_tmp__ = (val);					\
		(void)pao_tmp__;					\
	}								\
	if (pao_ID__ == 1)						\
		percpu_unary_op(size, qual, "inc", var);		\
	else if (pao_ID__ == -1)					\
		percpu_unary_op(size, qual, "dec", var);		\
	else								\
		percpu_to_op(size, qual, "add", var, val);		\
} while (0)

#define percpu_from_op(size, qual, op, _var)				\
({									\
	__pcpu_type_##size pfo_val__;					\
	asm qual (__pcpu_op2_##size(op, __percpu_arg([var]), "%[val]")	\
	    : [val] __pcpu_reg_##size("=", pfo_val__)			\
	    : [var] "m" (_var));					\
	(typeof(_var))(unsigned long) pfo_val__;			\
})

#define percpu_stable_op(size, op, _var)				\
({									\
	__pcpu_type_##size pfo_val__;					\
	asm(__pcpu_op2_##size(op, __percpu_arg(P[var]), "%[val]")	\
	    : [val] __pcpu_reg_##size("=", pfo_val__)			\
	    : [var] "p" (&(_var)));					\
	(typeof(_var))(unsigned long) pfo_val__;			\
})

/*
 * Add return operation
 */
#define percpu_add_return_op(size, qual, _var, _val)			\
({									\
	__pcpu_type_##size paro_tmp__ = __pcpu_cast_##size(_val);	\
	asm qual (__pcpu_op2_##size("xadd", "%[tmp]",			\
				     __percpu_arg([var]))		\
		  : [tmp] __pcpu_reg_##size("+", paro_tmp__),		\
		    [var] "+m" (_var)					\
		  : : "memory");					\
	(typeof(_var))(unsigned long) (paro_tmp__ + _val);		\
})

/*
 * xchg is implemented using cmpxchg without a lock prefix. xchg is
 * expensive due to the implied lock prefix. The processor cannot prefetch
 * cachelines if xchg is used.
 */
#define percpu_xchg_op(size, qual, _var, _nval)				\
({									\
	__pcpu_type_##size pxo_old__;					\
	__pcpu_type_##size pxo_new__ = __pcpu_cast_##size(_nval);	\
	asm qual (__pcpu_op2_##size("mov", __percpu_arg([var]),		\
				    "%[oval]")				\
		  "\n1:\t"						\
		  __pcpu_op2_##size("cmpxchg", "%[nval]",		\
				    __percpu_arg([var]))		\
		  "\n\tjnz 1b"						\
		  : [oval] "=&a" (pxo_old__),				\
		    [var] "+m" (_var)					\
		  : [nval] __pcpu_reg_##size(, pxo_new__)		\
		  : "memory");						\
	(typeof(_var))(unsigned long) pxo_old__;			\
})

/*
 * cmpxchg has no such implied lock semantics, and as a result it is
 * much more efficient for cpu local operations.
 */
#define percpu_cmpxchg_op(size, qual, _var, _oval, _nval)		\
({									\
	__pcpu_type_##size pco_old__ = __pcpu_cast_##size(_oval);	\
	__pcpu_type_##size pco_new__ = __pcpu_cast_##size(_nval);	\
	asm qual (__pcpu_op2_##size("cmpxchg", "%[nval]",		\
				    __percpu_arg([var]))		\
		  : [oval] "+a" (pco_old__),				\
		    [var] "+m" (_var)					\
		  : [nval] __pcpu_reg_##size(, pco_new__)		\
		  : "memory");						\
	(typeof(_var))(unsigned long) pco_old__;			\
})

#define percpu_try_cmpxchg_op(size, qual, _var, _ovalp, _nval)		\
({									\
	bool success;							\
	__pcpu_type_##size *pco_oval__ = (__pcpu_type_##size *)(_ovalp); \
	__pcpu_type_##size pco_old__ = *pco_oval__;			\
	__pcpu_type_##size pco_new__ = __pcpu_cast_##size(_nval);	\
	asm qual (__pcpu_op2_##size("cmpxchg", "%[nval]",		\
				    __percpu_arg([var]))		\
		  CC_SET(z)						\
		  : CC_OUT(z) (success),				\
		    [oval] "+a" (pco_old__),				\
		    [var] "+m" (_var)					\
		  : [nval] __pcpu_reg_##size(, pco_new__)		\
		  : "memory");						\
	if (unlikely(!success))						\
		*pco_oval__ = pco_old__;				\
	likely(success);						\
})
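
/*
 * Illustrative sketch (not part of the original header; "foo" is a
 * made-up per-cpu variable): a typical lockless update loop built on
 * the try_cmpxchg form, which writes the observed old value back on
 * failure so the caller does not have to re-read it:
 *
 *	unsigned long old = raw_cpu_read(foo), new;
 *
 *	do {
 *		new = compute(old);
 *	} while (!this_cpu_try_cmpxchg(foo, &old, new));
 */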

#if defined(CONFIG_X86_32) && !defined(CONFIG_UML)
#define percpu_cmpxchg64_op(size, qual, _var, _oval, _nval)		\
({									\
	union {								\
		u64 var;						\
		struct {						\
			u32 low, high;					\
		};							\
	} old__, new__;							\
									\
	old__.var = _oval;						\
	new__.var = _nval;						\
									\
	asm qual (ALTERNATIVE("call this_cpu_cmpxchg8b_emu",		\
			      "cmpxchg8b " __percpu_arg([var]), X86_FEATURE_CX8) \
		  : [var] "+m" (_var),					\
		    "+a" (old__.low),					\
		    "+d" (old__.high)					\
		  : "b" (new__.low),					\
		    "c" (new__.high),					\
		    "S" (&(_var))					\
		  : "memory");						\
									\
	old__.var;							\
})

#define raw_cpu_cmpxchg64(pcp, oval, nval)	percpu_cmpxchg64_op(8, , pcp, oval, nval)
#define this_cpu_cmpxchg64(pcp, oval, nval)	percpu_cmpxchg64_op(8, volatile, pcp, oval, nval)

#define percpu_try_cmpxchg64_op(size, qual, _var, _ovalp, _nval)	\
({									\
	bool success;							\
	u64 *_oval = (u64 *)(_ovalp);					\
	union {								\
		u64 var;						\
		struct {						\
			u32 low, high;					\
		};							\
	} old__, new__;							\
									\
	old__.var = *_oval;						\
	new__.var = _nval;						\
									\
	asm qual (ALTERNATIVE("call this_cpu_cmpxchg8b_emu",		\
			      "cmpxchg8b " __percpu_arg([var]), X86_FEATURE_CX8) \
		  CC_SET(z)						\
		  : CC_OUT(z) (success),				\
		    [var] "+m" (_var),					\
		    "+a" (old__.low),					\
		    "+d" (old__.high)					\
		  : "b" (new__.low),					\
		    "c" (new__.high),					\
		    "S" (&(_var))					\
		  : "memory");						\
	if (unlikely(!success))						\
		*_oval = old__.var;					\
	likely(success);						\
})

#define raw_cpu_try_cmpxchg64(pcp, ovalp, nval)		percpu_try_cmpxchg64_op(8, , pcp, ovalp, nval)
#define this_cpu_try_cmpxchg64(pcp, ovalp, nval)	percpu_try_cmpxchg64_op(8, volatile, pcp, ovalp, nval)
#endif
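
/*
 * Illustrative sketch (not part of the original header; "foo" is a
 * made-up u64 per-cpu variable): on 32-bit the forms above are the
 * only way to update a 64-bit per-cpu value in one shot, e.g.
 *
 *	old = this_cpu_cmpxchg64(foo, old, old + 1);
 *
 * emits "cmpxchg8b %fs:foo" on CX8-capable CPUs and falls back to the
 * this_cpu_cmpxchg8b_emu trampoline (address passed in %esi) otherwise,
 * as selected at boot by the ALTERNATIVE() above.
 */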

#ifdef CONFIG_X86_64
#define raw_cpu_cmpxchg64(pcp, oval, nval)	percpu_cmpxchg_op(8, , pcp, oval, nval)
#define this_cpu_cmpxchg64(pcp, oval, nval)	percpu_cmpxchg_op(8, volatile, pcp, oval, nval)

#define raw_cpu_try_cmpxchg64(pcp, ovalp, nval)		percpu_try_cmpxchg_op(8, , pcp, ovalp, nval)
#define this_cpu_try_cmpxchg64(pcp, ovalp, nval)	percpu_try_cmpxchg_op(8, volatile, pcp, ovalp, nval)

#define percpu_cmpxchg128_op(size, qual, _var, _oval, _nval)		\
({									\
	union {								\
		u128 var;						\
		struct {						\
			u64 low, high;					\
		};							\
	} old__, new__;							\
									\
	old__.var = _oval;						\
	new__.var = _nval;						\
									\
	asm qual (ALTERNATIVE("call this_cpu_cmpxchg16b_emu",		\
			      "cmpxchg16b " __percpu_arg([var]), X86_FEATURE_CX16) \
		  : [var] "+m" (_var),					\
		    "+a" (old__.low),					\
		    "+d" (old__.high)					\
		  : "b" (new__.low),					\
		    "c" (new__.high),					\
		    "S" (&(_var))					\
		  : "memory");						\
									\
	old__.var;							\
})

#define raw_cpu_cmpxchg128(pcp, oval, nval)	percpu_cmpxchg128_op(16, , pcp, oval, nval)
#define this_cpu_cmpxchg128(pcp, oval, nval)	percpu_cmpxchg128_op(16, volatile, pcp, oval, nval)

#define percpu_try_cmpxchg128_op(size, qual, _var, _ovalp, _nval)	\
({									\
	bool success;							\
	u128 *_oval = (u128 *)(_ovalp);					\
	union {								\
		u128 var;						\
		struct {						\
			u64 low, high;					\
		};							\
	} old__, new__;							\
									\
	old__.var = *_oval;						\
	new__.var = _nval;						\
									\
	asm qual (ALTERNATIVE("call this_cpu_cmpxchg16b_emu",		\
			      "cmpxchg16b " __percpu_arg([var]), X86_FEATURE_CX16) \
		  CC_SET(z)						\
		  : CC_OUT(z) (success),				\
		    [var] "+m" (_var),					\
		    "+a" (old__.low),					\
		    "+d" (old__.high)					\
		  : "b" (new__.low),					\
		    "c" (new__.high),					\
		    "S" (&(_var))					\
		  : "memory");						\
	if (unlikely(!success))						\
		*_oval = old__.var;					\
	likely(success);						\
})

#define raw_cpu_try_cmpxchg128(pcp, ovalp, nval)	percpu_try_cmpxchg128_op(16, , pcp, ovalp, nval)
#define this_cpu_try_cmpxchg128(pcp, ovalp, nval)	percpu_try_cmpxchg128_op(16, volatile, pcp, ovalp, nval)
#endif
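
/*
 * Illustrative sketch (not part of the original header; "foo" is a
 * made-up per-cpu u128 packing two 64-bit words that must change
 * together, e.g. a pointer plus a counter):
 *
 *	u128 old = ...;			previously observed value
 *	u128 new = make_new(old);
 *
 *	if (this_cpu_cmpxchg128(foo, old, new) == old)
 *		... update succeeded ...
 *
 * This emits "cmpxchg16b %gs:foo" (rdx:rax = old, rcx:rbx = new) and,
 * on processors without X86_FEATURE_CX16, calls the
 * this_cpu_cmpxchg16b_emu trampoline selected by ALTERNATIVE().
 */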

/*
 * this_cpu_read() makes gcc load the percpu variable every time it is
 * accessed while this_cpu_read_stable() allows the value to be cached.
 * this_cpu_read_stable() is more efficient and can be used if its value
 * is guaranteed to be valid across cpus.  The current users include
 * get_current() and get_thread_info() both of which are actually
 * per-thread variables implemented as per-cpu variables and thus
 * stable for the duration of the respective task.
 */
#define this_cpu_read_stable_1(pcp)	percpu_stable_op(1, "mov", pcp)
#define this_cpu_read_stable_2(pcp)	percpu_stable_op(2, "mov", pcp)
#define this_cpu_read_stable_4(pcp)	percpu_stable_op(4, "mov", pcp)
#define this_cpu_read_stable_8(pcp)	percpu_stable_op(8, "mov", pcp)
#define this_cpu_read_stable(pcp)	__pcpu_size_call_return(this_cpu_read_stable_, pcp)

#define raw_cpu_read_1(pcp)		percpu_from_op(1, , "mov", pcp)
#define raw_cpu_read_2(pcp)		percpu_from_op(2, , "mov", pcp)
#define raw_cpu_read_4(pcp)		percpu_from_op(4, , "mov", pcp)

#define raw_cpu_write_1(pcp, val)	percpu_to_op(1, , "mov", (pcp), val)
#define raw_cpu_write_2(pcp, val)	percpu_to_op(2, , "mov", (pcp), val)
#define raw_cpu_write_4(pcp, val)	percpu_to_op(4, , "mov", (pcp), val)
#define raw_cpu_add_1(pcp, val)		percpu_add_op(1, , (pcp), val)
#define raw_cpu_add_2(pcp, val)		percpu_add_op(2, , (pcp), val)
#define raw_cpu_add_4(pcp, val)		percpu_add_op(4, , (pcp), val)
#define raw_cpu_and_1(pcp, val)		percpu_to_op(1, , "and", (pcp), val)
#define raw_cpu_and_2(pcp, val)		percpu_to_op(2, , "and", (pcp), val)
#define raw_cpu_and_4(pcp, val)		percpu_to_op(4, , "and", (pcp), val)
#define raw_cpu_or_1(pcp, val)		percpu_to_op(1, , "or", (pcp), val)
#define raw_cpu_or_2(pcp, val)		percpu_to_op(2, , "or", (pcp), val)
#define raw_cpu_or_4(pcp, val)		percpu_to_op(4, , "or", (pcp), val)

/*
 * raw_cpu_xchg() can use a load-store since it is not required to be
 * IRQ-safe.
 */
#define raw_percpu_xchg_op(var, nval)					\
({									\
	typeof(var) pxo_ret__ = raw_cpu_read(var);			\
	raw_cpu_write(var, (nval));					\
	pxo_ret__;							\
})
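
/*
 * Illustrative sketch (not part of the original header; "foo" is a
 * made-up 4-byte per-cpu variable): raw_cpu_*() callers must already
 * be in a context where the access cannot race with another update of
 * the same variable, so
 *
 *	old = raw_cpu_xchg(foo, new);
 *
 * can compile down to a plain "movl %gs:foo, %eax" followed by
 * "movl %edx, %gs:foo", with no cmpxchg retry loop, unlike
 * this_cpu_xchg() which must remain IRQ-safe.
 */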

#define raw_cpu_xchg_1(pcp, val)	raw_percpu_xchg_op(pcp, val)
#define raw_cpu_xchg_2(pcp, val)	raw_percpu_xchg_op(pcp, val)
#define raw_cpu_xchg_4(pcp, val)	raw_percpu_xchg_op(pcp, val)

#define this_cpu_read_1(pcp)		percpu_from_op(1, volatile, "mov", pcp)
#define this_cpu_read_2(pcp)		percpu_from_op(2, volatile, "mov", pcp)
#define this_cpu_read_4(pcp)		percpu_from_op(4, volatile, "mov", pcp)
#define this_cpu_write_1(pcp, val)	percpu_to_op(1, volatile, "mov", (pcp), val)
#define this_cpu_write_2(pcp, val)	percpu_to_op(2, volatile, "mov", (pcp), val)
#define this_cpu_write_4(pcp, val)	percpu_to_op(4, volatile, "mov", (pcp), val)
#define this_cpu_add_1(pcp, val)	percpu_add_op(1, volatile, (pcp), val)
#define this_cpu_add_2(pcp, val)	percpu_add_op(2, volatile, (pcp), val)
#define this_cpu_add_4(pcp, val)	percpu_add_op(4, volatile, (pcp), val)
#define this_cpu_and_1(pcp, val)	percpu_to_op(1, volatile, "and", (pcp), val)
#define this_cpu_and_2(pcp, val)	percpu_to_op(2, volatile, "and", (pcp), val)
#define this_cpu_and_4(pcp, val)	percpu_to_op(4, volatile, "and", (pcp), val)
#define this_cpu_or_1(pcp, val)		percpu_to_op(1, volatile, "or", (pcp), val)
#define this_cpu_or_2(pcp, val)		percpu_to_op(2, volatile, "or", (pcp), val)
#define this_cpu_or_4(pcp, val)		percpu_to_op(4, volatile, "or", (pcp), val)
#define this_cpu_xchg_1(pcp, nval)	percpu_xchg_op(1, volatile, pcp, nval)
#define this_cpu_xchg_2(pcp, nval)	percpu_xchg_op(2, volatile, pcp, nval)
#define this_cpu_xchg_4(pcp, nval)	percpu_xchg_op(4, volatile, pcp, nval)

#define raw_cpu_add_return_1(pcp, val)		percpu_add_return_op(1, , pcp, val)
#define raw_cpu_add_return_2(pcp, val)		percpu_add_return_op(2, , pcp, val)
#define raw_cpu_add_return_4(pcp, val)		percpu_add_return_op(4, , pcp, val)
#define raw_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op(1, , pcp, oval, nval)
#define raw_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(2, , pcp, oval, nval)
#define raw_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(4, , pcp, oval, nval)
#define raw_cpu_try_cmpxchg_1(pcp, ovalp, nval)	percpu_try_cmpxchg_op(1, , pcp, ovalp, nval)
#define raw_cpu_try_cmpxchg_2(pcp, ovalp, nval)	percpu_try_cmpxchg_op(2, , pcp, ovalp, nval)
#define raw_cpu_try_cmpxchg_4(pcp, ovalp, nval)	percpu_try_cmpxchg_op(4, , pcp, ovalp, nval)

#define this_cpu_add_return_1(pcp, val)		percpu_add_return_op(1, volatile, pcp, val)
#define this_cpu_add_return_2(pcp, val)		percpu_add_return_op(2, volatile, pcp, val)
#define this_cpu_add_return_4(pcp, val)		percpu_add_return_op(4, volatile, pcp, val)
#define this_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op(1, volatile, pcp, oval, nval)
#define this_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(2, volatile, pcp, oval, nval)
#define this_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(4, volatile, pcp, oval, nval)
#define this_cpu_try_cmpxchg_1(pcp, ovalp, nval)	percpu_try_cmpxchg_op(1, volatile, pcp, ovalp, nval)
#define this_cpu_try_cmpxchg_2(pcp, ovalp, nval)	percpu_try_cmpxchg_op(2, volatile, pcp, ovalp, nval)
#define this_cpu_try_cmpxchg_4(pcp, ovalp, nval)	percpu_try_cmpxchg_op(4, volatile, pcp, ovalp, nval)

/*
 * Per cpu atomic 64 bit operations are only available under 64 bit.
 * 32 bit must fall back to generic operations.
 */
#ifdef CONFIG_X86_64
#define raw_cpu_read_8(pcp)			percpu_from_op(8, , "mov", pcp)
#define raw_cpu_write_8(pcp, val)		percpu_to_op(8, , "mov", (pcp), val)
#define raw_cpu_add_8(pcp, val)			percpu_add_op(8, , (pcp), val)
#define raw_cpu_and_8(pcp, val)			percpu_to_op(8, , "and", (pcp), val)
#define raw_cpu_or_8(pcp, val)			percpu_to_op(8, , "or", (pcp), val)
#define raw_cpu_add_return_8(pcp, val)		percpu_add_return_op(8, , pcp, val)
#define raw_cpu_xchg_8(pcp, nval)		raw_percpu_xchg_op(pcp, nval)
#define raw_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(8, , pcp, oval, nval)
#define raw_cpu_try_cmpxchg_8(pcp, ovalp, nval)	percpu_try_cmpxchg_op(8, , pcp, ovalp, nval)

#define this_cpu_read_8(pcp)			percpu_from_op(8, volatile, "mov", pcp)
#define this_cpu_write_8(pcp, val)		percpu_to_op(8, volatile, "mov", (pcp), val)
#define this_cpu_add_8(pcp, val)		percpu_add_op(8, volatile, (pcp), val)
#define this_cpu_and_8(pcp, val)		percpu_to_op(8, volatile, "and", (pcp), val)
#define this_cpu_or_8(pcp, val)			percpu_to_op(8, volatile, "or", (pcp), val)
#define this_cpu_add_return_8(pcp, val)		percpu_add_return_op(8, volatile, pcp, val)
#define this_cpu_xchg_8(pcp, nval)		percpu_xchg_op(8, volatile, pcp, nval)
#define this_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(8, volatile, pcp, oval, nval)
#define this_cpu_try_cmpxchg_8(pcp, ovalp, nval)	percpu_try_cmpxchg_op(8, volatile, pcp, ovalp, nval)
#endif

static __always_inline bool x86_this_cpu_constant_test_bit(unsigned int nr,
			const unsigned long __percpu *addr)
{
	unsigned long __percpu *a =
		(unsigned long __percpu *)addr + nr / BITS_PER_LONG;

#ifdef CONFIG_X86_64
	return ((1UL << (nr % BITS_PER_LONG)) & raw_cpu_read_8(*a)) != 0;
#else
	return ((1UL << (nr % BITS_PER_LONG)) & raw_cpu_read_4(*a)) != 0;
#endif
}

static inline bool x86_this_cpu_variable_test_bit(int nr,
			const unsigned long __percpu *addr)
{
	bool oldbit;

	asm volatile("btl "__percpu_arg(2)",%1"
			CC_SET(c)
			: CC_OUT(c) (oldbit)
			: "m" (*(unsigned long __percpu *)addr), "Ir" (nr));

	return oldbit;
}

#define x86_this_cpu_test_bit(nr, addr)			\
	(__builtin_constant_p((nr))			\
	 ? x86_this_cpu_constant_test_bit((nr), (addr))	\
	 : x86_this_cpu_variable_test_bit((nr), (addr)))


#include <asm-generic/percpu.h>

/* We can use this directly for local CPU (faster). */
DECLARE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off);

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_SMP

/*
 * Define the "EARLY_PER_CPU" macros.  These are used for some per_cpu
 * variables that are initialized and accessed before there are per_cpu
 * areas allocated.
 */

#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)			\
	DEFINE_PER_CPU(_type, _name) = _initvalue;			\
	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata =	\
				{ [0 ... NR_CPUS-1] = _initvalue };	\
	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue)	\
	DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue;		\
	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata =	\
				{ [0 ... NR_CPUS-1] = _initvalue };	\
	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)			\
	EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name)			\
	DECLARE_PER_CPU(_type, _name);				\
	extern __typeof__(_type) *_name##_early_ptr;		\
	extern __typeof__(_type) _name##_early_map[]

#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name)		\
	DECLARE_PER_CPU_READ_MOSTLY(_type, _name);		\
	extern __typeof__(_type) *_name##_early_ptr;		\
	extern __typeof__(_type) _name##_early_map[]

#define early_per_cpu_ptr(_name) (_name##_early_ptr)
#define early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
#define early_per_cpu(_name, _cpu)				\
	*(early_per_cpu_ptr(_name) ?				\
		&early_per_cpu_ptr(_name)[_cpu] :		\
		&per_cpu(_name, _cpu))

#else /* !CONFIG_SMP */
#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)		\
	DEFINE_PER_CPU(_type, _name) = _initvalue

#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue)	\
	DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)			\
	EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name)			\
	DECLARE_PER_CPU(_type, _name)

#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name)		\
	DECLARE_PER_CPU_READ_MOSTLY(_type, _name)

#define early_per_cpu(_name, _cpu) per_cpu(_name, _cpu)
#define early_per_cpu_ptr(_name) NULL
/* no early_per_cpu_map() */

#endif /* !CONFIG_SMP */

#endif /* _ASM_X86_PERCPU_H */