arch/x86/include/asm/percpu.h (v4.17)
  1/* SPDX-License-Identifier: GPL-2.0 */
  2#ifndef _ASM_X86_PERCPU_H
  3#define _ASM_X86_PERCPU_H
  4
  5#ifdef CONFIG_X86_64
  6#define __percpu_seg		gs
  7#define __percpu_mov_op		movq
  8#else
  9#define __percpu_seg		fs
 10#define __percpu_mov_op		movl
 11#endif
 12
 13#ifdef __ASSEMBLY__
 14
 15/*
 16 * PER_CPU finds an address of a per-cpu variable.
 17 *
 18 * Args:
 19 *    var - variable name
 20 *    reg - 32bit register
 21 *
 22 * The resulting address is stored in the "reg" argument.
 23 *
 24 * Example:
 25 *    PER_CPU(cpu_gdt_descr, %ebx)
 26 */
 27#ifdef CONFIG_SMP
 28#define PER_CPU(var, reg)						\
 29	__percpu_mov_op %__percpu_seg:this_cpu_off, reg;		\
 30	lea var(reg), reg
 31#define PER_CPU_VAR(var)	%__percpu_seg:var
 32#else /* ! SMP */
 33#define PER_CPU(var, reg)	__percpu_mov_op $var, reg
 34#define PER_CPU_VAR(var)	var
 35#endif	/* SMP */
 36
 37#ifdef CONFIG_X86_64_SMP
 38#define INIT_PER_CPU_VAR(var)  init_per_cpu__##var
 39#else
 40#define INIT_PER_CPU_VAR(var)  var
 41#endif
 42
 43#else /* ...!ASSEMBLY */
 44
 45#include <linux/kernel.h>
 46#include <linux/stringify.h>
 47
 48#ifdef CONFIG_SMP
 49#define __percpu_prefix		"%%"__stringify(__percpu_seg)":"
 50#define __my_cpu_offset		this_cpu_read(this_cpu_off)
 51
 52/*
 53 * Compared to the generic __my_cpu_offset version, the following
 54 * saves one instruction and avoids clobbering a temp register.
 55 */
 56#define arch_raw_cpu_ptr(ptr)				\
 57({							\
 58	unsigned long tcp_ptr__;			\
 59	asm volatile("add " __percpu_arg(1) ", %0"	\
 60		     : "=r" (tcp_ptr__)			\
 61		     : "m" (this_cpu_off), "0" (ptr));	\
 62	(typeof(*(ptr)) __kernel __force *)tcp_ptr__;	\
 63})
 64#else
 65#define __percpu_prefix		""
 66#endif
 67
 68#define __percpu_arg(x)		__percpu_prefix "%" #x
 69
 70/*
 71 * Initialized pointers to per-cpu variables needed for the boot
 72 * processor need to use these macros to get the proper address
 73 * offset from __per_cpu_load on SMP.
 74 *
 75 * There also must be an entry in vmlinux_64.lds.S
 76 */
 77#define DECLARE_INIT_PER_CPU(var) \
 78       extern typeof(var) init_per_cpu_var(var)
 79
 80#ifdef CONFIG_X86_64_SMP
 81#define init_per_cpu_var(var)  init_per_cpu__##var
 82#else
 83#define init_per_cpu_var(var)  var
 84#endif
 85
 86/* For arch-specific code, we can use direct single-insn ops (they
 87 * don't give an lvalue though). */
 88extern void __bad_percpu_size(void);
 89
 90#define percpu_to_op(op, var, val)			\
 91do {							\
 92	typedef typeof(var) pto_T__;			\
 93	if (0) {					\
 94		pto_T__ pto_tmp__;			\
 95		pto_tmp__ = (val);			\
 96		(void)pto_tmp__;			\
 97	}						\
 98	switch (sizeof(var)) {				\
 99	case 1:						\
100		asm(op "b %1,"__percpu_arg(0)		\
101		    : "+m" (var)			\
102		    : "qi" ((pto_T__)(val)));		\
103		break;					\
104	case 2:						\
105		asm(op "w %1,"__percpu_arg(0)		\
106		    : "+m" (var)			\
107		    : "ri" ((pto_T__)(val)));		\
108		break;					\
109	case 4:						\
110		asm(op "l %1,"__percpu_arg(0)		\
111		    : "+m" (var)			\
112		    : "ri" ((pto_T__)(val)));		\
113		break;					\
114	case 8:						\
115		asm(op "q %1,"__percpu_arg(0)		\
116		    : "+m" (var)			\
117		    : "re" ((pto_T__)(val)));		\
118		break;					\
119	default: __bad_percpu_size();			\
120	}						\
121} while (0)
122
123/*
124 * Generate a percpu add to memory instruction and optimize code
125 * if one is added or subtracted.
126 */
127#define percpu_add_op(var, val)						\
128do {									\
129	typedef typeof(var) pao_T__;					\
130	const int pao_ID__ = (__builtin_constant_p(val) &&		\
131			      ((val) == 1 || (val) == -1)) ?		\
132				(int)(val) : 0;				\
133	if (0) {							\
134		pao_T__ pao_tmp__;					\
135		pao_tmp__ = (val);					\
136		(void)pao_tmp__;					\
137	}								\
138	switch (sizeof(var)) {						\
139	case 1:								\
140		if (pao_ID__ == 1)					\
141			asm("incb "__percpu_arg(0) : "+m" (var));	\
142		else if (pao_ID__ == -1)				\
143			asm("decb "__percpu_arg(0) : "+m" (var));	\
144		else							\
145			asm("addb %1, "__percpu_arg(0)			\
146			    : "+m" (var)				\
147			    : "qi" ((pao_T__)(val)));			\
148		break;							\
149	case 2:								\
150		if (pao_ID__ == 1)					\
151			asm("incw "__percpu_arg(0) : "+m" (var));	\
152		else if (pao_ID__ == -1)				\
153			asm("decw "__percpu_arg(0) : "+m" (var));	\
154		else							\
155			asm("addw %1, "__percpu_arg(0)			\
156			    : "+m" (var)				\
157			    : "ri" ((pao_T__)(val)));			\
158		break;							\
159	case 4:								\
160		if (pao_ID__ == 1)					\
161			asm("incl "__percpu_arg(0) : "+m" (var));	\
162		else if (pao_ID__ == -1)				\
163			asm("decl "__percpu_arg(0) : "+m" (var));	\
164		else							\
165			asm("addl %1, "__percpu_arg(0)			\
166			    : "+m" (var)				\
167			    : "ri" ((pao_T__)(val)));			\
168		break;							\
169	case 8:								\
170		if (pao_ID__ == 1)					\
171			asm("incq "__percpu_arg(0) : "+m" (var));	\
172		else if (pao_ID__ == -1)				\
173			asm("decq "__percpu_arg(0) : "+m" (var));	\
174		else							\
175			asm("addq %1, "__percpu_arg(0)			\
176			    : "+m" (var)				\
177			    : "re" ((pao_T__)(val)));			\
178		break;							\
179	default: __bad_percpu_size();					\
180	}								\
181} while (0)
182
183#define percpu_from_op(op, var)				\
184({							\
185	typeof(var) pfo_ret__;				\
186	switch (sizeof(var)) {				\
187	case 1:						\
188		asm(op "b "__percpu_arg(1)",%0"		\
189		    : "=q" (pfo_ret__)			\
190		    : "m" (var));			\
191		break;					\
192	case 2:						\
193		asm(op "w "__percpu_arg(1)",%0"		\
194		    : "=r" (pfo_ret__)			\
195		    : "m" (var));			\
196		break;					\
197	case 4:						\
198		asm(op "l "__percpu_arg(1)",%0"		\
199		    : "=r" (pfo_ret__)			\
200		    : "m" (var));			\
201		break;					\
202	case 8:						\
203		asm(op "q "__percpu_arg(1)",%0"		\
204		    : "=r" (pfo_ret__)			\
205		    : "m" (var));			\
206		break;					\
207	default: __bad_percpu_size();			\
208	}						\
209	pfo_ret__;					\
210})
211
212#define percpu_stable_op(op, var)			\
213({							\
214	typeof(var) pfo_ret__;				\
215	switch (sizeof(var)) {				\
216	case 1:						\
217		asm(op "b "__percpu_arg(P1)",%0"	\
218		    : "=q" (pfo_ret__)			\
219		    : "p" (&(var)));			\
220		break;					\
221	case 2:						\
222		asm(op "w "__percpu_arg(P1)",%0"	\
223		    : "=r" (pfo_ret__)			\
224		    : "p" (&(var)));			\
225		break;					\
226	case 4:						\
227		asm(op "l "__percpu_arg(P1)",%0"	\
228		    : "=r" (pfo_ret__)			\
229		    : "p" (&(var)));			\
230		break;					\
231	case 8:						\
232		asm(op "q "__percpu_arg(P1)",%0"	\
233		    : "=r" (pfo_ret__)			\
234		    : "p" (&(var)));			\
235		break;					\
236	default: __bad_percpu_size();			\
237	}						\
238	pfo_ret__;					\
239})
240
241#define percpu_unary_op(op, var)			\
242({							\
243	switch (sizeof(var)) {				\
244	case 1:						\
245		asm(op "b "__percpu_arg(0)		\
246		    : "+m" (var));			\
247		break;					\
248	case 2:						\
249		asm(op "w "__percpu_arg(0)		\
250		    : "+m" (var));			\
251		break;					\
252	case 4:						\
253		asm(op "l "__percpu_arg(0)		\
254		    : "+m" (var));			\
255		break;					\
256	case 8:						\
257		asm(op "q "__percpu_arg(0)		\
258		    : "+m" (var));			\
259		break;					\
260	default: __bad_percpu_size();			\
261	}						\
262})
263
264/*
265 * Add return operation
266 */
267#define percpu_add_return_op(var, val)					\
268({									\
269	typeof(var) paro_ret__ = val;					\
270	switch (sizeof(var)) {						\
271	case 1:								\
272		asm("xaddb %0, "__percpu_arg(1)				\
273			    : "+q" (paro_ret__), "+m" (var)		\
274			    : : "memory");				\
275		break;							\
276	case 2:								\
277		asm("xaddw %0, "__percpu_arg(1)				\
278			    : "+r" (paro_ret__), "+m" (var)		\
279			    : : "memory");				\
280		break;							\
281	case 4:								\
282		asm("xaddl %0, "__percpu_arg(1)				\
283			    : "+r" (paro_ret__), "+m" (var)		\
284			    : : "memory");				\
285		break;							\
286	case 8:								\
287		asm("xaddq %0, "__percpu_arg(1)				\
288			    : "+re" (paro_ret__), "+m" (var)		\
289			    : : "memory");				\
290		break;							\
291	default: __bad_percpu_size();					\
292	}								\
293	paro_ret__ += val;						\
294	paro_ret__;							\
295})
296
297/*
298 * xchg is implemented using cmpxchg without a lock prefix. xchg is
299 * expensive due to the implied lock prefix.  The processor cannot prefetch
300 * cachelines if xchg is used.
301 */
302#define percpu_xchg_op(var, nval)					\
303({									\
304	typeof(var) pxo_ret__;						\
305	typeof(var) pxo_new__ = (nval);					\
306	switch (sizeof(var)) {						\
307	case 1:								\
308		asm("\n\tmov "__percpu_arg(1)",%%al"			\
309		    "\n1:\tcmpxchgb %2, "__percpu_arg(1)		\
310		    "\n\tjnz 1b"					\
311			    : "=&a" (pxo_ret__), "+m" (var)		\
312			    : "q" (pxo_new__)				\
313			    : "memory");				\
314		break;							\
315	case 2:								\
316		asm("\n\tmov "__percpu_arg(1)",%%ax"			\
317		    "\n1:\tcmpxchgw %2, "__percpu_arg(1)		\
318		    "\n\tjnz 1b"					\
319			    : "=&a" (pxo_ret__), "+m" (var)		\
320			    : "r" (pxo_new__)				\
321			    : "memory");				\
322		break;							\
323	case 4:								\
324		asm("\n\tmov "__percpu_arg(1)",%%eax"			\
325		    "\n1:\tcmpxchgl %2, "__percpu_arg(1)		\
326		    "\n\tjnz 1b"					\
327			    : "=&a" (pxo_ret__), "+m" (var)		\
328			    : "r" (pxo_new__)				\
329			    : "memory");				\
330		break;							\
331	case 8:								\
332		asm("\n\tmov "__percpu_arg(1)",%%rax"			\
333		    "\n1:\tcmpxchgq %2, "__percpu_arg(1)		\
334		    "\n\tjnz 1b"					\
335			    : "=&a" (pxo_ret__), "+m" (var)		\
336			    : "r" (pxo_new__)				\
337			    : "memory");				\
338		break;							\
339	default: __bad_percpu_size();					\
340	}								\
341	pxo_ret__;							\
342})
343
344/*
345 * cmpxchg has no such implied lock semantics as a result it is much
346 * more efficient for cpu local operations.
347 */
348#define percpu_cmpxchg_op(var, oval, nval)				\
349({									\
350	typeof(var) pco_ret__;						\
351	typeof(var) pco_old__ = (oval);					\
352	typeof(var) pco_new__ = (nval);					\
353	switch (sizeof(var)) {						\
354	case 1:								\
355		asm("cmpxchgb %2, "__percpu_arg(1)			\
356			    : "=a" (pco_ret__), "+m" (var)		\
357			    : "q" (pco_new__), "0" (pco_old__)		\
358			    : "memory");				\
359		break;							\
360	case 2:								\
361		asm("cmpxchgw %2, "__percpu_arg(1)			\
362			    : "=a" (pco_ret__), "+m" (var)		\
363			    : "r" (pco_new__), "0" (pco_old__)		\
364			    : "memory");				\
365		break;							\
366	case 4:								\
367		asm("cmpxchgl %2, "__percpu_arg(1)			\
368			    : "=a" (pco_ret__), "+m" (var)		\
369			    : "r" (pco_new__), "0" (pco_old__)		\
370			    : "memory");				\
371		break;							\
372	case 8:								\
373		asm("cmpxchgq %2, "__percpu_arg(1)			\
374			    : "=a" (pco_ret__), "+m" (var)		\
375			    : "r" (pco_new__), "0" (pco_old__)		\
376			    : "memory");				\
377		break;							\
378	default: __bad_percpu_size();					\
379	}								\
380	pco_ret__;							\
381})
382
383/*
384 * this_cpu_read() makes gcc load the percpu variable every time it is
385 * accessed while this_cpu_read_stable() allows the value to be cached.
386 * this_cpu_read_stable() is more efficient and can be used if its value
387 * is guaranteed to be valid across cpus.  The current users include
388 * get_current() and get_thread_info() both of which are actually
389 * per-thread variables implemented as per-cpu variables and thus
390 * stable for the duration of the respective task.
391 */
392#define this_cpu_read_stable(var)	percpu_stable_op("mov", var)
393
394#define raw_cpu_read_1(pcp)		percpu_from_op("mov", pcp)
395#define raw_cpu_read_2(pcp)		percpu_from_op("mov", pcp)
396#define raw_cpu_read_4(pcp)		percpu_from_op("mov", pcp)
397
398#define raw_cpu_write_1(pcp, val)	percpu_to_op("mov", (pcp), val)
399#define raw_cpu_write_2(pcp, val)	percpu_to_op("mov", (pcp), val)
400#define raw_cpu_write_4(pcp, val)	percpu_to_op("mov", (pcp), val)
401#define raw_cpu_add_1(pcp, val)		percpu_add_op((pcp), val)
402#define raw_cpu_add_2(pcp, val)		percpu_add_op((pcp), val)
403#define raw_cpu_add_4(pcp, val)		percpu_add_op((pcp), val)
404#define raw_cpu_and_1(pcp, val)		percpu_to_op("and", (pcp), val)
405#define raw_cpu_and_2(pcp, val)		percpu_to_op("and", (pcp), val)
406#define raw_cpu_and_4(pcp, val)		percpu_to_op("and", (pcp), val)
407#define raw_cpu_or_1(pcp, val)		percpu_to_op("or", (pcp), val)
408#define raw_cpu_or_2(pcp, val)		percpu_to_op("or", (pcp), val)
409#define raw_cpu_or_4(pcp, val)		percpu_to_op("or", (pcp), val)
410#define raw_cpu_xchg_1(pcp, val)	percpu_xchg_op(pcp, val)
411#define raw_cpu_xchg_2(pcp, val)	percpu_xchg_op(pcp, val)
412#define raw_cpu_xchg_4(pcp, val)	percpu_xchg_op(pcp, val)
413
414#define this_cpu_read_1(pcp)		percpu_from_op("mov", pcp)
415#define this_cpu_read_2(pcp)		percpu_from_op("mov", pcp)
416#define this_cpu_read_4(pcp)		percpu_from_op("mov", pcp)
417#define this_cpu_write_1(pcp, val)	percpu_to_op("mov", (pcp), val)
418#define this_cpu_write_2(pcp, val)	percpu_to_op("mov", (pcp), val)
419#define this_cpu_write_4(pcp, val)	percpu_to_op("mov", (pcp), val)
420#define this_cpu_add_1(pcp, val)	percpu_add_op((pcp), val)
421#define this_cpu_add_2(pcp, val)	percpu_add_op((pcp), val)
422#define this_cpu_add_4(pcp, val)	percpu_add_op((pcp), val)
423#define this_cpu_and_1(pcp, val)	percpu_to_op("and", (pcp), val)
424#define this_cpu_and_2(pcp, val)	percpu_to_op("and", (pcp), val)
425#define this_cpu_and_4(pcp, val)	percpu_to_op("and", (pcp), val)
426#define this_cpu_or_1(pcp, val)		percpu_to_op("or", (pcp), val)
427#define this_cpu_or_2(pcp, val)		percpu_to_op("or", (pcp), val)
428#define this_cpu_or_4(pcp, val)		percpu_to_op("or", (pcp), val)
429#define this_cpu_xchg_1(pcp, nval)	percpu_xchg_op(pcp, nval)
430#define this_cpu_xchg_2(pcp, nval)	percpu_xchg_op(pcp, nval)
431#define this_cpu_xchg_4(pcp, nval)	percpu_xchg_op(pcp, nval)
432
433#define raw_cpu_add_return_1(pcp, val)		percpu_add_return_op(pcp, val)
434#define raw_cpu_add_return_2(pcp, val)		percpu_add_return_op(pcp, val)
435#define raw_cpu_add_return_4(pcp, val)		percpu_add_return_op(pcp, val)
436#define raw_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
437#define raw_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
438#define raw_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
439
440#define this_cpu_add_return_1(pcp, val)		percpu_add_return_op(pcp, val)
441#define this_cpu_add_return_2(pcp, val)		percpu_add_return_op(pcp, val)
442#define this_cpu_add_return_4(pcp, val)		percpu_add_return_op(pcp, val)
443#define this_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
444#define this_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
445#define this_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
446
447#ifdef CONFIG_X86_CMPXCHG64
448#define percpu_cmpxchg8b_double(pcp1, pcp2, o1, o2, n1, n2)		\
449({									\
450	bool __ret;							\
451	typeof(pcp1) __o1 = (o1), __n1 = (n1);				\
452	typeof(pcp2) __o2 = (o2), __n2 = (n2);				\
453	asm volatile("cmpxchg8b "__percpu_arg(1)"\n\tsetz %0\n\t"	\
454		    : "=a" (__ret), "+m" (pcp1), "+m" (pcp2), "+d" (__o2) \
455		    :  "b" (__n1), "c" (__n2), "a" (__o1));		\
456	__ret;								\
457})
458
459#define raw_cpu_cmpxchg_double_4	percpu_cmpxchg8b_double
460#define this_cpu_cmpxchg_double_4	percpu_cmpxchg8b_double
461#endif /* CONFIG_X86_CMPXCHG64 */
462
463/*
464 * Per cpu atomic 64 bit operations are only available under 64 bit.
465 * 32 bit must fall back to generic operations.
466 */
467#ifdef CONFIG_X86_64
468#define raw_cpu_read_8(pcp)			percpu_from_op("mov", pcp)
469#define raw_cpu_write_8(pcp, val)		percpu_to_op("mov", (pcp), val)
470#define raw_cpu_add_8(pcp, val)			percpu_add_op((pcp), val)
471#define raw_cpu_and_8(pcp, val)			percpu_to_op("and", (pcp), val)
472#define raw_cpu_or_8(pcp, val)			percpu_to_op("or", (pcp), val)
473#define raw_cpu_add_return_8(pcp, val)		percpu_add_return_op(pcp, val)
474#define raw_cpu_xchg_8(pcp, nval)		percpu_xchg_op(pcp, nval)
475#define raw_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
476
477#define this_cpu_read_8(pcp)			percpu_from_op("mov", pcp)
478#define this_cpu_write_8(pcp, val)		percpu_to_op("mov", (pcp), val)
479#define this_cpu_add_8(pcp, val)		percpu_add_op((pcp), val)
480#define this_cpu_and_8(pcp, val)		percpu_to_op("and", (pcp), val)
481#define this_cpu_or_8(pcp, val)			percpu_to_op("or", (pcp), val)
482#define this_cpu_add_return_8(pcp, val)		percpu_add_return_op(pcp, val)
483#define this_cpu_xchg_8(pcp, nval)		percpu_xchg_op(pcp, nval)
484#define this_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
485
486/*
487 * Pretty complex macro to generate cmpxchg16 instruction.  The instruction
488 * is not supported on early AMD64 processors so we must be able to emulate
489 * it in software.  The address used in the cmpxchg16 instruction must be
490 * aligned to a 16 byte boundary.
491 */
492#define percpu_cmpxchg16b_double(pcp1, pcp2, o1, o2, n1, n2)		\
493({									\
494	bool __ret;							\
495	typeof(pcp1) __o1 = (o1), __n1 = (n1);				\
496	typeof(pcp2) __o2 = (o2), __n2 = (n2);				\
497	alternative_io("leaq %P1,%%rsi\n\tcall this_cpu_cmpxchg16b_emu\n\t", \
498		       "cmpxchg16b " __percpu_arg(1) "\n\tsetz %0\n\t",	\
499		       X86_FEATURE_CX16,				\
500		       ASM_OUTPUT2("=a" (__ret), "+m" (pcp1),		\
501				   "+m" (pcp2), "+d" (__o2)),		\
502		       "b" (__n1), "c" (__n2), "a" (__o1) : "rsi");	\
503	__ret;								\
504})
505
506#define raw_cpu_cmpxchg_double_8	percpu_cmpxchg16b_double
507#define this_cpu_cmpxchg_double_8	percpu_cmpxchg16b_double
508
509#endif
510
511static __always_inline bool x86_this_cpu_constant_test_bit(unsigned int nr,
512                        const unsigned long __percpu *addr)
513{
514	unsigned long __percpu *a =
515		(unsigned long __percpu *)addr + nr / BITS_PER_LONG;
516
517#ifdef CONFIG_X86_64
518	return ((1UL << (nr % BITS_PER_LONG)) & raw_cpu_read_8(*a)) != 0;
519#else
520	return ((1UL << (nr % BITS_PER_LONG)) & raw_cpu_read_4(*a)) != 0;
521#endif
522}
523
524static inline bool x86_this_cpu_variable_test_bit(int nr,
525                        const unsigned long __percpu *addr)
526{
527	bool oldbit;
528
529	asm volatile("btl "__percpu_arg(2)",%1"
530			CC_SET(c)
531			: CC_OUT(c) (oldbit)
532			: "m" (*(unsigned long __percpu *)addr), "Ir" (nr));
533
534	return oldbit;
535}
536
537#define x86_this_cpu_test_bit(nr, addr)			\
538	(__builtin_constant_p((nr))			\
539	 ? x86_this_cpu_constant_test_bit((nr), (addr))	\
540	 : x86_this_cpu_variable_test_bit((nr), (addr)))
541
542
543#include <asm-generic/percpu.h>
544
545/* We can use this directly for local CPU (faster). */
546DECLARE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off);
547
548#endif /* !__ASSEMBLY__ */
549
550#ifdef CONFIG_SMP
551
552/*
553 * Define the "EARLY_PER_CPU" macros.  These are used for some per_cpu
554 * variables that are initialized and accessed before there are per_cpu
555 * areas allocated.
556 */
557
558#define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)			\
559	DEFINE_PER_CPU(_type, _name) = _initvalue;			\
560	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata =	\
561				{ [0 ... NR_CPUS-1] = _initvalue };	\
562	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map
563
564#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue)	\
565	DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue;		\
566	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata =	\
567				{ [0 ... NR_CPUS-1] = _initvalue };	\
568	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map
569
570#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)			\
571	EXPORT_PER_CPU_SYMBOL(_name)
572
573#define DECLARE_EARLY_PER_CPU(_type, _name)			\
574	DECLARE_PER_CPU(_type, _name);				\
575	extern __typeof__(_type) *_name##_early_ptr;		\
576	extern __typeof__(_type)  _name##_early_map[]
577
578#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name)		\
579	DECLARE_PER_CPU_READ_MOSTLY(_type, _name);		\
580	extern __typeof__(_type) *_name##_early_ptr;		\
581	extern __typeof__(_type)  _name##_early_map[]
582
583#define	early_per_cpu_ptr(_name) (_name##_early_ptr)
584#define	early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
585#define	early_per_cpu(_name, _cpu) 				\
586	*(early_per_cpu_ptr(_name) ?				\
587		&early_per_cpu_ptr(_name)[_cpu] :		\
588		&per_cpu(_name, _cpu))
589
590#else	/* !CONFIG_SMP */
591#define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)		\
592	DEFINE_PER_CPU(_type, _name) = _initvalue
593
594#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue)	\
595	DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue
596
597#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)			\
598	EXPORT_PER_CPU_SYMBOL(_name)
599
600#define DECLARE_EARLY_PER_CPU(_type, _name)			\
601	DECLARE_PER_CPU(_type, _name)
602
603#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name)		\
604	DECLARE_PER_CPU_READ_MOSTLY(_type, _name)
605
606#define	early_per_cpu(_name, _cpu) per_cpu(_name, _cpu)
607#define	early_per_cpu_ptr(_name) NULL
608/* no early_per_cpu_map() */
609
610#endif	/* !CONFIG_SMP */
611
612#endif /* _ASM_X86_PERCPU_H */
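The raw_cpu_*() and this_cpu_*() helpers defined above are rarely called directly; they are the x86 backends that the generic per-cpu accessors in <linux/percpu-defs.h> dispatch to by operand size. A minimal, hypothetical usage sketch follows (pkt_count and touch_counter() are invented names, not part of this header):

/* Hypothetical example; pkt_count and touch_counter() are made-up names. */
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, pkt_count);

static unsigned long touch_counter(void)
{
	/*
	 * this_cpu_inc() reaches percpu_add_op() above with val == 1, so it
	 * compiles to a single incq/incl on %gs:/%fs:-relative memory; the
	 * one-instruction read-modify-write needs no preempt_disable().
	 */
	this_cpu_inc(pkt_count);

	/* this_cpu_read() reaches percpu_from_op("mov", ...) above. */
	return this_cpu_read(pkt_count);
}

In this version the raw_cpu_*() forms map to the same instructions; the distinction matters at the generic API level, where raw_cpu_*() callers are expected to have preemption disabled already.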
arch/x86/include/asm/percpu.h (v6.2)
  1/* SPDX-License-Identifier: GPL-2.0 */
  2#ifndef _ASM_X86_PERCPU_H
  3#define _ASM_X86_PERCPU_H
  4
  5#ifdef CONFIG_X86_64
  6#define __percpu_seg		gs
  7#else
  8#define __percpu_seg		fs
  9#endif
 10
 11#ifdef __ASSEMBLY__
 12
 13#ifdef CONFIG_SMP
 14#define PER_CPU_VAR(var)	%__percpu_seg:var
 15#else /* ! SMP */
 16#define PER_CPU_VAR(var)	var
 17#endif	/* SMP */
 18
 19#ifdef CONFIG_X86_64_SMP
 20#define INIT_PER_CPU_VAR(var)  init_per_cpu__##var
 21#else
 22#define INIT_PER_CPU_VAR(var)  var
 23#endif
 24
 25#else /* ...!ASSEMBLY */
 26
 27#include <linux/kernel.h>
 28#include <linux/stringify.h>
 29
 30#ifdef CONFIG_SMP
 31#define __percpu_prefix		"%%"__stringify(__percpu_seg)":"
 32#define __my_cpu_offset		this_cpu_read(this_cpu_off)
 33
 34/*
 35 * Compared to the generic __my_cpu_offset version, the following
 36 * saves one instruction and avoids clobbering a temp register.
 37 */
 38#define arch_raw_cpu_ptr(ptr)				\
 39({							\
 40	unsigned long tcp_ptr__;			\
 41	asm ("add " __percpu_arg(1) ", %0"		\
 42	     : "=r" (tcp_ptr__)				\
 43	     : "m" (this_cpu_off), "0" (ptr));		\
 44	(typeof(*(ptr)) __kernel __force *)tcp_ptr__;	\
 45})
 46#else
 47#define __percpu_prefix		""
 48#endif
 49
 50#define __percpu_arg(x)		__percpu_prefix "%" #x
 51
 52/*
 53 * Initialized pointers to per-cpu variables needed for the boot
 54 * processor need to use these macros to get the proper address
 55 * offset from __per_cpu_load on SMP.
 56 *
 57 * There also must be an entry in vmlinux_64.lds.S
 58 */
 59#define DECLARE_INIT_PER_CPU(var) \
 60       extern typeof(var) init_per_cpu_var(var)
 61
 62#ifdef CONFIG_X86_64_SMP
 63#define init_per_cpu_var(var)  init_per_cpu__##var
 64#else
 65#define init_per_cpu_var(var)  var
 66#endif
 67
 68/* For arch-specific code, we can use direct single-insn ops (they
 69 * don't give an lvalue though). */
 70
 71#define __pcpu_type_1 u8
 72#define __pcpu_type_2 u16
 73#define __pcpu_type_4 u32
 74#define __pcpu_type_8 u64
 75
 76#define __pcpu_cast_1(val) ((u8)(((unsigned long) val) & 0xff))
 77#define __pcpu_cast_2(val) ((u16)(((unsigned long) val) & 0xffff))
 78#define __pcpu_cast_4(val) ((u32)(((unsigned long) val) & 0xffffffff))
 79#define __pcpu_cast_8(val) ((u64)(val))
 80
 81#define __pcpu_op1_1(op, dst) op "b " dst
 82#define __pcpu_op1_2(op, dst) op "w " dst
 83#define __pcpu_op1_4(op, dst) op "l " dst
 84#define __pcpu_op1_8(op, dst) op "q " dst
 85
 86#define __pcpu_op2_1(op, src, dst) op "b " src ", " dst
 87#define __pcpu_op2_2(op, src, dst) op "w " src ", " dst
 88#define __pcpu_op2_4(op, src, dst) op "l " src ", " dst
 89#define __pcpu_op2_8(op, src, dst) op "q " src ", " dst
 90
 91#define __pcpu_reg_1(mod, x) mod "q" (x)
 92#define __pcpu_reg_2(mod, x) mod "r" (x)
 93#define __pcpu_reg_4(mod, x) mod "r" (x)
 94#define __pcpu_reg_8(mod, x) mod "r" (x)
 95
 96#define __pcpu_reg_imm_1(x) "qi" (x)
 97#define __pcpu_reg_imm_2(x) "ri" (x)
 98#define __pcpu_reg_imm_4(x) "ri" (x)
 99#define __pcpu_reg_imm_8(x) "re" (x)
100
101#define percpu_to_op(size, qual, op, _var, _val)			\
102do {									\
103	__pcpu_type_##size pto_val__ = __pcpu_cast_##size(_val);	\
104	if (0) {		                                        \
105		typeof(_var) pto_tmp__;					\
106		pto_tmp__ = (_val);					\
107		(void)pto_tmp__;					\
108	}								\
109	asm qual(__pcpu_op2_##size(op, "%[val]", __percpu_arg([var]))	\
110	    : [var] "+m" (_var)						\
111	    : [val] __pcpu_reg_imm_##size(pto_val__));			\
112} while (0)
113
114#define percpu_unary_op(size, qual, op, _var)				\
115({									\
116	asm qual (__pcpu_op1_##size(op, __percpu_arg([var]))		\
117	    : [var] "+m" (_var));					\
118})
119
120/*
121 * Generate a percpu add to memory instruction and optimize code
122 * if one is added or subtracted.
123 */
124#define percpu_add_op(size, qual, var, val)				\
125do {									\
126	const int pao_ID__ = (__builtin_constant_p(val) &&		\
127			      ((val) == 1 || (val) == -1)) ?		\
128				(int)(val) : 0;				\
129	if (0) {							\
130		typeof(var) pao_tmp__;					\
131		pao_tmp__ = (val);					\
132		(void)pao_tmp__;					\
133	}								\
134	if (pao_ID__ == 1)						\
135		percpu_unary_op(size, qual, "inc", var);		\
136	else if (pao_ID__ == -1)					\
137		percpu_unary_op(size, qual, "dec", var);		\
138	else								\
139		percpu_to_op(size, qual, "add", var, val);		\
140} while (0)
141
142#define percpu_from_op(size, qual, op, _var)				\
143({									\
144	__pcpu_type_##size pfo_val__;					\
145	asm qual (__pcpu_op2_##size(op, __percpu_arg([var]), "%[val]")	\
146	    : [val] __pcpu_reg_##size("=", pfo_val__)			\
147	    : [var] "m" (_var));					\
148	(typeof(_var))(unsigned long) pfo_val__;			\
149})
150
151#define percpu_stable_op(size, op, _var)				\
152({									\
153	__pcpu_type_##size pfo_val__;					\
154	asm(__pcpu_op2_##size(op, __percpu_arg(P[var]), "%[val]")	\
155	    : [val] __pcpu_reg_##size("=", pfo_val__)			\
156	    : [var] "p" (&(_var)));					\
157	(typeof(_var))(unsigned long) pfo_val__;			\
158})
159
160/*
161 * Add return operation
162 */
163#define percpu_add_return_op(size, qual, _var, _val)			\
164({									\
165	__pcpu_type_##size paro_tmp__ = __pcpu_cast_##size(_val);	\
166	asm qual (__pcpu_op2_##size("xadd", "%[tmp]",			\
167				     __percpu_arg([var]))		\
168		  : [tmp] __pcpu_reg_##size("+", paro_tmp__),		\
169		    [var] "+m" (_var)					\
170		  : : "memory");					\
171	(typeof(_var))(unsigned long) (paro_tmp__ + _val);		\
172})
173
174/*
175 * xchg is implemented using cmpxchg without a lock prefix. xchg is
176 * expensive due to the implied lock prefix.  The processor cannot prefetch
177 * cachelines if xchg is used.
178 */
179#define percpu_xchg_op(size, qual, _var, _nval)				\
180({									\
181	__pcpu_type_##size pxo_old__;					\
182	__pcpu_type_##size pxo_new__ = __pcpu_cast_##size(_nval);	\
183	asm qual (__pcpu_op2_##size("mov", __percpu_arg([var]),		\
184				    "%[oval]")				\
185		  "\n1:\t"						\
186		  __pcpu_op2_##size("cmpxchg", "%[nval]",		\
187				    __percpu_arg([var]))		\
188		  "\n\tjnz 1b"						\
189		  : [oval] "=&a" (pxo_old__),				\
190		    [var] "+m" (_var)					\
191		  : [nval] __pcpu_reg_##size(, pxo_new__)		\
192		  : "memory");						\
193	(typeof(_var))(unsigned long) pxo_old__;			\
194})
195
196/*
197 * cmpxchg has no such implied lock semantics as a result it is much
198 * more efficient for cpu local operations.
199 */
200#define percpu_cmpxchg_op(size, qual, _var, _oval, _nval)		\
201({									\
202	__pcpu_type_##size pco_old__ = __pcpu_cast_##size(_oval);	\
203	__pcpu_type_##size pco_new__ = __pcpu_cast_##size(_nval);	\
204	asm qual (__pcpu_op2_##size("cmpxchg", "%[nval]",		\
205				    __percpu_arg([var]))		\
206		  : [oval] "+a" (pco_old__),				\
207		    [var] "+m" (_var)					\
208		  : [nval] __pcpu_reg_##size(, pco_new__)		\
209		  : "memory");						\
210	(typeof(_var))(unsigned long) pco_old__;			\
211})
212
213/*
214 * this_cpu_read() makes gcc load the percpu variable every time it is
215 * accessed while this_cpu_read_stable() allows the value to be cached.
216 * this_cpu_read_stable() is more efficient and can be used if its value
217 * is guaranteed to be valid across cpus.  The current users include
218 * get_current() and get_thread_info() both of which are actually
219 * per-thread variables implemented as per-cpu variables and thus
220 * stable for the duration of the respective task.
221 */
222#define this_cpu_read_stable_1(pcp)	percpu_stable_op(1, "mov", pcp)
223#define this_cpu_read_stable_2(pcp)	percpu_stable_op(2, "mov", pcp)
224#define this_cpu_read_stable_4(pcp)	percpu_stable_op(4, "mov", pcp)
225#define this_cpu_read_stable_8(pcp)	percpu_stable_op(8, "mov", pcp)
226#define this_cpu_read_stable(pcp)	__pcpu_size_call_return(this_cpu_read_stable_, pcp)
227
228#define raw_cpu_read_1(pcp)		percpu_from_op(1, , "mov", pcp)
229#define raw_cpu_read_2(pcp)		percpu_from_op(2, , "mov", pcp)
230#define raw_cpu_read_4(pcp)		percpu_from_op(4, , "mov", pcp)
231
232#define raw_cpu_write_1(pcp, val)	percpu_to_op(1, , "mov", (pcp), val)
233#define raw_cpu_write_2(pcp, val)	percpu_to_op(2, , "mov", (pcp), val)
234#define raw_cpu_write_4(pcp, val)	percpu_to_op(4, , "mov", (pcp), val)
235#define raw_cpu_add_1(pcp, val)		percpu_add_op(1, , (pcp), val)
236#define raw_cpu_add_2(pcp, val)		percpu_add_op(2, , (pcp), val)
237#define raw_cpu_add_4(pcp, val)		percpu_add_op(4, , (pcp), val)
238#define raw_cpu_and_1(pcp, val)		percpu_to_op(1, , "and", (pcp), val)
239#define raw_cpu_and_2(pcp, val)		percpu_to_op(2, , "and", (pcp), val)
240#define raw_cpu_and_4(pcp, val)		percpu_to_op(4, , "and", (pcp), val)
241#define raw_cpu_or_1(pcp, val)		percpu_to_op(1, , "or", (pcp), val)
242#define raw_cpu_or_2(pcp, val)		percpu_to_op(2, , "or", (pcp), val)
243#define raw_cpu_or_4(pcp, val)		percpu_to_op(4, , "or", (pcp), val)
244
245/*
246 * raw_cpu_xchg() can use a load-store since it is not required to be
247 * IRQ-safe.
248 */
249#define raw_percpu_xchg_op(var, nval)					\
250({									\
251	typeof(var) pxo_ret__ = raw_cpu_read(var);			\
252	raw_cpu_write(var, (nval));					\
253	pxo_ret__;							\
254})
255
256#define raw_cpu_xchg_1(pcp, val)	raw_percpu_xchg_op(pcp, val)
257#define raw_cpu_xchg_2(pcp, val)	raw_percpu_xchg_op(pcp, val)
258#define raw_cpu_xchg_4(pcp, val)	raw_percpu_xchg_op(pcp, val)
259
260#define this_cpu_read_1(pcp)		percpu_from_op(1, volatile, "mov", pcp)
261#define this_cpu_read_2(pcp)		percpu_from_op(2, volatile, "mov", pcp)
262#define this_cpu_read_4(pcp)		percpu_from_op(4, volatile, "mov", pcp)
263#define this_cpu_write_1(pcp, val)	percpu_to_op(1, volatile, "mov", (pcp), val)
264#define this_cpu_write_2(pcp, val)	percpu_to_op(2, volatile, "mov", (pcp), val)
265#define this_cpu_write_4(pcp, val)	percpu_to_op(4, volatile, "mov", (pcp), val)
266#define this_cpu_add_1(pcp, val)	percpu_add_op(1, volatile, (pcp), val)
267#define this_cpu_add_2(pcp, val)	percpu_add_op(2, volatile, (pcp), val)
268#define this_cpu_add_4(pcp, val)	percpu_add_op(4, volatile, (pcp), val)
269#define this_cpu_and_1(pcp, val)	percpu_to_op(1, volatile, "and", (pcp), val)
270#define this_cpu_and_2(pcp, val)	percpu_to_op(2, volatile, "and", (pcp), val)
271#define this_cpu_and_4(pcp, val)	percpu_to_op(4, volatile, "and", (pcp), val)
272#define this_cpu_or_1(pcp, val)		percpu_to_op(1, volatile, "or", (pcp), val)
273#define this_cpu_or_2(pcp, val)		percpu_to_op(2, volatile, "or", (pcp), val)
274#define this_cpu_or_4(pcp, val)		percpu_to_op(4, volatile, "or", (pcp), val)
275#define this_cpu_xchg_1(pcp, nval)	percpu_xchg_op(1, volatile, pcp, nval)
276#define this_cpu_xchg_2(pcp, nval)	percpu_xchg_op(2, volatile, pcp, nval)
277#define this_cpu_xchg_4(pcp, nval)	percpu_xchg_op(4, volatile, pcp, nval)
278
279#define raw_cpu_add_return_1(pcp, val)		percpu_add_return_op(1, , pcp, val)
280#define raw_cpu_add_return_2(pcp, val)		percpu_add_return_op(2, , pcp, val)
281#define raw_cpu_add_return_4(pcp, val)		percpu_add_return_op(4, , pcp, val)
282#define raw_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op(1, , pcp, oval, nval)
283#define raw_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(2, , pcp, oval, nval)
284#define raw_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(4, , pcp, oval, nval)
285
286#define this_cpu_add_return_1(pcp, val)		percpu_add_return_op(1, volatile, pcp, val)
287#define this_cpu_add_return_2(pcp, val)		percpu_add_return_op(2, volatile, pcp, val)
288#define this_cpu_add_return_4(pcp, val)		percpu_add_return_op(4, volatile, pcp, val)
289#define this_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op(1, volatile, pcp, oval, nval)
290#define this_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(2, volatile, pcp, oval, nval)
291#define this_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(4, volatile, pcp, oval, nval)
292
293#ifdef CONFIG_X86_CMPXCHG64
294#define percpu_cmpxchg8b_double(pcp1, pcp2, o1, o2, n1, n2)		\
295({									\
296	bool __ret;							\
297	typeof(pcp1) __o1 = (o1), __n1 = (n1);				\
298	typeof(pcp2) __o2 = (o2), __n2 = (n2);				\
299	asm volatile("cmpxchg8b "__percpu_arg(1)			\
300		     CC_SET(z)						\
301		     : CC_OUT(z) (__ret), "+m" (pcp1), "+m" (pcp2), "+a" (__o1), "+d" (__o2) \
302		     : "b" (__n1), "c" (__n2));				\
303	__ret;								\
304})
305
306#define raw_cpu_cmpxchg_double_4	percpu_cmpxchg8b_double
307#define this_cpu_cmpxchg_double_4	percpu_cmpxchg8b_double
308#endif /* CONFIG_X86_CMPXCHG64 */
309
310/*
311 * Per cpu atomic 64 bit operations are only available under 64 bit.
312 * 32 bit must fall back to generic operations.
313 */
314#ifdef CONFIG_X86_64
315#define raw_cpu_read_8(pcp)			percpu_from_op(8, , "mov", pcp)
316#define raw_cpu_write_8(pcp, val)		percpu_to_op(8, , "mov", (pcp), val)
317#define raw_cpu_add_8(pcp, val)			percpu_add_op(8, , (pcp), val)
318#define raw_cpu_and_8(pcp, val)			percpu_to_op(8, , "and", (pcp), val)
319#define raw_cpu_or_8(pcp, val)			percpu_to_op(8, , "or", (pcp), val)
320#define raw_cpu_add_return_8(pcp, val)		percpu_add_return_op(8, , pcp, val)
321#define raw_cpu_xchg_8(pcp, nval)		raw_percpu_xchg_op(pcp, nval)
322#define raw_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(8, , pcp, oval, nval)
323
324#define this_cpu_read_8(pcp)			percpu_from_op(8, volatile, "mov", pcp)
325#define this_cpu_write_8(pcp, val)		percpu_to_op(8, volatile, "mov", (pcp), val)
326#define this_cpu_add_8(pcp, val)		percpu_add_op(8, volatile, (pcp), val)
327#define this_cpu_and_8(pcp, val)		percpu_to_op(8, volatile, "and", (pcp), val)
328#define this_cpu_or_8(pcp, val)			percpu_to_op(8, volatile, "or", (pcp), val)
329#define this_cpu_add_return_8(pcp, val)		percpu_add_return_op(8, volatile, pcp, val)
330#define this_cpu_xchg_8(pcp, nval)		percpu_xchg_op(8, volatile, pcp, nval)
331#define this_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(8, volatile, pcp, oval, nval)
332
333/*
334 * Pretty complex macro to generate cmpxchg16 instruction.  The instruction
335 * is not supported on early AMD64 processors so we must be able to emulate
336 * it in software.  The address used in the cmpxchg16 instruction must be
337 * aligned to a 16 byte boundary.
338 */
339#define percpu_cmpxchg16b_double(pcp1, pcp2, o1, o2, n1, n2)		\
340({									\
341	bool __ret;							\
342	typeof(pcp1) __o1 = (o1), __n1 = (n1);				\
343	typeof(pcp2) __o2 = (o2), __n2 = (n2);				\
344	alternative_io("leaq %P1,%%rsi\n\tcall this_cpu_cmpxchg16b_emu\n\t", \
345		       "cmpxchg16b " __percpu_arg(1) "\n\tsetz %0\n\t",	\
346		       X86_FEATURE_CX16,				\
347		       ASM_OUTPUT2("=a" (__ret), "+m" (pcp1),		\
348				   "+m" (pcp2), "+d" (__o2)),		\
349		       "b" (__n1), "c" (__n2), "a" (__o1) : "rsi");	\
350	__ret;								\
351})
352
353#define raw_cpu_cmpxchg_double_8	percpu_cmpxchg16b_double
354#define this_cpu_cmpxchg_double_8	percpu_cmpxchg16b_double
355
356#endif
357
358static __always_inline bool x86_this_cpu_constant_test_bit(unsigned int nr,
359                        const unsigned long __percpu *addr)
360{
361	unsigned long __percpu *a =
362		(unsigned long __percpu *)addr + nr / BITS_PER_LONG;
363
364#ifdef CONFIG_X86_64
365	return ((1UL << (nr % BITS_PER_LONG)) & raw_cpu_read_8(*a)) != 0;
366#else
367	return ((1UL << (nr % BITS_PER_LONG)) & raw_cpu_read_4(*a)) != 0;
368#endif
369}
370
371static inline bool x86_this_cpu_variable_test_bit(int nr,
372                        const unsigned long __percpu *addr)
373{
374	bool oldbit;
375
376	asm volatile("btl "__percpu_arg(2)",%1"
377			CC_SET(c)
378			: CC_OUT(c) (oldbit)
379			: "m" (*(unsigned long __percpu *)addr), "Ir" (nr));
380
381	return oldbit;
382}
383
384#define x86_this_cpu_test_bit(nr, addr)			\
385	(__builtin_constant_p((nr))			\
386	 ? x86_this_cpu_constant_test_bit((nr), (addr))	\
387	 : x86_this_cpu_variable_test_bit((nr), (addr)))
388
389
390#include <asm-generic/percpu.h>
391
392/* We can use this directly for local CPU (faster). */
393DECLARE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off);
394
395#endif /* !__ASSEMBLY__ */
396
397#ifdef CONFIG_SMP
398
399/*
400 * Define the "EARLY_PER_CPU" macros.  These are used for some per_cpu
401 * variables that are initialized and accessed before there are per_cpu
402 * areas allocated.
403 */
404
405#define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)			\
406	DEFINE_PER_CPU(_type, _name) = _initvalue;			\
407	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata =	\
408				{ [0 ... NR_CPUS-1] = _initvalue };	\
409	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map
410
411#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue)	\
412	DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue;		\
413	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata =	\
414				{ [0 ... NR_CPUS-1] = _initvalue };	\
415	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map
416
417#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)			\
418	EXPORT_PER_CPU_SYMBOL(_name)
419
420#define DECLARE_EARLY_PER_CPU(_type, _name)			\
421	DECLARE_PER_CPU(_type, _name);				\
422	extern __typeof__(_type) *_name##_early_ptr;		\
423	extern __typeof__(_type)  _name##_early_map[]
424
425#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name)		\
426	DECLARE_PER_CPU_READ_MOSTLY(_type, _name);		\
427	extern __typeof__(_type) *_name##_early_ptr;		\
428	extern __typeof__(_type)  _name##_early_map[]
429
430#define	early_per_cpu_ptr(_name) (_name##_early_ptr)
431#define	early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
432#define	early_per_cpu(_name, _cpu) 				\
433	*(early_per_cpu_ptr(_name) ?				\
434		&early_per_cpu_ptr(_name)[_cpu] :		\
435		&per_cpu(_name, _cpu))
436
437#else	/* !CONFIG_SMP */
438#define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)		\
439	DEFINE_PER_CPU(_type, _name) = _initvalue
440
441#define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue)	\
442	DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue
443
444#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)			\
445	EXPORT_PER_CPU_SYMBOL(_name)
446
447#define DECLARE_EARLY_PER_CPU(_type, _name)			\
448	DECLARE_PER_CPU(_type, _name)
449
450#define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name)		\
451	DECLARE_PER_CPU_READ_MOSTLY(_type, _name)
452
453#define	early_per_cpu(_name, _cpu) per_cpu(_name, _cpu)
454#define	early_per_cpu_ptr(_name) NULL
455/* no early_per_cpu_map() */
456
457#endif	/* !CONFIG_SMP */
458
459#endif /* _ASM_X86_PERCPU_H */
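Compared with v4.17, the v6.2 version folds the per-size switch statements into a single macro body: the operand size is passed as a literal 1/2/4/8 and token-pasted to select __pcpu_type_##size, __pcpu_op2_##size and __pcpu_reg_##size, while a qual argument threads "volatile" into the asm for the this_cpu_*() variants only. Below is a small stand-alone C sketch of that token-pasting dispatch; my_type_*, my_cast_* and store_op() are invented names for illustration, not kernel APIs:

/* User-space analogue of the __pcpu_type_##size / __pcpu_cast_##size
 * dispatch; names are invented, not kernel APIs. */
#include <stdint.h>
#include <stdio.h>

#define my_type_1 uint8_t
#define my_type_4 uint32_t

/* Like __pcpu_cast_##size: narrow the value to the operand width. */
#define my_cast_1(v) ((uint8_t)((unsigned long)(v) & 0xff))
#define my_cast_4(v) ((uint32_t)((unsigned long)(v) & 0xffffffff))

/* Like percpu_to_op(size, ...): one body, width chosen by token pasting. */
#define store_op(size, var, val)				\
do {								\
	my_type_##size tmp__ = my_cast_##size(val);		\
	(var) = tmp__;						\
} while (0)

int main(void)
{
	uint8_t  b;
	uint32_t w;

	store_op(1, b, 0x1ff);		/* narrowed to 0xff */
	store_op(4, w, 123456u);
	printf("%u %u\n", (unsigned)b, (unsigned)w);
	return 0;
}

The kernel macros make the same size-based selection but expand into a segment-prefixed asm statement instead of a plain C assignment.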