include/linux/percpu.h (v5.9)
  1/* SPDX-License-Identifier: GPL-2.0 */
  2#ifndef __LINUX_PERCPU_H
  3#define __LINUX_PERCPU_H
  4
  5#include <linux/mmdebug.h>
  6#include <linux/preempt.h>
  7#include <linux/smp.h>
  8#include <linux/cpumask.h>
  9#include <linux/printk.h>
 10#include <linux/pfn.h>
 11#include <linux/init.h>
 12
 13#include <asm/percpu.h>
 14
 15/* enough to cover all DEFINE_PER_CPUs in modules */
 16#ifdef CONFIG_MODULES
 17#define PERCPU_MODULE_RESERVE		(8 << 10)
 18#else
 19#define PERCPU_MODULE_RESERVE		0
 20#endif
 21
 22/* minimum unit size, also is the maximum supported allocation size */
 23#define PCPU_MIN_UNIT_SIZE		PFN_ALIGN(32 << 10)
 24
 25/* minimum allocation size and shift in bytes */
 26#define PCPU_MIN_ALLOC_SHIFT		2
 27#define PCPU_MIN_ALLOC_SIZE		(1 << PCPU_MIN_ALLOC_SHIFT)
 28
 29/*
  30 * PCPU_BITMAP_BLOCK_SIZE must be the same size as PAGE_SIZE because the
  31 * updating of hints is used to manage nr_empty_pop_pages both within
  32 * each chunk and globally.
 33 */
 34#define PCPU_BITMAP_BLOCK_SIZE		PAGE_SIZE
 35#define PCPU_BITMAP_BLOCK_BITS		(PCPU_BITMAP_BLOCK_SIZE >>	\
 36					 PCPU_MIN_ALLOC_SHIFT)
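For a concrete sense of scale (assuming the common 4 KiB PAGE_SIZE): PCPU_BITMAP_BLOCK_SIZE is then 4096 bytes and, with PCPU_MIN_ALLOC_SHIFT of 2, PCPU_BITMAP_BLOCK_BITS = 4096 >> 2 = 1024, i.e. the allocator tracks each page-sized block with 1024 bitmap bits, one bit per 4-byte minimum allocation unit.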
 37
 38/*
 39 * Percpu allocator can serve percpu allocations before slab is
 40 * initialized which allows slab to depend on the percpu allocator.
 41 * The following two parameters decide how much resource to
 42 * preallocate for this.  Keep PERCPU_DYNAMIC_RESERVE equal to or
 43 * larger than PERCPU_DYNAMIC_EARLY_SIZE.
 44 */
 45#define PERCPU_DYNAMIC_EARLY_SLOTS	128
 46#define PERCPU_DYNAMIC_EARLY_SIZE	(12 << 10)
 47
 48/*
 49 * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
 50 * back on the first chunk for dynamic percpu allocation if arch is
 51 * manually allocating and mapping it for faster access (as a part of
 52 * large page mapping for example).
 53 *
 54 * The following values give between one and two pages of free space
 55 * after typical minimal boot (2-way SMP, single disk and NIC) with
 56 * both defconfig and a distro config on x86_64 and 32.  More
 57 * intelligent way to determine this would be nice.
 58 */
 59#if BITS_PER_LONG > 32
 60#define PERCPU_DYNAMIC_RESERVE		(28 << 10)
 61#else
 62#define PERCPU_DYNAMIC_RESERVE		(20 << 10)
 63#endif
 64
 65extern void *pcpu_base_addr;
 66extern const unsigned long *pcpu_unit_offsets;
 67
 68struct pcpu_group_info {
 69	int			nr_units;	/* aligned # of units */
 70	unsigned long		base_offset;	/* base address offset */
 71	unsigned int		*cpu_map;	/* unit->cpu map, empty
 72						 * entries contain NR_CPUS */
 73};
 74
 75struct pcpu_alloc_info {
 76	size_t			static_size;
 77	size_t			reserved_size;
 78	size_t			dyn_size;
 79	size_t			unit_size;
 80	size_t			atom_size;
 81	size_t			alloc_size;
 82	size_t			__ai_size;	/* internal, don't use */
 83	int			nr_groups;	/* 0 if grouping unnecessary */
 84	struct pcpu_group_info	groups[];
 85};
 86
 87enum pcpu_fc {
 88	PCPU_FC_AUTO,
 89	PCPU_FC_EMBED,
 90	PCPU_FC_PAGE,
 91
 92	PCPU_FC_NR,
 93};
 94extern const char * const pcpu_fc_names[PCPU_FC_NR];
 95
 96extern enum pcpu_fc pcpu_chosen_fc;
 97
 98typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int cpu, size_t size,
 99				     size_t align);
100typedef void (*pcpu_fc_free_fn_t)(void *ptr, size_t size);
101typedef void (*pcpu_fc_populate_pte_fn_t)(unsigned long addr);
102typedef int (pcpu_fc_cpu_distance_fn_t)(unsigned int from, unsigned int to);
103
104extern struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
105							     int nr_units);
106extern void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai);
107
108extern void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
109					 void *base_addr);
110
111#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
112extern int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
113				size_t atom_size,
114				pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
115				pcpu_fc_alloc_fn_t alloc_fn,
116				pcpu_fc_free_fn_t free_fn);
117#endif
118
119#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
120extern int __init pcpu_page_first_chunk(size_t reserved_size,
121				pcpu_fc_alloc_fn_t alloc_fn,
122				pcpu_fc_free_fn_t free_fn,
123				pcpu_fc_populate_pte_fn_t populate_pte_fn);
124#endif
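For orientation, the sketch below shows how an architecture's setup_per_cpu_areas() might wire up the embed-style first chunk, loosely modeled on the arch users of this API. It assumes CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK and the v5.9-era memblock interface; the callback names pcpu_fc_alloc_bootmem()/pcpu_fc_free_bootmem() are hypothetical.

#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/mm.h>

/* Hypothetical boot-time callbacks handed to pcpu_embed_first_chunk(). */
static void * __init pcpu_fc_alloc_bootmem(unsigned int cpu, size_t size,
					   size_t align)
{
	return memblock_alloc(size, align);	/* zeroed boot memory */
}

static void __init pcpu_fc_free_bootmem(void *ptr, size_t size)
{
	memblock_free(__pa(ptr), size);
}

void __init setup_per_cpu_areas(void)
{
	int rc;

	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,	/* reserved  */
				    PERCPU_DYNAMIC_RESERVE,	/* dynamic   */
				    PAGE_SIZE,			/* atom_size */
				    NULL,	/* no NUMA distance callback */
				    pcpu_fc_alloc_bootmem,
				    pcpu_fc_free_bootmem);
	if (rc < 0)
		panic("percpu: first chunk setup failed (%d)", rc);

	/*
	 * A real implementation would now record pcpu_base_addr and
	 * pcpu_unit_offsets[] into the arch's __per_cpu_offset[] table.
	 */
}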
125
126extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
127extern bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr);
128extern bool is_kernel_percpu_address(unsigned long addr);
129
130#if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
131extern void __init setup_per_cpu_areas(void);
132#endif
133
134extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp);
135extern void __percpu *__alloc_percpu(size_t size, size_t align);
136extern void free_percpu(void __percpu *__pdata);
137extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
138
139#define alloc_percpu_gfp(type, gfp)					\
140	(typeof(type) __percpu *)__alloc_percpu_gfp(sizeof(type),	\
141						__alignof__(type), gfp)
142#define alloc_percpu(type)						\
143	(typeof(type) __percpu *)__alloc_percpu(sizeof(type),		\
144						__alignof__(type))
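A short usage sketch for the dynamic interface above (the counter name and functions are hypothetical): alloc_percpu() hands out one copy per possible CPU, the local copy is updated cheaply with this_cpu_inc(), and a slow-path sum walks every CPU's copy with per_cpu_ptr():

#include <linux/percpu.h>

static unsigned long __percpu *pkt_count;	/* hypothetical counter */

static int __init pkt_count_init(void)
{
	pkt_count = alloc_percpu(unsigned long);
	if (!pkt_count)
		return -ENOMEM;
	return 0;
}

static void pkt_count_hit(void)
{
	this_cpu_inc(*pkt_count);		/* fast, preempt-safe, local CPU */
}

static unsigned long pkt_count_total(void)
{
	unsigned long sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)		/* slow path: fold all copies */
		sum += *per_cpu_ptr(pkt_count, cpu);
	return sum;
}

static void pkt_count_exit(void)
{
	free_percpu(pkt_count);
}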
145
146extern unsigned long pcpu_nr_pages(void);
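pcpu_nr_pages() returns the number of populated backing pages currently in use by the percpu allocator; the Percpu: line in /proc/meminfo is derived from it. A hedged fragment showing the kind of reporting it enables:

	pr_info("percpu: %lu backing pages (%lu kB)\n",
		pcpu_nr_pages(), pcpu_nr_pages() << (PAGE_SHIFT - 10));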
147
148#endif /* __LINUX_PERCPU_H */
include/linux/percpu.h (v3.1)
  1#ifndef __LINUX_PERCPU_H
  2#define __LINUX_PERCPU_H
  3
  4#include <linux/preempt.h>
  5#include <linux/smp.h>
  6#include <linux/cpumask.h>
  7#include <linux/pfn.h>
  8#include <linux/init.h>
  9
 10#include <asm/percpu.h>
 11
 12/* enough to cover all DEFINE_PER_CPUs in modules */
 13#ifdef CONFIG_MODULES
 14#define PERCPU_MODULE_RESERVE		(8 << 10)
 15#else
 16#define PERCPU_MODULE_RESERVE		0
 17#endif
 18
 19#ifndef PERCPU_ENOUGH_ROOM
 20#define PERCPU_ENOUGH_ROOM						\
 21	(ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) +	\
 22	 PERCPU_MODULE_RESERVE)
 23#endif
 24
 25/*
 26 * Must be an lvalue. Since @var must be a simple identifier,
 27 * we force a syntax error here if it isn't.
 28 */
 29#define get_cpu_var(var) (*({				\
 30	preempt_disable();				\
 31	&__get_cpu_var(var); }))
 32
 33/*
 34 * The weird & is necessary because sparse considers (void)(var) to be
 35 * a direct dereference of percpu variable (var).
 36 */
 37#define put_cpu_var(var) do {				\
 38	(void)&(var);					\
 39	preempt_enable();				\
 40} while (0)
 41
 42#define get_cpu_ptr(var) ({				\
 43	preempt_disable();				\
 44	this_cpu_ptr(var); })
 45
 46#define put_cpu_ptr(var) do {				\
 47	(void)(var);					\
 48	preempt_enable();				\
 49} while (0)
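A minimal sketch of the intended pairing (the variable name is hypothetical): get_cpu_var() disables preemption, so the task cannot migrate to another CPU between the read-modify-write and the matching put_cpu_var():

static DEFINE_PER_CPU(unsigned long, nr_widgets);	/* hypothetical */

static void count_widget(void)
{
	get_cpu_var(nr_widgets)++;	/* preemption disabled from here ... */
	put_cpu_var(nr_widgets);	/* ... until here                    */
}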
 50
 51/* minimum unit size, also is the maximum supported allocation size */
 52#define PCPU_MIN_UNIT_SIZE		PFN_ALIGN(32 << 10)
 53
 54/*
 55 * Percpu allocator can serve percpu allocations before slab is
 56 * initialized which allows slab to depend on the percpu allocator.
 57 * The following two parameters decide how much resource to
 58 * preallocate for this.  Keep PERCPU_DYNAMIC_RESERVE equal to or
 59 * larger than PERCPU_DYNAMIC_EARLY_SIZE.
 60 */
 61#define PERCPU_DYNAMIC_EARLY_SLOTS	128
 62#define PERCPU_DYNAMIC_EARLY_SIZE	(12 << 10)
 63
 64/*
 65 * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
 66 * back on the first chunk for dynamic percpu allocation if arch is
 67 * manually allocating and mapping it for faster access (as a part of
 68 * large page mapping for example).
 69 *
 70 * The following values give between one and two pages of free space
 71 * after typical minimal boot (2-way SMP, single disk and NIC) with
  72 * both defconfig and a distro config on x86_64 and 32-bit x86.  A more
  73 * intelligent way to determine this would be nice.
 74 */
 75#if BITS_PER_LONG > 32
 76#define PERCPU_DYNAMIC_RESERVE		(20 << 10)
 77#else
 78#define PERCPU_DYNAMIC_RESERVE		(12 << 10)
 79#endif
 80
 81extern void *pcpu_base_addr;
 82extern const unsigned long *pcpu_unit_offsets;
 83
 84struct pcpu_group_info {
 85	int			nr_units;	/* aligned # of units */
 86	unsigned long		base_offset;	/* base address offset */
 87	unsigned int		*cpu_map;	/* unit->cpu map, empty
 88						 * entries contain NR_CPUS */
 89};
 90
 91struct pcpu_alloc_info {
 92	size_t			static_size;
 93	size_t			reserved_size;
 94	size_t			dyn_size;
 95	size_t			unit_size;
 96	size_t			atom_size;
 97	size_t			alloc_size;
 98	size_t			__ai_size;	/* internal, don't use */
 99	int			nr_groups;	/* 0 if grouping unnecessary */
100	struct pcpu_group_info	groups[];
101};
102
103enum pcpu_fc {
104	PCPU_FC_AUTO,
105	PCPU_FC_EMBED,
106	PCPU_FC_PAGE,
107
108	PCPU_FC_NR,
109};
110extern const char *pcpu_fc_names[PCPU_FC_NR];
111
112extern enum pcpu_fc pcpu_chosen_fc;
113
114typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int cpu, size_t size,
115				     size_t align);
116typedef void (*pcpu_fc_free_fn_t)(void *ptr, size_t size);
117typedef void (*pcpu_fc_populate_pte_fn_t)(unsigned long addr);
118typedef int (pcpu_fc_cpu_distance_fn_t)(unsigned int from, unsigned int to);
119
120extern struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
121							     int nr_units);
122extern void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai);
123
124extern int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
125					 void *base_addr);
126
127#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
128extern int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
129				size_t atom_size,
130				pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
131				pcpu_fc_alloc_fn_t alloc_fn,
132				pcpu_fc_free_fn_t free_fn);
133#endif
134
135#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
136extern int __init pcpu_page_first_chunk(size_t reserved_size,
137				pcpu_fc_alloc_fn_t alloc_fn,
138				pcpu_fc_free_fn_t free_fn,
139				pcpu_fc_populate_pte_fn_t populate_pte_fn);
140#endif
141
142/*
143 * Use this to get to a cpu's version of the per-cpu object
144 * dynamically allocated. Non-atomic access to the current CPU's
145 * version should probably be combined with get_cpu()/put_cpu().
146 */
147#ifdef CONFIG_SMP
148#define per_cpu_ptr(ptr, cpu)	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
149#else
150#define per_cpu_ptr(ptr, cpu)	({ (void)(cpu); VERIFY_PERCPU_PTR((ptr)); })
151#endif
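A hedged sketch of per_cpu_ptr() on dynamically allocated percpu memory (the struct and names are hypothetical, and stats is assumed to have been set up with alloc_percpu() elsewhere). As the comment above suggests, access to the current CPU's copy is bracketed with get_cpu()/put_cpu() so the CPU number stays valid for the duration:

struct link_stats {			/* hypothetical */
	unsigned long rx_packets;
	unsigned long tx_packets;
};

static struct link_stats __percpu *stats;	/* from alloc_percpu() */

static void note_rx(void)
{
	int cpu = get_cpu();		/* pins the task to this CPU */

	per_cpu_ptr(stats, cpu)->rx_packets++;
	put_cpu();
}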
152
153extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
154extern bool is_kernel_percpu_address(unsigned long addr);
155
156#if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
157extern void __init setup_per_cpu_areas(void);
158#endif
159extern void __init percpu_init_late(void);
160
161extern void __percpu *__alloc_percpu(size_t size, size_t align);
162extern void free_percpu(void __percpu *__pdata);
163extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
164
165#define alloc_percpu(type)	\
166	(typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type))
167
168/*
169 * Optional methods for optimized non-lvalue per-cpu variable access.
170 *
171 * @var can be a percpu variable or a field of it and its size should
 172 * equal char, int or long.  percpu_read() evaluates to an lvalue and
173 * all others to void.
174 *
175 * These operations are guaranteed to be atomic w.r.t. preemption.
176 * The generic versions use plain get/put_cpu_var().  Archs are
177 * encouraged to implement single-instruction alternatives which don't
178 * require preemption protection.
179 */
180#ifndef percpu_read
181# define percpu_read(var)						\
182  ({									\
183	typeof(var) *pr_ptr__ = &(var);					\
184	typeof(var) pr_ret__;						\
185	pr_ret__ = get_cpu_var(*pr_ptr__);				\
186	put_cpu_var(*pr_ptr__);						\
187	pr_ret__;							\
188  })
189#endif
190
191#define __percpu_generic_to_op(var, val, op)				\
192do {									\
193	typeof(var) *pgto_ptr__ = &(var);				\
194	get_cpu_var(*pgto_ptr__) op val;				\
195	put_cpu_var(*pgto_ptr__);					\
196} while (0)
197
198#ifndef percpu_write
199# define percpu_write(var, val)		__percpu_generic_to_op(var, (val), =)
200#endif
201
202#ifndef percpu_add
203# define percpu_add(var, val)		__percpu_generic_to_op(var, (val), +=)
204#endif
205
206#ifndef percpu_sub
207# define percpu_sub(var, val)		__percpu_generic_to_op(var, (val), -=)
208#endif
209
210#ifndef percpu_and
211# define percpu_and(var, val)		__percpu_generic_to_op(var, (val), &=)
212#endif
213
214#ifndef percpu_or
215# define percpu_or(var, val)		__percpu_generic_to_op(var, (val), |=)
216#endif
217
218#ifndef percpu_xor
219# define percpu_xor(var, val)		__percpu_generic_to_op(var, (val), ^=)
220#endif
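A small sketch of these non-lvalue accessors (the variable is hypothetical); each access is atomic with respect to preemption, and on architectures that override them it can compile down to a single instruction:

static DEFINE_PER_CPU(int, softirq_seen);	/* hypothetical */

static void on_softirq(void)
{
	percpu_add(softirq_seen, 1);		/* preempt-atomic RMW  */

	if (percpu_read(softirq_seen) > 1000)	/* preempt-atomic read */
		percpu_write(softirq_seen, 0);
}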
221
222/*
223 * Branching function to split up a function into a set of functions that
224 * are called for different scalar sizes of the objects handled.
225 */
226
227extern void __bad_size_call_parameter(void);
228
229#define __pcpu_size_call_return(stem, variable)				\
230({	typeof(variable) pscr_ret__;					\
231	__verify_pcpu_ptr(&(variable));					\
232	switch(sizeof(variable)) {					\
233	case 1: pscr_ret__ = stem##1(variable);break;			\
234	case 2: pscr_ret__ = stem##2(variable);break;			\
235	case 4: pscr_ret__ = stem##4(variable);break;			\
236	case 8: pscr_ret__ = stem##8(variable);break;			\
237	default:							\
238		__bad_size_call_parameter();break;			\
239	}								\
240	pscr_ret__;							\
241})
242
243#define __pcpu_size_call_return2(stem, variable, ...)			\
244({									\
245	typeof(variable) pscr2_ret__;					\
246	__verify_pcpu_ptr(&(variable));					\
247	switch(sizeof(variable)) {					\
248	case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break;	\
249	case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break;	\
250	case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break;	\
251	case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break;	\
252	default:							\
253		__bad_size_call_parameter(); break;			\
254	}								\
255	pscr2_ret__;							\
256})
257
258/*
259 * Special handling for cmpxchg_double.  cmpxchg_double is passed two
260 * percpu variables.  The first has to be aligned to a double word
261 * boundary and the second has to follow directly thereafter.
262 * We enforce this on all architectures even if they don't support
263 * a double cmpxchg instruction, since it's a cheap requirement, and it
264 * avoids breaking the requirement for architectures with the instruction.
265 */
266#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...)		\
267({									\
268	bool pdcrb_ret__;						\
269	__verify_pcpu_ptr(&pcp1);					\
270	BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2));			\
271	VM_BUG_ON((unsigned long)(&pcp1) % (2 * sizeof(pcp1)));		\
272	VM_BUG_ON((unsigned long)(&pcp2) !=				\
273		  (unsigned long)(&pcp1) + sizeof(pcp1));		\
274	switch(sizeof(pcp1)) {						\
275	case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break;	\
276	case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break;	\
277	case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break;	\
278	case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break;	\
279	default:							\
280		__bad_size_call_parameter(); break;			\
281	}								\
282	pdcrb_ret__;							\
283})
284
285#define __pcpu_size_call(stem, variable, ...)				\
286do {									\
287	__verify_pcpu_ptr(&(variable));					\
288	switch(sizeof(variable)) {					\
289		case 1: stem##1(variable, __VA_ARGS__);break;		\
290		case 2: stem##2(variable, __VA_ARGS__);break;		\
291		case 4: stem##4(variable, __VA_ARGS__);break;		\
292		case 8: stem##8(variable, __VA_ARGS__);break;		\
293		default: 						\
294			__bad_size_call_parameter();break;		\
295	}								\
296} while (0)
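To make the size dispatch concrete, a hedged sketch (the helper my_arch_add32() and both variables are hypothetical, not an existing interface): an architecture that can only accelerate the 4-byte case supplies just that sized helper, and the sizeof() switch above routes 4-byte uses of this_cpu_add() (defined further below) to it, while other sizes keep the generic fallback:

/* Hypothetical arch-provided override, seen before the generic #ifndefs: */
#define this_cpu_add_4(pcp, val)	my_arch_add32(&(pcp), (val))

static DEFINE_PER_CPU(u32, nr_irqs_seen);	/* 4-byte object */
static DEFINE_PER_CPU(u64, nr_bytes_seen);	/* 8-byte object */

static void account_irq(u64 bytes)
{
	this_cpu_add(nr_irqs_seen, 1);		/* sizeof()==4: this_cpu_add_4() */
	this_cpu_add(nr_bytes_seen, bytes);	/* sizeof()==8: generic fallback */
}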
297
298/*
299 * Optimized manipulation for memory allocated through the per cpu
300 * allocator or for addresses of per cpu variables.
301 *
 302 * These operations guarantee exclusivity of access for other operations
303 * on the *same* processor. The assumption is that per cpu data is only
304 * accessed by a single processor instance (the current one).
305 *
306 * The first group is used for accesses that must be done in a
307 * preemption safe way since we know that the context is not preempt
308 * safe. Interrupts may occur. If the interrupt modifies the variable
309 * too then RMW actions will not be reliable.
310 *
311 * The arch code can provide optimized functions in two ways:
312 *
313 * 1. Override the function completely. F.e. define this_cpu_add().
 314 *    The arch must then ensure that the various scalar formats passed
315 *    are handled correctly.
316 *
317 * 2. Provide functions for certain scalar sizes. F.e. provide
318 *    this_cpu_add_2() to provide per cpu atomic operations for 2 byte
319 *    sized RMW actions. If arch code does not provide operations for
320 *    a scalar size then the fallback in the generic code will be
321 *    used.
322 */
323
324#define _this_cpu_generic_read(pcp)					\
325({	typeof(pcp) ret__;						\
326	preempt_disable();						\
327	ret__ = *this_cpu_ptr(&(pcp));					\
328	preempt_enable();						\
329	ret__;								\
330})
331
332#ifndef this_cpu_read
333# ifndef this_cpu_read_1
334#  define this_cpu_read_1(pcp)	_this_cpu_generic_read(pcp)
335# endif
336# ifndef this_cpu_read_2
337#  define this_cpu_read_2(pcp)	_this_cpu_generic_read(pcp)
338# endif
339# ifndef this_cpu_read_4
340#  define this_cpu_read_4(pcp)	_this_cpu_generic_read(pcp)
341# endif
342# ifndef this_cpu_read_8
343#  define this_cpu_read_8(pcp)	_this_cpu_generic_read(pcp)
344# endif
345# define this_cpu_read(pcp)	__pcpu_size_call_return(this_cpu_read_, (pcp))
346#endif
347
348#define _this_cpu_generic_to_op(pcp, val, op)				\
349do {									\
350	preempt_disable();						\
351	*__this_cpu_ptr(&(pcp)) op val;					\
352	preempt_enable();						\
353} while (0)
354
355#ifndef this_cpu_write
356# ifndef this_cpu_write_1
357#  define this_cpu_write_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
358# endif
359# ifndef this_cpu_write_2
360#  define this_cpu_write_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
361# endif
362# ifndef this_cpu_write_4
363#  define this_cpu_write_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
364# endif
365# ifndef this_cpu_write_8
366#  define this_cpu_write_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), =)
367# endif
368# define this_cpu_write(pcp, val)	__pcpu_size_call(this_cpu_write_, (pcp), (val))
369#endif
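A brief sketch of the preempt-safe accessors in ordinary, preemptible context (the variable is hypothetical): each individual access disables preemption internally, so no get_cpu_var()/put_cpu_var() pairing is needed, but two consecutive accesses are not guaranteed to hit the same CPU's copy:

static DEFINE_PER_CPU(int, cache_generation);	/* hypothetical */

static void bump_local_generation(void)
{
	int gen = this_cpu_read(cache_generation);	/* safe on its own     */

	this_cpu_write(cache_generation, gen + 1);	/* may land on another
							 * CPU than the read!  */
}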
370
371#ifndef this_cpu_add
372# ifndef this_cpu_add_1
373#  define this_cpu_add_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
374# endif
375# ifndef this_cpu_add_2
376#  define this_cpu_add_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
377# endif
378# ifndef this_cpu_add_4
379#  define this_cpu_add_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
380# endif
381# ifndef this_cpu_add_8
382#  define this_cpu_add_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), +=)
383# endif
384# define this_cpu_add(pcp, val)		__pcpu_size_call(this_cpu_add_, (pcp), (val))
385#endif
386
387#ifndef this_cpu_sub
388# define this_cpu_sub(pcp, val)		this_cpu_add((pcp), -(val))
389#endif
390
391#ifndef this_cpu_inc
392# define this_cpu_inc(pcp)		this_cpu_add((pcp), 1)
393#endif
394
395#ifndef this_cpu_dec
396# define this_cpu_dec(pcp)		this_cpu_sub((pcp), 1)
397#endif
398
399#ifndef this_cpu_and
400# ifndef this_cpu_and_1
401#  define this_cpu_and_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
402# endif
403# ifndef this_cpu_and_2
404#  define this_cpu_and_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
405# endif
406# ifndef this_cpu_and_4
407#  define this_cpu_and_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
408# endif
409# ifndef this_cpu_and_8
410#  define this_cpu_and_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), &=)
411# endif
412# define this_cpu_and(pcp, val)		__pcpu_size_call(this_cpu_and_, (pcp), (val))
413#endif
414
415#ifndef this_cpu_or
416# ifndef this_cpu_or_1
417#  define this_cpu_or_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
418# endif
419# ifndef this_cpu_or_2
420#  define this_cpu_or_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
421# endif
422# ifndef this_cpu_or_4
423#  define this_cpu_or_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
424# endif
425# ifndef this_cpu_or_8
426#  define this_cpu_or_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), |=)
427# endif
428# define this_cpu_or(pcp, val)		__pcpu_size_call(this_cpu_or_, (pcp), (val))
429#endif
430
431#ifndef this_cpu_xor
432# ifndef this_cpu_xor_1
433#  define this_cpu_xor_1(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
434# endif
435# ifndef this_cpu_xor_2
436#  define this_cpu_xor_2(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
437# endif
438# ifndef this_cpu_xor_4
439#  define this_cpu_xor_4(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
440# endif
441# ifndef this_cpu_xor_8
442#  define this_cpu_xor_8(pcp, val)	_this_cpu_generic_to_op((pcp), (val), ^=)
443# endif
 444# define this_cpu_xor(pcp, val)		__pcpu_size_call(this_cpu_xor_, (pcp), (val))
445#endif
446
447#define _this_cpu_generic_add_return(pcp, val)				\
448({									\
449	typeof(pcp) ret__;						\
450	preempt_disable();						\
451	__this_cpu_add(pcp, val);					\
452	ret__ = __this_cpu_read(pcp);					\
453	preempt_enable();						\
454	ret__;								\
455})
456
457#ifndef this_cpu_add_return
458# ifndef this_cpu_add_return_1
459#  define this_cpu_add_return_1(pcp, val)	_this_cpu_generic_add_return(pcp, val)
460# endif
461# ifndef this_cpu_add_return_2
462#  define this_cpu_add_return_2(pcp, val)	_this_cpu_generic_add_return(pcp, val)
463# endif
464# ifndef this_cpu_add_return_4
465#  define this_cpu_add_return_4(pcp, val)	_this_cpu_generic_add_return(pcp, val)
466# endif
467# ifndef this_cpu_add_return_8
468#  define this_cpu_add_return_8(pcp, val)	_this_cpu_generic_add_return(pcp, val)
469# endif
470# define this_cpu_add_return(pcp, val)	__pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
471#endif
472
473#define this_cpu_sub_return(pcp, val)	this_cpu_add_return(pcp, -(val))
474#define this_cpu_inc_return(pcp)	this_cpu_add_return(pcp, 1)
475#define this_cpu_dec_return(pcp)	this_cpu_add_return(pcp, -1)
476
477#define _this_cpu_generic_xchg(pcp, nval)				\
478({	typeof(pcp) ret__;						\
479	preempt_disable();						\
480	ret__ = __this_cpu_read(pcp);					\
481	__this_cpu_write(pcp, nval);					\
482	preempt_enable();						\
483	ret__;								\
484})
485
486#ifndef this_cpu_xchg
487# ifndef this_cpu_xchg_1
488#  define this_cpu_xchg_1(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
489# endif
490# ifndef this_cpu_xchg_2
491#  define this_cpu_xchg_2(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
492# endif
493# ifndef this_cpu_xchg_4
494#  define this_cpu_xchg_4(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
495# endif
496# ifndef this_cpu_xchg_8
497#  define this_cpu_xchg_8(pcp, nval)	_this_cpu_generic_xchg(pcp, nval)
498# endif
499# define this_cpu_xchg(pcp, nval)	\
500	__pcpu_size_call_return2(this_cpu_xchg_, (pcp), nval)
501#endif
502
503#define _this_cpu_generic_cmpxchg(pcp, oval, nval)			\
504({	typeof(pcp) ret__;						\
505	preempt_disable();						\
506	ret__ = __this_cpu_read(pcp);					\
507	if (ret__ == (oval))						\
508		__this_cpu_write(pcp, nval);				\
509	preempt_enable();						\
510	ret__;								\
511})
512
513#ifndef this_cpu_cmpxchg
514# ifndef this_cpu_cmpxchg_1
515#  define this_cpu_cmpxchg_1(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
516# endif
517# ifndef this_cpu_cmpxchg_2
518#  define this_cpu_cmpxchg_2(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
519# endif
520# ifndef this_cpu_cmpxchg_4
521#  define this_cpu_cmpxchg_4(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
522# endif
523# ifndef this_cpu_cmpxchg_8
524#  define this_cpu_cmpxchg_8(pcp, oval, nval)	_this_cpu_generic_cmpxchg(pcp, oval, nval)
525# endif
526# define this_cpu_cmpxchg(pcp, oval, nval)	\
527	__pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
528#endif
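A sketch of the cmpxchg-style accessor (the slot variable is hypothetical): a per-cpu value is updated only if it still holds the expected old value, e.g. to claim and release a per-cpu slot. In the generic fallback above this is atomic with respect to preemption only; architectures such as x86 implement it with a single (unlocked) cmpxchg instruction:

static DEFINE_PER_CPU(int, slot_owner);		/* hypothetical; 0 == free */

static bool try_claim_local_slot(int id)
{
	/* Succeeds only if this CPU's slot was still free. */
	return this_cpu_cmpxchg(slot_owner, 0, id) == 0;
}

static void release_local_slot(int id)
{
	this_cpu_cmpxchg(slot_owner, id, 0);
}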
529
530/*
531 * cmpxchg_double replaces two adjacent scalars at once.  The first
532 * two parameters are per cpu variables which have to be of the same
533 * size.  A truth value is returned to indicate success or failure
534 * (since a double register result is difficult to handle).  There is
535 * very limited hardware support for these operations, so only certain
536 * sizes may work.
537 */
538#define _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
539({									\
540	int ret__;							\
541	preempt_disable();						\
542	ret__ = __this_cpu_generic_cmpxchg_double(pcp1, pcp2,		\
543			oval1, oval2, nval1, nval2);			\
544	preempt_enable();						\
545	ret__;								\
546})
547
548#ifndef this_cpu_cmpxchg_double
549# ifndef this_cpu_cmpxchg_double_1
550#  define this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
551	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
552# endif
553# ifndef this_cpu_cmpxchg_double_2
554#  define this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
555	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
556# endif
557# ifndef this_cpu_cmpxchg_double_4
558#  define this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
559	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
560# endif
561# ifndef this_cpu_cmpxchg_double_8
562#  define this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
563	_this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
564# endif
565# define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
566	__pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
567#endif
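A hedged sketch of the double-width form (the structure and fields are hypothetical, loosely modeled on pairing a pointer with a generation count): the two per-cpu words must be adjacent, the first must be double-word aligned (arranged by the struct layout below), and both members must have the same size:

struct freelist_pair {				/* hypothetical */
	void		*head;			/* first word                  */
	unsigned long	generation;		/* second word, directly after */
} __aligned(2 * sizeof(void *));

static DEFINE_PER_CPU(struct freelist_pair, local_free);

static bool swap_local_freelist(void *old_head, unsigned long old_gen,
				void *new_head, unsigned long new_gen)
{
	/* Replaces both words only if neither changed in the meantime. */
	return this_cpu_cmpxchg_double(local_free.head, local_free.generation,
				       old_head, old_gen,
				       new_head, new_gen);
}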
568
569/*
570 * Generic percpu operations that do not require preemption handling.
571 * Either we do not care about races or the caller has the
572 * responsibility of handling preemptions issues. Arch code can still
573 * override these instructions since the arch per cpu code may be more
574 * efficient and may actually get race freeness for free (that is the
575 * case for x86 for example).
576 *
577 * If there is no other protection through preempt disable and/or
 578 * disabling interrupts then one of these RMW operations can show unexpected
579 * behavior because the execution thread was rescheduled on another processor
580 * or an interrupt occurred and the same percpu variable was modified from
581 * the interrupt context.
582 */
583#ifndef __this_cpu_read
584# ifndef __this_cpu_read_1
585#  define __this_cpu_read_1(pcp)	(*__this_cpu_ptr(&(pcp)))
586# endif
587# ifndef __this_cpu_read_2
588#  define __this_cpu_read_2(pcp)	(*__this_cpu_ptr(&(pcp)))
589# endif
590# ifndef __this_cpu_read_4
591#  define __this_cpu_read_4(pcp)	(*__this_cpu_ptr(&(pcp)))
592# endif
593# ifndef __this_cpu_read_8
594#  define __this_cpu_read_8(pcp)	(*__this_cpu_ptr(&(pcp)))
595# endif
596# define __this_cpu_read(pcp)	__pcpu_size_call_return(__this_cpu_read_, (pcp))
597#endif
598
599#define __this_cpu_generic_to_op(pcp, val, op)				\
600do {									\
601	*__this_cpu_ptr(&(pcp)) op val;					\
602} while (0)
603
604#ifndef __this_cpu_write
605# ifndef __this_cpu_write_1
606#  define __this_cpu_write_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
607# endif
608# ifndef __this_cpu_write_2
609#  define __this_cpu_write_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
610# endif
611# ifndef __this_cpu_write_4
612#  define __this_cpu_write_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
613# endif
614# ifndef __this_cpu_write_8
615#  define __this_cpu_write_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), =)
616# endif
617# define __this_cpu_write(pcp, val)	__pcpu_size_call(__this_cpu_write_, (pcp), (val))
618#endif
619
620#ifndef __this_cpu_add
621# ifndef __this_cpu_add_1
622#  define __this_cpu_add_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
623# endif
624# ifndef __this_cpu_add_2
625#  define __this_cpu_add_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
626# endif
627# ifndef __this_cpu_add_4
628#  define __this_cpu_add_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
629# endif
630# ifndef __this_cpu_add_8
631#  define __this_cpu_add_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), +=)
632# endif
633# define __this_cpu_add(pcp, val)	__pcpu_size_call(__this_cpu_add_, (pcp), (val))
634#endif
635
636#ifndef __this_cpu_sub
637# define __this_cpu_sub(pcp, val)	__this_cpu_add((pcp), -(val))
638#endif
639
640#ifndef __this_cpu_inc
641# define __this_cpu_inc(pcp)		__this_cpu_add((pcp), 1)
642#endif
643
644#ifndef __this_cpu_dec
645# define __this_cpu_dec(pcp)		__this_cpu_sub((pcp), 1)
646#endif
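A short sketch contrasting these raw forms with the preempt-safe ones above (the counter is hypothetical): the caller supplies the protection, for example by already running in interrupt context or under preempt_disable():

static DEFINE_PER_CPU(unsigned long, irq_work_done);	/* hypothetical */

static void my_irq_handler_tail(void)
{
	/*
	 * Interrupt (or otherwise preempt-disabled) context: the CPU
	 * cannot change under us, so skip the preempt_disable() that
	 * this_cpu_inc() would add and use the raw variant.
	 */
	__this_cpu_inc(irq_work_done);
}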
647
648#ifndef __this_cpu_and
649# ifndef __this_cpu_and_1
650#  define __this_cpu_and_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
651# endif
652# ifndef __this_cpu_and_2
653#  define __this_cpu_and_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
654# endif
655# ifndef __this_cpu_and_4
656#  define __this_cpu_and_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
657# endif
658# ifndef __this_cpu_and_8
659#  define __this_cpu_and_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), &=)
660# endif
661# define __this_cpu_and(pcp, val)	__pcpu_size_call(__this_cpu_and_, (pcp), (val))
662#endif
663
664#ifndef __this_cpu_or
665# ifndef __this_cpu_or_1
666#  define __this_cpu_or_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
667# endif
668# ifndef __this_cpu_or_2
669#  define __this_cpu_or_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
670# endif
671# ifndef __this_cpu_or_4
672#  define __this_cpu_or_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
673# endif
674# ifndef __this_cpu_or_8
675#  define __this_cpu_or_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), |=)
676# endif
677# define __this_cpu_or(pcp, val)	__pcpu_size_call(__this_cpu_or_, (pcp), (val))
678#endif
679
680#ifndef __this_cpu_xor
681# ifndef __this_cpu_xor_1
682#  define __this_cpu_xor_1(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
683# endif
684# ifndef __this_cpu_xor_2
685#  define __this_cpu_xor_2(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
686# endif
687# ifndef __this_cpu_xor_4
688#  define __this_cpu_xor_4(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
689# endif
690# ifndef __this_cpu_xor_8
691#  define __this_cpu_xor_8(pcp, val)	__this_cpu_generic_to_op((pcp), (val), ^=)
692# endif
693# define __this_cpu_xor(pcp, val)	__pcpu_size_call(__this_cpu_xor_, (pcp), (val))
694#endif
695
696#define __this_cpu_generic_add_return(pcp, val)				\
697({									\
698	__this_cpu_add(pcp, val);					\
699	__this_cpu_read(pcp);						\
700})
701
702#ifndef __this_cpu_add_return
703# ifndef __this_cpu_add_return_1
704#  define __this_cpu_add_return_1(pcp, val)	__this_cpu_generic_add_return(pcp, val)
705# endif
706# ifndef __this_cpu_add_return_2
707#  define __this_cpu_add_return_2(pcp, val)	__this_cpu_generic_add_return(pcp, val)
708# endif
709# ifndef __this_cpu_add_return_4
710#  define __this_cpu_add_return_4(pcp, val)	__this_cpu_generic_add_return(pcp, val)
711# endif
712# ifndef __this_cpu_add_return_8
713#  define __this_cpu_add_return_8(pcp, val)	__this_cpu_generic_add_return(pcp, val)
714# endif
 715# define __this_cpu_add_return(pcp, val)	__pcpu_size_call_return2(__this_cpu_add_return_, pcp, val)
716#endif
717
 718#define __this_cpu_sub_return(pcp, val)	__this_cpu_add_return(pcp, -(val))
 719#define __this_cpu_inc_return(pcp)	__this_cpu_add_return(pcp, 1)
 720#define __this_cpu_dec_return(pcp)	__this_cpu_add_return(pcp, -1)
721
722#define __this_cpu_generic_xchg(pcp, nval)				\
723({	typeof(pcp) ret__;						\
724	ret__ = __this_cpu_read(pcp);					\
725	__this_cpu_write(pcp, nval);					\
726	ret__;								\
727})
728
729#ifndef __this_cpu_xchg
730# ifndef __this_cpu_xchg_1
731#  define __this_cpu_xchg_1(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
732# endif
733# ifndef __this_cpu_xchg_2
734#  define __this_cpu_xchg_2(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
735# endif
736# ifndef __this_cpu_xchg_4
737#  define __this_cpu_xchg_4(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
738# endif
739# ifndef __this_cpu_xchg_8
740#  define __this_cpu_xchg_8(pcp, nval)	__this_cpu_generic_xchg(pcp, nval)
741# endif
742# define __this_cpu_xchg(pcp, nval)	\
743	__pcpu_size_call_return2(__this_cpu_xchg_, (pcp), nval)
744#endif
745
746#define __this_cpu_generic_cmpxchg(pcp, oval, nval)			\
747({									\
748	typeof(pcp) ret__;						\
749	ret__ = __this_cpu_read(pcp);					\
750	if (ret__ == (oval))						\
751		__this_cpu_write(pcp, nval);				\
752	ret__;								\
753})
754
755#ifndef __this_cpu_cmpxchg
756# ifndef __this_cpu_cmpxchg_1
757#  define __this_cpu_cmpxchg_1(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
758# endif
759# ifndef __this_cpu_cmpxchg_2
760#  define __this_cpu_cmpxchg_2(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
761# endif
762# ifndef __this_cpu_cmpxchg_4
763#  define __this_cpu_cmpxchg_4(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
764# endif
765# ifndef __this_cpu_cmpxchg_8
766#  define __this_cpu_cmpxchg_8(pcp, oval, nval)	__this_cpu_generic_cmpxchg(pcp, oval, nval)
767# endif
768# define __this_cpu_cmpxchg(pcp, oval, nval)	\
769	__pcpu_size_call_return2(__this_cpu_cmpxchg_, pcp, oval, nval)
770#endif
771
772#define __this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
773({									\
774	int __ret = 0;							\
775	if (__this_cpu_read(pcp1) == (oval1) &&				\
776			 __this_cpu_read(pcp2)  == (oval2)) {		\
777		__this_cpu_write(pcp1, (nval1));			\
778		__this_cpu_write(pcp2, (nval2));			\
779		__ret = 1;						\
780	}								\
781	(__ret);							\
782})
783
784#ifndef __this_cpu_cmpxchg_double
785# ifndef __this_cpu_cmpxchg_double_1
786#  define __this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
787	__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
788# endif
789# ifndef __this_cpu_cmpxchg_double_2
790#  define __this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
791	__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
792# endif
793# ifndef __this_cpu_cmpxchg_double_4
794#  define __this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
795	__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
796# endif
797# ifndef __this_cpu_cmpxchg_double_8
798#  define __this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
799	__this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
800# endif
801# define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
802	__pcpu_double_call_return_bool(__this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
803#endif
804
805/*
806 * IRQ safe versions of the per cpu RMW operations. Note that these operations
807 * are *not* safe against modification of the same variable from another
808 * processors (which one gets when using regular atomic operations)
809 * They are guaranteed to be atomic vs. local interrupts and
810 * preemption only.
811 */
812#define irqsafe_cpu_generic_to_op(pcp, val, op)				\
813do {									\
814	unsigned long flags;						\
815	local_irq_save(flags);						\
816	*__this_cpu_ptr(&(pcp)) op val;					\
817	local_irq_restore(flags);					\
818} while (0)
819
820#ifndef irqsafe_cpu_add
821# ifndef irqsafe_cpu_add_1
822#  define irqsafe_cpu_add_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
823# endif
824# ifndef irqsafe_cpu_add_2
825#  define irqsafe_cpu_add_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
826# endif
827# ifndef irqsafe_cpu_add_4
828#  define irqsafe_cpu_add_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
829# endif
830# ifndef irqsafe_cpu_add_8
831#  define irqsafe_cpu_add_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
832# endif
833# define irqsafe_cpu_add(pcp, val) __pcpu_size_call(irqsafe_cpu_add_, (pcp), (val))
834#endif
835
836#ifndef irqsafe_cpu_sub
837# define irqsafe_cpu_sub(pcp, val)	irqsafe_cpu_add((pcp), -(val))
838#endif
839
840#ifndef irqsafe_cpu_inc
841# define irqsafe_cpu_inc(pcp)	irqsafe_cpu_add((pcp), 1)
842#endif
843
844#ifndef irqsafe_cpu_dec
845# define irqsafe_cpu_dec(pcp)	irqsafe_cpu_sub((pcp), 1)
846#endif
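A sketch of where the irqsafe variants matter (the counter and functions are hypothetical): the same per-cpu counter is modified from both process context and a local interrupt handler, so a plain __this_cpu_add() in the task could lose an update, while irqsafe_cpu_add() cannot; note that neither form provides any cross-CPU atomicity:

static DEFINE_PER_CPU(unsigned long, bytes_queued);	/* hypothetical */

static void enqueue_from_task(unsigned long len)
{
	/* May be interrupted by dequeue_from_irq() on this CPU, so the
	 * read-modify-write must run with local interrupts disabled. */
	irqsafe_cpu_add(bytes_queued, len);
}

static void dequeue_from_irq(unsigned long len)	/* interrupt context */
{
	irqsafe_cpu_sub(bytes_queued, len);
}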
847
848#ifndef irqsafe_cpu_and
849# ifndef irqsafe_cpu_and_1
850#  define irqsafe_cpu_and_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
851# endif
852# ifndef irqsafe_cpu_and_2
853#  define irqsafe_cpu_and_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
854# endif
855# ifndef irqsafe_cpu_and_4
856#  define irqsafe_cpu_and_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
857# endif
858# ifndef irqsafe_cpu_and_8
859#  define irqsafe_cpu_and_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
860# endif
 861# define irqsafe_cpu_and(pcp, val) __pcpu_size_call(irqsafe_cpu_and_, (pcp), (val))
862#endif
863
864#ifndef irqsafe_cpu_or
865# ifndef irqsafe_cpu_or_1
866#  define irqsafe_cpu_or_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
867# endif
868# ifndef irqsafe_cpu_or_2
869#  define irqsafe_cpu_or_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
870# endif
871# ifndef irqsafe_cpu_or_4
872#  define irqsafe_cpu_or_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
873# endif
874# ifndef irqsafe_cpu_or_8
875#  define irqsafe_cpu_or_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
876# endif
 877# define irqsafe_cpu_or(pcp, val) __pcpu_size_call(irqsafe_cpu_or_, (pcp), (val))
878#endif
879
880#ifndef irqsafe_cpu_xor
881# ifndef irqsafe_cpu_xor_1
882#  define irqsafe_cpu_xor_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
883# endif
884# ifndef irqsafe_cpu_xor_2
885#  define irqsafe_cpu_xor_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
886# endif
887# ifndef irqsafe_cpu_xor_4
888#  define irqsafe_cpu_xor_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
889# endif
890# ifndef irqsafe_cpu_xor_8
891#  define irqsafe_cpu_xor_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
892# endif
 893# define irqsafe_cpu_xor(pcp, val) __pcpu_size_call(irqsafe_cpu_xor_, (pcp), (val))
894#endif
895
896#define irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)			\
897({									\
898	typeof(pcp) ret__;						\
899	unsigned long flags;						\
900	local_irq_save(flags);						\
901	ret__ = __this_cpu_read(pcp);					\
902	if (ret__ == (oval))						\
903		__this_cpu_write(pcp, nval);				\
904	local_irq_restore(flags);					\
905	ret__;								\
906})
907
908#ifndef irqsafe_cpu_cmpxchg
909# ifndef irqsafe_cpu_cmpxchg_1
910#  define irqsafe_cpu_cmpxchg_1(pcp, oval, nval)	irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
911# endif
912# ifndef irqsafe_cpu_cmpxchg_2
913#  define irqsafe_cpu_cmpxchg_2(pcp, oval, nval)	irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
914# endif
915# ifndef irqsafe_cpu_cmpxchg_4
916#  define irqsafe_cpu_cmpxchg_4(pcp, oval, nval)	irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
917# endif
918# ifndef irqsafe_cpu_cmpxchg_8
919#  define irqsafe_cpu_cmpxchg_8(pcp, oval, nval)	irqsafe_cpu_generic_cmpxchg(pcp, oval, nval)
920# endif
921# define irqsafe_cpu_cmpxchg(pcp, oval, nval)		\
922	__pcpu_size_call_return2(irqsafe_cpu_cmpxchg_, (pcp), oval, nval)
923#endif
924
925#define irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
926({									\
927	int ret__;							\
928	unsigned long flags;						\
929	local_irq_save(flags);						\
930	ret__ = __this_cpu_generic_cmpxchg_double(pcp1, pcp2,		\
931			oval1, oval2, nval1, nval2);			\
932	local_irq_restore(flags);					\
933	ret__;								\
934})
935
936#ifndef irqsafe_cpu_cmpxchg_double
937# ifndef irqsafe_cpu_cmpxchg_double_1
938#  define irqsafe_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
939	irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
940# endif
941# ifndef irqsafe_cpu_cmpxchg_double_2
942#  define irqsafe_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
943	irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
944# endif
945# ifndef irqsafe_cpu_cmpxchg_double_4
946#  define irqsafe_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
947	irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
948# endif
949# ifndef irqsafe_cpu_cmpxchg_double_8
950#  define irqsafe_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
951	irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
952# endif
953# define irqsafe_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)	\
954	__pcpu_double_call_return_bool(irqsafe_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
955#endif
956
957#endif /* __LINUX_PERCPU_H */