v3.15
/*
 * Generic implementation of 64-bit atomics using spinlocks,
 * useful on processors that don't have 64-bit atomic instructions.
 *
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/atomic.h>

/*
 * We use a hashed array of spinlocks to provide exclusive access
 * to each atomic64_t variable.  Since this is expected to be used on
 * systems with small numbers of CPUs (<= 4 or so), we use a
 * relatively small array of 16 spinlocks to avoid wasting too much
 * memory on the spinlock array.
 */
#define NR_LOCKS	16

/*
 * Ensure each lock is in a separate cacheline.
 */
static union {
	raw_spinlock_t lock;
	char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
	[0 ... (NR_LOCKS - 1)] = {
		.lock =  __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
	},
};

static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
{
	unsigned long addr = (unsigned long) v;

	addr >>= L1_CACHE_SHIFT;
	addr ^= (addr >> 8) ^ (addr >> 16);
	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}
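
/*
 * Illustrative note (not part of the original source): a worked
 * example of the hash above, assuming L1_CACHE_SHIFT == 6 and an
 * atomic64_t at the hypothetical address 0x12345640:
 *
 *	addr >>= 6;                          addr = 0x48D159
 *	addr ^= (addr >> 8) ^ (addr >> 16);  addr = 0x48D159 ^ 0x48D1 ^ 0x48
 *	                                          = 0x4899C0
 *	addr & (NR_LOCKS - 1)                     = 0  ->  lock 0
 *
 * Shifting out the low L1_CACHE_SHIFT bits first means all variables
 * in one cache line share a lock, and the XOR folding mixes higher
 * address bits into the 4-bit lock index.
 */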

long long atomic64_read(const atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_read);

void atomic64_set(atomic64_t *v, long long i)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter = i;
	raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_set);

void atomic64_add(long long a, atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter += a;
	raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_add);

long long atomic64_add_return(long long a, atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter += a;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_add_return);

void atomic64_sub(long long a, atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter -= a;
	raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_sub);

long long atomic64_sub_return(long long a, atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter -= a;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_sub_return);

long long atomic64_dec_if_positive(atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter - 1;
	if (val >= 0)
		v->counter = val;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_dec_if_positive);

long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	if (val == o)
		v->counter = n;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_cmpxchg);

long long atomic64_xchg(atomic64_t *v, long long new)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	v->counter = new;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_xchg);

int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	int ret = 0;

	raw_spin_lock_irqsave(lock, flags);
	if (v->counter != u) {
		v->counter += a;
		ret = 1;
	}
	raw_spin_unlock_irqrestore(lock, flags);
	return ret;
}
EXPORT_SYMBOL(atomic64_add_unless);
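
A usage sketch, not part of the file above: on a 32-bit configuration
that selects CONFIG_GENERIC_ATOMIC64, ordinary atomic64_t users end up
in the spinlocked routines from this file. The byte counter below is
hypothetical and assumes only the API shown here:

#include <linux/atomic.h>

/* Hypothetical 64-bit statistics counter, for illustration only. */
static atomic64_t rx_bytes = ATOMIC64_INIT(0);

static void record_rx(long long len)
{
	/* Falls back to the hashed-spinlock atomic64_add() above. */
	atomic64_add(len, &rx_bytes);
}

static long long read_rx(void)
{
	/* A locked read, so the 64-bit value is never seen torn. */
	return atomic64_read(&rx_bytes);
}
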
v4.6
/*
 * Generic implementation of 64-bit atomics using spinlocks,
 * useful on processors that don't have 64-bit atomic instructions.
 *
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/atomic.h>

/*
 * We use a hashed array of spinlocks to provide exclusive access
 * to each atomic64_t variable.  Since this is expected to be used on
 * systems with small numbers of CPUs (<= 4 or so), we use a
 * relatively small array of 16 spinlocks to avoid wasting too much
 * memory on the spinlock array.
 */
#define NR_LOCKS	16

/*
 * Ensure each lock is in a separate cacheline.
 */
static union {
	raw_spinlock_t lock;
	char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
	[0 ... (NR_LOCKS - 1)] = {
		.lock =  __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
	},
};

static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
{
	unsigned long addr = (unsigned long) v;

	addr >>= L1_CACHE_SHIFT;
	addr ^= (addr >> 8) ^ (addr >> 16);
	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}

long long atomic64_read(const atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_read);

void atomic64_set(atomic64_t *v, long long i)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter = i;
	raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_set);

#define ATOMIC64_OP(op, c_op)						\
void atomic64_##op(long long a, atomic64_t *v)				\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	v->counter c_op a;						\
	raw_spin_unlock_irqrestore(lock, flags);			\
}									\
EXPORT_SYMBOL(atomic64_##op);

#define ATOMIC64_OP_RETURN(op, c_op)					\
long long atomic64_##op##_return(long long a, atomic64_t *v)		\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
	long long val;							\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	val = (v->counter c_op a);					\
	raw_spin_unlock_irqrestore(lock, flags);			\
	return val;							\
}									\
EXPORT_SYMBOL(atomic64_##op##_return);

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_OP_RETURN(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)
ATOMIC64_OP(and, &=)
ATOMIC64_OP(or, |=)
ATOMIC64_OP(xor, ^=)

#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
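
/*
 * Illustrative note (not part of the original source): the generator
 * macros above only factor out duplication.  For instance,
 * ATOMIC64_OP(add, +=) expands to essentially the open-coded
 * atomic64_add() from the v3.15 version of this file:
 *
 *	void atomic64_add(long long a, atomic64_t *v)
 *	{
 *		unsigned long flags;
 *		raw_spinlock_t *lock = lock_addr(v);
 *
 *		raw_spin_lock_irqsave(lock, flags);
 *		v->counter += a;
 *		raw_spin_unlock_irqrestore(lock, flags);
 *	}
 *	EXPORT_SYMBOL(atomic64_add);
 *
 * The new and/or/xor ops get only the void variants here; the
 * *_return forms are generated for add and sub via ATOMIC64_OPS().
 */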
long long atomic64_dec_if_positive(atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter - 1;
	if (val >= 0)
		v->counter = val;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_dec_if_positive);

long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	if (val == o)
		v->counter = n;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_cmpxchg);

long long atomic64_xchg(atomic64_t *v, long long new)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	v->counter = new;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_xchg);

int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	int ret = 0;

	raw_spin_lock_irqsave(lock, flags);
	if (v->counter != u) {
		v->counter += a;
		ret = 1;
	}
	raw_spin_unlock_irqrestore(lock, flags);
	return ret;
}
EXPORT_SYMBOL(atomic64_add_unless);
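
A closing sketch, again hypothetical and assuming only the functions in
this file: atomic64_add_unless() returns nonzero only when it performed
the addition, which is the primitive needed for "take a reference unless
the object is already dead" patterns:

#include <linux/atomic.h>

/* Hypothetical object reference count, for illustration only. */
static atomic64_t obj_refs = ATOMIC64_INIT(1);

/* Succeeds (returns 1) only while the count is still nonzero. */
static int obj_get(void)
{
	return atomic64_add_unless(&obj_refs, 1, 0);
}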