/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/atomic.h>
#include <arch/chip.h>

/* This page is remapped on startup to be hash-for-home. */
int atomic_locks[PAGE_SIZE / sizeof(int)] __page_aligned_bss;

int *__atomic_hashed_lock(volatile void *v)
{
	/* NOTE: this code must match "sys_cmpxchg" in kernel/intvec_32.S */
	/*
	 * Use bits [3, 3 + ATOMIC_HASH_SHIFT) as the lock index.
	 * Using mm works here because atomic_locks is page aligned.
	 */
	unsigned long ptr = __insn_mm((unsigned long)v >> 1,
				      (unsigned long)atomic_locks,
				      2, (ATOMIC_HASH_SHIFT + 2) - 1);
	return (int *)ptr;
}
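/*
 * Illustrative sketch (not part of the original file): assuming the
 * usual masked-merge semantics of the "mm" instruction, the hash
 * computation above is roughly equivalent to the portable C below:
 *
 *	unsigned long mask = ((1UL << (ATOMIC_HASH_SHIFT + 2)) - 1) & ~3UL;
 *	unsigned long ptr = (unsigned long)atomic_locks |
 *			    (((unsigned long)v >> 1) & mask);
 *
 * Bits [3, 3 + ATOMIC_HASH_SHIFT) of the original address become an
 * int-aligned offset into the page-aligned atomic_locks[] array, so
 * the OR behaves like an add.
 */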

#ifdef CONFIG_SMP
/* Return whether the passed pointer is a valid atomic lock pointer. */
static int is_atomic_lock(int *p)
{
	return p >= &atomic_locks[0] && p < &atomic_locks[ATOMIC_HASH_SIZE];
}

void __atomic_fault_unlock(int *irqlock_word)
{
	BUG_ON(!is_atomic_lock(irqlock_word));
	BUG_ON(*irqlock_word != 1);
	*irqlock_word = 0;
}

#endif /* CONFIG_SMP */

static inline int *__atomic_setup(volatile void *v)
{
	/*
	 * Issue a load to the target to bring it into cache; warming
	 * the line up front keeps the window during which the hashed
	 * lock is held by the low-level routines as short as possible.
	 */
	*(volatile int *)v;
	return __atomic_hashed_lock(v);
}

int _atomic_xchg(int *v, int n)
{
	return __atomic_xchg(v, __atomic_setup(v), n).val;
}
EXPORT_SYMBOL(_atomic_xchg);

int _atomic_xchg_add(int *v, int i)
{
	return __atomic_xchg_add(v, __atomic_setup(v), i).val;
}
EXPORT_SYMBOL(_atomic_xchg_add);

int _atomic_xchg_add_unless(int *v, int a, int u)
{
	/*
	 * Note: argument order is switched here since it is easier
	 * to use the first argument consistently as the "old value"
	 * in the assembly, as is done for _atomic_cmpxchg().
	 */
	return __atomic_xchg_add_unless(v, __atomic_setup(v), u, a).val;
}
EXPORT_SYMBOL(_atomic_xchg_add_unless);
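/*
 * Illustrative note (not part of the original file): the semantics
 * are those of atomic_add_unless() -- add 'a' to '*v' unless '*v'
 * already equals 'u', returning the old value.  For example, with a
 * hypothetical reference count:
 *
 *	int old = _atomic_xchg_add_unless(&refcount, 1, 0);
 *
 * increments 'refcount' only if 'old' was nonzero, i.e. takes a
 * reference only while the object is still live.
 */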

int _atomic_cmpxchg(int *v, int o, int n)
{
	return __atomic_cmpxchg(v, __atomic_setup(v), o, n).val;
}
EXPORT_SYMBOL(_atomic_cmpxchg);

unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask)
{
	return __atomic_or((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_or);

unsigned long _atomic_and(volatile unsigned long *p, unsigned long mask)
{
	return __atomic_and((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_and);

unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask)
{
	return __atomic_andn((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_andn);

unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask)
{
	return __atomic_xor((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_xor);

long long _atomic64_xchg(long long *v, long long n)
{
	return __atomic64_xchg(v, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_xchg);

long long _atomic64_xchg_add(long long *v, long long i)
{
	return __atomic64_xchg_add(v, __atomic_setup(v), i);
}
EXPORT_SYMBOL(_atomic64_xchg_add);

long long _atomic64_xchg_add_unless(long long *v, long long a, long long u)
{
	/*
	 * Note: argument order is switched here since it is easier
	 * to use the first argument consistently as the "old value"
	 * in the assembly, as is done for _atomic_cmpxchg().
	 */
	return __atomic64_xchg_add_unless(v, __atomic_setup(v), u, a);
}
EXPORT_SYMBOL(_atomic64_xchg_add_unless);

long long _atomic64_cmpxchg(long long *v, long long o, long long n)
{
	return __atomic64_cmpxchg(v, __atomic_setup(v), o, n);
}
EXPORT_SYMBOL(_atomic64_cmpxchg);

long long _atomic64_and(long long *v, long long n)
{
	return __atomic64_and(v, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_and);

long long _atomic64_or(long long *v, long long n)
{
	return __atomic64_or(v, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_or);

long long _atomic64_xor(long long *v, long long n)
{
	return __atomic64_xor(v, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_xor);

/*
 * If any of the atomic or futex routines hit a bad address (not in
 * the page tables at kernel PL) this routine is called.  The futex
 * routines are never used on kernel space, and the normal atomics and
 * bitops are never used on user space.  So a fault on kernel space
 * must be fatal, but a fault on userspace is a futex fault and we
 * need to return -EFAULT.  Note that the context this routine is
 * invoked in is the context of the "_atomic_xxx()" routines called
 * by the functions in this file.
 */
struct __get_user __atomic_bad_address(int __user *addr)
{
	if (unlikely(!access_ok(VERIFY_WRITE, addr, sizeof(int))))
		panic("Bad address used for kernel atomic op: %p\n", addr);
	return (struct __get_user) { .err = -EFAULT };
}
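/*
 * Illustrative note (not part of the original file): struct __get_user
 * carries the same { val, err } pair that the __atomic_*() helpers
 * above return, so a hypothetical caller on the futex path can simply
 * propagate the result:
 *
 *	struct __get_user g = __atomic_bad_address(uaddr);
 *	if (g.err)
 *		return g.err;
 *
 * which passes -EFAULT back up to the futex caller.
 */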

void __init __init_atomic_per_cpu(void)
{
	/* Validate power-of-two and "bigger than cpus" assumption */
	BUILD_BUG_ON(ATOMIC_HASH_SIZE & (ATOMIC_HASH_SIZE-1));
	BUG_ON(ATOMIC_HASH_SIZE < nr_cpu_ids);

	/*
	 * On TILEPro we prefer to use a single hash-for-home
	 * page, since this means atomic operations are less
	 * likely to encounter a TLB fault and thus should
	 * in general perform faster.  You may wish to disable
	 * this in situations where few hash-for-home tiles
	 * are configured.
	 */
	BUG_ON((unsigned long)atomic_locks % PAGE_SIZE != 0);

	/* The locks must all fit on one page. */
	BUILD_BUG_ON(ATOMIC_HASH_SIZE * sizeof(int) > PAGE_SIZE);

	/*
	 * We use the page offset of the atomic value's address as
	 * an index into atomic_locks, excluding the low 3 bits.
	 * That should not produce more indices than ATOMIC_HASH_SIZE.
	 */
	BUILD_BUG_ON((PAGE_SIZE >> 3) > ATOMIC_HASH_SIZE);
}
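/*
 * Worked example (illustrative; the real constants come from the
 * architecture headers): assuming a hypothetical 64 KB page, the page
 * offset excluding the low 3 bits can take PAGE_SIZE >> 3 == 8192
 * distinct values, so the final BUILD_BUG_ON demands at least 8192
 * hash entries, while the "fit on one page" check above caps the
 * table at PAGE_SIZE / sizeof(int) == 16384 int locks.
 */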