Linux Audio

Check our new training course

Loading...
v3.5.6
  1/*
  2 * Copyright 2010 Tilera Corporation. All Rights Reserved.
  3 *
  4 *   This program is free software; you can redistribute it and/or
  5 *   modify it under the terms of the GNU General Public License
  6 *   as published by the Free Software Foundation, version 2.
  7 *
  8 *   This program is distributed in the hope that it will be useful, but
  9 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 10 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 11 *   NON INFRINGEMENT.  See the GNU General Public License for
 12 *   more details.
 13 */
 14
 15#ifndef _ASM_TILE_BITOPS_32_H
 16#define _ASM_TILE_BITOPS_32_H
 17
 18#include <linux/compiler.h>
 19#include <linux/atomic.h>
 
 20
 21/* Tile-specific routines to support <asm/bitops.h>. */
 22unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask);
 23unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask);
 24unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask);
 25
 26/**
 27 * set_bit - Atomically set a bit in memory
 28 * @nr: the bit to set
 29 * @addr: the address to start counting from
 30 *
 31 * This function is atomic and may not be reordered.
 32 * See __set_bit() if you do not require the atomic guarantees.
 33 * Note that @nr may be almost arbitrarily large; this function is not
 34 * restricted to acting on a single-word quantity.
 35 */
 36static inline void set_bit(unsigned nr, volatile unsigned long *addr)
 37{
 38	_atomic_or(addr + BIT_WORD(nr), BIT_MASK(nr));
 39}
 40
 41/**
 42 * clear_bit - Clears a bit in memory
 43 * @nr: Bit to clear
 44 * @addr: Address to start counting from
 45 *
 46 * clear_bit() is atomic and may not be reordered.
 47 * See __clear_bit() if you do not require the atomic guarantees.
 48 * Note that @nr may be almost arbitrarily large; this function is not
 49 * restricted to acting on a single-word quantity.
 50 *
 51 * clear_bit() may not contain a memory barrier, so if it is used for
 52 * locking purposes, you should call smp_mb__before_clear_bit() and/or
 53 * smp_mb__after_clear_bit() to ensure changes are visible on other cpus.
 54 */
 55static inline void clear_bit(unsigned nr, volatile unsigned long *addr)
 56{
 57	_atomic_andn(addr + BIT_WORD(nr), BIT_MASK(nr));
 58}
 59
 60/**
 61 * change_bit - Toggle a bit in memory
 62 * @nr: Bit to change
 63 * @addr: Address to start counting from
 64 *
 65 * change_bit() is atomic and may not be reordered.
 66 * See __change_bit() if you do not require the atomic guarantees.
 67 * Note that @nr may be almost arbitrarily large; this function is not
 68 * restricted to acting on a single-word quantity.
 69 */
 70static inline void change_bit(unsigned nr, volatile unsigned long *addr)
 71{
 72	_atomic_xor(addr + BIT_WORD(nr), BIT_MASK(nr));
 73}
 74
 75/**
 76 * test_and_set_bit - Set a bit and return its old value
 77 * @nr: Bit to set
 78 * @addr: Address to count from
 79 *
 80 * This operation is atomic and cannot be reordered.
 81 * It also implies a memory barrier.
 82 */
 83static inline int test_and_set_bit(unsigned nr, volatile unsigned long *addr)
 84{
 85	unsigned long mask = BIT_MASK(nr);
 86	addr += BIT_WORD(nr);
 87	smp_mb();  /* barrier for proper semantics */
 88	return (_atomic_or(addr, mask) & mask) != 0;
 89}
 90
 91/**
 92 * test_and_clear_bit - Clear a bit and return its old value
 93 * @nr: Bit to clear
 94 * @addr: Address to count from
 95 *
 96 * This operation is atomic and cannot be reordered.
 97 * It also implies a memory barrier.
 98 */
 99static inline int test_and_clear_bit(unsigned nr, volatile unsigned long *addr)
100{
101	unsigned long mask = BIT_MASK(nr);
102	addr += BIT_WORD(nr);
103	smp_mb();  /* barrier for proper semantics */
104	return (_atomic_andn(addr, mask) & mask) != 0;
105}
106
/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier (issued before the atomic update).
 */
static inline int test_and_change_bit(unsigned nr,
				      volatile unsigned long *addr)
{
	volatile unsigned long *word = addr + BIT_WORD(nr);
	unsigned long mask = BIT_MASK(nr);

	smp_mb();  /* barrier for proper semantics */
	return (_atomic_xor(word, mask) & mask) != 0;
}
123
 124/* See discussion at smp_mb__before_atomic_dec() in <asm/atomic_32.h>. */
/*
 * clear_bit() by itself "may not contain a memory barrier" (see its
 * kernel-doc above), so lock-style users bracket it with these.  The
 * "after" variant is intentionally a no-op on this architecture -- the
 * referenced discussion explains why no trailing barrier is needed.
 */
 125#define smp_mb__before_clear_bit()	smp_mb()
 126#define smp_mb__after_clear_bit()	do {} while (0)
127
128#include <asm-generic/bitops/ext2-atomic.h>
129
130#endif /* _ASM_TILE_BITOPS_32_H */
v3.1
  1/*
  2 * Copyright 2010 Tilera Corporation. All Rights Reserved.
  3 *
  4 *   This program is free software; you can redistribute it and/or
  5 *   modify it under the terms of the GNU General Public License
  6 *   as published by the Free Software Foundation, version 2.
  7 *
  8 *   This program is distributed in the hope that it will be useful, but
  9 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 10 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 11 *   NON INFRINGEMENT.  See the GNU General Public License for
 12 *   more details.
 13 */
 14
 15#ifndef _ASM_TILE_BITOPS_32_H
 16#define _ASM_TILE_BITOPS_32_H
 17
 18#include <linux/compiler.h>
 19#include <linux/atomic.h>
 20#include <asm/system.h>
 21
 22/* Tile-specific routines to support <asm/bitops.h>. */
 23unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask);
 24unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask);
 25unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask);
 26
 27/**
 28 * set_bit - Atomically set a bit in memory
 29 * @nr: the bit to set
 30 * @addr: the address to start counting from
 31 *
 32 * This function is atomic and may not be reordered.
 33 * See __set_bit() if you do not require the atomic guarantees.
 34 * Note that @nr may be almost arbitrarily large; this function is not
 35 * restricted to acting on a single-word quantity.
 36 */
 37static inline void set_bit(unsigned nr, volatile unsigned long *addr)
 38{
 39	_atomic_or(addr + BIT_WORD(nr), BIT_MASK(nr));
 40}
 41
 42/**
 43 * clear_bit - Clears a bit in memory
 44 * @nr: Bit to clear
 45 * @addr: Address to start counting from
 46 *
 47 * clear_bit() is atomic and may not be reordered.
 48 * See __clear_bit() if you do not require the atomic guarantees.
 49 * Note that @nr may be almost arbitrarily large; this function is not
 50 * restricted to acting on a single-word quantity.
 51 *
 52 * clear_bit() may not contain a memory barrier, so if it is used for
 53 * locking purposes, you should call smp_mb__before_clear_bit() and/or
 54 * smp_mb__after_clear_bit() to ensure changes are visible on other cpus.
 55 */
 56static inline void clear_bit(unsigned nr, volatile unsigned long *addr)
 57{
 58	_atomic_andn(addr + BIT_WORD(nr), BIT_MASK(nr));
 59}
 60
 61/**
 62 * change_bit - Toggle a bit in memory
 63 * @nr: Bit to change
 64 * @addr: Address to start counting from
 65 *
 66 * change_bit() is atomic and may not be reordered.
 67 * See __change_bit() if you do not require the atomic guarantees.
 68 * Note that @nr may be almost arbitrarily large; this function is not
 69 * restricted to acting on a single-word quantity.
 70 */
 71static inline void change_bit(unsigned nr, volatile unsigned long *addr)
 72{
 73	_atomic_xor(addr + BIT_WORD(nr), BIT_MASK(nr));
 74}
 75
 76/**
 77 * test_and_set_bit - Set a bit and return its old value
 78 * @nr: Bit to set
 79 * @addr: Address to count from
 80 *
 81 * This operation is atomic and cannot be reordered.
 82 * It also implies a memory barrier.
 83 */
 84static inline int test_and_set_bit(unsigned nr, volatile unsigned long *addr)
 85{
 86	unsigned long mask = BIT_MASK(nr);
 87	addr += BIT_WORD(nr);
 88	smp_mb();  /* barrier for proper semantics */
 89	return (_atomic_or(addr, mask) & mask) != 0;
 90}
 91
 92/**
 93 * test_and_clear_bit - Clear a bit and return its old value
 94 * @nr: Bit to clear
 95 * @addr: Address to count from
 96 *
 97 * This operation is atomic and cannot be reordered.
 98 * It also implies a memory barrier.
 99 */
100static inline int test_and_clear_bit(unsigned nr, volatile unsigned long *addr)
101{
102	unsigned long mask = BIT_MASK(nr);
103	addr += BIT_WORD(nr);
104	smp_mb();  /* barrier for proper semantics */
105	return (_atomic_andn(addr, mask) & mask) != 0;
106}
107
/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier (issued before the atomic update).
 */
static inline int test_and_change_bit(unsigned nr,
				      volatile unsigned long *addr)
{
	volatile unsigned long *word = addr + BIT_WORD(nr);
	unsigned long mask = BIT_MASK(nr);

	smp_mb();  /* barrier for proper semantics */
	return (_atomic_xor(word, mask) & mask) != 0;
}
124
 125/* See discussion at smp_mb__before_atomic_dec() in <asm/atomic_32.h>. */
/*
 * clear_bit() by itself "may not contain a memory barrier" (see its
 * kernel-doc above), so lock-style users bracket it with these.  The
 * "after" variant is intentionally a no-op on this architecture -- the
 * referenced discussion explains why no trailing barrier is needed.
 */
 126#define smp_mb__before_clear_bit()	smp_mb()
 127#define smp_mb__after_clear_bit()	do {} while (0)
128
129#include <asm-generic/bitops/ext2-atomic.h>
130
131#endif /* _ASM_TILE_BITOPS_32_H */