1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright IBM Corp. 1999, 2009
4 *
5 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
6 */
7
8#ifndef __ASM_BARRIER_H
9#define __ASM_BARRIER_H
10
11/*
12 * Force strict CPU ordering.
13 * And yes, this is required on UP too when we're talking
14 * to devices.
15 */
16
17#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
18/* Fast-BCR without checkpoint synchronization */
19#define __ASM_BCR_SERIALIZE "bcr 14,0\n"
20#else
21#define __ASM_BCR_SERIALIZE "bcr 15,0\n"
22#endif
23
/*
 * Execute a serializing BCR instruction: a full CPU memory barrier.
 * The "memory" clobber additionally acts as a compiler barrier, so
 * neither the CPU nor the compiler may reorder memory accesses
 * across this point.  Which BCR form is used (fast "bcr 14,0" vs.
 * "bcr 15,0") is selected above via __ASM_BCR_SERIALIZE.
 */
static __always_inline void bcr_serialize(void)
{
	asm volatile(__ASM_BCR_SERIALIZE : : : "memory");
}
28
29#define __mb() bcr_serialize()
30#define __rmb() barrier()
31#define __wmb() barrier()
32#define __dma_rmb() __mb()
33#define __dma_wmb() __mb()
34#define __smp_mb() __mb()
35#define __smp_rmb() __rmb()
36#define __smp_wmb() __wmb()
37
38#define __smp_store_release(p, v) \
39do { \
40 compiletime_assert_atomic_type(*p); \
41 barrier(); \
42 WRITE_ONCE(*p, v); \
43} while (0)
44
45#define __smp_load_acquire(p) \
46({ \
47 typeof(*p) ___p1 = READ_ONCE(*p); \
48 compiletime_assert_atomic_type(*p); \
49 barrier(); \
50 ___p1; \
51})
52
53#define __smp_mb__before_atomic() barrier()
54#define __smp_mb__after_atomic() barrier()
55
56/**
57 * array_index_mask_nospec - generate a mask for array_idx() that is
58 * ~0UL when the bounds check succeeds and 0 otherwise
59 * @index: array element index
60 * @size: number of elements in array
61 */
62#define array_index_mask_nospec array_index_mask_nospec
static inline unsigned long array_index_mask_nospec(unsigned long index,
						    unsigned long size)
{
	unsigned long mask;

	/*
	 * Compute the bounds-check mask without a conditional branch so
	 * it cannot be bypassed under speculative execution (Spectre v1
	 * mitigation): CLGR performs a logical compare and leaves its
	 * result in the condition code; SLBGR of a register with itself
	 * then yields 0 or all-ones depending on the borrow from that
	 * compare.  NOTE(review): exact CC/borrow semantics per the
	 * z/Architecture Principles of Operation — confirm there.
	 */
	if (__builtin_constant_p(size) && size > 0) {
		/*
		 * Constant non-zero size: compare index against size - 1
		 * (safe since size > 0); mask is already in the desired
		 * sense, so return it directly.
		 */
		asm(" clgr %2,%1\n"
		    " slbgr %0,%0\n"
		    :"=d" (mask) : "d" (size-1), "d" (index) :"cc");
		return mask;
	}
	/*
	 * Variable size: compare operands in the opposite order, which
	 * produces the inverted mask — hence the final ~mask.
	 */
	asm(" clgr %1,%2\n"
	    " slbgr %0,%0\n"
	    :"=d" (mask) : "d" (size), "d" (index) :"cc");
	return ~mask;
}
79
80#include <asm-generic/barrier.h>
81
82#endif /* __ASM_BARRIER_H */
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright IBM Corp. 1999, 2009
4 *
5 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
6 */
7
8#ifndef __ASM_BARRIER_H
9#define __ASM_BARRIER_H
10
11#include <asm/march.h>
12
13/*
14 * Force strict CPU ordering.
15 * And yes, this is required on UP too when we're talking
16 * to devices.
17 */
18
19#ifdef MARCH_HAS_Z196_FEATURES
20/* Fast-BCR without checkpoint synchronization */
21#define __ASM_BCR_SERIALIZE "bcr 14,0\n"
22#else
23#define __ASM_BCR_SERIALIZE "bcr 15,0\n"
24#endif
25
/*
 * Execute a serializing BCR instruction: a full CPU memory barrier.
 * The "memory" clobber additionally acts as a compiler barrier, so
 * neither the CPU nor the compiler may reorder memory accesses
 * across this point.  Which BCR form is used (fast "bcr 14,0" vs.
 * "bcr 15,0") is selected above via __ASM_BCR_SERIALIZE.
 */
static __always_inline void bcr_serialize(void)
{
	asm volatile(__ASM_BCR_SERIALIZE : : : "memory");
}
30
31#define __mb() bcr_serialize()
32#define __rmb() barrier()
33#define __wmb() barrier()
34#define __dma_rmb() __mb()
35#define __dma_wmb() __mb()
36#define __smp_mb() __mb()
37#define __smp_rmb() __rmb()
38#define __smp_wmb() __wmb()
39
40#define __smp_store_release(p, v) \
41do { \
42 compiletime_assert_atomic_type(*p); \
43 barrier(); \
44 WRITE_ONCE(*p, v); \
45} while (0)
46
47#define __smp_load_acquire(p) \
48({ \
49 typeof(*p) ___p1 = READ_ONCE(*p); \
50 compiletime_assert_atomic_type(*p); \
51 barrier(); \
52 ___p1; \
53})
54
55#define __smp_mb__before_atomic() barrier()
56#define __smp_mb__after_atomic() barrier()
57
58/**
59 * array_index_mask_nospec - generate a mask for array_idx() that is
60 * ~0UL when the bounds check succeeds and 0 otherwise
61 * @index: array element index
62 * @size: number of elements in array
63 */
64#define array_index_mask_nospec array_index_mask_nospec
static inline unsigned long array_index_mask_nospec(unsigned long index,
						    unsigned long size)
{
	unsigned long mask;

	/*
	 * Compute the bounds-check mask without a conditional branch so
	 * it cannot be bypassed under speculative execution (Spectre v1
	 * mitigation): CLGR performs a logical compare and leaves its
	 * result in the condition code; SLBGR of a register with itself
	 * then yields 0 or all-ones depending on the borrow from that
	 * compare.  NOTE(review): exact CC/borrow semantics per the
	 * z/Architecture Principles of Operation — confirm there.
	 */
	if (__builtin_constant_p(size) && size > 0) {
		/*
		 * Constant non-zero size: compare index against size - 1
		 * (safe since size > 0); mask is already in the desired
		 * sense, so return it directly.
		 */
		asm(" clgr %2,%1\n"
		    " slbgr %0,%0\n"
		    :"=d" (mask) : "d" (size-1), "d" (index) :"cc");
		return mask;
	}
	/*
	 * Variable size: compare operands in the opposite order, which
	 * produces the inverted mask — hence the final ~mask.
	 */
	asm(" clgr %1,%2\n"
	    " slbgr %0,%0\n"
	    :"=d" (mask) : "d" (size), "d" (index) :"cc");
	return ~mask;
}
81
82#include <asm-generic/barrier.h>
83
84#endif /* __ASM_BARRIER_H */