/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Queued spinlock
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#ifndef __ASM_GENERIC_QSPINLOCK_TYPES_H
#define __ASM_GENERIC_QSPINLOCK_TYPES_H

/*
 * Including atomic.h with PARAVIRT on will cause compilation errors because
 * of recursive header file inclusion via paravirt_types.h. So don't include
 * it if PARAVIRT is on.
 */
#ifndef CONFIG_PARAVIRT
#include <linux/types.h>
#include <linux/atomic.h>
#endif

typedef struct qspinlock {
	union {
		atomic_t val;

		/*
		 * By using the whole 2nd least significant byte for the
		 * pending bit, we can allow better optimization of the lock
		 * acquisition for the pending bit holder.
		 */
#ifdef __LITTLE_ENDIAN
		struct {
			u8	locked;
			u8	pending;
		};
		struct {
			u16	locked_pending;
			u16	tail;
		};
#else
		struct {
			u16	tail;
			u16	locked_pending;
		};
		struct {
			u8	reserved[2];
			u8	pending;
			u8	locked;
		};
#endif
	};
} arch_spinlock_t;
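
/*
 * Illustration (not part of the upstream header): because @locked and
 * @pending share the low 16-bit @locked_pending field, the pending-bit
 * holder can hand itself the lock with a single half-word store instead
 * of an atomic cmpxchg. A minimal sketch, modeled on the helper of the
 * same name in kernel/locking/qspinlock.c; it assumes
 * _Q_PENDING_BITS == 8 and WRITE_ONCE() from <linux/compiler.h>:
 */
static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
{
	/* *,1,0 -> *,0,1: clear the pending byte, set the locked byte */
	WRITE_ONCE(lock->locked_pending, 1U);	/* 1U == _Q_LOCKED_VAL, defined below */
}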

/*
 * Initializer
 */
#define	__ARCH_SPIN_LOCK_UNLOCKED	{ { .val = ATOMIC_INIT(0) } }

/*
 * Bitfields in the atomic value:
 *
 * When NR_CPUS < 16K
 *  0- 7: locked byte
 *     8: pending
 *  9-15: not used
 * 16-17: tail index
 * 18-31: tail cpu (+1)
 *
 * When NR_CPUS >= 16K
 *  0- 7: locked byte
 *     8: pending
 *  9-10: tail index
 * 11-31: tail cpu (+1)
 */
#define	_Q_SET_MASK(type)	(((1U << _Q_ ## type ## _BITS) - 1)\
				      << _Q_ ## type ## _OFFSET)
#define _Q_LOCKED_OFFSET	0
#define _Q_LOCKED_BITS		8
#define _Q_LOCKED_MASK		_Q_SET_MASK(LOCKED)
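/*
 * Worked expansion (not in the upstream header): _Q_SET_MASK(LOCKED)
 * expands to ((1U << 8) - 1) << 0 == 0x000000ff, i.e. the low "locked"
 * byte of the atomic value.
 */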

#define _Q_PENDING_OFFSET	(_Q_LOCKED_OFFSET + _Q_LOCKED_BITS)
#if CONFIG_NR_CPUS < (1U << 14)
#define _Q_PENDING_BITS		8
#else
#define _Q_PENDING_BITS		1
#endif
#define _Q_PENDING_MASK		_Q_SET_MASK(PENDING)
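/*
 * Worked out (not in the upstream header): with an 8-bit pending field
 * the tail starts at bit 16, leaving 32 - 16 - 2 = 14 bits for the tail
 * CPU. Since the CPU number is stored +1 (0 means "no tail"), that can
 * encode at most 2^14 - 1 = 16383 CPUs, hence the 16K cut-off above.
 */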

#define _Q_TAIL_IDX_OFFSET	(_Q_PENDING_OFFSET + _Q_PENDING_BITS)
#define _Q_TAIL_IDX_BITS	2
#define _Q_TAIL_IDX_MASK	_Q_SET_MASK(TAIL_IDX)

#define _Q_TAIL_CPU_OFFSET	(_Q_TAIL_IDX_OFFSET + _Q_TAIL_IDX_BITS)
#define _Q_TAIL_CPU_BITS	(32 - _Q_TAIL_CPU_OFFSET)
#define _Q_TAIL_CPU_MASK	_Q_SET_MASK(TAIL_CPU)

#define _Q_TAIL_OFFSET		_Q_TAIL_IDX_OFFSET
#define _Q_TAIL_MASK		(_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK)
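
/*
 * Illustration (not part of the upstream header): how a (cpu, idx) pair
 * is packed into the tail bits. A sketch modeled on encode_tail() in
 * kernel/locking/qspinlock.c; the matching decode and the per-CPU MCS
 * node lookup are elided here:
 */
static inline __pure u32 encode_tail(int cpu, int idx)
{
	u32 tail;

	tail  = (cpu + 1) << _Q_TAIL_CPU_OFFSET;	/* cpu 0 is stored as 1 */
	tail |= idx << _Q_TAIL_IDX_OFFSET;		/* assumes idx < 4 */

	return tail;
}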

#define _Q_LOCKED_VAL		(1U << _Q_LOCKED_OFFSET)
#define _Q_PENDING_VAL		(1U << _Q_PENDING_OFFSET)
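
/*
 * Illustration (not part of the upstream header): _Q_LOCKED_VAL is what
 * the uncontended fast path swaps into an all-zero word. A sketch
 * modeled on queued_spin_lock() in include/asm-generic/qspinlock.h; the
 * slow path lives in kernel/locking/qspinlock.c:
 */
extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);

static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	int val = 0;

	/* 0 -> _Q_LOCKED_VAL: uncontended acquire in one atomic op */
	if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
		return;

	queued_spin_lock_slowpath(lock, val);
}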

#endif /* __ASM_GENERIC_QSPINLOCK_TYPES_H */