// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Queued read/write locks
 *
 * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <trace/events/lock.h>
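
/*
 * For orientation, a sketch of the lock word (lock->cnts) layout that
 * these slowpaths rely on. The authoritative definitions live in
 * include/asm-generic/qrwlock.h; the bit positions below reflect that
 * header but are reproduced here only as a guide:
 *
 *	bits 0-7  (_QW_LOCKED)  - a writer holds the lock
 *	bit  8    (_QW_WAITING) - a writer is queued and waiting
 *	bits 9-31               - reader count, in units of _QR_BIAS
 */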

/**
 * queued_read_lock_slowpath - acquire read lock of a queued rwlock
 * @lock: Pointer to queued rwlock structure
 */
void __lockfunc queued_read_lock_slowpath(struct qrwlock *lock)
{
	/*
	 * Readers come here when they cannot get the lock without waiting.
	 */
	if (unlikely(in_interrupt())) {
		/*
		 * Readers in interrupt context will get the lock immediately
		 * if the writer is just waiting (not holding the lock yet),
		 * so spin with ACQUIRE semantics until the lock is available
		 * without waiting in the queue. Queuing would be unsafe here:
		 * the interrupted context may already hold this lock for
		 * read, and rwlock readers must be able to re-enter from
		 * interrupt context without blocking behind a waiting writer.
		 */
		atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));
		return;
	}
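	/*
	 * The queued_read_lock() fast path speculatively added _QR_BIAS;
	 * back it out before taking a place in the wait queue.
	 */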
	atomic_sub(_QR_BIAS, &lock->cnts);

	trace_contention_begin(lock, LCB_F_SPIN | LCB_F_READ);

	/*
	 * Put the reader into the wait queue
	 */
	arch_spin_lock(&lock->wait_lock);
	atomic_add(_QR_BIAS, &lock->cnts);

	/*
	 * The ACQUIRE semantics of the following spinning code ensure
	 * that accesses can't leak upwards out of our subsequent critical
	 * section in the case that the lock is currently held for write.
	 */
	atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));

	/*
	 * Signal the next one in queue to become queue head
	 */
	arch_spin_unlock(&lock->wait_lock);

	trace_contention_end(lock, 0);
}
EXPORT_SYMBOL(queued_read_lock_slowpath);
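
/*
 * For reference, the fast path that funnels contended readers here is
 * queued_read_lock() in include/asm-generic/qrwlock.h. A sketch (shown
 * in a comment so this file stays self-contained; see the header for
 * the real definition):
 *
 *	static inline void queued_read_lock(struct qrwlock *lock)
 *	{
 *		int cnts;
 *
 *		cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
 *		if (likely(!(cnts & _QW_WMASK)))
 *			return;
 *
 *		// The slowpath backs out _QR_BIAS if it has to queue.
 *		queued_read_lock_slowpath(lock);
 *	}
 */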

/**
 * queued_write_lock_slowpath - acquire write lock of a queued rwlock
 * @lock: Pointer to queued rwlock structure
 */
void __lockfunc queued_write_lock_slowpath(struct qrwlock *lock)
{
	int cnts;

	trace_contention_begin(lock, LCB_F_SPIN | LCB_F_WRITE);

	/* Put the writer into the wait queue */
	arch_spin_lock(&lock->wait_lock);

	/* Try to acquire the lock directly if no reader is present */
	if (!(cnts = atomic_read(&lock->cnts)) &&
	    atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED))
		goto unlock;

	/* Set the waiting flag to notify readers that a writer is pending */
	atomic_or(_QW_WAITING, &lock->cnts);

	/*
	 * When no more readers or writers, set the locked flag. The spin
	 * itself can be relaxed because the subsequent try_cmpxchg
	 * provides the ACQUIRE ordering for the critical section.
	 */
	do {
		cnts = atomic_cond_read_relaxed(&lock->cnts, VAL == _QW_WAITING);
	} while (!atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED));
unlock:
	arch_spin_unlock(&lock->wait_lock);

	trace_contention_end(lock, 0);
}
EXPORT_SYMBOL(queued_write_lock_slowpath);
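
/*
 * Likewise, the write-side fast path lives in queued_write_lock() in
 * include/asm-generic/qrwlock.h. A sketch for reference:
 *
 *	static inline void queued_write_lock(struct qrwlock *lock)
 *	{
 *		int cnts = 0;
 *
 *		// Only the uncontended 0 -> _QW_LOCKED transition is tried
 *		// here; any contention goes through the slowpath above.
 *		if (likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts,
 *						      _QW_LOCKED)))
 *			return;
 *
 *		queued_write_lock_slowpath(lock);
 *	}
 */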