// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/once.h>
#include <linux/random.h>
#include <linux/module.h>

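/*
 * Implementation of the DO_ONCE() helpers (see <linux/once.h>): run a
 * function exactly once, then disable the guarding static branch so
 * that subsequent invocations fall through at near-zero cost.
 *
 * The static branch cannot be disabled inline: DO_ONCE() may execute
 * in atomic context, while static_branch_disable() may sleep (it
 * patches kernel text). The disable is therefore deferred to process
 * context via a workqueue, using struct once_work below.
 */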
struct once_work {
	struct work_struct work;
	struct static_key_true *key;
	struct module *module;
};

static void once_deferred(struct work_struct *w)
{
	struct once_work *work;

	work = container_of(w, struct once_work, work);
	BUG_ON(!static_key_enabled(work->key));
	static_branch_disable(work->key);
	/* Drop the reference that kept the key's module alive. */
	module_put(work->module);
	kfree(work);
}

static void once_disable_jump(struct static_key_true *key, struct module *mod)
{
	struct once_work *w;

	/* May run in atomic context, hence GFP_ATOMIC. On allocation
	 * failure the branch merely stays enabled; *done still keeps
	 * the function from running again, only the fast path is lost.
	 */
	w = kmalloc(sizeof(*w), GFP_ATOMIC);
	if (!w)
		return;

	INIT_WORK(&w->work, once_deferred);
	w->key = key;
	w->module = mod;
	/* Pin the module so the key is still valid when the deferred
	 * work runs; once_deferred() drops this reference.
	 */
	__module_get(mod);
	schedule_work(&w->work);
}

static DEFINE_SPINLOCK(once_lock);

bool __do_once_start(bool *done, unsigned long *flags)
	__acquires(once_lock)
{
	spin_lock_irqsave(&once_lock, *flags);
	if (*done) {
		spin_unlock_irqrestore(&once_lock, *flags);
		/* Keep sparse happy by restoring an even lock count on
		 * this lock. In case we return here, we don't call into
		 * __do_once_done but return early in the DO_ONCE() macro.
		 */
		__acquire(once_lock);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(__do_once_start);

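/*
 * __do_once_done() runs after the caller's function. Note that the
 * DO_ONCE() macro invokes that function between __do_once_start() and
 * __do_once_done(), i.e. with once_lock held and interrupts disabled,
 * so the function must not sleep; the sleepable variant further down
 * serializes on a mutex instead.
 */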
void __do_once_done(bool *done, struct static_key_true *once_key,
		    unsigned long *flags, struct module *mod)
	__releases(once_lock)
{
	*done = true;
	spin_unlock_irqrestore(&once_lock, *flags);
	once_disable_jump(once_key, mod);
}
EXPORT_SYMBOL(__do_once_done);

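/*
 * Usage sketch (illustrative, not part of this file): callers use the
 * DO_ONCE() macro from <linux/once.h> rather than the helpers above,
 * e.g. to initialize a secret on first use:
 *
 *	static u32 secret;
 *
 *	static void init_secret(void)
 *	{
 *		get_random_bytes(&secret, sizeof(secret));
 *	}
 *	...
 *	DO_ONCE(init_secret);
 *
 * get_random_once(buf, nbytes) in <linux/once.h> is a ready-made
 * wrapper that expands to DO_ONCE(get_random_bytes, (buf), (nbytes)).
 */
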
static DEFINE_MUTEX(once_mutex);

bool __do_once_sleepable_start(bool *done)
	__acquires(once_mutex)
{
	mutex_lock(&once_mutex);
	if (*done) {
		mutex_unlock(&once_mutex);
		/* Keep sparse happy by restoring an even lock count on
		 * this mutex. In case we return here, we don't call into
		 * __do_once_done but return early in the DO_ONCE_SLEEPABLE() macro.
		 */
		__acquire(once_mutex);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(__do_once_sleepable_start);

void __do_once_sleepable_done(bool *done, struct static_key_true *once_key,
			      struct module *mod)
	__releases(once_mutex)
{
	*done = true;
	mutex_unlock(&once_mutex);
	once_disable_jump(once_key, mod);
}
EXPORT_SYMBOL(__do_once_sleepable_done);
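
/*
 * Usage sketch (illustrative): DO_ONCE_SLEEPABLE() from <linux/once.h>
 * pairs with the two helpers above and may be used when the once'd
 * function needs to sleep, e.g.
 *
 *	DO_ONCE_SLEEPABLE(func_that_sleeps, arg);
 *
 * (func_that_sleeps/arg are placeholders.) It serializes on once_mutex
 * instead of the irq-safe once_lock.
 */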