/* SPDX-License-Identifier: GPL-2.0-only */

#ifndef WW_RT

#define MUTEX mutex
#define MUTEX_WAITER mutex_waiter

static inline struct mutex_waiter *
__ww_waiter_first(struct mutex *lock)
{
        struct mutex_waiter *w;

        w = list_first_entry(&lock->wait_list, struct mutex_waiter, list);
        if (list_entry_is_head(w, &lock->wait_list, list))
                return NULL;

        return w;
}

static inline struct mutex_waiter *
__ww_waiter_next(struct mutex *lock, struct mutex_waiter *w)
{
        w = list_next_entry(w, list);
        if (list_entry_is_head(w, &lock->wait_list, list))
                return NULL;

        return w;
}

static inline struct mutex_waiter *
__ww_waiter_prev(struct mutex *lock, struct mutex_waiter *w)
{
        w = list_prev_entry(w, list);
        if (list_entry_is_head(w, &lock->wait_list, list))
                return NULL;

        return w;
}

static inline struct mutex_waiter *
__ww_waiter_last(struct mutex *lock)
{
        struct mutex_waiter *w;

        w = list_last_entry(&lock->wait_list, struct mutex_waiter, list);
        if (list_entry_is_head(w, &lock->wait_list, list))
                return NULL;

        return w;
}

static inline void
__ww_waiter_add(struct mutex *lock, struct mutex_waiter *waiter, struct mutex_waiter *pos)
{
        struct list_head *p = &lock->wait_list;

        if (pos)
                p = &pos->list;
        __mutex_add_waiter(lock, waiter, p);
}

static inline struct task_struct *
__ww_mutex_owner(struct mutex *lock)
{
        return __mutex_owner(lock);
}

static inline bool
__ww_mutex_has_waiters(struct mutex *lock)
{
        return atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS;
}

static inline void lock_wait_lock(struct mutex *lock, unsigned long *flags)
{
        raw_spin_lock_irqsave(&lock->wait_lock, *flags);
}

static inline void unlock_wait_lock(struct mutex *lock, unsigned long *flags)
{
        raw_spin_unlock_irqrestore(&lock->wait_lock, *flags);
}

static inline void lockdep_assert_wait_lock_held(struct mutex *lock)
{
        lockdep_assert_held(&lock->wait_lock);
}

#else /* WW_RT */

#define MUTEX rt_mutex
#define MUTEX_WAITER rt_mutex_waiter

static inline struct rt_mutex_waiter *
__ww_waiter_first(struct rt_mutex *lock)
{
        struct rb_node *n = rb_first(&lock->rtmutex.waiters.rb_root);
        if (!n)
                return NULL;
        return rb_entry(n, struct rt_mutex_waiter, tree.entry);
}

static inline struct rt_mutex_waiter *
__ww_waiter_next(struct rt_mutex *lock, struct rt_mutex_waiter *w)
{
        struct rb_node *n = rb_next(&w->tree.entry);
        if (!n)
                return NULL;
        return rb_entry(n, struct rt_mutex_waiter, tree.entry);
}

static inline struct rt_mutex_waiter *
__ww_waiter_prev(struct rt_mutex *lock, struct rt_mutex_waiter *w)
{
        struct rb_node *n = rb_prev(&w->tree.entry);
        if (!n)
                return NULL;
        return rb_entry(n, struct rt_mutex_waiter, tree.entry);
}

static inline struct rt_mutex_waiter *
__ww_waiter_last(struct rt_mutex *lock)
{
        struct rb_node *n = rb_last(&lock->rtmutex.waiters.rb_root);
        if (!n)
                return NULL;
        return rb_entry(n, struct rt_mutex_waiter, tree.entry);
}

static inline void
__ww_waiter_add(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, struct rt_mutex_waiter *pos)
{
        /* RT unconditionally adds the waiter first and then removes it on error */
}

static inline struct task_struct *
__ww_mutex_owner(struct rt_mutex *lock)
{
        return rt_mutex_owner(&lock->rtmutex);
}

static inline bool
__ww_mutex_has_waiters(struct rt_mutex *lock)
{
        return rt_mutex_has_waiters(&lock->rtmutex);
}

static inline void lock_wait_lock(struct rt_mutex *lock, unsigned long *flags)
{
        raw_spin_lock_irqsave(&lock->rtmutex.wait_lock, *flags);
}

static inline void unlock_wait_lock(struct rt_mutex *lock, unsigned long *flags)
{
        raw_spin_unlock_irqrestore(&lock->rtmutex.wait_lock, *flags);
}

static inline void lockdep_assert_wait_lock_held(struct rt_mutex *lock)
{
        lockdep_assert_held(&lock->rtmutex.wait_lock);
}

#endif /* WW_RT */

/*
 * Wait-Die:
 *   A newer transaction is killed when it makes a request for a lock
 *   being held by an older transaction.
 *
 * Wound-Wait:
 *   A newer transaction is wounded when an older transaction makes a
 *   request for a lock being held by the newer transaction.
 */
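
/*
 * Illustrative usage sketch (not part of this file): struct obj,
 * obj_ww_class and lock_obj_pair() are made-up names for the example, but
 * the ww_acquire/ww_mutex calls are the public API from
 * include/linux/ww_mutex.h. On -EDEADLK (killed under Wait-Die, or wounded
 * under Wound-Wait and subsequently killed in __ww_mutex_check_kill()), the
 * caller must drop everything it holds, may wait for the contended lock
 * with ww_mutex_lock_slow(), and then retry:
 *
 *	struct obj {
 *		struct ww_mutex lock;
 *	};
 *
 *	static DEFINE_WW_CLASS(obj_ww_class);
 *
 *	static void lock_obj_pair(struct obj *a, struct obj *b)
 *	{
 *		struct ww_acquire_ctx ctx;
 *
 *		ww_acquire_init(&ctx, &obj_ww_class);
 *
 *		ww_mutex_lock(&a->lock, &ctx);	// first lock: acquired == 0, cannot die
 *		while (ww_mutex_lock(&b->lock, &ctx) == -EDEADLK) {
 *			// back off fully, then sleep until the contended lock is ours
 *			ww_mutex_unlock(&a->lock);
 *			ww_mutex_lock_slow(&b->lock, &ctx);
 *			swap(a, b);	// retry, taking the contended lock first
 *		}
 *		ww_acquire_done(&ctx);
 *
 *		// ... use a and b ...
 *
 *		ww_mutex_unlock(&a->lock);
 *		ww_mutex_unlock(&b->lock);
 *		ww_acquire_fini(&ctx);
 *	}
 */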

/*
 * Associate the ww_mutex @ww with the context @ww_ctx under which we acquired
 * it.
 */
static __always_inline void
ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{
#ifdef DEBUG_WW_MUTEXES
        /*
         * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
         * but released with a normal mutex_unlock in this call.
         *
         * This should never happen, always use ww_mutex_unlock.
         */
        DEBUG_LOCKS_WARN_ON(ww->ctx);

        /*
         * Not quite done after calling ww_acquire_done()?
         */
        DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

        if (ww_ctx->contending_lock) {
                /*
                 * After -EDEADLK you tried to
                 * acquire a different ww_mutex? Bad!
                 */
                DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

                /*
                 * You called ww_mutex_lock after receiving -EDEADLK,
                 * but 'forgot' to unlock everything else first?
                 */
                DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
                ww_ctx->contending_lock = NULL;
        }

        /*
         * Naughty, using a different class will lead to undefined behavior!
         */
        DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
        ww_ctx->acquired++;
        ww->ctx = ww_ctx;
}

/*
 * Determine if @a is 'less' than @b. IOW, either @a is a lower priority task
 * or, when of equal priority, a younger transaction than @b.
 *
 * Depending on the algorithm, @a will either need to wait for @b, or die.
 */
static inline bool
__ww_ctx_less(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
{
/*
 * Can only do the RT prio for WW_RT, because task->prio isn't stable due to PI,
 * so the wait_list ordering will go wobbly. rt_mutex re-queues the waiter and
 * isn't affected by this.
 */
#ifdef WW_RT
        /* kernel prio; less is more */
        int a_prio = a->task->prio;
        int b_prio = b->task->prio;

        if (rt_or_dl_prio(a_prio) || rt_or_dl_prio(b_prio)) {

                if (a_prio > b_prio)
                        return true;

                if (a_prio < b_prio)
                        return false;

                /* equal static prio */

                if (dl_prio(a_prio)) {
                        if (dl_time_before(b->task->dl.deadline,
                                           a->task->dl.deadline))
                                return true;

                        if (dl_time_before(a->task->dl.deadline,
                                           b->task->dl.deadline))
                                return false;
                }

                /* equal prio */
        }
#endif

        /* FIFO order tie break -- bigger is younger */
        return (signed long)(a->stamp - b->stamp) > 0;
}
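
/*
 * A worked example of the tie break above (illustrative, with made-up stamp
 * values): the signed-difference comparison stays correct even when the
 * ww_class stamp counter wraps around. With a->stamp == 1 issued right after
 * b->stamp == ULONG_MAX, the unsigned subtraction 1 - ULONG_MAX wraps to 2,
 * and (signed long)2 > 0 correctly reports @a as the younger context, whereas
 * a plain a->stamp > b->stamp comparison would get this case backwards.
 */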

/*
 * Wait-Die; wake a lesser waiter context (when locks held) such that it can
 * die.
 *
 * Among waiters with context, only the first one can have other locks acquired
 * already (ctx->acquired > 0), because __ww_mutex_add_waiter() and
 * __ww_mutex_check_kill() wake any but the earliest context.
 */
static bool
__ww_mutex_die(struct MUTEX *lock, struct MUTEX_WAITER *waiter,
               struct ww_acquire_ctx *ww_ctx, struct wake_q_head *wake_q)
{
        if (!ww_ctx->is_wait_die)
                return false;

        if (waiter->ww_ctx->acquired > 0 && __ww_ctx_less(waiter->ww_ctx, ww_ctx)) {
#ifndef WW_RT
                debug_mutex_wake_waiter(lock, waiter);
#endif
                wake_q_add(wake_q, waiter->task);
        }

        return true;
}

/*
 * Wound-Wait; wound a lesser @hold_ctx if it holds the lock.
 *
 * Wound the lock holder if there are waiters with more important transactions
 * than the lock holder's. Even if multiple waiters may wound the lock holder,
 * it's sufficient that only one does.
 */
static bool __ww_mutex_wound(struct MUTEX *lock,
                             struct ww_acquire_ctx *ww_ctx,
                             struct ww_acquire_ctx *hold_ctx,
                             struct wake_q_head *wake_q)
{
        struct task_struct *owner = __ww_mutex_owner(lock);

        lockdep_assert_wait_lock_held(lock);

        /*
         * Possible through __ww_mutex_add_waiter() when we race with
         * ww_mutex_set_context_fastpath(). In that case we'll get here again
         * through __ww_mutex_check_waiters().
         */
        if (!hold_ctx)
                return false;

        /*
         * Can have !owner because of __mutex_unlock_slowpath(), but if owner,
         * it cannot go away because we'll have FLAG_WAITERS set and hold
         * wait_lock.
         */
        if (!owner)
                return false;

        if (ww_ctx->acquired > 0 && __ww_ctx_less(hold_ctx, ww_ctx)) {
                hold_ctx->wounded = 1;

                /*
                 * wake_up_process() paired with set_current_state()
                 * inserts sufficient barriers to make sure @owner either sees
                 * it's wounded in __ww_mutex_check_kill() or has a
                 * wakeup pending to re-read the wounded state.
                 */
                if (owner != current)
                        wake_q_add(wake_q, owner);

                return true;
        }

        return false;
}
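
/*
 * An illustrative Wound-Wait sequence (context names made up): older context
 * O blocks on a lock held by younger context Y while O has
 * ww_ctx->acquired > 0. __ww_mutex_wound() then sets Y->wounded = 1 and
 * queues a wakeup for Y. If Y is sleeping on another ww_mutex of the same
 * class, that wakeup sends it back through __ww_mutex_check_kill(), which
 * sees ctx->wounded and returns -EDEADLK, forcing Y to back off and release
 * the lock O is waiting for.
 */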

/*
 * We just acquired @lock under @ww_ctx, if there are more important contexts
 * waiting behind us on the wait-list, check if they need to die, or wound us.
 *
 * See __ww_mutex_add_waiter() for the list-order construction; basically the
 * list is ordered by stamp, smallest (oldest) first.
 *
 * This relies on never mixing wait-die/wound-wait on the same wait-list,
 * which is currently ensured by that being a ww_class property.
 *
 * The current task must not be on the wait list.
 */
static void
__ww_mutex_check_waiters(struct MUTEX *lock, struct ww_acquire_ctx *ww_ctx,
                         struct wake_q_head *wake_q)
{
        struct MUTEX_WAITER *cur;

        lockdep_assert_wait_lock_held(lock);

        for (cur = __ww_waiter_first(lock); cur;
             cur = __ww_waiter_next(lock, cur)) {

                if (!cur->ww_ctx)
                        continue;

                if (__ww_mutex_die(lock, cur, ww_ctx, wake_q) ||
                    __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx, wake_q))
                        break;
        }
}

/*
 * After acquiring lock with fastpath, where we do not hold wait_lock, set ctx
 * and wake up any waiters so they can recheck.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
        DEFINE_WAKE_Q(wake_q);
        unsigned long flags;

        ww_mutex_lock_acquired(lock, ctx);

        /*
         * The lock->ctx update should be visible on all cores before
         * the WAITERS check is done, otherwise contended waiters might be
         * missed. The contended waiters will either see ww->ctx == NULL
         * and keep spinning, or acquire wait_lock, add themselves to the
         * waiter list and sleep.
         */
        smp_mb(); /* See comments above and below. */

        /*
         * [W] ww->ctx = ctx          [W] MUTEX_FLAG_WAITERS
         *     MB                         MB
         * [R] MUTEX_FLAG_WAITERS     [R] ww->ctx
         *
         * The memory barrier above pairs with the memory barrier in
         * __ww_mutex_add_waiter() and makes sure we either observe ww->ctx
         * and/or !empty list.
         */
        if (likely(!__ww_mutex_has_waiters(&lock->base)))
                return;

        /*
         * Uh oh, we raced in fastpath, check if any of the waiters need to
         * die or wound us.
         */
        lock_wait_lock(&lock->base, &flags);
        __ww_mutex_check_waiters(&lock->base, ctx, &wake_q);
        preempt_disable();
        unlock_wait_lock(&lock->base, &flags);
        wake_up_q(&wake_q);
        preempt_enable();
}

static __always_inline int
__ww_mutex_kill(struct MUTEX *lock, struct ww_acquire_ctx *ww_ctx)
{
        if (ww_ctx->acquired > 0) {
#ifdef DEBUG_WW_MUTEXES
                struct ww_mutex *ww;

                ww = container_of(lock, struct ww_mutex, base);
                DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
                ww_ctx->contending_lock = ww;
#endif
                return -EDEADLK;
        }

        return 0;
}

/*
 * Check the wound condition for the current lock acquire.
 *
 * Wound-Wait: If we're wounded, kill ourselves.
 *
 * Wait-Die: If we're trying to acquire a lock already held by an older
 *           context, kill ourselves.
 *
 * Since __ww_mutex_add_waiter() orders the wait-list on stamp, we only have to
 * look at waiters before us in the wait-list.
 */
static inline int
__ww_mutex_check_kill(struct MUTEX *lock, struct MUTEX_WAITER *waiter,
                      struct ww_acquire_ctx *ctx)
{
        struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
        struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
        struct MUTEX_WAITER *cur;

        if (ctx->acquired == 0)
                return 0;

        if (!ctx->is_wait_die) {
                if (ctx->wounded)
                        return __ww_mutex_kill(lock, ctx);

                return 0;
        }

        if (hold_ctx && __ww_ctx_less(ctx, hold_ctx))
                return __ww_mutex_kill(lock, ctx);

        /*
         * If there is a waiter in front of us that has a context, then its
         * stamp is earlier than ours and we must kill ourselves.
         */
        for (cur = __ww_waiter_prev(lock, waiter); cur;
             cur = __ww_waiter_prev(lock, cur)) {

                if (!cur->ww_ctx)
                        continue;

                return __ww_mutex_kill(lock, ctx);
        }

        return 0;
}

/*
 * Add @waiter to the wait-list, keep the wait-list ordered by stamp, smallest
 * first, such that older contexts are preferred to acquire the lock over
 * younger contexts.
 *
 * Waiters without context are interspersed in FIFO order.
 *
 * Furthermore, for Wait-Die we kill ourselves immediately when possible (there
 * are older contexts already waiting) to avoid unnecessary waiting; for
 * Wound-Wait we ensure the owning context is wounded when it is younger.
 */
static inline int
__ww_mutex_add_waiter(struct MUTEX_WAITER *waiter,
                      struct MUTEX *lock,
                      struct ww_acquire_ctx *ww_ctx,
                      struct wake_q_head *wake_q)
{
        struct MUTEX_WAITER *cur, *pos = NULL;
        bool is_wait_die;

        if (!ww_ctx) {
                __ww_waiter_add(lock, waiter, NULL);
                return 0;
        }

        is_wait_die = ww_ctx->is_wait_die;

        /*
         * Add the waiter before the first waiter with a higher stamp.
         * Waiters without a context are skipped to avoid starving
         * them. Wait-Die waiters may die here. Wound-Wait waiters
         * never die here, but they are sorted in stamp order and
         * may wound the lock holder.
         */
        for (cur = __ww_waiter_last(lock); cur;
             cur = __ww_waiter_prev(lock, cur)) {

                if (!cur->ww_ctx)
                        continue;

                if (__ww_ctx_less(ww_ctx, cur->ww_ctx)) {
                        /*
                         * Wait-Die: if we find an older context waiting, there
                         * is no point in queueing behind it, as we'd have to
                         * die the moment it would acquire the lock.
                         */
                        if (is_wait_die) {
                                int ret = __ww_mutex_kill(lock, ww_ctx);

                                if (ret)
                                        return ret;
                        }

                        break;
                }

                pos = cur;

                /* Wait-Die: ensure younger waiters die. */
                __ww_mutex_die(lock, cur, ww_ctx, wake_q);
        }

        __ww_waiter_add(lock, waiter, pos);

        /*
         * Wound-Wait: if we're blocking on a mutex owned by a younger context,
         * wound it so that we might proceed.
         */
        if (!is_wait_die) {
                struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);

                /*
                 * See ww_mutex_set_context_fastpath(). Orders setting
                 * MUTEX_FLAG_WAITERS vs the ww->ctx load,
                 * such that either we or the fastpath will wound @ww->ctx.
                 */
                smp_mb();
                __ww_mutex_wound(lock, ww_ctx, ww->ctx, wake_q);
        }

        return 0;
}
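
/*
 * An illustrative ordering example for __ww_mutex_add_waiter() (stamps made
 * up): given a wait-list of stamps 1 -> W -> 5, where W is a waiter without
 * a context, adding a waiter with stamp 3 scans backwards from 5, records 5
 * as the insertion point (and may make 5 die under Wait-Die), skips W, stops
 * at the older 1, and inserts before 5:
 *
 *	1 -> W -> 3 -> 5
 *
 * Context-less waiters thus keep their FIFO position while contexts stay
 * sorted oldest-first.
 */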

static inline void __ww_mutex_unlock(struct ww_mutex *lock)
{
        if (lock->ctx) {
#ifdef DEBUG_WW_MUTEXES
                DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
                if (lock->ctx->acquired > 0)
                        lock->ctx->acquired--;
                lock->ctx = NULL;
        }
}