/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RCUWAIT_H_
#define _LINUX_RCUWAIT_H_

#include <linux/rcupdate.h>
#include <linux/sched/signal.h>

/*
 * rcuwait provides a way of blocking and waking up a single
 * task in an rcu-safe manner.
 *
 * The only time @task is non-NULL is when a user is blocked on a
 * condition (or checking whether it needs to block); it is reset to
 * NULL as soon as the condition has succeeded and we are awoken.
 */
struct rcuwait {
	struct task_struct __rcu *task;
};

#define __RCUWAIT_INITIALIZER(name)		\
	{ .task = NULL, }
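
/*
 * Illustrative sketch ("my_wait" is a made-up name): an rcuwait may
 * be initialized statically with the macro above, or at runtime with
 * rcuwait_init() below:
 *
 *	static struct rcuwait my_wait = __RCUWAIT_INITIALIZER(my_wait);
 */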

static inline void rcuwait_init(struct rcuwait *w)
{
	w->task = NULL;
}

/*
 * Note: this provides no serialization and, just as with waitqueues,
 * requires care when judging whether or not the wait is active.
 */
static inline int rcuwait_active(struct rcuwait *w)
{
	return !!rcu_access_pointer(w->task);
}

extern int rcuwait_wake_up(struct rcuwait *w);
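
/*
 * Illustrative waker-side sketch ("foo" is a made-up structure):
 * rcuwait_wake_up() wakes nothing when @task is NULL, so a waker may
 * call it unconditionally; rcuwait_active() can serve as an
 * unserialized hint to skip the call altogether:
 *
 *	if (rcuwait_active(&foo->wait))
 *		rcuwait_wake_up(&foo->wait);
 */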

/*
 * The caller is responsible for locking around rcuwait_wait_event(),
 * and [prepare_to/finish]_rcuwait() such that writes to @task are
 * properly serialized.
 */

static inline void prepare_to_rcuwait(struct rcuwait *w)
{
	rcu_assign_pointer(w->task, current);
}

extern void finish_rcuwait(struct rcuwait *w);
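
/*
 * Illustrative open-coded waiter (a sketch only; "foo" and "foo->done"
 * are made-up names, and the caller supplies whatever locking
 * serializes writes to @task):
 *
 *	prepare_to_rcuwait(&foo->wait);
 *	for (;;) {
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *		if (foo->done)
 *			break;
 *		schedule();
 *	}
 *	finish_rcuwait(&foo->wait);
 *
 * rcuwait_wait_event() below packages this pattern, adding signal
 * handling.
 */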

#define rcuwait_wait_event(w, condition, state)				\
({									\
	int __ret = 0;							\
	prepare_to_rcuwait(w);						\
	for (;;) {							\
		/*							\
		 * Implicit barrier (A) pairs with (B) in		\
		 * rcuwait_wake_up().					\
		 */							\
		set_current_state(state);				\
		if (condition)						\
			break;						\
									\
		if (signal_pending_state(state, current)) {		\
			__ret = -EINTR;					\
			break;						\
		}							\
									\
		schedule();						\
	}								\
	finish_rcuwait(w);						\
	__ret;								\
})
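
/*
 * Illustrative pairing ("foo" and "foo->done" are made-up names):
 *
 * Waiter:
 *	ret = rcuwait_wait_event(&foo->wait, READ_ONCE(foo->done),
 *				 TASK_INTERRUPTIBLE);
 *
 * returns 0 once @foo->done is seen set, or -EINTR if a signal
 * arrived first.
 *
 * Waker:
 *	WRITE_ONCE(foo->done, 1);
 *	rcuwait_wake_up(&foo->wait);
 *
 * Barrier (B) implied by rcuwait_wake_up() pairs with (A) above, so
 * the waiter either observes @foo->done or is woken up.
 */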

#endif /* _LINUX_RCUWAIT_H_ */