/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_ENTRYCOMMON_H
#define __LINUX_ENTRYCOMMON_H

#include <linux/static_call_types.h>
#include <linux/ptrace.h>
#include <linux/syscalls.h>
#include <linux/seccomp.h>
#include <linux/sched.h>

#include <asm/entry-common.h>

/*
 * Define dummy _TIF work flags if not defined by the architecture or for
 * disabled functionality.
 */
#ifndef _TIF_PATCH_PENDING
# define _TIF_PATCH_PENDING		(0)
#endif

#ifndef _TIF_UPROBE
# define _TIF_UPROBE			(0)
#endif

/*
 * SYSCALL_WORK flags handled in syscall_enter_from_user_mode()
 */
#ifndef ARCH_SYSCALL_WORK_ENTER
# define ARCH_SYSCALL_WORK_ENTER	(0)
#endif

/*
 * SYSCALL_WORK flags handled in syscall_exit_to_user_mode()
 */
#ifndef ARCH_SYSCALL_WORK_EXIT
# define ARCH_SYSCALL_WORK_EXIT		(0)
#endif
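
/*
 * Illustrative sketch (not part of this header): an architecture opts in
 * to extra entry/exit work by defining these in its asm/entry-common.h,
 * which is included above. The flag name below is hypothetical:
 *
 *	#define ARCH_SYSCALL_WORK_ENTER	SYSCALL_WORK_MY_ARCH_FEATURE
 *	#define ARCH_SYSCALL_WORK_EXIT	SYSCALL_WORK_MY_ARCH_FEATURE
 */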

#define SYSCALL_WORK_ENTER	(SYSCALL_WORK_SECCOMP |			\
				 SYSCALL_WORK_SYSCALL_TRACEPOINT |	\
				 SYSCALL_WORK_SYSCALL_TRACE |		\
				 SYSCALL_WORK_SYSCALL_EMU |		\
				 SYSCALL_WORK_SYSCALL_AUDIT |		\
				 SYSCALL_WORK_SYSCALL_USER_DISPATCH |	\
				 ARCH_SYSCALL_WORK_ENTER)
#define SYSCALL_WORK_EXIT	(SYSCALL_WORK_SYSCALL_TRACEPOINT |	\
				 SYSCALL_WORK_SYSCALL_TRACE |		\
				 SYSCALL_WORK_SYSCALL_AUDIT |		\
				 SYSCALL_WORK_SYSCALL_USER_DISPATCH |	\
				 SYSCALL_WORK_SYSCALL_EXIT_TRAP |	\
				 ARCH_SYSCALL_WORK_EXIT)

/*
 * TIF flags handled in exit_to_user_mode_loop()
 */
#ifndef ARCH_EXIT_TO_USER_MODE_WORK
# define ARCH_EXIT_TO_USER_MODE_WORK	(0)
#endif

#define EXIT_TO_USER_MODE_WORK						\
	(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE |		\
	 _TIF_NEED_RESCHED | _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL |	\
	 ARCH_EXIT_TO_USER_MODE_WORK)
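
/*
 * Illustrative sketch (not part of this header): a simplified view of how
 * the generic exit_to_user_mode_loop() in kernel/entry/common.c consumes
 * these flags:
 *
 *	while (ti_work & EXIT_TO_USER_MODE_WORK) {
 *		local_irq_enable_exit_to_user(ti_work);
 *
 *		if (ti_work & _TIF_NEED_RESCHED)
 *			schedule();
 *		if (ti_work & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
 *			arch_do_signal_or_restart(regs);
 *		// ... uprobes, live patching, notify-resume ...
 *
 *		arch_exit_to_user_mode_work(regs, ti_work);
 *		local_irq_disable_exit_to_user();
 *		ti_work = read_thread_flags();
 *	}
 */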

/**
 * arch_enter_from_user_mode - Architecture specific sanity check for user mode regs
 * @regs: Pointer to current's pt_regs
 *
 * Defaults to an empty implementation. Can be replaced by architecture
 * specific code.
 *
 * Invoked from syscall_enter_from_user_mode() in the non-instrumentable
 * section. Use __always_inline so the compiler cannot push it out of line
 * and make it instrumentable.
 */
static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs);

#ifndef arch_enter_from_user_mode
static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs) {}
#endif
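
/*
 * Illustrative sketch (not part of this header): an architecture can
 * provide its own version in asm/entry-common.h and advertise it with a
 * matching #define. The sanity check shown is a hypothetical example:
 *
 *	static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs)
 *	{
 *		BUG_ON(!user_mode(regs));	// hypothetical sanity check
 *	}
 *	#define arch_enter_from_user_mode arch_enter_from_user_mode
 */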

/**
 * enter_from_user_mode - Establish state when coming from user mode
 *
 * Syscall/interrupt entry disables interrupts, but user mode is traced as
 * having interrupts enabled. With NO_HZ_FULL, RCU might also be idle.
 *
 * 1) Tell lockdep that interrupts are disabled
 * 2) Invoke context tracking if enabled to reactivate RCU
 * 3) Trace interrupts off state
 *
 * Invoked from architecture specific syscall entry code with interrupts
 * disabled. The calling code has to be non-instrumentable. When the
 * function returns all state is correct and interrupts are still
 * disabled. The subsequent functions can be instrumented.
 *
 * This is invoked when there is architecture specific functionality to be
 * done between establishing state and enabling interrupts. The caller must
 * enable interrupts before invoking syscall_enter_from_user_mode_work().
 */
void enter_from_user_mode(struct pt_regs *regs);
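
/*
 * Illustrative sketch (not part of this header): a hypothetical
 * architecture's syscall entry using the split functions. The arch helper
 * name is made up:
 *
 *	enter_from_user_mode(regs);
 *	instrumentation_begin();
 *	arch_fixup_syscall_args(regs);	// arch specific work, hypothetical
 *	local_irq_enable();
 *	nr = syscall_enter_from_user_mode_work(regs, nr);
 *	instrumentation_end();
 */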

/**
 * syscall_enter_from_user_mode_prepare - Establish state and enable interrupts
 * @regs: Pointer to current's pt_regs
 *
 * Invoked from architecture specific syscall entry code with interrupts
 * disabled. The calling code has to be non-instrumentable. When the
 * function returns all state is correct, interrupts are enabled and the
 * subsequent functions can be instrumented.
 *
 * This handles lockdep, RCU (context tracking) and tracing state, i.e.
 * the functionality provided by enter_from_user_mode().
 *
 * This is invoked when there is extra architecture specific functionality
 * to be done between establishing state and handling user mode entry work.
 */
void syscall_enter_from_user_mode_prepare(struct pt_regs *regs);

/**
 * syscall_enter_from_user_mode_work - Check and handle work before invoking
 *				       a syscall
 * @regs: Pointer to current's pt_regs
 * @syscall: The syscall number
 *
 * Invoked from architecture specific syscall entry code with interrupts
 * enabled after invoking syscall_enter_from_user_mode_prepare() and extra
 * architecture specific work.
 *
 * Returns: The original or a modified syscall number
 *
 * If the returned syscall number is -1 then the syscall should be
 * skipped. In this case the caller may invoke syscall_set_error() or
 * syscall_set_return_value() first. If neither of those are called and -1
 * is returned, then the syscall will fail with ENOSYS.
 *
 * It handles the following work items:
 *
 *  1) syscall_work flag dependent invocations of
 *     ptrace_report_syscall_entry(), __secure_computing(), trace_sys_enter()
 *  2) Invocation of audit_syscall_entry()
 */
long syscall_enter_from_user_mode_work(struct pt_regs *regs, long syscall);
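
/*
 * Illustrative sketch (not part of this header): how a caller is expected
 * to honour the -1 return. The dispatch helper is schematic:
 *
 *	nr = syscall_enter_from_user_mode_work(regs, nr);
 *	if (nr == -1) {
 *		// A tracer asked to skip the syscall. Pick the result
 *		// explicitly, otherwise the syscall fails with ENOSYS.
 *		syscall_set_return_value(current, regs, 0, 0);
 *	} else {
 *		invoke_syscall(regs, nr);	// schematic arch dispatch
 *	}
 */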

/**
 * syscall_enter_from_user_mode - Establish state and check and handle work
 *				  before invoking a syscall
 * @regs: Pointer to current's pt_regs
 * @syscall: The syscall number
 *
 * Invoked from architecture specific syscall entry code with interrupts
 * disabled. The calling code has to be non-instrumentable. When the
 * function returns all state is correct, interrupts are enabled and the
 * subsequent functions can be instrumented.
 *
 * This is a combination of syscall_enter_from_user_mode_prepare() and
 * syscall_enter_from_user_mode_work().
 *
 * Returns: The original or a modified syscall number. See
 * syscall_enter_from_user_mode_work() for further explanation.
 */
long syscall_enter_from_user_mode(struct pt_regs *regs, long syscall);
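
/*
 * Illustrative sketch (not part of this header): the common shape of an
 * architecture's C syscall entry point built on the combined helpers.
 * Names other than the generic entry/exit functions are hypothetical:
 *
 *	void arch_syscall_entry(struct pt_regs *regs)
 *	{
 *		long nr = syscall_get_nr(current, regs);
 *
 *		nr = syscall_enter_from_user_mode(regs, nr);
 *		if (nr != -1)
 *			invoke_syscall(regs, nr);	// arch dispatch, hypothetical
 *		syscall_exit_to_user_mode(regs);
 *	}
 */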

/**
 * local_irq_enable_exit_to_user - Exit to user variant of local_irq_enable()
 * @ti_work: Cached TIF flags gathered with interrupts disabled
 *
 * Defaults to local_irq_enable(). Can be supplied by architecture specific
 * code.
 */
static inline void local_irq_enable_exit_to_user(unsigned long ti_work);

#ifndef local_irq_enable_exit_to_user
static inline void local_irq_enable_exit_to_user(unsigned long ti_work)
{
	local_irq_enable();
}
#endif

/**
 * local_irq_disable_exit_to_user - Exit to user variant of local_irq_disable()
 *
 * Defaults to local_irq_disable(). Can be supplied by architecture specific
 * code.
 */
static inline void local_irq_disable_exit_to_user(void);

#ifndef local_irq_disable_exit_to_user
static inline void local_irq_disable_exit_to_user(void)
{
	local_irq_disable();
}
#endif

/**
 * arch_exit_to_user_mode_work - Architecture specific TIF work for exit
 *				 to user mode.
 * @regs: Pointer to current's pt_regs
 * @ti_work: Cached TIF flags gathered with interrupts disabled
 *
 * Invoked from exit_to_user_mode_loop() with interrupts enabled.
 *
 * Defaults to NOOP. Can be supplied by architecture specific code.
 */
static inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
					       unsigned long ti_work);

#ifndef arch_exit_to_user_mode_work
static inline void arch_exit_to_user_mode_work(struct pt_regs *regs,
					       unsigned long ti_work)
{
}
#endif

/**
 * arch_exit_to_user_mode_prepare - Architecture specific preparation for
 *				    exit to user mode.
 * @regs: Pointer to current's pt_regs
 * @ti_work: Cached TIF flags gathered with interrupts disabled
 *
 * Invoked from exit_to_user_mode_prepare() with interrupts disabled as the
 * last function before return. Defaults to NOOP.
 */
static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
						  unsigned long ti_work);

#ifndef arch_exit_to_user_mode_prepare
static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
						  unsigned long ti_work)
{
}
#endif

/**
 * arch_exit_to_user_mode - Architecture specific final work before
 *			    exit to user mode.
 *
 * Invoked from exit_to_user_mode() with interrupts disabled as the last
 * function before return. Defaults to NOOP.
 *
 * This needs to be __always_inline because it is non-instrumentable code
 * invoked after context tracking switched to user mode.
 *
 * An architecture implementation must not do anything complex, no locking
 * etc. The main purpose is for speculation mitigations.
 */
static __always_inline void arch_exit_to_user_mode(void);

#ifndef arch_exit_to_user_mode
static __always_inline void arch_exit_to_user_mode(void) { }
#endif
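
/*
 * Illustrative sketch (not part of this header): a hypothetical mitigation
 * hook. The barrier function is made up:
 *
 *	static __always_inline void arch_exit_to_user_mode(void)
 *	{
 *		speculation_barrier();	// hypothetical arch specific barrier
 *	}
 *	#define arch_exit_to_user_mode arch_exit_to_user_mode
 */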

/**
 * arch_do_signal_or_restart - Architecture specific signal delivery function
 * @regs: Pointer to current's pt_regs
 *
 * Invoked from exit_to_user_mode_loop().
 */
void arch_do_signal_or_restart(struct pt_regs *regs);

/**
 * exit_to_user_mode - Fixup state when exiting to user mode
 *
 * Syscall/interrupt exit enables interrupts, but the kernel is still in an
 * interrupts-disabled state when this is invoked. RCU is also told about
 * the mode transition.
 *
 * 1) Trace interrupts on state
 * 2) Invoke context tracking if enabled to adjust RCU state
 * 3) Invoke architecture specific last minute exit code, e.g. speculation
 *    mitigations, etc.: arch_exit_to_user_mode()
 * 4) Tell lockdep that interrupts are enabled
 *
 * Invoked from architecture specific code when syscall_exit_to_user_mode()
 * is not suitable as the last step before returning to userspace. Must be
 * invoked with interrupts disabled and the caller must be
 * non-instrumentable.
 * The caller has to invoke syscall_exit_to_user_mode_work() before this.
 */
void exit_to_user_mode(void);

/**
 * syscall_exit_to_user_mode_work - Handle work before returning to user mode
 * @regs: Pointer to current's pt_regs
 *
 * Same as step 1 and 2 of syscall_exit_to_user_mode() but without calling
 * exit_to_user_mode() to perform the final transition to user mode.
 *
 * Calling convention is the same as for syscall_exit_to_user_mode() and it
 * returns with all work handled and interrupts disabled. The caller must
 * invoke exit_to_user_mode() before actually switching to user mode to
 * make the final state transitions. Interrupts must stay disabled between
 * return from this function and the invocation of exit_to_user_mode().
 */
void syscall_exit_to_user_mode_work(struct pt_regs *regs);
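
/*
 * Illustrative sketch (not part of this header): the split exit path for
 * an architecture which needs extra work between the two steps. The arch
 * helper name is hypothetical:
 *
 *	syscall_exit_to_user_mode_work(regs);
 *	arch_prepare_user_return(regs);		// arch specific, hypothetical
 *	exit_to_user_mode();
 *	// return to the low level ASM return path, interrupts stay disabled
 */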

/**
 * syscall_exit_to_user_mode - Handle work before returning to user mode
 * @regs: Pointer to current's pt_regs
 *
 * Invoked with interrupts enabled and fully valid regs. Returns with all
 * work handled, interrupts disabled such that the caller can immediately
 * switch to user mode. Called from architecture specific syscall and ret
 * from fork code.
 *
 * The call order is:
 *  1) One-time syscall exit work:
 *	- rseq syscall exit
 *	- audit
 *	- syscall tracing
 *	- ptrace (single stepping)
 *
 *  2) Preparatory work
 *	- Exit to user mode loop (common TIF handling). Invokes
 *	  arch_exit_to_user_mode_work() for architecture specific TIF work
 *	- Architecture specific one time work arch_exit_to_user_mode_prepare()
 *	- Address limit and lockdep checks
 *
 *  3) Final transition (lockdep, tracing, context tracking, RCU), i.e. the
 *     functionality in exit_to_user_mode().
 *
 * This is a combination of syscall_exit_to_user_mode_work() (1,2) and
 * exit_to_user_mode(). This function is preferred unless there is a
 * compelling architectural reason to use the separate functions.
 */
void syscall_exit_to_user_mode(struct pt_regs *regs);

/**
 * irqentry_enter_from_user_mode - Establish state before invoking the irq handler
 * @regs: Pointer to current's pt_regs
 *
 * Invoked from architecture specific entry code with interrupts disabled.
 * Can only be called when the interrupt entry came from user mode. The
 * calling code must be non-instrumentable. When the function returns all
 * state is correct and the subsequent functions can be instrumented.
 *
 * The function establishes state (lockdep, RCU (context tracking), tracing).
 */
void irqentry_enter_from_user_mode(struct pt_regs *regs);

/**
 * irqentry_exit_to_user_mode - Interrupt exit work
 * @regs: Pointer to current's pt_regs
 *
 * Invoked with interrupts disabled and fully valid regs. Returns with all
 * work handled, interrupts disabled such that the caller can immediately
 * switch to user mode. Called from architecture specific interrupt
 * handling code.
 *
 * The call order is #2 and #3 as described in syscall_exit_to_user_mode().
 * Interrupt exit does not invoke #1, the syscall specific one-time work.
 */
void irqentry_exit_to_user_mode(struct pt_regs *regs);
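
/*
 * Illustrative sketch (not part of this header): an interrupt entry which
 * is known to come from user mode. The handler name is hypothetical:
 *
 *	irqentry_enter_from_user_mode(regs);
 *	instrumentation_begin();
 *	handle_arch_irq(regs);			// hypothetical handler
 *	instrumentation_end();
 *	irqentry_exit_to_user_mode(regs);
 */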

#ifndef irqentry_state
/**
 * struct irqentry_state - Opaque object for exception state storage
 * @exit_rcu: Used exclusively in the irqentry_*() calls; signals whether the
 *            exit path has to invoke ct_irq_exit().
 * @lockdep: Used exclusively in the irqentry_nmi_*() calls; ensures that
 *           lockdep state is restored correctly on exit from nmi.
 *
 * This opaque object is filled in by the irqentry_*_enter() functions and
 * must be passed back into the corresponding irqentry_*_exit() functions
 * when the exception is complete.
 *
 * Callers of irqentry_*_[enter|exit]() must consider this structure opaque
 * and all members private. Descriptions of the members are provided to aid in
 * the maintenance of the irqentry_*() functions.
 */
typedef struct irqentry_state {
	union {
		bool	exit_rcu;
		bool	lockdep;
	};
} irqentry_state_t;
#endif

/**
 * irqentry_enter - Handle state tracking on ordinary interrupt entries
 * @regs: Pointer to pt_regs of interrupted context
 *
 * Invokes:
 *  - lockdep irqflag state tracking as low level ASM entry disabled
 *    interrupts.
 *
 *  - Context tracking if the exception hit user mode.
 *
 *  - The hardirq tracer to keep the state consistent as low level ASM
 *    entry disabled interrupts.
 *
 * As a precondition, this requires that the entry came from user mode,
 * idle, or a kernel context in which RCU is watching.
 *
 * For kernel mode entries RCU handling is done conditionally. If RCU is
 * watching then the only RCU requirement is to check whether the tick has
 * to be restarted. If RCU is not watching then ct_irq_enter() has to be
 * invoked on entry and ct_irq_exit() on exit.
 *
 * Avoiding the ct_irq_enter/exit() calls is an optimization but also
 * solves the problem of kernel mode pagefaults which can schedule, which
 * is not possible after invoking ct_irq_enter() without undoing it.
 *
 * For user mode entries irqentry_enter_from_user_mode() is invoked to
 * establish the proper context for NOHZ_FULL. Otherwise scheduling on exit
 * would not be possible.
 *
 * Returns: An opaque object that must be passed to irqentry_exit()
 */
irqentry_state_t noinstr irqentry_enter(struct pt_regs *regs);

/**
 * irqentry_exit_cond_resched - Conditionally reschedule on return from interrupt
 *
 * Conditional reschedule with additional sanity checks.
 */
void raw_irqentry_exit_cond_resched(void);
#ifdef CONFIG_PREEMPT_DYNAMIC
#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
#define irqentry_exit_cond_resched_dynamic_enabled	raw_irqentry_exit_cond_resched
#define irqentry_exit_cond_resched_dynamic_disabled	NULL
DECLARE_STATIC_CALL(irqentry_exit_cond_resched, raw_irqentry_exit_cond_resched);
#define irqentry_exit_cond_resched()	static_call(irqentry_exit_cond_resched)()
#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
DECLARE_STATIC_KEY_TRUE(sk_dynamic_irqentry_exit_cond_resched);
void dynamic_irqentry_exit_cond_resched(void);
#define irqentry_exit_cond_resched()	dynamic_irqentry_exit_cond_resched()
#endif
#else /* CONFIG_PREEMPT_DYNAMIC */
#define irqentry_exit_cond_resched()	raw_irqentry_exit_cond_resched()
#endif /* CONFIG_PREEMPT_DYNAMIC */

/**
 * irqentry_exit - Handle return from exception that used irqentry_enter()
 * @regs: Pointer to pt_regs (exception entry regs)
 * @state: Return value from matching call to irqentry_enter()
 *
 * Depending on the return target (kernel/user) this runs the necessary
 * preemption and work checks if possible and required, and returns to
 * the caller with interrupts disabled and no further work pending.
 *
 * This is the last action before returning to the low level ASM code which
 * just needs to return to the appropriate context.
 *
 * Counterpart to irqentry_enter().
 */
void noinstr irqentry_exit(struct pt_regs *regs, irqentry_state_t state);
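
/*
 * Illustrative sketch (not part of this header): the canonical use of the
 * irqentry_enter()/irqentry_exit() pair around an exception handler. The
 * handler name is hypothetical:
 *
 *	irqentry_state_t state = irqentry_enter(regs);
 *
 *	instrumentation_begin();
 *	handle_the_exception(regs);		// hypothetical handler
 *	instrumentation_end();
 *
 *	irqentry_exit(regs, state);
 */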

/**
 * irqentry_nmi_enter - Handle NMI entry
 * @regs: Pointer to current's pt_regs
 *
 * Similar to irqentry_enter() but taking care of the NMI constraints.
 */
irqentry_state_t noinstr irqentry_nmi_enter(struct pt_regs *regs);

/**
 * irqentry_nmi_exit - Handle return from NMI handling
 * @regs: Pointer to pt_regs (NMI entry regs)
 * @irq_state: Return value from matching call to irqentry_nmi_enter()
 *
 * Last action before returning to the low level assembly code.
 *
 * Counterpart to irqentry_nmi_enter().
 */
void noinstr irqentry_nmi_exit(struct pt_regs *regs, irqentry_state_t irq_state);
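
/*
 * Illustrative sketch (not part of this header): the matching NMI pattern:
 *
 *	irqentry_state_t irq_state = irqentry_nmi_enter(regs);
 *
 *	instrumentation_begin();
 *	handle_the_nmi(regs);			// hypothetical handler
 *	instrumentation_end();
 *
 *	irqentry_nmi_exit(regs, irq_state);
 */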

#endif