blob: b9b970f7ab45a58bcc79ece84b5479d0d4420f7c [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001/* SPDX-License-Identifier: GPL-2.0 */
Michael S. Tsirkin3d2d8272009-09-21 17:03:51 -07002#ifndef _LINUX_MMU_CONTEXT_H
3#define _LINUX_MMU_CONTEXT_H
4
Andy Lutomirskif98db602016-04-26 09:39:06 -07005#include <asm/mmu_context.h>
Peter Zijlstrabf9282d2020-08-12 12:22:17 +02006#include <asm/mmu.h>
Andy Lutomirskif98db602016-04-26 09:39:06 -07007
/*
 * Architectures that care about IRQ state in switch_mm can override this.
 * The default simply aliases plain switch_mm(), i.e. no IRQ-state handling
 * beyond what switch_mm() itself does.
 */
#ifndef switch_mm_irqs_off
# define switch_mm_irqs_off switch_mm
#endif
12
#ifndef leave_mm
/*
 * Default leave_mm(): a no-op for architectures that do not provide their
 * own definition (the #ifndef above lets an arch header override it).
 */
static inline void leave_mm(int cpu)
{
}
#endif
16
/*
 * CPUs that are capable of running user task @p. Must contain at least one
 * active CPU. It is assumed that the kernel can run on all CPUs, so calling
 * this for a kernel thread is pointless.
 *
 * By default, we assume a sane, homogeneous system.
 */
#ifndef task_cpu_possible_mask
/* Homogeneous default: any possible CPU can run any task. */
# define task_cpu_possible_mask(p) cpu_possible_mask
# define task_cpu_possible(cpu, p) true
#else
/*
 * Arch provided its own task_cpu_possible_mask(); derive the per-CPU
 * predicate by testing @cpu's membership in that mask.
 */
# define task_cpu_possible(cpu, p) cpumask_test_cpu((cpu), task_cpu_possible_mask(p))
#endif
30
Michael S. Tsirkin3d2d8272009-09-21 17:03:51 -070031#endif