Greg Kroah-Hartman | b244131 | 2017-11-01 15:07:57 +0100 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
Jason A. Donenfeld | 6071a6c | 2022-02-11 12:28:33 +0100 | [diff] [blame] | 2 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3 | #ifndef _LINUX_RANDOM_H |
| 4 | #define _LINUX_RANDOM_H |
| 5 | |
Mark Rutland | 253d319 | 2020-02-10 13:00:13 +0000 | [diff] [blame] | 6 | #include <linux/bug.h> |
| 7 | #include <linux/kernel.h> |
Herbert Xu | 205a525 | 2015-06-09 18:19:39 +0800 | [diff] [blame] | 8 | #include <linux/list.h> |
Daniel Borkmann | 897ece5 | 2015-10-08 01:20:38 +0200 | [diff] [blame] | 9 | #include <linux/once.h> |
| 10 | |
David Howells | 607ca46 | 2012-10-13 10:46:48 +0100 | [diff] [blame] | 11 | #include <uapi/linux/random.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12 | |
Jason A. Donenfeld | 5acd354 | 2022-03-01 20:03:49 +0100 | [diff] [blame] | 13 | struct notifier_block; |
Herbert Xu | 205a525 | 2015-06-09 18:19:39 +0800 | [diff] [blame] | 14 | |
Jason A. Donenfeld | a194026 | 2022-05-13 13:18:46 +0200 | [diff] [blame] | 15 | void add_device_randomness(const void *buf, size_t len); |
Jason A. Donenfeld | 39e0f99 | 2022-06-07 17:00:16 +0200 | [diff] [blame] | 16 | void __init add_bootloader_randomness(const void *buf, size_t len); |
Jason A. Donenfeld | 7782cfe | 2022-05-13 12:29:38 +0200 | [diff] [blame] | 17 | void add_input_randomness(unsigned int type, unsigned int code, |
| 18 | unsigned int value) __latent_entropy; |
| 19 | void add_interrupt_randomness(int irq) __latent_entropy; |
Jason A. Donenfeld | a194026 | 2022-05-13 13:18:46 +0200 | [diff] [blame] | 20 | void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy); |
Jason A. Donenfeld | 2f14062 | 2022-05-05 02:20:22 +0200 | [diff] [blame] | 21 | |
/*
 * Feed the latent_entropy GCC plugin's accumulated value into the input
 * pool via add_device_randomness(). When the plugin is not built in (or
 * under sparse, which cannot see the plugin-injected global), still call
 * add_device_randomness(NULL, 0) so any side effects of the call itself
 * (e.g. timing) are contributed uniformly in both configurations.
 */
static inline void add_latent_entropy(void)
{
#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
	/* latent_entropy is the plugin-maintained global — only exists here. */
	add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
#else
	add_device_randomness(NULL, 0);
#endif
}
Jason A. Donenfeld | 2f14062 | 2022-05-05 02:20:22 +0200 | [diff] [blame] | 30 | |
#if IS_ENABLED(CONFIG_VMGENID)
/* VM-fork detection: feed the new unique VM id and notify subscribers. */
void add_vmfork_randomness(const void *unique_vm_id, size_t len);
int register_random_vmfork_notifier(struct notifier_block *nb);
int unregister_random_vmfork_notifier(struct notifier_block *nb);
#else
/* Without CONFIG_VMGENID there is no vmfork event source; (un)registration
 * becomes a successful no-op so callers need no config conditionals. */
static inline int register_random_vmfork_notifier(struct notifier_block *nb) { return 0; }
static inline int unregister_random_vmfork_notifier(struct notifier_block *nb) { return 0; }
#endif
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 39 | |
Jason A. Donenfeld | a194026 | 2022-05-13 13:18:46 +0200 | [diff] [blame] | 40 | void get_random_bytes(void *buf, size_t len); |
Jason A. Donenfeld | 585cd5f | 2022-09-28 18:47:30 +0200 | [diff] [blame] | 41 | u8 get_random_u8(void); |
| 42 | u16 get_random_u16(void); |
Jason A. Donenfeld | c440408 | 2017-01-22 16:34:08 +0100 | [diff] [blame] | 43 | u32 get_random_u32(void); |
| 44 | u64 get_random_u64(void); |
/*
 * Returns a random unsigned long, drawing however many bits the native
 * word holds.
 */
static inline unsigned long get_random_long(void)
{
#if BITS_PER_LONG == 32
	return get_random_u32();
#else
	return get_random_u64();
#endif
}
| 53 | |
Jason A. Donenfeld | 346ac4a | 2022-10-08 20:42:54 -0600 | [diff] [blame] | 54 | u32 __get_random_u32_below(u32 ceil); |
| 55 | |
| 56 | /* |
| 57 | * Returns a random integer in the interval [0, ceil), with uniform |
| 58 | * distribution, suitable for all uses. Fastest when ceil is a constant, but |
| 59 | * still fast for variable ceil as well. |
| 60 | */ |
| 61 | static inline u32 get_random_u32_below(u32 ceil) |
| 62 | { |
| 63 | if (!__builtin_constant_p(ceil)) |
| 64 | return __get_random_u32_below(ceil); |
| 65 | |
| 66 | /* |
| 67 | * For the fast path, below, all operations on ceil are precomputed by |
| 68 | * the compiler, so this incurs no overhead for checking pow2, doing |
| 69 | * divisions, or branching based on integer size. The resultant |
| 70 | * algorithm does traditional reciprocal multiplication (typically |
| 71 | * optimized by the compiler into shifts and adds), rejecting samples |
| 72 | * whose lower half would indicate a range indivisible by ceil. |
| 73 | */ |
| 74 | BUILD_BUG_ON_MSG(!ceil, "get_random_u32_below() must take ceil > 0"); |
| 75 | if (ceil <= 1) |
| 76 | return 0; |
| 77 | for (;;) { |
| 78 | if (ceil <= 1U << 8) { |
| 79 | u32 mult = ceil * get_random_u8(); |
| 80 | if (likely(is_power_of_2(ceil) || (u8)mult >= (1U << 8) % ceil)) |
| 81 | return mult >> 8; |
| 82 | } else if (ceil <= 1U << 16) { |
| 83 | u32 mult = ceil * get_random_u16(); |
| 84 | if (likely(is_power_of_2(ceil) || (u16)mult >= (1U << 16) % ceil)) |
| 85 | return mult >> 16; |
| 86 | } else { |
| 87 | u64 mult = (u64)ceil * get_random_u32(); |
| 88 | if (likely(is_power_of_2(ceil) || (u32)mult >= -ceil % ceil)) |
| 89 | return mult >> 32; |
| 90 | } |
| 91 | } |
| 92 | } |
| 93 | |
Rik van Riel | 022c204 | 2017-07-12 14:36:17 -0700 | [diff] [blame] | 94 | /* |
Jason A. Donenfeld | 6088d87 | 2022-10-19 23:19:35 -0600 | [diff] [blame] | 95 | * Returns a random integer in the interval (floor, U32_MAX], with uniform |
| 96 | * distribution, suitable for all uses. Fastest when floor is a constant, but |
| 97 | * still fast for variable floor as well. |
| 98 | */ |
| 99 | static inline u32 get_random_u32_above(u32 floor) |
| 100 | { |
| 101 | BUILD_BUG_ON_MSG(__builtin_constant_p(floor) && floor == U32_MAX, |
| 102 | "get_random_u32_above() must take floor < U32_MAX"); |
| 103 | return floor + 1 + get_random_u32_below(U32_MAX - floor); |
| 104 | } |
| 105 | |
| 106 | /* |
| 107 | * Returns a random integer in the interval [floor, ceil], with uniform |
| 108 | * distribution, suitable for all uses. Fastest when floor and ceil are |
| 109 | * constant, but still fast for variable floor and ceil as well. |
| 110 | */ |
| 111 | static inline u32 get_random_u32_inclusive(u32 floor, u32 ceil) |
| 112 | { |
| 113 | BUILD_BUG_ON_MSG(__builtin_constant_p(floor) && __builtin_constant_p(ceil) && |
| 114 | (floor > ceil || ceil - floor == U32_MAX), |
| 115 | "get_random_u32_inclusive() must take floor <= ceil"); |
| 116 | return floor + get_random_u32_below(ceil - floor + 1); |
| 117 | } |
| 118 | |
| 119 | /* |
Rik van Riel | 022c204 | 2017-07-12 14:36:17 -0700 | [diff] [blame] | 120 | * On 64-bit architectures, protect against non-terminated C string overflows |
| 121 | * by zeroing out the first byte of the canary; this leaves 56 bits of entropy. |
| 122 | */ |
| 123 | #ifdef CONFIG_64BIT |
| 124 | # ifdef __LITTLE_ENDIAN |
| 125 | # define CANARY_MASK 0xffffffffffffff00UL |
| 126 | # else /* big endian, 64 bits: */ |
| 127 | # define CANARY_MASK 0x00ffffffffffffffUL |
| 128 | # endif |
| 129 | #else /* 32 bits: */ |
| 130 | # define CANARY_MASK 0xffffffffUL |
| 131 | #endif |
| 132 | |
| 133 | static inline unsigned long get_random_canary(void) |
| 134 | { |
Jason A. Donenfeld | 7782cfe | 2022-05-13 12:29:38 +0200 | [diff] [blame] | 135 | return get_random_long() & CANARY_MASK; |
Rik van Riel | 022c204 | 2017-07-12 14:36:17 -0700 | [diff] [blame] | 136 | } |
| 137 | |
Jason A. Donenfeld | f623849 | 2022-09-26 17:43:14 +0200 | [diff] [blame] | 138 | void __init random_init_early(const char *command_line); |
| 139 | void __init random_init(void); |
Jason A. Donenfeld | 7782cfe | 2022-05-13 12:29:38 +0200 | [diff] [blame] | 140 | bool rng_is_initialized(void); |
| 141 | int wait_for_random_bytes(void); |
Jason A. Donenfeld | 7782cfe | 2022-05-13 12:29:38 +0200 | [diff] [blame] | 142 | |
Jason A. Donenfeld | da9ba56 | 2017-06-07 20:05:02 -0400 | [diff] [blame] | 143 | /* Calls wait_for_random_bytes() and then calls get_random_bytes(buf, nbytes). |
| 144 | * Returns the result of the call to wait_for_random_bytes. */ |
Jason A. Donenfeld | 04ec96b | 2022-02-09 14:43:25 +0100 | [diff] [blame] | 145 | static inline int get_random_bytes_wait(void *buf, size_t nbytes) |
Jason A. Donenfeld | da9ba56 | 2017-06-07 20:05:02 -0400 | [diff] [blame] | 146 | { |
| 147 | int ret = wait_for_random_bytes(); |
Jason A. Donenfeld | da9ba56 | 2017-06-07 20:05:02 -0400 | [diff] [blame] | 148 | get_random_bytes(buf, nbytes); |
Jason A. Donenfeld | 25e3fca | 2018-02-04 23:07:46 +0100 | [diff] [blame] | 149 | return ret; |
Jason A. Donenfeld | da9ba56 | 2017-06-07 20:05:02 -0400 | [diff] [blame] | 150 | } |
| 151 | |
Jason A. Donenfeld | 7c3a8a1 | 2022-05-13 12:32:23 +0200 | [diff] [blame] | 152 | #define declare_get_random_var_wait(name, ret_type) \ |
| 153 | static inline int get_random_ ## name ## _wait(ret_type *out) { \ |
Jason A. Donenfeld | da9ba56 | 2017-06-07 20:05:02 -0400 | [diff] [blame] | 154 | int ret = wait_for_random_bytes(); \ |
| 155 | if (unlikely(ret)) \ |
| 156 | return ret; \ |
Jason A. Donenfeld | 7c3a8a1 | 2022-05-13 12:32:23 +0200 | [diff] [blame] | 157 | *out = get_random_ ## name(); \ |
Jason A. Donenfeld | da9ba56 | 2017-06-07 20:05:02 -0400 | [diff] [blame] | 158 | return 0; \ |
| 159 | } |
Jason A. Donenfeld | a890d1c | 2022-10-05 12:54:38 +0200 | [diff] [blame] | 160 | declare_get_random_var_wait(u8, u8) |
| 161 | declare_get_random_var_wait(u16, u16) |
Jason A. Donenfeld | 7c3a8a1 | 2022-05-13 12:32:23 +0200 | [diff] [blame] | 162 | declare_get_random_var_wait(u32, u32) |
| 163 | declare_get_random_var_wait(u64, u32) |
Jason A. Donenfeld | 7c3a8a1 | 2022-05-13 12:32:23 +0200 | [diff] [blame] | 164 | declare_get_random_var_wait(long, unsigned long) |
Jason A. Donenfeld | da9ba56 | 2017-06-07 20:05:02 -0400 | [diff] [blame] | 165 | #undef declare_get_random_var |
| 166 | |
Joe Eykholt | 5960164 | 2010-05-26 14:44:13 -0700 | [diff] [blame] | 167 | /* |
Linus Torvalds | c0842fb | 2020-07-31 07:51:14 +0200 | [diff] [blame] | 168 | * This is designed to be standalone for just prandom |
| 169 | * users, but for now we include it from <linux/random.h> |
| 170 | * for legacy reasons. |
Joe Eykholt | 5960164 | 2010-05-26 14:44:13 -0700 | [diff] [blame] | 171 | */ |
Linus Torvalds | c0842fb | 2020-07-31 07:51:14 +0200 | [diff] [blame] | 172 | #include <linux/prandom.h> |
Joe Eykholt | 5960164 | 2010-05-26 14:44:13 -0700 | [diff] [blame] | 173 | |
Jason A. Donenfeld | 9592eef | 2022-07-05 20:48:41 +0200 | [diff] [blame] | 174 | #include <asm/archrandom.h> |
H. Peter Anvin | 63d7717 | 2011-07-31 13:54:50 -0700 | [diff] [blame] | 175 | |
Mark Rutland | 253d319 | 2020-02-10 13:00:13 +0000 | [diff] [blame] | 176 | /* |
| 177 | * Called from the boot CPU during startup; not valid to call once |
| 178 | * secondary CPUs are up and preemption is possible. |
| 179 | */ |
#ifndef arch_get_random_seed_longs_early
/*
 * Boot-time wrapper around arch_get_random_seed_longs(); the WARN_ON
 * enforces that it only runs while system_state is still SYSTEM_BOOTING,
 * i.e. on the boot CPU before secondary CPUs come up. Architectures may
 * override it by defining the macro of the same name.
 */
static inline size_t __init arch_get_random_seed_longs_early(unsigned long *v, size_t max_longs)
{
	WARN_ON(system_state != SYSTEM_BOOTING);
	return arch_get_random_seed_longs(v, max_longs);
}
#endif
| 187 | |
Jason A. Donenfeld | d349ab9 | 2022-07-17 12:35:24 +0200 | [diff] [blame] | 188 | #ifndef arch_get_random_longs_early |
| 189 | static inline bool __init arch_get_random_longs_early(unsigned long *v, size_t max_longs) |
Mark Rutland | 253d319 | 2020-02-10 13:00:13 +0000 | [diff] [blame] | 190 | { |
| 191 | WARN_ON(system_state != SYSTEM_BOOTING); |
Jason A. Donenfeld | d349ab9 | 2022-07-17 12:35:24 +0200 | [diff] [blame] | 192 | return arch_get_random_longs(v, max_longs); |
Mark Rutland | 253d319 | 2020-02-10 13:00:13 +0000 | [diff] [blame] | 193 | } |
| 194 | #endif |
| 195 | |
Jason A. Donenfeld | 3191dd5 | 2022-02-13 22:48:04 +0100 | [diff] [blame] | 196 | #ifdef CONFIG_SMP |
Jason A. Donenfeld | 7782cfe | 2022-05-13 12:29:38 +0200 | [diff] [blame] | 197 | int random_prepare_cpu(unsigned int cpu); |
| 198 | int random_online_cpu(unsigned int cpu); |
| 199 | #endif |
| 200 | |
| 201 | #ifndef MODULE |
| 202 | extern const struct file_operations random_fops, urandom_fops; |
Jason A. Donenfeld | 3191dd5 | 2022-02-13 22:48:04 +0100 | [diff] [blame] | 203 | #endif |
| 204 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 205 | #endif /* _LINUX_RANDOM_H */ |