/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _LINUX_RANDOM_H
#define _LINUX_RANDOM_H

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/once.h>

#include <uapi/linux/random.h>

struct notifier_block;

void add_device_randomness(const void *buf, size_t len);
void __init add_bootloader_randomness(const void *buf, size_t len);
void add_input_randomness(unsigned int type, unsigned int code,
                          unsigned int value) __latent_entropy;
void add_interrupt_randomness(int irq) __latent_entropy;
void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy);
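
/*
 * Example (illustrative sketch, not part of this header): a driver that has
 * read a device serial number, MAC address, or hardware RNG output can mix
 * it into the entropy pool with the functions above. The buffer and helper
 * below are hypothetical.
 *
 *      u8 serial[16];
 *      read_device_serial(serial);     // hypothetical helper
 *      add_device_randomness(serial, sizeof(serial));
 */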

#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
static inline void add_latent_entropy(void)
{
        add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
}
#else
static inline void add_latent_entropy(void) { }
#endif

#if IS_ENABLED(CONFIG_VMGENID)
void add_vmfork_randomness(const void *unique_vm_id, size_t len);
int register_random_vmfork_notifier(struct notifier_block *nb);
int unregister_random_vmfork_notifier(struct notifier_block *nb);
#else
static inline int register_random_vmfork_notifier(struct notifier_block *nb) { return 0; }
static inline int unregister_random_vmfork_notifier(struct notifier_block *nb) { return 0; }
#endif
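
/*
 * Example (illustrative sketch, not part of this header): a subsystem that
 * caches random material can register to be notified when a VM fork is
 * detected, so that the cached material can be discarded. The callback and
 * notifier names below are hypothetical.
 *
 *      static int my_vmfork_cb(struct notifier_block *nb,
 *                              unsigned long action, void *data)
 *      {
 *              // assumption: invalidate cached per-VM random state here
 *              return NOTIFY_DONE;
 *      }
 *      static struct notifier_block my_vmfork_nb = {
 *              .notifier_call = my_vmfork_cb,
 *      };
 *
 *      register_random_vmfork_notifier(&my_vmfork_nb);
 */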

void get_random_bytes(void *buf, size_t len);
u8 get_random_u8(void);
u16 get_random_u16(void);
u32 get_random_u32(void);
u64 get_random_u64(void);
static inline unsigned long get_random_long(void)
{
#if BITS_PER_LONG == 64
        return get_random_u64();
#else
        return get_random_u32();
#endif
}
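
/*
 * Example (illustrative sketch, not part of this header): the sized helpers
 * above return full-width uniform values, while get_random_bytes() fills an
 * arbitrary buffer.
 *
 *      u64 cookie = get_random_u64();
 *      u8 key[32];
 *      get_random_bytes(key, sizeof(key));
 */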

u32 __get_random_u32_below(u32 ceil);

/*
 * Returns a random integer in the interval [0, ceil), with uniform
 * distribution, suitable for all uses. Fastest when ceil is a constant, but
 * still fast for variable ceil as well.
 */
static inline u32 get_random_u32_below(u32 ceil)
{
        if (!__builtin_constant_p(ceil))
                return __get_random_u32_below(ceil);

        /*
         * For the fast path, below, all operations on ceil are precomputed by
         * the compiler, so this incurs no overhead for checking pow2, doing
         * divisions, or branching based on integer size. The resultant
         * algorithm does traditional reciprocal multiplication (typically
         * optimized by the compiler into shifts and adds), rejecting samples
         * whose lower half would indicate a range indivisible by ceil.
         */
        BUILD_BUG_ON_MSG(!ceil, "get_random_u32_below() must take ceil > 0");
        if (ceil <= 1)
                return 0;
        for (;;) {
                if (ceil <= 1U << 8) {
                        u32 mult = ceil * get_random_u8();
                        if (likely(is_power_of_2(ceil) || (u8)mult >= (1U << 8) % ceil))
                                return mult >> 8;
                } else if (ceil <= 1U << 16) {
                        u32 mult = ceil * get_random_u16();
                        if (likely(is_power_of_2(ceil) || (u16)mult >= (1U << 16) % ceil))
                                return mult >> 16;
                } else {
                        u64 mult = (u64)ceil * get_random_u32();
                        if (likely(is_power_of_2(ceil) || (u32)mult >= -ceil % ceil))
                                return mult >> 32;
                }
        }
}
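
/*
 * Example (illustrative sketch, not part of this header): uniformly picking
 * one of N entries, e.g. a random index into an array; 'table' is
 * hypothetical.
 *
 *      size_t i = get_random_u32_below(ARRAY_SIZE(table));
 *
 * Worked instance of the rejection step above, assuming ceil == 6: a random
 * u8 r is drawn and mult = 6 * r, so mult >> 8 lands in [0, 5]. Because
 * 256 % 6 == 4, a plain multiply would favour some outputs by one extra
 * draw; rejecting samples whose low byte (u8)mult is below 4 discards
 * exactly those surplus draws, keeping the distribution uniform.
 */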

/*
 * On 64-bit architectures, protect against non-terminated C string overflows
 * by zeroing out the first byte of the canary; this leaves 56 bits of entropy.
 */
#ifdef CONFIG_64BIT
# ifdef __LITTLE_ENDIAN
#  define CANARY_MASK 0xffffffffffffff00UL
# else /* big endian, 64 bits: */
#  define CANARY_MASK 0x00ffffffffffffffUL
# endif
#else /* 32 bits: */
# define CANARY_MASK 0xffffffffUL
#endif

static inline unsigned long get_random_canary(void)
{
        return get_random_long() & CANARY_MASK;
}
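
/*
 * Example (illustrative sketch, not part of this header): producing a
 * stack-protector canary for a new task; which field receives the value is
 * architecture-specific and shown here only for illustration. The zeroed
 * byte means an overflow performed with C string functions would have to
 * write a NUL to preserve the canary, which terminates the copy.
 *
 *      tsk->stack_canary = get_random_canary();
 */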

void __init random_init_early(const char *command_line);
void __init random_init(void);
bool rng_is_initialized(void);
int wait_for_random_bytes(void);

/* Calls wait_for_random_bytes() and then calls get_random_bytes(buf, nbytes).
 * Returns the result of the call to wait_for_random_bytes. */
static inline int get_random_bytes_wait(void *buf, size_t nbytes)
{
        int ret = wait_for_random_bytes();
        get_random_bytes(buf, nbytes);
        return ret;
}
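
/*
 * Example (illustrative sketch, not part of this header): generating key
 * material that must not be produced before the RNG is initialized; unlike
 * plain get_random_bytes(), the return value must be checked.
 *
 *      u8 key[32];
 *      int ret = get_random_bytes_wait(key, sizeof(key));
 *      if (ret)
 *              return ret;     // interrupted before the RNG was ready
 */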

#define declare_get_random_var_wait(name, ret_type) \
        static inline int get_random_ ## name ## _wait(ret_type *out) { \
                int ret = wait_for_random_bytes(); \
                if (unlikely(ret)) \
                        return ret; \
                *out = get_random_ ## name(); \
                return 0; \
        }
declare_get_random_var_wait(u8, u8)
declare_get_random_var_wait(u16, u16)
declare_get_random_var_wait(u32, u32)
declare_get_random_var_wait(u64, u64)
declare_get_random_var_wait(long, unsigned long)
#undef declare_get_random_var_wait
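
/*
 * Example (illustrative sketch, not part of this header): each instantiation
 * above expands to a get_random_<name>_wait() helper, used like:
 *
 *      u32 id;
 *      int ret = get_random_u32_wait(&id);
 *      if (ret)
 *              return ret;     // interrupted while waiting for the RNG
 */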

/*
 * This is designed to be standalone for just prandom
 * users, but for now we include it from <linux/random.h>
 * for legacy reasons.
 */
#include <linux/prandom.h>

#include <asm/archrandom.h>

/*
 * Called from the boot CPU during startup; not valid to call once
 * secondary CPUs are up and preemption is possible.
 */
#ifndef arch_get_random_seed_longs_early
static inline size_t __init arch_get_random_seed_longs_early(unsigned long *v, size_t max_longs)
{
        WARN_ON(system_state != SYSTEM_BOOTING);
        return arch_get_random_seed_longs(v, max_longs);
}
#endif

#ifndef arch_get_random_longs_early
static inline size_t __init arch_get_random_longs_early(unsigned long *v, size_t max_longs)
{
        WARN_ON(system_state != SYSTEM_BOOTING);
        return arch_get_random_longs(v, max_longs);
}
#endif
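
/*
 * Example (illustrative sketch, not part of this header): pulling a few seed
 * words from the architecture's RNG during early boot, before secondary CPUs
 * are up; the destination array is hypothetical.
 *
 *      unsigned long seeds[4];
 *      size_t n = arch_get_random_seed_longs_early(seeds, ARRAY_SIZE(seeds));
 *      // only the first n entries of seeds[] are valid; n may be 0
 */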

#ifdef CONFIG_SMP
int random_prepare_cpu(unsigned int cpu);
int random_online_cpu(unsigned int cpu);
#endif

#ifndef MODULE
extern const struct file_operations random_fops, urandom_fops;
#endif

#endif /* _LINUX_RANDOM_H */