Greg Kroah-Hartman | b244131 | 2017-11-01 15:07:57 +0100 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
Jason A. Donenfeld | 6071a6c | 2022-02-11 12:28:33 +0100 | [diff] [blame] | 2 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3 | #ifndef _LINUX_RANDOM_H |
| 4 | #define _LINUX_RANDOM_H |
| 5 | |
Mark Rutland | 253d319 | 2020-02-10 13:00:13 +0000 | [diff] [blame] | 6 | #include <linux/bug.h> |
| 7 | #include <linux/kernel.h> |
Herbert Xu | 205a525 | 2015-06-09 18:19:39 +0800 | [diff] [blame] | 8 | #include <linux/list.h> |
Daniel Borkmann | 897ece5 | 2015-10-08 01:20:38 +0200 | [diff] [blame] | 9 | #include <linux/once.h> |
| 10 | |
David Howells | 607ca46 | 2012-10-13 10:46:48 +0100 | [diff] [blame] | 11 | #include <uapi/linux/random.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 12 | |
/*
 * Registration record for a callback invoked once the RNG has been
 * initialized; registered/unregistered with add_random_ready_callback()
 * and del_random_ready_callback() below.
 */
struct random_ready_callback {
	struct list_head list;	/* linkage on the RNG's ready-callback list */
	void (*func)(struct random_ready_callback *rdy);	/* called when RNG is ready */
	struct module *owner;	/* owning module; presumably pinned while registered — verify in drivers/char/random.c */
};
| 18 | |
Jason A. Donenfeld | 04ec96b | 2022-02-09 14:43:25 +0100 | [diff] [blame] | 19 | extern void add_device_randomness(const void *, size_t); |
| 20 | extern void add_bootloader_randomness(const void *, size_t); |
Emese Revfy | 38addce | 2016-06-20 20:41:19 +0200 | [diff] [blame] | 21 | |
#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
/*
 * Mix the compiler-generated 'latent_entropy' global (provided by the
 * latent_entropy GCC plugin) into the pool via add_device_randomness().
 * Compiled out under sparse (__CHECKER__), which does not run the plugin
 * and therefore lacks the symbol.
 */
static inline void add_latent_entropy(void)
{
	add_device_randomness((const void *)&latent_entropy,
			      sizeof(latent_entropy));
}
#else
/* No plugin: nothing to mix in. */
static inline void add_latent_entropy(void) {}
#endif
| 31 | |
/* Entropy-input interfaces (see drivers/char/random.c for definitions). */
extern void add_input_randomness(unsigned int type, unsigned int code,
				 unsigned int value) __latent_entropy;
extern void add_interrupt_randomness(int irq) __latent_entropy;
extern void add_hwgenerator_randomness(const void *buffer, size_t count,
				       size_t entropy);

/* Output interfaces: draw bytes, wait for / query RNG readiness. */
extern void get_random_bytes(void *buf, size_t nbytes);
extern int wait_for_random_bytes(void);
extern int __init rand_initialize(void);
extern bool rng_is_initialized(void);
extern int add_random_ready_callback(struct random_ready_callback *rdy);
extern void del_random_ready_callback(struct random_ready_callback *rdy);
extern size_t __must_check get_random_bytes_arch(void *buf, size_t nbytes);
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 45 | |
#ifndef MODULE
/* file_operations for /dev/random and /dev/urandom; built-in code only. */
extern const struct file_operations random_fops, urandom_fops;
#endif
| 49 | |
/* Fixed-width random integers (see drivers/char/random.c). */
u32 get_random_u32(void);
u64 get_random_u64(void);
/* Legacy-named alias: a random unsigned int, drawn from get_random_u32(). */
static inline unsigned int get_random_int(void)
{
	unsigned int ret = get_random_u32();

	return ret;
}
/*
 * Return a random unsigned long: the 64-bit generator when long is
 * 64 bits wide, the 32-bit generator otherwise.
 */
static inline unsigned long get_random_long(void)
{
#if BITS_PER_LONG == 64
	return get_random_u64();
#else
	return get_random_u32();
#endif
}
| 64 | |
/*
 * On 64-bit architectures, protect against non-terminated C string overflows
 * by zeroing out the first byte of the canary; this leaves 56 bits of entropy.
 */
#ifdef CONFIG_64BIT
# ifdef __LITTLE_ENDIAN
# define CANARY_MASK 0xffffffffffffff00UL	/* low byte is first in memory */
# else /* big endian, 64 bits: */
# define CANARY_MASK 0x00ffffffffffffffUL	/* high byte is first in memory */
# endif
#else /* 32 bits: */
# define CANARY_MASK 0xffffffffUL	/* no NUL byte reserved on 32-bit */
#endif

/*
 * Generate a stack-protector canary value with the byte that sits first
 * in memory forced to zero, so a string-based overflow stops at it.
 */
static inline unsigned long get_random_canary(void)
{
	unsigned long val = get_random_long();

	return val & CANARY_MASK;
}
| 85 | |
/* Calls wait_for_random_bytes() and then calls get_random_bytes(buf, nbytes).
 * Returns the result of the call to wait_for_random_bytes.  Note the buffer
 * is filled unconditionally, even when the wait returned an error (e.g. it
 * was interrupted by a signal); a non-zero return tells the caller the RNG
 * may not have been fully seeded when the bytes were drawn. */
static inline int get_random_bytes_wait(void *buf, size_t nbytes)
{
	int ret = wait_for_random_bytes();
	get_random_bytes(buf, nbytes);
	return ret;
}
| 94 | |
| 95 | #define declare_get_random_var_wait(var) \ |
| 96 | static inline int get_random_ ## var ## _wait(var *out) { \ |
| 97 | int ret = wait_for_random_bytes(); \ |
| 98 | if (unlikely(ret)) \ |
| 99 | return ret; \ |
| 100 | *out = get_random_ ## var(); \ |
| 101 | return 0; \ |
| 102 | } |
| 103 | declare_get_random_var_wait(u32) |
| 104 | declare_get_random_var_wait(u64) |
| 105 | declare_get_random_var_wait(int) |
| 106 | declare_get_random_var_wait(long) |
| 107 | #undef declare_get_random_var |
| 108 | |
/* Randomly offset 'start' within 'range' (ASLR-style placement); result presumably page-aligned — verify at the definition. */
unsigned long randomize_page(unsigned long start, unsigned long range);
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 110 | |
Joe Eykholt | 5960164 | 2010-05-26 14:44:13 -0700 | [diff] [blame] | 111 | /* |
Linus Torvalds | c0842fb | 2020-07-31 07:51:14 +0200 | [diff] [blame] | 112 | * This is designed to be standalone for just prandom |
| 113 | * users, but for now we include it from <linux/random.h> |
| 114 | * for legacy reasons. |
Joe Eykholt | 5960164 | 2010-05-26 14:44:13 -0700 | [diff] [blame] | 115 | */ |
Linus Torvalds | c0842fb | 2020-07-31 07:51:14 +0200 | [diff] [blame] | 116 | #include <linux/prandom.h> |
Joe Eykholt | 5960164 | 2010-05-26 14:44:13 -0700 | [diff] [blame] | 117 | |
#ifdef CONFIG_ARCH_RANDOM
# include <asm/archrandom.h>
#else
/*
 * Fallback stubs for architectures without a hardware RNG interface;
 * they always report failure so callers fall back to the software pool.
 */
static inline bool __must_check arch_get_random_long(unsigned long *v)
{
	return false;
}
static inline bool __must_check arch_get_random_int(unsigned int *v)
{
	return false;
}
static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
{
	return false;
}
static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
{
	return false;
}
#endif
| 138 | |
/*
 * Called from the boot CPU during startup; not valid to call once
 * secondary CPUs are up and preemption is possible.
 */
#ifndef arch_get_random_seed_long_early
/* Boot-time wrapper; warns (without failing) if used after boot. */
static inline bool __init arch_get_random_seed_long_early(unsigned long *v)
{
	WARN_ON(system_state != SYSTEM_BOOTING);
	return arch_get_random_seed_long(v);
}
#endif
| 150 | |
#ifndef arch_get_random_long_early
/* Boot-time wrapper; warns (without failing) if used after boot. */
static inline bool __init arch_get_random_long_early(unsigned long *v)
{
	WARN_ON(system_state != SYSTEM_BOOTING);
	return arch_get_random_long(v);
}
#endif
| 158 | |
#ifdef CONFIG_SMP
/* CPU-hotplug callbacks for the RNG — NOTE(review): presumably set up
 * per-CPU state; confirm against drivers/char/random.c. */
extern int random_prepare_cpu(unsigned int cpu);
extern int random_online_cpu(unsigned int cpu);
#endif
| 163 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 164 | #endif /* _LINUX_RANDOM_H */ |