blob: 51133627ba73ad568b85605b3c74c2208be5f648 [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001/* SPDX-License-Identifier: GPL-2.0 */
Jason A. Donenfeld6071a6c2022-02-11 12:28:33 +01002
Linus Torvalds1da177e2005-04-16 15:20:36 -07003#ifndef _LINUX_RANDOM_H
4#define _LINUX_RANDOM_H
5
Mark Rutland253d3192020-02-10 13:00:13 +00006#include <linux/bug.h>
7#include <linux/kernel.h>
Herbert Xu205a5252015-06-09 18:19:39 +08008#include <linux/list.h>
Daniel Borkmann897ece52015-10-08 01:20:38 +02009#include <linux/once.h>
10
David Howells607ca462012-10-13 10:46:48 +010011#include <uapi/linux/random.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070012
Jason A. Donenfeld5acd3542022-03-01 20:03:49 +010013struct notifier_block;
Herbert Xu205a5252015-06-09 18:19:39 +080014
/*
 * Entropy-input hooks: feed bytes from various sources into the pool.
 * None of these credit entropy except add_hwgenerator_randomness(),
 * which takes an explicit @entropy estimate (in bits — TODO confirm
 * unit against drivers/char/random.c).
 */
void add_device_randomness(const void *buf, size_t len);
void __init add_bootloader_randomness(const void *buf, size_t len);
void add_input_randomness(unsigned int type, unsigned int code,
			  unsigned int value) __latent_entropy;
void add_interrupt_randomness(int irq) __latent_entropy;
void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy);
Jason A. Donenfeld2f140622022-05-05 02:20:22 +020021
/*
 * Mix the compiler-generated latent_entropy value (produced by the
 * latent_entropy GCC plugin) into the pool. When the plugin is absent,
 * or under sparse (__CHECKER__), still make a zero-length call —
 * presumably so add_device_randomness() can mix in its own timing
 * side-input; confirm against its implementation.
 */
static inline void add_latent_entropy(void)
{
#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
	add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
#else
	add_device_randomness(NULL, 0);
#endif
}
Jason A. Donenfeld2f140622022-05-05 02:20:22 +020030
#if IS_ENABLED(CONFIG_VMGENID)
/* Mix a fresh unique VM ID into the pool and notify vmfork listeners. */
void add_vmfork_randomness(const void *unique_vm_id, size_t len);
int register_random_vmfork_notifier(struct notifier_block *nb);
int unregister_random_vmfork_notifier(struct notifier_block *nb);
#else
/* Without CONFIG_VMGENID there is no vmfork event; (un)registration succeeds as a no-op. */
static inline int register_random_vmfork_notifier(struct notifier_block *nb) { return 0; }
static inline int unregister_random_vmfork_notifier(struct notifier_block *nb) { return 0; }
#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -070039
/* Fill @buf with @len random bytes. */
void get_random_bytes(void *buf, size_t len);
/* Fixed-width random integers. */
u8 get_random_u8(void);
u16 get_random_u16(void);
u32 get_random_u32(void);
u64 get_random_u64(void);
/* Random unsigned long, sized to the architecture's native word width. */
static inline unsigned long get_random_long(void)
{
#if BITS_PER_LONG == 64
	return get_random_u64();
#else
	return get_random_u32();
#endif
}
53
/* Out-of-line slow path for get_random_u32_below(), used for non-constant @ceil. */
u32 __get_random_u32_below(u32 ceil);

/*
 * Returns a random integer in the interval [0, ceil), with uniform
 * distribution, suitable for all uses. Fastest when ceil is a constant, but
 * still fast for variable ceil as well.
 */
static inline u32 get_random_u32_below(u32 ceil)
{
	if (!__builtin_constant_p(ceil))
		return __get_random_u32_below(ceil);

	/*
	 * For the fast path, below, all operations on ceil are precomputed by
	 * the compiler, so this incurs no overhead for checking pow2, doing
	 * divisions, or branching based on integer size. The resultant
	 * algorithm does traditional reciprocal multiplication (typically
	 * optimized by the compiler into shifts and adds), rejecting samples
	 * whose lower half would indicate a range indivisible by ceil.
	 */
	BUILD_BUG_ON_MSG(!ceil, "get_random_u32_below() must take ceil > 0");
	if (ceil <= 1)
		return 0;
	for (;;) {
		/* Use the narrowest generator that covers ceil; the low bits
		 * of the product are the rejection test, the high bits the result. */
		if (ceil <= 1U << 8) {
			u32 mult = ceil * get_random_u8();
			if (likely(is_power_of_2(ceil) || (u8)mult >= (1U << 8) % ceil))
				return mult >> 8;
		} else if (ceil <= 1U << 16) {
			u32 mult = ceil * get_random_u16();
			if (likely(is_power_of_2(ceil) || (u16)mult >= (1U << 16) % ceil))
				return mult >> 16;
		} else {
			/* -ceil % ceil == (2^32 - ceil) % ceil == 2^32 % ceil in u32 arithmetic. */
			u64 mult = (u64)ceil * get_random_u32();
			if (likely(is_power_of_2(ceil) || (u32)mult >= -ceil % ceil))
				return mult >> 32;
		}
	}
}
93
Rik van Riel022c2042017-07-12 14:36:17 -070094/*
Jason A. Donenfeld6088d872022-10-19 23:19:35 -060095 * Returns a random integer in the interval (floor, U32_MAX], with uniform
96 * distribution, suitable for all uses. Fastest when floor is a constant, but
97 * still fast for variable floor as well.
98 */
99static inline u32 get_random_u32_above(u32 floor)
100{
101 BUILD_BUG_ON_MSG(__builtin_constant_p(floor) && floor == U32_MAX,
102 "get_random_u32_above() must take floor < U32_MAX");
103 return floor + 1 + get_random_u32_below(U32_MAX - floor);
104}
105
106/*
107 * Returns a random integer in the interval [floor, ceil], with uniform
108 * distribution, suitable for all uses. Fastest when floor and ceil are
109 * constant, but still fast for variable floor and ceil as well.
110 */
111static inline u32 get_random_u32_inclusive(u32 floor, u32 ceil)
112{
113 BUILD_BUG_ON_MSG(__builtin_constant_p(floor) && __builtin_constant_p(ceil) &&
114 (floor > ceil || ceil - floor == U32_MAX),
115 "get_random_u32_inclusive() must take floor <= ceil");
116 return floor + get_random_u32_below(ceil - floor + 1);
117}
118
/*
 * On 64-bit architectures, protect against non-terminated C string overflows
 * by zeroing out the first byte of the canary; this leaves 56 bits of entropy.
 */
#ifdef CONFIG_64BIT
# ifdef __LITTLE_ENDIAN
# define CANARY_MASK 0xffffffffffffff00UL
# else /* big endian, 64 bits: */
# define CANARY_MASK 0x00ffffffffffffffUL
# endif
#else /* 32 bits: */
# define CANARY_MASK 0xffffffffUL
#endif

/* Random stack-protector canary value, with the NUL byte masked out as above. */
static inline unsigned long get_random_canary(void)
{
	return get_random_long() & CANARY_MASK;
}
137
/* RNG boot-time initialization; @command_line is mixed in as early seed material. */
void __init random_init_early(const char *command_line);
void __init random_init(void);
/* True once the RNG has been fully seeded. */
bool rng_is_initialized(void);
/* Wait for the RNG to be seeded; returns 0 on success, nonzero on failure. */
int wait_for_random_bytes(void);
Jason A. Donenfeld7782cfe2022-05-13 12:29:38 +0200142
Jason A. Donenfeldda9ba562017-06-07 20:05:02 -0400143/* Calls wait_for_random_bytes() and then calls get_random_bytes(buf, nbytes).
144 * Returns the result of the call to wait_for_random_bytes. */
Jason A. Donenfeld04ec96b2022-02-09 14:43:25 +0100145static inline int get_random_bytes_wait(void *buf, size_t nbytes)
Jason A. Donenfeldda9ba562017-06-07 20:05:02 -0400146{
147 int ret = wait_for_random_bytes();
Jason A. Donenfeldda9ba562017-06-07 20:05:02 -0400148 get_random_bytes(buf, nbytes);
Jason A. Donenfeld25e3fca2018-02-04 23:07:46 +0100149 return ret;
Jason A. Donenfeldda9ba562017-06-07 20:05:02 -0400150}
151
Jason A. Donenfeld7c3a8a12022-05-13 12:32:23 +0200152#define declare_get_random_var_wait(name, ret_type) \
153 static inline int get_random_ ## name ## _wait(ret_type *out) { \
Jason A. Donenfeldda9ba562017-06-07 20:05:02 -0400154 int ret = wait_for_random_bytes(); \
155 if (unlikely(ret)) \
156 return ret; \
Jason A. Donenfeld7c3a8a12022-05-13 12:32:23 +0200157 *out = get_random_ ## name(); \
Jason A. Donenfeldda9ba562017-06-07 20:05:02 -0400158 return 0; \
159 }
Jason A. Donenfelda890d1c2022-10-05 12:54:38 +0200160declare_get_random_var_wait(u8, u8)
161declare_get_random_var_wait(u16, u16)
Jason A. Donenfeld7c3a8a12022-05-13 12:32:23 +0200162declare_get_random_var_wait(u32, u32)
163declare_get_random_var_wait(u64, u32)
Jason A. Donenfeld7c3a8a12022-05-13 12:32:23 +0200164declare_get_random_var_wait(long, unsigned long)
Jason A. Donenfeldda9ba562017-06-07 20:05:02 -0400165#undef declare_get_random_var
166
Joe Eykholt59601642010-05-26 14:44:13 -0700167/*
Linus Torvaldsc0842fb2020-07-31 07:51:14 +0200168 * This is designed to be standalone for just prandom
169 * users, but for now we include it from <linux/random.h>
170 * for legacy reasons.
Joe Eykholt59601642010-05-26 14:44:13 -0700171 */
Linus Torvaldsc0842fb2020-07-31 07:51:14 +0200172#include <linux/prandom.h>
Joe Eykholt59601642010-05-26 14:44:13 -0700173
Jason A. Donenfeld9592eef2022-07-05 20:48:41 +0200174#include <asm/archrandom.h>
H. Peter Anvin63d77172011-07-31 13:54:50 -0700175
/*
 * Called from the boot CPU during startup; not valid to call once
 * secondary CPUs are up and preemption is possible.
 */
#ifndef arch_get_random_seed_longs_early
/* Returns the number of longs written to @v (0 .. @max_longs). */
static inline size_t __init arch_get_random_seed_longs_early(unsigned long *v, size_t max_longs)
{
	WARN_ON(system_state != SYSTEM_BOOTING);
	return arch_get_random_seed_longs(v, max_longs);
}
#endif
187
Jason A. Donenfeldd349ab92022-07-17 12:35:24 +0200188#ifndef arch_get_random_longs_early
189static inline bool __init arch_get_random_longs_early(unsigned long *v, size_t max_longs)
Mark Rutland253d3192020-02-10 13:00:13 +0000190{
191 WARN_ON(system_state != SYSTEM_BOOTING);
Jason A. Donenfeldd349ab92022-07-17 12:35:24 +0200192 return arch_get_random_longs(v, max_longs);
Mark Rutland253d3192020-02-10 13:00:13 +0000193}
194#endif
195
#ifdef CONFIG_SMP
/* CPU hotplug callbacks for per-CPU RNG state. */
int random_prepare_cpu(unsigned int cpu);
int random_online_cpu(unsigned int cpu);
#endif

#ifndef MODULE
/* presumably the fops backing /dev/random and /dev/urandom — confirm in drivers/char/random.c */
extern const struct file_operations random_fops, urandom_fops;
#endif
204
Linus Torvalds1da177e2005-04-16 15:20:36 -0700205#endif /* _LINUX_RANDOM_H */