// SPDX-License-Identifier: GPL-2.0
/*
 * s390 arch random implementation.
 *
 * Copyright IBM Corp. 2017, 2020
 * Author(s): Harald Freudenberger
 *
 * The s390_arch_random_generate() function may be called from random.c
 * in interrupt context, so this implementation does its best to be very
 * fast. There is a buffer of random data which is asynchronously checked
 * and refilled by a workqueue worker.
 * If there are enough bytes in the buffer, s390_arch_random_generate()
 * just delivers these bytes. Otherwise false is returned until the
 * worker thread has refilled the buffer.
 * The worker fills the rng buffer by pulling fresh entropy from the
 * high quality (but slow) true hardware random generator. This entropy
 * is then spread over the buffer with a pseudo random number generator
 * (PRNG).
 * As arch_get_random_seed_long() fetches 8 bytes and the calling
 * function add_interrupt_randomness() credits this as 1 bit of entropy,
 * the distribution needs to make sure there is in fact 1 bit of entropy
 * contained in 8 bytes of the buffer. The current values pull 32 bytes
 * of entropy and scatter this into a 2048 byte buffer, so 8 bytes of
 * the buffer contain 1 bit of entropy.
 * The worker thread is rescheduled based on the fill level of the
 * buffer, but with at least 500 ms delay to avoid too much CPU
 * consumption. So the maximum amount of rng data delivered via
 * arch_get_random_seed_long() is limited to 4 KB per second.
 */

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/static_key.h>
#include <linux/workqueue.h>
#include <linux/moduleparam.h>
#include <asm/cpacf.h>

DEFINE_STATIC_KEY_FALSE(s390_arch_random_available);

atomic64_t s390_arch_random_counter = ATOMIC64_INIT(0);
EXPORT_SYMBOL(s390_arch_random_counter);

#define ARCH_REFILL_TICKS	(HZ/2)
#define ARCH_PRNG_SEED_SIZE	32
#define ARCH_RNG_BUF_SIZE	2048

static DEFINE_SPINLOCK(arch_rng_lock);
static u8 *arch_rng_buf;
static unsigned int arch_rng_buf_idx;

static void arch_rng_refill_buffer(struct work_struct *);
static DECLARE_DELAYED_WORK(arch_rng_work, arch_rng_refill_buffer);

bool s390_arch_random_generate(u8 *buf, unsigned int nbytes)
{
	/* max hunk is ARCH_RNG_BUF_SIZE */
	if (nbytes > ARCH_RNG_BUF_SIZE)
		return false;

	/* lock rng buffer */
	if (!spin_trylock(&arch_rng_lock))
		return false;

	/*
	 * Try to satisfy the request from the buffer. If fewer than
	 * nbytes are left, the unsigned index wraps around to a value
	 * larger than ARCH_RNG_BUF_SIZE and the copy is skipped.
	 */
	arch_rng_buf_idx -= nbytes;
	if (arch_rng_buf_idx < ARCH_RNG_BUF_SIZE) {
		memcpy(buf, arch_rng_buf + arch_rng_buf_idx, nbytes);
		atomic64_add(nbytes, &s390_arch_random_counter);
		spin_unlock(&arch_rng_lock);
		return true;
	}

	/* not enough bytes in rng buffer, refill is done asynchronously */
	spin_unlock(&arch_rng_lock);

	return false;
}
EXPORT_SYMBOL(s390_arch_random_generate);
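
/*
 * For context (not part of this file): the consumer of
 * s390_arch_random_generate() is the arch_get_random_seed_long()
 * wrapper in asm/archrandom.h. A minimal sketch of how such a wrapper
 * is expected to look, assuming the usual static-key gate, is:
 *
 *	static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
 *	{
 *		if (static_branch_likely(&s390_arch_random_available))
 *			return s390_arch_random_generate((u8 *)v, sizeof(*v));
 *		return false;
 *	}
 *
 * A failed trylock or an empty buffer thus simply makes the caller
 * fall back to its other entropy sources instead of blocking.
 */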

static void arch_rng_refill_buffer(struct work_struct *unused)
{
	unsigned int delay = ARCH_REFILL_TICKS;

	spin_lock(&arch_rng_lock);
	if (arch_rng_buf_idx > ARCH_RNG_BUF_SIZE) {
		/* buffer is exhausted and needs refill */
		u8 seed[ARCH_PRNG_SEED_SIZE];
		u8 prng_wa[240];

		/* fetch ARCH_PRNG_SEED_SIZE bytes of entropy */
		cpacf_trng(NULL, 0, seed, sizeof(seed));
		/* blow this entropy up to ARCH_RNG_BUF_SIZE with PRNG */
		memset(prng_wa, 0, sizeof(prng_wa));
		cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED,
			   &prng_wa, NULL, 0, seed, sizeof(seed));
		cpacf_prno(CPACF_PRNO_SHA512_DRNG_GEN,
			   &prng_wa, arch_rng_buf, ARCH_RNG_BUF_SIZE, NULL, 0);
		arch_rng_buf_idx = ARCH_RNG_BUF_SIZE;
	}
	delay += (ARCH_REFILL_TICKS * arch_rng_buf_idx) / ARCH_RNG_BUF_SIZE;
	spin_unlock(&arch_rng_lock);

	/* kick next check */
	queue_delayed_work(system_long_wq, &arch_rng_work, delay);
}

/*
 * Here follows the implementation of s390_arch_get_random_long().
 *
 * The random longs to be pulled by arch_get_random_long() are
 * prepared in a 4K buffer which is filled from the NIST 800-90
 * compliant s390 drbg. By default the random long buffer is refilled
 * 256 times before the drbg itself needs a reseed. The reseed of the
 * drbg is done with 32 bytes fetched from the high quality (but slow)
 * trng which is assumed to deliver 100% entropy. So the 32 * 8 = 256
 * bits of entropy are spread over 256 * 4 KB = 1 MB, serving 131072
 * arch_get_random_long() invocations before a reseed is needed.
 *
 * How often the 4K random long buffer is refilled with the drbg
 * before the drbg is reseeded can be adjusted. There is a module
 * parameter 'rndlong_drbg_reseed' accessible via
 *   /sys/module/arch_random/parameters/rndlong_drbg_reseed
 * or as kernel command line parameter
 *   arch_random.rndlong_drbg_reseed=<value>
 * This parameter tells how often the drbg fills the 4K buffer before
 * it is re-seeded by fresh entropy from the trng.
 * A value of 16 results in reseeding the drbg every 16 * 4 KB = 64 KB
 * with 32 bytes of fresh entropy pulled from the trng, that is 256
 * bits of entropy per 64 KB.
 * A value of 256 results in 1 MB of drbg output before the drbg is
 * reseeded, spreading the 256 bits of entropy among 1 MB.
 * Setting this parameter to 0 forces a reseed every time the 4K
 * buffer is depleted, raising the rate to 256 bits of entropy per
 * 4 KB, or 0.5 bits of entropy per arch_get_random_long().
 * Setting this parameter to a negative value disables all this
 * effort: arch_get_random_long() then always returns false,
 * indicating that the feature is not available.
 */

static unsigned long rndlong_buf[512];
static DEFINE_SPINLOCK(rndlong_lock);
static int rndlong_buf_index;

static int rndlong_drbg_reseed = 256;
module_param_named(rndlong_drbg_reseed, rndlong_drbg_reseed, int, 0600);
MODULE_PARM_DESC(rndlong_drbg_reseed, "s390 arch_get_random_long() drbg reseed");

static inline void refill_rndlong_buf(void)
{
	static u8 prng_ws[240];
	static int drbg_counter;

	if (--drbg_counter < 0) {
		/* need to re-seed the drbg */
		u8 seed[32];

		/* fetch seed from trng */
		cpacf_trng(NULL, 0, seed, sizeof(seed));
		/* seed drbg */
		memset(prng_ws, 0, sizeof(prng_ws));
		cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED,
			   &prng_ws, NULL, 0, seed, sizeof(seed));
		/* re-init counter for drbg */
		drbg_counter = rndlong_drbg_reseed;
	}

	/* fill the arch_get_random_long buffer from drbg */
	cpacf_prno(CPACF_PRNO_SHA512_DRNG_GEN, &prng_ws,
		   (u8 *) rndlong_buf, sizeof(rndlong_buf),
		   NULL, 0);
}

bool s390_arch_get_random_long(unsigned long *v)
{
	bool rc = false;
	unsigned long flags;

	/* arch_get_random_long() disabled ? */
	if (rndlong_drbg_reseed < 0)
		return false;

	/* try to lock the random long lock */
	if (!spin_trylock_irqsave(&rndlong_lock, flags))
		return false;

	if (--rndlong_buf_index >= 0) {
		/* deliver next long value from the buffer */
		*v = rndlong_buf[rndlong_buf_index];
		rc = true;
		goto out;
	}

	/* buffer is depleted and needs refill */
	if (in_interrupt()) {
		/* delay refill in interrupt context to next caller */
		rndlong_buf_index = 0;
		goto out;
	}

	/* refill random long buffer */
	refill_rndlong_buf();
	rndlong_buf_index = ARRAY_SIZE(rndlong_buf);

	/* and provide one random long */
	*v = rndlong_buf[--rndlong_buf_index];
	rc = true;

out:
	spin_unlock_irqrestore(&rndlong_lock, flags);
	return rc;
}
EXPORT_SYMBOL(s390_arch_get_random_long);
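
/*
 * For context (not part of this file): the expected consumer of
 * s390_arch_get_random_long() is the arch_get_random_long() wrapper
 * in asm/archrandom.h. A minimal sketch, assuming the same static-key
 * gate as for the seed variant:
 *
 *	static inline bool __must_check arch_get_random_long(unsigned long *v)
 *	{
 *		if (static_branch_likely(&s390_arch_random_available))
 *			return s390_arch_get_random_long(v);
 *		return false;
 *	}
 *
 * With rndlong_drbg_reseed set to a negative value the exported
 * function above always returns false, so the wrapper degrades to a
 * no-op as described in the comment block before it.
 */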

static int __init s390_arch_random_init(void)
{
	/* all the needed PRNO subfunctions available ? */
	if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG) &&
	    cpacf_query_func(CPACF_PRNO, CPACF_PRNO_SHA512_DRNG_GEN)) {

		/* alloc arch random working buffer */
		arch_rng_buf = kmalloc(ARCH_RNG_BUF_SIZE, GFP_KERNEL);
		if (!arch_rng_buf)
			return -ENOMEM;

		/* kick worker queue job to fill the random buffer */
		queue_delayed_work(system_long_wq,
				   &arch_rng_work, ARCH_REFILL_TICKS);

		/* enable arch random to the outside world */
		static_branch_enable(&s390_arch_random_available);
	}

	return 0;
}
arch_initcall(s390_arch_random_init);