xref: /openbmc/linux/arch/s390/boot/kaslr.c (revision 15e3ae36)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2019
 */
#include <asm/mem_detect.h>
#include <asm/pgtable.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/sclp.h>
#include "compressed/decompressor.h"
#include "boot.h"

#define PRNG_MODE_TDES	 1
#define PRNG_MODE_SHA512 2
#define PRNG_MODE_TRNG	 3

struct prno_parm {
	u32 res;
	u32 reseed_counter;
	u64 stream_bytes;
	u8  V[112];
	u8  C[112];
};

struct prng_parm {
	u8  parm_block[32];
	u32 reseed_counter;
	u64 byte_counter;
};

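/*
 * Pick the best available CPACF random number facility: prefer the
 * TRNG, then the SHA-512 based DRNG, and fall back to the TDES mode
 * of KMC-PRNG. Returns 0 (KASLR disabled) if the CPU has no PRNG
 * support at all.
 */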
static int check_prng(void)
{
	if (!cpacf_query_func(CPACF_KMC, CPACF_KMC_PRNG)) {
		sclp_early_printk("KASLR disabled: CPU has no PRNG\n");
		return 0;
	}
	if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG))
		return PRNG_MODE_TRNG;
	if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_SHA512_DRNG_GEN))
		return PRNG_MODE_SHA512;
	else
		return PRNG_MODE_TDES;
}

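/*
 * Return a random value in [0, limit). The entropy source depends on
 * the mode reported by check_prng(): TRNG and the SHA-512 DRNG are
 * used directly, while the TDES fallback mixes the TOD clock into the
 * parameter block and runs 16 KMC-PRNG iterations before drawing the
 * final value. Without any facility, 0 is returned and the caller
 * treats that as "KASLR disabled".
 */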
static unsigned long get_random(unsigned long limit)
{
	struct prng_parm prng = {
		/* initial parameter block for tdes mode, copied from libica */
		.parm_block = {
			0x0F, 0x2B, 0x8E, 0x63, 0x8C, 0x8E, 0xD2, 0x52,
			0x64, 0xB7, 0xA0, 0x7B, 0x75, 0x28, 0xB8, 0xF4,
			0x75, 0x5F, 0xD2, 0xA6, 0x8D, 0x97, 0x11, 0xFF,
			0x49, 0xD8, 0x23, 0xF3, 0x7E, 0x21, 0xEC, 0xA0
		},
	};
	unsigned long seed, random;
	struct prno_parm prno;
	__u64 entropy[4];
	int mode, i;

	mode = check_prng();
	seed = get_tod_clock_fast();
	switch (mode) {
	case PRNG_MODE_TRNG:
		cpacf_trng(NULL, 0, (u8 *) &random, sizeof(random));
		break;
	case PRNG_MODE_SHA512:
		cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED, &prno, NULL, 0,
			   (u8 *) &seed, sizeof(seed));
		cpacf_prno(CPACF_PRNO_SHA512_DRNG_GEN, &prno, (u8 *) &random,
			   sizeof(random), NULL, 0);
		break;
	case PRNG_MODE_TDES:
		/* add entropy */
		*(unsigned long *) prng.parm_block ^= seed;
		for (i = 0; i < 16; i++) {
			cpacf_kmc(CPACF_KMC_PRNG, prng.parm_block,
				  (u8 *) entropy, (u8 *) entropy,
				  sizeof(entropy));
			memcpy(prng.parm_block, entropy, sizeof(entropy));
		}
		random = seed;
		cpacf_kmc(CPACF_KMC_PRNG, prng.parm_block, (u8 *) &random,
			  (u8 *) &random, sizeof(random));
		break;
	default:
		random = 0;
	}
	return random % limit;
}

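/*
 * Pick a random, THREAD_SIZE aligned base address for the decompressed
 * kernel. Candidate ranges come from the detected memory blocks, are
 * capped by the memory limit (and by the estimated KASAN reservation
 * at the end of memory), and must lie above safe_addr so that the
 * decompressor, the initrd and other early data are not overwritten.
 * Returns 0 if no suitable base was found, which disables KASLR.
 */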
unsigned long get_random_base(unsigned long safe_addr)
{
	unsigned long memory_limit = memory_end_set ? memory_end : 0;
	unsigned long base, start, end, kernel_size;
	unsigned long block_sum, offset;
	unsigned long kasan_needs;
	int i;

	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE) {
		if (safe_addr < INITRD_START + INITRD_SIZE)
			safe_addr = INITRD_START + INITRD_SIZE;
	}
	safe_addr = ALIGN(safe_addr, THREAD_SIZE);

	if (IS_ENABLED(CONFIG_KASAN)) {
		/*
		 * Estimate kasan memory requirements, which it will reserve
		 * at the very end of available physical memory. To estimate
		 * that, we take into account that kasan would require
		 * 1/8 of available physical memory (for shadow memory) +
		 * creating page tables for the whole memory + shadow memory
		 * region (1 + 1/8). To keep page tables estimates simple take
		 * the double of combined ptes size.
		 */
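		/*
		 * Illustrative numbers only, assuming 4 KB pages and 2 KB
		 * page tables of 256 entries: with 4 GB of usable memory,
		 * the shadow estimate is 512 MB and the page table estimate
		 * is 2 * (4.5 GB / 4 KB / 256) * 2 KB, roughly 18 MB.
		 */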
		memory_limit = get_mem_detect_end();
		if (memory_end_set && memory_limit > memory_end)
			memory_limit = memory_end;

		/* for shadow memory */
		kasan_needs = memory_limit / 8;
		/* for paging structures */
		kasan_needs += (memory_limit + kasan_needs) / PAGE_SIZE /
			       _PAGE_ENTRIES * _PAGE_TABLE_SIZE * 2;
		memory_limit -= kasan_needs;
	}

	kernel_size = vmlinux.image_size + vmlinux.bss_size;
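	/*
	 * First pass: sum up the number of candidate base addresses, i.e.
	 * how many bytes are left over in each memory block that is large
	 * enough to hold the kernel image.
	 */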
	block_sum = 0;
	for_each_mem_detect_block(i, &start, &end) {
		if (memory_limit) {
			if (start >= memory_limit)
				break;
			if (end > memory_limit)
				end = memory_limit;
		}
		if (end - start < kernel_size)
			continue;
		block_sum += end - start - kernel_size;
	}
	if (!block_sum) {
		sclp_early_printk("KASLR disabled: not enough memory\n");
		return 0;
	}

	base = get_random(block_sum);
	if (base == 0)
		return 0;
	if (base < safe_addr)
		base = safe_addr;
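	/*
	 * Second pass: walk the blocks again and translate the random
	 * offset into a physical address inside the block it falls in,
	 * aligned down to THREAD_SIZE.
	 */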
	block_sum = offset = 0;
	for_each_mem_detect_block(i, &start, &end) {
		if (memory_limit) {
			if (start >= memory_limit)
				break;
			if (end > memory_limit)
				end = memory_limit;
		}
		if (end - start < kernel_size)
			continue;
		block_sum += end - start - kernel_size;
		if (base <= block_sum) {
			base = start + base - offset;
			base = ALIGN_DOWN(base, THREAD_SIZE);
			break;
		}
		offset = block_sum;
	}
	return base;
}