// SPDX-License-Identifier: GPL-2.0
/*  Copyright(c) 2016-20 Intel Corporation. */

#include <cpuid.h>
#include <elf.h>
#include <errno.h>
#include <fcntl.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/auxv.h>
#include "defines.h"
#include "../kselftest_harness.h"
#include "main.h"

static const uint64_t MAGIC = 0x1122334455667788ULL;
static const uint64_t MAGIC2 = 0x8877665544332211ULL;
vdso_sgx_enter_enclave_t vdso_sgx_enter_enclave;

struct vdso_symtab {
	Elf64_Sym *elf_symtab;
	const char *elf_symstrtab;
	Elf64_Word *elf_hashtab;
};

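/*
 * Locate the vDSO's dynamic table by walking its program headers for the
 * PT_DYNAMIC entry. @addr is the vDSO base reported by AT_SYSINFO_EHDR,
 * which points at a complete ELF image, so the ELF header sits at offset 0.
 */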
static Elf64_Dyn *vdso_get_dyntab(void *addr)
{
	Elf64_Ehdr *ehdr = addr;
	Elf64_Phdr *phdrtab = addr + ehdr->e_phoff;
	int i;

	for (i = 0; i < ehdr->e_phnum; i++)
		if (phdrtab[i].p_type == PT_DYNAMIC)
			return addr + phdrtab[i].p_offset;

	return NULL;
}

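/*
 * Return the address of the dynamic-section entry matching @tag. The d_ptr
 * values are treated as offsets from the vDSO base @addr.
 */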
static void *vdso_get_dyn(void *addr, Elf64_Dyn *dyntab, Elf64_Sxword tag)
{
	int i;

	for (i = 0; dyntab[i].d_tag != DT_NULL; i++)
		if (dyntab[i].d_tag == tag)
			return addr + dyntab[i].d_un.d_ptr;

	return NULL;
}

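/* Collect the vDSO's symbol table, string table and DT_HASH table. */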
static bool vdso_get_symtab(void *addr, struct vdso_symtab *symtab)
{
	Elf64_Dyn *dyntab = vdso_get_dyntab(addr);

	/* Bail out if the vDSO image has no PT_DYNAMIC segment. */
	if (!dyntab)
		return false;

	symtab->elf_symtab = vdso_get_dyn(addr, dyntab, DT_SYMTAB);
	if (!symtab->elf_symtab)
		return false;

	symtab->elf_symstrtab = vdso_get_dyn(addr, dyntab, DT_STRTAB);
	if (!symtab->elf_symstrtab)
		return false;

	symtab->elf_hashtab = vdso_get_dyn(addr, dyntab, DT_HASH);
	if (!symtab->elf_hashtab)
		return false;

	return true;
}

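/* The standard System V ABI ELF hash function, as used by DT_HASH. */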
static unsigned long elf_sym_hash(const char *name)
{
	unsigned long h = 0, high;

	while (*name) {
		h = (h << 4) + *name++;
		high = h & 0xf0000000;

		if (high)
			h ^= high >> 24;

		h &= ~high;
	}

	return h;
}

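/*
 * Look up @name in the vDSO's DT_HASH table, whose layout is: nbucket,
 * nchain, bucket[nbucket], chain[nchain]. Hash the name to pick a bucket,
 * then follow the chain until a symbol whose string-table name matches.
 */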
static Elf64_Sym *vdso_symtab_get(struct vdso_symtab *symtab, const char *name)
{
	Elf64_Word bucketnum = symtab->elf_hashtab[0];
	Elf64_Word *buckettab = &symtab->elf_hashtab[2];
	Elf64_Word *chaintab = &symtab->elf_hashtab[2 + bucketnum];
	Elf64_Sym *sym;
	Elf64_Word i;

	for (i = buckettab[elf_sym_hash(name) % bucketnum]; i != STN_UNDEF;
	     i = chaintab[i]) {
		sym = &symtab->elf_symtab[i];
		if (!strcmp(name, &symtab->elf_symstrtab[sym->st_name]))
			return sym;
	}

	return NULL;
}

/*
 * Return the offset in the enclave where the data segment can be found.
 * The first RW segment loaded is the TCS; skip it to get to the data
 * segment.
 */
static off_t encl_get_data_offset(struct encl *encl)
{
	int i;

	for (i = 1; i < encl->nr_segments; i++) {
		struct encl_segment *seg = &encl->segment_tbl[i];

		if (seg->prot == (PROT_READ | PROT_WRITE))
			return seg->offset;
	}

	return -1;
}

FIXTURE(enclave) {
	struct encl encl;
	struct sgx_enclave_run run;
};

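/*
 * Load, measure and build the test enclave, map its segments into the
 * process's address space, and resolve __vdso_sgx_enter_enclave() from the
 * vDSO. On failure, log the segment table and any /dev/sgx_enclave mappings
 * to aid debugging, then tear the enclave down.
 */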
static bool setup_test_encl(unsigned long heap_size, struct encl *encl,
			    struct __test_metadata *_metadata)
{
	Elf64_Sym *sgx_enter_enclave_sym = NULL;
	struct vdso_symtab symtab;
	struct encl_segment *seg;
	char maps_line[256];
	FILE *maps_file;
	unsigned int i;
	void *addr;

	if (!encl_load("test_encl.elf", encl, heap_size)) {
		encl_delete(encl);
		TH_LOG("Failed to load the test enclave.");
		return false;
	}

	if (!encl_measure(encl))
		goto err;

	if (!encl_build(encl))
		goto err;

	/*
	 * Mapping the segments is the only step that a consumer of an
	 * already-built enclave must perform.
	 */
	for (i = 0; i < encl->nr_segments; i++) {
		struct encl_segment *seg = &encl->segment_tbl[i];

		addr = mmap((void *)encl->encl_base + seg->offset, seg->size,
			    seg->prot, MAP_SHARED | MAP_FIXED, encl->fd, 0);
		EXPECT_NE(addr, MAP_FAILED);
		if (addr == MAP_FAILED)
			goto err;
	}

	/* Get vDSO base address */
	addr = (void *)getauxval(AT_SYSINFO_EHDR);
	if (!addr)
		goto err;

	if (!vdso_get_symtab(addr, &symtab))
		goto err;

	sgx_enter_enclave_sym = vdso_symtab_get(&symtab, "__vdso_sgx_enter_enclave");
	if (!sgx_enter_enclave_sym)
		goto err;

	vdso_sgx_enter_enclave = addr + sgx_enter_enclave_sym->st_value;

	return true;

err:
	for (i = 0; i < encl->nr_segments; i++) {
		seg = &encl->segment_tbl[i];

		TH_LOG("0x%016lx 0x%016lx 0x%02x", seg->offset, seg->size, seg->prot);
	}

	maps_file = fopen("/proc/self/maps", "r");
	if (maps_file != NULL) {
		while (fgets(maps_line, sizeof(maps_line), maps_file) != NULL) {
			maps_line[strlen(maps_line) - 1] = '\0';

			if (strstr(maps_line, "/dev/sgx_enclave"))
				TH_LOG("%s", maps_line);
		}

		fclose(maps_file);
	}

	TH_LOG("Failed to initialize the test enclave.");

	/*
	 * Tear the enclave down only after logging: encl_delete() frees
	 * segment_tbl, which the loop above still reads.
	 */
	encl_delete(encl);

	return false;
}

FIXTURE_SETUP(enclave)
{
}

FIXTURE_TEARDOWN(enclave)
{
	encl_delete(&self->encl);
}

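/*
 * Enter the enclave at the TCS selected in @run, passing @op as the first
 * argument. With @clobbered set, the call goes through the vDSO's
 * __vdso_sgx_enter_enclave(); otherwise it goes through the selftest's own
 * sgx_enter_enclave() assembly wrapper.
 */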
#define ENCL_CALL(op, run, clobbered) \
	({ \
		int ret; \
		if ((clobbered)) \
			ret = vdso_sgx_enter_enclave((unsigned long)(op), 0, 0, \
						     EENTER, 0, 0, (run)); \
		else \
			ret = sgx_enter_enclave((void *)(op), NULL, 0, EENTER, NULL, NULL, \
						(run)); \
		ret; \
	})

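/* Expect a normal EEXIT; on failure, log the exception state from @run. */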
#define EXPECT_EEXIT(run) \
	do { \
		EXPECT_EQ((run)->function, EEXIT); \
		if ((run)->function != EEXIT) \
			TH_LOG("0x%02x 0x%02x 0x%016llx", (run)->exception_vector, \
			       (run)->exception_error_code, (run)->exception_addr); \
	} while (0)

TEST_F(enclave, unclobbered_vdso)
{
	struct encl_op_get_from_buf get_op;
	struct encl_op_put_to_buf put_op;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
	put_op.value = MAGIC;

	EXPECT_EQ(ENCL_CALL(&put_op, &self->run, false), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);

	get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
	get_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_op, &self->run, false), 0);

	EXPECT_EQ(get_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);
}

/*
 * A section metric is concatenated from two CPUID words: bits 12-31 of
 * @low supply bits 12-31 of the metric, and bits 0-19 of @high supply
 * bits 32-51 of the metric.
 */
static unsigned long sgx_calc_section_metric(unsigned int low,
					     unsigned int high)
{
	return (low & GENMASK_ULL(31, 12)) +
	       ((high & GENMASK_ULL(19, 0)) << 32);
}

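/*
 * For example, low = 0x00800000 and high = 0x00000001 describe a section
 * of 0x100800000 bytes (4 GiB + 8 MiB).
 */
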
/*
 * Sum the available physical SGX memory across all EPC sections.
 *
 * Return: total physical SGX memory available on the system
 */
static unsigned long get_total_epc_mem(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned long total_size = 0;
	unsigned int type;
	int section = 0;

	while (true) {
		__cpuid_count(SGX_CPUID, section + SGX_CPUID_EPC, eax, ebx, ecx, edx);

		type = eax & SGX_CPUID_EPC_MASK;
		if (type == SGX_CPUID_EPC_INVALID)
			break;

		if (type != SGX_CPUID_EPC_SECTION)
			break;

		total_size += sgx_calc_section_metric(ecx, edx);

		section++;
	}

	return total_size;
}

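/*
 * Create an enclave whose heap is as large as all of the physical EPC, so
 * that the enclave cannot be fully resident; running the same put/get
 * sequence as unclobbered_vdso is then expected to exercise the kernel's
 * EPC page reclaim path.
 */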
TEST_F(enclave, unclobbered_vdso_oversubscribed)
{
	struct encl_op_get_from_buf get_op;
	struct encl_op_put_to_buf put_op;
	unsigned long total_mem;

	total_mem = get_total_epc_mem();
	ASSERT_NE(total_mem, 0);
	ASSERT_TRUE(setup_test_encl(total_mem, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
	put_op.value = MAGIC;

	EXPECT_EQ(ENCL_CALL(&put_op, &self->run, false), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);

	get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
	get_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_op, &self->run, false), 0);

	EXPECT_EQ(get_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);
}

TEST_F(enclave, clobbered_vdso)
{
	struct encl_op_get_from_buf get_op;
	struct encl_op_put_to_buf put_op;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
	put_op.value = MAGIC;

	EXPECT_EQ(ENCL_CALL(&put_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);

	get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
	get_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_op, &self->run, true), 0);

	EXPECT_EQ(get_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);
}

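/*
 * Minimal user handler for __vdso_sgx_enter_enclave(): invoked on each
 * enclave exit, it clears @run->user_data (preset to a poison value by the
 * test) and returns 0 so the vDSO call returns to its caller instead of
 * re-entering the enclave.
 */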
static int test_handler(long rdi, long rsi, long rdx, long ursp, long r8, long r9,
			struct sgx_enclave_run *run)
{
	run->user_data = 0;

	return 0;
}

TEST_F(enclave, clobbered_vdso_and_user_function)
{
	struct encl_op_get_from_buf get_op;
	struct encl_op_put_to_buf put_op;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	self->run.user_handler = (__u64)test_handler;
	self->run.user_data = 0xdeadbeef;

	put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
	put_op.value = MAGIC;

	EXPECT_EQ(ENCL_CALL(&put_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);

	get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
	get_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_op, &self->run, true), 0);

	EXPECT_EQ(get_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);
}

/*
 * Sanity check that it is possible to enter either of the two hardcoded
 * TCS pages.
 */
TEST_F(enclave, tcs_entry)
{
	struct encl_op_header op;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	op.type = ENCL_OP_NOP;

	EXPECT_EQ(ENCL_CALL(&op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/* Move to the next TCS. */
	self->run.tcs = self->encl.encl_base + PAGE_SIZE;

	EXPECT_EQ(ENCL_CALL(&op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
}

/*
 * The second page of the .data segment is used to test changing PTE
 * permissions. This page falls within the local encl_buffer of the test
 * enclave.
 *
 * 1) Start with a sanity check: a value is written to the target page
 *    within the enclave and read back, ensuring the target page can be
 *    written to.
 * 2) Change the PTE permissions (RW -> RO) of the target page within the
 *    enclave.
 * 3) Repeat (1) - this time expecting a regular #PF communicated via the
 *    vDSO.
 * 4) Change the PTE permissions of the target page within the enclave back
 *    to RW.
 * 5) Repeat (1) by resuming the enclave; writing to and reading from the
 *    target page is now expected to succeed.
 */
TEST_F(enclave, pte_permissions)
{
	struct encl_op_get_from_addr get_addr_op;
	struct encl_op_put_to_addr put_addr_op;
	unsigned long data_start;
	int ret;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	data_start = self->encl.encl_base +
		     encl_get_data_offset(&self->encl) +
		     PAGE_SIZE;

	/*
	 * Sanity check to ensure it is possible to write to the page that
	 * will have its permissions manipulated.
	 */

	/* Write MAGIC to page */
	put_addr_op.value = MAGIC;
	put_addr_op.addr = data_start;
	put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/*
	 * Read memory that was just written to, confirming that it is the
	 * value previously written (MAGIC).
	 */
	get_addr_op.value = 0;
	get_addr_op.addr = data_start;
	get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	EXPECT_EQ(get_addr_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/* Change PTE permissions of target page within the enclave */
	ret = mprotect((void *)data_start, PAGE_SIZE, PROT_READ);
	if (ret)
		perror("mprotect");

	/*
	 * The PTE permissions of the target page are now read-only while
	 * its EPCM permissions are still RW. Attempt to write to the page,
	 * expecting a regular #PF.
	 */

	put_addr_op.value = MAGIC2;

	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

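	/*
	 * Expect a #PF (vector 14) with error code 0x7: present, write and
	 * user-mode.
	 */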
	EXPECT_EQ(self->run.exception_vector, 14);
	EXPECT_EQ(self->run.exception_error_code, 0x7);
	EXPECT_EQ(self->run.exception_addr, data_start);

	self->run.exception_vector = 0;
	self->run.exception_error_code = 0;
	self->run.exception_addr = 0;

	/*
	 * Change the PTE permissions back so that the enclave can write to
	 * the target page, then resume the enclave - no exceptions are
	 * expected this time.
	 */
	ret = mprotect((void *)data_start, PAGE_SIZE, PROT_READ | PROT_WRITE);
	if (ret)
		perror("mprotect");

	EXPECT_EQ(vdso_sgx_enter_enclave((unsigned long)&put_addr_op, 0,
					 0, ERESUME, 0, 0, &self->run),
		  0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	get_addr_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	EXPECT_EQ(get_addr_op.value, MAGIC2);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
}

TEST_HARNESS_MAIN