// SPDX-License-Identifier: GPL-2.0
/*  Copyright(c) 2016-20 Intel Corporation. */

#include <cpuid.h>
#include <elf.h>
#include <errno.h>
#include <fcntl.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/auxv.h>
#include "defines.h"
#include "../kselftest_harness.h"
#include "main.h"

static const uint64_t MAGIC = 0x1122334455667788ULL;
static const uint64_t MAGIC2 = 0x8877665544332211ULL;
vdso_sgx_enter_enclave_t vdso_sgx_enter_enclave;

struct vdso_symtab {
	Elf64_Sym *elf_symtab;
	const char *elf_symstrtab;
	Elf64_Word *elf_hashtab;
};

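/*
 * Locate the vDSO's dynamic section by walking its program headers
 * until the PT_DYNAMIC entry is found.
 */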
static Elf64_Dyn *vdso_get_dyntab(void *addr)
{
	Elf64_Ehdr *ehdr = addr;
	Elf64_Phdr *phdrtab = addr + ehdr->e_phoff;
	int i;

	for (i = 0; i < ehdr->e_phnum; i++)
		if (phdrtab[i].p_type == PT_DYNAMIC)
			return addr + phdrtab[i].p_offset;

	return NULL;
}

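/*
 * Return the address of the dynamic entry with the given tag, or NULL
 * if the tag is not present.
 */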
static void *vdso_get_dyn(void *addr, Elf64_Dyn *dyntab, Elf64_Sxword tag)
{
	int i;

	for (i = 0; dyntab[i].d_tag != DT_NULL; i++)
		if (dyntab[i].d_tag == tag)
			return addr + dyntab[i].d_un.d_ptr;

	return NULL;
}

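/*
 * Resolve the vDSO's symbol table, string table and SysV hash table
 * from its dynamic section.
 */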
static bool vdso_get_symtab(void *addr, struct vdso_symtab *symtab)
{
	Elf64_Dyn *dyntab = vdso_get_dyntab(addr);

	symtab->elf_symtab = vdso_get_dyn(addr, dyntab, DT_SYMTAB);
	if (!symtab->elf_symtab)
		return false;

	symtab->elf_symstrtab = vdso_get_dyn(addr, dyntab, DT_STRTAB);
	if (!symtab->elf_symstrtab)
		return false;

	symtab->elf_hashtab = vdso_get_dyn(addr, dyntab, DT_HASH);
	if (!symtab->elf_hashtab)
		return false;

	return true;
}

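/* Classic SysV ELF hash function, as specified in the ELF gABI. */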
static unsigned long elf_sym_hash(const char *name)
{
	unsigned long h = 0, high;

	while (*name) {
		h = (h << 4) + *name++;
		high = h & 0xf0000000;

		if (high)
			h ^= high >> 24;

		h &= ~high;
	}

	return h;
}

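/*
 * Look up @name in the vDSO symbol table.  A DT_HASH table starts with
 * the nbucket and nchain words, followed by the bucket and chain
 * arrays, which is why the buckets live at index 2 and the chains
 * directly after them.
 */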
static Elf64_Sym *vdso_symtab_get(struct vdso_symtab *symtab, const char *name)
{
	Elf64_Word bucketnum = symtab->elf_hashtab[0];
	Elf64_Word *buckettab = &symtab->elf_hashtab[2];
	Elf64_Word *chaintab = &symtab->elf_hashtab[2 + bucketnum];
	Elf64_Sym *sym;
	Elf64_Word i;

	for (i = buckettab[elf_sym_hash(name) % bucketnum]; i != STN_UNDEF;
	     i = chaintab[i]) {
		sym = &symtab->elf_symtab[i];
		if (!strcmp(name, &symtab->elf_symstrtab[sym->st_name]))
			return sym;
	}

	return NULL;
}

/*
 * Return the offset in the enclave where the data segment can be found.
 * The first RW segment loaded is the TCS; skip it to reach the data
 * segment.
 */
static off_t encl_get_data_offset(struct encl *encl)
{
	int i;

	for (i = 1; i < encl->nr_segments; i++) {
		struct encl_segment *seg = &encl->segment_tbl[i];

		if (seg->prot == (PROT_READ | PROT_WRITE))
			return seg->offset;
	}

	return -1;
}

FIXTURE(enclave) {
	struct encl encl;
	struct sgx_enclave_run run;
};

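/*
 * Load, measure and build the test enclave, map its segments into the
 * calling process, and resolve __vdso_sgx_enter_enclave from the vDSO.
 * On failure, dump the segment table and any /dev/sgx_enclave mappings
 * to aid debugging.
 */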
static bool setup_test_encl(unsigned long heap_size, struct encl *encl,
			    struct __test_metadata *_metadata)
{
	Elf64_Sym *sgx_enter_enclave_sym = NULL;
	struct vdso_symtab symtab;
	struct encl_segment *seg;
	char maps_line[256];
	FILE *maps_file;
	unsigned int i;
	void *addr;

	if (!encl_load("test_encl.elf", encl, heap_size)) {
		encl_delete(encl);
		TH_LOG("Failed to load the test enclave.");
		return false;
	}

	if (!encl_measure(encl))
		goto err;

	if (!encl_build(encl))
		goto err;

	/*
	 * Mapping the enclave segments is the only step that an enclave
	 * consumer must perform itself.
	 */
	for (i = 0; i < encl->nr_segments; i++) {
		struct encl_segment *seg = &encl->segment_tbl[i];

		addr = mmap((void *)encl->encl_base + seg->offset, seg->size,
			    seg->prot, MAP_SHARED | MAP_FIXED, encl->fd, 0);
		EXPECT_NE(addr, MAP_FAILED);
		if (addr == MAP_FAILED)
			goto err;
	}

	/* Get vDSO base address */
	addr = (void *)getauxval(AT_SYSINFO_EHDR);
	if (!addr)
		goto err;

	if (!vdso_get_symtab(addr, &symtab))
		goto err;

	sgx_enter_enclave_sym = vdso_symtab_get(&symtab, "__vdso_sgx_enter_enclave");
	if (!sgx_enter_enclave_sym)
		goto err;

	vdso_sgx_enter_enclave = addr + sgx_enter_enclave_sym->st_value;

	return true;

err:
	for (i = 0; i < encl->nr_segments; i++) {
		seg = &encl->segment_tbl[i];

		TH_LOG("0x%016lx 0x%016lx 0x%02x", seg->offset, seg->size, seg->prot);
	}

	maps_file = fopen("/proc/self/maps", "r");
	if (maps_file != NULL) {
		while (fgets(maps_line, sizeof(maps_line), maps_file) != NULL) {
			maps_line[strlen(maps_line) - 1] = '\0';

			if (strstr(maps_line, "/dev/sgx_enclave"))
				TH_LOG("%s", maps_line);
		}

		fclose(maps_file);
	}

	TH_LOG("Failed to initialize the test enclave.");

	encl_delete(encl);

	return false;
}

FIXTURE_SETUP(enclave)
{
}

FIXTURE_TEARDOWN(enclave)
{
	encl_delete(&self->encl);
}

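/*
 * Enter the enclave with EENTER, either through the raw vDSO function
 * pointer (@clobbered true) or through the sgx_enter_enclave() helper
 * (@clobbered false).
 */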
#define ENCL_CALL(op, run, clobbered) \
	({ \
		int ret; \
		if ((clobbered)) \
			ret = vdso_sgx_enter_enclave((unsigned long)(op), 0, 0, \
						     EENTER, 0, 0, (run)); \
		else \
			ret = sgx_enter_enclave((void *)(op), NULL, 0, EENTER, NULL, NULL, \
						(run)); \
		ret; \
	})

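/*
 * Assert that the enclave exited cleanly via EEXIT; otherwise log the
 * exception vector, error code and fault address.
 */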
#define EXPECT_EEXIT(run) \
	do { \
		EXPECT_EQ((run)->function, EEXIT); \
		if ((run)->function != EEXIT) \
			TH_LOG("0x%02x 0x%02x 0x%016llx", (run)->exception_vector, \
			       (run)->exception_error_code, (run)->exception_addr); \
	} while (0)

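/*
 * Write MAGIC to the enclave's buffer and read it back, entering the
 * enclave with @clobbered false.
 */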
TEST_F(enclave, unclobbered_vdso)
{
	struct encl_op_get_from_buf get_op;
	struct encl_op_put_to_buf put_op;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
	put_op.value = MAGIC;

	EXPECT_EQ(ENCL_CALL(&put_op, &self->run, false), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);

	get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
	get_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_op, &self->run, false), 0);

	EXPECT_EQ(get_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);
}

/*
 * A section metric is concatenated from two CPUID words: bits 12-31 of
 * @low become bits 12-31 of the metric, and bits 0-19 of @high become
 * bits 32-51 of the metric.
 */
static unsigned long sgx_calc_section_metric(unsigned int low,
					     unsigned int high)
{
	return (low & GENMASK_ULL(31, 12)) +
	       ((high & GENMASK_ULL(19, 0)) << 32);
}

/*
 * Sum the physical SGX memory available across all EPC sections.
 *
 * Return: total physical SGX memory available on the system
 */
static unsigned long get_total_epc_mem(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned long total_size = 0;
	unsigned int type;
	int section = 0;

	while (true) {
		__cpuid_count(SGX_CPUID, section + SGX_CPUID_EPC, eax, ebx, ecx, edx);

		type = eax & SGX_CPUID_EPC_MASK;
		if (type == SGX_CPUID_EPC_INVALID)
			break;

		if (type != SGX_CPUID_EPC_SECTION)
			break;

		total_size += sgx_calc_section_metric(ecx, edx);

		section++;
	}

	return total_size;
}

TEST_F(enclave, unclobbered_vdso_oversubscribed)
{
	struct encl_op_get_from_buf get_op;
	struct encl_op_put_to_buf put_op;
	unsigned long total_mem;

	total_mem = get_total_epc_mem();
	ASSERT_NE(total_mem, 0);
	ASSERT_TRUE(setup_test_encl(total_mem, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
	put_op.value = MAGIC;

	EXPECT_EQ(ENCL_CALL(&put_op, &self->run, false), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);

	get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
	get_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_op, &self->run, false), 0);

	EXPECT_EQ(get_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);
}

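/*
 * Same round trip as unclobbered_vdso, but entering the enclave through
 * the vDSO function directly (@clobbered true).
 */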
TEST_F(enclave, clobbered_vdso)
{
	struct encl_op_get_from_buf get_op;
	struct encl_op_put_to_buf put_op;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
	put_op.value = MAGIC;

	EXPECT_EQ(ENCL_CALL(&put_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);

	get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
	get_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_op, &self->run, true), 0);

	EXPECT_EQ(get_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);
}

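/*
 * User handler that clears run->user_data (primed with 0xdeadbeef by
 * the test) so the caller can verify that the handler actually ran.
 */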
static int test_handler(long rdi, long rsi, long rdx, long ursp, long r8, long r9,
			struct sgx_enclave_run *run)
{
	run->user_data = 0;

	return 0;
}

TEST_F(enclave, clobbered_vdso_and_user_function)
{
	struct encl_op_get_from_buf get_op;
	struct encl_op_put_to_buf put_op;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	self->run.user_handler = (__u64)test_handler;
	self->run.user_data = 0xdeadbeef;

	put_op.header.type = ENCL_OP_PUT_TO_BUFFER;
	put_op.value = MAGIC;

	EXPECT_EQ(ENCL_CALL(&put_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);

	get_op.header.type = ENCL_OP_GET_FROM_BUFFER;
	get_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_op, &self->run, true), 0);

	EXPECT_EQ(get_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.user_data, 0);
}

/*
 * Sanity check that it is possible to enter either of the two hardcoded
 * TCS pages.
 */
TEST_F(enclave, tcs_entry)
{
	struct encl_op_header op;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	op.type = ENCL_OP_NOP;

	EXPECT_EQ(ENCL_CALL(&op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/* Move to the next TCS. */
	self->run.tcs = self->encl.encl_base + PAGE_SIZE;

	EXPECT_EQ(ENCL_CALL(&op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
}

/*
 * The second page of the .data segment is used to test changing PTE
 * permissions.  The page lies within the local encl_buffer of the test
 * enclave.
 *
 * 1) Start with a sanity check: a value is written to the target page
 *    within the enclave and read back to ensure the target page can be
 *    written to.
 * 2) Change PTE permissions (RW -> RO) of the target page within the
 *    enclave.
 * 3) Repeat (1) - this time expecting a regular #PF communicated via
 *    the vDSO.
 * 4) Change PTE permissions of the target page within the enclave back
 *    to RW.
 * 5) Repeat (1) by resuming the enclave; it is now expected to be
 *    possible to write to and read from the target page within the
 *    enclave.
 */
TEST_F(enclave, pte_permissions)
{
	struct encl_op_get_from_addr get_addr_op;
	struct encl_op_put_to_addr put_addr_op;
	unsigned long data_start;
	int ret;

	ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata));

	memset(&self->run, 0, sizeof(self->run));
	self->run.tcs = self->encl.encl_base;

	data_start = self->encl.encl_base +
		     encl_get_data_offset(&self->encl) +
		     PAGE_SIZE;

	/*
	 * Sanity check to ensure it is possible to write to the page whose
	 * permissions will be manipulated.
	 */

	/* Write MAGIC to the target page */
	put_addr_op.value = MAGIC;
	put_addr_op.addr = data_start;
	put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/*
	 * Read the memory that was just written, confirming that it holds
	 * the previously written value (MAGIC).
	 */
	get_addr_op.value = 0;
	get_addr_op.addr = data_start;
	get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	EXPECT_EQ(get_addr_op.value, MAGIC);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	/* Change PTE permissions of the target page within the enclave */
	ret = mprotect((void *)data_start, PAGE_SIZE, PROT_READ);
	if (ret)
		perror("mprotect");

	/*
	 * PTE permissions of the target page are now read-only while the
	 * EPCM permissions remain RW; attempt to write to the page,
	 * expecting a regular #PF.
	 */

	put_addr_op.value = MAGIC2;

	EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0);

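	/* Vector 14 is #PF; error code 0x7 = present, write, user-mode access. */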
	EXPECT_EQ(self->run.exception_vector, 14);
	EXPECT_EQ(self->run.exception_error_code, 0x7);
	EXPECT_EQ(self->run.exception_addr, data_start);

	self->run.exception_vector = 0;
	self->run.exception_error_code = 0;
	self->run.exception_addr = 0;

	/*
	 * Change the PTE permissions back so that the enclave can write to
	 * the target page, and resume the enclave - no exceptions are
	 * expected this time.
	 */
	ret = mprotect((void *)data_start, PAGE_SIZE, PROT_READ | PROT_WRITE);
	if (ret)
		perror("mprotect");

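	/* ERESUME restarts the enclave thread at the faulting instruction. */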
	EXPECT_EQ(vdso_sgx_enter_enclave((unsigned long)&put_addr_op, 0,
					 0, ERESUME, 0, 0, &self->run),
		  0);

	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);

	get_addr_op.value = 0;

	EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0);

	EXPECT_EQ(get_addr_op.value, MAGIC2);
	EXPECT_EEXIT(&self->run);
	EXPECT_EQ(self->run.exception_vector, 0);
	EXPECT_EQ(self->run.exception_error_code, 0);
	EXPECT_EQ(self->run.exception_addr, 0);
}

TEST_HARNESS_MAIN