/* xref: /openbmc/linux/tools/testing/selftests/sgx/load.c (revision 5a734a0e) */
// SPDX-License-Identifier: GPL-2.0
/*  Copyright(c) 2016-20 Intel Corporation. */

#include <assert.h>
#include <elf.h>
#include <errno.h>
#include <fcntl.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include "defines.h"
#include "main.h"

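/*
 * Tear down an enclave: unmap the enclave range and the source ELF
 * mapping, close the device fd, free the heap backing and the segment
 * table, then zero the struct so stale state cannot be reused.
 */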
void encl_delete(struct encl *encl)
{
	struct encl_segment *heap_seg;

	if (encl->encl_base)
		munmap((void *)encl->encl_base, encl->encl_size);

	if (encl->bin)
		munmap(encl->bin, encl->bin_size);

	if (encl->fd)
		close(encl->fd);

	if (encl->segment_tbl) {
		/* The heap is always the last entry in the segment table. */
		heap_seg = &encl->segment_tbl[encl->nr_segments - 1];
		munmap(heap_seg->src, heap_seg->size);
		free(encl->segment_tbl);
	}

	memset(encl, 0, sizeof(*encl));
}

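/*
 * Map the enclave ELF binary read-only so its headers and segment
 * payloads can be parsed and later copied into the enclave.
 */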
static bool encl_map_bin(const char *path, struct encl *encl)
{
	struct stat sb;
	void *bin;
	int ret;
	int fd;

	fd = open(path, O_RDONLY);
	if (fd == -1) {
		perror("enclave executable open()");
		return false;
	}

	ret = stat(path, &sb);
	if (ret) {
		perror("enclave executable stat()");
		goto err;
	}

	bin = mmap(NULL, sb.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	if (bin == MAP_FAILED) {
		perror("enclave executable mmap()");
		goto err;
	}

	encl->bin = bin;
	encl->bin_size = sb.st_size;

	close(fd);
	return true;

err:
	close(fd);
	return false;
}

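/*
 * SGX_IOC_ENCLAVE_CREATE: fill out the SECS (SGX Enclave Control
 * Structure) with the enclave's base, size, attributes and XSAVE
 * feature request mask, then ask the driver to perform ECREATE.
 */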
static bool encl_ioc_create(struct encl *encl)
{
	struct sgx_secs *secs = &encl->secs;
	struct sgx_enclave_create ioc;
	int rc;

	assert(encl->encl_base != 0);

	memset(secs, 0, sizeof(*secs));
	secs->ssa_frame_size = 1;
	secs->attributes = SGX_ATTR_MODE64BIT;
	secs->xfrm = 3;	/* x87 and SSE state, the architectural minimum */
	secs->base = encl->encl_base;
	secs->size = encl->encl_size;

	ioc.src = (unsigned long)secs;
	rc = ioctl(encl->fd, SGX_IOC_ENCLAVE_CREATE, &ioc);
	if (rc) {
		perror("SGX_IOC_ENCLAVE_CREATE failed");
		munmap((void *)secs->base, encl->encl_size);
		return false;
	}

	return true;
}

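/*
 * SGX_IOC_ENCLAVE_ADD_PAGES: copy one segment's source pages into the
 * enclave at seg->offset with the page type and permissions encoded in
 * seg->flags, optionally extending the measurement (MRENCLAVE).
 */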
static bool encl_ioc_add_pages(struct encl *encl, struct encl_segment *seg)
{
	struct sgx_enclave_add_pages ioc;
	struct sgx_secinfo secinfo;
	int rc;

	memset(&secinfo, 0, sizeof(secinfo));
	secinfo.flags = seg->flags;

	ioc.src = (uint64_t)seg->src;
	ioc.offset = seg->offset;
	ioc.length = seg->size;
	ioc.secinfo = (unsigned long)&secinfo;
	if (seg->measure)
		ioc.flags = SGX_PAGE_MEASURE;
	else
		ioc.flags = 0;

	rc = ioctl(encl->fd, SGX_IOC_ENCLAVE_ADD_PAGES, &ioc);
	if (rc < 0) {
		perror("SGX_IOC_ENCLAVE_ADD_PAGES failed");
		return false;
	}

	return true;
}

/*
 * Parse the enclave binary's symbol table to locate and return the
 * address of the provided symbol, or 0 if it cannot be found.
 */
uint64_t encl_get_entry(struct encl *encl, const char *symbol)
{
	Elf64_Sym *symtab = NULL;
	char *sym_names = NULL;
	Elf64_Shdr *sections;
	Elf64_Ehdr *ehdr;
	int num_sym = 0;
	int i;

	ehdr = encl->bin;
	sections = encl->bin + ehdr->e_shoff;

	for (i = 0; i < ehdr->e_shnum; i++) {
		if (sections[i].sh_type == SHT_SYMTAB) {
			symtab = (Elf64_Sym *)((char *)encl->bin + sections[i].sh_offset);
			num_sym = sections[i].sh_size / sections[i].sh_entsize;
			break;
		}
	}

	/* Assumes the first SHT_STRTAB section holds the symbol names. */
	for (i = 0; i < ehdr->e_shnum; i++) {
		if (sections[i].sh_type == SHT_STRTAB) {
			sym_names = (char *)encl->bin + sections[i].sh_offset;
			break;
		}
	}

	if (!symtab || !sym_names)
		return 0;

	for (i = 0; i < num_sym; i++) {
		Elf64_Sym *sym = &symtab[i];

		if (!strcmp(symbol, sym_names + sym->st_name))
			return (uint64_t)sym->st_value;
	}

	return 0;
}

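/*
 * Load an enclave binary: verify that /dev/sgx_enclave allows both
 * PROT_READ and PROT_EXEC mappings, build a segment table from the
 * ELF PT_LOAD program headers, append an unmeasured heap segment of
 * heap_size bytes, and round the enclave size up to a power of two,
 * as SGX requires.
 */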
bool encl_load(const char *path, struct encl *encl, unsigned long heap_size)
{
	const char device_path[] = "/dev/sgx_enclave";
	struct encl_segment *seg;
	Elf64_Phdr *phdr_tbl;
	off_t src_offset;
	Elf64_Ehdr *ehdr;
	struct stat sb;
	void *ptr;
	int i, j;
	int ret;
	int fd = -1;

	memset(encl, 0, sizeof(*encl));

	fd = open(device_path, O_RDWR);
	if (fd < 0) {
		perror("Unable to open /dev/sgx_enclave");
		goto err;
	}

	ret = stat(device_path, &sb);
	if (ret) {
		perror("device file stat()");
		goto err;
	}

	/* Probe that the device node can be mapped readable at all. */
	ptr = mmap(NULL, PAGE_SIZE, PROT_READ, MAP_SHARED, fd, 0);
	if (ptr == MAP_FAILED) {
		perror("mmap for read");
		goto err;
	}
	munmap(ptr, PAGE_SIZE);

#define ERR_MSG \
"mmap() succeeded for PROT_READ, but failed for PROT_EXEC.\n" \
" Check that /dev does not have noexec set:\n" \
" \tmount | grep \"/dev .*noexec\"\n" \
" If so, remount it executable: mount -o remount,exec /dev\n\n"

	/* An executable mapping fails if /dev is mounted noexec. */
	ptr = mmap(NULL, PAGE_SIZE, PROT_EXEC, MAP_SHARED, fd, 0);
	if (ptr == MAP_FAILED) {
		fprintf(stderr, ERR_MSG);
		goto err;
	}
	munmap(ptr, PAGE_SIZE);

	encl->fd = fd;
	/* encl now owns the fd; prevent a double close() in the error path. */
	fd = -1;

	if (!encl_map_bin(path, encl))
		goto err;

	ehdr = encl->bin;
	phdr_tbl = encl->bin + ehdr->e_phoff;

	encl->nr_segments = 1; /* one for the heap */

	for (i = 0; i < ehdr->e_phnum; i++) {
		Elf64_Phdr *phdr = &phdr_tbl[i];

		if (phdr->p_type == PT_LOAD)
			encl->nr_segments++;
	}

	encl->segment_tbl = calloc(encl->nr_segments,
				   sizeof(struct encl_segment));
	if (!encl->segment_tbl)
		goto err;

	for (i = 0, j = 0; i < ehdr->e_phnum; i++) {
		Elf64_Phdr *phdr = &phdr_tbl[i];
		unsigned int flags = phdr->p_flags;

		if (phdr->p_type != PT_LOAD)
			continue;

		seg = &encl->segment_tbl[j];

		if (flags & ~(PF_R | PF_W | PF_X)) {
			fprintf(stderr,
				"Segment %d has invalid flags 0x%02x.\n", i,
				phdr->p_flags);
			goto err;
		}

		if (j == 0 && flags != (PF_R | PF_W)) {
			fprintf(stderr,
				"TCS has invalid segment flags 0x%02x.\n",
				phdr->p_flags);
			goto err;
		}

		if (j == 0) {
			/* The first segment holds the TCS and must be RW. */
			src_offset = phdr->p_offset & PAGE_MASK;
			encl->src = encl->bin + src_offset;

			seg->prot = PROT_READ | PROT_WRITE;
			seg->flags = SGX_PAGE_TYPE_TCS << 8;
		} else {
			seg->prot = (phdr->p_flags & PF_R) ? PROT_READ : 0;
			seg->prot |= (phdr->p_flags & PF_W) ? PROT_WRITE : 0;
			seg->prot |= (phdr->p_flags & PF_X) ? PROT_EXEC : 0;
			seg->flags = (SGX_PAGE_TYPE_REG << 8) | seg->prot;
		}

		seg->offset = (phdr->p_offset & PAGE_MASK) - src_offset;
		seg->size = (phdr->p_filesz + PAGE_SIZE - 1) & PAGE_MASK;
		seg->src = encl->src + seg->offset;
		seg->measure = true;

		j++;
	}

	assert(j == encl->nr_segments - 1);

	/* The final segment is the heap: anonymous, writable, unmeasured. */
	seg = &encl->segment_tbl[j];
	seg->offset = encl->segment_tbl[j - 1].offset + encl->segment_tbl[j - 1].size;
	seg->size = heap_size;
	seg->src = mmap(NULL, heap_size, PROT_READ | PROT_WRITE,
			MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	seg->prot = PROT_READ | PROT_WRITE;
	seg->flags = (SGX_PAGE_TYPE_REG << 8) | seg->prot;
	seg->measure = false;

	if (seg->src == MAP_FAILED)
		goto err;

	encl->src_size = encl->segment_tbl[j].offset + encl->segment_tbl[j].size;

	/* SGX requires the enclave size to be a power of two. */
	for (encl->encl_size = 4096; encl->encl_size < encl->src_size; )
		encl->encl_size <<= 1;

	return true;

err:
	if (fd != -1)
		close(fd);
	encl_delete(encl);
	return false;
}

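/*
 * Reserve an address range for the enclave. SGX requires the base to
 * be naturally aligned to the (power-of-two) enclave size, so reserve
 * twice the size and trim the reservation down to an aligned window.
 */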
static bool encl_map_area(struct encl *encl)
{
	size_t encl_size = encl->encl_size;
	void *area;

	area = mmap(NULL, encl_size * 2, PROT_NONE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (area == MAP_FAILED) {
		perror("reservation mmap()");
		return false;
	}

	/* Round up to the first size-aligned address inside the reservation. */
	encl->encl_base = ((uint64_t)area + encl_size - 1) & ~(encl_size - 1);

	/* Unmap the unused head and tail of the 2x reservation. */
	munmap(area, encl->encl_base - (uint64_t)area);
	munmap((void *)(encl->encl_base + encl_size),
	       (uint64_t)area + encl_size - encl->encl_base);

	return true;
}

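/*
 * Build the enclave end to end: reserve the address range, ECREATE the
 * enclave, EADD all segments, then EINIT with the prepared sigstruct.
 */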
bool encl_build(struct encl *encl)
{
	struct sgx_enclave_init ioc;
	int ret;
	int i;

	if (!encl_map_area(encl))
		return false;

	if (!encl_ioc_create(encl))
		return false;

	/*
	 * Pages must be added before mapping VMAs because their permissions
	 * cap the VMA permissions.
	 */
	for (i = 0; i < encl->nr_segments; i++) {
		struct encl_segment *seg = &encl->segment_tbl[i];

		if (!encl_ioc_add_pages(encl, seg))
			return false;
	}

	ioc.sigstruct = (uint64_t)&encl->sigstruct;
	ret = ioctl(encl->fd, SGX_IOC_ENCLAVE_INIT, &ioc);
	if (ret) {
		perror("SGX_IOC_ENCLAVE_INIT failed");
		return false;
	}

	return true;
}
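
/*
 * A minimal usage sketch (not part of the original file), showing how a
 * caller is expected to drive these helpers. encl_measure() and the
 * per-segment mmap() live in the surrounding selftest sources; the exact
 * sequence below is an assumption based on this file's API, not a
 * verbatim copy of the harness.
 *
 *	struct encl encl;
 *
 *	if (!encl_load("test_encl.elf", &encl, 8192))
 *		return false;
 *	if (!encl_measure(&encl))	// compute MRENCLAVE / sigstruct
 *		return false;
 *	if (!encl_build(&encl))		// ECREATE + EADD + EINIT
 *		return false;
 *
 *	// Map each segment at its final address with its final protections;
 *	// the EADD'ed page permissions cap what mmap() may grant.
 *	for (int i = 0; i < encl.nr_segments; i++) {
 *		struct encl_segment *seg = &encl.segment_tbl[i];
 *
 *		if (mmap((void *)encl.encl_base + seg->offset, seg->size,
 *			 seg->prot, MAP_SHARED | MAP_FIXED, encl.fd, 0) == MAP_FAILED)
 *			return false;
 *	}
 */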
371