// SPDX-License-Identifier: GPL-2.0
/*  Copyright(c) 2016-20 Intel Corporation. */

#include <assert.h>
#include <elf.h>
#include <errno.h>
#include <fcntl.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include "defines.h"
#include "main.h"

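/*
 * Tear down an enclave: unmap the enclave range and the backing binary,
 * close the enclave fd, free the segment table and zero the structure.
 */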
void encl_delete(struct encl *encl)
{
	if (encl->encl_base)
		munmap((void *)encl->encl_base, encl->encl_size);

	if (encl->bin)
		munmap(encl->bin, encl->bin_size);

	if (encl->fd)
		close(encl->fd);

	if (encl->segment_tbl)
		free(encl->segment_tbl);

	memset(encl, 0, sizeof(*encl));
}

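/* Map the enclave binary at @path read-only and record its size. */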
static bool encl_map_bin(const char *path, struct encl *encl)
{
	struct stat sb;
	void *bin;
	int ret;
	int fd;

	fd = open(path, O_RDONLY);
	if (fd == -1) {
		perror("open()");
		return false;
	}

	ret = stat(path, &sb);
	if (ret) {
		perror("stat()");
		goto err;
	}

	bin = mmap(NULL, sb.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	if (bin == MAP_FAILED) {
		perror("mmap()");
		goto err;
	}

	encl->bin = bin;
	encl->bin_size = sb.st_size;

	close(fd);
	return true;

err:
	close(fd);
	return false;
}

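/*
 * Fill in the SGX Enclave Control Structure (SECS) and create the
 * enclave with SGX_IOC_ENCLAVE_CREATE.
 */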
static bool encl_ioc_create(struct encl *encl)
{
	struct sgx_secs *secs = &encl->secs;
	struct sgx_enclave_create ioc;
	int rc;

	assert(encl->encl_base != 0);

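	/*
	 * One page per SSA frame, 64-bit mode, and XFRM = 3 (x87 + SSE
	 * state), the minimum feature set the architecture permits.
	 */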
	memset(secs, 0, sizeof(*secs));
	secs->ssa_frame_size = 1;
	secs->attributes = SGX_ATTR_MODE64BIT;
	secs->xfrm = 3;
	secs->base = encl->encl_base;
	secs->size = encl->encl_size;

	ioc.src = (unsigned long)secs;
	rc = ioctl(encl->fd, SGX_IOC_ENCLAVE_CREATE, &ioc);
	if (rc) {
		fprintf(stderr, "SGX_IOC_ENCLAVE_CREATE failed: errno=%d\n",
			errno);
		munmap((void *)secs->base, encl->encl_size);
		return false;
	}

	return true;
}

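/*
 * Add one segment's worth of pages to the enclave with
 * SGX_IOC_ENCLAVE_ADD_PAGES. SGX_PAGE_MEASURE requests that the page
 * contents be extended into the enclave measurement.
 */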
static bool encl_ioc_add_pages(struct encl *encl, struct encl_segment *seg)
{
	struct sgx_enclave_add_pages ioc;
	struct sgx_secinfo secinfo;
	int rc;

	memset(&secinfo, 0, sizeof(secinfo));
	secinfo.flags = seg->flags;

	ioc.src = (uint64_t)encl->src + seg->offset;
	ioc.offset = seg->offset;
	ioc.length = seg->size;
	ioc.secinfo = (unsigned long)&secinfo;
	ioc.flags = SGX_PAGE_MEASURE;

	rc = ioctl(encl->fd, SGX_IOC_ENCLAVE_ADD_PAGES, &ioc);
	if (rc < 0) {
		fprintf(stderr, "SGX_IOC_ENCLAVE_ADD_PAGES failed: errno=%d.\n",
			errno);
		return false;
	}

	return true;
}

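/*
 * Open the SGX enclave device, map the enclave binary and translate its
 * PT_LOAD segments into the segment table. The first segment is expected
 * to carry the TCS page(s).
 */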
bool encl_load(const char *path, struct encl *encl)
{
	Elf64_Phdr *phdr_tbl;
	off_t src_offset;
	Elf64_Ehdr *ehdr;
	int i, j;
	int ret;

	memset(encl, 0, sizeof(*encl));

	ret = open("/dev/sgx_enclave", O_RDWR);
	if (ret < 0) {
		fprintf(stderr, "Unable to open /dev/sgx_enclave\n");
		goto err;
	}

	encl->fd = ret;

	if (!encl_map_bin(path, encl))
		goto err;

	ehdr = encl->bin;
	phdr_tbl = encl->bin + ehdr->e_phoff;

	for (i = 0; i < ehdr->e_phnum; i++) {
		Elf64_Phdr *phdr = &phdr_tbl[i];

		if (phdr->p_type == PT_LOAD)
			encl->nr_segments++;
	}

	encl->segment_tbl = calloc(encl->nr_segments,
				   sizeof(struct encl_segment));
	if (!encl->segment_tbl)
		goto err;

	for (i = 0, j = 0; i < ehdr->e_phnum; i++) {
		Elf64_Phdr *phdr = &phdr_tbl[i];
		unsigned int flags = phdr->p_flags;
		struct encl_segment *seg;

		if (phdr->p_type != PT_LOAD)
			continue;

		seg = &encl->segment_tbl[j];

		if (flags & ~(PF_R | PF_W | PF_X)) {
			fprintf(stderr,
				"Segment %d has invalid flags 0x%02x.\n",
				i, phdr->p_flags);
			goto err;
		}

		if (j == 0 && flags != (PF_R | PF_W)) {
			fprintf(stderr,
				"TCS has invalid segment flags 0x%02x.\n",
				phdr->p_flags);
			goto err;
		}

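		/*
		 * The first segment holds the TCS page(s). The SECINFO page
		 * type lives in the upper byte of the flags, hence the
		 * SGX_PAGE_TYPE_* << 8 encoding.
		 */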
		if (j == 0) {
			src_offset = phdr->p_offset & PAGE_MASK;

			seg->prot = PROT_READ | PROT_WRITE;
			seg->flags = SGX_PAGE_TYPE_TCS << 8;
		} else {
			seg->prot = (phdr->p_flags & PF_R) ? PROT_READ : 0;
			seg->prot |= (phdr->p_flags & PF_W) ? PROT_WRITE : 0;
			seg->prot |= (phdr->p_flags & PF_X) ? PROT_EXEC : 0;
			seg->flags = (SGX_PAGE_TYPE_REG << 8) | seg->prot;
		}

		seg->offset = (phdr->p_offset & PAGE_MASK) - src_offset;
		seg->size = (phdr->p_filesz + PAGE_SIZE - 1) & PAGE_MASK;

		printf("0x%016lx 0x%016lx 0x%02x\n", seg->offset, seg->size,
		       seg->prot);

		j++;
	}

	assert(j == encl->nr_segments);

	encl->src = encl->bin + src_offset;
	encl->src_size = encl->segment_tbl[j - 1].offset +
			 encl->segment_tbl[j - 1].size;

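	/* SECS.SIZE must be a power of two that covers the enclave image. */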
	for (encl->encl_size = 4096; encl->encl_size < encl->src_size; )
		encl->encl_size <<= 1;

	return true;

err:
	encl_delete(encl);
	return false;
}

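/* Reserve a naturally aligned virtual address range for the enclave. */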
static bool encl_map_area(struct encl *encl)
{
	size_t encl_size = encl->encl_size;
	void *area;

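	/*
	 * ECREATE requires the enclave base to be aligned to the enclave
	 * size, so reserve twice the size and carve an aligned window out
	 * of the reservation.
	 */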
	area = mmap(NULL, encl_size * 2, PROT_NONE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (area == MAP_FAILED) {
		perror("mmap");
		return false;
	}

	encl->encl_base = ((uint64_t)area + encl_size - 1) & ~(encl_size - 1);

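	/* Release the unused head and tail of the reservation. */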
	munmap(area, encl->encl_base - (uint64_t)area);
	munmap((void *)(encl->encl_base + encl_size),
	       (uint64_t)area + encl_size - encl->encl_base);

	return true;
}

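/*
 * Build the enclave: reserve its address range, ECREATE it, EADD and
 * measure every segment, then EINIT it with the SIGSTRUCT.
 */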
bool encl_build(struct encl *encl)
{
	struct sgx_enclave_init ioc;
	int ret;
	int i;

	if (!encl_map_area(encl))
		return false;

	if (!encl_ioc_create(encl))
		return false;

	/*
	 * Pages must be added before mapping VMAs because their permissions
	 * cap the VMA permissions.
	 */
	for (i = 0; i < encl->nr_segments; i++) {
		struct encl_segment *seg = &encl->segment_tbl[i];

		if (!encl_ioc_add_pages(encl, seg))
			return false;
	}

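	/* Initialize (EINIT) the enclave with the signature structure. */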
	ioc.sigstruct = (uint64_t)&encl->sigstruct;
	ret = ioctl(encl->fd, SGX_IOC_ENCLAVE_INIT, &ioc);
	if (ret) {
		fprintf(stderr, "SGX_IOC_ENCLAVE_INIT failed: errno=%d\n",
			errno);
		return false;
	}

	return true;
}