// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2016-20 Intel Corporation. */

#include <assert.h>
#include <elf.h>
#include <errno.h>
#include <fcntl.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include "defines.h"
#include "main.h"

/*
 * Release everything an enclave descriptor owns: the enclave address range,
 * the mapped binary, the device fd, the heap source pages and the segment
 * table. The descriptor is zeroed so it can be reused.
 */
void encl_delete(struct encl *encl)
{
	struct encl_segment *heap_seg;

	if (encl->encl_base)
		munmap((void *)encl->encl_base, encl->encl_size);

	if (encl->bin)
		munmap(encl->bin, encl->bin_size);

	if (encl->fd)
		close(encl->fd);

	if (encl->segment_tbl) {
		heap_seg = &encl->segment_tbl[encl->nr_segments - 1];
		munmap(heap_seg->src, heap_seg->size);
		free(encl->segment_tbl);
	}

	memset(encl, 0, sizeof(*encl));
}

/*
 * Map the enclave ELF binary read-only so that its program and section
 * headers can be parsed and its segments copied into the enclave.
 */
static bool encl_map_bin(const char *path, struct encl *encl)
{
	struct stat sb;
	void *bin;
	int ret;
	int fd;

	fd = open(path, O_RDONLY);
	if (fd == -1) {
		perror("enclave executable open()");
		return false;
	}

	ret = stat(path, &sb);
	if (ret) {
		perror("enclave executable stat()");
		goto err;
	}

	bin = mmap(NULL, sb.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	if (bin == MAP_FAILED) {
		perror("enclave executable mmap()");
		goto err;
	}

	encl->bin = bin;
	encl->bin_size = sb.st_size;

	close(fd);
	return true;

err:
	close(fd);
	return false;
}

/*
 * Fill out a minimal SECS (64-bit mode, x87/SSE XFRM, one-page SSA frame)
 * and create the enclave with SGX_IOC_ENCLAVE_CREATE.
 */
static bool encl_ioc_create(struct encl *encl)
{
	struct sgx_secs *secs = &encl->secs;
	struct sgx_enclave_create ioc;
	int rc;

	assert(encl->encl_base != 0);

	memset(secs, 0, sizeof(*secs));
	secs->ssa_frame_size = 1;
	secs->attributes = SGX_ATTR_MODE64BIT;
	secs->xfrm = 3;
	secs->base = encl->encl_base;
	secs->size = encl->encl_size;

	ioc.src = (unsigned long)secs;
	rc = ioctl(encl->fd, SGX_IOC_ENCLAVE_CREATE, &ioc);
	if (rc) {
		perror("SGX_IOC_ENCLAVE_CREATE failed");
		munmap((void *)secs->base, encl->encl_size);
		return false;
	}

	return true;
}

/*
 * Add one segment to the enclave with SGX_IOC_ENCLAVE_ADD_PAGES, measuring
 * its contents when requested.
 */
static bool encl_ioc_add_pages(struct encl *encl, struct encl_segment *seg)
{
	struct sgx_enclave_add_pages ioc;
	struct sgx_secinfo secinfo;
	int rc;

	memset(&secinfo, 0, sizeof(secinfo));
	secinfo.flags = seg->flags;

	ioc.src = (uint64_t)seg->src;
	ioc.offset = seg->offset;
	ioc.length = seg->size;
	ioc.secinfo = (unsigned long)&secinfo;
	if (seg->measure)
		ioc.flags = SGX_PAGE_MEASURE;
	else
		ioc.flags = 0;

	rc = ioctl(encl->fd, SGX_IOC_ENCLAVE_ADD_PAGES, &ioc);
	if (rc < 0) {
		perror("SGX_IOC_ENCLAVE_ADD_PAGES failed");
		return false;
	}

	return true;
}

/*
 * Parse the enclave code's symbol table to locate and return the address of
 * the provided symbol.
 */
uint64_t encl_get_entry(struct encl *encl, const char *symbol)
{
	Elf64_Shdr *sections;
	Elf64_Sym *symtab;
	Elf64_Ehdr *ehdr;
	char *sym_names;
	int num_sym = 0;
	int i;

	ehdr = encl->bin;
	sections = encl->bin + ehdr->e_shoff;

	for (i = 0; i < ehdr->e_shnum; i++) {
		if (sections[i].sh_type == SHT_SYMTAB) {
			symtab = (Elf64_Sym *)((char *)encl->bin + sections[i].sh_offset);
			num_sym = sections[i].sh_size / sections[i].sh_entsize;
			break;
		}
	}

	for (i = 0; i < ehdr->e_shnum; i++) {
		if (sections[i].sh_type == SHT_STRTAB) {
			sym_names = (char *)encl->bin + sections[i].sh_offset;
			break;
		}
	}

	for (i = 0; i < num_sym; i++) {
		Elf64_Sym *sym = &symtab[i];

		if (!strcmp(symbol, sym_names + sym->st_name))
			return (uint64_t)sym->st_value;
	}

	return 0;
}

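/*
 * Load the enclave binary at @path: open the SGX device, verify that /dev
 * allows both PROT_READ and PROT_EXEC mappings of it, map the ELF image and
 * build the segment table from its PT_LOAD headers plus one trailing,
 * unmeasured heap segment of @heap_size bytes. The enclave size is the
 * smallest page-sized power of two that covers all segments.
 */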
bool encl_load(const char *path, struct encl *encl, unsigned long heap_size)
{
	const char device_path[] = "/dev/sgx_enclave";
	struct encl_segment *seg;
	Elf64_Phdr *phdr_tbl;
	off_t src_offset;
	Elf64_Ehdr *ehdr;
	struct stat sb;
	void *ptr;
	int i, j;
	int ret;
	int fd = -1;

	memset(encl, 0, sizeof(*encl));

	fd = open(device_path, O_RDWR);
	if (fd < 0) {
		perror("Unable to open /dev/sgx_enclave");
		goto err;
	}

	ret = stat(device_path, &sb);
	if (ret) {
		perror("device file stat()");
		goto err;
	}

	ptr = mmap(NULL, PAGE_SIZE, PROT_READ, MAP_SHARED, fd, 0);
	if (ptr == (void *)-1) {
		perror("mmap for read");
		goto err;
	}
	munmap(ptr, PAGE_SIZE);

#define ERR_MSG \
"mmap() succeeded for PROT_READ, but failed for PROT_EXEC.\n" \
" Check that /dev does not have noexec set:\n" \
" \tmount | grep \"/dev .*noexec\"\n" \
" If so, remount it executable: mount -o remount,exec /dev\n\n"

	ptr = mmap(NULL, PAGE_SIZE, PROT_EXEC, MAP_SHARED, fd, 0);
	if (ptr == (void *)-1) {
		fprintf(stderr, ERR_MSG);
		goto err;
	}
	munmap(ptr, PAGE_SIZE);

	encl->fd = fd;

	if (!encl_map_bin(path, encl))
		goto err;

	ehdr = encl->bin;
	phdr_tbl = encl->bin + ehdr->e_phoff;

	encl->nr_segments = 1; /* one for the heap */

	for (i = 0; i < ehdr->e_phnum; i++) {
		Elf64_Phdr *phdr = &phdr_tbl[i];

		if (phdr->p_type == PT_LOAD)
			encl->nr_segments++;
	}

	encl->segment_tbl = calloc(encl->nr_segments,
				   sizeof(struct encl_segment));
	if (!encl->segment_tbl)
		goto err;

	for (i = 0, j = 0; i < ehdr->e_phnum; i++) {
		Elf64_Phdr *phdr = &phdr_tbl[i];
		unsigned int flags = phdr->p_flags;

		if (phdr->p_type != PT_LOAD)
			continue;

		seg = &encl->segment_tbl[j];

		if (!!(flags & ~(PF_R | PF_W | PF_X))) {
			fprintf(stderr,
				"%d has invalid segment flags 0x%02x.\n", i,
				phdr->p_flags);
			goto err;
		}

		if (j == 0 && flags != (PF_R | PF_W)) {
			fprintf(stderr,
				"TCS has invalid segment flags 0x%02x.\n",
				phdr->p_flags);
			goto err;
		}

		if (j == 0) {
			src_offset = phdr->p_offset & PAGE_MASK;
			encl->src = encl->bin + src_offset;

			seg->prot = PROT_READ | PROT_WRITE;
			seg->flags = SGX_PAGE_TYPE_TCS << 8;
		} else {
			seg->prot = (phdr->p_flags & PF_R) ? PROT_READ : 0;
			seg->prot |= (phdr->p_flags & PF_W) ? PROT_WRITE : 0;
			seg->prot |= (phdr->p_flags & PF_X) ? PROT_EXEC : 0;
			seg->flags = (SGX_PAGE_TYPE_REG << 8) | seg->prot;
		}

		seg->offset = (phdr->p_offset & PAGE_MASK) - src_offset;
		seg->size = (phdr->p_filesz + PAGE_SIZE - 1) & PAGE_MASK;
		seg->src = encl->src + seg->offset;
		seg->measure = true;

		j++;
	}

	assert(j == encl->nr_segments - 1);

	seg = &encl->segment_tbl[j];
	seg->offset = encl->segment_tbl[j - 1].offset + encl->segment_tbl[j - 1].size;
	seg->size = heap_size;
	seg->src = mmap(NULL, heap_size, PROT_READ | PROT_WRITE,
			MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	seg->prot = PROT_READ | PROT_WRITE;
	seg->flags = (SGX_PAGE_TYPE_REG << 8) | seg->prot;
	seg->measure = false;

	if (seg->src == MAP_FAILED)
		goto err;

	encl->src_size = encl->segment_tbl[j].offset + encl->segment_tbl[j].size;

	for (encl->encl_size = 4096; encl->encl_size < encl->src_size; )
		encl->encl_size <<= 1;

	return true;

err:
	if (fd != -1)
		close(fd);
	encl_delete(encl);
	return false;
}

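/*
 * Reserve address space for the enclave. The base must be naturally aligned
 * to the enclave size, so map twice the size, round the base up to the next
 * encl_size boundary and unmap the unused slack on both ends.
 */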
static bool encl_map_area(struct encl *encl)
{
	size_t encl_size = encl->encl_size;
	void *area;

	area = mmap(NULL, encl_size * 2, PROT_NONE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (area == MAP_FAILED) {
		perror("reservation mmap()");
		return false;
	}

	encl->encl_base = ((uint64_t)area + encl_size - 1) & ~(encl_size - 1);

	munmap(area, encl->encl_base - (uint64_t)area);
	munmap((void *)(encl->encl_base + encl_size),
	       (uint64_t)area + encl_size - encl->encl_base);

	return true;
}

/*
 * Build the enclave: reserve its address range, create it, add every segment
 * with SGX_IOC_ENCLAVE_ADD_PAGES and initialize it with the sigstruct
 * prepared by the caller.
 */
bool encl_build(struct encl *encl)
{
	struct sgx_enclave_init ioc;
	int ret;
	int i;

	if (!encl_map_area(encl))
		return false;

	if (!encl_ioc_create(encl))
		return false;

	/*
	 * Pages must be added before mapping VMAs because their permissions
	 * cap the VMA permissions.
	 */
	for (i = 0; i < encl->nr_segments; i++) {
		struct encl_segment *seg = &encl->segment_tbl[i];

		if (!encl_ioc_add_pages(encl, seg))
			return false;
	}

	ioc.sigstruct = (uint64_t)&encl->sigstruct;
	ret = ioctl(encl->fd, SGX_IOC_ENCLAVE_INIT, &ioc);
	if (ret) {
		perror("SGX_IOC_ENCLAVE_INIT failed");
		return false;
	}

	return true;
}