// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2016-20 Intel Corporation. */

#include <asm/mman.h>
#include <linux/mman.h>
#include <linux/delay.h>
#include <linux/file.h>
#include <linux/hashtable.h>
#include <linux/highmem.h>
#include <linux/ratelimit.h>
#include <linux/sched/signal.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include "driver.h"
#include "encl.h"
#include "encls.h"

static struct sgx_va_page *sgx_encl_grow(struct sgx_encl *encl)
{
        struct sgx_va_page *va_page = NULL;
        void *err;

        BUILD_BUG_ON(SGX_VA_SLOT_COUNT !=
                     (SGX_ENCL_PAGE_VA_OFFSET_MASK >> 3) + 1);

        if (!(encl->page_cnt % SGX_VA_SLOT_COUNT)) {
                va_page = kzalloc(sizeof(*va_page), GFP_KERNEL);
                if (!va_page)
                        return ERR_PTR(-ENOMEM);

                va_page->epc_page = sgx_alloc_va_page();
                if (IS_ERR(va_page->epc_page)) {
                        err = ERR_CAST(va_page->epc_page);
                        kfree(va_page);
                        return err;
                }

                WARN_ON_ONCE(encl->page_cnt % SGX_VA_SLOT_COUNT);
        }
        encl->page_cnt++;
        return va_page;
}

static void sgx_encl_shrink(struct sgx_encl *encl, struct sgx_va_page *va_page)
{
        encl->page_cnt--;

        if (va_page) {
                sgx_free_epc_page(va_page->epc_page);
                list_del(&va_page->list);
                kfree(va_page);
        }
}
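
/*
 * Illustrative arithmetic for the accounting above (assuming 4 KiB EPC
 * pages, as on all current parts): a Version Array (VA) page holds 8-byte
 * slots, so SGX_VA_SLOT_COUNT is (SGX_ENCL_PAGE_VA_OFFSET_MASK >> 3) + 1,
 * i.e. 512. sgx_encl_grow() therefore allocates one fresh VA page for every
 * 512 enclave pages, and sgx_encl_shrink() undoes exactly one grow.
 */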

static int sgx_encl_create(struct sgx_encl *encl, struct sgx_secs *secs)
{
        struct sgx_epc_page *secs_epc;
        struct sgx_va_page *va_page;
        struct sgx_pageinfo pginfo;
        struct sgx_secinfo secinfo;
        unsigned long encl_size;
        struct file *backing;
        long ret;

        va_page = sgx_encl_grow(encl);
        if (IS_ERR(va_page))
                return PTR_ERR(va_page);
        else if (va_page)
                list_add(&va_page->list, &encl->va_pages);
        /* else the tail page of the VA page list had free slots. */

        /* The extra page goes to SECS. */
        encl_size = secs->size + PAGE_SIZE;

        backing = shmem_file_setup("SGX backing", encl_size + (encl_size >> 5),
                                   VM_NORESERVE);
        if (IS_ERR(backing)) {
                ret = PTR_ERR(backing);
                goto err_out_shrink;
        }

        encl->backing = backing;

        secs_epc = sgx_alloc_epc_page(&encl->secs, true);
        if (IS_ERR(secs_epc)) {
                ret = PTR_ERR(secs_epc);
                goto err_out_backing;
        }

        encl->secs.epc_page = secs_epc;

        pginfo.addr = 0;
        pginfo.contents = (unsigned long)secs;
        pginfo.metadata = (unsigned long)&secinfo;
        pginfo.secs = 0;
        memset(&secinfo, 0, sizeof(secinfo));

        ret = __ecreate((void *)&pginfo, sgx_get_epc_virt_addr(secs_epc));
        if (ret) {
                ret = -EIO;
                goto err_out;
        }

        if (secs->attributes & SGX_ATTR_DEBUG)
                set_bit(SGX_ENCL_DEBUG, &encl->flags);

        encl->secs.encl = encl;
        encl->base = secs->base;
        encl->size = secs->size;
        encl->attributes = secs->attributes;
        encl->attributes_mask = SGX_ATTR_DEBUG | SGX_ATTR_MODE64BIT | SGX_ATTR_KSS;

        /* Set only after completion, as encl->lock has not been taken. */
        set_bit(SGX_ENCL_CREATED, &encl->flags);

        return 0;

err_out:
        sgx_free_epc_page(encl->secs.epc_page);
        encl->secs.epc_page = NULL;

err_out_backing:
        fput(encl->backing);
        encl->backing = NULL;

err_out_shrink:
        sgx_encl_shrink(encl, va_page);

        return ret;
}

/**
 * sgx_ioc_enclave_create() - handler for %SGX_IOC_ENCLAVE_CREATE
 * @encl: An enclave pointer.
 * @arg: The ioctl argument.
 *
 * Allocate kernel data structures for the enclave and invoke ECREATE.
 *
 * Return:
 * - 0: Success.
 * - -EIO: ECREATE failed.
 * - -errno: POSIX error.
 */
static long sgx_ioc_enclave_create(struct sgx_encl *encl, void __user *arg)
{
        struct sgx_enclave_create create_arg;
        void *secs;
        int ret;

        if (test_bit(SGX_ENCL_CREATED, &encl->flags))
                return -EINVAL;

        if (copy_from_user(&create_arg, arg, sizeof(create_arg)))
                return -EFAULT;

        secs = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!secs)
                return -ENOMEM;

        if (copy_from_user(secs, (void __user *)create_arg.src, PAGE_SIZE))
                ret = -EFAULT;
        else
                ret = sgx_encl_create(encl, secs);

        kfree(secs);
        return ret;
}
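
/*
 * A minimal userspace sketch of driving this handler (illustrative only;
 * encl_fd and secs are hypothetical caller-side names; the uapi types come
 * from <asm/sgx.h> and the fd from opening /dev/sgx_enclave):
 *
 *        struct sgx_enclave_create create_arg = {
 *                .src = (__u64)(uintptr_t)&secs,  // caller-prepared SECS page
 *        };
 *
 *        if (ioctl(encl_fd, SGX_IOC_ENCLAVE_CREATE, &create_arg))
 *                perror("SGX_IOC_ENCLAVE_CREATE");
 */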

static struct sgx_encl_page *sgx_encl_page_alloc(struct sgx_encl *encl,
                                                 unsigned long offset,
                                                 u64 secinfo_flags)
{
        struct sgx_encl_page *encl_page;
        unsigned long prot;

        encl_page = kzalloc(sizeof(*encl_page), GFP_KERNEL);
        if (!encl_page)
                return ERR_PTR(-ENOMEM);

        encl_page->desc = encl->base + offset;
        encl_page->encl = encl;

        prot = _calc_vm_trans(secinfo_flags, SGX_SECINFO_R, PROT_READ)  |
               _calc_vm_trans(secinfo_flags, SGX_SECINFO_W, PROT_WRITE) |
               _calc_vm_trans(secinfo_flags, SGX_SECINFO_X, PROT_EXEC);

        /*
         * TCS pages must always have RW set for CPU access while the SECINFO
         * permissions are *always* zero - the CPU ignores the user provided
         * values and silently overwrites them with zero permissions.
         */
        if ((secinfo_flags & SGX_SECINFO_PAGE_TYPE_MASK) == SGX_SECINFO_TCS)
                prot |= PROT_READ | PROT_WRITE;

        /* Calculate maximum of the VM flags for the page. */
        encl_page->vm_max_prot_bits = calc_vm_prot_bits(prot, 0);

        return encl_page;
}

static int sgx_validate_secinfo(struct sgx_secinfo *secinfo)
{
        u64 perm = secinfo->flags & SGX_SECINFO_PERMISSION_MASK;
        u64 pt = secinfo->flags & SGX_SECINFO_PAGE_TYPE_MASK;

        if (pt != SGX_SECINFO_REG && pt != SGX_SECINFO_TCS)
                return -EINVAL;

        if ((perm & SGX_SECINFO_W) && !(perm & SGX_SECINFO_R))
                return -EINVAL;

        /*
         * The CPU silently overwrites the permissions with zero, which means
         * that we need to validate them ourselves.
         */
        if (pt == SGX_SECINFO_TCS && perm)
                return -EINVAL;

        if (secinfo->flags & SGX_SECINFO_RESERVED_MASK)
                return -EINVAL;

        if (memchr_inv(secinfo->reserved, 0, sizeof(secinfo->reserved)))
                return -EINVAL;

        return 0;
}

static int __sgx_encl_add_page(struct sgx_encl *encl,
                               struct sgx_encl_page *encl_page,
                               struct sgx_epc_page *epc_page,
                               struct sgx_secinfo *secinfo, unsigned long src)
{
        struct sgx_pageinfo pginfo;
        struct vm_area_struct *vma;
        struct page *src_page;
        int ret;

        /* Deny noexec. */
        vma = find_vma(current->mm, src);
        if (!vma)
                return -EFAULT;

        if (!(vma->vm_flags & VM_MAYEXEC))
                return -EACCES;

        ret = get_user_pages(src, 1, 0, &src_page, NULL);
        if (ret < 1)
                return -EFAULT;

        pginfo.secs = (unsigned long)sgx_get_epc_virt_addr(encl->secs.epc_page);
        pginfo.addr = encl_page->desc & PAGE_MASK;
        pginfo.metadata = (unsigned long)secinfo;
        pginfo.contents = (unsigned long)kmap_atomic(src_page);

        ret = __eadd(&pginfo, sgx_get_epc_virt_addr(epc_page));

        kunmap_atomic((void *)pginfo.contents);
        put_page(src_page);

        return ret ? -EIO : 0;
}

/*
 * If the caller requires measurement of the page as a proof for the content,
 * use EEXTEND to add a measurement for 256 bytes of the page. Repeat this
 * operation until the entire page is measured.
 */
static int __sgx_encl_extend(struct sgx_encl *encl,
                             struct sgx_epc_page *epc_page)
{
        unsigned long offset;
        int ret;

        for (offset = 0; offset < PAGE_SIZE; offset += SGX_EEXTEND_BLOCK_SIZE) {
                ret = __eextend(sgx_get_epc_virt_addr(encl->secs.epc_page),
                                sgx_get_epc_virt_addr(epc_page) + offset);
                if (ret) {
                        if (encls_failed(ret))
                                ENCLS_WARN(ret, "EEXTEND");

                        return -EIO;
                }
        }

        return 0;
}
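
/*
 * Illustrative arithmetic: with a 4096-byte PAGE_SIZE and a 256-byte
 * SGX_EEXTEND_BLOCK_SIZE, the loop above issues 16 EEXTEND operations per
 * measured page, each folding one 256-byte chunk into MRENCLAVE.
 */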

static int sgx_encl_add_page(struct sgx_encl *encl, unsigned long src,
                             unsigned long offset, struct sgx_secinfo *secinfo,
                             unsigned long flags)
{
        struct sgx_encl_page *encl_page;
        struct sgx_epc_page *epc_page;
        struct sgx_va_page *va_page;
        int ret;

        encl_page = sgx_encl_page_alloc(encl, offset, secinfo->flags);
        if (IS_ERR(encl_page))
                return PTR_ERR(encl_page);

        epc_page = sgx_alloc_epc_page(encl_page, true);
        if (IS_ERR(epc_page)) {
                kfree(encl_page);
                return PTR_ERR(epc_page);
        }

        va_page = sgx_encl_grow(encl);
        if (IS_ERR(va_page)) {
                ret = PTR_ERR(va_page);
                goto err_out_free;
        }

        mmap_read_lock(current->mm);
        mutex_lock(&encl->lock);

        /*
         * Adding to encl->va_pages must be done under encl->lock. Ditto for
         * deleting (via sgx_encl_shrink()) in the error path.
         */
        if (va_page)
                list_add(&va_page->list, &encl->va_pages);

        /*
         * Insert prior to EADD in case of OOM. EADD modifies MRENCLAVE, i.e.
         * can't be gracefully unwound, while failure on EADD/EXTEND is limited
         * to userspace errors (or kernel/hardware bugs).
         */
        ret = xa_insert(&encl->page_array, PFN_DOWN(encl_page->desc),
                        encl_page, GFP_KERNEL);
        if (ret)
                goto err_out_unlock;

        ret = __sgx_encl_add_page(encl, encl_page, epc_page, secinfo,
                                  src);
        if (ret)
                goto err_out;

        /*
         * Complete the "add" before doing the "extend" so that the "add"
         * isn't in a half-baked state in the extremely unlikely scenario
         * the enclave will be destroyed in response to EEXTEND failure.
         */
        encl_page->encl = encl;
        encl_page->epc_page = epc_page;
        encl->secs_child_cnt++;

        if (flags & SGX_PAGE_MEASURE) {
                ret = __sgx_encl_extend(encl, epc_page);
                if (ret)
                        goto err_out;
        }

        sgx_mark_page_reclaimable(encl_page->epc_page);
        mutex_unlock(&encl->lock);
        mmap_read_unlock(current->mm);
        return ret;

err_out:
        xa_erase(&encl->page_array, PFN_DOWN(encl_page->desc));

err_out_unlock:
        sgx_encl_shrink(encl, va_page);
        mutex_unlock(&encl->lock);
        mmap_read_unlock(current->mm);

err_out_free:
        sgx_free_epc_page(epc_page);
        kfree(encl_page);

        return ret;
}

/**
 * sgx_ioc_enclave_add_pages() - The handler for %SGX_IOC_ENCLAVE_ADD_PAGES
 * @encl: an enclave pointer
 * @arg: a user pointer to a struct sgx_enclave_add_pages instance
 *
 * Add one or more pages to an uninitialized enclave, and optionally extend the
 * measurement with the contents of the page. The SECINFO and measurement mask
 * are applied to all pages.
 *
 * A SECINFO for a TCS is required to always contain zero permissions because
 * the CPU silently zeros them. Allowing anything else would cause a mismatch
 * in the measurement.
 *
 * mmap()'s protection bits are capped by the page permissions. For each page
 * address, the maximum protection bits are computed with the following
 * heuristics:
 *
 * 1. A regular page: PROT_R, PROT_W and PROT_X match the SECINFO permissions.
 * 2. A TCS page: PROT_R | PROT_W.
 *
 * mmap() is not allowed to surpass the minimum of the maximum protection bits
 * within the given address range.
 *
 * The function deinitializes the kernel data structures for the enclave and
 * returns -EIO in any of the following conditions:
 *
 * - Enclave Page Cache (EPC), the physical memory holding enclaves, has
 *   been invalidated. This will cause EADD and EEXTEND to fail.
 * - If the source address is corrupted somehow when executing EADD.
 *
 * Return:
 * - 0: Success.
 * - -EACCES: The source page is located in a noexec partition.
 * - -ENOMEM: Out of EPC pages.
 * - -EINTR: The call was interrupted before data was processed.
 * - -EIO: Either EADD or EEXTEND failed because of an invalid source
 *   address or a power cycle.
 * - -errno: POSIX error.
 */
static long sgx_ioc_enclave_add_pages(struct sgx_encl *encl, void __user *arg)
{
        struct sgx_enclave_add_pages add_arg;
        struct sgx_secinfo secinfo;
        unsigned long c;
        int ret;

        if (!test_bit(SGX_ENCL_CREATED, &encl->flags) ||
            test_bit(SGX_ENCL_INITIALIZED, &encl->flags))
                return -EINVAL;

        if (copy_from_user(&add_arg, arg, sizeof(add_arg)))
                return -EFAULT;

        if (!IS_ALIGNED(add_arg.offset, PAGE_SIZE) ||
            !IS_ALIGNED(add_arg.src, PAGE_SIZE))
                return -EINVAL;

        if (!add_arg.length || add_arg.length & (PAGE_SIZE - 1))
                return -EINVAL;

        if (add_arg.offset + add_arg.length - PAGE_SIZE >= encl->size)
                return -EINVAL;

        if (copy_from_user(&secinfo, (void __user *)add_arg.secinfo,
                           sizeof(secinfo)))
                return -EFAULT;

        if (sgx_validate_secinfo(&secinfo))
                return -EINVAL;

        for (c = 0; c < add_arg.length; c += PAGE_SIZE) {
                if (signal_pending(current)) {
                        if (!c)
                                ret = -ERESTARTSYS;

                        break;
                }

                if (need_resched())
                        cond_resched();

                ret = sgx_encl_add_page(encl, add_arg.src + c, add_arg.offset + c,
                                        &secinfo, add_arg.flags);
                if (ret)
                        break;
        }

        add_arg.count = c;

        if (copy_to_user(arg, &add_arg, sizeof(add_arg)))
                return -EFAULT;

        return ret;
}
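
/*
 * A minimal userspace sketch for this handler (illustrative only; encl_fd,
 * src_buf, secinfo and npages are hypothetical caller-side names; secinfo
 * is an architectural SECINFO structure built by the caller):
 *
 *        struct sgx_enclave_add_pages add_arg = {
 *                .src     = (__u64)(uintptr_t)src_buf,  // page-aligned source
 *                .offset  = 0,                          // page-aligned enclave offset
 *                .length  = npages * 4096,
 *                .secinfo = (__u64)(uintptr_t)&secinfo,
 *                .flags   = SGX_PAGE_MEASURE,
 *        };
 *
 *        if (ioctl(encl_fd, SGX_IOC_ENCLAVE_ADD_PAGES, &add_arg))
 *                perror("SGX_IOC_ENCLAVE_ADD_PAGES");
 *
 * On return, add_arg.count holds the number of bytes actually processed.
 */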

static int __sgx_get_key_hash(struct crypto_shash *tfm, const void *modulus,
                              void *hash)
{
        SHASH_DESC_ON_STACK(shash, tfm);

        shash->tfm = tfm;

        return crypto_shash_digest(shash, modulus, SGX_MODULUS_SIZE, hash);
}

static int sgx_get_key_hash(const void *modulus, void *hash)
{
        struct crypto_shash *tfm;
        int ret;

        tfm = crypto_alloc_shash("sha256", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        ret = __sgx_get_key_hash(tfm, modulus, hash);

        crypto_free_shash(tfm);
        return ret;
}

static int sgx_encl_init(struct sgx_encl *encl, struct sgx_sigstruct *sigstruct,
                         void *token)
{
        u64 mrsigner[4];
        int i, j, k;
        void *addr;
        int ret;

        /*
         * Deny initializing enclaves with attributes (namely provisioning)
         * that have not been explicitly allowed.
         */
        if (encl->attributes & ~encl->attributes_mask)
                return -EACCES;

        /*
         * Attributes should not be enforced *only* against what's available on
         * the platform (done in sgx_encl_create) but checked and enforced
         * against the mask for enforcement in sigstruct. For example an
         * enclave could opt to sign with the AVX bit in xfrm, but still be
         * loadable on a platform without it if
         * sigstruct->body.attributes_mask does not turn that bit on.
         */
        if (sigstruct->body.attributes & sigstruct->body.attributes_mask &
            sgx_attributes_reserved_mask)
                return -EINVAL;

        if (sigstruct->body.miscselect & sigstruct->body.misc_mask &
            sgx_misc_reserved_mask)
                return -EINVAL;

        if (sigstruct->body.xfrm & sigstruct->body.xfrm_mask &
            sgx_xfrm_reserved_mask)
                return -EINVAL;

        ret = sgx_get_key_hash(sigstruct->modulus, mrsigner);
        if (ret)
                return ret;

        mutex_lock(&encl->lock);

        /*
         * ENCLS[EINIT] is interruptible because it has such a high latency,
         * e.g. 50k+ cycles on success. If an IRQ/NMI/SMI becomes pending,
         * EINIT may fail with SGX_UNMASKED_EVENT so that the event can be
         * serviced.
         */
        for (i = 0; i < SGX_EINIT_SLEEP_COUNT; i++) {
                for (j = 0; j < SGX_EINIT_SPIN_COUNT; j++) {
                        addr = sgx_get_epc_virt_addr(encl->secs.epc_page);

                        preempt_disable();

                        for (k = 0; k < 4; k++)
                                wrmsrl(MSR_IA32_SGXLEPUBKEYHASH0 + k, mrsigner[k]);

                        ret = __einit(sigstruct, token, addr);

                        preempt_enable();

                        if (ret == SGX_UNMASKED_EVENT)
                                continue;
                        else
                                break;
                }

                if (ret != SGX_UNMASKED_EVENT)
                        break;

                msleep_interruptible(SGX_EINIT_SLEEP_TIME);

                if (signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        goto err_out;
                }
        }

        if (ret & ENCLS_FAULT_FLAG) {
                if (encls_failed(ret))
                        ENCLS_WARN(ret, "EINIT");

                ret = -EIO;
        } else if (ret) {
                pr_debug("EINIT returned %d\n", ret);
                ret = -EPERM;
        } else {
                set_bit(SGX_ENCL_INITIALIZED, &encl->flags);
        }

err_out:
        mutex_unlock(&encl->lock);
        return ret;
}

/**
 * sgx_ioc_enclave_init() - handler for %SGX_IOC_ENCLAVE_INIT
 * @encl: an enclave pointer
 * @arg: userspace pointer to a struct sgx_enclave_init instance
 *
 * Flush any outstanding enqueued EADD operations and perform EINIT. The
 * Launch Enclave Public Key Hash MSRs are rewritten as necessary to match
 * the enclave's MRSIGNER, which is calculated from the provided sigstruct.
 *
 * Return:
 * - 0: Success.
 * - -EPERM: Invalid SIGSTRUCT.
 * - -EIO: EINIT failed because of a power cycle.
 * - -errno: POSIX error.
 */
static long sgx_ioc_enclave_init(struct sgx_encl *encl, void __user *arg)
{
        struct sgx_sigstruct *sigstruct;
        struct sgx_enclave_init init_arg;
        struct page *initp_page;
        void *token;
        int ret;

        if (!test_bit(SGX_ENCL_CREATED, &encl->flags) ||
            test_bit(SGX_ENCL_INITIALIZED, &encl->flags))
                return -EINVAL;

        if (copy_from_user(&init_arg, arg, sizeof(init_arg)))
                return -EFAULT;

        initp_page = alloc_page(GFP_KERNEL);
        if (!initp_page)
                return -ENOMEM;

        sigstruct = kmap(initp_page);
        token = (void *)((unsigned long)sigstruct + PAGE_SIZE / 2);
        memset(token, 0, SGX_LAUNCH_TOKEN_SIZE);

        if (copy_from_user(sigstruct, (void __user *)init_arg.sigstruct,
                           sizeof(*sigstruct))) {
                ret = -EFAULT;
                goto out;
        }

        /*
         * SIGSTRUCT.VENDOR is a legacy field used with Intel-signed enclaves.
         * The values used to distinguish regular from architectural enclaves.
         * The CPU accepts only these two values, but they carry no other
         * meaning.
         *
         * Thus, reject any other values.
         */
        if (sigstruct->header.vendor != 0x0000 &&
            sigstruct->header.vendor != 0x8086) {
                ret = -EINVAL;
                goto out;
        }

        ret = sgx_encl_init(encl, sigstruct, token);

out:
        kunmap(initp_page);
        __free_page(initp_page);
        return ret;
}
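
/*
 * A minimal userspace sketch for this handler (illustrative only; encl_fd
 * and sigstruct are hypothetical caller-side names; the SIGSTRUCT blob is
 * produced by the enclave signing tool):
 *
 *        struct sgx_enclave_init init_arg = {
 *                .sigstruct = (__u64)(uintptr_t)&sigstruct,
 *        };
 *
 *        if (ioctl(encl_fd, SGX_IOC_ENCLAVE_INIT, &init_arg))
 *                perror("SGX_IOC_ENCLAVE_INIT");
 */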

/**
 * sgx_ioc_enclave_provision() - handler for %SGX_IOC_ENCLAVE_PROVISION
 * @encl: an enclave pointer
 * @arg: userspace pointer to a struct sgx_enclave_provision instance
 *
 * Allow ATTRIBUTE.PROVISION_KEY for an enclave by providing a file handle to
 * /dev/sgx_provision.
 *
 * Return:
 * - 0: Success.
 * - -errno: Otherwise.
 */
static long sgx_ioc_enclave_provision(struct sgx_encl *encl, void __user *arg)
{
        struct sgx_enclave_provision params;
        struct file *file;

        if (copy_from_user(&params, arg, sizeof(params)))
                return -EFAULT;

        file = fget(params.fd);
        if (!file)
                return -EINVAL;

        if (file->f_op != &sgx_provision_fops) {
                fput(file);
                return -EINVAL;
        }

        encl->attributes_mask |= SGX_ATTR_PROVISIONKEY;

        fput(file);
        return 0;
}

long sgx_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
        struct sgx_encl *encl = filep->private_data;
        int ret;

        if (test_and_set_bit(SGX_ENCL_IOCTL, &encl->flags))
                return -EBUSY;

        switch (cmd) {
        case SGX_IOC_ENCLAVE_CREATE:
                ret = sgx_ioc_enclave_create(encl, (void __user *)arg);
                break;
        case SGX_IOC_ENCLAVE_ADD_PAGES:
                ret = sgx_ioc_enclave_add_pages(encl, (void __user *)arg);
                break;
        case SGX_IOC_ENCLAVE_INIT:
                ret = sgx_ioc_enclave_init(encl, (void __user *)arg);
                break;
        case SGX_IOC_ENCLAVE_PROVISION:
                ret = sgx_ioc_enclave_provision(encl, (void __user *)arg);
                break;
        default:
                ret = -ENOIOCTLCMD;
                break;
        }

        clear_bit(SGX_ENCL_IOCTL, &encl->flags);
        return ret;
}
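
/*
 * Putting the ioctls together, a typical userspace build sequence (an
 * illustrative sketch only) is: SGX_IOC_ENCLAVE_CREATE once, then
 * SGX_IOC_ENCLAVE_ADD_PAGES for each chunk of enclave memory, then
 * SGX_IOC_ENCLAVE_INIT. An enclave that needs ATTRIBUTE.PROVISION_KEY must
 * additionally call SGX_IOC_ENCLAVE_PROVISION before EINIT, e.g.:
 *
 *        int provision_fd = open("/dev/sgx_provision", O_RDWR);
 *        struct sgx_enclave_provision params = { .fd = provision_fd };
 *
 *        if (ioctl(encl_fd, SGX_IOC_ENCLAVE_PROVISION, &params))
 *                perror("SGX_IOC_ENCLAVE_PROVISION");
 */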