// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2016-20 Intel Corporation. */

#include <linux/lockdep.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/suspend.h>
#include <linux/sched/mm.h>
#include <asm/sgx.h>
#include "encl.h"
#include "encls.h"
#include "sgx.h"

/*
 * ELDU: Load an EPC page as unblocked. For more info, see "OS Management of EPC
 * Pages" in the SDM.
 */
static int __sgx_encl_eldu(struct sgx_encl_page *encl_page,
                           struct sgx_epc_page *epc_page,
                           struct sgx_epc_page *secs_page)
{
        unsigned long va_offset = encl_page->desc & SGX_ENCL_PAGE_VA_OFFSET_MASK;
        struct sgx_encl *encl = encl_page->encl;
        struct sgx_pageinfo pginfo;
        struct sgx_backing b;
        pgoff_t page_index;
        int ret;

        if (secs_page)
                page_index = PFN_DOWN(encl_page->desc - encl_page->encl->base);
        else
                page_index = PFN_DOWN(encl->size);

        ret = sgx_encl_get_backing(encl, page_index, &b);
        if (ret)
                return ret;

        pginfo.addr = encl_page->desc & PAGE_MASK;
        pginfo.contents = (unsigned long)kmap_atomic(b.contents);
        pginfo.metadata = (unsigned long)kmap_atomic(b.pcmd) +
                          b.pcmd_offset;

        if (secs_page)
                pginfo.secs = (u64)sgx_get_epc_virt_addr(secs_page);
        else
                pginfo.secs = 0;

        ret = __eldu(&pginfo, sgx_get_epc_virt_addr(epc_page),
                     sgx_get_epc_virt_addr(encl_page->va_page->epc_page) + va_offset);
        if (ret) {
                if (encls_failed(ret))
                        ENCLS_WARN(ret, "ELDU");

                ret = -EFAULT;
        }

        kunmap_atomic((void *)(unsigned long)(pginfo.metadata - b.pcmd_offset));
        kunmap_atomic((void *)(unsigned long)pginfo.contents);

        sgx_encl_put_backing(&b, false);

        return ret;
}

static struct sgx_epc_page *sgx_encl_eldu(struct sgx_encl_page *encl_page,
                                          struct sgx_epc_page *secs_page)
{
        unsigned long va_offset = encl_page->desc & SGX_ENCL_PAGE_VA_OFFSET_MASK;
        struct sgx_encl *encl = encl_page->encl;
        struct sgx_epc_page *epc_page;
        int ret;

        epc_page = sgx_alloc_epc_page(encl_page, false);
        if (IS_ERR(epc_page))
                return epc_page;

        ret = __sgx_encl_eldu(encl_page, epc_page, secs_page);
        if (ret) {
                sgx_encl_free_epc_page(epc_page);
                return ERR_PTR(ret);
        }

        sgx_free_va_slot(encl_page->va_page, va_offset);
        list_move(&encl_page->va_page->list, &encl->va_pages);
        encl_page->desc &= ~SGX_ENCL_PAGE_VA_OFFSET_MASK;
        encl_page->epc_page = epc_page;

        return epc_page;
}

static struct sgx_encl_page *sgx_encl_load_page(struct sgx_encl *encl,
                                                unsigned long addr,
                                                unsigned long vm_flags)
{
        unsigned long vm_prot_bits = vm_flags & (VM_READ | VM_WRITE | VM_EXEC);
        struct sgx_epc_page *epc_page;
        struct sgx_encl_page *entry;

        entry = xa_load(&encl->page_array, PFN_DOWN(addr));
        if (!entry)
                return ERR_PTR(-EFAULT);

        /*
         * Verify that the faulted page has equal or higher build time
         * permissions than the VMA permissions (i.e. the subset of {VM_READ,
         * VM_WRITE, VM_EXEC} in vma->vm_flags).
         */
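        /*
         * As an illustration of the check below: a page whose strongest
         * declared permissions are RW (vm_max_prot_bits == VM_READ | VM_WRITE)
         * can be faulted into a VMA mapped with PROT_READ or
         * PROT_READ | PROT_WRITE, but the fault is rejected when the VMA also
         * carries VM_EXEC.
         */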
        if ((entry->vm_max_prot_bits & vm_prot_bits) != vm_prot_bits)
                return ERR_PTR(-EFAULT);

        /* Entry successfully located. */
        if (entry->epc_page) {
                if (entry->desc & SGX_ENCL_PAGE_BEING_RECLAIMED)
                        return ERR_PTR(-EBUSY);

                return entry;
        }

        if (!(encl->secs.epc_page)) {
                epc_page = sgx_encl_eldu(&encl->secs, NULL);
                if (IS_ERR(epc_page))
                        return ERR_CAST(epc_page);
        }

        epc_page = sgx_encl_eldu(entry, encl->secs.epc_page);
        if (IS_ERR(epc_page))
                return ERR_CAST(epc_page);

        encl->secs_child_cnt++;
        sgx_mark_page_reclaimable(entry->epc_page);

        return entry;
}

static vm_fault_t sgx_vma_fault(struct vm_fault *vmf)
{
        unsigned long addr = (unsigned long)vmf->address;
        struct vm_area_struct *vma = vmf->vma;
        struct sgx_encl_page *entry;
        unsigned long phys_addr;
        struct sgx_encl *encl;
        vm_fault_t ret;

        encl = vma->vm_private_data;

        /*
         * It's very unlikely but possible that allocating memory for the
         * mm_list entry of a forked process failed in sgx_vma_open(). When
         * this happens, vm_private_data is set to NULL.
         */
        if (unlikely(!encl))
                return VM_FAULT_SIGBUS;

        mutex_lock(&encl->lock);

        entry = sgx_encl_load_page(encl, addr, vma->vm_flags);
        if (IS_ERR(entry)) {
                mutex_unlock(&encl->lock);

                if (PTR_ERR(entry) == -EBUSY)
                        return VM_FAULT_NOPAGE;

                return VM_FAULT_SIGBUS;
        }

        phys_addr = sgx_get_epc_phys_addr(entry->epc_page);

        ret = vmf_insert_pfn(vma, addr, PFN_DOWN(phys_addr));
        if (ret != VM_FAULT_NOPAGE) {
                mutex_unlock(&encl->lock);

                return VM_FAULT_SIGBUS;
        }

        sgx_encl_test_and_clear_young(vma->vm_mm, entry);
        mutex_unlock(&encl->lock);

        return VM_FAULT_NOPAGE;
}

static void sgx_vma_open(struct vm_area_struct *vma)
{
        struct sgx_encl *encl = vma->vm_private_data;

        /*
         * It's possible but unlikely that vm_private_data is NULL. This can
         * happen in a grandchild of a process when sgx_encl_mm_add() failed
         * to allocate memory in this callback.
         */
        if (unlikely(!encl))
                return;

        if (sgx_encl_mm_add(encl, vma->vm_mm))
                vma->vm_private_data = NULL;
}

/**
 * sgx_encl_may_map() - Check if a requested VMA mapping is allowed
 * @encl:     an enclave pointer
 * @start:    lower bound of the address range, inclusive
 * @end:      upper bound of the address range, exclusive
 * @vm_flags: VMA flags
 *
 * Iterate through the enclave pages contained within [@start, @end) to verify
 * that the requested permissions (the subset of {VM_READ, VM_WRITE, VM_EXEC}
 * in @vm_flags) do not exceed the build time permissions of any enclave page
 * within the given address range.
 *
 * An enclave creator must declare the strongest permissions that will be
 * needed for each enclave page. This ensures that mappings have permissions
 * identical to or weaker than the declared permissions.
 *
 * Return: 0 on success, -EACCES otherwise
 */
int sgx_encl_may_map(struct sgx_encl *encl, unsigned long start,
                     unsigned long end, unsigned long vm_flags)
{
        unsigned long vm_prot_bits = vm_flags & (VM_READ | VM_WRITE | VM_EXEC);
        struct sgx_encl_page *page;
        unsigned long count = 0;
        int ret = 0;

        XA_STATE(xas, &encl->page_array, PFN_DOWN(start));

        /*
         * Disallow READ_IMPLIES_EXEC tasks as their VMA permissions might
         * conflict with the enclave page permissions.
         */
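        /*
         * As an illustration (behaviour of the generic mmap() path, not of
         * this file): with READ_IMPLIES_EXEC set, a PROT_READ mapping gets
         * PROT_EXEC added implicitly, so the resulting VMA could carry
         * VM_EXEC even for pages that were never declared executable.
         */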
        if (current->personality & READ_IMPLIES_EXEC)
                return -EACCES;

        mutex_lock(&encl->lock);
        xas_lock(&xas);
        xas_for_each(&xas, page, PFN_DOWN(end - 1)) {
                if (~page->vm_max_prot_bits & vm_prot_bits) {
                        ret = -EACCES;
                        break;
                }

                /* Reschedule on every XA_CHECK_SCHED iteration. */
                if (!(++count % XA_CHECK_SCHED)) {
                        xas_pause(&xas);
                        xas_unlock(&xas);
                        mutex_unlock(&encl->lock);

                        cond_resched();

                        mutex_lock(&encl->lock);
                        xas_lock(&xas);
                }
        }
        xas_unlock(&xas);
        mutex_unlock(&encl->lock);

        return ret;
}

static int sgx_vma_mprotect(struct vm_area_struct *vma, unsigned long start,
                            unsigned long end, unsigned long newflags)
{
        return sgx_encl_may_map(vma->vm_private_data, start, end, newflags);
}

static int sgx_encl_debug_read(struct sgx_encl *encl, struct sgx_encl_page *page,
                               unsigned long addr, void *data)
{
        unsigned long offset = addr & ~PAGE_MASK;
        int ret;

        ret = __edbgrd(sgx_get_epc_virt_addr(page->epc_page) + offset, data);
        if (ret)
                return -EIO;

        return 0;
}

static int sgx_encl_debug_write(struct sgx_encl *encl, struct sgx_encl_page *page,
                                unsigned long addr, void *data)
{
        unsigned long offset = addr & ~PAGE_MASK;
        int ret;

        ret = __edbgwr(sgx_get_epc_virt_addr(page->epc_page) + offset, data);
        if (ret)
                return -EIO;

        return 0;
}

/*
 * Load an enclave page to EPC if required, and take encl->lock.
 */
static struct sgx_encl_page *sgx_encl_reserve_page(struct sgx_encl *encl,
                                                   unsigned long addr,
                                                   unsigned long vm_flags)
{
        struct sgx_encl_page *entry;

        for ( ; ; ) {
                mutex_lock(&encl->lock);

                entry = sgx_encl_load_page(encl, addr, vm_flags);
                if (PTR_ERR(entry) != -EBUSY)
                        break;

                mutex_unlock(&encl->lock);
        }

        if (IS_ERR(entry))
                mutex_unlock(&encl->lock);

        return entry;
}

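/*
 * sgx_vma_access() below performs debug accesses in chunks that are aligned
 * down to sizeof(unsigned long).  As a worked example, a 5-byte read starting
 * at address 0x1006 of a debug enclave is served in two iterations: 2 bytes
 * taken at offset 6 of the word at 0x1000, then 3 bytes taken at offset 0 of
 * the word at 0x1008.
 */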
static int sgx_vma_access(struct vm_area_struct *vma, unsigned long addr,
                          void *buf, int len, int write)
{
        struct sgx_encl *encl = vma->vm_private_data;
        struct sgx_encl_page *entry = NULL;
        char data[sizeof(unsigned long)];
        unsigned long align;
        int offset;
        int cnt;
        int ret = 0;
        int i;

        /*
         * If process was forked, VMA is still there but vm_private_data is set
         * to NULL.
         */
        if (!encl)
                return -EFAULT;

        if (!test_bit(SGX_ENCL_DEBUG, &encl->flags))
                return -EFAULT;

        for (i = 0; i < len; i += cnt) {
                entry = sgx_encl_reserve_page(encl, (addr + i) & PAGE_MASK,
                                              vma->vm_flags);
                if (IS_ERR(entry)) {
                        ret = PTR_ERR(entry);
                        break;
                }

                align = ALIGN_DOWN(addr + i, sizeof(unsigned long));
                offset = (addr + i) & (sizeof(unsigned long) - 1);
                cnt = sizeof(unsigned long) - offset;
                cnt = min(cnt, len - i);

                ret = sgx_encl_debug_read(encl, entry, align, data);
                if (ret)
                        goto out;

                if (write) {
                        memcpy(data + offset, buf + i, cnt);
                        ret = sgx_encl_debug_write(encl, entry, align, data);
                        if (ret)
                                goto out;
                } else {
                        memcpy(buf + i, data + offset, cnt);
                }

out:
                mutex_unlock(&encl->lock);

                if (ret)
                        break;
        }

        return ret < 0 ? ret : i;
}

const struct vm_operations_struct sgx_vm_ops = {
        .fault = sgx_vma_fault,
        .mprotect = sgx_vma_mprotect,
        .open = sgx_vma_open,
        .access = sgx_vma_access,
};

/**
 * sgx_encl_release - Destroy an enclave instance
 * @ref: address of a kref inside &sgx_encl
 *
 * Used together with kref_put(). Frees all the resources associated with the
 * enclave and the instance itself.
 */
void sgx_encl_release(struct kref *ref)
{
        struct sgx_encl *encl = container_of(ref, struct sgx_encl, refcount);
        struct sgx_va_page *va_page;
        struct sgx_encl_page *entry;
        unsigned long index;

        xa_for_each(&encl->page_array, index, entry) {
                if (entry->epc_page) {
                        /*
                         * The page and its xarray entry cannot be freed
                         * if the page is being held by the reclaimer.
                         */
                        if (sgx_unmark_page_reclaimable(entry->epc_page))
                                continue;

                        sgx_encl_free_epc_page(entry->epc_page);
                        encl->secs_child_cnt--;
                        entry->epc_page = NULL;
                }

                kfree(entry);
                /* Invoke scheduler to prevent soft lockups. */
                cond_resched();
        }

        xa_destroy(&encl->page_array);

        if (!encl->secs_child_cnt && encl->secs.epc_page) {
                sgx_encl_free_epc_page(encl->secs.epc_page);
                encl->secs.epc_page = NULL;
        }

        while (!list_empty(&encl->va_pages)) {
                va_page = list_first_entry(&encl->va_pages, struct sgx_va_page,
                                           list);
                list_del(&va_page->list);
                sgx_encl_free_epc_page(va_page->epc_page);
                kfree(va_page);
        }

        if (encl->backing)
                fput(encl->backing);

        cleanup_srcu_struct(&encl->srcu);

        WARN_ON_ONCE(!list_empty(&encl->mm_list));

        /* Detect EPC page leaks. */
        WARN_ON_ONCE(encl->secs_child_cnt);
        WARN_ON_ONCE(encl->secs.epc_page);

        kfree(encl);
}

/*
 * 'mm' is exiting and no longer needs mmu notifications.
 */
static void sgx_mmu_notifier_release(struct mmu_notifier *mn,
                                     struct mm_struct *mm)
{
        struct sgx_encl_mm *encl_mm = container_of(mn, struct sgx_encl_mm, mmu_notifier);
        struct sgx_encl_mm *tmp = NULL;

        /*
         * The enclave itself can remove encl_mm. Note, objects can't be moved
         * off an RCU protected list, but deletion is ok.
         */
        spin_lock(&encl_mm->encl->mm_lock);
        list_for_each_entry(tmp, &encl_mm->encl->mm_list, list) {
                if (tmp == encl_mm) {
                        list_del_rcu(&encl_mm->list);
                        break;
                }
        }
        spin_unlock(&encl_mm->encl->mm_lock);

        if (tmp == encl_mm) {
                synchronize_srcu(&encl_mm->encl->srcu);
                mmu_notifier_put(mn);
        }
}

static void sgx_mmu_notifier_free(struct mmu_notifier *mn)
{
        struct sgx_encl_mm *encl_mm = container_of(mn, struct sgx_encl_mm, mmu_notifier);

        /* 'encl_mm' is going away, put encl_mm->encl reference: */
        kref_put(&encl_mm->encl->refcount, sgx_encl_release);

        kfree(encl_mm);
}

static const struct mmu_notifier_ops sgx_mmu_notifier_ops = {
        .release = sgx_mmu_notifier_release,
        .free_notifier = sgx_mmu_notifier_free,
};

static struct sgx_encl_mm *sgx_encl_find_mm(struct sgx_encl *encl,
                                            struct mm_struct *mm)
{
        struct sgx_encl_mm *encl_mm = NULL;
        struct sgx_encl_mm *tmp;
        int idx;

        idx = srcu_read_lock(&encl->srcu);

        list_for_each_entry_rcu(tmp, &encl->mm_list, list) {
                if (tmp->mm == mm) {
                        encl_mm = tmp;
                        break;
                }
        }

        srcu_read_unlock(&encl->srcu, idx);

        return encl_mm;
}

int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm)
{
        struct sgx_encl_mm *encl_mm;
        int ret;

        /*
         * Even though a single enclave may be mapped into an mm more than once,
         * each 'mm' only appears once on encl->mm_list. This is guaranteed by
         * holding the mm's mmap lock for write before an mm can be added to or
         * removed from an encl->mm_list.
         */
        mmap_assert_write_locked(mm);

        /*
         * It's possible that an entry already exists in the mm_list, because it
         * is removed only on VFS release or process exit.
         */
        if (sgx_encl_find_mm(encl, mm))
                return 0;

        encl_mm = kzalloc(sizeof(*encl_mm), GFP_KERNEL);
        if (!encl_mm)
                return -ENOMEM;

        /* Grab a refcount for the encl_mm->encl reference: */
        kref_get(&encl->refcount);
        encl_mm->encl = encl;
        encl_mm->mm = mm;
        encl_mm->mmu_notifier.ops = &sgx_mmu_notifier_ops;

        ret = __mmu_notifier_register(&encl_mm->mmu_notifier, mm);
        if (ret) {
                kfree(encl_mm);
                return ret;
        }

        spin_lock(&encl->mm_lock);
        list_add_rcu(&encl_mm->list, &encl->mm_list);
        /* Pairs with smp_rmb() in sgx_reclaimer_block(). */
        smp_wmb();
        encl->mm_list_version++;
        spin_unlock(&encl->mm_lock);

        return 0;
}

static struct page *sgx_encl_get_backing_page(struct sgx_encl *encl,
                                              pgoff_t index)
{
        struct inode *inode = encl->backing->f_path.dentry->d_inode;
        struct address_space *mapping = inode->i_mapping;
        gfp_t gfpmask = mapping_gfp_mask(mapping);

        return shmem_read_mapping_page_gfp(mapping, index, gfpmask);
}

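/*
 * Layout of the backing storage implied by the index arithmetic used below
 * and in __sgx_encl_eldu(), in PAGE_SIZE units:
 *
 *   [0, PFN_DOWN(encl->size))    encrypted contents of the enclave pages
 *   PFN_DOWN(encl->size)         encrypted contents of the SECS
 *   PFN_DOWN(encl->size) + 1 ..  PCMD entries; assuming the 128-byte
 *                                struct sgx_pcmd of the SDM, 32 entries share
 *                                a page, hence "page_index >> 5" below.
 */
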
/**
 * sgx_encl_get_backing() - Pin the backing storage
 * @encl:       an enclave pointer
 * @page_index: enclave page index
 * @backing:    data for accessing backing storage for the page
 *
 * Pin the backing storage pages for storing the encrypted contents and Paging
 * Crypto MetaData (PCMD) of an enclave page.
 *
 * Return:
 *   0 on success,
 *   -errno otherwise.
 */
int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index,
                         struct sgx_backing *backing)
{
        pgoff_t pcmd_index = PFN_DOWN(encl->size) + 1 + (page_index >> 5);
        struct page *contents;
        struct page *pcmd;

        contents = sgx_encl_get_backing_page(encl, page_index);
        if (IS_ERR(contents))
                return PTR_ERR(contents);

        pcmd = sgx_encl_get_backing_page(encl, pcmd_index);
        if (IS_ERR(pcmd)) {
                put_page(contents);
                return PTR_ERR(pcmd);
        }

        backing->page_index = page_index;
        backing->contents = contents;
        backing->pcmd = pcmd;
        backing->pcmd_offset =
                (page_index & (PAGE_SIZE / sizeof(struct sgx_pcmd) - 1)) *
                sizeof(struct sgx_pcmd);

        return 0;
}

/**
 * sgx_encl_put_backing() - Unpin the backing storage
 * @backing:  data for accessing backing storage for the page
 * @do_write: mark pages dirty
 */
void sgx_encl_put_backing(struct sgx_backing *backing, bool do_write)
{
        if (do_write) {
                set_page_dirty(backing->pcmd);
                set_page_dirty(backing->contents);
        }

        put_page(backing->pcmd);
        put_page(backing->contents);
}

static int sgx_encl_test_and_clear_young_cb(pte_t *ptep, unsigned long addr,
                                            void *data)
{
        pte_t pte;
        int ret;

        ret = pte_young(*ptep);
        if (ret) {
                pte = pte_mkold(*ptep);
                set_pte_at((struct mm_struct *)data, addr, ptep, pte);
        }

        return ret;
}

/**
 * sgx_encl_test_and_clear_young() - Test and reset the accessed bit
 * @mm:   mm_struct that is checked
 * @page: enclave page to be tested for recent access
 *
 * Checks the Accessed (A) bit from the PTE corresponding to the enclave page
 * and clears it.
 *
 * Return: 1 if the page has been recently accessed and 0 if not.
 */
int sgx_encl_test_and_clear_young(struct mm_struct *mm,
                                  struct sgx_encl_page *page)
{
        unsigned long addr = page->desc & PAGE_MASK;
        struct sgx_encl *encl = page->encl;
        struct vm_area_struct *vma;
        int ret;

        ret = sgx_encl_find(mm, addr, &vma);
        if (ret)
                return 0;

        if (encl != vma->vm_private_data)
                return 0;

        ret = apply_to_page_range(vma->vm_mm, addr, PAGE_SIZE,
                                  sgx_encl_test_and_clear_young_cb, vma->vm_mm);
        if (ret < 0)
                return 0;

        return ret;
}

/**
 * sgx_alloc_va_page() - Allocate a Version Array (VA) page
 *
 * Allocate a free EPC page and convert it to a Version Array (VA) page.
 *
 * Return:
 *   a VA page,
 *   -errno otherwise
 */
struct sgx_epc_page *sgx_alloc_va_page(void)
{
        struct sgx_epc_page *epc_page;
        int ret;

        epc_page = sgx_alloc_epc_page(NULL, true);
        if (IS_ERR(epc_page))
                return ERR_CAST(epc_page);

        ret = __epa(sgx_get_epc_virt_addr(epc_page));
        if (ret) {
                WARN_ONCE(1, "EPA returned %d (0x%x)", ret, ret);
                sgx_encl_free_epc_page(epc_page);
                return ERR_PTR(-EFAULT);
        }

        return epc_page;
}

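/*
 * Each VA slot holds an 8-byte version number, which is why a slot index is
 * converted to a byte offset with "<< 3" below and back with ">> 3" in
 * sgx_free_va_slot().  Assuming SGX_VA_SLOT_COUNT is 512, the slots exactly
 * fill a 4K VA page (512 * 8 == 4096).
 */
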
/**
 * sgx_alloc_va_slot - allocate a VA slot
 * @va_page: a &struct sgx_va_page instance
 *
 * Allocates a slot from a &struct sgx_va_page instance.
 *
 * Return: offset of the slot inside the VA page
 */
unsigned int sgx_alloc_va_slot(struct sgx_va_page *va_page)
{
        int slot = find_first_zero_bit(va_page->slots, SGX_VA_SLOT_COUNT);

        if (slot < SGX_VA_SLOT_COUNT)
                set_bit(slot, va_page->slots);

        return slot << 3;
}

/**
 * sgx_free_va_slot - free a VA slot
 * @va_page: a &struct sgx_va_page instance
 * @offset:  offset of the slot inside the VA page
 *
 * Frees a slot from a &struct sgx_va_page instance.
 */
void sgx_free_va_slot(struct sgx_va_page *va_page, unsigned int offset)
{
        clear_bit(offset >> 3, va_page->slots);
}

/**
 * sgx_va_page_full - is the VA page full?
 * @va_page: a &struct sgx_va_page instance
 *
 * Return: true if all slots have been taken
 */
bool sgx_va_page_full(struct sgx_va_page *va_page)
{
        int slot = find_first_zero_bit(va_page->slots, SGX_VA_SLOT_COUNT);

        return slot == SGX_VA_SLOT_COUNT;
}

/**
 * sgx_encl_free_epc_page - free an EPC page assigned to an enclave
 * @page: EPC page to be freed
 *
 * Free an EPC page assigned to an enclave. Do EREMOVE on the page and, only
 * upon success, return the page to the free page list. Otherwise, WARN to
 * indicate that the page has been leaked.
 */
void sgx_encl_free_epc_page(struct sgx_epc_page *page)
{
        int ret;

        WARN_ON_ONCE(page->flags & SGX_EPC_PAGE_RECLAIMER_TRACKED);

        ret = __eremove(sgx_get_epc_virt_addr(page));
        if (WARN_ONCE(ret, EREMOVE_ERROR_MESSAGE, ret, ret))
                return;

        sgx_free_epc_page(page);
}