// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2016-20 Intel Corporation. */

#include <linux/lockdep.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/suspend.h>
#include <linux/sched/mm.h>
#include "arch.h"
#include "encl.h"
#include "encls.h"
#include "sgx.h"

/*
 * ELDU: Load an EPC page as unblocked. For more info, see "OS Management of EPC
 * Pages" in the SDM.
 */
static int __sgx_encl_eldu(struct sgx_encl_page *encl_page,
			   struct sgx_epc_page *epc_page,
			   struct sgx_epc_page *secs_page)
{
	unsigned long va_offset = encl_page->desc & SGX_ENCL_PAGE_VA_OFFSET_MASK;
	struct sgx_encl *encl = encl_page->encl;
	struct sgx_pageinfo pginfo;
	struct sgx_backing b;
	pgoff_t page_index;
	int ret;

	if (secs_page)
		page_index = PFN_DOWN(encl_page->desc - encl_page->encl->base);
	else
		page_index = PFN_DOWN(encl->size);

	ret = sgx_encl_get_backing(encl, page_index, &b);
	if (ret)
		return ret;

	pginfo.addr = encl_page->desc & PAGE_MASK;
	pginfo.contents = (unsigned long)kmap_atomic(b.contents);
	pginfo.metadata = (unsigned long)kmap_atomic(b.pcmd) +
			  b.pcmd_offset;

	if (secs_page)
		pginfo.secs = (u64)sgx_get_epc_virt_addr(secs_page);
	else
		pginfo.secs = 0;

	ret = __eldu(&pginfo, sgx_get_epc_virt_addr(epc_page),
		     sgx_get_epc_virt_addr(encl_page->va_page->epc_page) + va_offset);
	if (ret) {
		if (encls_failed(ret))
			ENCLS_WARN(ret, "ELDU");

		ret = -EFAULT;
	}

	kunmap_atomic((void *)(unsigned long)(pginfo.metadata - b.pcmd_offset));
	kunmap_atomic((void *)(unsigned long)pginfo.contents);

	sgx_encl_put_backing(&b, false);

	return ret;
}

static struct sgx_epc_page *sgx_encl_eldu(struct sgx_encl_page *encl_page,
					  struct sgx_epc_page *secs_page)
{
	unsigned long va_offset = encl_page->desc & SGX_ENCL_PAGE_VA_OFFSET_MASK;
	struct sgx_encl *encl = encl_page->encl;
	struct sgx_epc_page *epc_page;
	int ret;

	epc_page = sgx_alloc_epc_page(encl_page, false);
	if (IS_ERR(epc_page))
		return epc_page;

	ret = __sgx_encl_eldu(encl_page, epc_page, secs_page);
	if (ret) {
		sgx_free_epc_page(epc_page);
		return ERR_PTR(ret);
	}

	sgx_free_va_slot(encl_page->va_page, va_offset);
	list_move(&encl_page->va_page->list, &encl->va_pages);
	encl_page->desc &= ~SGX_ENCL_PAGE_VA_OFFSET_MASK;
	encl_page->epc_page = epc_page;

	return epc_page;
}

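/*
 * Resolve the enclave page that backs the given enclave address. If the page
 * (or the SECS page it is a child of) has been swapped out, load it back into
 * the EPC with ELDU. Callers must hold encl->lock. Returns -EBUSY if the page
 * is currently being reclaimed.
 */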
static struct sgx_encl_page *sgx_encl_load_page(struct sgx_encl *encl,
						unsigned long addr,
						unsigned long vm_flags)
{
	unsigned long vm_prot_bits = vm_flags & (VM_READ | VM_WRITE | VM_EXEC);
	struct sgx_epc_page *epc_page;
	struct sgx_encl_page *entry;

	entry = xa_load(&encl->page_array, PFN_DOWN(addr));
	if (!entry)
		return ERR_PTR(-EFAULT);

	/*
	 * Verify that the faulted page has equal or higher build time
	 * permissions than the VMA permissions (i.e. the subset of {VM_READ,
	 * VM_WRITE, VM_EXEC} in vma->vm_flags).
	 */
	if ((entry->vm_max_prot_bits & vm_prot_bits) != vm_prot_bits)
		return ERR_PTR(-EFAULT);

	/* Entry successfully located. */
	if (entry->epc_page) {
		if (entry->desc & SGX_ENCL_PAGE_BEING_RECLAIMED)
			return ERR_PTR(-EBUSY);

		return entry;
	}

	if (!(encl->secs.epc_page)) {
		epc_page = sgx_encl_eldu(&encl->secs, NULL);
		if (IS_ERR(epc_page))
			return ERR_CAST(epc_page);
	}

	epc_page = sgx_encl_eldu(entry, encl->secs.epc_page);
	if (IS_ERR(epc_page))
		return ERR_CAST(epc_page);

	encl->secs_child_cnt++;
	sgx_mark_page_reclaimable(entry->epc_page);

	return entry;
}

static vm_fault_t sgx_vma_fault(struct vm_fault *vmf)
{
	unsigned long addr = (unsigned long)vmf->address;
	struct vm_area_struct *vma = vmf->vma;
	struct sgx_encl_page *entry;
	unsigned long phys_addr;
	struct sgx_encl *encl;
	unsigned long pfn;
	vm_fault_t ret;

	encl = vma->vm_private_data;

	/*
	 * It's very unlikely but possible that allocating memory for the
	 * mm_list entry of a forked process failed in sgx_vma_open(). When
	 * this happens, vm_private_data is set to NULL.
	 */
	if (unlikely(!encl))
		return VM_FAULT_SIGBUS;

	mutex_lock(&encl->lock);

	entry = sgx_encl_load_page(encl, addr, vma->vm_flags);
	if (IS_ERR(entry)) {
		mutex_unlock(&encl->lock);

		if (PTR_ERR(entry) == -EBUSY)
			return VM_FAULT_NOPAGE;

		return VM_FAULT_SIGBUS;
	}

	phys_addr = sgx_get_epc_phys_addr(entry->epc_page);

	/* Check if another thread got here first to insert the PTE. */
	if (!follow_pfn(vma, addr, &pfn)) {
		mutex_unlock(&encl->lock);

		return VM_FAULT_NOPAGE;
	}

	ret = vmf_insert_pfn(vma, addr, PFN_DOWN(phys_addr));
	if (ret != VM_FAULT_NOPAGE) {
		mutex_unlock(&encl->lock);

		return VM_FAULT_SIGBUS;
	}

	sgx_encl_test_and_clear_young(vma->vm_mm, entry);
	mutex_unlock(&encl->lock);

	return VM_FAULT_NOPAGE;
}

static void sgx_vma_open(struct vm_area_struct *vma)
{
	struct sgx_encl *encl = vma->vm_private_data;

	/*
	 * It's possible but unlikely that vm_private_data is NULL. This can
	 * happen in a grandchild of a process, when sgx_encl_mm_add() failed
	 * to allocate memory in this callback.
	 */
	if (unlikely(!encl))
		return;

	if (sgx_encl_mm_add(encl, vma->vm_mm))
		vma->vm_private_data = NULL;
}

/**
 * sgx_encl_may_map() - Check if a requested VMA mapping is allowed
 * @encl:	an enclave pointer
 * @start:	lower bound of the address range, inclusive
 * @end:	upper bound of the address range, exclusive
 * @vm_flags:	VMA flags
 *
 * Iterate through the enclave pages contained within [@start, @end) to verify
 * that the permissions requested by @vm_flags (the subset of {VM_READ,
 * VM_WRITE, VM_EXEC}) do not exceed the build time permissions of any enclave
 * page within the given address range.
 *
 * An enclave creator must declare the strongest permissions that will be
 * needed for each enclave page. This ensures that mappings have permissions
 * identical to or weaker than the earlier declared permissions.
 *
 * Return: 0 on success, -EACCES otherwise
 */
int sgx_encl_may_map(struct sgx_encl *encl, unsigned long start,
		     unsigned long end, unsigned long vm_flags)
{
	unsigned long vm_prot_bits = vm_flags & (VM_READ | VM_WRITE | VM_EXEC);
	struct sgx_encl_page *page;
	unsigned long count = 0;
	int ret = 0;

	XA_STATE(xas, &encl->page_array, PFN_DOWN(start));

	/*
	 * Disallow READ_IMPLIES_EXEC tasks as their VMA permissions might
	 * conflict with the enclave page permissions.
	 */
	if (current->personality & READ_IMPLIES_EXEC)
		return -EACCES;

	mutex_lock(&encl->lock);
	xas_lock(&xas);
	xas_for_each(&xas, page, PFN_DOWN(end - 1)) {
		if (~page->vm_max_prot_bits & vm_prot_bits) {
			ret = -EACCES;
			break;
		}

		/* Reschedule on every XA_CHECK_SCHED iteration. */
		if (!(++count % XA_CHECK_SCHED)) {
			xas_pause(&xas);
			xas_unlock(&xas);
			mutex_unlock(&encl->lock);

			cond_resched();

			mutex_lock(&encl->lock);
			xas_lock(&xas);
		}
	}
	xas_unlock(&xas);
	mutex_unlock(&encl->lock);

	return ret;
}

static int sgx_vma_mprotect(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end, unsigned long newflags)
{
	return sgx_encl_may_map(vma->vm_private_data, start, end, newflags);
}

static int sgx_encl_debug_read(struct sgx_encl *encl, struct sgx_encl_page *page,
			       unsigned long addr, void *data)
{
	unsigned long offset = addr & ~PAGE_MASK;
	int ret;

	ret = __edbgrd(sgx_get_epc_virt_addr(page->epc_page) + offset, data);
	if (ret)
		return -EIO;

	return 0;
}

static int sgx_encl_debug_write(struct sgx_encl *encl, struct sgx_encl_page *page,
				unsigned long addr, void *data)
{
	unsigned long offset = addr & ~PAGE_MASK;
	int ret;

	ret = __edbgwr(sgx_get_epc_virt_addr(page->epc_page) + offset, data);
	if (ret)
		return -EIO;

	return 0;
}

/*
 * Load an enclave page to EPC if required, and take encl->lock.
 */
static struct sgx_encl_page *sgx_encl_reserve_page(struct sgx_encl *encl,
						   unsigned long addr,
						   unsigned long vm_flags)
{
	struct sgx_encl_page *entry;

	for ( ; ; ) {
		mutex_lock(&encl->lock);

		entry = sgx_encl_load_page(encl, addr, vm_flags);
		if (PTR_ERR(entry) != -EBUSY)
			break;

		mutex_unlock(&encl->lock);
	}

	if (IS_ERR(entry))
		mutex_unlock(&encl->lock);

	return entry;
}

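/*
 * vm_ops->access handler: used for ptrace() and other remote accesses to a
 * debug (SGX_ENCL_DEBUG) enclave. Data is copied in aligned, word-sized
 * chunks through the EDBGRD/EDBGWR leaf functions of ENCLS.
 */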
static int sgx_vma_access(struct vm_area_struct *vma, unsigned long addr,
			  void *buf, int len, int write)
{
	struct sgx_encl *encl = vma->vm_private_data;
	struct sgx_encl_page *entry = NULL;
	char data[sizeof(unsigned long)];
	unsigned long align;
	int offset;
	int cnt;
	int ret = 0;
	int i;

	/*
	 * If process was forked, VMA is still there but vm_private_data is set
	 * to NULL.
	 */
	if (!encl)
		return -EFAULT;

	if (!test_bit(SGX_ENCL_DEBUG, &encl->flags))
		return -EFAULT;

	for (i = 0; i < len; i += cnt) {
		entry = sgx_encl_reserve_page(encl, (addr + i) & PAGE_MASK,
					      vma->vm_flags);
		if (IS_ERR(entry)) {
			ret = PTR_ERR(entry);
			break;
		}

		align = ALIGN_DOWN(addr + i, sizeof(unsigned long));
		offset = (addr + i) & (sizeof(unsigned long) - 1);
		cnt = sizeof(unsigned long) - offset;
		cnt = min(cnt, len - i);

		ret = sgx_encl_debug_read(encl, entry, align, data);
		if (ret)
			goto out;

		if (write) {
			memcpy(data + offset, buf + i, cnt);
			ret = sgx_encl_debug_write(encl, entry, align, data);
			if (ret)
				goto out;
		} else {
			memcpy(buf + i, data + offset, cnt);
		}

out:
		mutex_unlock(&encl->lock);

		if (ret)
			break;
	}

	return ret < 0 ? ret : i;
}

const struct vm_operations_struct sgx_vm_ops = {
	.fault = sgx_vma_fault,
	.mprotect = sgx_vma_mprotect,
	.open = sgx_vma_open,
	.access = sgx_vma_access,
};

/**
 * sgx_encl_release() - Destroy an enclave instance
 * @ref:	address of a kref inside &sgx_encl
 *
 * Used together with kref_put(). Frees all the resources associated with the
 * enclave and the instance itself.
 */
void sgx_encl_release(struct kref *ref)
{
	struct sgx_encl *encl = container_of(ref, struct sgx_encl, refcount);
	struct sgx_va_page *va_page;
	struct sgx_encl_page *entry;
	unsigned long index;

	xa_for_each(&encl->page_array, index, entry) {
		if (entry->epc_page) {
			/*
			 * The page and its xarray entry cannot be freed
			 * if the page is being held by the reclaimer.
			 */
			if (sgx_unmark_page_reclaimable(entry->epc_page))
				continue;

			sgx_free_epc_page(entry->epc_page);
			encl->secs_child_cnt--;
			entry->epc_page = NULL;
		}

		kfree(entry);
	}

	xa_destroy(&encl->page_array);

	if (!encl->secs_child_cnt && encl->secs.epc_page) {
		sgx_free_epc_page(encl->secs.epc_page);
		encl->secs.epc_page = NULL;
	}

	while (!list_empty(&encl->va_pages)) {
		va_page = list_first_entry(&encl->va_pages, struct sgx_va_page,
					   list);
		list_del(&va_page->list);
		sgx_free_epc_page(va_page->epc_page);
		kfree(va_page);
	}

	if (encl->backing)
		fput(encl->backing);

	cleanup_srcu_struct(&encl->srcu);

	WARN_ON_ONCE(!list_empty(&encl->mm_list));

	/* Detect EPC page leaks. */
	WARN_ON_ONCE(encl->secs_child_cnt);
	WARN_ON_ONCE(encl->secs.epc_page);

	kfree(encl);
}

/*
 * 'mm' is exiting and no longer needs mmu notifications.
 */
static void sgx_mmu_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct sgx_encl_mm *encl_mm = container_of(mn, struct sgx_encl_mm, mmu_notifier);
	struct sgx_encl_mm *tmp = NULL;

	/*
	 * The enclave itself can remove encl_mm. Note, objects can't be moved
	 * off an RCU protected list, but deletion is ok.
	 */
	spin_lock(&encl_mm->encl->mm_lock);
	list_for_each_entry(tmp, &encl_mm->encl->mm_list, list) {
		if (tmp == encl_mm) {
			list_del_rcu(&encl_mm->list);
			break;
		}
	}
	spin_unlock(&encl_mm->encl->mm_lock);

	if (tmp == encl_mm) {
		synchronize_srcu(&encl_mm->encl->srcu);
		mmu_notifier_put(mn);
	}
}

static void sgx_mmu_notifier_free(struct mmu_notifier *mn)
{
	struct sgx_encl_mm *encl_mm = container_of(mn, struct sgx_encl_mm, mmu_notifier);

	/* 'encl_mm' is going away, put encl_mm->encl reference: */
	kref_put(&encl_mm->encl->refcount, sgx_encl_release);

	kfree(encl_mm);
}

static const struct mmu_notifier_ops sgx_mmu_notifier_ops = {
	.release = sgx_mmu_notifier_release,
	.free_notifier = sgx_mmu_notifier_free,
};

static struct sgx_encl_mm *sgx_encl_find_mm(struct sgx_encl *encl,
					    struct mm_struct *mm)
{
	struct sgx_encl_mm *encl_mm = NULL;
	struct sgx_encl_mm *tmp;
	int idx;

	idx = srcu_read_lock(&encl->srcu);

	list_for_each_entry_rcu(tmp, &encl->mm_list, list) {
		if (tmp->mm == mm) {
			encl_mm = tmp;
			break;
		}
	}

	srcu_read_unlock(&encl->srcu, idx);

	return encl_mm;
}

int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm)
{
	struct sgx_encl_mm *encl_mm;
	int ret;

	/*
	 * Even though a single enclave may be mapped into an mm more than once,
	 * each 'mm' only appears once on encl->mm_list. This is guaranteed by
	 * holding the mm's mmap lock for write before an mm can be added to or
	 * removed from an encl->mm_list.
	 */
	mmap_assert_write_locked(mm);

	/*
	 * It's possible that an entry already exists in the mm_list, because it
	 * is removed only on VFS release or process exit.
	 */
	if (sgx_encl_find_mm(encl, mm))
		return 0;

	encl_mm = kzalloc(sizeof(*encl_mm), GFP_KERNEL);
	if (!encl_mm)
		return -ENOMEM;

	/* Grab a refcount for the encl_mm->encl reference: */
	kref_get(&encl->refcount);
	encl_mm->encl = encl;
	encl_mm->mm = mm;
	encl_mm->mmu_notifier.ops = &sgx_mmu_notifier_ops;

	ret = __mmu_notifier_register(&encl_mm->mmu_notifier, mm);
	if (ret) {
		kfree(encl_mm);
		return ret;
	}

	spin_lock(&encl->mm_lock);
	list_add_rcu(&encl_mm->list, &encl->mm_list);
	/* Pairs with smp_rmb() in sgx_reclaimer_block(). */
	smp_wmb();
	encl->mm_list_version++;
	spin_unlock(&encl->mm_lock);

	return 0;
}

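/*
 * Layout of the shmem backing storage, as implied by the index arithmetic in
 * __sgx_encl_eldu() and sgx_encl_get_backing(): indices [0, encl->size >>
 * PAGE_SHIFT) hold the encrypted page contents, the next index holds the
 * SECS, and the remaining pages hold the Paging Crypto MetaData (PCMD)
 * entries, 32 of which fit in a page (hence the "page_index >> 5").
 */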
static struct page *sgx_encl_get_backing_page(struct sgx_encl *encl,
					      pgoff_t index)
{
	struct inode *inode = encl->backing->f_path.dentry->d_inode;
	struct address_space *mapping = inode->i_mapping;
	gfp_t gfpmask = mapping_gfp_mask(mapping);

	return shmem_read_mapping_page_gfp(mapping, index, gfpmask);
}

/**
 * sgx_encl_get_backing() - Pin the backing storage
 * @encl:	an enclave pointer
 * @page_index:	enclave page index
 * @backing:	data for accessing backing storage for the page
 *
 * Pin the backing storage pages for storing the encrypted contents and Paging
 * Crypto MetaData (PCMD) of an enclave page.
 *
 * Return:
 *   0 on success,
 *   -errno otherwise.
 */
int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index,
			 struct sgx_backing *backing)
{
	pgoff_t pcmd_index = PFN_DOWN(encl->size) + 1 + (page_index >> 5);
	struct page *contents;
	struct page *pcmd;

	contents = sgx_encl_get_backing_page(encl, page_index);
	if (IS_ERR(contents))
		return PTR_ERR(contents);

	pcmd = sgx_encl_get_backing_page(encl, pcmd_index);
	if (IS_ERR(pcmd)) {
		put_page(contents);
		return PTR_ERR(pcmd);
	}

	backing->page_index = page_index;
	backing->contents = contents;
	backing->pcmd = pcmd;
	backing->pcmd_offset =
		(page_index & (PAGE_SIZE / sizeof(struct sgx_pcmd) - 1)) *
		sizeof(struct sgx_pcmd);

	return 0;
}

/**
 * sgx_encl_put_backing() - Unpin the backing storage
 * @backing:	data for accessing backing storage for the page
 * @do_write:	mark pages dirty
 */
void sgx_encl_put_backing(struct sgx_backing *backing, bool do_write)
{
	if (do_write) {
		set_page_dirty(backing->pcmd);
		set_page_dirty(backing->contents);
	}

	put_page(backing->pcmd);
	put_page(backing->contents);
}

static int sgx_encl_test_and_clear_young_cb(pte_t *ptep, unsigned long addr,
					    void *data)
{
	pte_t pte;
	int ret;

	ret = pte_young(*ptep);
	if (ret) {
		pte = pte_mkold(*ptep);
		set_pte_at((struct mm_struct *)data, addr, ptep, pte);
	}

	return ret;
}

/**
 * sgx_encl_test_and_clear_young() - Test and reset the accessed bit
 * @mm:		mm_struct that is checked
 * @page:	enclave page to be tested for recent access
 *
 * Checks the Access (A) bit from the PTE corresponding to the enclave page and
 * clears it.
 *
 * Return: 1 if the page has been recently accessed and 0 if not.
 */
int sgx_encl_test_and_clear_young(struct mm_struct *mm,
				  struct sgx_encl_page *page)
{
	unsigned long addr = page->desc & PAGE_MASK;
	struct sgx_encl *encl = page->encl;
	struct vm_area_struct *vma;
	int ret;

	ret = sgx_encl_find(mm, addr, &vma);
	if (ret)
		return 0;

	if (encl != vma->vm_private_data)
		return 0;

	ret = apply_to_page_range(vma->vm_mm, addr, PAGE_SIZE,
				  sgx_encl_test_and_clear_young_cb, vma->vm_mm);
	if (ret < 0)
		return 0;

	return ret;
}

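/*
 * Version Array (VA) pages are EPC pages converted with EPA. Each one holds
 * SGX_VA_SLOT_COUNT eight-byte version slots: a slot is consumed to store an
 * anti-rollback version when a page is swapped out with EWB, and released
 * again by sgx_encl_eldu() once ELDU has verified it. Slot indices are
 * therefore converted to byte offsets with "slot << 3" below.
 */
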
/**
 * sgx_alloc_va_page() - Allocate a Version Array (VA) page
 *
 * Allocate a free EPC page and convert it to a Version Array (VA) page.
 *
 * Return:
 *   a VA page,
 *   -errno otherwise
 */
struct sgx_epc_page *sgx_alloc_va_page(void)
{
	struct sgx_epc_page *epc_page;
	int ret;

	epc_page = sgx_alloc_epc_page(NULL, true);
	if (IS_ERR(epc_page))
		return ERR_CAST(epc_page);

	ret = __epa(sgx_get_epc_virt_addr(epc_page));
	if (ret) {
		WARN_ONCE(1, "EPA returned %d (0x%x)", ret, ret);
		sgx_free_epc_page(epc_page);
		return ERR_PTR(-EFAULT);
	}

	return epc_page;
}

/**
 * sgx_alloc_va_slot() - allocate a VA slot
 * @va_page:	a &struct sgx_va_page instance
 *
 * Allocates a slot from a &struct sgx_va_page instance.
 *
 * Return: offset of the slot inside the VA page
 */
unsigned int sgx_alloc_va_slot(struct sgx_va_page *va_page)
{
	int slot = find_first_zero_bit(va_page->slots, SGX_VA_SLOT_COUNT);

	if (slot < SGX_VA_SLOT_COUNT)
		set_bit(slot, va_page->slots);

	return slot << 3;
}

/**
 * sgx_free_va_slot() - free a VA slot
 * @va_page:	a &struct sgx_va_page instance
 * @offset:	offset of the slot inside the VA page
 *
 * Frees a slot from a &struct sgx_va_page instance.
 */
void sgx_free_va_slot(struct sgx_va_page *va_page, unsigned int offset)
{
	clear_bit(offset >> 3, va_page->slots);
}

/**
 * sgx_va_page_full() - is the VA page full?
 * @va_page:	a &struct sgx_va_page instance
 *
 * Return: true if all slots have been taken
 */
bool sgx_va_page_full(struct sgx_va_page *va_page)
{
	int slot = find_first_zero_bit(va_page->slots, SGX_VA_SLOT_COUNT);

	return slot == SGX_VA_SLOT_COUNT;
}