// SPDX-License-Identifier: GPL-2.0
/*  Copyright(c) 2016-20 Intel Corporation. */

#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/highmem.h>
#include <linux/kthread.h>
#include <linux/miscdevice.h>
#include <linux/node.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <asm/sgx.h>
#include "driver.h"
#include "encl.h"
#include "encls.h"

struct sgx_epc_section sgx_epc_sections[SGX_MAX_EPC_SECTIONS];
static int sgx_nr_epc_sections;
static struct task_struct *ksgxd_tsk;
static DECLARE_WAIT_QUEUE_HEAD(ksgxd_waitq);
static DEFINE_XARRAY(sgx_epc_address_space);

/*
 * These variables are part of the state of the reclaimer, and must be accessed
 * with sgx_reclaimer_lock acquired.
 */
static LIST_HEAD(sgx_active_page_list);
static DEFINE_SPINLOCK(sgx_reclaimer_lock);

static atomic_long_t sgx_nr_free_pages = ATOMIC_LONG_INIT(0);

/* Nodes with one or more EPC sections. */
static nodemask_t sgx_numa_mask;

/*
 * Array with one struct sgx_numa_node for each possible NUMA node. Each
 * node tracks the free (and poisoned) EPC pages which live on that node.
 */
static struct sgx_numa_node *sgx_numa_nodes;

static LIST_HEAD(sgx_dirty_page_list);

/*
 * Reset post-kexec EPC pages to the uninitialized state. The pages are removed
 * from the input list, and made available to the page allocator. SECS pages
 * that precede their child pages in the input list are left intact.
 */
static void __sgx_sanitize_pages(struct list_head *dirty_page_list)
{
	struct sgx_epc_page *page;
	LIST_HEAD(dirty);
	int ret;

	/* dirty_page_list is thread-local, no need for a lock: */
	while (!list_empty(dirty_page_list)) {
		if (kthread_should_stop())
			return;

		page = list_first_entry(dirty_page_list, struct sgx_epc_page, list);

		/*
		 * Checking page->poison without holding the node->lock
		 * is racy, but losing the race (i.e. poison is set just
		 * after the check) just means __eremove() will be uselessly
		 * called for a page that sgx_free_epc_page() will put onto
		 * the node->sgx_poison_page_list later.
		 */
		if (page->poison) {
			struct sgx_epc_section *section = &sgx_epc_sections[page->section];
			struct sgx_numa_node *node = section->node;

			spin_lock(&node->lock);
			list_move(&page->list, &node->sgx_poison_page_list);
			spin_unlock(&node->lock);

			continue;
		}

		ret = __eremove(sgx_get_epc_virt_addr(page));
		if (!ret) {
			/*
			 * page is now sanitized. Make it available via the SGX
			 * page allocator:
			 */
			list_del(&page->list);
			sgx_free_epc_page(page);
		} else {
			/* The page is not yet clean - move to the dirty list. */
			list_move_tail(&page->list, &dirty);
		}

		cond_resched();
	}

	list_splice(&dirty, dirty_page_list);
}
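
/*
 * Illustrative sketch (not part of the driver): the reason a page can stay
 * dirty after one pass is that EREMOVE refuses to remove a SECS page while
 * any of its children remain, so the first attempt can fail and only a later
 * pass succeeds:
 *
 *	ret = __eremove(sgx_get_epc_virt_addr(secs_page));
 *	// ret == SGX_CHILD_PRESENT until every child page has been
 *	// EREMOVE'd; the SECS page is then retried on a subsequent pass.
 */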

static bool sgx_reclaimer_age(struct sgx_epc_page *epc_page)
{
	struct sgx_encl_page *page = epc_page->owner;
	struct sgx_encl *encl = page->encl;
	struct sgx_encl_mm *encl_mm;
	bool ret = true;
	int idx;

	idx = srcu_read_lock(&encl->srcu);

	list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) {
		if (!mmget_not_zero(encl_mm->mm))
			continue;

		mmap_read_lock(encl_mm->mm);
		ret = !sgx_encl_test_and_clear_young(encl_mm->mm, page);
		mmap_read_unlock(encl_mm->mm);

		mmput_async(encl_mm->mm);

		if (!ret)
			break;
	}

	srcu_read_unlock(&encl->srcu, idx);

	return ret;
}

static void sgx_reclaimer_block(struct sgx_epc_page *epc_page)
{
	struct sgx_encl_page *page = epc_page->owner;
	unsigned long addr = page->desc & PAGE_MASK;
	struct sgx_encl *encl = page->encl;
	unsigned long mm_list_version;
	struct sgx_encl_mm *encl_mm;
	struct vm_area_struct *vma;
	int idx, ret;

	do {
		mm_list_version = encl->mm_list_version;

		/* Pairs with smp_wmb() in sgx_encl_mm_add(). */
		smp_rmb();

		idx = srcu_read_lock(&encl->srcu);

		list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) {
			if (!mmget_not_zero(encl_mm->mm))
				continue;

			mmap_read_lock(encl_mm->mm);

			ret = sgx_encl_find(encl_mm->mm, addr, &vma);
			if (!ret && encl == vma->vm_private_data)
				zap_vma_ptes(vma, addr, PAGE_SIZE);

			mmap_read_unlock(encl_mm->mm);

			mmput_async(encl_mm->mm);
		}

		srcu_read_unlock(&encl->srcu, idx);
	} while (unlikely(encl->mm_list_version != mm_list_version));

	mutex_lock(&encl->lock);

	ret = __eblock(sgx_get_epc_virt_addr(epc_page));
	if (encls_failed(ret))
		ENCLS_WARN(ret, "EBLOCK");

	mutex_unlock(&encl->lock);
}

static int __sgx_encl_ewb(struct sgx_epc_page *epc_page, void *va_slot,
			  struct sgx_backing *backing)
{
	struct sgx_pageinfo pginfo;
	int ret;

	pginfo.addr = 0;
	pginfo.secs = 0;

	pginfo.contents = (unsigned long)kmap_atomic(backing->contents);
	pginfo.metadata = (unsigned long)kmap_atomic(backing->pcmd) +
			  backing->pcmd_offset;

	ret = __ewb(&pginfo, sgx_get_epc_virt_addr(epc_page), va_slot);

	kunmap_atomic((void *)(unsigned long)(pginfo.metadata -
					      backing->pcmd_offset));
	kunmap_atomic((void *)(unsigned long)pginfo.contents);

	return ret;
}
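
/*
 * The callback below is deliberately empty: the interrupt itself is the
 * point. An IPI delivered to a CPU executing inside an enclave forces an
 * asynchronous exit (AEX), so by the time the (empty) handler runs, the CPU
 * is guaranteed to be outside the enclave, which is all EWB needs.
 */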
static void sgx_ipi_cb(void *info)
{
}

static const cpumask_t *sgx_encl_ewb_cpumask(struct sgx_encl *encl)
{
	cpumask_t *cpumask = &encl->cpumask;
	struct sgx_encl_mm *encl_mm;
	int idx;

	/*
	 * Can race with sgx_encl_mm_add(), but ETRACK has already been
	 * executed, which means that the CPUs running in the new mm will enter
	 * into the enclave with a fresh epoch.
	 */
	cpumask_clear(cpumask);

	idx = srcu_read_lock(&encl->srcu);

	list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) {
		if (!mmget_not_zero(encl_mm->mm))
			continue;

		cpumask_or(cpumask, cpumask, mm_cpumask(encl_mm->mm));

		mmput_async(encl_mm->mm);
	}

	srcu_read_unlock(&encl->srcu, idx);

	return cpumask;
}

/*
 * Swap an EPC page, which has been transformed to the blocked state with
 * EBLOCK (i.e. it can gain no new TLB entries), out to regular memory.
 *
 * The first attempt just tries to write the page, assuming that some other
 * thread has already reset the count of threads inside the enclave with
 * ETRACK and the previous count has drained to zero. The second attempt
 * calls ETRACK itself before EWB. If that also fails, all HW threads are
 * kicked out of the enclave with IPIs and EWB is retried; this final attempt
 * is guaranteed to succeed.
 */
static void sgx_encl_ewb(struct sgx_epc_page *epc_page,
			 struct sgx_backing *backing)
{
	struct sgx_encl_page *encl_page = epc_page->owner;
	struct sgx_encl *encl = encl_page->encl;
	struct sgx_va_page *va_page;
	unsigned int va_offset;
	void *va_slot;
	int ret;

	encl_page->desc &= ~SGX_ENCL_PAGE_BEING_RECLAIMED;

	va_page = list_first_entry(&encl->va_pages, struct sgx_va_page,
				   list);
	va_offset = sgx_alloc_va_slot(va_page);
	va_slot = sgx_get_epc_virt_addr(va_page->epc_page) + va_offset;
	if (sgx_va_page_full(va_page))
		list_move_tail(&va_page->list, &encl->va_pages);

	ret = __sgx_encl_ewb(epc_page, va_slot, backing);
	if (ret == SGX_NOT_TRACKED) {
		ret = __etrack(sgx_get_epc_virt_addr(encl->secs.epc_page));
		if (ret) {
			if (encls_failed(ret))
				ENCLS_WARN(ret, "ETRACK");
		}

		ret = __sgx_encl_ewb(epc_page, va_slot, backing);
		if (ret == SGX_NOT_TRACKED) {
			/*
			 * Slow path, send IPIs to kick cpus out of the
			 * enclave. Note, it's imperative that the cpu
			 * mask is generated *after* ETRACK, else we'll
			 * miss cpus that entered the enclave between
			 * generating the mask and incrementing epoch.
			 */
			on_each_cpu_mask(sgx_encl_ewb_cpumask(encl),
					 sgx_ipi_cb, NULL, 1);
			ret = __sgx_encl_ewb(epc_page, va_slot, backing);
		}
	}

	if (ret) {
		if (encls_failed(ret))
			ENCLS_WARN(ret, "EWB");

		sgx_free_va_slot(va_page, va_offset);
	} else {
		encl_page->desc |= va_offset;
		encl_page->va_page = va_page;
	}
}
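
/*
 * Write the EPC page out to the enclave's backing storage with EWB, detach
 * it from its owner (encl_page->epc_page is cleared), and decrement the SECS
 * child count. If that was the last child of an initialized enclave, the
 * SECS page itself is written back and freed as well.
 */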
static void sgx_reclaimer_write(struct sgx_epc_page *epc_page,
				struct sgx_backing *backing)
{
	struct sgx_encl_page *encl_page = epc_page->owner;
	struct sgx_encl *encl = encl_page->encl;
	struct sgx_backing secs_backing;
	int ret;

	mutex_lock(&encl->lock);

	sgx_encl_ewb(epc_page, backing);
	encl_page->epc_page = NULL;
	encl->secs_child_cnt--;

	if (!encl->secs_child_cnt && test_bit(SGX_ENCL_INITIALIZED, &encl->flags)) {
		ret = sgx_encl_get_backing(encl, PFN_DOWN(encl->size),
					   &secs_backing);
		if (ret)
			goto out;

		sgx_encl_ewb(encl->secs.epc_page, &secs_backing);

		sgx_encl_free_epc_page(encl->secs.epc_page);
		encl->secs.epc_page = NULL;

		sgx_encl_put_backing(&secs_backing, true);
	}

out:
	mutex_unlock(&encl->lock);
}

/*
 * Take a fixed number of pages from the head of the active page pool and
 * reclaim them to the enclave's private shmem files. Skip pages that have
 * been accessed since the last scan; move them to the tail of the active
 * page pool so that pages get scanned in an LRU-like fashion.
 *
 * Batch process a chunk of pages (at the moment 16) in order to reduce the
 * number of IPIs and ETRACKs potentially required. sgx_encl_ewb() amortizes
 * the cost a bit among the HW threads with its three-stage EWB pipeline
 * (EWB, ETRACK + EWB and IPI + EWB), but not sufficiently. Reclaiming one
 * page at a time would also be problematic, as it would increase lock
 * contention too much, halting forward progress.
 */
static void sgx_reclaim_pages(void)
{
	struct sgx_epc_page *chunk[SGX_NR_TO_SCAN];
	struct sgx_backing backing[SGX_NR_TO_SCAN];
	struct sgx_epc_section *section;
	struct sgx_encl_page *encl_page;
	struct sgx_epc_page *epc_page;
	struct sgx_numa_node *node;
	pgoff_t page_index;
	int cnt = 0;
	int ret;
	int i;

	spin_lock(&sgx_reclaimer_lock);
	for (i = 0; i < SGX_NR_TO_SCAN; i++) {
		if (list_empty(&sgx_active_page_list))
			break;

		epc_page = list_first_entry(&sgx_active_page_list,
					    struct sgx_epc_page, list);
		list_del_init(&epc_page->list);
		encl_page = epc_page->owner;

		if (kref_get_unless_zero(&encl_page->encl->refcount) != 0)
			chunk[cnt++] = epc_page;
		else
			/*
			 * The owner is freeing the page. No need to add the
			 * page back to the list of reclaimable pages.
			 */
			epc_page->flags &= ~SGX_EPC_PAGE_RECLAIMER_TRACKED;
	}
	spin_unlock(&sgx_reclaimer_lock);

	for (i = 0; i < cnt; i++) {
		epc_page = chunk[i];
		encl_page = epc_page->owner;

		if (!sgx_reclaimer_age(epc_page))
			goto skip;

		page_index = PFN_DOWN(encl_page->desc - encl_page->encl->base);
		ret = sgx_encl_get_backing(encl_page->encl, page_index, &backing[i]);
		if (ret)
			goto skip;

		mutex_lock(&encl_page->encl->lock);
		encl_page->desc |= SGX_ENCL_PAGE_BEING_RECLAIMED;
		mutex_unlock(&encl_page->encl->lock);
		continue;

skip:
		spin_lock(&sgx_reclaimer_lock);
		list_add_tail(&epc_page->list, &sgx_active_page_list);
		spin_unlock(&sgx_reclaimer_lock);

		kref_put(&encl_page->encl->refcount, sgx_encl_release);

		chunk[i] = NULL;
	}

	for (i = 0; i < cnt; i++) {
		epc_page = chunk[i];
		if (epc_page)
			sgx_reclaimer_block(epc_page);
	}

	for (i = 0; i < cnt; i++) {
		epc_page = chunk[i];
		if (!epc_page)
			continue;

		encl_page = epc_page->owner;
		sgx_reclaimer_write(epc_page, &backing[i]);
		sgx_encl_put_backing(&backing[i], true);

		kref_put(&encl_page->encl->refcount, sgx_encl_release);
		epc_page->flags &= ~SGX_EPC_PAGE_RECLAIMER_TRACKED;

		section = &sgx_epc_sections[epc_page->section];
		node = section->node;

		spin_lock(&node->lock);
		list_add_tail(&epc_page->list, &node->free_page_list);
		spin_unlock(&node->lock);
		atomic_long_inc(&sgx_nr_free_pages);
	}
}

static bool sgx_should_reclaim(unsigned long watermark)
{
	return atomic_long_read(&sgx_nr_free_pages) < watermark &&
	       !list_empty(&sgx_active_page_list);
}
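
/*
 * How the two watermarks interact (a summary of code found elsewhere in this
 * file, for orientation): the allocator wakes ksgxd once the free-page count
 * drops below SGX_NR_LOW_PAGES, and ksgxd keeps reclaiming until the count
 * rises back above SGX_NR_HIGH_PAGES (provided something reclaimable
 * remains):
 *
 *	if (sgx_should_reclaim(SGX_NR_LOW_PAGES))	// sgx_alloc_epc_page()
 *		wake_up(&ksgxd_waitq);
 *
 *	if (sgx_should_reclaim(SGX_NR_HIGH_PAGES))	// ksgxd()
 *		sgx_reclaim_pages();
 */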
static int ksgxd(void *p)
{
	set_freezable();

	/*
	 * Sanitize pages in order to recover from kexec(). The 2nd pass is
	 * required for SECS pages, whose child pages blocked the first round
	 * of EREMOVE.
	 */
	__sgx_sanitize_pages(&sgx_dirty_page_list);
	__sgx_sanitize_pages(&sgx_dirty_page_list);

	/* sanity check: */
	WARN_ON(!list_empty(&sgx_dirty_page_list));

	while (!kthread_should_stop()) {
		if (try_to_freeze())
			continue;

		wait_event_freezable(ksgxd_waitq,
				     kthread_should_stop() ||
				     sgx_should_reclaim(SGX_NR_HIGH_PAGES));

		if (sgx_should_reclaim(SGX_NR_HIGH_PAGES))
			sgx_reclaim_pages();

		cond_resched();
	}

	return 0;
}

static bool __init sgx_page_reclaimer_init(void)
{
	struct task_struct *tsk;

	tsk = kthread_run(ksgxd, NULL, "ksgxd");
	if (IS_ERR(tsk))
		return false;

	ksgxd_tsk = tsk;

	return true;
}

static struct sgx_epc_page *__sgx_alloc_epc_page_from_node(int nid)
{
	struct sgx_numa_node *node = &sgx_numa_nodes[nid];
	struct sgx_epc_page *page = NULL;

	spin_lock(&node->lock);

	if (list_empty(&node->free_page_list)) {
		spin_unlock(&node->lock);
		return NULL;
	}

	page = list_first_entry(&node->free_page_list, struct sgx_epc_page, list);
	list_del_init(&page->list);
	page->flags = 0;

	spin_unlock(&node->lock);
	atomic_long_dec(&sgx_nr_free_pages);

	return page;
}

/**
 * __sgx_alloc_epc_page() - Allocate an EPC page
 *
 * Iterate through the NUMA nodes and reserve a free EPC page for the caller.
 * Start from the NUMA node where the caller is executing.
 *
 * Return:
 * - an EPC page: A borrowed EPC page was available.
 * - ERR_PTR(-ENOMEM): Out of EPC pages.
 */
struct sgx_epc_page *__sgx_alloc_epc_page(void)
{
	struct sgx_epc_page *page;
	int nid_of_current = numa_node_id();
	int nid_start, nid;

	/*
	 * Try the local node first. If it has no EPC sections, start the
	 * search from the next node in the mask instead; otherwise the loop
	 * below could never terminate, because next_node_in() only ever
	 * returns nodes which are set in sgx_numa_mask.
	 */
	if (node_isset(nid_of_current, sgx_numa_mask))
		nid_start = nid_of_current;
	else
		nid_start = next_node_in(nid_of_current, sgx_numa_mask);

	nid = nid_start;
	do {
		page = __sgx_alloc_epc_page_from_node(nid);
		if (page)
			return page;

		nid = next_node_in(nid, sgx_numa_mask);
	} while (nid != nid_start);

	return ERR_PTR(-ENOMEM);
}
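
/*
 * A worked example of the node walk above (values invented for
 * illustration): with EPC on nodes 0 and 2 (sgx_numa_mask = { 0, 2 }) and
 * the caller running on node 1, nid_start becomes 2 (the next node in the
 * mask after 1). The loop then tries node 2, moves on to node 0, and stops
 * when next_node_in() wraps back around to node 2, returning
 * ERR_PTR(-ENOMEM) if neither node had a free page.
 */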

/**
 * sgx_mark_page_reclaimable() - Mark a page as reclaimable
 * @page: EPC page
 *
 * Mark a page as reclaimable and add it to the active page list. Pages
 * are automatically removed from the active list when freed.
 */
void sgx_mark_page_reclaimable(struct sgx_epc_page *page)
{
	spin_lock(&sgx_reclaimer_lock);
	page->flags |= SGX_EPC_PAGE_RECLAIMER_TRACKED;
	list_add_tail(&page->list, &sgx_active_page_list);
	spin_unlock(&sgx_reclaimer_lock);
}

/**
 * sgx_unmark_page_reclaimable() - Remove a page from the reclaim list
 * @page: EPC page
 *
 * Clear the reclaimable flag and remove the page from the active page list.
 *
 * Return:
 *   0 on success,
 *   -EBUSY if the page is in the process of being reclaimed
 */
int sgx_unmark_page_reclaimable(struct sgx_epc_page *page)
{
	spin_lock(&sgx_reclaimer_lock);
	if (page->flags & SGX_EPC_PAGE_RECLAIMER_TRACKED) {
		/* The page is being reclaimed. */
		if (list_empty(&page->list)) {
			spin_unlock(&sgx_reclaimer_lock);
			return -EBUSY;
		}

		list_del(&page->list);
		page->flags &= ~SGX_EPC_PAGE_RECLAIMER_TRACKED;
	}
	spin_unlock(&sgx_reclaimer_lock);

	return 0;
}

/**
 * sgx_alloc_epc_page() - Allocate an EPC page
 * @owner:	the owner of the EPC page
 * @reclaim:	reclaim pages if necessary
 *
 * Iterate through the EPC sections and borrow a free EPC page for the caller.
 * When a page is no longer needed it must be released with
 * sgx_free_epc_page(). If @reclaim is set to true, pages are reclaimed
 * directly when the allocator runs out of them. No mm's can be locked when
 * @reclaim is set to true.
 *
 * Finally, wake up ksgxd when the number of free pages goes below the low
 * watermark before returning to the caller.
 *
 * Return:
 *   an EPC page,
 *   -errno on error
 */
struct sgx_epc_page *sgx_alloc_epc_page(void *owner, bool reclaim)
{
	struct sgx_epc_page *page;

	for ( ; ; ) {
		page = __sgx_alloc_epc_page();
		if (!IS_ERR(page)) {
			page->owner = owner;
			break;
		}

		if (list_empty(&sgx_active_page_list))
			return ERR_PTR(-ENOMEM);

		if (!reclaim) {
			page = ERR_PTR(-EBUSY);
			break;
		}

		if (signal_pending(current)) {
			page = ERR_PTR(-ERESTARTSYS);
			break;
		}

		sgx_reclaim_pages();
		cond_resched();
	}

	if (sgx_should_reclaim(SGX_NR_LOW_PAGES))
		wake_up(&ksgxd_waitq);

	return page;
}
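
/*
 * A minimal usage sketch (illustrative, not taken from the driver; the
 * encl_page owner and the error handling are assumptions):
 *
 *	struct sgx_epc_page *epc_page;
 *
 *	epc_page = sgx_alloc_epc_page(encl_page, true);
 *	if (IS_ERR(epc_page))
 *		return ERR_CAST(epc_page);
 *
 *	...
 *	sgx_free_epc_page(epc_page);	// after EREMOVE/EWB as needed
 */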

/**
 * sgx_free_epc_page() - Free an EPC page
 * @page: an EPC page
 *
 * Put the EPC page back on the list of free pages. It's the caller's
 * responsibility to make sure that the page is in the uninitialized state. In
 * other words, do EREMOVE, EWB or whatever operation is necessary before
 * calling this function.
 */
void sgx_free_epc_page(struct sgx_epc_page *page)
{
	struct sgx_epc_section *section = &sgx_epc_sections[page->section];
	struct sgx_numa_node *node = section->node;

	spin_lock(&node->lock);

	page->owner = NULL;
	if (page->poison)
		list_add(&page->list, &node->sgx_poison_page_list);
	else
		list_add_tail(&page->list, &node->free_page_list);
	page->flags = SGX_EPC_PAGE_IS_FREE;

	spin_unlock(&node->lock);
	atomic_long_inc(&sgx_nr_free_pages);
}
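
/*
 * Sketch of a typical release path (illustrative; modeled on what callers
 * such as sgx_encl_free_epc_page() are expected to do, not a verbatim copy):
 *
 *	int ret = __eremove(sgx_get_epc_virt_addr(epc_page));
 *
 *	if (WARN_ONCE(ret, "EREMOVE returned %d", ret))
 *		return;		// leak the page rather than free a dirty one
 *	sgx_free_epc_page(epc_page);
 */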

static bool __init sgx_setup_epc_section(u64 phys_addr, u64 size,
					 unsigned long index,
					 struct sgx_epc_section *section)
{
	unsigned long nr_pages = size >> PAGE_SHIFT;
	unsigned long i;

	section->virt_addr = memremap(phys_addr, size, MEMREMAP_WB);
	if (!section->virt_addr)
		return false;

	section->pages = vmalloc(nr_pages * sizeof(struct sgx_epc_page));
	if (!section->pages) {
		memunmap(section->virt_addr);
		return false;
	}

	section->phys_addr = phys_addr;
	xa_store_range(&sgx_epc_address_space, section->phys_addr,
		       phys_addr + size - 1, section, GFP_KERNEL);

	for (i = 0; i < nr_pages; i++) {
		section->pages[i].section = index;
		section->pages[i].flags = 0;
		section->pages[i].owner = NULL;
		section->pages[i].poison = 0;
		list_add_tail(&section->pages[i].list, &sgx_dirty_page_list);
	}

	return true;
}

bool arch_is_platform_page(u64 paddr)
{
	return !!xa_load(&sgx_epc_address_space, paddr);
}
EXPORT_SYMBOL_GPL(arch_is_platform_page);

static struct sgx_epc_page *sgx_paddr_to_page(u64 paddr)
{
	struct sgx_epc_section *section;

	section = xa_load(&sgx_epc_address_space, paddr);
	if (!section)
		return NULL;

	return &section->pages[PFN_DOWN(paddr - section->phys_addr)];
}

/*
 * Called in process context to handle a hardware reported
 * error in an SGX EPC page.
 * If the MF_ACTION_REQUIRED bit is set in flags, then the
 * context is the task that consumed the poison data. Otherwise
 * this is called from a kernel thread unrelated to the page.
 */
int arch_memory_failure(unsigned long pfn, int flags)
{
	struct sgx_epc_page *page = sgx_paddr_to_page(pfn << PAGE_SHIFT);
	struct sgx_epc_section *section;
	struct sgx_numa_node *node;

	/*
	 * mm/memory-failure.c calls this routine for all errors
	 * where there isn't a "struct page" for the address. But that
	 * includes other address ranges besides SGX.
	 */
	if (!page)
		return -ENXIO;

	/*
	 * If poison was consumed synchronously, send a SIGBUS to
	 * the task. Hardware has already exited the SGX enclave and
	 * will not allow re-entry to an enclave that has a memory
	 * error. The signal may help the task understand why the
	 * enclave is broken.
	 */
	if (flags & MF_ACTION_REQUIRED)
		force_sig(SIGBUS);

	section = &sgx_epc_sections[page->section];
	node = section->node;

	spin_lock(&node->lock);

	/* Already poisoned? Nothing more to do */
	if (page->poison)
		goto out;

	page->poison = 1;

	/*
	 * If the page is on a free list, move it to the per-node
	 * poison page list.
	 */
	if (page->flags & SGX_EPC_PAGE_IS_FREE) {
		list_move(&page->list, &node->sgx_poison_page_list);
		goto out;
	}

	/*
	 * TBD: Add additional plumbing to enable pre-emptive
	 * action for asynchronous poison notification. Until
	 * then just hope that the poison:
	 * a) is not accessed - sgx_free_epc_page() will deal with it
	 *    when the user gives it back
	 * b) results in a recoverable machine check rather than
	 *    a fatal one
	 */
out:
	spin_unlock(&node->lock);
	return 0;
}

/*
 * A section metric is concatenated in a way that @low bits 12-31 define the
 * bits 12-31 of the metric and @high bits 0-19 define the bits 32-51 of the
 * metric.
 */
static inline u64 __init sgx_calc_section_metric(u64 low, u64 high)
{
	return (low & GENMASK_ULL(31, 12)) +
	       ((high & GENMASK_ULL(19, 0)) << 32);
}
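
/*
 * Worked example for the helper above (values invented for illustration):
 * with low = 0x40200000 and high = 0x00000001, the low dword contributes
 * 0x40200000 (bits 12-31 kept, bits 0-11 masked off) and the high dword
 * contributes 0x1ULL << 32, so the resulting metric is 0x140200000.
 */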

#ifdef CONFIG_NUMA
static ssize_t sgx_total_bytes_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", sgx_numa_nodes[dev->id].size);
}
static DEVICE_ATTR_RO(sgx_total_bytes);

static umode_t arch_node_attr_is_visible(struct kobject *kobj,
					 struct attribute *attr, int idx)
{
	/* Make all x86/ attributes invisible when SGX is not initialized: */
	if (nodes_empty(sgx_numa_mask))
		return 0;

	return attr->mode;
}

static struct attribute *arch_node_dev_attrs[] = {
	&dev_attr_sgx_total_bytes.attr,
	NULL,
};

const struct attribute_group arch_node_dev_group = {
	.name = "x86",
	.attrs = arch_node_dev_attrs,
	.is_visible = arch_node_attr_is_visible,
};

static void __init arch_update_sysfs_visibility(int nid)
{
	struct node *node = node_devices[nid];
	int ret;

	ret = sysfs_update_group(&node->dev.kobj, &arch_node_dev_group);

	if (ret)
		pr_err("sysfs update failed (%d), files may be invisible\n", ret);
}
#else /* !CONFIG_NUMA */
static void __init arch_update_sysfs_visibility(int nid) {}
#endif

static bool __init sgx_page_cache_init(void)
{
	u32 eax, ebx, ecx, edx, type;
	u64 pa, size;
	int nid;
	int i;

	sgx_numa_nodes = kmalloc_array(num_possible_nodes(), sizeof(*sgx_numa_nodes), GFP_KERNEL);
	if (!sgx_numa_nodes)
		return false;

	for (i = 0; i < ARRAY_SIZE(sgx_epc_sections); i++) {
		cpuid_count(SGX_CPUID, i + SGX_CPUID_EPC, &eax, &ebx, &ecx, &edx);

		type = eax & SGX_CPUID_EPC_MASK;
		if (type == SGX_CPUID_EPC_INVALID)
			break;

		if (type != SGX_CPUID_EPC_SECTION) {
			pr_err_once("Unknown EPC section type: %u\n", type);
			break;
		}

		pa = sgx_calc_section_metric(eax, ebx);
		size = sgx_calc_section_metric(ecx, edx);

		pr_info("EPC section 0x%llx-0x%llx\n", pa, pa + size - 1);

		if (!sgx_setup_epc_section(pa, size, i, &sgx_epc_sections[i])) {
			pr_err("No free memory for an EPC section\n");
			break;
		}

		nid = numa_map_to_online_node(phys_to_target_node(pa));
		if (nid == NUMA_NO_NODE) {
			/* The physical address is already printed above. */
			pr_warn(FW_BUG "Unable to map EPC section to online node. Fallback to the NUMA node 0.\n");
			nid = 0;
		}

		if (!node_isset(nid, sgx_numa_mask)) {
			spin_lock_init(&sgx_numa_nodes[nid].lock);
			INIT_LIST_HEAD(&sgx_numa_nodes[nid].free_page_list);
			INIT_LIST_HEAD(&sgx_numa_nodes[nid].sgx_poison_page_list);
			node_set(nid, sgx_numa_mask);
			sgx_numa_nodes[nid].size = 0;

			/* Make SGX-specific node sysfs files visible: */
			arch_update_sysfs_visibility(nid);
		}

		sgx_epc_sections[i].node = &sgx_numa_nodes[nid];
		sgx_numa_nodes[nid].size += size;

		sgx_nr_epc_sections++;
	}

	if (!sgx_nr_epc_sections) {
		pr_err("There are zero EPC sections.\n");
		return false;
	}

	return true;
}

/*
 * Update the SGX_LEPUBKEYHASH MSRs to the values specified by the caller.
 * The bare-metal driver needs to update them to the hash of the enclave
 * signer before EINIT, and KVM needs to update them to the guest's virtual
 * MSR values before doing EINIT on behalf of the guest.
 */
void sgx_update_lepubkeyhash(u64 *lepubkeyhash)
{
	int i;

	WARN_ON_ONCE(preemptible());

	for (i = 0; i < 4; i++)
		wrmsrl(MSR_IA32_SGXLEPUBKEYHASH0 + i, lepubkeyhash[i]);
}

const struct file_operations sgx_provision_fops = {
	.owner = THIS_MODULE,
};

static struct miscdevice sgx_dev_provision = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "sgx_provision",
	.nodename = "sgx_provision",
	.fops = &sgx_provision_fops,
};

/**
 * sgx_set_attribute() - Update allowed attributes given file descriptor
 * @allowed_attributes: Pointer to allowed enclave attributes
 * @attribute_fd: File descriptor for specific attribute
 *
 * Append enclave attribute indicated by file descriptor to allowed
 * attributes. Currently only SGX_ATTR_PROVISIONKEY indicated by
 * /dev/sgx_provision is supported.
 *
 * Return:
 * - 0: SGX_ATTR_PROVISIONKEY is appended to allowed_attributes
 * - -EINVAL: Invalid or unsupported file descriptor
 */
int sgx_set_attribute(unsigned long *allowed_attributes,
		      unsigned int attribute_fd)
{
	struct file *file;

	file = fget(attribute_fd);
	if (!file)
		return -EINVAL;

	if (file->f_op != &sgx_provision_fops) {
		fput(file);
		return -EINVAL;
	}

	*allowed_attributes |= SGX_ATTR_PROVISIONKEY;

	fput(file);
	return 0;
}
EXPORT_SYMBOL_GPL(sgx_set_attribute);
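
/*
 * Illustrative caller sketch: the SGX_IOC_ENCLAVE_PROVISION ioctl handler in
 * the native driver is the intended user of sgx_set_attribute(), roughly
 * along these lines (struct and field names here are an assumption, not a
 * quote of driver code):
 *
 *	struct sgx_enclave_provision params;
 *
 *	if (copy_from_user(&params, arg, sizeof(params)))
 *		return -EFAULT;
 *
 *	return sgx_set_attribute(&encl->attributes_mask, params.fd);
 */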

static int __init sgx_init(void)
{
	int ret;
	int i;

	if (!cpu_feature_enabled(X86_FEATURE_SGX))
		return -ENODEV;

	if (!sgx_page_cache_init())
		return -ENOMEM;

	if (!sgx_page_reclaimer_init()) {
		ret = -ENOMEM;
		goto err_page_cache;
	}

	ret = misc_register(&sgx_dev_provision);
	if (ret)
		goto err_kthread;

	/*
	 * Always try to initialize the native *and* KVM drivers.
	 * The KVM driver is less picky than the native one and
	 * can function if the native one is not supported on the
	 * current system or fails to initialize.
	 *
	 * Error out only if both fail to initialize.
	 */
	ret = sgx_drv_init();

	if (sgx_vepc_init() && ret)
		goto err_provision;

	return 0;

err_provision:
	misc_deregister(&sgx_dev_provision);

err_kthread:
	kthread_stop(ksgxd_tsk);

err_page_cache:
	for (i = 0; i < sgx_nr_epc_sections; i++) {
		vfree(sgx_epc_sections[i].pages);
		memunmap(sgx_epc_sections[i].virt_addr);
	}

	return ret;
}

device_initcall(sgx_init);