--- main.c (40e0e7843e23d164625b9031514f5672f8758bf4)
+++ main.c (992801ae92431761b3d8ec88abd5793d154d34ac)
 // SPDX-License-Identifier: GPL-2.0
 /* Copyright(c) 2016-20 Intel Corporation. */

 #include <linux/file.h>
 #include <linux/freezer.h>
 #include <linux/highmem.h>
 #include <linux/kthread.h>
 #include <linux/miscdevice.h>

--- 48 unchanged lines hidden ---

         /* dirty_page_list is thread-local, no need for a lock: */
         while (!list_empty(dirty_page_list)) {
                 if (kthread_should_stop())
                         return;

                 page = list_first_entry(dirty_page_list, struct sgx_epc_page, list);

+                /*
+                 * Checking page->poison without holding the node->lock
+                 * is racy, but losing the race (i.e. poison is set just
+                 * after the check) just means __eremove() will be uselessly
+                 * called for a page that sgx_free_epc_page() will put onto
+                 * the node->sgx_poison_page_list later.
+                 */
+                if (page->poison) {
+                        struct sgx_epc_section *section = &sgx_epc_sections[page->section];
+                        struct sgx_numa_node *node = section->node;
+
+                        spin_lock(&node->lock);
+                        list_move(&page->list, &node->sgx_poison_page_list);
+                        spin_unlock(&node->lock);
+
+                        continue;
+                }
+
                 ret = __eremove(sgx_get_epc_virt_addr(page));
                 if (!ret) {
                         /*
                          * page is now sanitized. Make it available via the SGX
                          * page allocator:
                          */
                         list_del(&page->list);
                         sgx_free_epc_page(page);
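
The comment in the hunk above spells out the locking rule: page->poison is read without node->lock, and losing that race only costs one redundant __eremove() before sgx_free_epc_page() quarantines the page anyway. A minimal user-space sketch of the same unlocked-check/locked-update pattern, with all names and the pthread mutex as illustrative stand-ins, not the kernel's:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct page_state {
            pthread_mutex_t lock;   /* stands in for node->lock */
            bool poison;            /* can be set at any time, e.g. by a RAS handler */
    };

    /* Returns true if the page was diverted to the quarantine path. */
    static bool maybe_quarantine(struct page_state *p)
    {
            /* Unlocked read: racy on purpose, like the page->poison check. */
            if (!p->poison)
                    return false;   /* a lost race just means harmless extra work */

            /* The lock is taken only to mutate the shared lists. */
            pthread_mutex_lock(&p->lock);
            /* ...the equivalent of list_move() to the poison list goes here... */
            pthread_mutex_unlock(&p->lock);
            return true;
    }

    int main(void)
    {
            static struct page_state p = { PTHREAD_MUTEX_INITIALIZER, true };

            printf("quarantined: %d\n", maybe_quarantine(&p));
            return 0;
    }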

--- 548 unchanged lines hidden ---

  */
 void sgx_free_epc_page(struct sgx_epc_page *page)
 {
         struct sgx_epc_section *section = &sgx_epc_sections[page->section];
         struct sgx_numa_node *node = section->node;

         spin_lock(&node->lock);

-        list_add_tail(&page->list, &node->free_page_list);
+        page->owner = NULL;
+        if (page->poison)
+                list_add(&page->list, &node->sgx_poison_page_list);
+        else
+                list_add_tail(&page->list, &node->free_page_list);
         sgx_nr_free_pages++;
         page->flags = SGX_EPC_PAGE_IS_FREE;

         spin_unlock(&node->lock);
 }
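
sgx_free_epc_page() now clears the owner and routes the page by its poison bit: poisoned pages are pushed onto node->sgx_poison_page_list with list_add(), clean ones queue at the tail of the free list. Note that sgx_nr_free_pages++ and the SGX_EPC_PAGE_IS_FREE flag still apply to poisoned pages in this revision; only the list placement keeps them away from the allocator, which pulls from free_page_list alone. A self-contained sketch of the two insertions, using a minimal circular list in the style of <linux/list.h> (struct epc_page and the list code here are illustrative, not the kernel's):

    #include <stdbool.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    #define LIST_HEAD_INIT(name) { &(name), &(name) }

    static void __list_add(struct list_head *entry, struct list_head *prev,
                           struct list_head *next)
    {
            next->prev = entry;
            entry->next = next;
            entry->prev = prev;
            prev->next = entry;
    }

    /* list_add(): insert right after head (LIFO order). */
    static void list_add(struct list_head *entry, struct list_head *head)
    {
            __list_add(entry, head, head->next);
    }

    /* list_add_tail(): insert just before head (FIFO order). */
    static void list_add_tail(struct list_head *entry, struct list_head *head)
    {
            __list_add(entry, head->prev, head);
    }

    struct epc_page {
            bool poison;
            struct list_head list;
    };

    int main(void)
    {
            struct list_head free_list = LIST_HEAD_INIT(free_list);
            struct list_head poison_list = LIST_HEAD_INIT(poison_list);
            struct epc_page page = { .poison = true };

            /* Mirrors the routing decision above. */
            if (page.poison)
                    list_add(&page.list, &poison_list);
            else
                    list_add_tail(&page.list, &free_list);

            printf("quarantined: %d\n", poison_list.next == &page.list);
            return 0;
    }
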
 static bool __init sgx_setup_epc_section(u64 phys_addr, u64 size,
                                           unsigned long index,

--- 15 unchanged lines hidden ---

         section->phys_addr = phys_addr;
         xa_store_range(&sgx_epc_address_space, section->phys_addr,
                        phys_addr + size - 1, section, GFP_KERNEL);

         for (i = 0; i < nr_pages; i++) {
                 section->pages[i].section = index;
                 section->pages[i].flags = 0;
                 section->pages[i].owner = NULL;
+                section->pages[i].poison = 0;
                 list_add_tail(&section->pages[i].list, &sgx_dirty_page_list);
         }

         return true;
 }
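
With poison cleared at setup, every EPC page starts out on sgx_dirty_page_list, and the sanitizer loop shown earlier decides its fate at boot: quarantine if poison got set in the meantime, free after a successful __eremove(), otherwise stay on the dirty list for another pass. A toy model of that per-page decision (names are illustrative; the kernel tracks this state by list membership, not an enum):

    #include <stdbool.h>
    #include <stdio.h>

    enum epc_state { EPC_DIRTY, EPC_FREE, EPC_POISONED };

    static enum epc_state sanitize_step(bool poison, bool eremove_ok)
    {
            if (poison)
                    return EPC_POISONED;    /* diverted before __eremove() */

            return eremove_ok ? EPC_FREE : EPC_DIRTY;   /* retried on failure */
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   sanitize_step(true, true),       /* quarantined */
                   sanitize_step(false, true),      /* sanitized, then freed */
                   sanitize_step(false, false));    /* remains dirty */
            return 0;
    }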

 bool arch_is_platform_page(u64 paddr)
 {

--- 50 unchanged lines hidden ---

                         /* The physical address is already printed above. */
                         pr_warn(FW_BUG "Unable to map EPC section to online node. Fallback to the NUMA node 0.\n");
                         nid = 0;
                 }

                 if (!node_isset(nid, sgx_numa_mask)) {
                         spin_lock_init(&sgx_numa_nodes[nid].lock);
                         INIT_LIST_HEAD(&sgx_numa_nodes[nid].free_page_list);
+                        INIT_LIST_HEAD(&sgx_numa_nodes[nid].sgx_poison_page_list);
                         node_set(nid, sgx_numa_mask);
                 }

                 sgx_epc_sections[i].node = &sgx_numa_nodes[nid];

                 sgx_nr_epc_sections++;
         }
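
Each NUMA node thus keeps a quarantine list next to its free list, initialized together the first time the node is seen and both protected by the same node->lock. A compilable sketch of that per-node shape (the struct layout here is an assumption for illustration; the kernel's real struct sgx_numa_node lives in sgx.h and uses a spinlock rather than a mutex):

    #include <pthread.h>

    struct list_head { struct list_head *next, *prev; };

    static void INIT_LIST_HEAD(struct list_head *head)
    {
            head->next = head;
            head->prev = head;
    }

    /* Assumed shape of the per-node state this hunk initializes. */
    struct sgx_numa_node_sketch {
            struct list_head free_page_list;        /* sanitized, allocatable pages */
            struct list_head sgx_poison_page_list;  /* quarantined, never reused */
            pthread_mutex_t lock;                   /* stands in for the spinlock */
    };

    static void node_init(struct sgx_numa_node_sketch *node)
    {
            pthread_mutex_init(&node->lock, NULL);
            INIT_LIST_HEAD(&node->free_page_list);
            INIT_LIST_HEAD(&node->sgx_poison_page_list);
    }

    int main(void)
    {
            struct sgx_numa_node_sketch node;

            node_init(&node);
            return 0;
    }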

--- 120 unchanged lines hidden ---