// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2016-20 Intel Corporation. */

#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/highmem.h>
#include <linux/kthread.h>
#include <linux/miscdevice.h>
#include <linux/node.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <asm/sgx.h>
#include "driver.h"
#include "encl.h"
#include "encls.h"

struct sgx_epc_section sgx_epc_sections[SGX_MAX_EPC_SECTIONS];
static int sgx_nr_epc_sections;
static struct task_struct *ksgxd_tsk;
static DECLARE_WAIT_QUEUE_HEAD(ksgxd_waitq);
static DEFINE_XARRAY(sgx_epc_address_space);

/*
 * These variables are part of the state of the reclaimer, and must be accessed
 * with sgx_reclaimer_lock acquired.
 */
static LIST_HEAD(sgx_active_page_list);
static DEFINE_SPINLOCK(sgx_reclaimer_lock);

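/*
 * Total number of free EPC pages across all NUMA nodes. Kept as an atomic so
 * that the lockless watermark checks in sgx_should_reclaim() stay cheap.
 */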
static atomic_long_t sgx_nr_free_pages = ATOMIC_LONG_INIT(0);

/* Nodes with one or more EPC sections. */
static nodemask_t sgx_numa_mask;

/*
 * Array with one list_head for each possible NUMA node. Each
 * list contains all the sgx_epc_section's which are on that
 * node.
 */
static struct sgx_numa_node *sgx_numa_nodes;

static LIST_HEAD(sgx_dirty_page_list);

/*
 * Reset post-kexec EPC pages to the uninitialized state. The pages are removed
 * from the input list, and made available for the page allocator. SECS pages
 * whose child pages are still present in the input list are left intact:
 * EREMOVE fails on them until all of their children have been removed.
 *
 * Return 0 when sanitization was successful or the kthread was stopped, and
 * the number of unsanitized pages otherwise.
 */
static unsigned long __sgx_sanitize_pages(struct list_head *dirty_page_list)
{
	unsigned long left_dirty = 0;
	struct sgx_epc_page *page;
	LIST_HEAD(dirty);
	int ret;

	/* dirty_page_list is thread-local, no need for a lock: */
	while (!list_empty(dirty_page_list)) {
		if (kthread_should_stop())
			return 0;

		page = list_first_entry(dirty_page_list, struct sgx_epc_page, list);

		/*
		 * Checking page->poison without holding the node->lock
		 * is racy, but losing the race (i.e. poison is set just
		 * after the check) just means __eremove() will be uselessly
		 * called for a page that sgx_free_epc_page() will put onto
		 * the node->sgx_poison_page_list later.
		 */
		if (page->poison) {
			struct sgx_epc_section *section = &sgx_epc_sections[page->section];
			struct sgx_numa_node *node = section->node;

			spin_lock(&node->lock);
			list_move(&page->list, &node->sgx_poison_page_list);
			spin_unlock(&node->lock);

			continue;
		}

		ret = __eremove(sgx_get_epc_virt_addr(page));
		if (!ret) {
			/*
			 * page is now sanitized. Make it available via the SGX
			 * page allocator:
			 */
			list_del(&page->list);
			sgx_free_epc_page(page);
		} else {
			/* The page is not yet clean - move to the dirty list. */
			list_move_tail(&page->list, &dirty);
			left_dirty++;
		}

		cond_resched();
	}

	list_splice(&dirty, dirty_page_list);
	return left_dirty;
}

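/*
 * Age @epc_page: test whether it has been accessed since the last scan in any
 * mm that maps the enclave, clearing the accessed ("young") bits as a side
 * effect. Return true if the page was not recently accessed and is therefore
 * a valid reclaim candidate, false otherwise.
 */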
static bool sgx_reclaimer_age(struct sgx_epc_page *epc_page)
{
	struct sgx_encl_page *page = epc_page->owner;
	struct sgx_encl *encl = page->encl;
	struct sgx_encl_mm *encl_mm;
	bool ret = true;
	int idx;

	idx = srcu_read_lock(&encl->srcu);

	list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) {
		if (!mmget_not_zero(encl_mm->mm))
			continue;

		mmap_read_lock(encl_mm->mm);
		ret = !sgx_encl_test_and_clear_young(encl_mm->mm, page);
		mmap_read_unlock(encl_mm->mm);

		mmput_async(encl_mm->mm);

		if (!ret)
			break;
	}

	srcu_read_unlock(&encl->srcu, idx);

	if (!ret)
		return false;

	return true;
}

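/*
 * Zap all PTEs that map @epc_page so that no new TLB entries can be created,
 * then mark the page as blocked with EBLOCK in preparation for EWB.
 */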
static void sgx_reclaimer_block(struct sgx_epc_page *epc_page)
{
	struct sgx_encl_page *page = epc_page->owner;
	unsigned long addr = page->desc & PAGE_MASK;
	struct sgx_encl *encl = page->encl;
	int ret;

	sgx_zap_enclave_ptes(encl, addr);

	mutex_lock(&encl->lock);

	ret = __eblock(sgx_get_epc_virt_addr(epc_page));
	if (encls_failed(ret))
		ENCLS_WARN(ret, "EBLOCK");

	mutex_unlock(&encl->lock);
}

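/*
 * Low-level EWB: write the encrypted page contents to @backing->contents and
 * the matching PCMD (crypto metadata) to @backing->pcmd, storing the
 * anti-rollback version in the VA slot @va_slot.
 */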
static int __sgx_encl_ewb(struct sgx_epc_page *epc_page, void *va_slot,
			  struct sgx_backing *backing)
{
	struct sgx_pageinfo pginfo;
	int ret;

	pginfo.addr = 0;
	pginfo.secs = 0;

	pginfo.contents = (unsigned long)kmap_local_page(backing->contents);
	pginfo.metadata = (unsigned long)kmap_local_page(backing->pcmd) +
			  backing->pcmd_offset;

	ret = __ewb(&pginfo, sgx_get_epc_virt_addr(epc_page), va_slot);
	set_page_dirty(backing->pcmd);
	set_page_dirty(backing->contents);

	kunmap_local((void *)(unsigned long)(pginfo.metadata -
					     backing->pcmd_offset));
	kunmap_local((void *)(unsigned long)pginfo.contents);

	return ret;
}

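/*
 * The callback is intentionally empty: the IPI itself forces an asynchronous
 * enclave exit (AEX) on any logical CPU executing in the enclave, which is
 * all that the EWB slow path requires.
 */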
void sgx_ipi_cb(void *info)
{
}

/*
 * Swap a page out to regular memory after it has been transformed to the
 * blocked state with EBLOCK, which means that it can no longer be referenced
 * (no new TLB entries can be created for it).
 *
 * The first trial just tries to write the page assuming that some other thread
 * has reset the count for threads inside the enclave by using ETRACK, and the
 * previous thread count has been zeroed out. The second trial calls ETRACK
 * before EWB. If that fails we kick all the HW threads out, and then do EWB,
 * which should be guaranteed to succeed.
 */
static void sgx_encl_ewb(struct sgx_epc_page *epc_page,
			 struct sgx_backing *backing)
{
	struct sgx_encl_page *encl_page = epc_page->owner;
	struct sgx_encl *encl = encl_page->encl;
	struct sgx_va_page *va_page;
	unsigned int va_offset;
	void *va_slot;
	int ret;

	encl_page->desc &= ~SGX_ENCL_PAGE_BEING_RECLAIMED;

	va_page = list_first_entry(&encl->va_pages, struct sgx_va_page,
				   list);
	va_offset = sgx_alloc_va_slot(va_page);
	va_slot = sgx_get_epc_virt_addr(va_page->epc_page) + va_offset;
	if (sgx_va_page_full(va_page))
		list_move_tail(&va_page->list, &encl->va_pages);

	ret = __sgx_encl_ewb(epc_page, va_slot, backing);
	if (ret == SGX_NOT_TRACKED) {
		ret = __etrack(sgx_get_epc_virt_addr(encl->secs.epc_page));
		if (ret) {
			if (encls_failed(ret))
				ENCLS_WARN(ret, "ETRACK");
		}

		ret = __sgx_encl_ewb(epc_page, va_slot, backing);
		if (ret == SGX_NOT_TRACKED) {
			/*
			 * Slow path, send IPIs to kick cpus out of the
			 * enclave.  Note, it's imperative that the cpu
			 * mask is generated *after* ETRACK, else we'll
			 * miss cpus that entered the enclave between
			 * generating the mask and incrementing epoch.
			 */
			on_each_cpu_mask(sgx_encl_cpumask(encl),
					 sgx_ipi_cb, NULL, 1);
			ret = __sgx_encl_ewb(epc_page, va_slot, backing);
		}
	}

	if (ret) {
		if (encls_failed(ret))
			ENCLS_WARN(ret, "EWB");

		sgx_free_va_slot(va_page, va_offset);
	} else {
		encl_page->desc |= va_offset;
		encl_page->va_page = va_page;
	}
}

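/*
 * Write @epc_page out to its backing storage. If this was the enclave's last
 * child page and the enclave has been initialized, write out and free the
 * now-childless SECS page as well.
 */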
static void sgx_reclaimer_write(struct sgx_epc_page *epc_page,
				struct sgx_backing *backing)
{
	struct sgx_encl_page *encl_page = epc_page->owner;
	struct sgx_encl *encl = encl_page->encl;
	struct sgx_backing secs_backing;
	int ret;

	mutex_lock(&encl->lock);

	sgx_encl_ewb(epc_page, backing);
	encl_page->epc_page = NULL;
	encl->secs_child_cnt--;
	sgx_encl_put_backing(backing);

	if (!encl->secs_child_cnt && test_bit(SGX_ENCL_INITIALIZED, &encl->flags)) {
		ret = sgx_encl_alloc_backing(encl, PFN_DOWN(encl->size),
					     &secs_backing);
		if (ret)
			goto out;

		sgx_encl_ewb(encl->secs.epc_page, &secs_backing);

		sgx_encl_free_epc_page(encl->secs.epc_page);
		encl->secs.epc_page = NULL;

		sgx_encl_put_backing(&secs_backing);
	}

out:
	mutex_unlock(&encl->lock);
}

/*
 * Take a fixed number of pages from the head of the active page pool and
 * reclaim them to the enclave's private shmem files. Skip the pages that have
 * been accessed since the last scan, moving them to the tail of the active
 * page pool so that the pages get scanned in an LRU-like fashion.
 *
 * Batch process a chunk of pages (at the moment 16) in order to reduce the
 * number of IPIs and ETRACKs potentially required. sgx_encl_ewb() does spread
 * the cost among the HW threads with the three-stage EWB pipeline (EWB,
 * ETRACK + EWB and IPI + EWB), but not sufficiently. Reclaiming one page at a
 * time would also be problematic as it would increase lock contention too
 * much, which would halt forward progress.
 */
static void sgx_reclaim_pages(void)
{
	struct sgx_epc_page *chunk[SGX_NR_TO_SCAN];
	struct sgx_backing backing[SGX_NR_TO_SCAN];
	struct sgx_encl_page *encl_page;
	struct sgx_epc_page *epc_page;
	pgoff_t page_index;
	int cnt = 0;
	int ret;
	int i;

	spin_lock(&sgx_reclaimer_lock);
	for (i = 0; i < SGX_NR_TO_SCAN; i++) {
		if (list_empty(&sgx_active_page_list))
			break;

		epc_page = list_first_entry(&sgx_active_page_list,
					    struct sgx_epc_page, list);
		list_del_init(&epc_page->list);
		encl_page = epc_page->owner;

		if (kref_get_unless_zero(&encl_page->encl->refcount) != 0)
			chunk[cnt++] = epc_page;
		else
			/* The owner is freeing the page. No need to add the
			 * page back to the list of reclaimable pages.
			 */
			epc_page->flags &= ~SGX_EPC_PAGE_RECLAIMER_TRACKED;
	}
	spin_unlock(&sgx_reclaimer_lock);

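	/*
	 * Age the candidates and set up backing storage for those that were
	 * not recently accessed; the rest go back to the tail of the active
	 * list.
	 */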
	for (i = 0; i < cnt; i++) {
		epc_page = chunk[i];
		encl_page = epc_page->owner;

		if (!sgx_reclaimer_age(epc_page))
			goto skip;

		page_index = PFN_DOWN(encl_page->desc - encl_page->encl->base);

		mutex_lock(&encl_page->encl->lock);
		ret = sgx_encl_alloc_backing(encl_page->encl, page_index, &backing[i]);
		if (ret) {
			mutex_unlock(&encl_page->encl->lock);
			goto skip;
		}

		encl_page->desc |= SGX_ENCL_PAGE_BEING_RECLAIMED;
		mutex_unlock(&encl_page->encl->lock);
		continue;

skip:
		spin_lock(&sgx_reclaimer_lock);
		list_add_tail(&epc_page->list, &sgx_active_page_list);
		spin_unlock(&sgx_reclaimer_lock);

		kref_put(&encl_page->encl->refcount, sgx_encl_release);

		chunk[i] = NULL;
	}

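	/* Block the surviving candidates: zap PTEs and EBLOCK each page. */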
	for (i = 0; i < cnt; i++) {
		epc_page = chunk[i];
		if (epc_page)
			sgx_reclaimer_block(epc_page);
	}

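	/* Write the blocked pages out to backing storage and free them. */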
	for (i = 0; i < cnt; i++) {
		epc_page = chunk[i];
		if (!epc_page)
			continue;

		encl_page = epc_page->owner;
		sgx_reclaimer_write(epc_page, &backing[i]);

		kref_put(&encl_page->encl->refcount, sgx_encl_release);
		epc_page->flags &= ~SGX_EPC_PAGE_RECLAIMER_TRACKED;

		sgx_free_epc_page(epc_page);
	}
}

static bool sgx_should_reclaim(unsigned long watermark)
{
	return atomic_long_read(&sgx_nr_free_pages) < watermark &&
	       !list_empty(&sgx_active_page_list);
}

/*
 * sgx_reclaim_direct() should be called (without enclave's mutex held)
 * in locations where SGX memory resources might be low and might be
 * needed in order to make forward progress.
 */
void sgx_reclaim_direct(void)
{
	if (sgx_should_reclaim(SGX_NR_LOW_PAGES))
		sgx_reclaim_pages();
}

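/*
 * ksgxd is the background reclaimer thread: after the two post-kexec
 * sanitization passes it sleeps until the number of free EPC pages drops
 * below SGX_NR_HIGH_PAGES, then swaps out enclave pages to relieve pressure.
 */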
static int ksgxd(void *p)
{
	set_freezable();

	/*
	 * Sanitize pages in order to recover from kexec(). The 2nd pass is
	 * required for SECS pages, whose child pages blocked EREMOVE.
	 */
	__sgx_sanitize_pages(&sgx_dirty_page_list);
	WARN_ON(__sgx_sanitize_pages(&sgx_dirty_page_list));

	while (!kthread_should_stop()) {
		if (try_to_freeze())
			continue;

		wait_event_freezable(ksgxd_waitq,
				     kthread_should_stop() ||
				     sgx_should_reclaim(SGX_NR_HIGH_PAGES));

		if (sgx_should_reclaim(SGX_NR_HIGH_PAGES))
			sgx_reclaim_pages();

		cond_resched();
	}

	return 0;
}

static bool __init sgx_page_reclaimer_init(void)
{
	struct task_struct *tsk;

	tsk = kthread_run(ksgxd, NULL, "ksgxd");
	if (IS_ERR(tsk))
		return false;

	ksgxd_tsk = tsk;

	return true;
}

bool current_is_ksgxd(void)
{
	return current == ksgxd_tsk;
}

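/*
 * Pop the first page off @nid's free list, or return NULL if the node has no
 * free EPC pages left.
 */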
static struct sgx_epc_page *__sgx_alloc_epc_page_from_node(int nid)
{
	struct sgx_numa_node *node = &sgx_numa_nodes[nid];
	struct sgx_epc_page *page = NULL;

	spin_lock(&node->lock);

	if (list_empty(&node->free_page_list)) {
		spin_unlock(&node->lock);
		return NULL;
	}

	page = list_first_entry(&node->free_page_list, struct sgx_epc_page, list);
	list_del_init(&page->list);
	page->flags = 0;

	spin_unlock(&node->lock);
	atomic_long_dec(&sgx_nr_free_pages);

	return page;
}

/**
 * __sgx_alloc_epc_page() - Allocate an EPC page
 *
 * Iterate through the NUMA nodes and reserve a free EPC page for the caller.
 * Start from the NUMA node where the caller is executing.
 *
 * Return:
 * - an EPC page:		A free EPC page was available.
 * - ERR_PTR(-ENOMEM):		Out of EPC pages.
 */
struct sgx_epc_page *__sgx_alloc_epc_page(void)
{
	struct sgx_epc_page *page;
	int nid_of_current = numa_node_id();
	int nid_start, nid;

	/*
	 * Try local node first. If it doesn't have an EPC section,
	 * fall back to the non-local NUMA nodes.
	 */
	if (node_isset(nid_of_current, sgx_numa_mask))
		nid_start = nid_of_current;
	else
		nid_start = next_node_in(nid_of_current, sgx_numa_mask);

	nid = nid_start;
	do {
		page = __sgx_alloc_epc_page_from_node(nid);
		if (page)
			return page;

		nid = next_node_in(nid, sgx_numa_mask);
	} while (nid != nid_start);

	return ERR_PTR(-ENOMEM);
}

/**
 * sgx_mark_page_reclaimable() - Mark a page as reclaimable
 * @page:	EPC page
 *
 * Mark a page as reclaimable and add it to the active page list. Pages
 * are automatically removed from the active list when freed.
 */
void sgx_mark_page_reclaimable(struct sgx_epc_page *page)
{
	spin_lock(&sgx_reclaimer_lock);
	page->flags |= SGX_EPC_PAGE_RECLAIMER_TRACKED;
	list_add_tail(&page->list, &sgx_active_page_list);
	spin_unlock(&sgx_reclaimer_lock);
}

/**
 * sgx_unmark_page_reclaimable() - Remove a page from the reclaim list
 * @page:	EPC page
 *
 * Clear the reclaimable flag and remove the page from the active page list.
 *
 * Return:
 *   0 on success,
 *   -EBUSY if the page is in the process of being reclaimed
 */
int sgx_unmark_page_reclaimable(struct sgx_epc_page *page)
{
	spin_lock(&sgx_reclaimer_lock);
	if (page->flags & SGX_EPC_PAGE_RECLAIMER_TRACKED) {
		/* The page is being reclaimed. */
		if (list_empty(&page->list)) {
			spin_unlock(&sgx_reclaimer_lock);
			return -EBUSY;
		}

		list_del(&page->list);
		page->flags &= ~SGX_EPC_PAGE_RECLAIMER_TRACKED;
	}
	spin_unlock(&sgx_reclaimer_lock);

	return 0;
}

/**
 * sgx_alloc_epc_page() - Allocate an EPC page
 * @owner:	the owner of the EPC page
 * @reclaim:	reclaim pages if necessary
 *
 * Iterate through the EPC sections and borrow a free EPC page for the caller.
 * When a page is no longer needed it must be released with
 * sgx_free_epc_page(). If @reclaim is set to true, directly reclaim pages
 * when we are out of pages. No mm's can be locked when @reclaim is set to
 * true.
 *
 * Finally, wake up ksgxd when the number of pages goes below the watermark
 * before returning to the caller.
 *
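 * A minimal usage sketch (a hypothetical caller allocating a page for an
 * enclave page, without direct reclaim):
 *
 *	epc_page = sgx_alloc_epc_page(encl_page, false);
 *	if (IS_ERR(epc_page))
 *		return PTR_ERR(epc_page);
 *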
 * Return:
 *   an EPC page,
 *   -errno on error
 */
struct sgx_epc_page *sgx_alloc_epc_page(void *owner, bool reclaim)
{
	struct sgx_epc_page *page;

	for ( ; ; ) {
		page = __sgx_alloc_epc_page();
		if (!IS_ERR(page)) {
			page->owner = owner;
			break;
		}

		if (list_empty(&sgx_active_page_list))
			return ERR_PTR(-ENOMEM);

		if (!reclaim) {
			page = ERR_PTR(-EBUSY);
			break;
		}

		if (signal_pending(current)) {
			page = ERR_PTR(-ERESTARTSYS);
			break;
		}

		sgx_reclaim_pages();
		cond_resched();
	}

	if (sgx_should_reclaim(SGX_NR_LOW_PAGES))
		wake_up(&ksgxd_waitq);

	return page;
}

/**
 * sgx_free_epc_page() - Free an EPC page
 * @page:	an EPC page
 *
 * Put the EPC page back to the list of free pages. It's the caller's
 * responsibility to make sure that the page is in uninitialized state. In
 * other words, do EREMOVE, EWB or whatever operation is necessary before
 * calling this function.
 */
void sgx_free_epc_page(struct sgx_epc_page *page)
{
	struct sgx_epc_section *section = &sgx_epc_sections[page->section];
	struct sgx_numa_node *node = section->node;

	spin_lock(&node->lock);

	page->owner = NULL;
	if (page->poison)
		list_add(&page->list, &node->sgx_poison_page_list);
	else
		list_add_tail(&page->list, &node->free_page_list);
	page->flags = SGX_EPC_PAGE_IS_FREE;

	spin_unlock(&node->lock);
	atomic_long_inc(&sgx_nr_free_pages);
}

static bool __init sgx_setup_epc_section(u64 phys_addr, u64 size,
					 unsigned long index,
					 struct sgx_epc_section *section)
{
	unsigned long nr_pages = size >> PAGE_SHIFT;
	unsigned long i;

	section->virt_addr = memremap(phys_addr, size, MEMREMAP_WB);
	if (!section->virt_addr)
		return false;

	section->pages = vmalloc(nr_pages * sizeof(struct sgx_epc_page));
	if (!section->pages) {
		memunmap(section->virt_addr);
		return false;
	}

	section->phys_addr = phys_addr;
	xa_store_range(&sgx_epc_address_space, section->phys_addr,
		       phys_addr + size - 1, section, GFP_KERNEL);

	for (i = 0; i < nr_pages; i++) {
		section->pages[i].section = index;
		section->pages[i].flags = 0;
		section->pages[i].owner = NULL;
		section->pages[i].poison = 0;
		list_add_tail(&section->pages[i].list, &sgx_dirty_page_list);
	}

	return true;
}

bool arch_is_platform_page(u64 paddr)
{
	return !!xa_load(&sgx_epc_address_space, paddr);
}
EXPORT_SYMBOL_GPL(arch_is_platform_page);

static struct sgx_epc_page *sgx_paddr_to_page(u64 paddr)
{
	struct sgx_epc_section *section;

	section = xa_load(&sgx_epc_address_space, paddr);
	if (!section)
		return NULL;

	return &section->pages[PFN_DOWN(paddr - section->phys_addr)];
}

/*
 * Called in process context to handle a hardware reported
 * error in an SGX EPC page.
 * If the MF_ACTION_REQUIRED bit is set in flags, then the
 * context is the task that consumed the poison data. Otherwise
 * this is called from a kernel thread unrelated to the page.
 */
int arch_memory_failure(unsigned long pfn, int flags)
{
	struct sgx_epc_page *page = sgx_paddr_to_page(pfn << PAGE_SHIFT);
	struct sgx_epc_section *section;
	struct sgx_numa_node *node;

	/*
	 * mm/memory-failure.c calls this routine for all errors
	 * where there isn't a "struct page" for the address. But that
	 * includes other address ranges besides SGX.
	 */
	if (!page)
		return -ENXIO;

	/*
	 * If poison was consumed synchronously, send a SIGBUS to
	 * the task. Hardware has already exited the SGX enclave and
	 * will not allow re-entry to an enclave that has a memory
	 * error. The signal may help the task understand why the
	 * enclave is broken.
	 */
	if (flags & MF_ACTION_REQUIRED)
		force_sig(SIGBUS);

	section = &sgx_epc_sections[page->section];
	node = section->node;

	spin_lock(&node->lock);

	/* Already poisoned? Nothing more to do */
	if (page->poison)
		goto out;

	page->poison = 1;

	/*
	 * If the page is on a free list, move it to the per-node
	 * poison page list.
	 */
	if (page->flags & SGX_EPC_PAGE_IS_FREE) {
		list_move(&page->list, &node->sgx_poison_page_list);
		goto out;
	}

	/*
	 * TBD: Add additional plumbing to enable pre-emptive
	 * action for asynchronous poison notification. Until
	 * then just hope that the poison:
	 * a) is not accessed - sgx_free_epc_page() will deal with it
	 *    when the user gives it back
	 * b) results in a recoverable machine check rather than
	 *    a fatal one
	 */
out:
	spin_unlock(&node->lock);
	return 0;
}

/**
 * A section metric is concatenated from two CPUID register values: bits 12-31
 * of @low supply bits 12-31 of the metric, and bits 0-19 of @high supply bits
 * 32-51 of the metric.
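 *
 * For example, low = 0x40001000 and high = 0x1 yield the metric
 * 0x40001000 + (0x1 << 32) = 0x140001000.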
 */
static inline u64 __init sgx_calc_section_metric(u64 low, u64 high)
{
	return (low & GENMASK_ULL(31, 12)) +
	       ((high & GENMASK_ULL(19, 0)) << 32);
}

#ifdef CONFIG_NUMA
static ssize_t sgx_total_bytes_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lu\n", sgx_numa_nodes[dev->id].size);
}
static DEVICE_ATTR_RO(sgx_total_bytes);

static umode_t arch_node_attr_is_visible(struct kobject *kobj,
					 struct attribute *attr, int idx)
{
	/* Make all x86/ attributes invisible when SGX is not initialized: */
	if (nodes_empty(sgx_numa_mask))
		return 0;

	return attr->mode;
}

static struct attribute *arch_node_dev_attrs[] = {
	&dev_attr_sgx_total_bytes.attr,
	NULL,
};

const struct attribute_group arch_node_dev_group = {
	.name = "x86",
	.attrs = arch_node_dev_attrs,
	.is_visible = arch_node_attr_is_visible,
};

static void __init arch_update_sysfs_visibility(int nid)
{
	struct node *node = node_devices[nid];
	int ret;

	ret = sysfs_update_group(&node->dev.kobj, &arch_node_dev_group);

	if (ret)
		pr_err("sysfs update failed (%d), files may be invisible\n", ret);
}
#else /* !CONFIG_NUMA */
static void __init arch_update_sysfs_visibility(int nid) {}
#endif

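/*
 * Enumerate the EPC sections from CPUID leaf SGX_CPUID, map each one, and
 * attach it to the NUMA node backing its physical address range.
 */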
static bool __init sgx_page_cache_init(void)
{
	u32 eax, ebx, ecx, edx, type;
	u64 pa, size;
	int nid;
	int i;

	sgx_numa_nodes = kmalloc_array(num_possible_nodes(), sizeof(*sgx_numa_nodes), GFP_KERNEL);
	if (!sgx_numa_nodes)
		return false;

	for (i = 0; i < ARRAY_SIZE(sgx_epc_sections); i++) {
		cpuid_count(SGX_CPUID, i + SGX_CPUID_EPC, &eax, &ebx, &ecx, &edx);

		type = eax & SGX_CPUID_EPC_MASK;
		if (type == SGX_CPUID_EPC_INVALID)
			break;

		if (type != SGX_CPUID_EPC_SECTION) {
			pr_err_once("Unknown EPC section type: %u\n", type);
			break;
		}

		pa = sgx_calc_section_metric(eax, ebx);
		size = sgx_calc_section_metric(ecx, edx);

		pr_info("EPC section 0x%llx-0x%llx\n", pa, pa + size - 1);

		if (!sgx_setup_epc_section(pa, size, i, &sgx_epc_sections[i])) {
			pr_err("No free memory for an EPC section\n");
			break;
		}

		nid = numa_map_to_online_node(phys_to_target_node(pa));
		if (nid == NUMA_NO_NODE) {
			/* The physical address is already printed above. */
			pr_warn(FW_BUG "Unable to map EPC section to online node. Fallback to the NUMA node 0.\n");
			nid = 0;
		}

		if (!node_isset(nid, sgx_numa_mask)) {
			spin_lock_init(&sgx_numa_nodes[nid].lock);
			INIT_LIST_HEAD(&sgx_numa_nodes[nid].free_page_list);
			INIT_LIST_HEAD(&sgx_numa_nodes[nid].sgx_poison_page_list);
			node_set(nid, sgx_numa_mask);
			sgx_numa_nodes[nid].size = 0;

			/* Make SGX-specific node sysfs files visible: */
			arch_update_sysfs_visibility(nid);
		}

		sgx_epc_sections[i].node = &sgx_numa_nodes[nid];
		sgx_numa_nodes[nid].size += size;

		sgx_nr_epc_sections++;
	}

	if (!sgx_nr_epc_sections) {
		pr_err("There are zero EPC sections.\n");
		return false;
	}

	return true;
}

/*
 * Update the SGX_LEPUBKEYHASH MSRs to the values specified by the caller.
 * The bare-metal driver needs to update them to the hash of the enclave's
 * signer before EINIT. KVM needs to update them to the guest's virtual MSR
 * values before doing EINIT from the guest.
 */
void sgx_update_lepubkeyhash(u64 *lepubkeyhash)
{
	int i;

	WARN_ON_ONCE(preemptible());

	for (i = 0; i < 4; i++)
		wrmsrl(MSR_IA32_SGXLEPUBKEYHASH0 + i, lepubkeyhash[i]);
}

const struct file_operations sgx_provision_fops = {
	.owner = THIS_MODULE,
};

static struct miscdevice sgx_dev_provision = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "sgx_provision",
	.nodename = "sgx_provision",
	.fops = &sgx_provision_fops,
};

/**
 * sgx_set_attribute() - Update allowed attributes given file descriptor
 * @allowed_attributes:	Pointer to allowed enclave attributes
 * @attribute_fd:	File descriptor for specific attribute
 *
 * Append enclave attribute indicated by file descriptor to allowed
 * attributes. Currently only SGX_ATTR_PROVISIONKEY indicated by
 * /dev/sgx_provision is supported.
 *
 * Return:
 * - 0:		SGX_ATTR_PROVISIONKEY is appended to allowed_attributes
 * - -EINVAL:	Invalid or unsupported file descriptor
 */
int sgx_set_attribute(unsigned long *allowed_attributes,
		      unsigned int attribute_fd)
{
	struct fd f = fdget(attribute_fd);

	if (!f.file)
		return -EINVAL;

	if (f.file->f_op != &sgx_provision_fops) {
		fdput(f);
		return -EINVAL;
	}

	*allowed_attributes |= SGX_ATTR_PROVISIONKEY;

	fdput(f);
	return 0;
}
EXPORT_SYMBOL_GPL(sgx_set_attribute);

static int __init sgx_init(void)
{
	int ret;
	int i;

	if (!cpu_feature_enabled(X86_FEATURE_SGX))
		return -ENODEV;

	if (!sgx_page_cache_init())
		return -ENOMEM;

	if (!sgx_page_reclaimer_init()) {
		ret = -ENOMEM;
		goto err_page_cache;
	}

	ret = misc_register(&sgx_dev_provision);
	if (ret)
		goto err_kthread;

	/*
	 * Always try to initialize the native *and* KVM drivers.
	 * The KVM driver is less picky than the native one and
	 * can function if the native one is not supported on the
	 * current system or fails to initialize.
	 *
	 * Error out only if both fail to initialize.
	 */
	ret = sgx_drv_init();

	if (sgx_vepc_init() && ret)
		goto err_provision;

	return 0;

err_provision:
	misc_deregister(&sgx_dev_provision);

err_kthread:
	kthread_stop(ksgxd_tsk);

err_page_cache:
	for (i = 0; i < sgx_nr_epc_sections; i++) {
		vfree(sgx_epc_sections[i].pages);
		memunmap(sgx_epc_sections[i].virt_addr);
	}

	return ret;
}

device_initcall(sgx_init);