xref: /openbmc/linux/arch/x86/kernel/cpu/sgx/driver.c (revision 630dce2810b9f09d312aed4189300e785254c24b)
1 // SPDX-License-Identifier: GPL-2.0
2 /*  Copyright(c) 2016-20 Intel Corporation. */
3 
4 #include <linux/acpi.h>
5 #include <linux/miscdevice.h>
6 #include <linux/mman.h>
7 #include <linux/security.h>
8 #include <linux/suspend.h>
9 #include <asm/traps.h>
10 #include "driver.h"
11 #include "encl.h"
12 
/*
 * Masks of SECS attribute / XFRM / MISCSELECT bits that userspace must not
 * set. Populated from CPUID in sgx_drv_init(); consulted at enclave build
 * time. sgx_xfrm_reserved_mask defaults to everything but x87+SSE (bits
 * 0-1), which are always legal, until OSXSAVE refines it.
 */
u64 sgx_attributes_reserved_mask;
u64 sgx_xfrm_reserved_mask = ~0x3;
u32 sgx_misc_reserved_mask;
16 
17 static int sgx_open(struct inode *inode, struct file *file)
18 {
19 	struct sgx_encl *encl;
20 	int ret;
21 
22 	encl = kzalloc(sizeof(*encl), GFP_KERNEL);
23 	if (!encl)
24 		return -ENOMEM;
25 
26 	kref_init(&encl->refcount);
27 	xa_init(&encl->page_array);
28 	mutex_init(&encl->lock);
29 	INIT_LIST_HEAD(&encl->va_pages);
30 	INIT_LIST_HEAD(&encl->mm_list);
31 	spin_lock_init(&encl->mm_lock);
32 
33 	ret = init_srcu_struct(&encl->srcu);
34 	if (ret) {
35 		kfree(encl);
36 		return ret;
37 	}
38 
39 	file->private_data = encl;
40 
41 	return 0;
42 }
43 
/*
 * sgx_release() - Tear down the enclave on the final close() of its fd.
 *
 * Drains the remaining mm_list entries and then drops the file's reference
 * on the enclave. The exact ordering inside the drain loop (delete under
 * mm_lock, wait for SRCU readers, only then unregister the notifier and
 * free) is load-bearing — see the per-step comments below.
 */
static int sgx_release(struct inode *inode, struct file *file)
{
	struct sgx_encl *encl = file->private_data;
	struct sgx_encl_mm *encl_mm;

	/*
	 * Drain the remaining mm_list entries. At this point the list contains
	 * entries for processes, which have closed the enclave file but have
	 * not exited yet. The processes, which have exited, are gone from the
	 * list by sgx_mmu_notifier_release().
	 */
	for ( ; ; )  {
		spin_lock(&encl->mm_lock);

		if (list_empty(&encl->mm_list)) {
			encl_mm = NULL;
		} else {
			/* Detach one entry under the lock; free it later,
			 * after SRCU readers are known to be done with it. */
			encl_mm = list_first_entry(&encl->mm_list,
						   struct sgx_encl_mm, list);
			list_del_rcu(&encl_mm->list);
		}

		spin_unlock(&encl->mm_lock);

		/* The enclave is no longer mapped by any mm. */
		if (!encl_mm)
			break;

		/* Wait for SRCU readers traversing mm_list before the entry
		 * is unregistered and freed — otherwise a concurrent reader
		 * could dereference freed memory. */
		synchronize_srcu(&encl->srcu);
		mmu_notifier_unregister(&encl_mm->mmu_notifier, encl_mm->mm);
		kfree(encl_mm);
	}

	/* Drop the file's reference; sgx_encl_release() runs on the last put. */
	kref_put(&encl->refcount, sgx_encl_release);
	return 0;
}
80 
81 static int sgx_mmap(struct file *file, struct vm_area_struct *vma)
82 {
83 	struct sgx_encl *encl = file->private_data;
84 	int ret;
85 
86 	ret = sgx_encl_may_map(encl, vma->vm_start, vma->vm_end, vma->vm_flags);
87 	if (ret)
88 		return ret;
89 
90 	ret = sgx_encl_mm_add(encl, vma->vm_mm);
91 	if (ret)
92 		return ret;
93 
94 	vma->vm_ops = &sgx_vm_ops;
95 	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
96 	vma->vm_private_data = encl;
97 
98 	return 0;
99 }
100 
101 static unsigned long sgx_get_unmapped_area(struct file *file,
102 					   unsigned long addr,
103 					   unsigned long len,
104 					   unsigned long pgoff,
105 					   unsigned long flags)
106 {
107 	if ((flags & MAP_TYPE) == MAP_PRIVATE)
108 		return -EINVAL;
109 
110 	if (flags & MAP_FIXED)
111 		return addr;
112 
113 	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
114 }
115 
#ifdef CONFIG_COMPAT
/*
 * sgx_compat_ioctl() - 32-bit compat entry point.
 *
 * All SGX ioctl argument structures are layout-identical between 32- and
 * 64-bit ABIs, so compat calls forward 1:1 to the native handler.
 */
static long sgx_compat_ioctl(struct file *filep, unsigned int cmd,
			      unsigned long arg)
{
	return sgx_ioctl(filep, cmd, arg);
}
#endif
123 
/* File operations for /dev/sgx_enclave: enclave lifetime is tied to
 * open/release, building and initialization happen via ioctl, and the
 * built enclave is entered through mmap'ed mappings. */
static const struct file_operations sgx_encl_fops = {
	.owner			= THIS_MODULE,
	.open			= sgx_open,
	.release		= sgx_release,
	.unlocked_ioctl		= sgx_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= sgx_compat_ioctl,
#endif
	.mmap			= sgx_mmap,
	.get_unmapped_area	= sgx_get_unmapped_area,
};
135 
/* Deliberately empty fops: /dev/sgx_provision acts as a pure capability
 * token — possessing an open fd on it is what grants the PROVISIONKEY
 * attribute (checked elsewhere); the device does no I/O itself. */
const struct file_operations sgx_provision_fops = {
	.owner			= THIS_MODULE,
};
139 
/* Misc char device node /dev/sgx_enclave with a dynamically assigned minor. */
static struct miscdevice sgx_dev_enclave = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "sgx_enclave",
	.nodename = "sgx_enclave",
	.fops = &sgx_encl_fops,
};
146 
/* Misc char device node /dev/sgx_provision (provisioning capability token). */
static struct miscdevice sgx_dev_provision = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "sgx_provision",
	.nodename = "sgx_provision",
	.fops = &sgx_provision_fops,
};
153 
154 int __init sgx_drv_init(void)
155 {
156 	unsigned int eax, ebx, ecx, edx;
157 	u64 attr_mask;
158 	u64 xfrm_mask;
159 	int ret;
160 
161 	if (!cpu_feature_enabled(X86_FEATURE_SGX_LC))
162 		return -ENODEV;
163 
164 	cpuid_count(SGX_CPUID, 0, &eax, &ebx, &ecx, &edx);
165 
166 	if (!(eax & 1))  {
167 		pr_err("SGX disabled: SGX1 instruction support not available.\n");
168 		return -ENODEV;
169 	}
170 
171 	sgx_misc_reserved_mask = ~ebx | SGX_MISC_RESERVED_MASK;
172 
173 	cpuid_count(SGX_CPUID, 1, &eax, &ebx, &ecx, &edx);
174 
175 	attr_mask = (((u64)ebx) << 32) + (u64)eax;
176 	sgx_attributes_reserved_mask = ~attr_mask | SGX_ATTR_RESERVED_MASK;
177 
178 	if (cpu_feature_enabled(X86_FEATURE_OSXSAVE)) {
179 		xfrm_mask = (((u64)edx) << 32) + (u64)ecx;
180 		sgx_xfrm_reserved_mask = ~xfrm_mask;
181 	}
182 
183 	ret = misc_register(&sgx_dev_enclave);
184 	if (ret)
185 		return ret;
186 
187 	ret = misc_register(&sgx_dev_provision);
188 	if (ret) {
189 		misc_deregister(&sgx_dev_enclave);
190 		return ret;
191 	}
192 
193 	return 0;
194 }
195