// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2016-20 Intel Corporation. */

#include <linux/acpi.h>
#include <linux/miscdevice.h>
#include <linux/mman.h>
#include <linux/security.h>
#include <linux/suspend.h>
#include <asm/traps.h>
#include "driver.h"
#include "encl.h"

u64 sgx_attributes_reserved_mask;
u64 sgx_xfrm_reserved_mask = ~0x3;
u32 sgx_misc_reserved_mask;

static int sgx_open(struct inode *inode, struct file *file)
{
	struct sgx_encl *encl;
	int ret;

	encl = kzalloc(sizeof(*encl), GFP_KERNEL);
	if (!encl)
		return -ENOMEM;

	kref_init(&encl->refcount);
	xa_init(&encl->page_array);
	mutex_init(&encl->lock);
	INIT_LIST_HEAD(&encl->va_pages);
	INIT_LIST_HEAD(&encl->mm_list);
	spin_lock_init(&encl->mm_lock);

	ret = init_srcu_struct(&encl->srcu);
	if (ret) {
		kfree(encl);
		return ret;
	}

	file->private_data = encl;

	return 0;
}

static int sgx_release(struct inode *inode, struct file *file)
{
	struct sgx_encl *encl = file->private_data;
	struct sgx_encl_mm *encl_mm;

	/*
	 * Drain the remaining mm_list entries. At this point the list contains
	 * entries for processes that have closed the enclave file but have not
	 * exited yet. Processes that have already exited were removed from the
	 * list by sgx_mmu_notifier_release().
	 */
	for ( ; ; )  {
		spin_lock(&encl->mm_lock);

		if (list_empty(&encl->mm_list)) {
			encl_mm = NULL;
		} else {
			encl_mm = list_first_entry(&encl->mm_list,
						   struct sgx_encl_mm, list);
			list_del_rcu(&encl_mm->list);
		}

		spin_unlock(&encl->mm_lock);

		/* The enclave is no longer mapped by any mm. */
		if (!encl_mm)
			break;

		synchronize_srcu(&encl->srcu);
		mmu_notifier_unregister(&encl_mm->mmu_notifier, encl_mm->mm);
		kfree(encl_mm);

		/* 'encl_mm' is gone, put encl_mm->encl reference: */
		kref_put(&encl->refcount, sgx_encl_release);
	}

	kref_put(&encl->refcount, sgx_encl_release);
	return 0;
}

static int sgx_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct sgx_encl *encl = file->private_data;
	int ret;

	ret = sgx_encl_may_map(encl, vma->vm_start, vma->vm_end, vma->vm_flags);
	if (ret)
		return ret;

	ret = sgx_encl_mm_add(encl, vma->vm_mm);
	if (ret)
		return ret;

	vma->vm_ops = &sgx_vm_ops;
	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
	vma->vm_private_data = encl;

	return 0;
}

static unsigned long sgx_get_unmapped_area(struct file *file,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	/* Private (copy-on-write) mappings of enclave memory are not supported. */
	if ((flags & MAP_TYPE) == MAP_PRIVATE)
		return -EINVAL;

	if (flags & MAP_FIXED)
		return addr;

	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
}

#ifdef CONFIG_COMPAT
static long sgx_compat_ioctl(struct file *filep, unsigned int cmd,
			     unsigned long arg)
{
	return sgx_ioctl(filep, cmd, arg);
}
#endif

static const struct file_operations sgx_encl_fops = {
	.owner			= THIS_MODULE,
	.open			= sgx_open,
	.release		= sgx_release,
	.unlocked_ioctl		= sgx_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= sgx_compat_ioctl,
#endif
	.mmap			= sgx_mmap,
	.get_unmapped_area	= sgx_get_unmapped_area,
};

const struct file_operations sgx_provision_fops = {
	.owner			= THIS_MODULE,
};

static struct miscdevice sgx_dev_enclave = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "sgx_enclave",
	.nodename = "sgx_enclave",
	.fops = &sgx_encl_fops,
};

static struct miscdevice sgx_dev_provision = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "sgx_provision",
	.nodename = "sgx_provision",
	.fops = &sgx_provision_fops,
};

int __init sgx_drv_init(void)
{
	unsigned int eax, ebx, ecx, edx;
	u64 attr_mask;
	u64 xfrm_mask;
	int ret;

	if (!cpu_feature_enabled(X86_FEATURE_SGX_LC))
		return -ENODEV;

	/*
	 * Sub-leaf 0: EAX bit 0 advertises the SGX1 leaf functions, EBX
	 * reports the supported MISCSELECT bits.
	 */
	cpuid_count(SGX_CPUID, 0, &eax, &ebx, &ecx, &edx);

	if (!(eax & 1)) {
		pr_err("SGX disabled: SGX1 instruction support not available.\n");
		return -ENODEV;
	}

	sgx_misc_reserved_mask = ~ebx | SGX_MISC_RESERVED_MASK;

	/*
	 * Sub-leaf 1: EBX:EAX reports the allowed SECS.ATTRIBUTES bits and
	 * EDX:ECX the allowed XFRM bits.
	 */
	cpuid_count(SGX_CPUID, 1, &eax, &ebx, &ecx, &edx);

	attr_mask = (((u64)ebx) << 32) + (u64)eax;
	sgx_attributes_reserved_mask = ~attr_mask | SGX_ATTR_RESERVED_MASK;

	if (cpu_feature_enabled(X86_FEATURE_OSXSAVE)) {
		xfrm_mask = (((u64)edx) << 32) + (u64)ecx;
		sgx_xfrm_reserved_mask = ~xfrm_mask;
	}

	ret = misc_register(&sgx_dev_enclave);
	if (ret)
		return ret;

	ret = misc_register(&sgx_dev_provision);
	if (ret) {
		misc_deregister(&sgx_dev_enclave);
		return ret;
	}

	return 0;
}