// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/user.h>
#include <linux/regset.h>
#include <linux/syscalls.h>
#include <linux/nospec.h>

#include <linux/uaccess.h>
#include <asm/desc.h>
#include <asm/ldt.h>
#include <asm/processor.h>
#include <asm/proto.h>

#include "tls.h"

/*
 * get_free_idx: get a yet-unused TLS descriptor index.
 */
static int get_free_idx(void)
{
	struct thread_struct *t = &current->thread;
	int idx;

	for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
		if (desc_empty(&t->tls_array[idx]))
			return idx + GDT_ENTRY_TLS_MIN;
	return -ESRCH;
}

static bool tls_desc_okay(const struct user_desc *info)
{
	/*
	 * For historical reasons (i.e. no one ever documented how any
	 * of the segmentation APIs work), user programs can and do
	 * assume that a struct user_desc that's all zeros except for
	 * entry_number means "no segment at all".  This never actually
	 * worked.  In fact, up to Linux 3.19, a struct user_desc like
	 * this would create a 16-bit read-write segment with base and
	 * limit both equal to zero.
	 *
	 * That was close enough to "no segment at all" until we
	 * hardened this function to disallow 16-bit TLS segments.  Fix
	 * it up by interpreting these zeroed segments the way that they
	 * were almost certainly intended to be interpreted.
	 *
	 * The correct way to ask for "no segment at all" is to specify
	 * a user_desc that satisfies LDT_empty.  To keep everything
	 * working, we accept both.
	 *
	 * Note that there's a similar kludge in modify_ldt -- look at
	 * the distinction between modes 1 and 0x11.
	 */
	if (LDT_empty(info) || LDT_zero(info))
		return true;

	/*
	 * espfix is required for 16-bit data segments, but espfix
	 * only works for LDT segments.
	 */
	if (!info->seg_32bit)
		return false;

	/* Only allow data segments in the TLS array. */
	if (info->contents > 1)
		return false;

	/*
	 * Non-present segments with DPL 3 present an interesting attack
	 * surface.  The kernel should handle such segments correctly,
	 * but TLS is very difficult to protect in a sandbox, so prevent
	 * such segments from being created.
	 *
	 * If userspace needs to remove a TLS entry, it can still delete
	 * it outright.
	 */
	if (info->seg_not_present)
		return false;

	return true;
}
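
/*
 * Userspace sketch (illustrative only, not part of this file): the two
 * descriptor shapes that tls_desc_okay() treats as "delete this entry".
 * The slot number below is hypothetical; real code reuses whatever
 * entry_number it originally got back from set_thread_area().
 *
 *	struct user_desc ud;
 *
 *	// Form 1: all zeros except entry_number (the historical kludge,
 *	// accepted via LDT_zero).
 *	memset(&ud, 0, sizeof(ud));
 *	ud.entry_number = 12;			// hypothetical TLS slot
 *	syscall(SYS_set_thread_area, &ud);
 *
 *	// Form 2: the documented way, a descriptor satisfying LDT_empty.
 *	memset(&ud, 0, sizeof(ud));
 *	ud.entry_number = 12;			// hypothetical TLS slot
 *	ud.read_exec_only = 1;
 *	ud.seg_not_present = 1;
 *	syscall(SYS_set_thread_area, &ud);
 *
 * Both forms clear the slot: set_tls_desc() below zeroes the hardware
 * descriptor for either shape.
 */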

static void set_tls_desc(struct task_struct *p, int idx,
			 const struct user_desc *info, int n)
{
	struct thread_struct *t = &p->thread;
	struct desc_struct *desc = &t->tls_array[idx - GDT_ENTRY_TLS_MIN];
	int cpu;

	/*
	 * We must not get preempted while modifying the TLS.
	 */
	cpu = get_cpu();

	while (n-- > 0) {
		if (LDT_empty(info) || LDT_zero(info))
			memset(desc, 0, sizeof(*desc));
		else
			fill_ldt(desc, info);
		++info;
		++desc;
	}

	if (t == &current->thread)
		load_TLS(t, cpu);

	put_cpu();
}

/*
 * Set a given TLS descriptor:
 */
int do_set_thread_area(struct task_struct *p, int idx,
		       struct user_desc __user *u_info,
		       int can_allocate)
{
	struct user_desc info;
	unsigned short __maybe_unused sel, modified_sel;

	if (copy_from_user(&info, u_info, sizeof(info)))
		return -EFAULT;

	if (!tls_desc_okay(&info))
		return -EINVAL;

	if (idx == -1)
		idx = info.entry_number;

	/*
	 * index -1 means the kernel should try to find and
	 * allocate an empty descriptor:
	 */
	if (idx == -1 && can_allocate) {
		idx = get_free_idx();
		if (idx < 0)
			return idx;
		if (put_user(idx, &u_info->entry_number))
			return -EFAULT;
	}

	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	set_tls_desc(p, idx, &info, 1);

	/*
	 * If DS, ES, FS, or GS points to the modified segment, forcibly
	 * refresh it.  Only needed on x86_64 because x86_32 reloads them
	 * on return to user mode.
	 */
	modified_sel = (idx << 3) | 3;

	if (p == current) {
#ifdef CONFIG_X86_64
		savesegment(ds, sel);
		if (sel == modified_sel)
			loadsegment(ds, sel);

		savesegment(es, sel);
		if (sel == modified_sel)
			loadsegment(es, sel);

		savesegment(fs, sel);
		if (sel == modified_sel)
			loadsegment(fs, sel);

		savesegment(gs, sel);
		if (sel == modified_sel)
			load_gs_index(sel);
#endif

#ifdef CONFIG_X86_32_LAZY_GS
		savesegment(gs, sel);
		if (sel == modified_sel)
			loadsegment(gs, sel);
#endif
	} else {
#ifdef CONFIG_X86_64
		if (p->thread.fsindex == modified_sel)
			p->thread.fsbase = info.base_addr;

		if (p->thread.gsindex == modified_sel)
			p->thread.gsbase = info.base_addr;
#endif
	}

	return 0;
}

SYSCALL_DEFINE1(set_thread_area, struct user_desc __user *, u_info)
{
	return do_set_thread_area(current, -1, u_info, 1);
}

/*
 * Get the current Thread-Local Storage area:
 */

static void fill_user_desc(struct user_desc *info, int idx,
			   const struct desc_struct *desc)
{
	memset(info, 0, sizeof(*info));
	info->entry_number = idx;
	info->base_addr = get_desc_base(desc);
	info->limit = get_desc_limit(desc);
	info->seg_32bit = desc->d;
	info->contents = desc->type >> 2;
	info->read_exec_only = !(desc->type & 2);
	info->limit_in_pages = desc->g;
	info->seg_not_present = !desc->p;
	info->useable = desc->avl;
#ifdef CONFIG_X86_64
	info->lm = desc->l;
#endif
}

int do_get_thread_area(struct task_struct *p, int idx,
		       struct user_desc __user *u_info)
{
	struct user_desc info;
	int index;

	if (idx == -1 && get_user(idx, &u_info->entry_number))
		return -EFAULT;

	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	index = idx - GDT_ENTRY_TLS_MIN;
	index = array_index_nospec(index,
				   GDT_ENTRY_TLS_MAX - GDT_ENTRY_TLS_MIN + 1);

	fill_user_desc(&info, idx, &p->thread.tls_array[index]);

	if (copy_to_user(u_info, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}
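
/*
 * Userspace sketch (illustrative only, not part of this file): reading
 * back a TLS slot with get_thread_area(2).  There is no glibc wrapper,
 * so it goes through syscall(2).  The caller must fill in entry_number,
 * since the syscall below always passes idx == -1 and
 * do_get_thread_area() then takes the index from userspace.
 *
 *	#include <asm/ldt.h>		// struct user_desc
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	struct user_desc ud = { .entry_number = 12 };	// hypothetical slot
 *	if (syscall(SYS_get_thread_area, &ud) == 0)
 *		// ud.base_addr / ud.limit now describe the segment
 *		use_descriptor(&ud);			// hypothetical helper
 */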
SYSCALL_DEFINE1(get_thread_area, struct user_desc __user *, u_info)
{
	return do_get_thread_area(current, -1, u_info);
}

int regset_tls_active(struct task_struct *target,
		      const struct user_regset *regset)
{
	struct thread_struct *t = &target->thread;
	int n = GDT_ENTRY_TLS_ENTRIES;
	while (n > 0 && desc_empty(&t->tls_array[n - 1]))
		--n;
	return n;
}

int regset_tls_get(struct task_struct *target, const struct user_regset *regset,
		   struct membuf to)
{
	const struct desc_struct *tls;
	struct user_desc v;
	int pos;

	for (pos = 0, tls = target->thread.tls_array; to.left; pos++, tls++) {
		fill_user_desc(&v, GDT_ENTRY_TLS_MIN + pos, tls);
		membuf_write(&to, &v, sizeof(v));
	}
	return 0;
}

int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	struct user_desc infobuf[GDT_ENTRY_TLS_ENTRIES];
	const struct user_desc *info;
	int i;

	if (pos >= GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) ||
	    (pos % sizeof(struct user_desc)) != 0 ||
	    (count % sizeof(struct user_desc)) != 0)
		return -EINVAL;

	if (kbuf)
		info = kbuf;
	else if (__copy_from_user(infobuf, ubuf, count))
		return -EFAULT;
	else
		info = infobuf;

	for (i = 0; i < count / sizeof(struct user_desc); i++)
		if (!tls_desc_okay(info + i))
			return -EINVAL;

	set_tls_desc(target,
		     GDT_ENTRY_TLS_MIN + (pos / sizeof(struct user_desc)),
		     info, count / sizeof(struct user_desc));

	return 0;
}
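
/*
 * Debugger-side sketch (illustrative only, not part of this file): the
 * regset functions above back the NT_386_TLS note, so a tracer can read
 * a stopped tracee's whole TLS array in one PTRACE_GETREGSET call:
 *
 *	#include <asm/ldt.h>		// struct user_desc
 *	#include <elf.h>		// NT_386_TLS
 *	#include <sys/ptrace.h>
 *	#include <sys/uio.h>
 *
 *	struct user_desc tls[3];	// GDT_ENTRY_TLS_ENTRIES on x86
 *	struct iovec iov = { .iov_base = tls, .iov_len = sizeof(tls) };
 *	ptrace(PTRACE_GETREGSET, pid, NT_386_TLS, &iov);
 *
 * Writes arrive the same way via PTRACE_SETREGSET and land in
 * regset_tls_set(), which is why it revalidates every descriptor with
 * tls_desc_okay() before calling set_tls_desc().
 */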