#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/user.h>
#include <linux/regset.h>
#include <linux/syscalls.h>

#include <linux/uaccess.h>
#include <asm/desc.h>
#include <asm/ldt.h>
#include <asm/processor.h>
#include <asm/proto.h>

#include "tls.h"

/*
 * sys_alloc_thread_area: get a yet unused TLS descriptor index.
 */
static int get_free_idx(void)
{
        struct thread_struct *t = &current->thread;
        int idx;

        for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
                if (desc_empty(&t->tls_array[idx]))
                        return idx + GDT_ENTRY_TLS_MIN;
        return -ESRCH;
}

static bool tls_desc_okay(const struct user_desc *info)
{
        /*
         * For historical reasons (i.e. no one ever documented how any
         * of the segmentation APIs work), user programs can and do
         * assume that a struct user_desc that's all zeros except for
         * entry_number means "no segment at all". This never actually
         * worked. In fact, up to Linux 3.19, a struct user_desc like
         * this would create a 16-bit read-write segment with base and
         * limit both equal to zero.
         *
         * That was close enough to "no segment at all" until we
         * hardened this function to disallow 16-bit TLS segments. Fix
         * it up by interpreting these zeroed segments the way that they
         * were almost certainly intended to be interpreted.
         *
         * The correct way to ask for "no segment at all" is to specify
         * a user_desc that satisfies LDT_empty. To keep everything
         * working, we accept both.
         *
         * Note that there's a similar kludge in modify_ldt -- look at
         * the distinction between modes 1 and 0x11.
         */
        if (LDT_empty(info) || LDT_zero(info))
                return true;

        /*
         * espfix is required for 16-bit data segments, but espfix
         * only works for LDT segments.
         */
        if (!info->seg_32bit)
                return false;

        /* Only allow data segments in the TLS array. */
        if (info->contents > 1)
                return false;

        /*
         * Non-present segments with DPL 3 present an interesting attack
         * surface. The kernel should handle such segments correctly,
         * but TLS is very difficult to protect in a sandbox, so prevent
         * such segments from being created.
         *
         * If userspace needs to remove a TLS entry, it can still delete
         * it outright.
         */
        if (info->seg_not_present)
                return false;

        return true;
}

static void set_tls_desc(struct task_struct *p, int idx,
                         const struct user_desc *info, int n)
{
        struct thread_struct *t = &p->thread;
        struct desc_struct *desc = &t->tls_array[idx - GDT_ENTRY_TLS_MIN];
        int cpu;

        /*
         * We must not get preempted while modifying the TLS.
         */
        cpu = get_cpu();

        while (n-- > 0) {
                if (LDT_empty(info) || LDT_zero(info)) {
                        desc->a = desc->b = 0;
                } else {
                        fill_ldt(desc, info);

                        /*
                         * Always set the accessed bit so that the CPU
                         * doesn't try to write to the (read-only) GDT.
                         */
                        desc->type |= 1;
                }
                ++info;
                ++desc;
        }

        if (t == &current->thread)
                load_TLS(t, cpu);

        put_cpu();
}

/*
 * Set a given TLS descriptor:
 */
int do_set_thread_area(struct task_struct *p, int idx,
                       struct user_desc __user *u_info,
                       int can_allocate)
{
        struct user_desc info;
        unsigned short __maybe_unused sel, modified_sel;

        if (copy_from_user(&info, u_info, sizeof(info)))
                return -EFAULT;

        if (!tls_desc_okay(&info))
                return -EINVAL;

        if (idx == -1)
                idx = info.entry_number;

        /*
         * index -1 means the kernel should try to find and
         * allocate an empty descriptor:
         */
        if (idx == -1 && can_allocate) {
                idx = get_free_idx();
                if (idx < 0)
                        return idx;
                if (put_user(idx, &u_info->entry_number))
                        return -EFAULT;
        }

        if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
                return -EINVAL;

        set_tls_desc(p, idx, &info, 1);

        /*
         * If DS, ES, FS, or GS points to the modified segment, forcibly
         * refresh it. Only needed on x86_64 because x86_32 reloads them
         * on return to user mode.
         */
        modified_sel = (idx << 3) | 3;

        if (p == current) {
#ifdef CONFIG_X86_64
                savesegment(ds, sel);
                if (sel == modified_sel)
                        loadsegment(ds, sel);

                savesegment(es, sel);
                if (sel == modified_sel)
                        loadsegment(es, sel);

                savesegment(fs, sel);
                if (sel == modified_sel)
                        loadsegment(fs, sel);

                savesegment(gs, sel);
                if (sel == modified_sel)
                        load_gs_index(sel);
#endif

#ifdef CONFIG_X86_32_LAZY_GS
                savesegment(gs, sel);
                if (sel == modified_sel)
                        loadsegment(gs, sel);
#endif
        } else {
#ifdef CONFIG_X86_64
                if (p->thread.fsindex == modified_sel)
                        p->thread.fsbase = info.base_addr;

                if (p->thread.gsindex == modified_sel)
                        p->thread.gsbase = info.base_addr;
#endif
        }

        return 0;
}

SYSCALL_DEFINE1(set_thread_area, struct user_desc __user *, u_info)
{
        return do_set_thread_area(current, -1, u_info, 1);
}
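
/*
 * Illustrative only, not part of the kernel build: a minimal userspace
 * sketch of how the set_thread_area path above is typically exercised.
 * It assumes a 32-bit x86 process (base_addr is only 32 bits wide), the
 * uapi <asm/ldt.h> definition of struct user_desc, and a made-up
 * tls_block buffer. entry_number = -1 asks do_set_thread_area() to pick
 * a free GDT slot via get_free_idx() and write the chosen index back.
 *
 *	#include <asm/ldt.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static char tls_block[256];
 *
 *	int main(void)
 *	{
 *		struct user_desc desc;
 *
 *		memset(&desc, 0, sizeof(desc));
 *		desc.entry_number = -1;	// let the kernel allocate a slot
 *		desc.base_addr = (unsigned long)tls_block;
 *		desc.limit = sizeof(tls_block) - 1;
 *		desc.seg_32bit = 1;	// 16-bit TLS segments are rejected
 *		desc.useable = 1;
 *
 *		if (syscall(SYS_set_thread_area, &desc) != 0) {
 *			perror("set_thread_area");
 *			return 1;
 *		}
 *		printf("allocated GDT entry %u\n", desc.entry_number);
 *		return 0;
 *	}
 */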

/*
 * Get the current Thread-Local Storage area:
 */

static void fill_user_desc(struct user_desc *info, int idx,
                           const struct desc_struct *desc)
{
        memset(info, 0, sizeof(*info));
        info->entry_number = idx;
        info->base_addr = get_desc_base(desc);
        info->limit = get_desc_limit(desc);
        info->seg_32bit = desc->d;
        info->contents = desc->type >> 2;
        info->read_exec_only = !(desc->type & 2);
        info->limit_in_pages = desc->g;
        info->seg_not_present = !desc->p;
        info->useable = desc->avl;
#ifdef CONFIG_X86_64
        info->lm = desc->l;
#endif
}

int do_get_thread_area(struct task_struct *p, int idx,
                       struct user_desc __user *u_info)
{
        struct user_desc info;

        if (idx == -1 && get_user(idx, &u_info->entry_number))
                return -EFAULT;

        if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
                return -EINVAL;

        fill_user_desc(&info, idx,
                       &p->thread.tls_array[idx - GDT_ENTRY_TLS_MIN]);

        if (copy_to_user(u_info, &info, sizeof(info)))
                return -EFAULT;
        return 0;
}

SYSCALL_DEFINE1(get_thread_area, struct user_desc __user *, u_info)
{
        return do_get_thread_area(current, -1, u_info);
}
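
/*
 * Illustrative only, not part of the kernel build: a hedged userspace
 * sketch of reading a descriptor back through get_thread_area. "idx" is
 * assumed to be a previously allocated TLS slot (for instance the
 * entry_number returned by the set_thread_area sketch above); the kernel
 * fills in the remaining fields via fill_user_desc().
 *
 *	struct user_desc desc;
 *
 *	memset(&desc, 0, sizeof(desc));
 *	desc.entry_number = idx;	// previously allocated GDT slot
 *
 *	if (syscall(SYS_get_thread_area, &desc) != 0)
 *		perror("get_thread_area");
 *	else
 *		printf("base=%#x limit=%#x 32bit=%u\n",
 *		       desc.base_addr, desc.limit, desc.seg_32bit);
 */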

int regset_tls_active(struct task_struct *target,
                      const struct user_regset *regset)
{
        struct thread_struct *t = &target->thread;
        int n = GDT_ENTRY_TLS_ENTRIES;

        while (n > 0 && desc_empty(&t->tls_array[n - 1]))
                --n;
        return n;
}

int regset_tls_get(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   void *kbuf, void __user *ubuf)
{
        const struct desc_struct *tls;

        if (pos >= GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) ||
            (pos % sizeof(struct user_desc)) != 0 ||
            (count % sizeof(struct user_desc)) != 0)
                return -EINVAL;

        pos /= sizeof(struct user_desc);
        count /= sizeof(struct user_desc);

        tls = &target->thread.tls_array[pos];

        if (kbuf) {
                struct user_desc *info = kbuf;

                while (count-- > 0)
                        fill_user_desc(info++, GDT_ENTRY_TLS_MIN + pos++,
                                       tls++);
        } else {
                struct user_desc __user *u_info = ubuf;

                while (count-- > 0) {
                        struct user_desc info;

                        fill_user_desc(&info, GDT_ENTRY_TLS_MIN + pos++, tls++);
                        if (__copy_to_user(u_info++, &info, sizeof(info)))
                                return -EFAULT;
                }
        }

        return 0;
}

int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   const void *kbuf, const void __user *ubuf)
{
        struct user_desc infobuf[GDT_ENTRY_TLS_ENTRIES];
        const struct user_desc *info;
        int i;

        if (pos >= GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) ||
            (pos % sizeof(struct user_desc)) != 0 ||
            (count % sizeof(struct user_desc)) != 0)
                return -EINVAL;

        if (kbuf)
                info = kbuf;
        else if (__copy_from_user(infobuf, ubuf, count))
                return -EFAULT;
        else
                info = infobuf;

        for (i = 0; i < count / sizeof(struct user_desc); i++)
                if (!tls_desc_okay(info + i))
                        return -EINVAL;

        set_tls_desc(target,
                     GDT_ENTRY_TLS_MIN + (pos / sizeof(struct user_desc)),
                     info, count / sizeof(struct user_desc));

        return 0;
}
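
/*
 * Illustrative only, not part of the kernel build: a hedged sketch of how
 * a debugger might reach the regset callbacks above. It assumes a 32-bit
 * x86 tracee for which the NT_386_TLS register set is exposed, already
 * ptrace-attached and stopped, and a hypothetical "pid" variable naming it.
 *
 *	#include <asm/ldt.h>
 *	#include <elf.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/uio.h>
 *
 *	struct user_desc tls[3];	// GDT_ENTRY_TLS_ENTRIES on x86
 *	struct iovec iov = {
 *		.iov_base = tls,
 *		.iov_len  = sizeof(tls),
 *	};
 *
 *	if (ptrace(PTRACE_GETREGSET, pid, NT_386_TLS, &iov) == 0) {
 *		// iov.iov_len now holds the number of bytes copied; each
 *		// element was produced by fill_user_desc() via
 *		// regset_tls_get(). PTRACE_SETREGSET takes the same shape
 *		// and lands in regset_tls_set().
 *	}
 */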