/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_SEGMENT_H
#define _ASM_X86_SEGMENT_H

#include <linux/const.h>
#include <asm/alternative.h>

/*
 * Constructor for a conventional segment GDT (or LDT) entry.
 * This is a macro so it can be used in initializers.
 */
#define GDT_ENTRY(flags, base, limit)			\
	((((base)  & _AC(0xff000000,ULL)) << (56-24)) |	\
	 (((flags) & _AC(0x0000f0ff,ULL)) << 40) |	\
	 (((limit) & _AC(0x000f0000,ULL)) << (48-16)) |	\
	 (((base)  & _AC(0x00ffffff,ULL)) << 16) |	\
	 (((limit) & _AC(0x0000ffff,ULL))))
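/*
 * Example (a sketch mirroring the boot GDT in arch/x86/boot/pm.c): a flat
 * 4 GB code segment with base 0 is
 *
 *	GDT_ENTRY(0xc09b, 0, 0xfffff)
 *
 * where flags 0xc09b decode to G (4K granularity), D/B (32-bit), P
 * (present), DPL 0 and type 0xb (code, execute/read, accessed); with G
 * set, the 0xfffff limit covers the full 4 GB. The matching flat data
 * segment uses flags 0xc093 (type 0x3: data, read/write, accessed).
 */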
/* Simple and small GDT entries for booting only: */

#define GDT_ENTRY_BOOT_CS	2
#define GDT_ENTRY_BOOT_DS	3
#define GDT_ENTRY_BOOT_TSS	4
#define __BOOT_CS		(GDT_ENTRY_BOOT_CS*8)
#define __BOOT_DS		(GDT_ENTRY_BOOT_DS*8)
#define __BOOT_TSS		(GDT_ENTRY_BOOT_TSS*8)

/*
 * Bottom two bits of selector give the ring
 * privilege level
 */
#define SEGMENT_RPL_MASK	0x3

/*
 * When running on Xen PV, the actual privilege level of the kernel is 1,
 * not 0. Testing the Requested Privilege Level in a segment selector to
 * determine whether the context is user mode or kernel mode with
 * SEGMENT_RPL_MASK is wrong because the PV kernel's privilege level
 * matches the 0x3 mask.
 *
 * Testing with USER_SEGMENT_RPL_MASK is valid for both native and Xen PV
 * kernels because privilege level 2 is never used.
 */
#define USER_SEGMENT_RPL_MASK	0x2

/* User mode is privilege level 3: */
#define USER_RPL		0x3

/* Bit 2 is Table Indicator (TI): selects between LDT or GDT */
#define SEGMENT_TI_MASK		0x4
/* LDT segment has TI set ... */
#define SEGMENT_LDT		0x4
/* ... GDT has it cleared */
#define SEGMENT_GDT		0x0

#define GDT_ENTRY_INVALID_SEG	0

#ifdef CONFIG_X86_32
/*
 * The layout of the per-CPU GDT under Linux:
 *
 *   0 - null				<=== cacheline #1
 *   1 - reserved
 *   2 - reserved
 *   3 - reserved
 *
 *   4 - unused				<=== cacheline #2
 *   5 - unused
 *
 *  ------- start of TLS (Thread-Local Storage) segments:
 *
 *   6 - TLS segment #1			[ glibc's TLS segment ]
 *   7 - TLS segment #2			[ Wine's %fs Win32 segment ]
 *   8 - TLS segment #3			<=== cacheline #3
 *   9 - reserved
 *  10 - reserved
 *  11 - reserved
 *
 *  ------- start of kernel segments:
 *
 *  12 - kernel code segment		<=== cacheline #4
 *  13 - kernel data segment
 *  14 - default user CS
 *  15 - default user DS
 *  16 - TSS				<=== cacheline #5
 *  17 - LDT
 *  18 - PNPBIOS support (16->32 gate)
 *  19 - PNPBIOS support
 *  20 - PNPBIOS support		<=== cacheline #6
 *  21 - PNPBIOS support
 *  22 - PNPBIOS support
 *  23 - APM BIOS support
 *  24 - APM BIOS support		<=== cacheline #7
 *  25 - APM BIOS support
 *
 *  26 - ESPFIX small SS
 *  27 - per-cpu			[ offset to per-cpu data area ]
 *  28 - stack_canary-20		[ for stack protector ]	<=== cacheline #8
 *  29 - unused
 *  30 - unused
 *  31 - TSS for double fault handler
 */
#define GDT_ENTRY_TLS_MIN		6
#define GDT_ENTRY_TLS_MAX		(GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)

#define GDT_ENTRY_KERNEL_CS		12
#define GDT_ENTRY_KERNEL_DS		13
#define GDT_ENTRY_DEFAULT_USER_CS	14
#define GDT_ENTRY_DEFAULT_USER_DS	15
#define GDT_ENTRY_TSS			16
#define GDT_ENTRY_LDT			17
#define GDT_ENTRY_PNPBIOS_CS32		18
#define GDT_ENTRY_PNPBIOS_CS16		19
#define GDT_ENTRY_PNPBIOS_DS		20
#define GDT_ENTRY_PNPBIOS_TS1		21
#define GDT_ENTRY_PNPBIOS_TS2		22
#define GDT_ENTRY_APMBIOS_BASE		23

#define GDT_ENTRY_ESPFIX_SS		26
#define GDT_ENTRY_PERCPU		27
#define GDT_ENTRY_STACK_CANARY		28

#define GDT_ENTRY_DOUBLEFAULT_TSS	31

/*
 * Number of entries in the GDT table:
 */
#define GDT_ENTRIES			32

/*
 * Segment selector values corresponding to the above entries:
 */

#define __KERNEL_CS			(GDT_ENTRY_KERNEL_CS*8)
#define __KERNEL_DS			(GDT_ENTRY_KERNEL_DS*8)
#define __USER_DS			(GDT_ENTRY_DEFAULT_USER_DS*8 + 3)
#define __USER_CS			(GDT_ENTRY_DEFAULT_USER_CS*8 + 3)
#define __ESPFIX_SS			(GDT_ENTRY_ESPFIX_SS*8)

/* segment for calling fn: */
#define PNP_CS32			(GDT_ENTRY_PNPBIOS_CS32*8)
/* code segment for BIOS: */
#define PNP_CS16			(GDT_ENTRY_PNPBIOS_CS16*8)

/* "Is this PNP code selector (PNP_CS32 or PNP_CS16)?" */
#define SEGMENT_IS_PNP_CODE(x)		(((x) & 0xf4) == PNP_CS32)

/* data segment for BIOS: */
#define PNP_DS				(GDT_ENTRY_PNPBIOS_DS*8)
/* transfer data segment: */
#define PNP_TS1				(GDT_ENTRY_PNPBIOS_TS1*8)
/* another data segment: */
#define PNP_TS2				(GDT_ENTRY_PNPBIOS_TS2*8)

#ifdef CONFIG_SMP
# define __KERNEL_PERCPU		(GDT_ENTRY_PERCPU*8)
#else
# define __KERNEL_PERCPU		0
#endif

#ifdef CONFIG_STACKPROTECTOR
# define __KERNEL_STACK_CANARY		(GDT_ENTRY_STACK_CANARY*8)
#else
# define __KERNEL_STACK_CANARY		0
#endif

#else /* 64-bit: */

#include <asm/cache.h>

#define GDT_ENTRY_KERNEL32_CS		1
#define GDT_ENTRY_KERNEL_CS		2
#define GDT_ENTRY_KERNEL_DS		3

/*
 * We cannot use the same code segment descriptor for user and kernel mode,
 * not even in long flat mode, because of different DPL.
 *
 * GDT layout to get 64-bit SYSCALL/SYSRET support right. SYSRET hardcodes
 * selectors:
 *
 *   if returning to 32-bit userspace: cs = STAR.SYSRET_CS,
 *   if returning to 64-bit userspace: cs = STAR.SYSRET_CS+16,
 *
 *   ss = STAR.SYSRET_CS+8 (in either case)
 *
 * thus USER_DS should be between 32-bit and 64-bit code selectors:
 */
#define GDT_ENTRY_DEFAULT_USER32_CS	4
#define GDT_ENTRY_DEFAULT_USER_DS	5
#define GDT_ENTRY_DEFAULT_USER_CS	6

/* Needs two entries */
#define GDT_ENTRY_TSS			8
/* Needs two entries */
#define GDT_ENTRY_LDT			10

#define GDT_ENTRY_TLS_MIN		12
#define GDT_ENTRY_TLS_MAX		14

#define GDT_ENTRY_CPUNODE		15

/*
 * Number of entries in the GDT table:
 */
#define GDT_ENTRIES			16

/*
 * Segment selector values corresponding to the above entries:
 *
 * Note, selectors also need to have a correct RPL,
 * expressed with the +3 value for user-space selectors:
 */
#define __KERNEL32_CS			(GDT_ENTRY_KERNEL32_CS*8)
#define __KERNEL_CS			(GDT_ENTRY_KERNEL_CS*8)
#define __KERNEL_DS			(GDT_ENTRY_KERNEL_DS*8)
#define __USER32_CS			(GDT_ENTRY_DEFAULT_USER32_CS*8 + 3)
#define __USER_DS			(GDT_ENTRY_DEFAULT_USER_DS*8 + 3)
#define __USER32_DS			__USER_DS
#define __USER_CS			(GDT_ENTRY_DEFAULT_USER_CS*8 + 3)
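/*
 * Sanity check of the SYSRET layout above, using the selector values
 * these entries produce:
 *
 *	__USER32_CS = 4*8 + 3 = 35	(STAR.SYSRET_CS)
 *	__USER_DS   = 5*8 + 3 = 43	(STAR.SYSRET_CS + 8)
 *	__USER_CS   = 6*8 + 3 = 51	(STAR.SYSRET_CS + 16)
 *
 * i.e. SS sits exactly 8 above the 32-bit code selector and the 64-bit
 * code selector sits exactly 16 above it, as SYSRET requires.
 */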
#define __CPUNODE_SEG			(GDT_ENTRY_CPUNODE*8 + 3)

#endif

#ifndef CONFIG_PARAVIRT_XXL
# define get_kernel_rpl()		0
#endif

#define IDT_ENTRIES			256
#define NUM_EXCEPTION_VECTORS		32

/* Bitmask of exception vectors which push an error code on the stack: */
#define EXCEPTION_ERRCODE_MASK		0x00027d00

#define GDT_SIZE			(GDT_ENTRIES*8)
#define GDT_ENTRY_TLS_ENTRIES		3
#define TLS_SIZE			(GDT_ENTRY_TLS_ENTRIES * 8)

#ifdef CONFIG_X86_64

/* Bit size and mask of CPU number stored in the per CPU data (and TSC_AUX) */
#define VDSO_CPUNODE_BITS		12
#define VDSO_CPUNODE_MASK		0xfff

#ifndef __ASSEMBLY__

/* Helper functions to store/load CPU and node numbers */

static inline unsigned long vdso_encode_cpunode(int cpu, unsigned long node)
{
	return (node << VDSO_CPUNODE_BITS) | cpu;
}

static inline void vdso_read_cpunode(unsigned *cpu, unsigned *node)
{
	unsigned int p;

	/*
	 * Load CPU and node number from the GDT.  LSL is faster than RDTSCP
	 * and works on all CPUs.  This is volatile so that it orders
	 * correctly with respect to barrier() and to keep GCC from cleverly
	 * hoisting it out of the calling function.
	 *
	 * If RDPID is available, use it.
	 */
	alternative_io ("lsl %[seg],%[p]",
			".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */
			X86_FEATURE_RDPID,
			[p] "=a" (p), [seg] "r" (__CPUNODE_SEG));

	if (cpu)
		*cpu = (p & VDSO_CPUNODE_MASK);
	if (node)
		*node = (p >> VDSO_CPUNODE_BITS);
}
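/*
 * Worked example (hypothetical values): for CPU 5 on node 1, the kernel
 * stores vdso_encode_cpunode(5, 1) == (1 << 12) | 5 == 0x1005 in the
 * limit of the CPUNODE GDT entry (and in TSC_AUX for RDPID);
 * vdso_read_cpunode() then recovers cpu = 0x1005 & 0xfff = 5 and
 * node = 0x1005 >> 12 = 1.
 */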
#endif /* !__ASSEMBLY__ */
#endif /* CONFIG_X86_64 */

#ifdef __KERNEL__

/*
 * early_idt_handler_array is an array of entry points referenced in the
 * early IDT.  For simplicity, it's a real array with one entry point
 * every nine bytes.  That leaves room for an optional 'push $0' if the
 * vector has no error code (two bytes), a 'push $vector_number' (two
 * bytes), and a jump to the common entry code (up to five bytes).
 */
#define EARLY_IDT_HANDLER_SIZE 9

/*
 * xen_early_idt_handler_array is for Xen pv guests: for each entry in
 * early_idt_handler_array it contains a prequel in the form of
 * pop %rcx; pop %r11; jmp early_idt_handler_array[i]; summing up to
 * max 8 bytes.
 */
#define XEN_EARLY_IDT_HANDLER_SIZE 8

#ifndef __ASSEMBLY__

extern const char early_idt_handler_array[NUM_EXCEPTION_VECTORS][EARLY_IDT_HANDLER_SIZE];
extern void early_ignore_irq(void);

#ifdef CONFIG_XEN_PV
extern const char xen_early_idt_handler_array[NUM_EXCEPTION_VECTORS][XEN_EARLY_IDT_HANDLER_SIZE];
#endif

/*
 * Load a segment. Fall back on loading the zero segment if something goes
 * wrong.  This variant assumes that loading zero fully clears the segment.
 * This is always the case on Intel CPUs and, even on 64-bit AMD CPUs, any
 * failure to fully clear the cached descriptor is only observable for
 * FS and GS.
 */
#define __loadsegment_simple(seg, value)				\
do {									\
	unsigned short __val = (value);					\
									\
	asm volatile("						\n"	\
		     "1:	movl %k0,%%" #seg "		\n"	\
									\
		     ".section .fixup,\"ax\"			\n"	\
		     "2:	xorl %k0,%k0			\n"	\
		     "		jmp 1b				\n"	\
		     ".previous					\n"	\
									\
		     _ASM_EXTABLE(1b, 2b)				\
									\
		     : "+r" (__val) : : "memory");			\
} while (0)

#define __loadsegment_ss(value) __loadsegment_simple(ss, (value))
#define __loadsegment_ds(value) __loadsegment_simple(ds, (value))
#define __loadsegment_es(value) __loadsegment_simple(es, (value))

#ifdef CONFIG_X86_32

/*
 * On 32-bit systems, the hidden parts of FS and GS are unobservable if
 * the selector is NULL, so there's no funny business here.
 */
#define __loadsegment_fs(value) __loadsegment_simple(fs, (value))
#define __loadsegment_gs(value) __loadsegment_simple(gs, (value))

#else

static inline void __loadsegment_fs(unsigned short value)
{
	asm volatile("						\n"
		     "1:	movw %0, %%fs			\n"
		     "2:					\n"

		     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_clear_fs)

		     : : "rm" (value) : "memory");
}

/* __loadsegment_gs is intentionally undefined.  Use load_gs_index instead. */

#endif

#define loadsegment(seg, value) __loadsegment_ ## seg (value)

/*
 * Save a segment register away:
 */
#define savesegment(seg, value)				\
	asm("mov %%" #seg ",%0":"=r" (value) : : "memory")

/*
 * x86-32 user GS accessors:
 */
#ifdef CONFIG_X86_32
# ifdef CONFIG_X86_32_LAZY_GS
#  define get_user_gs(regs)	(u16)({ unsigned long v; savesegment(gs, v); v; })
#  define set_user_gs(regs, v)	loadsegment(gs, (unsigned long)(v))
#  define task_user_gs(tsk)	((tsk)->thread.gs)
#  define lazy_save_gs(v)	savesegment(gs, (v))
#  define lazy_load_gs(v)	loadsegment(gs, (v))
# else	/* X86_32_LAZY_GS */
#  define get_user_gs(regs)	(u16)((regs)->gs)
#  define set_user_gs(regs, v)	do { (regs)->gs = (v); } while (0)
#  define task_user_gs(tsk)	(task_pt_regs(tsk)->gs)
#  define lazy_save_gs(v)	do { } while (0)
#  define lazy_load_gs(v)	do { } while (0)
# endif	/* X86_32_LAZY_GS */
#endif	/* X86_32 */

#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */

#endif /* _ASM_X86_SEGMENT_H */