/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_X86_XSAVE_H
#define __ASM_X86_XSAVE_H

#include <linux/uaccess.h>
#include <linux/types.h>

#include <asm/processor.h>
#include <asm/fpu/api.h>
#include <asm/user.h>

/* Bit 63 of XCR0 is reserved for future expansion */
#define XFEATURE_MASK_EXTEND	(~(XFEATURE_MASK_FPSSE | (1ULL << 63)))

#define XSTATE_CPUID		0x0000000d

#define FXSAVE_SIZE	512

#define XSAVE_HDR_SIZE	    64
#define XSAVE_HDR_OFFSET    FXSAVE_SIZE

#define XSAVE_YMM_SIZE	    256
#define XSAVE_YMM_OFFSET    (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET)

#define XSAVE_ALIGNMENT     64
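/*
 * Worked out from the constants above: the 512-byte legacy FXSAVE region
 * is followed by the 64-byte XSAVE header, so in the standard
 * (non-compacted) layout the YMM state starts at
 * XSAVE_YMM_OFFSET = 512 + 64 = byte 576 of the XSAVE buffer.
 */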
/* All currently supported user features */
#define XFEATURE_MASK_USER_SUPPORTED (XFEATURE_MASK_FP | \
				      XFEATURE_MASK_SSE | \
				      XFEATURE_MASK_YMM | \
				      XFEATURE_MASK_OPMASK | \
				      XFEATURE_MASK_ZMM_Hi256 | \
				      XFEATURE_MASK_Hi16_ZMM | \
				      XFEATURE_MASK_PKRU | \
				      XFEATURE_MASK_BNDREGS | \
				      XFEATURE_MASK_BNDCSR)

/*
 * Features which are restored when returning to user space.
 * PKRU is not restored on return to user space because PKRU
 * is switched eagerly in switch_to() and flush_thread()
 */
#define XFEATURE_MASK_USER_RESTORE	\
	(XFEATURE_MASK_USER_SUPPORTED & ~XFEATURE_MASK_PKRU)

/* All currently supported supervisor features */
#define XFEATURE_MASK_SUPERVISOR_SUPPORTED (XFEATURE_MASK_PASID)

/*
 * A supervisor state component may not always contain valuable information,
 * and its size may be huge. Saving/restoring such supervisor state components
 * at each context switch can cause high CPU and space overhead, which should
 * be avoided. Such supervisor state components should only be saved/restored
 * on demand. The on-demand supervisor features are set in this mask.
 *
 * Unlike the existing supported supervisor features, an independent supervisor
 * feature does not allocate a buffer in task->fpu, and the corresponding
 * supervisor state component cannot be saved/restored at each context switch.
 *
 * To support an independent supervisor feature, a developer should follow the
 * dos and don'ts as below:
 * - Do dynamically allocate a buffer for the supervisor state component.
 * - Do manually invoke the XSAVES/XRSTORS instruction to save/restore the
 *   state component to/from the buffer.
 * - Don't set the bit corresponding to the independent supervisor feature in
 *   IA32_XSS at run time, since it has been set at boot time.
 */
#define XFEATURE_MASK_INDEPENDENT (XFEATURE_MASK_LBR)
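/*
 * Illustrative sketch only, not kernel code: one way a user of an
 * independent supervisor component such as XFEATURE_MASK_LBR could
 * follow the dos and don'ts above.  The names (lbr_xsave_cache, buf)
 * are hypothetical and the size calculation is simplified; it assumes
 * the component is the only extended state saved into the buffer:
 *
 *	unsigned int size;
 *	struct xregs_state *buf;
 *
 *	// Dynamically allocate a 64-byte aligned buffer large enough for
 *	// the legacy area, the XSAVE header and the component itself.
 *	size = FXSAVE_SIZE + XSAVE_HDR_SIZE + xfeature_size(XFEATURE_LBR);
 *	lbr_xsave_cache = kmem_cache_create("lbr_xsave", size,
 *					    XSAVE_ALIGNMENT, 0, NULL);
 *	buf = kmem_cache_zalloc(lbr_xsave_cache, GFP_KERNEL);
 *
 *	// Save/restore on demand, e.g. around a context switch of the
 *	// owning task.  XSAVES fills in the XSTATE header, so a buffer
 *	// that has been saved once can later be handed to XRSTORS.
 *	// IA32_XSS is never written here; the LBR bit was set in it
 *	// once at boot.
 *	xsaves(buf, XFEATURE_MASK_LBR);
 *	...
 *	xrstors(buf, XFEATURE_MASK_LBR);
 */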
/*
 * Unsupported supervisor features. When a supervisor feature in this mask is
 * supported in the future, move it to the supported supervisor feature mask.
 */
#define XFEATURE_MASK_SUPERVISOR_UNSUPPORTED (XFEATURE_MASK_PT)

/* All supervisor states including supported and unsupported states. */
#define XFEATURE_MASK_SUPERVISOR_ALL (XFEATURE_MASK_SUPERVISOR_SUPPORTED | \
				      XFEATURE_MASK_INDEPENDENT | \
				      XFEATURE_MASK_SUPERVISOR_UNSUPPORTED)

#ifdef CONFIG_X86_64
#define REX_PREFIX	"0x48, "
#else
#define REX_PREFIX
#endif

extern u64 xfeatures_mask_all;

static inline u64 xfeatures_mask_supervisor(void)
{
	return xfeatures_mask_all & XFEATURE_MASK_SUPERVISOR_SUPPORTED;
}

/*
 * The xfeatures which are enabled in XCR0 and expected to be in ptrace
 * buffers and signal frames.
 */
static inline u64 xfeatures_mask_uabi(void)
{
	return xfeatures_mask_all & XFEATURE_MASK_USER_SUPPORTED;
}

/*
 * The xfeatures which are restored by the kernel when returning to user
 * mode. This is not necessarily the same as xfeatures_mask_uabi() as the
 * kernel does not manage all XCR0 enabled features via xsave/xrstor as
 * some of them have to be switched eagerly on context switch and exec().
 */
static inline u64 xfeatures_mask_restore_user(void)
{
	return xfeatures_mask_all & XFEATURE_MASK_USER_RESTORE;
}

static inline u64 xfeatures_mask_independent(void)
{
	if (!boot_cpu_has(X86_FEATURE_ARCH_LBR))
		return XFEATURE_MASK_INDEPENDENT & ~XFEATURE_MASK_LBR;

	return XFEATURE_MASK_INDEPENDENT;
}

extern u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];

extern void __init update_regset_xstate_info(unsigned int size,
					     u64 xstate_mask);

void *get_xsave_addr(struct xregs_state *xsave, int xfeature_nr);
int xfeature_size(int xfeature_nr);
int copy_uabi_from_kernel_to_xstate(struct xregs_state *xsave, const void *kbuf);
int copy_sigframe_from_user_to_xstate(struct xregs_state *xsave, const void __user *ubuf);

void xsaves(struct xregs_state *xsave, u64 mask);
void xrstors(struct xregs_state *xsave, u64 mask);

enum xstate_copy_mode {
	XSTATE_COPY_FP,
	XSTATE_COPY_FX,
	XSTATE_COPY_XSAVE,
};

struct membuf;
void copy_xstate_to_uabi_buf(struct membuf to, struct xregs_state *xsave,
			     enum xstate_copy_mode mode);

#endif