xref: /openbmc/linux/arch/x86/include/asm/set_memory.h (revision b8d312aa)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_SET_MEMORY_H
#define _ASM_X86_SET_MEMORY_H

#include <asm/page.h>
#include <asm-generic/set_memory.h>

/*
 * The set_memory_* API can be used to change various attributes of a virtual
 * address range. The attributes include:
 * Cacheability  : UnCached, WriteCombining, WriteThrough, WriteBack
 * Executability : eXecutable, NoteXecutable
 * Read/Write    : ReadOnly, ReadWrite
 * Presence      : NotPresent
 * Encryption    : Encrypted, Decrypted
 *
 * Within a category, the attributes are mutually exclusive.
 *
 * The implementation of this API will take care of various aspects that
 * are associated with changing such attributes, such as:
 * - Flushing TLBs
 * - Flushing CPU caches
 * - Making sure aliases of the memory behind the mapping don't violate
 *   coherency rules as defined by the CPU in the system.
 *
 * What this API does not do:
 * - Provide exclusion between various callers - including callers that
 *   operate on other mappings of the same physical page
 * - Restore default attributes when a page is freed
 * - Guarantee that mappings other than the requested one are left in
 *   any particular state, beyond not violating the rules of the CPU in
 *   the system. Do not depend on any effects on other mappings; CPU
 *   models other than the one you have may have more relaxed rules.
 * The caller is required to take care of these.
 */
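/*
 * Example (illustration only, not part of this header): a minimal sketch
 * of how a hypothetical driver might use this API on a buffer it owns.
 * The flow and sizes are made up; set_memory_wc()/set_memory_wb() are the
 * calls declared below, the allocation helpers come from <linux/gfp.h>.
 * Addresses must be page-aligned kernel virtual addresses and sizes are
 * given in pages.
 *
 *	unsigned long addr = __get_free_pages(GFP_KERNEL, 2);	// 4 pages
 *
 *	// Switch the linear mapping of the buffer to write-combining.
 *	if (set_memory_wc(addr, 4)) {
 *		free_pages(addr, 2);
 *		return -ENOMEM;
 *	}
 *	...
 *	// Restore the default write-back attribute before freeing.
 *	set_memory_wb(addr, 4);
 *	free_pages(addr, 2);
 */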

int _set_memory_uc(unsigned long addr, int numpages);
int _set_memory_wc(unsigned long addr, int numpages);
int _set_memory_wt(unsigned long addr, int numpages);
int _set_memory_wb(unsigned long addr, int numpages);
int set_memory_uc(unsigned long addr, int numpages);
int set_memory_wc(unsigned long addr, int numpages);
int set_memory_wt(unsigned long addr, int numpages);
int set_memory_wb(unsigned long addr, int numpages);
int set_memory_np(unsigned long addr, int numpages);
int set_memory_4k(unsigned long addr, int numpages);
int set_memory_encrypted(unsigned long addr, int numpages);
int set_memory_decrypted(unsigned long addr, int numpages);
int set_memory_np_noalias(unsigned long addr, int numpages);

int set_memory_array_uc(unsigned long *addr, int addrinarray);
int set_memory_array_wc(unsigned long *addr, int addrinarray);
int set_memory_array_wt(unsigned long *addr, int addrinarray);
int set_memory_array_wb(unsigned long *addr, int addrinarray);

int set_pages_array_uc(struct page **pages, int addrinarray);
int set_pages_array_wc(struct page **pages, int addrinarray);
int set_pages_array_wt(struct page **pages, int addrinarray);
int set_pages_array_wb(struct page **pages, int addrinarray);

/*
 * For legacy compatibility with the old APIs, a few functions
 * are provided that work on a "struct page".
 * These functions operate ONLY on the 1:1 kernel mapping of the
 * memory that the struct page represents, and internally just
 * call the set_memory_* function. See the description of the
 * set_memory_* function for more details on conventions.
 *
 * These APIs should be considered *deprecated* and are likely going to
 * be removed in the future.
 * The reason for this is that they implicitly operate on the 1:1
 * mapping only, which makes them not generally useful.
 *
 * Specifically, many users of the old APIs had a virtual address,
 * called virt_to_page() or vmalloc_to_page() on that address to
 * get a struct page* that the old API required.
 * To convert these cases, use set_memory_*() on the original
 * virtual address; do not use these functions.
 */
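/*
 * Conversion example (illustration only): a caller that starts from a
 * vmalloc() address. The old pattern went through a struct page and
 * therefore only changed the 1:1 mapping, not the vmalloc mapping the
 * caller actually uses:
 *
 *	set_pages_ro(vmalloc_to_page(vaddr), 1);	// old, deprecated
 *
 * The preferred form operates on the virtual address the caller has and
 * lets the implementation take care of aliases:
 *
 *	set_memory_ro((unsigned long)vaddr, 1);		// preferred
 */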

int set_pages_uc(struct page *page, int numpages);
int set_pages_wb(struct page *page, int numpages);
int set_pages_x(struct page *page, int numpages);
int set_pages_nx(struct page *page, int numpages);
int set_pages_ro(struct page *page, int numpages);
int set_pages_rw(struct page *page, int numpages);

int set_direct_map_invalid_noflush(struct page *page);
int set_direct_map_default_noflush(struct page *page);

extern int kernel_set_to_readonly;
void set_kernel_text_rw(void);
void set_kernel_text_ro(void);

#ifdef CONFIG_X86_64
static inline int set_mce_nospec(unsigned long pfn)
{
	unsigned long decoy_addr;
	int rc;

	/*
	 * Mark the linear address as UC to make sure we don't log more
	 * errors because of speculative access to the page.
	 * We would like to just call:
	 *      set_memory_uc((unsigned long)pfn_to_kaddr(pfn), 1);
	 * but doing that would radically increase the odds of a
	 * speculative access to the poison page because we'd have
	 * the virtual address of the kernel 1:1 mapping sitting
	 * around in registers.
	 * Instead we get tricky.  We create a non-canonical address
	 * that looks just like the one we want, but has bit 63 flipped.
	 * This relies on set_memory_uc() properly sanitizing any __pa()
	 * results with __PHYSICAL_MASK or PTE_PFN_MASK.
	 */
	decoy_addr = (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT(63));

	rc = set_memory_uc(decoy_addr, 1);
	if (rc)
		pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);
	return rc;
}
#define set_mce_nospec set_mce_nospec
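
/*
 * Worked example of the decoy address above (illustration only; the pfn
 * is made up and the default 4-level PAGE_OFFSET of 0xffff888000000000
 * is assumed):
 *
 *	pfn                   = 0x12345
 *	pfn << PAGE_SHIFT     = 0x0000000012345000
 *	PAGE_OFFSET ^ BIT(63) = 0x7fff888000000000	(non-canonical)
 *	decoy_addr            = 0x7fff888012345000
 *
 * The resulting address can never be dereferenced, yet its __pa() result,
 * once masked with __PHYSICAL_MASK as described above, names the same
 * physical page as the real 1:1 mapping of the pfn.
 */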

/* Restore full speculative operation to the pfn. */
static inline int clear_mce_nospec(unsigned long pfn)
{
	return set_memory_wb((unsigned long) pfn_to_kaddr(pfn), 1);
}
#define clear_mce_nospec clear_mce_nospec
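
/*
 * Usage sketch (illustration only): the two helpers above are meant to be
 * used as a pair. A hypothetical error-handling path might do:
 *
 *	set_mce_nospec(pfn);	// page poisoned: make its 1:1 mapping UC
 *				// so speculation stops logging errors
 *	...
 *	clear_mce_nospec(pfn);	// poison cleared: restore write-back and
 *				// full speculative access
 */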
#else
/*
 * Few people would run a 32-bit kernel on a machine that supports
 * recoverable errors because they have too much memory to boot 32-bit.
 */
#endif

#endif /* _ASM_X86_SET_MEMORY_H */