/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_SET_MEMORY_H
#define _ASM_X86_SET_MEMORY_H

#include <linux/mm.h>
#include <asm/page.h>
#include <asm-generic/set_memory.h>

/*
 * The set_memory_* API can be used to change various attributes of a virtual
 * address range. The attributes include:
 * Cacheability  : UnCached, WriteCombining, WriteThrough, WriteBack
 * Executability : eXecutable, NoteXecutable
 * Read/Write    : ReadOnly, ReadWrite
 * Presence      : NotPresent
 * Encryption    : Encrypted, Decrypted
 *
 * Within a category, the attributes are mutually exclusive.
 *
 * The implementation of this API will take care of various aspects that
 * are associated with changing such attributes, such as:
 * - Flushing TLBs
 * - Flushing CPU caches
 * - Making sure aliases of the memory behind the mapping don't violate
 *   coherency rules as defined by the CPU in the system.
 *
 * What this API does not do:
 * - Provide exclusion between various callers - including callers that
 *   operate on other mappings of the same physical page
 * - Restore default attributes when a page is freed
 * - Guarantee anything about the state of mappings other than the
 *   requested one, beyond that they do not violate the coherency rules
 *   of the CPU you are running on. Do not depend on any effects on
 *   other mappings; CPUs other than the one you have may have more
 *   relaxed rules.
 * The caller is required to take care of these.
 * (An illustrative usage sketch follows the declarations below.)
 */

int __set_memory_prot(unsigned long addr, int numpages, pgprot_t prot);
int _set_memory_uc(unsigned long addr, int numpages);
int _set_memory_wc(unsigned long addr, int numpages);
int _set_memory_wt(unsigned long addr, int numpages);
int _set_memory_wb(unsigned long addr, int numpages);
int set_memory_uc(unsigned long addr, int numpages);
int set_memory_wc(unsigned long addr, int numpages);
int set_memory_wb(unsigned long addr, int numpages);
int set_memory_np(unsigned long addr, int numpages);
int set_memory_4k(unsigned long addr, int numpages);
int set_memory_encrypted(unsigned long addr, int numpages);
int set_memory_decrypted(unsigned long addr, int numpages);
int set_memory_np_noalias(unsigned long addr, int numpages);
int set_memory_nonglobal(unsigned long addr, int numpages);
int set_memory_global(unsigned long addr, int numpages);

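/*
 * Illustrative usage (a sketch, not part of this header's contract):
 * write-protect one vmalloc'ed page and restore write access before
 * freeing it. set_memory_ro()/set_memory_rw() are declared in
 * <asm-generic/set_memory.h>; all of these helpers return 0 on success
 * and a negative error code on failure.
 *
 *	void *buf = vmalloc(PAGE_SIZE);
 *
 *	if (buf && !set_memory_ro((unsigned long)buf, 1)) {
 *		(read-only use of buf)
 *		set_memory_rw((unsigned long)buf, 1);
 *	}
 *	vfree(buf);
 */
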
int set_pages_array_uc(struct page **pages, int addrinarray);
int set_pages_array_wc(struct page **pages, int addrinarray);
int set_pages_array_wt(struct page **pages, int addrinarray);
int set_pages_array_wb(struct page **pages, int addrinarray);

/*
 * For legacy compatibility with the old APIs, a few functions
 * are provided that work on a "struct page".
 * These functions operate ONLY on the 1:1 kernel mapping of the
 * memory that the struct page represents, and internally just
 * call the corresponding set_memory_*() function. See the
 * description of the set_memory_* API above for more details on
 * conventions.
 *
 * These APIs should be considered *deprecated* and are likely going to
 * be removed in the future.
 * The reason for this is the implicit operation on the 1:1 mapping only,
 * making this not a generally useful API.
 *
 * Specifically, many users of the old APIs had a virtual address and
 * called virt_to_page() or vmalloc_to_page() on that address to
 * get the struct page * that the old API required.
 * To convert these cases, use set_memory_*() on the original
 * virtual address instead of these functions, as in the illustrative
 * sketch below.
 */

int set_pages_uc(struct page *page, int numpages);
int set_pages_wb(struct page *page, int numpages);
int set_pages_ro(struct page *page, int numpages);
int set_pages_rw(struct page *page, int numpages);

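/*
 * Illustrative conversion (a sketch; "vaddr" is a hypothetical
 * vmalloc()ed address held by the caller):
 *
 * Deprecated - changes the 1:1 mapping of the backing page, NOT the
 * vmalloc alias the caller actually dereferences:
 *	set_pages_ro(vmalloc_to_page(vaddr), 1);
 *
 * Preferred - changes the mapping the caller actually uses:
 *	set_memory_ro((unsigned long)vaddr, 1);
 */
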
int set_direct_map_invalid_noflush(struct page *page);
int set_direct_map_default_noflush(struct page *page);
bool kernel_page_present(struct page *page);
void notify_range_enc_status_changed(unsigned long vaddr, int npages, bool enc);

extern int kernel_set_to_readonly;

#ifdef CONFIG_X86_64
/*
 * Prevent speculative access to the page by either unmapping
 * it (if we do not require access to any part of the page) or
 * marking it uncacheable (if we want to try to retrieve data
 * from non-poisoned lines in the page).
 */
static inline int set_mce_nospec(unsigned long pfn, bool unmap)
{
	unsigned long decoy_addr;
	int rc;

	/* SGX pages are not in the 1:1 map */
	if (arch_is_platform_page(pfn << PAGE_SHIFT))
		return 0;
	/*
	 * We would like to just call:
	 *      set_memory_XX((unsigned long)pfn_to_kaddr(pfn), 1);
	 * but doing that would radically increase the odds of a
	 * speculative access to the poison page because we'd have
	 * the virtual address of the kernel 1:1 mapping sitting
	 * around in registers.
	 * Instead we get tricky.  We create a non-canonical address
	 * that looks just like the one we want, but has bit 63 flipped.
	 * This relies on set_memory_XX() properly sanitizing any __pa()
	 * results with __PHYSICAL_MASK or PTE_PFN_MASK.
	 */
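	/*
	 * For example, with the default 4-level PAGE_OFFSET of
	 * 0xffff888000000000, flipping bit 63 yields the non-canonical
	 * 0x7fff888000000000. (KASLR may move PAGE_OFFSET, but bit 63
	 * of a canonical direct-map address is always set.)
	 */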
	decoy_addr = (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT(63));

	if (unmap)
		rc = set_memory_np(decoy_addr, 1);
	else
		rc = set_memory_uc(decoy_addr, 1);
	if (rc)
		pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);
	return rc;
}
#define set_mce_nospec set_mce_nospec

/* Restore full speculative operation to the pfn. */
static inline int clear_mce_nospec(unsigned long pfn)
{
	return set_memory_wb((unsigned long) pfn_to_kaddr(pfn), 1);
}
#define clear_mce_nospec clear_mce_nospec
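/*
 * Illustrative use (a sketch only, not the actual memory-failure code):
 * unmap a poisoned pfn to stop all speculative access, then restore
 * full cacheable access once the page has been recovered.
 *
 *	if (set_mce_nospec(pfn, true))
 *		pr_warn("pfn 0x%lx still mapped\n", pfn);
 *	(page recovered)
 *	clear_mce_nospec(pfn);
 */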
#else
/*
 * Few people would run a 32-bit kernel on a machine that supports
 * recoverable errors because they have too much memory to boot 32-bit.
 */
#endif

#endif /* _ASM_X86_SET_MEMORY_H */