// SPDX-License-Identifier: GPL-2.0-only

#include <linux/efi.h>
#include <linux/memblock.h>
#include <linux/spinlock.h>
#include <linux/nmi.h>
#include <asm/unaccepted_memory.h>

/* Protects unaccepted memory bitmap and accepting_list */
static DEFINE_SPINLOCK(unaccepted_memory_lock);

struct accept_range {
	struct list_head list;
	unsigned long start;
	unsigned long end;
};

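/* Ranges currently being accepted; used to serialize concurrent accept requests */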
static LIST_HEAD(accepting_list);

/*
 * accept_memory() -- Consult bitmap and accept the memory if needed.
 *
 * Only memory that is explicitly marked as unaccepted in the bitmap requires
 * an action. All the remaining memory is implicitly accepted and doesn't need
 * acceptance.
 *
 * No need to accept:
 *  - anything if the system has no unaccepted table;
 *  - memory that is below phys_base;
 *  - memory that is above the memory addressable by the bitmap;
 */
void accept_memory(phys_addr_t start, phys_addr_t end)
{
	struct efi_unaccepted_memory *unaccepted;
	unsigned long range_start, range_end;
	struct accept_range range, *entry;
	unsigned long flags;
	u64 unit_size;

	unaccepted = efi_get_unaccepted_table();
	if (!unaccepted)
		return;

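	/* Each bit in the bitmap covers unit_size bytes of physical memory */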
	unit_size = unaccepted->unit_size;

	/*
	 * Only care for the part of the range that is represented
	 * in the bitmap.
	 */
	if (start < unaccepted->phys_base)
		start = unaccepted->phys_base;
	if (end < unaccepted->phys_base)
		return;

	/* Translate to offsets from the beginning of the bitmap */
	start -= unaccepted->phys_base;
	end -= unaccepted->phys_base;

	/*
	 * load_unaligned_zeropad() can lead to unwanted loads across page
	 * boundaries. The unwanted loads are typically harmless. But, they
	 * might be made to totally unrelated or even unmapped memory.
	 * load_unaligned_zeropad() relies on exception fixup (#PF, #GP and now
	 * #VE) to recover from these unwanted loads.
	 *
	 * But, this approach does not work for unaccepted memory. For TDX, a
	 * load from unaccepted memory will not lead to a recoverable exception
	 * within the guest. The guest will exit to the VMM where the only
	 * recourse is to terminate the guest.
	 *
	 * There are two parts to fix this issue and comprehensively avoid
	 * access to unaccepted memory. Together these ensure that an extra
	 * "guard" page is accepted in addition to the memory that needs to be
	 * used:
	 *
	 * 1. Implicitly extend the range_contains_unaccepted_memory(start, end)
	 *    checks up to end+unit_size if 'end' is aligned on a unit_size
	 *    boundary.
	 *
	 * 2. Implicitly extend accept_memory(start, end) to end+unit_size if
	 *    'end' is aligned on a unit_size boundary. (immediately following
	 *    this comment)
	 */
	if (!(end % unit_size))
		end += unit_size;

	/* Make sure not to overrun the bitmap */
	if (end > unaccepted->size * unit_size * BITS_PER_BYTE)
		end = unaccepted->size * unit_size * BITS_PER_BYTE;

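	/*
	 * Convert byte offsets into bitmap bit indices. range.end is rounded
	 * up so that a partially covered unit is still accepted.
	 */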
	range.start = start / unit_size;
	range.end = DIV_ROUND_UP(end, unit_size);
retry:
	spin_lock_irqsave(&unaccepted_memory_lock, flags);

	/*
	 * Check if anybody else is working on accepting the same range of
	 * memory.
	 *
	 * The check is done with unit_size granularity. It is crucial to catch
	 * all accept requests to the same unit_size block, even if they don't
	 * overlap at the physical address level.
	 */
	list_for_each_entry(entry, &accepting_list, list) {
		if (entry->end <= range.start)
			continue;
		if (entry->start >= range.end)
			continue;

		/*
		 * Somebody else is accepting the range, or at least part of
		 * it.
		 *
		 * Drop the lock and retry until it is complete.
		 */
		spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
		goto retry;
	}

	/*
	 * Register that the range is about to be accepted.
	 * Make sure nobody else will accept it.
	 */
	list_add(&range.list, &accepting_list);

	range_start = range.start;
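	/* Walk only the parts of the range that are still marked unaccepted */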
	for_each_set_bitrange_from(range_start, range_end, unaccepted->bitmap,
				   range.end) {
		unsigned long phys_start, phys_end;
		unsigned long len = range_end - range_start;

		phys_start = range_start * unit_size + unaccepted->phys_base;
		phys_end = range_end * unit_size + unaccepted->phys_base;

		/*
		 * Keep interrupts disabled until the accept operation is
		 * complete in order to prevent deadlocks.
		 *
		 * Enabling interrupts before calling arch_accept_memory()
		 * creates an opportunity for an interrupt handler to request
		 * acceptance for the same memory. The handler will
		 * continuously spin with interrupts disabled, preventing the
		 * interrupted task from making progress with the acceptance
		 * process.
		 */
		spin_unlock(&unaccepted_memory_lock);

		arch_accept_memory(phys_start, phys_end);

		spin_lock(&unaccepted_memory_lock);
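		/* Record the newly accepted units by clearing their bits */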
		bitmap_clear(unaccepted->bitmap, range_start, len);
	}

	list_del(&range.list);

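	/* Accepting memory can take a while; avoid soft-lockup warnings */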
	touch_softlockup_watchdog();

	spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
}

bool range_contains_unaccepted_memory(phys_addr_t start, phys_addr_t end)
{
	struct efi_unaccepted_memory *unaccepted;
	unsigned long flags;
	bool ret = false;
	u64 unit_size;

	unaccepted = efi_get_unaccepted_table();
	if (!unaccepted)
		return false;

	unit_size = unaccepted->unit_size;

	/*
	 * Only care for the part of the range that is represented
	 * in the bitmap.
	 */
	if (start < unaccepted->phys_base)
		start = unaccepted->phys_base;
	if (end < unaccepted->phys_base)
		return false;

	/* Translate to offsets from the beginning of the bitmap */
	start -= unaccepted->phys_base;
	end -= unaccepted->phys_base;

	/*
	 * Also consider the unaccepted state of the *next* page. See fix #1 in
	 * the comment on load_unaligned_zeropad() in accept_memory().
	 */
	if (!(end % unit_size))
		end += unit_size;

	/* Make sure not to overrun the bitmap */
	if (end > unaccepted->size * unit_size * BITS_PER_BYTE)
		end = unaccepted->size * unit_size * BITS_PER_BYTE;

	spin_lock_irqsave(&unaccepted_memory_lock, flags);
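	/* Check one bit per unit; any set bit means unaccepted memory */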
	while (start < end) {
		if (test_bit(start / unit_size, unaccepted->bitmap)) {
			ret = true;
			break;
		}

		start += unit_size;
	}
	spin_unlock_irqrestore(&unaccepted_memory_lock, flags);

	return ret;
}