// SPDX-License-Identifier: GPL-2.0-only

#include <linux/efi.h>
#include <linux/memblock.h>
#include <linux/spinlock.h>
#include <asm/unaccepted_memory.h>

/* Protects unaccepted memory bitmap */
static DEFINE_SPINLOCK(unaccepted_memory_lock);

/*
 * accept_memory() -- Consult bitmap and accept the memory if needed.
 *
 * Only memory that is explicitly marked as unaccepted in the bitmap requires
 * an action. All the remaining memory is implicitly accepted and doesn't need
 * acceptance.
 *
 * No need to accept:
 *  - anything if the system has no unaccepted table;
 *  - memory that is below phys_base;
 *  - memory that is above the memory addressable by the bitmap;
 */
void accept_memory(phys_addr_t start, phys_addr_t end)
{
	struct efi_unaccepted_memory *unaccepted;
	unsigned long range_start, range_end;
	unsigned long flags;
	u64 unit_size;

	unaccepted = efi_get_unaccepted_table();
	if (!unaccepted)
		return;

	unit_size = unaccepted->unit_size;

	/*
	 * Only care for the part of the range that is represented
	 * in the bitmap.
	 */
	if (start < unaccepted->phys_base)
		start = unaccepted->phys_base;
	if (end < unaccepted->phys_base)
		return;

	/* Translate to offsets from the beginning of the bitmap */
	start -= unaccepted->phys_base;
	end -= unaccepted->phys_base;

	/*
	 * load_unaligned_zeropad() can lead to unwanted loads across page
	 * boundaries. The unwanted loads are typically harmless. But, they
	 * might be made to totally unrelated or even unmapped memory.
	 * load_unaligned_zeropad() relies on exception fixup (#PF, #GP and now
	 * #VE) to recover from these unwanted loads.
	 *
	 * But, this approach does not work for unaccepted memory. For TDX, a
	 * load from unaccepted memory will not lead to a recoverable exception
	 * within the guest. The guest will exit to the VMM where the only
	 * recourse is to terminate the guest.
	 *
	 * There are two parts to fix this issue and comprehensively avoid
	 * access to unaccepted memory. Together these ensure that an extra
	 * "guard" page is accepted in addition to the memory that needs to be
	 * used:
	 *
	 * 1. Implicitly extend the range_contains_unaccepted_memory(start, end)
	 *    checks up to end+unit_size if 'end' is aligned on a unit_size
	 *    boundary.
	 *
	 * 2. Implicitly extend accept_memory(start, end) to end+unit_size if
	 *    'end' is aligned on a unit_size boundary.
	 *    (immediately following this comment)
	 */
	if (!(end % unit_size))
		end += unit_size;

	/* Make sure not to overrun the bitmap */
	if (end > unaccepted->size * unit_size * BITS_PER_BYTE)
		end = unaccepted->size * unit_size * BITS_PER_BYTE;

	range_start = start / unit_size;

	spin_lock_irqsave(&unaccepted_memory_lock, flags);
	for_each_set_bitrange_from(range_start, range_end, unaccepted->bitmap,
				   DIV_ROUND_UP(end, unit_size)) {
		unsigned long phys_start, phys_end;
		unsigned long len = range_end - range_start;

		phys_start = range_start * unit_size + unaccepted->phys_base;
		phys_end = range_end * unit_size + unaccepted->phys_base;

		arch_accept_memory(phys_start, phys_end);
		bitmap_clear(unaccepted->bitmap, range_start, len);
	}
	spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
}

bool range_contains_unaccepted_memory(phys_addr_t start, phys_addr_t end)
{
	struct efi_unaccepted_memory *unaccepted;
	unsigned long flags;
	bool ret = false;
	u64 unit_size;

	unaccepted = efi_get_unaccepted_table();
	if (!unaccepted)
		return false;

	unit_size = unaccepted->unit_size;

	/*
	 * Only care for the part of the range that is represented
	 * in the bitmap.
	 */
	if (start < unaccepted->phys_base)
		start = unaccepted->phys_base;
	if (end < unaccepted->phys_base)
		return false;

	/* Translate to offsets from the beginning of the bitmap */
	start -= unaccepted->phys_base;
	end -= unaccepted->phys_base;

	/*
	 * Also consider the unaccepted state of the *next* page. See fix #1 in
	 * the comment on load_unaligned_zeropad() in accept_memory().
	 */
	if (!(end % unit_size))
		end += unit_size;

	/* Make sure not to overrun the bitmap */
	if (end > unaccepted->size * unit_size * BITS_PER_BYTE)
		end = unaccepted->size * unit_size * BITS_PER_BYTE;

	spin_lock_irqsave(&unaccepted_memory_lock, flags);
	while (start < end) {
		if (test_bit(start / unit_size, unaccepted->bitmap)) {
			ret = true;
			break;
		}

		start += unit_size;
	}
	spin_unlock_irqrestore(&unaccepted_memory_lock, flags);

	return ret;
}
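
/*
 * Illustrative sketch, not part of the original file: how a hypothetical
 * caller might combine the two helpers above before touching the physical
 * range [phys, phys + size). The function name ensure_range_accepted() is
 * made up for illustration. Note that accept_memory() is already a no-op
 * when every bitmap bit in the range is clear (for_each_set_bitrange_from()
 * finds no set ranges), so the range_contains_unaccepted_memory() pre-check
 * is shown only to demonstrate how the query side composes with the
 * acceptance side.
 */
static void ensure_range_accepted(phys_addr_t phys, phys_addr_t size)
{
	/* Query the bitmap; true if any unit in the range is still unaccepted */
	if (range_contains_unaccepted_memory(phys, phys + size))
		accept_memory(phys, phys + size);
}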