// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Test TEST PROTECTION emulation.
 *
 * Copyright IBM Corp. 2021
 */
#include <sys/mman.h>
#include "test_util.h"
#include "kvm_util.h"
#include "kselftest.h"

#define PAGE_SHIFT 12
#define PAGE_SIZE (1 << PAGE_SHIFT)
/* Control register 0 bits, numbered from the MSB of the 64-bit register */
#define CR0_FETCH_PROTECTION_OVERRIDE	(1UL << (63 - 38))
#define CR0_STORAGE_PROTECTION_OVERRIDE	(1UL << (63 - 39))

static __aligned(PAGE_SIZE) uint8_t pages[2][PAGE_SIZE];
static uint8_t *const page_store_prot = pages[0];
static uint8_t *const page_fetch_prot = pages[1];

/* A nonzero return value indicates that the address is not mapped */
static int set_storage_key(void *addr, uint8_t key)
{
	int not_mapped = 0;

	asm volatile (
		/* LRA: translate addr; cc 0 indicates success */
		       "lra	%[addr], 0(0,%[addr])\n"
		"	jz	0f\n"
		/* translation failed: record that the page is not mapped */
		"	llill	%[not_mapped],1\n"
		"	j	1f\n"
		/* SSKE: set the storage key of the addressed frame */
		"0:	sske	%[key], %[addr]\n"
		"1:"
		: [addr] "+&a" (addr), [not_mapped] "+r" (not_mapped)
		: [key] "r" (key)
		: "cc"
	);
	return -not_mapped;
}

/* The TPROT condition codes, in order */
enum permission {
	READ_WRITE = 0,		/* cc 0: fetch and store permitted */
	READ = 1,		/* cc 1: fetch permitted, store not */
	RW_PROTECTED = 2,	/* cc 2: neither fetch nor store permitted */
	TRANSL_UNAVAIL = 3,	/* cc 3: translation not available */
};

static enum permission test_protection(void *addr, uint8_t key)
{
	uint64_t mask;

	asm volatile (
		       "tprot	%[addr], 0(%[key])\n"
		/* IPM: copy the cc into bits 28-29 (from the LSB) of mask */
		"	ipm	%[mask]\n"
		: [mask] "=r" (mask)
		: [addr] "Q" (*(char *)addr),
		  [key] "a" (key)
		: "cc"
	);

	/* The TPROT condition code maps directly onto enum permission */
	return (enum permission)(mask >> 28);
}
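/*
 * Illustration only, not called by the test: the key operands used
 * throughout this file pack the access-control value into bits 0-3 of the
 * key byte and the fetch-protection bit into bit 4, so 0x10 is access
 * control 1, and 0x98 is access control 9 with fetch protection enabled.
 * A minimal sketch of that composition; the helper name is ours, not part
 * of the selftest API:
 */
static inline uint8_t compose_storage_key(uint8_t acc, bool fetch_prot)
{
	return (uint8_t)(acc << 4) | (fetch_prot ? 0x08 : 0x00);
}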
enum stage {
	STAGE_INIT_SIMPLE,
	TEST_SIMPLE,
	STAGE_INIT_FETCH_PROT_OVERRIDE,
	TEST_FETCH_PROT_OVERRIDE,
	TEST_STORAGE_PROT_OVERRIDE,
	STAGE_END /* must be the last entry (its value is the number of tests) */
};

struct test {
	enum stage stage;
	void *addr;
	uint8_t key;
	enum permission expected;
} tests[] = {
	/*
	 * We perform each test in the array by executing TEST PROTECTION on
	 * the specified addr with the specified key and checking that the
	 * returned permissions match the expected value.
	 * Both guest and host cooperate to set up the required test conditions.
	 * A central condition is that the page targeted by addr has to be DAT
	 * protected in the host mappings, in order for KVM to emulate the
	 * TEST PROTECTION instruction.
	 * Since the page tables are shared, the host uses mprotect to achieve
	 * this.
	 *
	 * Tests resulting in RW_PROTECTED/TRANSL_UNAVAIL will be interpreted
	 * by SIE, not KVM, but there is no harm in testing them as well.
	 * See Enhanced Suppression-on-Protection Facilities in the
	 * Interpretive-Execution Mode.
	 */
	/*
	 * guest: set storage key of page_store_prot to 1
	 *        set storage key of page_fetch_prot to 9 and enable
	 *        fetch protection for it
	 * STAGE_INIT_SIMPLE
	 * host: write protect both via mprotect
	 */
	/* access key 0 matches any storage key -> RW */
	{ TEST_SIMPLE, page_store_prot, 0x00, READ_WRITE },
	/* access key matches storage key -> RW */
	{ TEST_SIMPLE, page_store_prot, 0x10, READ_WRITE },
	/* mismatched keys, but no fetch protection -> RO */
	{ TEST_SIMPLE, page_store_prot, 0x20, READ },
	/* access key 0 matches any storage key -> RW */
	{ TEST_SIMPLE, page_fetch_prot, 0x00, READ_WRITE },
	/* access key matches storage key -> RW */
	{ TEST_SIMPLE, page_fetch_prot, 0x90, READ_WRITE },
	/* mismatched keys, fetch protection -> inaccessible */
	{ TEST_SIMPLE, page_fetch_prot, 0x10, RW_PROTECTED },
	/* page 0 not mapped yet -> translation not available */
	{ TEST_SIMPLE, (void *)0x00, 0x10, TRANSL_UNAVAIL },
	/*
	 * host: try to map page 0
	 * guest: set storage key of page 0 to 9 and enable fetch protection
	 * STAGE_INIT_FETCH_PROT_OVERRIDE
	 * host: write protect page 0
	 *       enable fetch protection override
	 */
	/* mismatched keys, fetch protection, but override applies -> RO */
	{ TEST_FETCH_PROT_OVERRIDE, (void *)0x00, 0x10, READ },
	/* mismatched keys, fetch protection, override applies to the first 2048 bytes (0-2047) only -> inaccessible */
	{ TEST_FETCH_PROT_OVERRIDE, (void *)2049, 0x10, RW_PROTECTED },
	/*
	 * host: enable storage protection override
	 */
	/* mismatched keys, but override applies (storage key 9) -> RW */
	{ TEST_STORAGE_PROT_OVERRIDE, page_fetch_prot, 0x10, READ_WRITE },
	/* mismatched keys, no fetch protection, override doesn't apply -> RO */
	{ TEST_STORAGE_PROT_OVERRIDE, page_store_prot, 0x20, READ },
	/* mismatched keys, but override applies (storage key 9) -> RW */
	{ TEST_STORAGE_PROT_OVERRIDE, (void *)2049, 0x10, READ_WRITE },
	/* end marker */
	{ STAGE_END, 0, 0, 0 },
};
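/*
 * Illustration only, not used by the test: a sketch of the key-matching
 * rule that the TEST_SIMPLE expectations above follow, assuming the
 * address is mapped (TRANSL_UNAVAIL is not modeled) and ignoring the
 * override controls exercised in the later stages. Both arguments are key
 * bytes with the key value in the high nibble, as in the table above.
 * Access key 0 matches any storage key; otherwise the keys must be equal,
 * and on a mismatch the fetch-protection bit decides between read-only
 * and no access. The helper name is ours, not part of the selftest API:
 */
static inline enum permission expected_simple_perm(uint8_t access_key,
						   uint8_t storage_key)
{
	if (access_key == 0 || (access_key >> 4) == (storage_key >> 4))
		return READ_WRITE;
	return (storage_key & 0x08) ? RW_PROTECTED : READ;
}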
static enum stage perform_next_stage(int *i, bool mapped_0)
{
	enum stage stage = tests[*i].stage;
	enum permission result;
	bool skip;

	for (; tests[*i].stage == stage; (*i)++) {
		/*
		 * Some fetch protection override tests require that page 0
		 * be mapped. However, when the host tries to map that page via
		 * vm_vaddr_alloc, it may happen that some other page gets
		 * mapped instead.
		 * In order to skip these tests, we detect this inside the
		 * guest.
		 */
		skip = tests[*i].addr < (void *)4096 &&
		       tests[*i].expected != TRANSL_UNAVAIL &&
		       !mapped_0;
		if (!skip) {
			result = test_protection(tests[*i].addr, tests[*i].key);
			__GUEST_ASSERT(result == tests[*i].expected,
				       "Wanted %u, got %u, for i = %u",
				       tests[*i].expected, result, *i);
		}
	}
	return stage;
}

static void guest_code(void)
{
	bool mapped_0;
	int i = 0;

	GUEST_ASSERT_EQ(set_storage_key(page_store_prot, 0x10), 0);
	GUEST_ASSERT_EQ(set_storage_key(page_fetch_prot, 0x98), 0);
	GUEST_SYNC(STAGE_INIT_SIMPLE);
	GUEST_SYNC(perform_next_stage(&i, false));

	/* Fetch-protection override */
	mapped_0 = !set_storage_key((void *)0, 0x98);
	GUEST_SYNC(STAGE_INIT_FETCH_PROT_OVERRIDE);
	GUEST_SYNC(perform_next_stage(&i, mapped_0));

	/* Storage-protection override */
	GUEST_SYNC(perform_next_stage(&i, mapped_0));
}

#define HOST_SYNC_NO_TAP(vcpup, stage)				\
({								\
	struct kvm_vcpu *__vcpu = (vcpup);			\
	struct ucall uc;					\
	int __stage = (stage);					\
								\
	vcpu_run(__vcpu);					\
	get_ucall(__vcpu, &uc);					\
	if (uc.cmd == UCALL_ABORT)				\
		REPORT_GUEST_ASSERT(uc);			\
	TEST_ASSERT_EQ(uc.cmd, UCALL_SYNC);			\
	TEST_ASSERT_EQ(uc.args[1], __stage);			\
})

#define HOST_SYNC(vcpu, stage)				\
({							\
	HOST_SYNC_NO_TAP(vcpu, stage);			\
	ksft_test_result_pass("" #stage "\n");		\
})

int main(int argc, char *argv[])
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	struct kvm_run *run;
	vm_vaddr_t guest_0_page;

	ksft_print_header();
	ksft_set_plan(STAGE_END);

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
	run = vcpu->run;

	HOST_SYNC(vcpu, STAGE_INIT_SIMPLE);
	mprotect(addr_gva2hva(vm, (vm_vaddr_t)pages), PAGE_SIZE * 2, PROT_READ);
	HOST_SYNC(vcpu, TEST_SIMPLE);

	guest_0_page = vm_vaddr_alloc(vm, PAGE_SIZE, 0);
	if (guest_0_page != 0) {
		/* Use NO_TAP so we don't get a PASS print */
		HOST_SYNC_NO_TAP(vcpu, STAGE_INIT_FETCH_PROT_OVERRIDE);
		ksft_test_result_skip("STAGE_INIT_FETCH_PROT_OVERRIDE - "
				      "Did not allocate page at 0\n");
	} else {
		HOST_SYNC(vcpu, STAGE_INIT_FETCH_PROT_OVERRIDE);
	}
	if (guest_0_page == 0)
		mprotect(addr_gva2hva(vm, (vm_vaddr_t)0), PAGE_SIZE, PROT_READ);
	run->s.regs.crs[0] |= CR0_FETCH_PROTECTION_OVERRIDE;
	run->kvm_dirty_regs = KVM_SYNC_CRS;
	HOST_SYNC(vcpu, TEST_FETCH_PROT_OVERRIDE);

	run->s.regs.crs[0] |= CR0_STORAGE_PROTECTION_OVERRIDE;
	run->kvm_dirty_regs = KVM_SYNC_CRS;
	HOST_SYNC(vcpu, TEST_STORAGE_PROT_OVERRIDE);

	kvm_vm_free(vm);

	ksft_finished();	/* Print results and exit() accordingly */
}
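/*
 * For reference, a successful run is expected to produce TAP output along
 * these lines (illustrative only; the exact kselftest header lines and
 * formatting may differ between kernel versions):
 *
 *	TAP version 13
 *	1..5
 *	ok 1 STAGE_INIT_SIMPLE
 *	ok 2 TEST_SIMPLE
 *	ok 3 STAGE_INIT_FETCH_PROT_OVERRIDE
 *	ok 4 TEST_FETCH_PROT_OVERRIDE
 *	ok 5 TEST_STORAGE_PROT_OVERRIDE
 *
 * When vm_vaddr_alloc() does not hand out the page at address 0, the
 * STAGE_INIT_FETCH_PROT_OVERRIDE result is reported as a skip instead.
 */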