// SPDX-License-Identifier: GPL-2.0-only
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <signal.h>
#include <syscall.h>
#include <sys/ioctl.h>
#include <sys/sysinfo.h>
#include <asm/barrier.h>
#include <linux/atomic.h>
#include <linux/rseq.h>
#include <linux/unistd.h>

#include "kvm_util.h"
#include "processor.h"
#include "test_util.h"

static __thread volatile struct rseq __rseq = {
	.cpu_id = RSEQ_CPU_ID_UNINITIALIZED,
};

/*
 * Use an arbitrary, bogus signature for configuring rseq; this test does not
 * actually enter an rseq critical section.
 */
#define RSEQ_SIG 0xdeadbeef

/*
 * Any bug related to task migration is likely to be timing-dependent; perform
 * a large number of migrations to reduce the odds of a false negative.
 */
#define NR_TASK_MIGRATIONS 100000

static pthread_t migration_thread;
static cpu_set_t possible_mask;
static int min_cpu, max_cpu;
static bool done;

static atomic_t seq_cnt;

static void guest_code(void)
{
	for (;;)
		GUEST_SYNC(0);
}

static void sys_rseq(int flags)
{
	int r;

	r = syscall(__NR_rseq, &__rseq, sizeof(__rseq), flags, RSEQ_SIG);
	TEST_ASSERT(!r, "rseq failed, errno = %d (%s)", errno, strerror(errno));
}

static int next_cpu(int cpu)
{
	/*
	 * Advance to the next CPU, skipping those that weren't in the original
	 * affinity set.  Sadly, there is no CPU_SET_FOR_EACH, and cpu_set_t's
	 * data storage is considered opaque.  Note, if this task is pinned to
	 * a small set of discontiguous CPUs, e.g. 2 and 1023, this loop will
	 * burn a lot of cycles and the test will take longer than normal to
	 * complete.
	 */
	do {
		cpu++;
		if (cpu > max_cpu) {
			cpu = min_cpu;
			TEST_ASSERT(CPU_ISSET(cpu, &possible_mask),
				    "Min CPU = %d must always be usable", cpu);
			break;
		}
	} while (!CPU_ISSET(cpu, &possible_mask));

	return cpu;
}

static void *migration_worker(void *__rseq_tid)
{
	pid_t rseq_tid = (pid_t)(unsigned long)__rseq_tid;
	cpu_set_t allowed_mask;
	int r, i, cpu;

	CPU_ZERO(&allowed_mask);

	for (i = 0, cpu = min_cpu; i < NR_TASK_MIGRATIONS; i++, cpu = next_cpu(cpu)) {
		CPU_SET(cpu, &allowed_mask);

		/*
		 * Bump the sequence count twice to allow the reader to detect
		 * that a migration may have occurred in between the rseq and
		 * sched CPU ID reads.  An odd sequence count indicates a
		 * migration is in-progress, while a completely different count
		 * indicates a migration occurred since the count was last read.
		 */
		atomic_inc(&seq_cnt);

		/*
		 * Ensure the odd count is visible while sched_getcpu() isn't
		 * stable, i.e. while changing affinity is in-progress.
		 */
		smp_wmb();
		r = sched_setaffinity(rseq_tid, sizeof(allowed_mask), &allowed_mask);
		TEST_ASSERT(!r, "sched_setaffinity failed, errno = %d (%s)",
			    errno, strerror(errno));
		smp_wmb();
		atomic_inc(&seq_cnt);

		CPU_CLR(cpu, &allowed_mask);

		/*
		 * Wait 1-10us before proceeding to the next iteration and more
		 * specifically, before bumping seq_cnt again.  A delay is
		 * needed on three fronts:
		 *
		 * 1. To allow sched_setaffinity() to prompt migration before
		 *    ioctl(KVM_RUN) enters the guest so that TIF_NOTIFY_RESUME
		 *    (or TIF_NEED_RESCHED, which indirectly leads to handling
		 *    NOTIFY_RESUME) is handled in KVM context.
		 *
		 *    If NOTIFY_RESUME/NEED_RESCHED is set after KVM enters
		 *    the guest, the guest will trigger an IO/MMIO exit all the
		 *    way to userspace and the TIF flags will be handled by
		 *    the generic "exit to userspace" logic, not by KVM.  The
		 *    exit to userspace is necessary to give the test a chance
		 *    to check the rseq CPU ID (see #2).
		 *
		 *    Alternatively, guest_code() could include an instruction
		 *    to trigger an exit that is handled by KVM, but any such
		 *    exit requires architecture specific code.
		 *
		 * 2. To let ioctl(KVM_RUN) make its way back to the test
		 *    before the next round of migration.  The test's check on
		 *    the rseq CPU ID must wait for migration to complete in
		 *    order to avoid false positives, thus any kernel rseq bug
		 *    will be missed if the next migration starts before the
		 *    check completes.
		 *
		 * 3. To ensure the read-side makes efficient forward progress,
		 *    e.g. if sched_getcpu() involves a syscall.  Stalling the
		 *    read-side means the test will spend more time waiting for
		 *    sched_getcpu() to stabilize and less time trying to hit
		 *    the timing-dependent bug.
		 *
		 * Because any bug in this area is likely to be timing-dependent,
		 * run with a range of delays at 1us intervals from 1us to 10us
		 * as a best effort to avoid tuning the test to the point where
		 * it can hit _only_ the original bug and not detect future
		 * regressions.
		 *
		 * The original bug can reproduce with a delay up to ~500us on
		 * x86-64, but starts to require more iterations to reproduce
		 * as the delay creeps above ~10us, and the average runtime of
		 * each iteration obviously increases as well.  Cap the delay
		 * at 10us to keep test runtime reasonable while minimizing
		 * potential coverage loss.
		 *
		 * The lower bound for reproducing the bug is likely below 1us,
		 * e.g. failures occur on x86-64 with nanosleep(0), but at that
		 * point the overhead of the syscall likely dominates the delay.
		 * Use usleep() for simplicity and to avoid unnecessary kernel
		 * dependencies.
		 */
		usleep((i % 10) + 1);
	}
	done = true;
	return NULL;
}

static void calc_min_max_cpu(void)
{
	int i, cnt, nproc;

	TEST_REQUIRE(CPU_COUNT(&possible_mask) >= 2);

	/*
	 * CPU_SET doesn't provide a FOR_EACH helper, so find the min/max CPU
	 * that this task is affined to in order to reduce the time spent
	 * querying unusable CPUs, e.g. if this task is pinned to a small
	 * percentage of total CPUs.
	 */
	nproc = get_nprocs_conf();
	min_cpu = -1;
	max_cpu = -1;
	cnt = 0;

	for (i = 0; i < nproc; i++) {
		if (!CPU_ISSET(i, &possible_mask))
			continue;
		if (min_cpu == -1)
			min_cpu = i;
		max_cpu = i;
		cnt++;
	}

	__TEST_REQUIRE(cnt >= 2,
		       "Only one usable CPU, task migration not possible");
}

int main(int argc, char *argv[])
{
	int r, i, snapshot;
	struct kvm_vm *vm;
	struct kvm_vcpu *vcpu;
	u32 cpu, rseq_cpu;

	/* Tell stdout not to buffer its content. */
	setbuf(stdout, NULL);

	r = sched_getaffinity(0, sizeof(possible_mask), &possible_mask);
	TEST_ASSERT(!r, "sched_getaffinity failed, errno = %d (%s)", errno,
		    strerror(errno));

	calc_min_max_cpu();

	sys_rseq(0);

	/*
	 * Create and run a dummy VM that immediately exits to userspace via
	 * GUEST_SYNC, while concurrently migrating the process by setting its
	 * CPU affinity.
	 */
	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
	ucall_init(vm, NULL);

	pthread_create(&migration_thread, NULL, migration_worker,
		       (void *)(unsigned long)gettid());

	for (i = 0; !done; i++) {
		vcpu_run(vcpu);
		TEST_ASSERT(get_ucall(vcpu, NULL) == UCALL_SYNC,
			    "Guest failed?");

		/*
		 * Verify rseq's CPU matches sched's CPU.  Ensure migration
		 * doesn't occur between sched_getcpu() and reading the rseq
		 * cpu_id by rereading both if the sequence count changes, or
		 * if the count is odd (migration in-progress).
		 */
		do {
			/*
			 * Drop bit 0 to force a mismatch if the count is odd,
			 * i.e. if a migration is in-progress.
			 */
			snapshot = atomic_read(&seq_cnt) & ~1;

			/*
			 * Ensure the sched_getcpu() call and the rseq.cpu_id
			 * read complete in a single "no migration" window,
			 * i.e. are not reordered across the seq_cnt reads.
			 */
			smp_rmb();
			cpu = sched_getcpu();
			rseq_cpu = READ_ONCE(__rseq.cpu_id);
			smp_rmb();
		} while (snapshot != atomic_read(&seq_cnt));

		TEST_ASSERT(rseq_cpu == cpu,
			    "rseq CPU = %d, sched CPU = %d", rseq_cpu, cpu);
	}

	/*
	 * Sanity check that the test was able to enter the guest a reasonable
	 * number of times, e.g. didn't get stalled too often/long waiting for
	 * sched_getcpu() to stabilize.  A 2:1 migration:KVM_RUN ratio is
	 * fairly conservative on x86-64, which can do _more_ KVM_RUNs than
	 * migrations given the 1us+ delay in the migration task.
	 */
	TEST_ASSERT(i > (NR_TASK_MIGRATIONS / 2),
		    "Only performed %d KVM_RUNs, task stalled too much?", i);

	pthread_join(migration_thread, NULL);

	kvm_vm_free(vm);

	sys_rseq(RSEQ_FLAG_UNREGISTER);

	return 0;
}
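
/*
 * Illustrative sketch only, not used by the test above: once rseq is
 * registered, the current CPU can be read straight out of the rseq area
 * instead of via a syscall, which is why the read-side loop in main() can
 * reasonably expect sched_getcpu() to be cheap on rseq-aware libcs.  The
 * helper name is hypothetical; the negative sentinels it relies on
 * (RSEQ_CPU_ID_UNINITIALIZED, RSEQ_CPU_ID_REGISTRATION_FAILED) are defined
 * in linux/rseq.h.
 */
static inline int example_rseq_current_cpu(void)
{
	/* cpu_id is a __u32; the sentinels are negative when viewed as int. */
	int cpu = (int)READ_ONCE(__rseq.cpu_id);

	/* Fall back to the syscall if rseq isn't (successfully) registered. */
	if (cpu < 0)
		return sched_getcpu();

	return cpu;
}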