// SPDX-License-Identifier: GPL-2.0
/*
 * The main purpose of the tests here is to exercise the migration entry code
 * paths in the kernel.
 */

#include "../kselftest_harness.h"
#include <strings.h>
#include <pthread.h>
#include <numa.h>
#include <numaif.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <signal.h>
#include <time.h>

#define TWOMEG (2<<20)
#define RUNTIME (60)

#define ALIGN(x, a) (((x) + (a - 1)) & (~((a) - 1)))

FIXTURE(migration)
{
	pthread_t *threads;
	pid_t *pids;
	int nthreads;
	int n1;
	int n2;
};

FIXTURE_SETUP(migration)
{
	int n;

	ASSERT_EQ(numa_available(), 0);
	self->nthreads = numa_num_task_cpus() - 1;
	self->n1 = -1;
	self->n2 = -1;

	/* Pick the first two NUMA nodes this task is allowed to use. */
	for (n = 0; n < numa_max_possible_node(); n++)
		if (numa_bitmask_isbitset(numa_all_nodes_ptr, n)) {
			if (self->n1 == -1) {
				self->n1 = n;
			} else {
				self->n2 = n;
				break;
			}
		}

	self->threads = malloc(self->nthreads * sizeof(*self->threads));
	ASSERT_NE(self->threads, NULL);
	self->pids = malloc(self->nthreads * sizeof(*self->pids));
	ASSERT_NE(self->pids, NULL);
};

FIXTURE_TEARDOWN(migration)
{
	free(self->threads);
	free(self->pids);
}

/*
 * Migrate the memory at ptr back and forth between nodes n1 and n2. Returns
 * 0 once RUNTIME seconds have elapsed, or a negative value on error.
 */
int migrate(uint64_t *ptr, int n1, int n2)
{
	int ret, tmp;
	int status = 0;
	struct timespec ts1, ts2;

	if (clock_gettime(CLOCK_MONOTONIC, &ts1))
		return -1;

	while (1) {
		if (clock_gettime(CLOCK_MONOTONIC, &ts2))
			return -1;

		if (ts2.tv_sec - ts1.tv_sec >= RUNTIME)
			return 0;

		ret = move_pages(0, 1, (void **) &ptr, &n2, &status,
				 MPOL_MF_MOVE_ALL);
		if (ret) {
			if (ret > 0)
				printf("Didn't migrate %d pages\n", ret);
			else
				perror("Couldn't migrate pages");
			return -2;
		}

		/* Swap the target nodes so the next iteration migrates back. */
		tmp = n2;
		n2 = n1;
		n1 = tmp;
	}

	return 0;
}

/*
 * Read the target memory in a tight loop so that concurrent accesses race
 * with the migrations and hit the migration entry wait paths.
 */
void *access_mem(void *ptr)
{
	volatile uint64_t y = 0;
	volatile uint64_t *x = ptr;

	while (1) {
		pthread_testcancel();
		y += *x;

		/* Prevent the compiler from optimizing out the writes to y: */
		asm volatile("" : "+r" (y));
	}

	return NULL;
}

/*
 * Basic migration entry testing. One thread will move pages back and forth
 * between nodes whilst other threads try to access them, triggering the
 * migration entry wait paths in the kernel.
 */
TEST_F_TIMEOUT(migration, private_anon, 2*RUNTIME)
{
	uint64_t *ptr;
	int i;

	if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
		SKIP(return, "Not enough threads or NUMA nodes available");

	ptr = mmap(NULL, TWOMEG, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	memset(ptr, 0xde, TWOMEG);
	for (i = 0; i < self->nthreads - 1; i++)
		if (pthread_create(&self->threads[i], NULL, access_mem, ptr))
			perror("Couldn't create thread");

	ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
	for (i = 0; i < self->nthreads - 1; i++)
		ASSERT_EQ(pthread_cancel(self->threads[i]), 0);
}

/*
 * Same as the previous test but with shared memory.
 */
TEST_F_TIMEOUT(migration, shared_anon, 2*RUNTIME)
{
	pid_t pid;
	uint64_t *ptr;
	int i;

	if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
		SKIP(return, "Not enough threads or NUMA nodes available");

	ptr = mmap(NULL, TWOMEG, PROT_READ | PROT_WRITE,
		   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	memset(ptr, 0xde, TWOMEG);
	/* Fork accessor processes; access_mem() never returns in the child. */
	for (i = 0; i < self->nthreads - 1; i++) {
		pid = fork();
		if (!pid)
			access_mem(ptr);
		else
			self->pids[i] = pid;
	}

	ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
	for (i = 0; i < self->nthreads - 1; i++)
		ASSERT_EQ(kill(self->pids[i], SIGTERM), 0);
}

/*
 * Tests the pmd migration entry paths.
 */
TEST_F_TIMEOUT(migration, private_anon_thp, 2*RUNTIME)
{
	uint64_t *ptr;
	int i;

	if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
		SKIP(return, "Not enough threads or NUMA nodes available");

	ptr = mmap(NULL, 2*TWOMEG, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	/* Align to a 2MB boundary and request a THP so a pmd can back it. */
	ptr = (uint64_t *) ALIGN((uintptr_t) ptr, TWOMEG);
	ASSERT_EQ(madvise(ptr, TWOMEG, MADV_HUGEPAGE), 0);
	memset(ptr, 0xde, TWOMEG);
	for (i = 0; i < self->nthreads - 1; i++)
		if (pthread_create(&self->threads[i], NULL, access_mem, ptr))
			perror("Couldn't create thread");

	ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
	for (i = 0; i < self->nthreads - 1; i++)
		ASSERT_EQ(pthread_cancel(self->threads[i]), 0);
}

TEST_HARNESS_MAIN