/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Data Access Monitor Unit Tests
 *
 * Copyright 2019 Amazon.com, Inc. or its affiliates. All rights reserved.
 *
 * Author: SeongJae Park <sjpark@amazon.de>
 */

#ifdef CONFIG_DAMON_KUNIT_TEST

#ifndef _DAMON_CORE_TEST_H
#define _DAMON_CORE_TEST_H

#include <kunit/test.h>

static void damon_test_regions(struct kunit *test)
{
	struct damon_region *r;
	struct damon_target *t;

	r = damon_new_region(1, 2);
	KUNIT_EXPECT_EQ(test, 1ul, r->ar.start);
	KUNIT_EXPECT_EQ(test, 2ul, r->ar.end);
	KUNIT_EXPECT_EQ(test, 0u, r->nr_accesses);

	t = damon_new_target();
	KUNIT_EXPECT_EQ(test, 0u, damon_nr_regions(t));

	damon_add_region(r, t);
	KUNIT_EXPECT_EQ(test, 1u, damon_nr_regions(t));

	damon_del_region(r, t);
	KUNIT_EXPECT_EQ(test, 0u, damon_nr_regions(t));

	damon_free_target(t);
}

static unsigned int nr_damon_targets(struct damon_ctx *ctx)
{
	struct damon_target *t;
	unsigned int nr_targets = 0;

	damon_for_each_target(t, ctx)
		nr_targets++;

	return nr_targets;
}

static void damon_test_target(struct kunit *test)
{
	struct damon_ctx *c = damon_new_ctx();
	struct damon_target *t;

	t = damon_new_target();
	KUNIT_EXPECT_EQ(test, 0u, nr_damon_targets(c));

	damon_add_target(c, t);
	KUNIT_EXPECT_EQ(test, 1u, nr_damon_targets(c));

	damon_destroy_target(t);
	KUNIT_EXPECT_EQ(test, 0u, nr_damon_targets(c));

	damon_destroy_ctx(c);
}
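
/*
 * The two tests above also rely on the two target cleanup paths having
 * different scopes: damon_free_target() is assumed to only release a target
 * that is not linked to any context (as in damon_test_regions()), while
 * damon_destroy_target() is assumed to also unlink the target from its
 * context, which is why nr_damon_targets() drops back to zero in
 * damon_test_target().
 */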

/*
 * Test kdamond_reset_aggregated()
 *
 * DAMON checks the access to each region and aggregates this information as
 * the access frequency of each region.  In detail, it increases
 * '->nr_accesses' of the regions in which an access has been detected.
 * 'kdamond_reset_aggregated()' flushes the aggregated information
 * ('->nr_accesses' of each region) to the result buffer.  As a result of the
 * flushing, '->nr_accesses' of each region is reset to zero.
 */
static void damon_test_aggregate(struct kunit *test)
{
	struct damon_ctx *ctx = damon_new_ctx();
	unsigned long saddr[][3] = {{10, 20, 30}, {5, 42, 49}, {13, 33, 55} };
	unsigned long eaddr[][3] = {{15, 27, 40}, {31, 45, 55}, {23, 44, 66} };
	unsigned long accesses[][3] = {{42, 95, 84}, {10, 20, 30}, {0, 1, 2} };
	struct damon_target *t;
	struct damon_region *r;
	int it, ir;

	for (it = 0; it < 3; it++) {
		t = damon_new_target();
		damon_add_target(ctx, t);
	}

	it = 0;
	damon_for_each_target(t, ctx) {
		for (ir = 0; ir < 3; ir++) {
			r = damon_new_region(saddr[it][ir], eaddr[it][ir]);
			r->nr_accesses = accesses[it][ir];
			damon_add_region(r, t);
		}
		it++;
	}
	kdamond_reset_aggregated(ctx);
	it = 0;
	damon_for_each_target(t, ctx) {
		ir = 0;
		/* '->nr_accesses' should be zeroed */
		damon_for_each_region(r, t) {
			KUNIT_EXPECT_EQ(test, 0u, r->nr_accesses);
			ir++;
		}
		/* regions should be preserved */
		KUNIT_EXPECT_EQ(test, 3, ir);
		it++;
	}
	/* targets also should be preserved */
	KUNIT_EXPECT_EQ(test, 3, it);

	damon_destroy_ctx(ctx);
}

static void damon_test_split_at(struct kunit *test)
{
	struct damon_ctx *c = damon_new_ctx();
	struct damon_target *t;
	struct damon_region *r;

	t = damon_new_target();
	r = damon_new_region(0, 100);
	damon_add_region(r, t);
	damon_split_region_at(t, r, 25);
	KUNIT_EXPECT_EQ(test, r->ar.start, 0ul);
	KUNIT_EXPECT_EQ(test, r->ar.end, 25ul);

	r = damon_next_region(r);
	KUNIT_EXPECT_EQ(test, r->ar.start, 25ul);
	KUNIT_EXPECT_EQ(test, r->ar.end, 100ul);

	damon_free_target(t);
	damon_destroy_ctx(c);
}

static void damon_test_merge_two(struct kunit *test)
{
	struct damon_target *t;
	struct damon_region *r, *r2, *r3;
	int i;

	t = damon_new_target();
	r = damon_new_region(0, 100);
	r->nr_accesses = 10;
	damon_add_region(r, t);
	r2 = damon_new_region(100, 300);
	r2->nr_accesses = 20;
	damon_add_region(r2, t);

	damon_merge_two_regions(t, r, r2);
	KUNIT_EXPECT_EQ(test, r->ar.start, 0ul);
	KUNIT_EXPECT_EQ(test, r->ar.end, 300ul);
	/* size-weighted average: (10 * 100 + 20 * 200) / 300 == 16 */
	KUNIT_EXPECT_EQ(test, r->nr_accesses, 16u);

	i = 0;
	damon_for_each_region(r3, t) {
		KUNIT_EXPECT_PTR_EQ(test, r, r3);
		i++;
	}
	KUNIT_EXPECT_EQ(test, i, 1);

	damon_free_target(t);
}

static struct damon_region *__nth_region_of(struct damon_target *t, int idx)
{
	struct damon_region *r;
	unsigned int i = 0;

	damon_for_each_region(r, t) {
		if (i++ == idx)
			return r;
	}

	return NULL;
}
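
/*
 * The expectations in damon_test_merge_regions_of() below assume that
 * damon_merge_regions_of(t, thres, sz_limit) merges only adjacent regions
 * whose '->nr_accesses' values differ by at most 'thres', and never lets a
 * merged region exceed 'sz_limit'.  Under that reading, the call with
 * (9, 9999) merges 0-100 with 100-112 (diff 0), 114-122 with 122-130
 * (diff 0), and 170-184 with 184-230 (diff 1), while the other neighbors
 * stay separate because their '->nr_accesses' differ by 10 or more, or
 * because they are not adjacent (112 vs 114).  That leaves the five regions
 * listed in 'saddrs' and 'eaddrs'.
 */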

static void damon_test_merge_regions_of(struct kunit *test)
{
	struct damon_target *t;
	struct damon_region *r;
	unsigned long sa[] = {0, 100, 114, 122, 130, 156, 170, 184};
	unsigned long ea[] = {100, 112, 122, 130, 156, 170, 184, 230};
	unsigned int nrs[] = {0, 0, 10, 10, 20, 30, 1, 2};

	unsigned long saddrs[] = {0, 114, 130, 156, 170};
	unsigned long eaddrs[] = {112, 130, 156, 170, 230};
	int i;

	t = damon_new_target();
	for (i = 0; i < ARRAY_SIZE(sa); i++) {
		r = damon_new_region(sa[i], ea[i]);
		r->nr_accesses = nrs[i];
		damon_add_region(r, t);
	}

	damon_merge_regions_of(t, 9, 9999);
	/* 0-112, 114-130, 130-156, 156-170, 170-230 */
	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 5u);
	for (i = 0; i < 5; i++) {
		r = __nth_region_of(t, i);
		KUNIT_EXPECT_EQ(test, r->ar.start, saddrs[i]);
		KUNIT_EXPECT_EQ(test, r->ar.end, eaddrs[i]);
	}
	damon_free_target(t);
}

static void damon_test_split_regions_of(struct kunit *test)
{
	struct damon_ctx *c = damon_new_ctx();
	struct damon_target *t;
	struct damon_region *r;

	t = damon_new_target();
	r = damon_new_region(0, 22);
	damon_add_region(r, t);
	damon_split_regions_of(t, 2);
	KUNIT_EXPECT_LE(test, damon_nr_regions(t), 2u);
	damon_free_target(t);

	t = damon_new_target();
	r = damon_new_region(0, 220);
	damon_add_region(r, t);
	damon_split_regions_of(t, 4);
	KUNIT_EXPECT_LE(test, damon_nr_regions(t), 4u);
	damon_free_target(t);
	damon_destroy_ctx(c);
}

static void damon_test_ops_registration(struct kunit *test)
{
	struct damon_ctx *c = damon_new_ctx();
	struct damon_operations ops, bak;

	/* DAMON_OPS_{V,P}ADDR are registered on subsys_initcall */
	KUNIT_EXPECT_EQ(test, damon_select_ops(c, DAMON_OPS_VADDR), 0);
	KUNIT_EXPECT_EQ(test, damon_select_ops(c, DAMON_OPS_PADDR), 0);

	/* Double-registration is prohibited */
	ops.id = DAMON_OPS_VADDR;
	KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), -EINVAL);
	ops.id = DAMON_OPS_PADDR;
	KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), -EINVAL);

	/* Unknown ops id cannot be selected */
	KUNIT_EXPECT_EQ(test, damon_select_ops(c, NR_DAMON_OPS), -EINVAL);

	/* Registration should succeed after unregistration */
	mutex_lock(&damon_ops_lock);
	bak = damon_registered_ops[DAMON_OPS_VADDR];
	damon_registered_ops[DAMON_OPS_VADDR] = (struct damon_operations){};
	mutex_unlock(&damon_ops_lock);

	ops.id = DAMON_OPS_VADDR;
	KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), 0);

	mutex_lock(&damon_ops_lock);
	damon_registered_ops[DAMON_OPS_VADDR] = bak;
	mutex_unlock(&damon_ops_lock);

	/* Check double-registration failure again */
	KUNIT_EXPECT_EQ(test, damon_register_ops(&ops), -EINVAL);
}

static void damon_test_set_regions(struct kunit *test)
{
	struct damon_target *t = damon_new_target();
	struct damon_region *r1 = damon_new_region(4, 16);
	struct damon_region *r2 = damon_new_region(24, 32);
	struct damon_addr_range range = {.start = 8, .end = 28};
	/* 4-16 and 24-32 should be updated to 8-16, 16-24, and 24-28 */
	unsigned long expects[] = {8, 16, 16, 24, 24, 28};
	int expect_idx = 0;
	struct damon_region *r;

	damon_add_region(r1, t);
	damon_add_region(r2, t);
	damon_set_regions(t, &range, 1);

	KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 3);
	damon_for_each_region(r, t) {
		KUNIT_EXPECT_EQ(test, r->ar.start, expects[expect_idx++]);
		KUNIT_EXPECT_EQ(test, r->ar.end, expects[expect_idx++]);
	}
	damon_destroy_target(t);
}
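
/*
 * The expected values in damon_test_update_monitoring_result() below are
 * consistent with rescaling the result to the new intervals: '->nr_accesses'
 * is assumed to scale with the number of samplings per aggregation interval
 * (aggr_interval / sample_interval), and '->age' (counted in aggregation
 * intervals) inversely with aggr_interval.  For example, moving from
 * {sample 10, aggr 1000} to {sample 100, aggr 10000} keeps 100 samplings
 * per aggregation, so nr_accesses stays 15, while the ten times longer
 * aggregation interval turns an age of 20 into 2.
 */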

static void damon_test_update_monitoring_result(struct kunit *test)
{
	struct damon_attrs old_attrs = {
		.sample_interval = 10, .aggr_interval = 1000,};
	struct damon_attrs new_attrs;
	struct damon_region *r = damon_new_region(3, 7);

	r->nr_accesses = 15;
	r->age = 20;

	new_attrs = (struct damon_attrs){
		.sample_interval = 100, .aggr_interval = 10000,};
	damon_update_monitoring_result(r, &old_attrs, &new_attrs);
	KUNIT_EXPECT_EQ(test, r->nr_accesses, 15);
	KUNIT_EXPECT_EQ(test, r->age, 2);

	new_attrs = (struct damon_attrs){
		.sample_interval = 1, .aggr_interval = 1000};
	damon_update_monitoring_result(r, &old_attrs, &new_attrs);
	KUNIT_EXPECT_EQ(test, r->nr_accesses, 150);
	KUNIT_EXPECT_EQ(test, r->age, 2);

	new_attrs = (struct damon_attrs){
		.sample_interval = 1, .aggr_interval = 100};
	damon_update_monitoring_result(r, &old_attrs, &new_attrs);
	KUNIT_EXPECT_EQ(test, r->nr_accesses, 150);
	KUNIT_EXPECT_EQ(test, r->age, 20);
}

static struct kunit_case damon_test_cases[] = {
	KUNIT_CASE(damon_test_target),
	KUNIT_CASE(damon_test_regions),
	KUNIT_CASE(damon_test_aggregate),
	KUNIT_CASE(damon_test_split_at),
	KUNIT_CASE(damon_test_merge_two),
	KUNIT_CASE(damon_test_merge_regions_of),
	KUNIT_CASE(damon_test_split_regions_of),
	KUNIT_CASE(damon_test_ops_registration),
	KUNIT_CASE(damon_test_set_regions),
	KUNIT_CASE(damon_test_update_monitoring_result),
	{},
};

static struct kunit_suite damon_test_suite = {
	.name = "damon",
	.test_cases = damon_test_cases,
};
kunit_test_suite(damon_test_suite);

#endif /* _DAMON_CORE_TEST_H */

#endif /* CONFIG_DAMON_KUNIT_TEST */