// SPDX-License-Identifier: MIT

/*
 * Copyright © 2019 Intel Corporation
 * Copyright © 2021 Advanced Micro Devices, Inc.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/dma-fence.h>
#include <linux/dma-resv.h>

#include "selftest.h"

static spinlock_t fence_lock;

/* Dummy name used as both driver and timeline name of the test fences. */
static const char *fence_name(struct dma_fence *f)
{
	return "selftest";
}

static const struct dma_fence_ops fence_ops = {
	.get_driver_name = fence_name,
	.get_timeline_name = fence_name,
};

/* Allocate a minimal dma_fence for the tests, protected by fence_lock. */
static struct dma_fence *alloc_fence(void)
{
	struct dma_fence *f;

	f = kmalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return NULL;

	dma_fence_init(f, &fence_ops, &fence_lock, 0, 0);
	return f;
}

/* Smoke test: signal a fence and lock/unlock a reservation object. */
static int sanitycheck(void *arg)
{
	struct dma_resv resv;
	struct dma_fence *f;
	int r;

	f = alloc_fence();
	if (!f)
		return -ENOMEM;

	dma_fence_signal(f);
	dma_fence_put(f);

	dma_resv_init(&resv);
	r = dma_resv_lock(&resv, NULL);
	if (r)
		pr_err("Resv locking failed\n");
	else
		dma_resv_unlock(&resv);
	dma_resv_fini(&resv);
	return r;
}

/*
 * Add a single fence to either the exclusive or the shared slot and check
 * that dma_resv_test_signaled() reflects the fence's state before and after
 * it is signaled.
 */
static int test_signaling(void *arg, bool shared)
{
	struct dma_resv resv;
	struct dma_fence *f;
	int r;

	f = alloc_fence();
	if (!f)
		return -ENOMEM;

	dma_resv_init(&resv);
	r = dma_resv_lock(&resv, NULL);
	if (r) {
		pr_err("Resv locking failed\n");
		goto err_free;
	}

	if (shared) {
		r = dma_resv_reserve_shared(&resv, 1);
		if (r) {
			pr_err("Resv shared slot allocation failed\n");
			goto err_unlock;
		}

		dma_resv_add_shared_fence(&resv, f);
	} else {
		dma_resv_add_excl_fence(&resv, f);
	}

	if (dma_resv_test_signaled(&resv, shared)) {
		pr_err("Resv unexpectedly signaled\n");
		r = -EINVAL;
		goto err_unlock;
	}
	dma_fence_signal(f);
	if (!dma_resv_test_signaled(&resv, shared)) {
		pr_err("Resv not reporting signaled\n");
		r = -EINVAL;
		goto err_unlock;
	}
err_unlock:
	dma_resv_unlock(&resv);
err_free:
	dma_resv_fini(&resv);
	dma_fence_put(f);
	return r;
}

static int test_excl_signaling(void *arg)
{
	return test_signaling(arg, false);
}

static int test_shared_signaling(void *arg)
{
	return test_signaling(arg, true);
}

/*
 * Walk the fences with the locked iterator and check that exactly one fence
 * with the expected usage is returned.
 */
static int test_for_each(void *arg, bool shared)
{
	struct dma_resv_iter cursor;
	struct dma_fence *f, *fence;
	struct dma_resv resv;
	int r;

	f = alloc_fence();
	if (!f)
		return -ENOMEM;

	dma_resv_init(&resv);
	r = dma_resv_lock(&resv, NULL);
	if (r) {
		pr_err("Resv locking failed\n");
		goto err_free;
	}

	if (shared) {
		r = dma_resv_reserve_shared(&resv, 1);
		if (r) {
			pr_err("Resv shared slot allocation failed\n");
			goto err_unlock;
		}

		dma_resv_add_shared_fence(&resv, f);
	} else {
		dma_resv_add_excl_fence(&resv, f);
	}

	r = -ENOENT;
	dma_resv_for_each_fence(&cursor, &resv, shared, fence) {
		if (!r) {
			pr_err("More than one fence found\n");
			r = -EINVAL;
			goto err_unlock;
		}
		if (f != fence) {
			pr_err("Unexpected fence\n");
			r = -EINVAL;
			goto err_unlock;
		}
		if (dma_resv_iter_is_exclusive(&cursor) != !shared) {
			pr_err("Unexpected fence usage\n");
			r = -EINVAL;
			goto err_unlock;
		}
		r = 0;
	}
	if (r) {
		pr_err("No fence found\n");
		goto err_unlock;
	}
	dma_fence_signal(f);
err_unlock:
	dma_resv_unlock(&resv);
err_free:
	dma_resv_fini(&resv);
	dma_fence_put(f);
	return r;
}

static int test_excl_for_each(void *arg)
{
	return test_for_each(arg, false);
}

static int test_shared_for_each(void *arg)
{
	return test_for_each(arg, true);
}
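/*
 * Same single-fence check as test_for_each(), but using the unlocked
 * iterator. The iteration additionally decrements cursor.seq once to force a
 * restart of the walk and verifies that dma_resv_iter_is_restarted() reports
 * it, so the fence must be delivered exactly twice.
 */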
static int test_for_each_unlocked(void *arg, bool shared)
{
	struct dma_resv_iter cursor;
	struct dma_fence *f, *fence;
	struct dma_resv resv;
	int r;

	f = alloc_fence();
	if (!f)
		return -ENOMEM;

	dma_resv_init(&resv);
	r = dma_resv_lock(&resv, NULL);
	if (r) {
		pr_err("Resv locking failed\n");
		goto err_free;
	}

	if (shared) {
		r = dma_resv_reserve_shared(&resv, 1);
		if (r) {
			pr_err("Resv shared slot allocation failed\n");
			dma_resv_unlock(&resv);
			goto err_free;
		}

		dma_resv_add_shared_fence(&resv, f);
	} else {
		dma_resv_add_excl_fence(&resv, f);
	}
	dma_resv_unlock(&resv);

	r = -ENOENT;
	dma_resv_iter_begin(&cursor, &resv, shared);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		if (!r) {
			pr_err("More than one fence found\n");
			r = -EINVAL;
			goto err_iter_end;
		}
		if (!dma_resv_iter_is_restarted(&cursor)) {
			pr_err("No restart flag\n");
			goto err_iter_end;
		}
		if (f != fence) {
			pr_err("Unexpected fence\n");
			r = -EINVAL;
			goto err_iter_end;
		}
		if (dma_resv_iter_is_exclusive(&cursor) != !shared) {
			pr_err("Unexpected fence usage\n");
			r = -EINVAL;
			goto err_iter_end;
		}

		/* We use r as state here */
		if (r == -ENOENT) {
			r = -EINVAL;
			/* That should trigger a restart */
			cursor.seq--;
		} else if (r == -EINVAL) {
			r = 0;
		}
	}
	if (r)
		pr_err("No fence found\n");
err_iter_end:
	dma_resv_iter_end(&cursor);
	dma_fence_signal(f);
err_free:
	dma_resv_fini(&resv);
	dma_fence_put(f);
	return r;
}

static int test_excl_for_each_unlocked(void *arg)
{
	return test_for_each_unlocked(arg, false);
}

static int test_shared_for_each_unlocked(void *arg)
{
	return test_for_each_unlocked(arg, true);
}

/*
 * Check that dma_resv_get_fences() returns exactly the one fence which was
 * added to the reservation object.
 */
static int test_get_fences(void *arg, bool shared)
{
	struct dma_fence *f, **fences = NULL;
	struct dma_resv resv;
	int r, i;

	f = alloc_fence();
	if (!f)
		return -ENOMEM;

	dma_resv_init(&resv);
	r = dma_resv_lock(&resv, NULL);
	if (r) {
		pr_err("Resv locking failed\n");
		goto err_resv;
	}

	if (shared) {
		r = dma_resv_reserve_shared(&resv, 1);
		if (r) {
			pr_err("Resv shared slot allocation failed\n");
			dma_resv_unlock(&resv);
			goto err_resv;
		}

		dma_resv_add_shared_fence(&resv, f);
	} else {
		dma_resv_add_excl_fence(&resv, f);
	}
	dma_resv_unlock(&resv);

	r = dma_resv_get_fences(&resv, shared, &i, &fences);
	if (r) {
		pr_err("get_fences failed\n");
		goto err_free;
	}

	if (i != 1 || fences[0] != f) {
		pr_err("get_fences returned unexpected fence\n");
		r = -EINVAL;
		goto err_free;
	}

	dma_fence_signal(f);
err_free:
	while (i--)
		dma_fence_put(fences[i]);
	kfree(fences);
err_resv:
	dma_resv_fini(&resv);
	dma_fence_put(f);
	return r;
}

static int test_excl_get_fences(void *arg)
{
	return test_get_fences(arg, false);
}

static int test_shared_get_fences(void *arg)
{
	return test_get_fences(arg, true);
}
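/*
 * Selftest entry point: initialize the shared fence lock and run all of the
 * subtests listed below.
 */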
int dma_resv(void)
{
	static const struct subtest tests[] = {
		SUBTEST(sanitycheck),
		SUBTEST(test_excl_signaling),
		SUBTEST(test_shared_signaling),
		SUBTEST(test_excl_for_each),
		SUBTEST(test_shared_for_each),
		SUBTEST(test_excl_for_each_unlocked),
		SUBTEST(test_shared_for_each_unlocked),
		SUBTEST(test_excl_get_fences),
		SUBTEST(test_shared_get_fences),
	};

	spin_lock_init(&fence_lock);
	return subtests(tests, NULL);
}