/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include "../i915_selftest.h"

#include "igt_flush_test.h"
#include "lib_sw_fence.h"

struct live_active {
	struct i915_active base;
	bool retired;
};

static void __live_active_retire(struct i915_active *base)
{
	struct live_active *active = container_of(base, typeof(*active), base);

	active->retired = true;
}

/*
 * Queue one request per engine, all gated on a single unsignaled heap
 * fence, and track each request with i915_active_ref(). As the fence is
 * not committed until we are done, the i915_active must still be
 * tracking every request, and must not have retired, upon release.
 */
static int __live_active_setup(struct drm_i915_private *i915,
			       struct live_active *active)
{
	struct intel_engine_cs *engine;
	struct i915_sw_fence *submit;
	enum intel_engine_id id;
	unsigned int count = 0;
	int err = 0;

	submit = heap_fence_create(GFP_KERNEL);
	if (!submit)
		return -ENOMEM;

	i915_active_init(i915, &active->base, __live_active_retire);
	active->retired = false;

	if (!i915_active_acquire(&active->base)) {
		pr_err("First i915_active_acquire should report being idle\n");
		err = -EINVAL;
		goto out;
	}

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		rq = i915_request_alloc(engine, i915->kernel_context);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			break;
		}

		err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
						       submit,
						       GFP_KERNEL);
		if (err >= 0)
			err = i915_active_ref(&active->base,
					      rq->fence.context, rq);
		i915_request_add(rq);
		if (err) {
			pr_err("Failed to track active ref!\n");
			break;
		}

		count++;
	}

	i915_active_release(&active->base);
	if (active->retired && count) {
		pr_err("i915_active retired before submission!\n");
		err = -EINVAL;
	}
	if (active->base.count != count) {
		pr_err("i915_active not tracking all requests, found %d, expected %d\n",
		       active->base.count, count);
		err = -EINVAL;
	}

out:
	i915_sw_fence_commit(submit);
	heap_fence_put(submit);

	return err;
}

static int live_active_wait(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct live_active active;
	intel_wakeref_t wakeref;
	int err;

	/* Check that we get a callback when requests retire upon waiting */

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	err = __live_active_setup(i915, &active);

	i915_active_wait(&active.base);
	if (!active.retired) {
		pr_err("i915_active not retired after waiting!\n");
		err = -EINVAL;
	}

	i915_active_fini(&active.base);
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;

	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}

static int live_active_retire(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct live_active active;
	intel_wakeref_t wakeref;
	int err;

	/* Check that we get a callback when requests are indirectly retired */

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(i915);

	err = __live_active_setup(i915, &active);

	/* waits for & retires all requests */
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;

	if (!active.retired) {
		pr_err("i915_active not retired after flushing!\n");
		err = -EINVAL;
	}

	i915_active_fini(&active.base);
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
}
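/*
 * Registration sketch (an assumption, not part of this file): live
 * selftests are enumerated in i915_live_selftests.h, where the entry
 * point below would be listed via the selftest() macro, e.g.:
 *
 *	selftest(active, i915_active_live_selftests)
 *
 * The selftest harness walks that table and invokes each registered
 * entry point in turn, passing in the i915 device.
 */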
int i915_active_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_active_wait),
		SUBTEST(live_active_retire),
	};

	if (i915_terminally_wedged(i915))
		return 0;

	return i915_subtests(tests, i915);
}
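/*
 * Usage sketch (hedged; the exact knobs depend on the kernel config):
 * with CONFIG_DRM_I915_SELFTEST=y, live selftests can be requested at
 * module load via a module parameter, e.g.:
 *
 *	modprobe i915 live_selftests=-1
 *
 * where -1 runs every live selftest and 0 (the default) skips them;
 * per-subtest results are reported in the kernel log.
 */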