/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/workqueue.h>

#include "i915_drv.h" /* for_each_engine() */
#include "i915_request.h"
#include "intel_engine_heartbeat.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_timeline.h"

static bool retire_requests(struct intel_timeline *tl)
{
	struct i915_request *rq, *rn;

	list_for_each_entry_safe(rq, rn, &tl->requests, link)
		if (!i915_request_retire(rq))
			return false;

	/* And check nothing new was submitted */
	return !i915_active_fence_isset(&tl->last_request);
}

static bool flush_submission(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	bool active = false;

	if (!intel_gt_pm_is_awake(gt))
		return false;

	for_each_engine(engine, gt, id) {
		intel_engine_flush_submission(engine);
		active |= flush_work(&engine->retire_work);
		active |= flush_work(&engine->wakeref.work);
	}

	return active;
}

static void engine_retire(struct work_struct *work)
{
	struct intel_engine_cs *engine =
		container_of(work, typeof(*engine), retire_work);
	struct intel_timeline *tl = xchg(&engine->retire, NULL);

	do {
		struct intel_timeline *next = xchg(&tl->retire, NULL);

		/*
		 * Our goal here is to retire _idle_ timelines as soon as
		 * possible (as they are idle, we do not expect userspace
		 * to be cleaning up anytime soon).
		 *
		 * If the timeline is currently locked, either it is being
		 * retired elsewhere or about to be!
		 */
		if (mutex_trylock(&tl->mutex)) {
			retire_requests(tl);
			mutex_unlock(&tl->mutex);
		}
		intel_timeline_put(tl);

		GEM_BUG_ON(!next);
		tl = ptr_mask_bits(next, 1);
	} while (tl);
}

static bool add_retire(struct intel_engine_cs *engine,
		       struct intel_timeline *tl)
{
#define STUB ((struct intel_timeline *)1)
	struct intel_timeline *first;

	/*
	 * We open-code a llist here to include the additional tag [BIT(0)]
	 * so that we know when the timeline is already on a
	 * retirement queue: either this engine or another.
	 */

	if (cmpxchg(&tl->retire, NULL, STUB)) /* already queued */
		return false;

	intel_timeline_get(tl);
	first = READ_ONCE(engine->retire);
	do
		tl->retire = ptr_pack_bits(first, 1, 1);
	while (!try_cmpxchg(&engine->retire, &first, tl));

	return !first;
}
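
/*
 * Queue @tl on the engine's lock-free retirement list and, only if the
 * list was previously empty, kick the retire worker. A timeline that is
 * already queued (on this engine or another) is left untouched.
 */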
void intel_engine_add_retire(struct intel_engine_cs *engine,
			     struct intel_timeline *tl)
{
	if (add_retire(engine, tl))
		schedule_work(&engine->retire_work);
}

void intel_engine_init_retire(struct intel_engine_cs *engine)
{
	INIT_WORK(&engine->retire_work, engine_retire);
}

void intel_engine_fini_retire(struct intel_engine_cs *engine)
{
	flush_work(&engine->retire_work);
	GEM_BUG_ON(engine->retire);
}

long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
{
	struct intel_gt_timelines *timelines = &gt->timelines;
	struct intel_timeline *tl, *tn;
	unsigned long active_count = 0;
	bool interruptible;
	LIST_HEAD(free);

	interruptible = true;
	if (unlikely(timeout < 0))
		timeout = -timeout, interruptible = false;

	flush_submission(gt); /* kick the ksoftirqd tasklets */
	spin_lock(&timelines->lock);
	list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
		if (!mutex_trylock(&tl->mutex)) {
			active_count++; /* report busy to caller, try again? */
			continue;
		}

		intel_timeline_get(tl);
		GEM_BUG_ON(!atomic_read(&tl->active_count));
		atomic_inc(&tl->active_count); /* pin the list element */
		spin_unlock(&timelines->lock);

		if (timeout > 0) {
			struct dma_fence *fence;

			fence = i915_active_fence_get(&tl->last_request);
			if (fence) {
				timeout = dma_fence_wait_timeout(fence,
								 interruptible,
								 timeout);
				dma_fence_put(fence);
			}
		}

		if (!retire_requests(tl) || flush_submission(gt))
			active_count++;

		spin_lock(&timelines->lock);

		/* Resume iteration after dropping lock */
		list_safe_reset_next(tl, tn, link);
		if (atomic_dec_and_test(&tl->active_count))
			list_del(&tl->link);

		mutex_unlock(&tl->mutex);

		/* Defer the final release to after the spinlock */
		if (refcount_dec_and_test(&tl->kref.refcount)) {
			GEM_BUG_ON(atomic_read(&tl->active_count));
			list_add(&tl->link, &free);
		}
	}
	spin_unlock(&timelines->lock);

	list_for_each_entry_safe(tl, tn, &free, link)
		__intel_timeline_free(&tl->kref);

	return active_count ? timeout : 0;
}
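
/*
 * Retire requests in a loop until the GT is idle or the timeout is
 * exhausted, yielding between passes. Returns 0 once idle (or when the
 * timeout expires), or a negative error code such as -EINTR if a signal
 * interrupts the wait.
 */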
int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout)
{
	/* If the device is asleep, we have no requests outstanding */
	if (!intel_gt_pm_is_awake(gt))
		return 0;

	while ((timeout = intel_gt_retire_requests_timeout(gt, timeout)) > 0) {
		cond_resched();
		if (signal_pending(current))
			return -EINTR;
	}

	return timeout;
}

static void retire_work_handler(struct work_struct *work)
{
	struct intel_gt *gt =
		container_of(work, typeof(*gt), requests.retire_work.work);

	schedule_delayed_work(&gt->requests.retire_work,
			      round_jiffies_up_relative(HZ));
	intel_gt_retire_requests(gt);
}

void intel_gt_init_requests(struct intel_gt *gt)
{
	INIT_DELAYED_WORK(&gt->requests.retire_work, retire_work_handler);
}

void intel_gt_park_requests(struct intel_gt *gt)
{
	cancel_delayed_work(&gt->requests.retire_work);
}

void intel_gt_unpark_requests(struct intel_gt *gt)
{
	schedule_delayed_work(&gt->requests.retire_work,
			      round_jiffies_up_relative(HZ));
}

void intel_gt_fini_requests(struct intel_gt *gt)
{
	/* Wait until the work is marked as finished before unloading! */
	cancel_delayed_work_sync(&gt->requests.retire_work);
}
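
/*
 * A minimal sketch of how these entry points are typically wired into the
 * GT power-management lifecycle (hypothetical caller, shown only for
 * illustration):
 *
 *	intel_gt_init_requests(gt);	// driver load: prepare the worker
 *	intel_gt_unpark_requests(gt);	// GT unparked: arm the ~1Hz retire worker
 *	intel_gt_park_requests(gt);	// GT parked/idle: stop background retiring
 *	intel_gt_fini_requests(gt);	// driver unload: flush pending work
 */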