/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#include <linux/dma-fence-array.h>
#include <linux/dma-fence-chain.h>
#include <linux/jiffies.h>

#include "gt/intel_engine.h"
#include "gt/intel_rps.h"

#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"

static long
i915_gem_object_wait_fence(struct dma_fence *fence,
			   unsigned int flags,
			   long timeout)
{
	BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return timeout;

	if (dma_fence_is_i915(fence))
		return i915_request_wait_timeout(to_request(fence), flags, timeout);

	return dma_fence_wait_timeout(fence,
				      flags & I915_WAIT_INTERRUPTIBLE,
				      timeout);
}

static void
i915_gem_object_boost(struct dma_resv *resv, unsigned int flags)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	/*
	 * Prescan all fences for potential boosting before we begin waiting.
	 *
	 * When we wait, we wait on outstanding fences serially. If the
	 * dma-resv contains a sequence such as 1:1, 1:2 instead of a reduced
	 * form 1:2, then as we look at each wait in turn we see that each
	 * request is currently executing and not worthy of boosting. But if
	 * we only happen to look at the final fence in the sequence (because
	 * of request coalescing or splitting between read/write arrays by
	 * the iterator), then we would boost. As such our decision to boost
	 * or not is delicately balanced on the order we wait on fences.
	 *
	 * So instead of looking for boosts sequentially, look for all boosts
	 * upfront and then wait on the outstanding fences.
	 */

	dma_resv_iter_begin(&cursor, resv,
			    dma_resv_usage_rw(flags & I915_WAIT_ALL));
	dma_resv_for_each_fence_unlocked(&cursor, fence)
		if (dma_fence_is_i915(fence) &&
		    !i915_request_started(to_request(fence)))
			intel_rps_boost(to_request(fence));
	dma_resv_iter_end(&cursor);
}
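
/*
 * The waits below run serially: each successful wait returns its remaining
 * timeout, which then becomes the budget for the next fence, so the given
 * timeout bounds the total time spent across all fences rather than each
 * individual wait.
 */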
static long
i915_gem_object_wait_reservation(struct dma_resv *resv,
				 unsigned int flags,
				 long timeout)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	long ret = timeout ?: 1;

	i915_gem_object_boost(resv, flags);

	dma_resv_iter_begin(&cursor, resv,
			    dma_resv_usage_rw(flags & I915_WAIT_ALL));
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		ret = i915_gem_object_wait_fence(fence, flags, timeout);
		if (ret <= 0)
			break;

		if (timeout)
			timeout = ret;
	}
	dma_resv_iter_end(&cursor);

	return ret;
}

static void fence_set_priority(struct dma_fence *fence,
			       const struct i915_sched_attr *attr)
{
	struct i915_request *rq;
	struct intel_engine_cs *engine;

	if (dma_fence_is_signaled(fence) || !dma_fence_is_i915(fence))
		return;

	rq = to_request(fence);
	engine = rq->engine;

	rcu_read_lock(); /* RCU serialisation for set-wedged protection */
	if (engine->sched_engine->schedule)
		engine->sched_engine->schedule(rq, attr);
	rcu_read_unlock();
}

static inline bool __dma_fence_is_chain(const struct dma_fence *fence)
{
	return fence->ops == &dma_fence_chain_ops;
}

void i915_gem_fence_wait_priority(struct dma_fence *fence,
				  const struct i915_sched_attr *attr)
{
	if (dma_fence_is_signaled(fence))
		return;

	local_bh_disable();

	/* Recurse once into a fence-array */
	if (dma_fence_is_array(fence)) {
		struct dma_fence_array *array = to_dma_fence_array(fence);
		int i;

		for (i = 0; i < array->num_fences; i++)
			fence_set_priority(array->fences[i], attr);
	} else if (__dma_fence_is_chain(fence)) {
		struct dma_fence *iter;

		/* The chain is ordered; if we boost the last, we boost all */
		dma_fence_chain_for_each(iter, fence) {
			fence_set_priority(to_dma_fence_chain(iter)->fence,
					   attr);
			break;
		}
		dma_fence_put(iter);
	} else {
		fence_set_priority(fence, attr);
	}

	local_bh_enable(); /* kick the tasklets if queues were reprioritised */
}

int
i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
			      unsigned int flags,
			      const struct i915_sched_attr *attr)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_iter_begin(&cursor, obj->base.resv,
			    dma_resv_usage_rw(flags & I915_WAIT_ALL));
	dma_resv_for_each_fence_unlocked(&cursor, fence)
		i915_gem_fence_wait_priority(fence, attr);
	dma_resv_iter_end(&cursor);
	return 0;
}

/**
 * i915_gem_object_wait - Waits for rendering to the object to be completed
 * @obj: i915 gem object
 * @flags: how to wait (under a lock, for all rendering or just for writes, etc.)
 * @timeout: how long to wait
 */
int
i915_gem_object_wait(struct drm_i915_gem_object *obj,
		     unsigned int flags,
		     long timeout)
{
	might_sleep();
	GEM_BUG_ON(timeout < 0);

	timeout = i915_gem_object_wait_reservation(obj->base.resv,
						   flags, timeout);

	if (timeout < 0)
		return timeout;

	return !timeout ? -ETIME : 0;
}
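
/*
 * Example (sketch): an in-kernel caller that must idle the object before
 * CPU access could wait interruptibly on all outstanding fences with no
 * deadline:
 *
 *	int err = i915_gem_object_wait(obj,
 *				       I915_WAIT_INTERRUPTIBLE |
 *				       I915_WAIT_ALL,
 *				       MAX_SCHEDULE_TIMEOUT);
 *	if (err)
 *		return err; // -ERESTARTSYS if interrupted by a signal
 */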

static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
{
	/* nsecs_to_jiffies64() does not guard against overflow */
	if (NSEC_PER_SEC % HZ &&
	    div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ)
		return MAX_JIFFY_OFFSET;

	/* Round up by one jiffy so we never wait less than requested */
	return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
}

static unsigned long to_wait_timeout(s64 timeout_ns)
{
	if (timeout_ns < 0)
		return MAX_SCHEDULE_TIMEOUT;

	if (timeout_ns == 0)
		return 0;

	return nsecs_to_jiffies_timeout(timeout_ns);
}

/**
 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * Returns 0 if successful, else an error is returned with the remaining time in
 * the timeout parameter.
 * -ETIME: object is still busy after timeout
 * -ERESTARTSYS: signal interrupted the wait
 * -ENOENT: object doesn't exist
 * Also possible, but rare:
 * -EAGAIN: incomplete, restart syscall
 * -ENOMEM: out of memory
 * -ENODEV: Internal IRQ fail
 * -E?: The add request failed
 *
 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
 * non-zero timeout parameter the wait ioctl will wait for the given number of
 * nanoseconds on an object becoming unbusy. Since the wait itself does so
 * without holding struct_mutex the object may become re-busied before this
 * function completes. A similar but shorter race condition exists in the busy
 * ioctl.
 */
int
i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_gem_wait *args = data;
	struct drm_i915_gem_object *obj;
	ktime_t start;
	long ret;

	if (args->flags != 0)
		return -EINVAL;

	obj = i915_gem_object_lookup(file, args->bo_handle);
	if (!obj)
		return -ENOENT;

	start = ktime_get();

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_PRIORITY |
				   I915_WAIT_ALL,
				   to_wait_timeout(args->timeout_ns));

	if (args->timeout_ns > 0) {
		args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
		if (args->timeout_ns < 0)
			args->timeout_ns = 0;

		/*
		 * Apparently ktime isn't accurate enough and occasionally has a
		 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
		 * things up to make the test happy. We allow up to 1 jiffy.
		 *
		 * This is a regression from the timespec->ktime conversion.
		 */
		if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns))
			args->timeout_ns = 0;

		/* Asked to wait beyond the jiffy/scheduler precision? */
		if (ret == -ETIME && args->timeout_ns)
			ret = -EAGAIN;
	}

	i915_gem_object_put(obj);
	return ret;
}

/**
 * i915_gem_object_wait_migration - Sync an accelerated migration operation
 * @obj: The migrating object.
 * @flags: waiting flags. Currently supports only I915_WAIT_INTERRUPTIBLE.
 *
 * Wait for any pending async migration operation on the object,
 * whether it's explicitly (i915_gem_object_migrate()) or implicitly
 * (swapin, initial clearing) initiated.
 *
 * Return: 0 if successful, -ERESTARTSYS if a signal was hit during waiting.
 */
int i915_gem_object_wait_migration(struct drm_i915_gem_object *obj,
				   unsigned int flags)
{
	might_sleep();

	return i915_gem_object_wait_moving_fence(obj, !!(flags & I915_WAIT_INTERRUPTIBLE));
}
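
/*
 * Example (userspace sketch, not part of the driver): a client could drive
 * DRM_IOCTL_I915_GEM_WAIT through libdrm's drmIoctl() roughly as below.
 * "fd" and "handle" are assumed to be an open DRM fd and a valid GEM
 * handle.
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.timeout_ns = 500 * 1000 * 1000, // 500ms; 0 polls, <0 waits forever
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait) == -1 &&
 *	    errno == ETIME) {
 *		// object is still busy; wait.timeout_ns holds the time left
 *	}
 */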