/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/sched/signal.h>
#include <trace/events/dma_fence.h>

#include <nvif/if0020.h>

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"

static const struct dma_fence_ops nouveau_fence_ops_uevent;
static const struct dma_fence_ops nouveau_fence_ops_legacy;

static inline struct nouveau_fence *
from_fence(struct dma_fence *fence)
{
	return container_of(fence, struct nouveau_fence, base);
}

static inline struct nouveau_fence_chan *
nouveau_fctx(struct nouveau_fence *fence)
{
	return container_of(fence->base.lock, struct nouveau_fence_chan, lock);
}

static int
nouveau_fence_signal(struct nouveau_fence *fence)
{
	int drop = 0;

	dma_fence_signal_locked(&fence->base);
	list_del(&fence->head);
	rcu_assign_pointer(fence->channel, NULL);

	if (test_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags)) {
		struct nouveau_fence_chan *fctx = nouveau_fctx(fence);

		if (!--fctx->notify_ref)
			drop = 1;
	}

	dma_fence_put(&fence->base);
	return drop;
}

static struct nouveau_fence *
nouveau_local_fence(struct dma_fence *fence, struct nouveau_drm *drm)
{
	if (fence->ops != &nouveau_fence_ops_legacy &&
	    fence->ops != &nouveau_fence_ops_uevent)
		return NULL;

	return from_fence(fence);
}

void
nouveau_fence_context_kill(struct nouveau_fence_chan *fctx, int error)
{
	struct nouveau_fence *fence;
	unsigned long flags;

	spin_lock_irqsave(&fctx->lock, flags);
	while (!list_empty(&fctx->pending)) {
		fence = list_entry(fctx->pending.next, typeof(*fence), head);

		if (error)
			dma_fence_set_error(&fence->base, error);

		if (nouveau_fence_signal(fence))
			nvif_event_block(&fctx->event);
	}
	fctx->killed = 1;
	spin_unlock_irqrestore(&fctx->lock, flags);
}

void
nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
{
	cancel_work_sync(&fctx->uevent_work);
	nouveau_fence_context_kill(fctx, 0);
	nvif_event_dtor(&fctx->event);
	fctx->dead = 1;

	/*
	 * Ensure that all accesses to fence->channel complete before freeing
	 * the channel.
	 */
	synchronize_rcu();
}
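/*
 * Illustrative sketch (not driver code): the RCU pattern used by
 * nouveau_fence_context_del() above, reduced to its two halves. Readers
 * dereference fence->channel only under rcu_read_lock(); the teardown
 * path publishes NULL and waits out a grace period with synchronize_rcu()
 * before the channel memory may be freed. The example_* names are
 * hypothetical.
 */
#if 0
static void example_reader(struct nouveau_fence *fence,
			   struct nouveau_fence_chan *fctx)
{
	struct nouveau_channel *chan;

	rcu_read_lock();
	chan = rcu_dereference(fence->channel);
	if (chan)
		(void)fctx->read(chan);	/* safe: grace period cannot end here */
	rcu_read_unlock();
}

static void example_teardown(struct nouveau_fence *fence)
{
	rcu_assign_pointer(fence->channel, NULL);	/* unpublish */
	synchronize_rcu();				/* wait out all readers */
	/* only now is it safe to free the channel */
}
#endif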
static void
nouveau_fence_context_put(struct kref *fence_ref)
{
	kfree(container_of(fence_ref, struct nouveau_fence_chan, fence_ref));
}

void
nouveau_fence_context_free(struct nouveau_fence_chan *fctx)
{
	kref_put(&fctx->fence_ref, nouveau_fence_context_put);
}

static int
nouveau_fence_update(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
{
	struct nouveau_fence *fence;
	int drop = 0;
	u32 seq = fctx->read(chan);

	while (!list_empty(&fctx->pending)) {
		fence = list_entry(fctx->pending.next, typeof(*fence), head);

		if ((int)(seq - fence->base.seqno) < 0)
			break;

		drop |= nouveau_fence_signal(fence);
	}

	return drop;
}

static void
nouveau_fence_uevent_work(struct work_struct *work)
{
	struct nouveau_fence_chan *fctx = container_of(work, struct nouveau_fence_chan,
						       uevent_work);
	unsigned long flags;
	int drop = 0;

	spin_lock_irqsave(&fctx->lock, flags);
	if (!list_empty(&fctx->pending)) {
		struct nouveau_fence *fence;
		struct nouveau_channel *chan;

		fence = list_entry(fctx->pending.next, typeof(*fence), head);
		chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
		if (nouveau_fence_update(chan, fctx))
			drop = 1;
	}
	if (drop)
		nvif_event_block(&fctx->event);

	spin_unlock_irqrestore(&fctx->lock, flags);
}

static int
nouveau_fence_wait_uevent_handler(struct nvif_event *event, void *repv, u32 repc)
{
	struct nouveau_fence_chan *fctx = container_of(event, typeof(*fctx), event);

	schedule_work(&fctx->uevent_work);
	return NVIF_EVENT_KEEP;
}

void
nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
{
	struct nouveau_fence_priv *priv = (void *)chan->drm->fence;
	struct nouveau_cli *cli = (void *)chan->user.client;
	struct {
		struct nvif_event_v0 base;
		struct nvif_chan_event_v0 host;
	} args;
	int ret;

	INIT_WORK(&fctx->uevent_work, nouveau_fence_uevent_work);
	INIT_LIST_HEAD(&fctx->flip);
	INIT_LIST_HEAD(&fctx->pending);
	spin_lock_init(&fctx->lock);
	fctx->context = chan->drm->runl[chan->runlist].context_base + chan->chid;

	if (chan == chan->drm->cechan)
		strcpy(fctx->name, "copy engine channel");
	else if (chan == chan->drm->channel)
		strcpy(fctx->name, "generic kernel channel");
	else
		strcpy(fctx->name, nvxx_client(&cli->base)->name);

	kref_init(&fctx->fence_ref);
	if (!priv->uevent)
		return;

	args.host.version = 0;
	args.host.type = NVIF_CHAN_EVENT_V0_NON_STALL_INTR;

	ret = nvif_event_ctor(&chan->user, "fenceNonStallIntr", (chan->runlist << 16) | chan->chid,
			      nouveau_fence_wait_uevent_handler, false,
			      &args.base, sizeof(args), &fctx->event);

	WARN_ON(ret);
}

int
nouveau_fence_emit(struct nouveau_fence *fence)
{
	struct nouveau_channel *chan = unrcu_pointer(fence->channel);
	struct nouveau_fence_chan *fctx = chan->fence;
	struct nouveau_fence_priv *priv = (void *)chan->drm->fence;
	int ret;

	fence->timeout = jiffies + (15 * HZ);

	if (priv->uevent)
		dma_fence_init(&fence->base, &nouveau_fence_ops_uevent,
			       &fctx->lock, fctx->context, ++fctx->sequence);
	else
		dma_fence_init(&fence->base, &nouveau_fence_ops_legacy,
			       &fctx->lock, fctx->context, ++fctx->sequence);
	kref_get(&fctx->fence_ref);

	ret = fctx->emit(fence);
	if (!ret) {
		dma_fence_get(&fence->base);
		spin_lock_irq(&fctx->lock);

		if (unlikely(fctx->killed)) {
			spin_unlock_irq(&fctx->lock);
			dma_fence_put(&fence->base);
			return -ENODEV;
		}

		if (nouveau_fence_update(chan, fctx))
			nvif_event_block(&fctx->event);

		list_add_tail(&fence->head, &fctx->pending);
		spin_unlock_irq(&fctx->lock);
	}

	return ret;
}
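/*
 * Illustrative sketch (not driver code): nouveau_fence_update() above tests
 * "(int)(seq - fence->base.seqno) < 0" instead of "seq < seqno" so the
 * comparison stays correct when the 32-bit sequence counter wraps.
 * seqno_passed() is a hypothetical helper spelling out the idiom.
 */
#if 0
static inline bool seqno_passed(u32 hw_seq, u32 fence_seq)
{
	/* true if hw_seq is at or beyond fence_seq, modulo 2^32 */
	return (int)(hw_seq - fence_seq) >= 0;
}

/* e.g. seqno_passed(0x00000002, 0xfffffffe) is true across a wrap */
#endif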
bool
nouveau_fence_done(struct nouveau_fence *fence)
{
	if (fence->base.ops == &nouveau_fence_ops_legacy ||
	    fence->base.ops == &nouveau_fence_ops_uevent) {
		struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
		struct nouveau_channel *chan;
		unsigned long flags;

		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
			return true;

		spin_lock_irqsave(&fctx->lock, flags);
		chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
		if (chan && nouveau_fence_update(chan, fctx))
			nvif_event_block(&fctx->event);
		spin_unlock_irqrestore(&fctx->lock, flags);
	}
	return dma_fence_is_signaled(&fence->base);
}

static long
nouveau_fence_wait_legacy(struct dma_fence *f, bool intr, long wait)
{
	struct nouveau_fence *fence = from_fence(f);
	unsigned long sleep_time = NSEC_PER_MSEC / 1000;
	unsigned long t = jiffies, timeout = t + wait;

	while (!nouveau_fence_done(fence)) {
		ktime_t kt;

		t = jiffies;

		if (wait != MAX_SCHEDULE_TIMEOUT && time_after_eq(t, timeout)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}

		__set_current_state(intr ? TASK_INTERRUPTIBLE :
					   TASK_UNINTERRUPTIBLE);

		kt = sleep_time;
		schedule_hrtimeout(&kt, HRTIMER_MODE_REL);
		sleep_time *= 2;
		if (sleep_time > NSEC_PER_MSEC)
			sleep_time = NSEC_PER_MSEC;

		if (intr && signal_pending(current))
			return -ERESTARTSYS;
	}

	__set_current_state(TASK_RUNNING);

	return timeout - t;
}

static int
nouveau_fence_wait_busy(struct nouveau_fence *fence, bool intr)
{
	int ret = 0;

	while (!nouveau_fence_done(fence)) {
		if (time_after_eq(jiffies, fence->timeout)) {
			ret = -EBUSY;
			break;
		}

		__set_current_state(intr ?
				    TASK_INTERRUPTIBLE :
				    TASK_UNINTERRUPTIBLE);

		if (intr && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}

	__set_current_state(TASK_RUNNING);
	return ret;
}

int
nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
{
	long ret;

	if (!lazy)
		return nouveau_fence_wait_busy(fence, intr);

	ret = dma_fence_wait_timeout(&fence->base, intr, 15 * HZ);
	if (ret < 0)
		return ret;
	else if (!ret)
		return -EBUSY;
	else
		return 0;
}

int
nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
		   bool exclusive, bool intr)
{
	struct nouveau_fence_chan *fctx = chan->fence;
	struct dma_resv *resv = nvbo->bo.base.resv;
	int i, ret;

	ret = dma_resv_reserve_fences(resv, 1);
	if (ret)
		return ret;

	/* Waiting for the writes first causes performance regressions
	 * under some circumstances. So manually wait for the reads first.
	 */
	for (i = 0; i < 2; ++i) {
		struct dma_resv_iter cursor;
		struct dma_fence *fence;

		dma_resv_for_each_fence(&cursor, resv,
					dma_resv_usage_rw(exclusive),
					fence) {
			enum dma_resv_usage usage;
			struct nouveau_fence *f;

			usage = dma_resv_iter_usage(&cursor);
			if (i == 0 && usage == DMA_RESV_USAGE_WRITE)
				continue;

			f = nouveau_local_fence(fence, chan->drm);
			if (f) {
				struct nouveau_channel *prev;
				bool must_wait = true;
				bool local;

				rcu_read_lock();
				prev = rcu_dereference(f->channel);
				local = prev && prev->cli->drm == chan->cli->drm;
				if (local && (prev == chan ||
					      fctx->sync(f, prev, chan) == 0))
					must_wait = false;
				rcu_read_unlock();
				if (!must_wait)
					continue;
			}

			ret = dma_fence_wait(fence, intr);
			if (ret)
				return ret;
		}
	}

	return 0;
}
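/*
 * Illustrative sketch (not driver code): how a caller might combine the
 * functions above: sync the bo against prior users, emit a fence for the
 * new work, then wait for it. Error paths are abbreviated; example_submit()
 * is hypothetical.
 */
#if 0
static int example_submit(struct nouveau_bo *nvbo, struct nouveau_channel *chan)
{
	struct nouveau_fence *fence = NULL;
	int ret;

	/* exclusive=true: wait on both readers and writers of the bo */
	ret = nouveau_fence_sync(nvbo, chan, true, true);
	if (ret)
		return ret;

	/* ... push new work for chan here ... */

	ret = nouveau_fence_new(&fence, chan);	/* create + emit */
	if (ret)
		return ret;

	ret = nouveau_fence_wait(fence, true, true);	/* lazy, interruptible */
	nouveau_fence_unref(&fence);
	return ret;
}
#endif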
void
nouveau_fence_unref(struct nouveau_fence **pfence)
{
	if (*pfence)
		dma_fence_put(&(*pfence)->base);
	*pfence = NULL;
}

int
nouveau_fence_create(struct nouveau_fence **pfence,
		     struct nouveau_channel *chan)
{
	struct nouveau_fence *fence;

	if (unlikely(!chan->fence))
		return -ENODEV;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return -ENOMEM;

	fence->channel = chan;

	*pfence = fence;
	return 0;
}

int
nouveau_fence_new(struct nouveau_fence **pfence,
		  struct nouveau_channel *chan)
{
	int ret = 0;

	ret = nouveau_fence_create(pfence, chan);
	if (ret)
		return ret;

	ret = nouveau_fence_emit(*pfence);
	if (ret)
		nouveau_fence_unref(pfence);

	return ret;
}

static const char *nouveau_fence_get_driver_name(struct dma_fence *fence)
{
	return "nouveau";
}

static const char *nouveau_fence_get_timeline_name(struct dma_fence *f)
{
	struct nouveau_fence *fence = from_fence(f);
	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);

	return !fctx->dead ? fctx->name : "dead channel";
}

/*
 * In an ideal world, read would not assume the channel context is still alive.
 * This function may be called from another device, running into free memory as a
 * result. The drm node should still be there, so we can derive the index from
 * the fence context.
 */
static bool nouveau_fence_is_signaled(struct dma_fence *f)
{
	struct nouveau_fence *fence = from_fence(f);
	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
	struct nouveau_channel *chan;
	bool ret = false;

	rcu_read_lock();
	chan = rcu_dereference(fence->channel);
	if (chan)
		ret = (int)(fctx->read(chan) - fence->base.seqno) >= 0;
	rcu_read_unlock();

	return ret;
}

static bool nouveau_fence_no_signaling(struct dma_fence *f)
{
	struct nouveau_fence *fence = from_fence(f);

	/*
	 * caller should have a reference on the fence,
	 * else fence could get freed here
	 */
	WARN_ON(kref_read(&fence->base.refcount) <= 1);

	/*
	 * This needs uevents to work correctly, but dma_fence_add_callback relies on
	 * being able to enable signaling. It will still get signaled eventually,
	 * just not right away.
	 */
	if (nouveau_fence_is_signaled(f)) {
		list_del(&fence->head);
		dma_fence_put(&fence->base);
		return false;
	}

	return true;
}
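/*
 * Illustrative sketch (not driver code): what the comment in
 * nouveau_fence_no_signaling() alludes to. dma_fence_add_callback() invokes
 * ->enable_signaling() on an unsignaled fence; with the legacy ops the
 * callback is still installed, it just fires only once a wait or a later
 * nouveau_fence_update() notices completion. The example_* names are
 * hypothetical.
 */
#if 0
static void example_cb(struct dma_fence *f, struct dma_fence_cb *cb)
{
	/* runs once the fence is signaled, possibly much later */
}

static void example_add_cb(struct dma_fence *f, struct dma_fence_cb *cb)
{
	/* a non-zero return means the fence had already signaled */
	if (dma_fence_add_callback(f, cb, example_cb))
		example_cb(f, cb);
}
#endif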
static void nouveau_fence_release(struct dma_fence *f)
{
	struct nouveau_fence *fence = from_fence(f);
	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);

	kref_put(&fctx->fence_ref, nouveau_fence_context_put);
	dma_fence_free(&fence->base);
}

static const struct dma_fence_ops nouveau_fence_ops_legacy = {
	.get_driver_name = nouveau_fence_get_driver_name,
	.get_timeline_name = nouveau_fence_get_timeline_name,
	.enable_signaling = nouveau_fence_no_signaling,
	.signaled = nouveau_fence_is_signaled,
	.wait = nouveau_fence_wait_legacy,
	.release = nouveau_fence_release
};

static bool nouveau_fence_enable_signaling(struct dma_fence *f)
{
	struct nouveau_fence *fence = from_fence(f);
	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
	bool ret;

	if (!fctx->notify_ref++)
		nvif_event_allow(&fctx->event);

	ret = nouveau_fence_no_signaling(f);
	if (ret)
		set_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags);
	else if (!--fctx->notify_ref)
		nvif_event_block(&fctx->event);

	return ret;
}

static const struct dma_fence_ops nouveau_fence_ops_uevent = {
	.get_driver_name = nouveau_fence_get_driver_name,
	.get_timeline_name = nouveau_fence_get_timeline_name,
	.enable_signaling = nouveau_fence_enable_signaling,
	.signaled = nouveau_fence_is_signaled,
	.release = nouveau_fence_release
};
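/*
 * Illustrative sketch (not driver code): the notify_ref accounting used by
 * the uevent path. The 0->1 transition unblocks the channel's non-stall
 * interrupt, and the 1->0 transition (in nouveau_fence_signal() or a failed
 * enable above) tells the caller to block it again. The example_* names are
 * hypothetical; fctx->lock is assumed held, as on the real paths.
 */
#if 0
static void example_need_signaling(struct nouveau_fence_chan *fctx)
{
	if (!fctx->notify_ref++)
		nvif_event_allow(&fctx->event);	/* first user: enable IRQ */
}

static bool example_done_signaling(struct nouveau_fence_chan *fctx)
{
	return !--fctx->notify_ref;	/* last user gone: caller blocks IRQ */
}
#endif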