// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2011-2014 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/sched/signal.h>

#include "vmwgfx_drv.h"

#define VMW_FENCE_WRAP (1 << 31)

struct vmw_fence_manager {
	int num_fence_objects;
	struct vmw_private *dev_priv;
	spinlock_t lock;
	struct list_head fence_list;
	struct work_struct work;
	u32 user_fence_size;
	u32 fence_size;
	u32 event_fence_action_size;
	bool fifo_down;
	struct list_head cleanup_list;
	uint32_t pending_actions[VMW_ACTION_MAX];
	struct mutex goal_irq_mutex;
	bool goal_irq_on; /* Protected by @goal_irq_mutex */
	bool seqno_valid; /* Protected by @lock, and may not be set to true
			     without the @goal_irq_mutex held. */
	u64 ctx;
};

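/**
 * struct vmw_user_fence - Fence object visible to user-space
 *
 * @base: The TTM base object through which user-space references the fence.
 * @fence: The embedded fence object.
 */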
struct vmw_user_fence {
	struct ttm_base_object base;
	struct vmw_fence_obj fence;
};

/**
 * struct vmw_event_fence_action - fence action that delivers a drm event.
 *
 * @action: A struct vmw_fence_action to hook up to a fence.
 * @event: A pointer to the pending drm event that controls the event
 * delivery. Set to NULL once the event has been sent.
 * @fence: A referenced pointer to the fence to keep it alive while @action
 * hangs on it.
 * @dev: Pointer to a struct drm_device so we can access the event stuff.
 * @tv_sec: If non-NULL, the variable pointed to will be assigned the
 * current time tv_sec val when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
 * be assigned the current time tv_usec val when the fence signals.
 */
struct vmw_event_fence_action {
	struct vmw_fence_action action;

	struct drm_pending_event *event;
	struct vmw_fence_obj *fence;
	struct drm_device *dev;

	uint32_t *tv_sec;
	uint32_t *tv_usec;
};

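/**
 * fman_from_fence - Return the fence manager a fence object belongs to
 *
 * @fence: The fence object.
 *
 * The embedded dma_fence was initialized with the fence manager's lock,
 * so the manager can be derived from that lock pointer.
 */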
static struct vmw_fence_manager *
fman_from_fence(struct vmw_fence_obj *fence)
{
	return container_of(fence->base.lock, struct vmw_fence_manager, lock);
}

/*
 * Note on fencing subsystem usage of irqs:
 * Typically the vmw_fences_update function is called
 *
 * a) When a new fence seqno has been submitted by the fifo code.
 * b) On-demand when we have waiters. Sleeping waiters will switch on the
 * ANY_FENCE irq and call the vmw_fences_update function each time an
 * ANY_FENCE irq is received. When the last fence waiter is gone, that IRQ
 * is masked away.
 *
 * In situations where there are no waiters and we don't submit any new fences,
 * fence objects may not be signaled. This is perfectly OK, since there are
 * no consumers of the signaled data, but that is NOT OK when there are fence
 * actions attached to a fence. The fencing subsystem then makes use of the
 * FENCE_GOAL irq and sets the fence goal seqno to that of the next fence
 * which has an action attached, and each time vmw_fences_update is called,
 * the subsystem makes sure the fence goal seqno is updated.
 *
 * The fence goal seqno irq is on as long as there are unsignaled fence
 * objects with actions attached to them.
 */

static void vmw_fence_obj_destroy(struct dma_fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);

	spin_lock(&fman->lock);
	list_del_init(&fence->head);
	--fman->num_fence_objects;
	spin_unlock(&fman->lock);
	fence->destroy(fence);
}

static const char *vmw_fence_get_driver_name(struct dma_fence *f)
{
	return "vmwgfx";
}

static const char *vmw_fence_get_timeline_name(struct dma_fence *f)
{
	return "svga";
}

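/**
 * vmw_fence_enable_signaling - dma_fence_ops enable_signaling callback
 *
 * @f: The fence.
 *
 * If the device has not yet passed the fence seqno, ping the host so that
 * the fifo keeps being processed and the fence eventually signals.
 * Returns false if the fence has already signaled.
 */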
static bool vmw_fence_enable_signaling(struct dma_fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;

	u32 *fifo_mem = dev_priv->mmio_virt;
	u32 seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
	if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
		return false;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);

	return true;
}

struct vmwgfx_wait_cb {
	struct dma_fence_cb base;
	struct task_struct *task;
};

static void
vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct vmwgfx_wait_cb *wait =
		container_of(cb, struct vmwgfx_wait_cb, base);

	wake_up_process(wait->task);
}

static void __vmw_fences_update(struct vmw_fence_manager *fman);

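/**
 * vmw_fence_wait - dma_fence_ops wait callback
 *
 * @f: The fence.
 * @intr: Whether the wait should be interruptible.
 * @timeout: Timeout in jiffies.
 *
 * Waits for the fence to signal, re-checking the device seqno on each loop
 * iteration. Returns the remaining timeout, zero if the wait timed out, or
 * -ERESTARTSYS if interrupted by a signal.
 */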
static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;
	struct vmwgfx_wait_cb cb;
	long ret = timeout;

	if (likely(vmw_fence_obj_signaled(fence)))
		return timeout;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	vmw_seqno_waiter_add(dev_priv);

	spin_lock(f->lock);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
		goto out;

	if (intr && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	cb.base.func = vmwgfx_wait_cb;
	cb.task = current;
	list_add(&cb.base.node, &f->cb_list);

	for (;;) {
		__vmw_fences_update(fman);

		/*
		 * We can use the barrier free __set_current_state() since
		 * DMA_FENCE_FLAG_SIGNALED_BIT + wakeup is protected by the
		 * fence spinlock.
		 */
		if (intr)
			__set_current_state(TASK_INTERRUPTIBLE);
		else
			__set_current_state(TASK_UNINTERRUPTIBLE);

		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) {
			if (ret == 0 && timeout > 0)
				ret = 1;
			break;
		}

		if (intr && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (ret == 0)
			break;

		spin_unlock(f->lock);

		ret = schedule_timeout(ret);

		spin_lock(f->lock);
	}
	__set_current_state(TASK_RUNNING);
	if (!list_empty(&cb.base.node))
		list_del(&cb.base.node);

out:
	spin_unlock(f->lock);

	vmw_seqno_waiter_remove(dev_priv);

	return ret;
}

static const struct dma_fence_ops vmw_fence_ops = {
	.get_driver_name = vmw_fence_get_driver_name,
	.get_timeline_name = vmw_fence_get_timeline_name,
	.enable_signaling = vmw_fence_enable_signaling,
	.wait = vmw_fence_wait,
	.release = vmw_fence_obj_destroy,
};


/**
 * vmw_fence_work_func - worker executing deferred fence action cleanup
 *
 * @work: The struct work_struct embedded in the fence manager.
 *
 * Execute signal actions on fences recently signaled.
 * This is done from a workqueue so we don't have to execute
 * signal actions from atomic context.
 */
static void vmw_fence_work_func(struct work_struct *work)
{
	struct vmw_fence_manager *fman =
		container_of(work, struct vmw_fence_manager, work);
	struct list_head list;
	struct vmw_fence_action *action, *next_action;
	bool seqno_valid;

	do {
		INIT_LIST_HEAD(&list);
		mutex_lock(&fman->goal_irq_mutex);

		spin_lock(&fman->lock);
		list_splice_init(&fman->cleanup_list, &list);
		seqno_valid = fman->seqno_valid;
		spin_unlock(&fman->lock);

		if (!seqno_valid && fman->goal_irq_on) {
			fman->goal_irq_on = false;
			vmw_goal_waiter_remove(fman->dev_priv);
		}
		mutex_unlock(&fman->goal_irq_mutex);

		if (list_empty(&list))
			return;

		/*
		 * At this point, only we should be able to manipulate the
		 * list heads of the actions we have on the private list,
		 * hence fman::lock is not held.
		 */

		list_for_each_entry_safe(action, next_action, &list, head) {
			list_del_init(&action->head);
			if (action->cleanup)
				action->cleanup(action);
		}
	} while (1);
}

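/**
 * vmw_fence_manager_init - Allocate and initialize a fence manager
 *
 * @dev_priv: Pointer to the device private structure.
 *
 * Returns the new fence manager or NULL on allocation failure.
 */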
struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
	struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);

	if (unlikely(!fman))
		return NULL;

	fman->dev_priv = dev_priv;
	spin_lock_init(&fman->lock);
	INIT_LIST_HEAD(&fman->fence_list);
	INIT_LIST_HEAD(&fman->cleanup_list);
	INIT_WORK(&fman->work, &vmw_fence_work_func);
	fman->fifo_down = true;
	fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)) +
		TTM_OBJ_EXTRA_SIZE;
	fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
	fman->event_fence_action_size =
		ttm_round_pot(sizeof(struct vmw_event_fence_action));
	mutex_init(&fman->goal_irq_mutex);
	fman->ctx = dma_fence_context_alloc(1);

	return fman;
}

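/**
 * vmw_fence_manager_takedown - Tear down and free a fence manager
 *
 * @fman: Pointer to the fence manager.
 *
 * Cancels any pending work. Both the fence list and the cleanup list
 * must be empty at this point.
 */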
void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
	bool lists_empty;

	(void) cancel_work_sync(&fman->work);

	spin_lock(&fman->lock);
	lists_empty = list_empty(&fman->fence_list) &&
		list_empty(&fman->cleanup_list);
	spin_unlock(&fman->lock);

	BUG_ON(!lists_empty);
	kfree(fman);
}

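/**
 * vmw_fence_obj_init - Initialize a fence object and add it to the manager
 *
 * @fman: Pointer to the fence manager.
 * @fence: The fence object to initialize.
 * @seqno: The seqno this fence will signal on.
 * @destroy: Destructor called when the last fence reference is dropped.
 *
 * Returns -EBUSY if the fifo is down, zero otherwise.
 */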
static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
			      struct vmw_fence_obj *fence, u32 seqno,
			      void (*destroy) (struct vmw_fence_obj *fence))
{
	int ret = 0;

	dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
		       fman->ctx, seqno);
	INIT_LIST_HEAD(&fence->seq_passed_actions);
	fence->destroy = destroy;

	spin_lock(&fman->lock);
	if (unlikely(fman->fifo_down)) {
		ret = -EBUSY;
		goto out_unlock;
	}
	list_add_tail(&fence->head, &fman->fence_list);
	++fman->num_fence_objects;

out_unlock:
	spin_unlock(&fman->lock);
	return ret;
}

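/**
 * vmw_fences_perform_actions - Run the seq_passed callbacks of fence actions
 *
 * @fman: Pointer to the fence manager.
 * @list: List of actions attached to fences that have now passed.
 *
 * Called with the fence manager lock held. Each action is then moved to the
 * cleanup list so that its cleanup callback, if any, is run by the worker.
 */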
static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
				struct list_head *list)
{
	struct vmw_fence_action *action, *next_action;

	list_for_each_entry_safe(action, next_action, list, head) {
		list_del_init(&action->head);
		fman->pending_actions[action->type]--;
		if (action->seq_passed != NULL)
			action->seq_passed(action);

		/*
		 * Add the cleanup action to the cleanup list so that
		 * it will be performed by a worker task.
		 */

		list_add_tail(&action->head, &fman->cleanup_list);
	}
}

/**
 * vmw_fence_goal_new_locked - Figure out a new device fence goal
 * seqno if needed.
 *
 * @fman: Pointer to a fence manager.
 * @passed_seqno: The seqno the device currently signals as passed.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when we have a new passed_seqno, and
 * we might need to update the fence goal. It checks to see whether
 * the current fence goal has already passed, and, in that case,
 * scans through all unsignaled fences to get the next fence object with an
 * action attached, and sets the seqno of that fence as a new fence goal.
 *
 * Return: true if the device goal seqno was updated, false otherwise.
 */
static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
				      u32 passed_seqno)
{
	u32 goal_seqno;
	u32 *fifo_mem;
	struct vmw_fence_obj *fence;

	if (likely(!fman->seqno_valid))
		return false;

	fifo_mem = fman->dev_priv->mmio_virt;
	goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
	if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
		return false;

	fman->seqno_valid = false;
	list_for_each_entry(fence, &fman->fence_list, head) {
		if (!list_empty(&fence->seq_passed_actions)) {
			fman->seqno_valid = true;
			vmw_mmio_write(fence->base.seqno,
				       fifo_mem + SVGA_FIFO_FENCE_GOAL);
			break;
		}
	}

	return true;
}


/**
 * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
 * needed.
 *
 * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
 * considered as a device fence goal.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when an action has been attached to a fence to
 * check whether the seqno of that fence should be used for a fence
 * goal interrupt. This is typically needed if the current fence goal is
 * invalid, or has a higher seqno than that of the current fence object.
 *
 * Return: true if the device goal seqno was updated, false otherwise.
 */
static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	u32 goal_seqno;
	u32 *fifo_mem;

	if (dma_fence_is_signaled_locked(&fence->base))
		return false;

	fifo_mem = fman->dev_priv->mmio_virt;
	goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
	if (likely(fman->seqno_valid &&
		   goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
		return false;

	vmw_mmio_write(fence->base.seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
	fman->seqno_valid = true;

	return true;
}

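/**
 * __vmw_fences_update - Traverse the fence list and signal passed fences
 *
 * @fman: Pointer to the fence manager.
 *
 * Called with the fence manager lock held. Reads the last seqno the device
 * has passed, signals all fences up to that seqno and runs their actions,
 * and reruns if updating the fence goal may have raced with the hardware.
 */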
static void __vmw_fences_update(struct vmw_fence_manager *fman)
{
	struct vmw_fence_obj *fence, *next_fence;
	struct list_head action_list;
	bool needs_rerun;
	uint32_t seqno, new_seqno;
	u32 *fifo_mem = fman->dev_priv->mmio_virt;

	seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
rerun:
	list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
		if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
			list_del_init(&fence->head);
			dma_fence_signal_locked(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		} else
			break;
	}

	/*
	 * Rerun if the fence goal seqno was updated, and the
	 * hardware might have raced with that update, so that
	 * we missed a fence_goal irq.
	 */

	needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
	if (unlikely(needs_rerun)) {
		new_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
		if (new_seqno != seqno) {
			seqno = new_seqno;
			goto rerun;
		}
	}

	if (!list_empty(&fman->cleanup_list))
		(void) schedule_work(&fman->work);
}

void vmw_fences_update(struct vmw_fence_manager *fman)
{
	spin_lock(&fman->lock);
	__vmw_fences_update(fman);
	spin_unlock(&fman->lock);
}

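/**
 * vmw_fence_obj_signaled - Check whether a fence object has signaled
 *
 * @fence: The fence object.
 *
 * If the fence hasn't been flagged as signaled yet, the fence manager
 * state is first refreshed from the device seqno.
 */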
bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
		return true;

	vmw_fences_update(fman);

	return dma_fence_is_signaled(&fence->base);
}

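/**
 * vmw_fence_obj_wait - Wait for a fence object to signal
 *
 * @fence: The fence object.
 * @lazy: Unused.
 * @interruptible: Whether the wait should be interruptible.
 * @timeout: Timeout in jiffies.
 *
 * Returns 0 on success, -EBUSY if the wait timed out, or a negative error
 * code on failure.
 */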
int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
		       bool interruptible, unsigned long timeout)
{
	long ret = dma_fence_wait_timeout(&fence->base, interruptible, timeout);

	if (likely(ret > 0))
		return 0;
	else if (ret == 0)
		return -EBUSY;
	else
		return ret;
}

void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
{
	struct vmw_private *dev_priv = fman_from_fence(fence)->dev_priv;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
}

static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
	dma_fence_free(&fence->base);
}

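/**
 * vmw_fence_create - Create a fence object
 *
 * @fman: Pointer to the fence manager.
 * @seqno: The seqno this fence will signal on.
 * @p_fence: Assigned the new fence object on success.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or -EBUSY if the
 * fifo is down.
 */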
int vmw_fence_create(struct vmw_fence_manager *fman,
		     uint32_t seqno,
		     struct vmw_fence_obj **p_fence)
{
	struct vmw_fence_obj *fence;
	int ret;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (unlikely(!fence))
		return -ENOMEM;

	ret = vmw_fence_obj_init(fman, fence, seqno,
				 vmw_fence_destroy);
	if (unlikely(ret != 0))
		goto out_err_init;

	*p_fence = fence;
	return 0;

out_err_init:
	kfree(fence);
	return ret;
}


static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
{
	struct vmw_user_fence *ufence =
		container_of(fence, struct vmw_user_fence, fence);
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	ttm_base_object_kfree(ufence, base);
	/*
	 * Free kernel space accounting.
	 */
	ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
			    fman->user_fence_size);
}

static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_fence *ufence =
		container_of(base, struct vmw_user_fence, base);
	struct vmw_fence_obj *fence = &ufence->fence;

	*p_base = NULL;
	vmw_fence_obj_unreference(&fence);
}

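/**
 * vmw_user_fence_create - Create a fence object with a user-space handle
 *
 * @file_priv: The file for which the handle is created.
 * @fman: Pointer to the fence manager.
 * @seqno: The seqno this fence will signal on.
 * @p_fence: Assigned the new fence object on success.
 * @p_handle: Assigned the new user-space handle on success.
 *
 * The fence is accounted against the caller's TTM memory global and is
 * exposed to user-space as a TTM base object.
 */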
int vmw_user_fence_create(struct drm_file *file_priv,
			  struct vmw_fence_manager *fman,
			  uint32_t seqno,
			  struct vmw_fence_obj **p_fence,
			  uint32_t *p_handle)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_user_fence *ufence;
	struct vmw_fence_obj *tmp;
	struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	int ret;

	/*
	 * Kernel memory space accounting, since this object may
	 * be created by a user-space request.
	 */

	ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
				   &ctx);
	if (unlikely(ret != 0))
		return ret;

	ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
	if (unlikely(!ufence)) {
		ret = -ENOMEM;
		goto out_no_object;
	}

	ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
				 vmw_user_fence_destroy);
	if (unlikely(ret != 0)) {
		kfree(ufence);
		goto out_no_object;
	}

	/*
	 * The base object holds a reference which is freed in
	 * vmw_user_fence_base_release.
	 */
	tmp = vmw_fence_obj_reference(&ufence->fence);
	ret = ttm_base_object_init(tfile, &ufence->base, false,
				   VMW_RES_FENCE,
				   &vmw_user_fence_base_release, NULL);


	if (unlikely(ret != 0)) {
		/*
		 * Free the base object's reference
		 */
		vmw_fence_obj_unreference(&tmp);
		goto out_err;
	}

	*p_fence = &ufence->fence;
	*p_handle = ufence->base.handle;

	return 0;
out_err:
	tmp = &ufence->fence;
	vmw_fence_obj_unreference(&tmp);
out_no_object:
	ttm_mem_global_free(mem_glob, fman->user_fence_size);
	return ret;
}



/**
 * vmw_wait_dma_fence - Wait for a dma fence
 *
 * @fman: Pointer to a fence manager.
 * @fence: The DMA fence to wait on.
 *
 * This function handles the case when the fence is actually a fence
 * array. If that's the case, it'll wait on each of the child fences.
 */
int vmw_wait_dma_fence(struct vmw_fence_manager *fman,
		       struct dma_fence *fence)
{
	struct dma_fence_array *fence_array;
	int ret = 0;
	int i;


	if (dma_fence_is_signaled(fence))
		return 0;

	if (!dma_fence_is_array(fence))
		return dma_fence_wait(fence, true);

	/* From i915: Note that if the fence-array was created in
	 * signal-on-any mode, we should *not* decompose it into its individual
	 * fences. However, we don't currently store which mode the fence-array
	 * is operating in. Fortunately, the only user of signal-on-any is
	 * private to amdgpu and we should not see any incoming fence-array
	 * from sync-file being in signal-on-any mode.
	 */

	fence_array = to_dma_fence_array(fence);
	for (i = 0; i < fence_array->num_fences; i++) {
		struct dma_fence *child = fence_array->fences[i];

		ret = dma_fence_wait(child, true);

		if (ret < 0)
			return ret;
	}

	return 0;
}



/**
 * vmw_fence_fifo_down - signal all unsignaled fence objects.
 *
 * @fman: Pointer to the fence manager.
 */
void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
{
	struct list_head action_list;
	int ret;

	/*
	 * The list may be altered while we traverse it, so always
	 * restart when we've released the fman->lock.
	 */

	spin_lock(&fman->lock);
	fman->fifo_down = true;
	while (!list_empty(&fman->fence_list)) {
		struct vmw_fence_obj *fence =
			list_entry(fman->fence_list.prev, struct vmw_fence_obj,
				   head);
		dma_fence_get(&fence->base);
		spin_unlock(&fman->lock);

		ret = vmw_fence_obj_wait(fence, false, false,
					 VMW_FENCE_WAIT_TIMEOUT);

		if (unlikely(ret != 0)) {
			list_del_init(&fence->head);
			dma_fence_signal(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		}

		BUG_ON(!list_empty(&fence->head));
		dma_fence_put(&fence->base);
		spin_lock(&fman->lock);
	}
	spin_unlock(&fman->lock);
}

void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
{
	spin_lock(&fman->lock);
	fman->fifo_down = false;
	spin_unlock(&fman->lock);
}


/**
 * vmw_fence_obj_lookup - Look up a user-space fence object
 *
 * @tfile: A struct ttm_object_file identifying the caller.
 * @handle: A handle identifying the fence object.
 *
 * The fence object is looked up and type-checked. The caller needs
 * to have opened the fence object first, but since that happens on
 * creation and fence objects aren't shareable, that's not an
 * issue currently.
 *
 * Return: A struct vmw_user_fence base ttm object on success or
 * an error pointer on failure.
 */
static struct ttm_base_object *
vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
{
	struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);

	if (!base) {
		pr_err("Invalid fence object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return ERR_PTR(-EINVAL);
	}

	if (base->refcount_release != vmw_user_fence_base_release) {
		pr_err("Invalid fence object handle 0x%08lx.\n",
		       (unsigned long)handle);
		ttm_base_object_unref(&base);
		return ERR_PTR(-EINVAL);
	}

	return base;
}


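/**
 * vmw_fence_obj_wait_ioctl - Wait ioctl for user-space fence objects
 *
 * @dev: The drm device.
 * @data: The ioctl argument, a struct drm_vmw_fence_wait_arg.
 * @file_priv: The calling file.
 *
 * The first call converts the user-supplied timeout into an absolute
 * deadline cookie, so that a wait restarted after a signal doesn't extend
 * the total wait time.
 */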
int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_vmw_fence_wait_arg *arg =
	    (struct drm_vmw_fence_wait_arg *)data;
	unsigned long timeout;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;
	uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);

	/*
	 * 64-bit division not present on 32-bit systems, so do an
	 * approximation. (Divide by 1000000).
	 */

	wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
	  (wait_timeout >> 26);

	if (!arg->cookie_valid) {
		arg->cookie_valid = 1;
		arg->kernel_cookie = jiffies + wait_timeout;
	}

	base = vmw_fence_obj_lookup(tfile, arg->handle);
	if (IS_ERR(base))
		return PTR_ERR(base);

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);

	timeout = jiffies;
	if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
		ret = ((vmw_fence_obj_signaled(fence)) ?
		       0 : -EBUSY);
		goto out;
	}

	timeout = (unsigned long)arg->kernel_cookie - timeout;

	ret = vmw_fence_obj_wait(fence, arg->lazy, true, timeout);

out:
	ttm_base_object_unref(&base);

	/*
	 * Optionally unref the fence object.
	 */

	if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
		return ttm_ref_object_base_unref(tfile, arg->handle,
						 TTM_REF_USAGE);
	return ret;
}

int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_vmw_fence_signaled_arg *arg =
		(struct drm_vmw_fence_signaled_arg *) data;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct vmw_fence_manager *fman;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);

	base = vmw_fence_obj_lookup(tfile, arg->handle);
	if (IS_ERR(base))
		return PTR_ERR(base);

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
	fman = fman_from_fence(fence);

	arg->signaled = vmw_fence_obj_signaled(fence);

	arg->signaled_flags = arg->flags;
	spin_lock(&fman->lock);
	arg->passed_seqno = dev_priv->last_read_seqno;
	spin_unlock(&fman->lock);

	ttm_base_object_unref(&base);

	return 0;
}


int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_fence_arg *arg =
		(struct drm_vmw_fence_arg *) data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

/**
 * vmw_event_fence_action_seq_passed - Deliver a drm event when a fence passes
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is called when the seqno of the fence where @action is
 * attached has passed. It queues the event on the submitter's event list.
 * This function is always called from atomic context.
 */
static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);
	struct drm_device *dev = eaction->dev;
	struct drm_pending_event *event = eaction->event;

	if (unlikely(event == NULL))
		return;

	spin_lock_irq(&dev->event_lock);

	if (likely(eaction->tv_sec != NULL)) {
		struct timespec64 ts;

		ktime_get_ts64(&ts);
		/* monotonic time, so no y2038 overflow */
		*eaction->tv_sec = ts.tv_sec;
		*eaction->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
	}

	drm_send_event_locked(dev, eaction->event);
	eaction->event = NULL;
	spin_unlock_irq(&dev->event_lock);
}

/**
 * vmw_event_fence_action_cleanup - Destroy an event fence action
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is the struct vmw_fence_action destructor. It's typically
 * called from a workqueue.
 */
static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);

	vmw_fence_obj_unreference(&eaction->fence);
	kfree(eaction);
}


/**
 * vmw_fence_obj_add_action - Add an action to a fence object.
 *
 * @fence: The fence object.
 * @action: The action to add.
 *
 * Note that the action callbacks may be executed before this function
 * returns.
 */
static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
			      struct vmw_fence_action *action)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	bool run_update = false;

	mutex_lock(&fman->goal_irq_mutex);
	spin_lock(&fman->lock);

	fman->pending_actions[action->type]++;
	if (dma_fence_is_signaled_locked(&fence->base)) {
		struct list_head action_list;

		INIT_LIST_HEAD(&action_list);
		list_add_tail(&action->head, &action_list);
		vmw_fences_perform_actions(fman, &action_list);
	} else {
		list_add_tail(&action->head, &fence->seq_passed_actions);

		/*
		 * This function may set fman::seqno_valid, so it must
		 * be run with the goal_irq_mutex held.
		 */
		run_update = vmw_fence_goal_check_locked(fence);
	}

	spin_unlock(&fman->lock);

	if (run_update) {
		if (!fman->goal_irq_on) {
			fman->goal_irq_on = true;
			vmw_goal_waiter_add(fman->dev_priv);
		}
		vmw_fences_update(fman);
	}
	mutex_unlock(&fman->goal_irq_mutex);
}

/**
 * vmw_event_fence_action_queue - Post an event for sending when a fence
 * object seqno has passed.
 *
 * @file_priv: The file connection on which the event should be posted.
 * @fence: The fence object on which to post the event.
 * @event: Event to be posted. This event should've been alloced
 * using k[mz]alloc, and should've been completely initialized.
 * @tv_sec: If non-NULL, the variable pointed to will be assigned the
 * current time tv_sec val when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
 * be assigned the current time tv_usec val when the fence signals.
 * @interruptible: Interruptible waits if possible.
 *
 * As a side effect, the object pointed to by @event may have been
 * freed when this function returns. If this function returns with
 * an error code, the caller needs to free that object.
 */
int vmw_event_fence_action_queue(struct drm_file *file_priv,
				 struct vmw_fence_obj *fence,
				 struct drm_pending_event *event,
				 uint32_t *tv_sec,
				 uint32_t *tv_usec,
				 bool interruptible)
{
	struct vmw_event_fence_action *eaction;
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
	if (unlikely(!eaction))
		return -ENOMEM;

	eaction->event = event;

	eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
	eaction->action.cleanup = vmw_event_fence_action_cleanup;
	eaction->action.type = VMW_ACTION_EVENT;

	eaction->fence = vmw_fence_obj_reference(fence);
	eaction->dev = fman->dev_priv->dev;
	eaction->tv_sec = tv_sec;
	eaction->tv_usec = tv_usec;

	vmw_fence_obj_add_action(fence, &eaction->action);

	return 0;
}

struct vmw_event_fence_pending {
	struct drm_pending_event base;
	struct drm_vmw_event_fence event;
};

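/**
 * vmw_event_fence_action_create - Allocate a drm event and queue it on a fence
 *
 * @file_priv: The file connection on which the event should be posted.
 * @fence: The fence object on which to post the event.
 * @flags: Fence event flags. If DRM_VMW_FE_FLAG_REQ_TIME is set, the event
 * carries a timestamp taken when the fence signals.
 * @user_data: User data copied into the event.
 * @interruptible: Interruptible waits if possible.
 */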
static int vmw_event_fence_action_create(struct drm_file *file_priv,
				  struct vmw_fence_obj *fence,
				  uint32_t flags,
				  uint64_t user_data,
				  bool interruptible)
{
	struct vmw_event_fence_pending *event;
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct drm_device *dev = fman->dev_priv->dev;
	int ret;

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (unlikely(!event)) {
		DRM_ERROR("Failed to allocate an event.\n");
		ret = -ENOMEM;
		goto out_no_space;
	}

	event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
	event->event.base.length = sizeof(*event);
	event->event.user_data = user_data;

	ret = drm_event_reserve_init(dev, file_priv, &event->base, &event->event.base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate event space for this file.\n");
		kfree(event);
		goto out_no_space;
	}

	if (flags & DRM_VMW_FE_FLAG_REQ_TIME)
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   &event->event.tv_sec,
						   &event->event.tv_usec,
						   interruptible);
	else
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   NULL,
						   NULL,
						   interruptible);
	if (ret != 0)
		goto out_no_queue;

	return 0;

out_no_queue:
	drm_event_cancel_free(dev, &event->base);
out_no_space:
	return ret;
}

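/**
 * vmw_fence_event_ioctl - Ioctl attaching a drm event to a fence object
 *
 * @dev: The drm device.
 * @data: The ioctl argument, a struct drm_vmw_fence_event_arg.
 * @file_priv: The calling file.
 *
 * Looks up or creates a fence object and queues a drm event that will be
 * sent when the fence signals. Optionally also returns a new user-space
 * reference to the fence.
 */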
int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_fence_event_arg *arg =
		(struct drm_vmw_fence_event_arg *) data;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct ttm_object_file *tfile = vmw_fp->tfile;
	struct drm_vmw_fence_rep __user *user_fence_rep =
		(struct drm_vmw_fence_rep __user *)(unsigned long)
		arg->fence_rep;
	uint32_t handle;
	int ret;

	/*
	 * Look up an existing fence object,
	 * and if user-space wants a new reference,
	 * add one.
	 */
	if (arg->handle) {
		struct ttm_base_object *base =
			vmw_fence_obj_lookup(tfile, arg->handle);

		if (IS_ERR(base))
			return PTR_ERR(base);

		fence = &(container_of(base, struct vmw_user_fence,
				       base)->fence);
		(void) vmw_fence_obj_reference(fence);

		if (user_fence_rep != NULL) {
			ret = ttm_ref_object_add(vmw_fp->tfile, base,
						 TTM_REF_USAGE, NULL, false);
			if (unlikely(ret != 0)) {
				DRM_ERROR("Failed to reference a fence "
					  "object.\n");
				goto out_no_ref_obj;
			}
			handle = base->handle;
		}
		ttm_base_object_unref(&base);
	}

	/*
	 * Create a new fence object.
	 */
	if (!fence) {
		ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
						 &fence,
						 (user_fence_rep) ?
						 &handle : NULL);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Fence event failed to create fence.\n");
			return ret;
		}
	}

	BUG_ON(fence == NULL);

	ret = vmw_event_fence_action_create(file_priv, fence,
					    arg->flags,
					    arg->user_data,
					    true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Failed to attach event to fence.\n");
		goto out_no_create;
	}

	vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
				    handle, -1, NULL);
	vmw_fence_obj_unreference(&fence);
	return 0;
out_no_create:
	if (user_fence_rep != NULL)
		ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
out_no_ref_obj:
	vmw_fence_obj_unreference(&fence);
	return ret;
}