// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2011-2014 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/drmP.h>
#include "vmwgfx_drv.h"

#define VMW_FENCE_WRAP (1 << 31)

struct vmw_fence_manager {
	int num_fence_objects;
	struct vmw_private *dev_priv;
	spinlock_t lock;
	struct list_head fence_list;
	struct work_struct work;
	u32 user_fence_size;
	u32 fence_size;
	u32 event_fence_action_size;
	bool fifo_down;
	struct list_head cleanup_list;
	uint32_t pending_actions[VMW_ACTION_MAX];
	struct mutex goal_irq_mutex;
	bool goal_irq_on; /* Protected by @goal_irq_mutex */
	bool seqno_valid; /* Protected by @lock, and may not be set to true
			     without the @goal_irq_mutex held. */
	u64 ctx;
};

struct vmw_user_fence {
	struct ttm_base_object base;
	struct vmw_fence_obj fence;
};

/**
 * struct vmw_event_fence_action - fence action that delivers a drm event.
 *
 * @action: A struct vmw_fence_action to hook up to a fence.
 * @event: A pointer to the pending event. Set to NULL once the event has
 * been delivered.
 * @fence: A referenced pointer to the fence to keep it alive while @action
 * hangs on it.
 * @dev: Pointer to a struct drm_device so we can access the event stuff.
 * @tv_sec: If non-null, the variable pointed to will be assigned the
 * current time tv_sec val when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
 * be assigned the current time tv_usec val when the fence signals.
 */
struct vmw_event_fence_action {
	struct vmw_fence_action action;

	struct drm_pending_event *event;
	struct vmw_fence_obj *fence;
	struct drm_device *dev;

	uint32_t *tv_sec;
	uint32_t *tv_usec;
};

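/*
 * fman_from_fence - Return the fence manager owning @fence.
 *
 * Relies on dma_fence_init() having been given &fman->lock as the fence
 * lock in vmw_fence_obj_init(), so the manager can be recovered with
 * container_of().
 */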
static struct vmw_fence_manager *
fman_from_fence(struct vmw_fence_obj *fence)
{
	return container_of(fence->base.lock, struct vmw_fence_manager, lock);
}

/*
 * Note on fencing subsystem usage of irqs:
 * Typically the vmw_fences_update function is called
 *
 * a) When a new fence seqno has been submitted by the fifo code.
 * b) On-demand when we have waiters. Sleeping waiters will switch on the
 * ANY_FENCE irq and call the vmw_fences_update function each time an
 * ANY_FENCE irq is received. When the last fence waiter is gone, that irq
 * is masked away.
 *
 * In situations where there are no waiters and we don't submit any new
 * fences, fence objects may not be signaled. This is perfectly OK, since
 * there are no consumers of the signaled data, but that is NOT OK when
 * there are fence actions attached to a fence. The fencing subsystem then
 * makes use of the FENCE_GOAL irq and sets the fence goal seqno to that of
 * the next fence which has an action attached, and each time
 * vmw_fences_update is called, the subsystem makes sure the fence goal
 * seqno is updated.
 *
 * The fence goal seqno irq is on as long as there are unsignaled fence
 * objects with actions attached to them.
 */

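/*
 * vmw_fence_obj_destroy - dma_fence release callback.
 *
 * Unlinks the fence from the manager's fence list and hands the final
 * free over to the type-specific destructor (kernel or user fence).
 */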
static void vmw_fence_obj_destroy(struct dma_fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);

	spin_lock(&fman->lock);
	list_del_init(&fence->head);
	--fman->num_fence_objects;
	spin_unlock(&fman->lock);
	fence->destroy(fence);
}

static const char *vmw_fence_get_driver_name(struct dma_fence *f)
{
	return "vmwgfx";
}

static const char *vmw_fence_get_timeline_name(struct dma_fence *f)
{
	return "svga";
}

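/*
 * vmw_fence_enable_signaling - dma_fence enable_signaling callback.
 *
 * The unsigned "seqno - fence->base.seqno < VMW_FENCE_WRAP" test is
 * wrap-safe: a fence whose seqno lags the device seqno by less than 2^31
 * is considered already passed, in which case we return false so the
 * dma_fence core treats the fence as signaled. Otherwise we ping the host
 * so it eventually raises a fence irq.
 */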
static bool vmw_fence_enable_signaling(struct dma_fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;

	u32 *fifo_mem = dev_priv->mmio_virt;
	u32 seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);

	if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
		return false;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);

	return true;
}

struct vmwgfx_wait_cb {
	struct dma_fence_cb base;
	struct task_struct *task;
};

static void
vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct vmwgfx_wait_cb *wait =
		container_of(cb, struct vmwgfx_wait_cb, base);

	wake_up_process(wait->task);
}

static void __vmw_fences_update(struct vmw_fence_manager *fman);

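/*
 * vmw_fence_wait - dma_fence wait callback.
 *
 * An open-coded wait that, in addition to sleeping on a registered
 * callback, polls the device seqno via __vmw_fences_update() on each
 * wakeup, since fences aren't necessarily signaled from irq context
 * alone.
 */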
static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;
	struct vmwgfx_wait_cb cb;
	long ret = timeout;

	if (likely(vmw_fence_obj_signaled(fence)))
		return timeout;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	vmw_seqno_waiter_add(dev_priv);

	spin_lock(f->lock);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
		goto out;

	if (intr && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	cb.base.func = vmwgfx_wait_cb;
	cb.task = current;
	list_add(&cb.base.node, &f->cb_list);

	for (;;) {
		__vmw_fences_update(fman);

		/*
		 * We can use the barrier free __set_current_state() since
		 * DMA_FENCE_FLAG_SIGNALED_BIT + wakeup is protected by the
		 * fence spinlock.
		 */
		if (intr)
			__set_current_state(TASK_INTERRUPTIBLE);
		else
			__set_current_state(TASK_UNINTERRUPTIBLE);

		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) {
			if (ret == 0 && timeout > 0)
				ret = 1;
			break;
		}

		if (intr && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (ret == 0)
			break;

		spin_unlock(f->lock);

		ret = schedule_timeout(ret);

		spin_lock(f->lock);
	}
	__set_current_state(TASK_RUNNING);
	if (!list_empty(&cb.base.node))
		list_del(&cb.base.node);

out:
	spin_unlock(f->lock);

	vmw_seqno_waiter_remove(dev_priv);

	return ret;
}

static const struct dma_fence_ops vmw_fence_ops = {
	.get_driver_name = vmw_fence_get_driver_name,
	.get_timeline_name = vmw_fence_get_timeline_name,
	.enable_signaling = vmw_fence_enable_signaling,
	.wait = vmw_fence_wait,
	.release = vmw_fence_obj_destroy,
};


/**
 * vmw_fence_work_func - Execute signal actions on fences recently signaled.
 *
 * @work: The struct work_struct embedded in a struct vmw_fence_manager.
 *
 * This is done from a workqueue so we don't have to execute
 * signal actions from atomic context.
 */

static void vmw_fence_work_func(struct work_struct *work)
{
	struct vmw_fence_manager *fman =
		container_of(work, struct vmw_fence_manager, work);
	struct list_head list;
	struct vmw_fence_action *action, *next_action;
	bool seqno_valid;

	do {
		INIT_LIST_HEAD(&list);
		mutex_lock(&fman->goal_irq_mutex);

		spin_lock(&fman->lock);
		list_splice_init(&fman->cleanup_list, &list);
		seqno_valid = fman->seqno_valid;
		spin_unlock(&fman->lock);

		if (!seqno_valid && fman->goal_irq_on) {
			fman->goal_irq_on = false;
			vmw_goal_waiter_remove(fman->dev_priv);
		}
		mutex_unlock(&fman->goal_irq_mutex);

		if (list_empty(&list))
			return;

		/*
		 * At this point, only we should be able to manipulate the
		 * list heads of the actions we have on the private list,
		 * hence we don't need to hold fman::lock.
		 */

		list_for_each_entry_safe(action, next_action, &list, head) {
			list_del_init(&action->head);
			if (action->cleanup)
				action->cleanup(action);
		}
	} while (1);
}

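/**
 * vmw_fence_manager_init - Create and initialize a fence manager.
 *
 * @dev_priv: Pointer to a struct vmw_private.
 *
 * Return: The new fence manager on success, NULL on allocation failure.
 */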
struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
	struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);

	if (unlikely(!fman))
		return NULL;

	fman->dev_priv = dev_priv;
	spin_lock_init(&fman->lock);
	INIT_LIST_HEAD(&fman->fence_list);
	INIT_LIST_HEAD(&fman->cleanup_list);
	INIT_WORK(&fman->work, &vmw_fence_work_func);
	fman->fifo_down = true;
	fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)) +
		TTM_OBJ_EXTRA_SIZE;
	fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
	fman->event_fence_action_size =
		ttm_round_pot(sizeof(struct vmw_event_fence_action));
	mutex_init(&fman->goal_irq_mutex);
	fman->ctx = dma_fence_context_alloc(1);

	return fman;
}

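/**
 * vmw_fence_manager_takedown - Destroy a fence manager.
 *
 * @fman: The fence manager to destroy.
 *
 * By the time this is called, all fences are expected to have been
 * signaled and all cleanup work to have finished; both lists must be
 * empty.
 */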
void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
	bool lists_empty;

	(void) cancel_work_sync(&fman->work);

	spin_lock(&fman->lock);
	lists_empty = list_empty(&fman->fence_list) &&
		list_empty(&fman->cleanup_list);
	spin_unlock(&fman->lock);

	BUG_ON(!lists_empty);
	kfree(fman);
}

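/*
 * vmw_fence_obj_init - Initialize a fence object and add it to the
 * manager's fence list. Fails with -EBUSY while the fifo is down, since
 * no new fences can be submitted in that state.
 */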
static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
			      struct vmw_fence_obj *fence, u32 seqno,
			      void (*destroy) (struct vmw_fence_obj *fence))
{
	int ret = 0;

	dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
		       fman->ctx, seqno);
	INIT_LIST_HEAD(&fence->seq_passed_actions);
	fence->destroy = destroy;

	spin_lock(&fman->lock);
	if (unlikely(fman->fifo_down)) {
		ret = -EBUSY;
		goto out_unlock;
	}
	list_add_tail(&fence->head, &fman->fence_list);
	++fman->num_fence_objects;

out_unlock:
	spin_unlock(&fman->lock);
	return ret;
}

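/*
 * vmw_fences_perform_actions - Run the seq_passed callback of each action
 * on @list, then move the actions to the manager's cleanup list so a
 * worker can perform the potentially sleeping cleanup.
 */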
static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
				       struct list_head *list)
{
	struct vmw_fence_action *action, *next_action;

	list_for_each_entry_safe(action, next_action, list, head) {
		list_del_init(&action->head);
		fman->pending_actions[action->type]--;
		if (action->seq_passed != NULL)
			action->seq_passed(action);

		/*
		 * Add the cleanup action to the cleanup list so that
		 * it will be performed by a worker task.
		 */

		list_add_tail(&action->head, &fman->cleanup_list);
	}
}

/**
 * vmw_fence_goal_new_locked - Figure out a new device fence goal
 * seqno if needed.
 *
 * @fman: Pointer to a fence manager.
 * @passed_seqno: The seqno the device currently signals as passed.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when we have a new passed_seqno, and
 * we might need to update the fence goal. It checks to see whether
 * the current fence goal has already passed, and, in that case,
 * scans through all unsignaled fences to get the next fence object with an
 * action attached, and sets the seqno of that fence as a new fence goal.
 *
 * Return: True if the device goal seqno was updated. False otherwise.
 */
static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
				      u32 passed_seqno)
{
	u32 goal_seqno;
	u32 *fifo_mem;
	struct vmw_fence_obj *fence;

	if (likely(!fman->seqno_valid))
		return false;

	fifo_mem = fman->dev_priv->mmio_virt;
	goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
	if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
		return false;

	fman->seqno_valid = false;
	list_for_each_entry(fence, &fman->fence_list, head) {
		if (!list_empty(&fence->seq_passed_actions)) {
			fman->seqno_valid = true;
			vmw_mmio_write(fence->base.seqno,
				       fifo_mem + SVGA_FIFO_FENCE_GOAL);
			break;
		}
	}

	return true;
}


/**
 * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
 * needed.
 *
 * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
 * considered as a device fence goal.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when an action has been attached to a fence to
 * check whether the seqno of that fence should be used for a fence
 * goal interrupt. This is typically needed if the current fence goal is
 * invalid, or has a higher seqno than that of the current fence object.
 *
 * Return: True if the device goal seqno was updated. False otherwise.
 */
static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	u32 goal_seqno;
	u32 *fifo_mem;

	if (dma_fence_is_signaled_locked(&fence->base))
		return false;

	fifo_mem = fman->dev_priv->mmio_virt;
	goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
	if (likely(fman->seqno_valid &&
		   goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
		return false;

	vmw_mmio_write(fence->base.seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
	fman->seqno_valid = true;

	return true;
}

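/*
 * __vmw_fences_update - Signal all fences on the fence list whose seqno
 * has passed, run their actions, and update the fence goal if needed.
 *
 * Caller must hold fman::lock. If updating the fence goal may have raced
 * with the hardware, the scan is rerun with a freshly read device seqno.
 */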
static void __vmw_fences_update(struct vmw_fence_manager *fman)
{
	struct vmw_fence_obj *fence, *next_fence;
	struct list_head action_list;
	bool needs_rerun;
	uint32_t seqno, new_seqno;
	u32 *fifo_mem = fman->dev_priv->mmio_virt;

	seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
rerun:
	list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
		if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
			list_del_init(&fence->head);
			dma_fence_signal_locked(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		} else
			break;
	}

	/*
	 * Rerun if the fence goal seqno was updated, and the
	 * hardware might have raced with that update, so that
	 * we missed a fence_goal irq.
	 */

	needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
	if (unlikely(needs_rerun)) {
		new_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
		if (new_seqno != seqno) {
			seqno = new_seqno;
			goto rerun;
		}
	}

	if (!list_empty(&fman->cleanup_list))
		(void) schedule_work(&fman->work);
}

void vmw_fences_update(struct vmw_fence_manager *fman)
{
	spin_lock(&fman->lock);
	__vmw_fences_update(fman);
	spin_unlock(&fman->lock);
}

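/**
 * vmw_fence_obj_signaled - Check whether a fence object has signaled.
 *
 * @fence: The fence to check.
 *
 * Unless the fence is already flagged as signaled, this polls the device
 * seqno, so signaling is detected even with fence irqs masked.
 */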
bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
		return true;

	vmw_fences_update(fman);

	return dma_fence_is_signaled(&fence->base);
}

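/**
 * vmw_fence_obj_wait - Wait for a fence object to signal.
 *
 * @fence: The fence to wait on.
 * @lazy: Currently unused.
 * @interruptible: Whether the wait is interruptible by a signal.
 * @timeout: Timeout in jiffies.
 *
 * Return: 0 on success, -EBUSY if the wait timed out, or a negative error
 * code on failure.
 */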
int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
		       bool interruptible, unsigned long timeout)
{
	long ret = dma_fence_wait_timeout(&fence->base, interruptible, timeout);

	if (likely(ret > 0))
		return 0;
	else if (ret == 0)
		return -EBUSY;
	else
		return ret;
}

void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
{
	struct vmw_private *dev_priv = fman_from_fence(fence)->dev_priv;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
}

static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
	dma_fence_free(&fence->base);
}

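/**
 * vmw_fence_create - Create a kernel-only fence object.
 *
 * @fman: Pointer to the fence manager.
 * @seqno: The seqno the fence will signal on.
 * @p_fence: Assigned the new fence object on success.
 *
 * Return: 0 on success, negative error code otherwise.
 */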
int vmw_fence_create(struct vmw_fence_manager *fman,
		     uint32_t seqno,
		     struct vmw_fence_obj **p_fence)
{
	struct vmw_fence_obj *fence;
	int ret;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (unlikely(!fence))
		return -ENOMEM;

	ret = vmw_fence_obj_init(fman, fence, seqno,
				 vmw_fence_destroy);
	if (unlikely(ret != 0))
		goto out_err_init;

	*p_fence = fence;
	return 0;

out_err_init:
	kfree(fence);
	return ret;
}


static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
{
	struct vmw_user_fence *ufence =
		container_of(fence, struct vmw_user_fence, fence);
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	ttm_base_object_kfree(ufence, base);
	/*
	 * Free kernel space accounting.
	 */
	ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
			    fman->user_fence_size);
}

static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_fence *ufence =
		container_of(base, struct vmw_user_fence, base);
	struct vmw_fence_obj *fence = &ufence->fence;

	*p_base = NULL;
	vmw_fence_obj_unreference(&fence);
}

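/**
 * vmw_user_fence_create - Create a fence object visible to user-space.
 *
 * @file_priv: The file the fence handle is created for.
 * @fman: Pointer to the fence manager.
 * @seqno: The seqno the fence will signal on.
 * @p_fence: Assigned the new fence object on success.
 * @p_handle: Assigned the user-space handle on success.
 *
 * Like vmw_fence_create(), but also sets up a ttm base object so the
 * fence can be looked up by handle, and charges the allocation to the
 * global memory accounting, since creation is triggered by user-space.
 */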
int vmw_user_fence_create(struct drm_file *file_priv,
			  struct vmw_fence_manager *fman,
			  uint32_t seqno,
			  struct vmw_fence_obj **p_fence,
			  uint32_t *p_handle)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_user_fence *ufence;
	struct vmw_fence_obj *tmp;
	struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	int ret;

	/*
	 * Kernel memory space accounting, since this object may
	 * be created by a user-space request.
	 */

	ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
				   &ctx);
	if (unlikely(ret != 0))
		return ret;

	ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
	if (unlikely(!ufence)) {
		ret = -ENOMEM;
		goto out_no_object;
	}

	ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
				 vmw_user_fence_destroy);
	if (unlikely(ret != 0)) {
		kfree(ufence);
		goto out_no_object;
	}

	/*
	 * The base object holds a reference which is freed in
	 * vmw_user_fence_base_release.
	 */
	tmp = vmw_fence_obj_reference(&ufence->fence);
	ret = ttm_base_object_init(tfile, &ufence->base, false,
				   VMW_RES_FENCE,
				   &vmw_user_fence_base_release, NULL);

	if (unlikely(ret != 0)) {
		/*
		 * Free the base object's reference.
		 */
		vmw_fence_obj_unreference(&tmp);
		goto out_err;
	}

	*p_fence = &ufence->fence;
	*p_handle = ufence->base.handle;

	return 0;
out_err:
	tmp = &ufence->fence;
	vmw_fence_obj_unreference(&tmp);
out_no_object:
	ttm_mem_global_free(mem_glob, fman->user_fence_size);
	return ret;
}


/**
 * vmw_wait_dma_fence - Wait for a dma fence
 *
 * @fman: pointer to a fence manager
 * @fence: DMA fence to wait on
 *
 * This function handles the case when the fence is actually a fence
 * array. If that's the case, it'll wait on each of the child fences.
 */
int vmw_wait_dma_fence(struct vmw_fence_manager *fman,
		       struct dma_fence *fence)
{
	struct dma_fence_array *fence_array;
	int ret = 0;
	int i;

	if (dma_fence_is_signaled(fence))
		return 0;

	if (!dma_fence_is_array(fence))
		return dma_fence_wait(fence, true);

	/* From i915: Note that if the fence-array was created in
	 * signal-on-any mode, we should *not* decompose it into its individual
	 * fences. However, we don't currently store which mode the fence-array
	 * is operating in. Fortunately, the only user of signal-on-any is
	 * private to amdgpu and we should not see any incoming fence-array
	 * from sync-file being in signal-on-any mode.
	 */

	fence_array = to_dma_fence_array(fence);
	for (i = 0; i < fence_array->num_fences; i++) {
		struct dma_fence *child = fence_array->fences[i];

		ret = dma_fence_wait(child, true);
		if (ret < 0)
			return ret;
	}

	return 0;
}


/**
 * vmw_fence_fifo_down - signal all unsignaled fence objects.
 *
 * @fman: Pointer to the fence manager.
 */

void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
{
	struct list_head action_list;
	int ret;

	/*
	 * The list may be altered while we traverse it, so always
	 * restart when we've released the fman->lock.
	 */

	spin_lock(&fman->lock);
	fman->fifo_down = true;
	while (!list_empty(&fman->fence_list)) {
		struct vmw_fence_obj *fence =
			list_entry(fman->fence_list.prev, struct vmw_fence_obj,
				   head);
		dma_fence_get(&fence->base);
		spin_unlock(&fman->lock);

		ret = vmw_fence_obj_wait(fence, false, false,
					 VMW_FENCE_WAIT_TIMEOUT);

		if (unlikely(ret != 0)) {
			list_del_init(&fence->head);
			dma_fence_signal(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		}

		BUG_ON(!list_empty(&fence->head));
		dma_fence_put(&fence->base);
		spin_lock(&fman->lock);
	}
	spin_unlock(&fman->lock);
}

void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
{
	spin_lock(&fman->lock);
	fman->fifo_down = false;
	spin_unlock(&fman->lock);
}


/**
 * vmw_fence_obj_lookup - Look up a user-space fence object
 *
 * @tfile: A struct ttm_object_file identifying the caller.
 * @handle: A handle identifying the fence object.
 *
 * The fence object is looked up and type-checked. The caller needs
 * to have opened the fence object first, but since that happens on
 * creation and fence objects aren't shareable, that's not an
 * issue currently.
 *
 * Return: A struct vmw_user_fence base ttm object on success or
 * an error pointer on failure.
 */
static struct ttm_base_object *
vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
{
	struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);

	if (!base) {
		pr_err("Invalid fence object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return ERR_PTR(-EINVAL);
	}

	if (base->refcount_release != vmw_user_fence_base_release) {
		pr_err("Invalid fence object handle 0x%08lx.\n",
		       (unsigned long)handle);
		ttm_base_object_unref(&base);
		return ERR_PTR(-EINVAL);
	}

	return base;
}


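/*
 * vmw_fence_obj_wait_ioctl - ioctl to wait for a user-space fence.
 *
 * The first invocation converts the relative timeout to an absolute one
 * (the "kernel cookie"), so that a wait restarted after a signal doesn't
 * start its full timeout over again.
 */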
int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_vmw_fence_wait_arg *arg =
	    (struct drm_vmw_fence_wait_arg *)data;
	unsigned long timeout;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;
	uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);

	/*
	 * 64-bit division not present on 32-bit systems, so do an
	 * approximation. (Divide by 1000000).
	 */

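	/*
	 * A sketch of the shift approximation:
	 * 2^-20 + 2^-24 - 2^-26 = (64 + 4 - 1) / 2^26 = 67 / 2^26
	 * ~= 1 / 1001625, i.e. within roughly 0.2% of dividing by 10^6.
	 */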
	wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
	  (wait_timeout >> 26);

	if (!arg->cookie_valid) {
		arg->cookie_valid = 1;
		arg->kernel_cookie = jiffies + wait_timeout;
	}

	base = vmw_fence_obj_lookup(tfile, arg->handle);
	if (IS_ERR(base))
		return PTR_ERR(base);

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);

	timeout = jiffies;
	if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
		ret = ((vmw_fence_obj_signaled(fence)) ?
		       0 : -EBUSY);
		goto out;
	}

	timeout = (unsigned long)arg->kernel_cookie - timeout;

	ret = vmw_fence_obj_wait(fence, arg->lazy, true, timeout);

out:
	ttm_base_object_unref(&base);

	/*
	 * Optionally unref the fence object.
	 */

	if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
		return ttm_ref_object_base_unref(tfile, arg->handle,
						 TTM_REF_USAGE);
	return ret;
}

int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_vmw_fence_signaled_arg *arg =
		(struct drm_vmw_fence_signaled_arg *) data;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct vmw_fence_manager *fman;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);

	base = vmw_fence_obj_lookup(tfile, arg->handle);
	if (IS_ERR(base))
		return PTR_ERR(base);

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
	fman = fman_from_fence(fence);

	arg->signaled = vmw_fence_obj_signaled(fence);

	arg->signaled_flags = arg->flags;
	spin_lock(&fman->lock);
	arg->passed_seqno = dev_priv->last_read_seqno;
	spin_unlock(&fman->lock);

	ttm_base_object_unref(&base);

	return 0;
}


int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_fence_arg *arg =
		(struct drm_vmw_fence_arg *) data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

/**
 * vmw_event_fence_action_seq_passed - Deliver the pending drm event.
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is called when the seqno of the fence where @action is
 * attached has passed. It queues the event on the submitter's event list.
 * This function is always called from atomic context.
 */
static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);
	struct drm_device *dev = eaction->dev;
	struct drm_pending_event *event = eaction->event;

	if (unlikely(event == NULL))
		return;

	spin_lock_irq(&dev->event_lock);

	if (likely(eaction->tv_sec != NULL)) {
		struct timespec64 ts;

		ktime_get_ts64(&ts);
		/* monotonic time, so no y2038 overflow */
		*eaction->tv_sec = ts.tv_sec;
		*eaction->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
	}

	drm_send_event_locked(dev, eaction->event);
	eaction->event = NULL;
	spin_unlock_irq(&dev->event_lock);
}

/**
 * vmw_event_fence_action_cleanup - Destroy an event fence action.
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is the struct vmw_fence_action destructor. It's typically
 * called from a workqueue.
 */
static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);

	vmw_fence_obj_unreference(&eaction->fence);
	kfree(eaction);
}


/**
 * vmw_fence_obj_add_action - Add an action to a fence object.
 *
 * @fence: The fence object.
 * @action: The action to add.
 *
 * Note that the action callbacks may be executed before this function
 * returns.
 */
static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
				     struct vmw_fence_action *action)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	bool run_update = false;

	mutex_lock(&fman->goal_irq_mutex);
	spin_lock(&fman->lock);

	fman->pending_actions[action->type]++;
	if (dma_fence_is_signaled_locked(&fence->base)) {
		struct list_head action_list;

		INIT_LIST_HEAD(&action_list);
		list_add_tail(&action->head, &action_list);
		vmw_fences_perform_actions(fman, &action_list);
	} else {
		list_add_tail(&action->head, &fence->seq_passed_actions);

		/*
		 * This function may set fman::seqno_valid, so it must
		 * be run with the goal_irq_mutex held.
		 */
		run_update = vmw_fence_goal_check_locked(fence);
	}

	spin_unlock(&fman->lock);

	if (run_update) {
		if (!fman->goal_irq_on) {
			fman->goal_irq_on = true;
			vmw_goal_waiter_add(fman->dev_priv);
		}
		vmw_fences_update(fman);
	}
	mutex_unlock(&fman->goal_irq_mutex);
}

/**
 * vmw_event_fence_action_queue - Post an event for sending when a fence
 * object seqno has passed.
 *
 * @file_priv: The file connection on which the event should be posted.
 * @fence: The fence object on which to post the event.
 * @event: Event to be posted. This event should've been alloced
 * using k[mz]alloc, and should've been completely initialized.
 * @tv_sec: If non-null, assigned the tv_sec value of the time the fence
 * signals.
 * @tv_usec: If non-null, assigned the tv_usec value of the time the fence
 * signals.
 * @interruptible: Interruptible waits if possible.
 *
 * As a side effect, the object pointed to by @event may have been
 * freed when this function returns. If this function returns with
 * an error code, the caller needs to free that object.
 */

int vmw_event_fence_action_queue(struct drm_file *file_priv,
				 struct vmw_fence_obj *fence,
				 struct drm_pending_event *event,
				 uint32_t *tv_sec,
				 uint32_t *tv_usec,
				 bool interruptible)
{
	struct vmw_event_fence_action *eaction;
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
	if (unlikely(!eaction))
		return -ENOMEM;

	eaction->event = event;

	eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
	eaction->action.cleanup = vmw_event_fence_action_cleanup;
	eaction->action.type = VMW_ACTION_EVENT;

	eaction->fence = vmw_fence_obj_reference(fence);
	eaction->dev = fman->dev_priv->dev;
	eaction->tv_sec = tv_sec;
	eaction->tv_usec = tv_usec;

	vmw_fence_obj_add_action(fence, &eaction->action);

	return 0;
}

struct vmw_event_fence_pending {
	struct drm_pending_event base;
	struct drm_vmw_event_fence event;
};

static int vmw_event_fence_action_create(struct drm_file *file_priv,
					 struct vmw_fence_obj *fence,
					 uint32_t flags,
					 uint64_t user_data,
					 bool interruptible)
{
	struct vmw_event_fence_pending *event;
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct drm_device *dev = fman->dev_priv->dev;
	int ret;

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (unlikely(!event)) {
		DRM_ERROR("Failed to allocate an event.\n");
		ret = -ENOMEM;
		goto out_no_space;
	}

	event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
	event->event.base.length = sizeof(*event);
	event->event.user_data = user_data;

	ret = drm_event_reserve_init(dev, file_priv, &event->base,
				     &event->event.base);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate event space for this file.\n");
		kfree(event);
		goto out_no_space;
	}

	if (flags & DRM_VMW_FE_FLAG_REQ_TIME)
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   &event->event.tv_sec,
						   &event->event.tv_usec,
						   interruptible);
	else
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   NULL,
						   NULL,
						   interruptible);
	if (ret != 0)
		goto out_no_queue;

	return 0;

out_no_queue:
	drm_event_cancel_free(dev, &event->base);
out_no_space:
	return ret;
}

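/*
 * vmw_fence_event_ioctl - ioctl queueing a drm event on fence signaling.
 *
 * Looks up (or, if no handle is supplied, creates) a fence object,
 * attaches an event fence action to it, and optionally copies a fence
 * representation back to user-space.
 */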
int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_fence_event_arg *arg =
		(struct drm_vmw_fence_event_arg *) data;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct ttm_object_file *tfile = vmw_fp->tfile;
	struct drm_vmw_fence_rep __user *user_fence_rep =
		(struct drm_vmw_fence_rep __user *)(unsigned long)
		arg->fence_rep;
	uint32_t handle;
	int ret;

	/*
	 * Look up an existing fence object,
	 * and if user-space wants a new reference,
	 * add one.
	 */
	if (arg->handle) {
		struct ttm_base_object *base =
			vmw_fence_obj_lookup(tfile, arg->handle);

		if (IS_ERR(base))
			return PTR_ERR(base);

		fence = &(container_of(base, struct vmw_user_fence,
				       base)->fence);
		(void) vmw_fence_obj_reference(fence);

		if (user_fence_rep != NULL) {
			ret = ttm_ref_object_add(vmw_fp->tfile, base,
						 TTM_REF_USAGE, NULL, false);
			if (unlikely(ret != 0)) {
				DRM_ERROR("Failed to reference a fence "
					  "object.\n");
				goto out_no_ref_obj;
			}
			handle = base->handle;
		}
		ttm_base_object_unref(&base);
	}

	/*
	 * Create a new fence object.
	 */
	if (!fence) {
		ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
						 &fence,
						 (user_fence_rep) ?
						 &handle : NULL);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Fence event failed to create fence.\n");
			return ret;
		}
	}

	BUG_ON(fence == NULL);

	ret = vmw_event_fence_action_create(file_priv, fence,
					    arg->flags,
					    arg->user_data,
					    true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Failed to attach event to fence.\n");
		goto out_no_create;
	}

	vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
				    handle, -1, NULL);
	vmw_fence_obj_unreference(&fence);
	return 0;
out_no_create:
	if (user_fence_rep != NULL)
		ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
out_no_ref_obj:
	vmw_fence_obj_unreference(&fence);
	return ret;
}