// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2011-2014 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/sched/signal.h>

#include "vmwgfx_drv.h"

#define VMW_FENCE_WRAP (1 << 31)

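/**
 * struct vmw_fence_manager - Per-device fence bookkeeping state.
 *
 * @num_fence_objects: Number of fence objects currently on @fence_list.
 * @dev_priv: Pointer to the device private structure.
 * @lock: Protects the fence lists and is used as the lock of all
 * dma_fence objects created by this manager.
 * @fence_list: List of fence objects not yet known to have signaled.
 * @work: Work item running fence action cleanups outside atomic context.
 * @fifo_down: True while the fifo is being torn down; no new fence
 * objects may be created.
 * @cleanup_list: Actions whose cleanup callbacks are still pending.
 * @pending_actions: Per-type count of actions attached and not yet run.
 * @goal_irq_mutex: Serializes switching the fence goal irq on and off.
 * @goal_irq_on: Whether the fence goal irq is currently enabled.
 * @seqno_valid: Whether the fence goal register holds a valid goal seqno.
 * @ctx: The dma_fence context number used by this manager.
 */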
struct vmw_fence_manager {
	int num_fence_objects;
	struct vmw_private *dev_priv;
	spinlock_t lock;
	struct list_head fence_list;
	struct work_struct work;
	bool fifo_down;
	struct list_head cleanup_list;
	uint32_t pending_actions[VMW_ACTION_MAX];
	struct mutex goal_irq_mutex;
	bool goal_irq_on; /* Protected by @goal_irq_mutex */
	bool seqno_valid; /* Protected by @lock, and may not be set to true
			     without the @goal_irq_mutex held. */
	u64 ctx;
};

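/**
 * struct vmw_user_fence - A fence object visible to user-space.
 *
 * @base: The TTM base object backing the user-space handle.
 * @fence: The fence object itself.
 */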
struct vmw_user_fence {
	struct ttm_base_object base;
	struct vmw_fence_obj fence;
};

/**
 * struct vmw_event_fence_action - fence action that delivers a drm event.
 *
 * @action: A struct vmw_fence_action to hook up to a fence.
 * @event: A pointer to the pending event.
 * @fence: A referenced pointer to the fence to keep it alive while @action
 * hangs on it.
 * @dev: Pointer to a struct drm_device so we can access the event stuff.
 * @tv_sec: If non-NULL, the variable pointed to will be assigned the
 * current time's tv_sec value when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
 * be assigned the current time's tv_usec value when the fence signals.
 */
struct vmw_event_fence_action {
	struct vmw_fence_action action;

	struct drm_pending_event *event;
	struct vmw_fence_obj *fence;
	struct drm_device *dev;

	uint32_t *tv_sec;
	uint32_t *tv_usec;
};

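/*
 * All fence objects are initialized with the fence manager's lock as
 * their dma_fence lock, so the manager can be recovered from any fence
 * with a simple container_of().
 */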
static struct vmw_fence_manager *
fman_from_fence(struct vmw_fence_obj *fence)
{
	return container_of(fence->base.lock, struct vmw_fence_manager, lock);
}

/*
 * Note on fencing subsystem usage of irqs:
 * Typically the vmw_fences_update function is called
 *
 * a) When a new fence seqno has been submitted by the fifo code.
 * b) On-demand when we have waiters. Sleeping waiters will switch on the
 * ANY_FENCE irq and call the vmw_fences_update function each time an
 * ANY_FENCE irq is received. When the last fence waiter is gone, that irq
 * is masked away.
 *
 * In situations where there are no waiters and we don't submit any new fences,
 * fence objects may not be signaled. This is perfectly OK, since there are
 * no consumers of the signaled data, but that is NOT ok when there are fence
 * actions attached to a fence. The fencing subsystem then makes use of the
 * FENCE_GOAL irq and sets the fence goal seqno to that of the next fence
 * which has an action attached, and each time vmw_fences_update is called,
 * the subsystem makes sure the fence goal seqno is updated.
 *
 * The fence goal seqno irq is on as long as there are unsignaled fence
 * objects with actions attached to them.
 */

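/*
 * dma_fence_ops::release callback: unlink the fence from the fence
 * manager's list and hand it over to its type-specific destructor.
 */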
static void vmw_fence_obj_destroy(struct dma_fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);

	spin_lock(&fman->lock);
	list_del_init(&fence->head);
	--fman->num_fence_objects;
	spin_unlock(&fman->lock);
	fence->destroy(fence);
}

static const char *vmw_fence_get_driver_name(struct dma_fence *f)
{
	return "vmwgfx";
}

static const char *vmw_fence_get_timeline_name(struct dma_fence *f)
{
	return "svga";
}

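/*
 * dma_fence_ops::enable_signaling callback. Returns false when the device
 * has already passed the fence's seqno, i.e. the fence is as good as
 * signaled and no irq-driven signaling needs to be set up.
 */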
static bool vmw_fence_enable_signaling(struct dma_fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;

	u32 seqno = vmw_fence_read(dev_priv);

	if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
		return false;

	return true;
}

struct vmwgfx_wait_cb {
	struct dma_fence_cb base;
	struct task_struct *task;
};

static void
vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct vmwgfx_wait_cb *wait =
		container_of(cb, struct vmwgfx_wait_cb, base);

	wake_up_process(wait->task);
}

static void __vmw_fences_update(struct vmw_fence_manager *fman);

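/*
 * dma_fence_ops::wait callback. Sleeps with a seqno waiter registered so
 * that the ANY_FENCE irq stays unmasked, and re-checks the device seqno
 * through __vmw_fences_update() each time the waiter is woken up or the
 * timeout expires.
 */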
static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;
	struct vmwgfx_wait_cb cb;
	long ret = timeout;

	if (likely(vmw_fence_obj_signaled(fence)))
		return timeout;

	vmw_seqno_waiter_add(dev_priv);

	spin_lock(f->lock);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
		goto out;

	if (intr && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	cb.base.func = vmwgfx_wait_cb;
	cb.task = current;
	list_add(&cb.base.node, &f->cb_list);

	for (;;) {
		__vmw_fences_update(fman);

		/*
		 * We can use the barrier free __set_current_state() since
		 * DMA_FENCE_FLAG_SIGNALED_BIT + wakeup is protected by the
		 * fence spinlock.
		 */
		if (intr)
			__set_current_state(TASK_INTERRUPTIBLE);
		else
			__set_current_state(TASK_UNINTERRUPTIBLE);

		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) {
			if (ret == 0 && timeout > 0)
				ret = 1;
			break;
		}

		if (intr && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (ret == 0)
			break;

		spin_unlock(f->lock);

		ret = schedule_timeout(ret);

		spin_lock(f->lock);
	}
	__set_current_state(TASK_RUNNING);
	if (!list_empty(&cb.base.node))
		list_del(&cb.base.node);

out:
	spin_unlock(f->lock);

	vmw_seqno_waiter_remove(dev_priv);

	return ret;
}

static const struct dma_fence_ops vmw_fence_ops = {
	.get_driver_name = vmw_fence_get_driver_name,
	.get_timeline_name = vmw_fence_get_timeline_name,
	.enable_signaling = vmw_fence_enable_signaling,
	.wait = vmw_fence_wait,
	.release = vmw_fence_obj_destroy,
};

/*
 * Execute signal actions on fences recently signaled.
 * This is done from a workqueue so we don't have to execute
 * signal actions from atomic context.
 */

static void vmw_fence_work_func(struct work_struct *work)
{
	struct vmw_fence_manager *fman =
		container_of(work, struct vmw_fence_manager, work);
	struct list_head list;
	struct vmw_fence_action *action, *next_action;
	bool seqno_valid;

	do {
		INIT_LIST_HEAD(&list);
		mutex_lock(&fman->goal_irq_mutex);

		spin_lock(&fman->lock);
		list_splice_init(&fman->cleanup_list, &list);
		seqno_valid = fman->seqno_valid;
		spin_unlock(&fman->lock);

		if (!seqno_valid && fman->goal_irq_on) {
			fman->goal_irq_on = false;
			vmw_goal_waiter_remove(fman->dev_priv);
		}
		mutex_unlock(&fman->goal_irq_mutex);

		if (list_empty(&list))
			return;

		/*
		 * At this point, only we should be able to manipulate the
		 * list heads of the actions we have on the private list,
		 * hence we don't need to hold fman::lock.
		 */

		list_for_each_entry_safe(action, next_action, &list, head) {
			list_del_init(&action->head);
			if (action->cleanup)
				action->cleanup(action);
		}
	} while (1);
}

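/**
 * vmw_fence_manager_init - Allocate and initialize a fence manager.
 *
 * @dev_priv: Pointer to the device private structure.
 *
 * Return: The new fence manager on success, or NULL on allocation failure.
 */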
struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
	struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);

	if (unlikely(!fman))
		return NULL;

	fman->dev_priv = dev_priv;
	spin_lock_init(&fman->lock);
	INIT_LIST_HEAD(&fman->fence_list);
	INIT_LIST_HEAD(&fman->cleanup_list);
	INIT_WORK(&fman->work, &vmw_fence_work_func);
	fman->fifo_down = true;
	mutex_init(&fman->goal_irq_mutex);
	fman->ctx = dma_fence_context_alloc(1);

	return fman;
}

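/**
 * vmw_fence_manager_takedown - Tear down and free a fence manager.
 *
 * @fman: Pointer to the fence manager.
 *
 * All fence objects and pending cleanup actions must be gone by the time
 * this is called; that is verified with a BUG_ON().
 */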
void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
	bool lists_empty;

	(void) cancel_work_sync(&fman->work);

	spin_lock(&fman->lock);
	lists_empty = list_empty(&fman->fence_list) &&
		list_empty(&fman->cleanup_list);
	spin_unlock(&fman->lock);

	BUG_ON(!lists_empty);
	kfree(fman);
}

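/*
 * Initialize a pre-allocated fence object and put it on the fence
 * manager's list. Fails with -EBUSY if the fifo is on its way down.
 */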
static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
			      struct vmw_fence_obj *fence, u32 seqno,
			      void (*destroy) (struct vmw_fence_obj *fence))
{
	int ret = 0;

	dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
		       fman->ctx, seqno);
	INIT_LIST_HEAD(&fence->seq_passed_actions);
	fence->destroy = destroy;

	spin_lock(&fman->lock);
	if (unlikely(fman->fifo_down)) {
		ret = -EBUSY;
		goto out_unlock;
	}
	list_add_tail(&fence->head, &fman->fence_list);
	++fman->num_fence_objects;

out_unlock:
	spin_unlock(&fman->lock);
	return ret;
}

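/*
 * Run the seq_passed callback of each action on @list and then queue the
 * actions on the cleanup list so that their cleanup callbacks are run by
 * the fence manager's work item.
 */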
static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
				struct list_head *list)
{
	struct vmw_fence_action *action, *next_action;

	list_for_each_entry_safe(action, next_action, list, head) {
		list_del_init(&action->head);
		fman->pending_actions[action->type]--;
		if (action->seq_passed != NULL)
			action->seq_passed(action);

		/*
		 * Add the cleanup action to the cleanup list so that
		 * it will be performed by a worker task.
		 */

		list_add_tail(&action->head, &fman->cleanup_list);
	}
}

/**
 * vmw_fence_goal_new_locked - Figure out a new device fence goal
 * seqno if needed.
 *
 * @fman: Pointer to a fence manager.
 * @passed_seqno: The seqno the device currently signals as passed.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when we have a new passed_seqno, and
 * we might need to update the fence goal. It checks to see whether
 * the current fence goal has already passed, and, in that case,
 * scans through all unsignaled fences to get the next fence object with an
 * action attached, and sets the seqno of that fence as a new fence goal.
 *
 * Return: True if the device goal seqno was updated, false otherwise.
 */
static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
				      u32 passed_seqno)
{
	u32 goal_seqno;
	struct vmw_fence_obj *fence;

	if (likely(!fman->seqno_valid))
		return false;

	goal_seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE_GOAL);
	if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
		return false;

	fman->seqno_valid = false;
	list_for_each_entry(fence, &fman->fence_list, head) {
		if (!list_empty(&fence->seq_passed_actions)) {
			fman->seqno_valid = true;
			vmw_fifo_mem_write(fman->dev_priv,
					   SVGA_FIFO_FENCE_GOAL,
					   fence->base.seqno);
			break;
		}
	}

	return true;
}

/**
 * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
 * needed.
 *
 * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
 * considered as a device fence goal.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when an action has been attached to a fence to
 * check whether the seqno of that fence should be used for a fence
 * goal interrupt. This is typically needed if the current fence goal is
 * invalid, or has a higher seqno than that of the current fence object.
 *
 * Return: True if the device goal seqno was updated, false otherwise.
 */
static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	u32 goal_seqno;

	if (dma_fence_is_signaled_locked(&fence->base))
		return false;

	goal_seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE_GOAL);
	if (likely(fman->seqno_valid &&
		   goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
		return false;

	vmw_fifo_mem_write(fman->dev_priv, SVGA_FIFO_FENCE_GOAL,
			   fence->base.seqno);
	fman->seqno_valid = true;

	return true;
}

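/*
 * Signal all fences whose seqno has been passed by the device, run their
 * actions, and update the fence goal if necessary, rescanning when the
 * goal update may have raced with the hardware. Called with the fence
 * manager lock held.
 */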
static void __vmw_fences_update(struct vmw_fence_manager *fman)
{
	struct vmw_fence_obj *fence, *next_fence;
	struct list_head action_list;
	bool needs_rerun;
	uint32_t seqno, new_seqno;

	seqno = vmw_fence_read(fman->dev_priv);
rerun:
	list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
		if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
			list_del_init(&fence->head);
			dma_fence_signal_locked(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		} else
			break;
	}

	/*
	 * Rerun if the fence goal seqno was updated, and the
	 * hardware might have raced with that update, so that
	 * we missed a fence_goal irq.
	 */

	needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
	if (unlikely(needs_rerun)) {
		new_seqno = vmw_fence_read(fman->dev_priv);
		if (new_seqno != seqno) {
			seqno = new_seqno;
			goto rerun;
		}
	}

	if (!list_empty(&fman->cleanup_list))
		(void) schedule_work(&fman->work);
}

void vmw_fences_update(struct vmw_fence_manager *fman)
{
	spin_lock(&fman->lock);
	__vmw_fences_update(fman);
	spin_unlock(&fman->lock);
}

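/*
 * Check whether a fence object has signaled, giving the fence manager a
 * chance to process any seqnos the device has passed first.
 */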
bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
		return true;

	vmw_fences_update(fman);

	return dma_fence_is_signaled(&fence->base);
}

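/*
 * Wait for a fence object to signal, with a timeout in jiffies. The @lazy
 * argument is currently ignored; the actual wait is done by
 * dma_fence_wait_timeout(). Returns 0 on success, -EBUSY on timeout, or a
 * negative error code such as -ERESTARTSYS.
 */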
int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
		       bool interruptible, unsigned long timeout)
{
	long ret = dma_fence_wait_timeout(&fence->base, interruptible, timeout);

	if (likely(ret > 0))
		return 0;
	else if (ret == 0)
		return -EBUSY;
	else
		return ret;
}

static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
	dma_fence_free(&fence->base);
}

int vmw_fence_create(struct vmw_fence_manager *fman,
		     uint32_t seqno,
		     struct vmw_fence_obj **p_fence)
{
	struct vmw_fence_obj *fence;
	int ret;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (unlikely(!fence))
		return -ENOMEM;

	ret = vmw_fence_obj_init(fman, fence, seqno,
				 vmw_fence_destroy);
	if (unlikely(ret != 0))
		goto out_err_init;

	*p_fence = fence;
	return 0;

out_err_init:
	kfree(fence);
	return ret;
}

static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
{
	struct vmw_user_fence *ufence =
		container_of(fence, struct vmw_user_fence, fence);

	ttm_base_object_kfree(ufence, base);
}

static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_fence *ufence =
		container_of(base, struct vmw_user_fence, base);
	struct vmw_fence_obj *fence = &ufence->fence;

	*p_base = NULL;
	vmw_fence_obj_unreference(&fence);
}

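/*
 * Create a fence object together with a TTM base object so that it can be
 * referenced by handle from user-space. The base object holds one fence
 * reference, which is dropped in vmw_user_fence_base_release().
 */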
int vmw_user_fence_create(struct drm_file *file_priv,
			  struct vmw_fence_manager *fman,
			  uint32_t seqno,
			  struct vmw_fence_obj **p_fence,
			  uint32_t *p_handle)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_user_fence *ufence;
	struct vmw_fence_obj *tmp;
	int ret;

	ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
	if (unlikely(!ufence)) {
		ret = -ENOMEM;
		goto out_no_object;
	}

	ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
				 vmw_user_fence_destroy);
	if (unlikely(ret != 0)) {
		kfree(ufence);
		goto out_no_object;
	}

	/*
	 * The base object holds a reference which is freed in
	 * vmw_user_fence_base_release.
	 */
	tmp = vmw_fence_obj_reference(&ufence->fence);

	ret = ttm_base_object_init(tfile, &ufence->base, false,
				   VMW_RES_FENCE,
				   &vmw_user_fence_base_release);

	if (unlikely(ret != 0)) {
		/*
		 * Free the base object's reference
		 */
		vmw_fence_obj_unreference(&tmp);
		goto out_err;
	}

	*p_fence = &ufence->fence;
	*p_handle = ufence->base.handle;

	return 0;
out_err:
	tmp = &ufence->fence;
	vmw_fence_obj_unreference(&tmp);
out_no_object:
	return ret;
}

/**
 * vmw_wait_dma_fence - Wait for a dma fence
 *
 * @fman: pointer to a fence manager
 * @fence: DMA fence to wait on
 *
 * This function handles the case when the fence is actually a fence
 * array. If that's the case, it'll wait on each of the child fences.
 */
int vmw_wait_dma_fence(struct vmw_fence_manager *fman,
		       struct dma_fence *fence)
{
	struct dma_fence_array *fence_array;
	int ret = 0;
	int i;

	if (dma_fence_is_signaled(fence))
		return 0;

	if (!dma_fence_is_array(fence))
		return dma_fence_wait(fence, true);

	/* From i915: Note that if the fence-array was created in
	 * signal-on-any mode, we should *not* decompose it into its individual
	 * fences. However, we don't currently store which mode the fence-array
	 * is operating in. Fortunately, the only user of signal-on-any is
	 * private to amdgpu and we should not see any incoming fence-array
	 * from sync-file being in signal-on-any mode.
	 */

	fence_array = to_dma_fence_array(fence);
	for (i = 0; i < fence_array->num_fences; i++) {
		struct dma_fence *child = fence_array->fences[i];

		ret = dma_fence_wait(child, true);

		if (ret < 0)
			return ret;
	}

	return 0;
}

/*
 * vmw_fence_fifo_down - signal all unsignaled fence objects.
 */

void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
{
	struct list_head action_list;
	int ret;

	/*
	 * The list may be altered while we traverse it, so always
	 * restart when we've released the fman->lock.
	 */

	spin_lock(&fman->lock);
	fman->fifo_down = true;
	while (!list_empty(&fman->fence_list)) {
		struct vmw_fence_obj *fence =
			list_entry(fman->fence_list.prev, struct vmw_fence_obj,
				   head);
		dma_fence_get(&fence->base);
		spin_unlock(&fman->lock);

		ret = vmw_fence_obj_wait(fence, false, false,
					 VMW_FENCE_WAIT_TIMEOUT);

		if (unlikely(ret != 0)) {
			list_del_init(&fence->head);
			dma_fence_signal(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		}

		BUG_ON(!list_empty(&fence->head));
		dma_fence_put(&fence->base);
		spin_lock(&fman->lock);
	}
	spin_unlock(&fman->lock);
}

void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
{
	spin_lock(&fman->lock);
	fman->fifo_down = false;
	spin_unlock(&fman->lock);
}

/**
 * vmw_fence_obj_lookup - Look up a user-space fence object
 *
 * @tfile: A struct ttm_object_file identifying the caller.
 * @handle: A handle identifying the fence object.
 * @return: A struct vmw_user_fence base ttm object on success or
 * an error pointer on failure.
 *
 * The fence object is looked up and type-checked. The caller needs
 * to have opened the fence object first, but since that happens on
 * creation and fence objects aren't shareable, that's not an
 * issue currently.
 */
static struct ttm_base_object *
vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
{
	struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);

	if (!base) {
		pr_err("Invalid fence object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return ERR_PTR(-EINVAL);
	}

	if (base->refcount_release != vmw_user_fence_base_release) {
		pr_err("Invalid fence object handle 0x%08lx.\n",
		       (unsigned long)handle);
		ttm_base_object_unref(&base);
		return ERR_PTR(-EINVAL);
	}

	return base;
}

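/*
 * DRM_VMW_FENCE_WAIT ioctl: wait for a user-space fence object to signal,
 * converting the user-supplied timeout in microseconds to jiffies and
 * optionally dropping the user-space reference when the wait succeeds.
 */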
int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_vmw_fence_wait_arg *arg =
	    (struct drm_vmw_fence_wait_arg *)data;
	unsigned long timeout;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;
	uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);

	/*
	 * 64-bit division not present on 32-bit systems, so do an
	 * approximation. (Divide by 1000000).
	 */

	wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
	  (wait_timeout >> 26);

	if (!arg->cookie_valid) {
		arg->cookie_valid = 1;
		arg->kernel_cookie = jiffies + wait_timeout;
	}

	base = vmw_fence_obj_lookup(tfile, arg->handle);
	if (IS_ERR(base))
		return PTR_ERR(base);

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);

	timeout = jiffies;
	if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
		ret = ((vmw_fence_obj_signaled(fence)) ?
		       0 : -EBUSY);
		goto out;
	}

	timeout = (unsigned long)arg->kernel_cookie - timeout;

	ret = vmw_fence_obj_wait(fence, arg->lazy, true, timeout);

out:
	ttm_base_object_unref(&base);

	/*
	 * Optionally unref the fence object.
	 */

	if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
		return ttm_ref_object_base_unref(tfile, arg->handle);
	return ret;
}

int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_vmw_fence_signaled_arg *arg =
		(struct drm_vmw_fence_signaled_arg *) data;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct vmw_fence_manager *fman;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);

	base = vmw_fence_obj_lookup(tfile, arg->handle);
	if (IS_ERR(base))
		return PTR_ERR(base);

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
	fman = fman_from_fence(fence);

	arg->signaled = vmw_fence_obj_signaled(fence);

	arg->signaled_flags = arg->flags;
	spin_lock(&fman->lock);
	arg->passed_seqno = dev_priv->last_read_seqno;
	spin_unlock(&fman->lock);

	ttm_base_object_unref(&base);

	return 0;
}

int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_fence_arg *arg =
		(struct drm_vmw_fence_arg *) data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle);
}

/**
 * vmw_event_fence_action_seq_passed - Deliver the pending drm event.
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is called when the seqno of the fence where @action is
 * attached has passed. It queues the event on the submitter's event list.
 * This function is always called from atomic context.
 */
static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);
	struct drm_device *dev = eaction->dev;
	struct drm_pending_event *event = eaction->event;

	if (unlikely(event == NULL))
		return;

	spin_lock_irq(&dev->event_lock);

	if (likely(eaction->tv_sec != NULL)) {
		struct timespec64 ts;

		ktime_get_ts64(&ts);
		/* monotonic time, so no y2038 overflow */
		*eaction->tv_sec = ts.tv_sec;
		*eaction->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
	}

	drm_send_event_locked(dev, eaction->event);
	eaction->event = NULL;
	spin_unlock_irq(&dev->event_lock);
}

/**
 * vmw_event_fence_action_cleanup - Free a struct vmw_event_fence_action.
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is the struct vmw_fence_action destructor. It's typically
 * called from a workqueue.
 */
static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);

	vmw_fence_obj_unreference(&eaction->fence);
	kfree(eaction);
}

/**
 * vmw_fence_obj_add_action - Add an action to a fence object.
 *
 * @fence: The fence object.
 * @action: The action to add.
 *
 * Note that the action callbacks may be executed before this function
 * returns.
 */
static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
				     struct vmw_fence_action *action)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	bool run_update = false;

	mutex_lock(&fman->goal_irq_mutex);
	spin_lock(&fman->lock);

	fman->pending_actions[action->type]++;
	if (dma_fence_is_signaled_locked(&fence->base)) {
		struct list_head action_list;

		INIT_LIST_HEAD(&action_list);
		list_add_tail(&action->head, &action_list);
		vmw_fences_perform_actions(fman, &action_list);
	} else {
		list_add_tail(&action->head, &fence->seq_passed_actions);

		/*
		 * This function may set fman::seqno_valid, so it must
		 * be run with the goal_irq_mutex held.
		 */
		run_update = vmw_fence_goal_check_locked(fence);
	}

	spin_unlock(&fman->lock);

	if (run_update) {
		if (!fman->goal_irq_on) {
			fman->goal_irq_on = true;
			vmw_goal_waiter_add(fman->dev_priv);
		}
		vmw_fences_update(fman);
	}
	mutex_unlock(&fman->goal_irq_mutex);
}

/**
 * vmw_event_fence_action_queue - Post an event for sending when a fence
 * object seqno has passed.
 *
 * @file_priv: The file connection on which the event should be posted.
 * @fence: The fence object on which to post the event.
 * @event: Event to be posted. This event should've been allocated
 * using k[mz]alloc, and should've been completely initialized.
 * @tv_sec: If non-NULL, the variable pointed to will be assigned the
 * current time's tv_sec value when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
 * be assigned the current time's tv_usec value when the fence signals.
 * @interruptible: Interruptible waits if possible.
 *
 * As a side effect, the object pointed to by @event may have been
 * freed when this function returns. If this function returns with
 * an error code, the caller needs to free that object.
 */
int vmw_event_fence_action_queue(struct drm_file *file_priv,
				 struct vmw_fence_obj *fence,
				 struct drm_pending_event *event,
				 uint32_t *tv_sec,
				 uint32_t *tv_usec,
				 bool interruptible)
{
	struct vmw_event_fence_action *eaction;
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
	if (unlikely(!eaction))
		return -ENOMEM;

	eaction->event = event;

	eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
	eaction->action.cleanup = vmw_event_fence_action_cleanup;
	eaction->action.type = VMW_ACTION_EVENT;

	eaction->fence = vmw_fence_obj_reference(fence);
	eaction->dev = &fman->dev_priv->drm;
	eaction->tv_sec = tv_sec;
	eaction->tv_usec = tv_usec;

	vmw_fence_obj_add_action(fence, &eaction->action);

	return 0;
}

struct vmw_event_fence_pending {
	struct drm_pending_event base;
	struct drm_vmw_event_fence event;
};

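/*
 * Allocate a DRM_VMW_EVENT_FENCE_SIGNALED event, reserve space for it on
 * the drm file's event queue, and queue it as an action on @fence.
 */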
static int vmw_event_fence_action_create(struct drm_file *file_priv,
				  struct vmw_fence_obj *fence,
				  uint32_t flags,
				  uint64_t user_data,
				  bool interruptible)
{
	struct vmw_event_fence_pending *event;
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct drm_device *dev = &fman->dev_priv->drm;
	int ret;

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (unlikely(!event)) {
		DRM_ERROR("Failed to allocate an event.\n");
		ret = -ENOMEM;
		goto out_no_space;
	}

	event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
	event->event.base.length = sizeof(*event);
	event->event.user_data = user_data;

	ret = drm_event_reserve_init(dev, file_priv, &event->base,
				     &event->event.base);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate event space for this file.\n");
		kfree(event);
		goto out_no_space;
	}

	if (flags & DRM_VMW_FE_FLAG_REQ_TIME)
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   &event->event.tv_sec,
						   &event->event.tv_usec,
						   interruptible);
	else
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   NULL,
						   NULL,
						   interruptible);
	if (ret != 0)
		goto out_no_queue;

	return 0;

out_no_queue:
	drm_event_cancel_free(dev, &event->base);
out_no_space:
	return ret;
}

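/*
 * DRM_VMW_FENCE_EVENT ioctl: attach a drm event to an existing or newly
 * created fence object so that user-space is notified when it signals,
 * optionally returning a fence representation to user-space.
 */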
int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_fence_event_arg *arg =
		(struct drm_vmw_fence_event_arg *) data;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct ttm_object_file *tfile = vmw_fp->tfile;
	struct drm_vmw_fence_rep __user *user_fence_rep =
		(struct drm_vmw_fence_rep __user *)(unsigned long)
		arg->fence_rep;
	uint32_t handle;
	int ret;

	/*
	 * Look up an existing fence object,
	 * and if user-space wants a new reference,
	 * add one.
	 */
	if (arg->handle) {
		struct ttm_base_object *base =
			vmw_fence_obj_lookup(tfile, arg->handle);

		if (IS_ERR(base))
			return PTR_ERR(base);

		fence = &(container_of(base, struct vmw_user_fence,
				       base)->fence);
		(void) vmw_fence_obj_reference(fence);

		if (user_fence_rep != NULL) {
			ret = ttm_ref_object_add(vmw_fp->tfile, base,
						 NULL, false);
			if (unlikely(ret != 0)) {
				DRM_ERROR("Failed to reference a fence "
					  "object.\n");
				goto out_no_ref_obj;
			}
			handle = base->handle;
		}
		ttm_base_object_unref(&base);
	}

	/*
	 * Create a new fence object.
	 */
	if (!fence) {
		ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
						 &fence,
						 (user_fence_rep) ?
						 &handle : NULL);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Fence event failed to create fence.\n");
			return ret;
		}
	}

	BUG_ON(fence == NULL);

	ret = vmw_event_fence_action_create(file_priv, fence,
					    arg->flags,
					    arg->user_data,
					    true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Failed to attach event to fence.\n");
		goto out_no_create;
	}

	vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
				    handle, -1, NULL);
	vmw_fence_obj_unreference(&fence);
	return 0;
out_no_create:
	if (user_fence_rep != NULL)
		ttm_ref_object_base_unref(tfile, handle);
out_no_ref_obj:
	vmw_fence_obj_unreference(&fence);
	return ret;
}