/**************************************************************************
 *
 * Copyright © 2011-2014 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/drmP.h>
#include "vmwgfx_drv.h"

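/*
 * VMW_FENCE_WRAP is half the 32-bit seqno space. Seqno comparisons in this
 * file are wrap-safe: a fence is considered passed when
 * (current_seqno - fence_seqno) < VMW_FENCE_WRAP, that is, when the fence
 * seqno lies no more than half the seqno space behind the current value.
 * This remains correct across a wrap of the 32-bit counter.
 */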
#define VMW_FENCE_WRAP (1 << 31)

struct vmw_fence_manager {
	int num_fence_objects;
	struct vmw_private *dev_priv;
	spinlock_t lock;
	struct list_head fence_list;
	struct work_struct work;
	u32 user_fence_size;
	u32 fence_size;
	u32 event_fence_action_size;
	bool fifo_down;
	struct list_head cleanup_list;
	uint32_t pending_actions[VMW_ACTION_MAX];
	struct mutex goal_irq_mutex;
	bool goal_irq_on; /* Protected by @goal_irq_mutex */
	bool seqno_valid; /* Protected by @lock, and may not be set to true
			     without the @goal_irq_mutex held. */
	u64 ctx;
};

struct vmw_user_fence {
	struct ttm_base_object base;
	struct vmw_fence_obj fence;
};

/**
 * struct vmw_event_fence_action - fence action that delivers a drm event.
 *
 * @action: A struct vmw_fence_action to hook up to a fence.
 * @event: A pointer to the pending struct drm_pending_event that controls
 * the event delivery.
 * @fence: A referenced pointer to the fence to keep it alive while @action
 * hangs on it.
 * @dev: Pointer to a struct drm_device so we can access the event stuff.
 * @tv_sec: If non-NULL, the variable pointed to will be assigned the
 * current time's tv_sec value when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to
 * will be assigned the current time's tv_usec value when the fence
 * signals.
 */
struct vmw_event_fence_action {
	struct vmw_fence_action action;

	struct drm_pending_event *event;
	struct vmw_fence_obj *fence;
	struct drm_device *dev;

	uint32_t *tv_sec;
	uint32_t *tv_usec;
};

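/**
 * fman_from_fence - Return the fence manager of a fence object.
 *
 * @fence: Pointer to the fence object.
 *
 * The embedded struct dma_fence is initialized with a pointer to the
 * manager's spinlock (see vmw_fence_obj_init()), so container_of() on that
 * lock pointer recovers the manager without storing a back-pointer in
 * every fence.
 */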
static struct vmw_fence_manager *
fman_from_fence(struct vmw_fence_obj *fence)
{
	return container_of(fence->base.lock, struct vmw_fence_manager, lock);
}

/*
 * Note on the fencing subsystem's usage of irqs:
 * Typically the vmw_fences_update function is called
 *
 * a) When a new fence seqno has been submitted by the fifo code.
 * b) On-demand when we have waiters. Sleeping waiters will switch on the
 * ANY_FENCE irq and call vmw_fences_update each time an ANY_FENCE irq is
 * received. When the last fence waiter is gone, that irq is masked away.
 *
 * In situations where there are no waiters and we don't submit any new
 * fences, fence objects may not be signaled. This is perfectly OK, since
 * there are no consumers of the signaled data, but it is NOT OK when
 * there are fence actions attached to a fence. The fencing subsystem then
 * makes use of the FENCE_GOAL irq and sets the fence goal seqno to that of
 * the next fence which has an action attached, and each time
 * vmw_fences_update is called, the subsystem makes sure the fence goal
 * seqno is updated.
 *
 * The fence goal seqno irq is on as long as there are unsignaled fence
 * objects with actions attached to them.
 */

static void vmw_fence_obj_destroy(struct dma_fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);

	spin_lock(&fman->lock);
	list_del_init(&fence->head);
	--fman->num_fence_objects;
	spin_unlock(&fman->lock);
	fence->destroy(fence);
}

static const char *vmw_fence_get_driver_name(struct dma_fence *f)
{
	return "vmwgfx";
}

static const char *vmw_fence_get_timeline_name(struct dma_fence *f)
{
	return "svga";
}

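/*
 * vmw_fence_enable_signaling - dma_fence_ops callback to enable signaling.
 *
 * Called by the dma_fence core, with the fence lock held, when the first
 * waiter shows up. If the device seqno shows the fence has already passed,
 * return false so the core signals the fence immediately. Otherwise ping
 * the host so the fifo is processed and the seqno eventually advances.
 */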
static bool vmw_fence_enable_signaling(struct dma_fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;

	u32 *fifo_mem = dev_priv->mmio_virt;
	u32 seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
	if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
		return false;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);

	return true;
}

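/*
 * Custom wait implementation: rather than using dma_fence_default_wait(),
 * a struct vmwgfx_wait_cb is put on the fence callback list to wake the
 * sleeping task, while vmw_fence_wait() below re-reads the device seqno on
 * each wakeup and honors pending signals and the timeout.
 */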
struct vmwgfx_wait_cb {
	struct dma_fence_cb base;
	struct task_struct *task;
};

static void
vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct vmwgfx_wait_cb *wait =
		container_of(cb, struct vmwgfx_wait_cb, base);

	wake_up_process(wait->task);
}

static void __vmw_fences_update(struct vmw_fence_manager *fman);

static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;
	struct vmwgfx_wait_cb cb;
	long ret = timeout;
	unsigned long irq_flags;

	if (likely(vmw_fence_obj_signaled(fence)))
		return timeout;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	vmw_seqno_waiter_add(dev_priv);

	spin_lock_irqsave(f->lock, irq_flags);

	if (intr && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	cb.base.func = vmwgfx_wait_cb;
	cb.task = current;
	list_add(&cb.base.node, &f->cb_list);

	while (ret > 0) {
		__vmw_fences_update(fman);
		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
			break;

		if (intr)
			__set_current_state(TASK_INTERRUPTIBLE);
		else
			__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(f->lock, irq_flags);

		ret = schedule_timeout(ret);

		spin_lock_irqsave(f->lock, irq_flags);
		if (ret > 0 && intr && signal_pending(current))
			ret = -ERESTARTSYS;
	}

	if (!list_empty(&cb.base.node))
		list_del(&cb.base.node);
	__set_current_state(TASK_RUNNING);

out:
	spin_unlock_irqrestore(f->lock, irq_flags);

	vmw_seqno_waiter_remove(dev_priv);

	return ret;
}

static const struct dma_fence_ops vmw_fence_ops = {
	.get_driver_name = vmw_fence_get_driver_name,
	.get_timeline_name = vmw_fence_get_timeline_name,
	.enable_signaling = vmw_fence_enable_signaling,
	.wait = vmw_fence_wait,
	.release = vmw_fence_obj_destroy,
};

/*
 * Execute signal actions on fences recently signaled.
 * This is done from a workqueue so we don't have to execute
 * signal actions from atomic context.
 */

static void vmw_fence_work_func(struct work_struct *work)
{
	struct vmw_fence_manager *fman =
		container_of(work, struct vmw_fence_manager, work);
	struct list_head list;
	struct vmw_fence_action *action, *next_action;
	bool seqno_valid;

	do {
		INIT_LIST_HEAD(&list);
		mutex_lock(&fman->goal_irq_mutex);

		spin_lock(&fman->lock);
		list_splice_init(&fman->cleanup_list, &list);
		seqno_valid = fman->seqno_valid;
		spin_unlock(&fman->lock);

		if (!seqno_valid && fman->goal_irq_on) {
			fman->goal_irq_on = false;
			vmw_goal_waiter_remove(fman->dev_priv);
		}
		mutex_unlock(&fman->goal_irq_mutex);

		if (list_empty(&list))
			return;

		/*
		 * At this point, only we should be able to manipulate the
		 * list heads of the actions we have on the private list,
		 * so fman::lock need not be held here.
		 */

		list_for_each_entry_safe(action, next_action, &list, head) {
			list_del_init(&action->head);
			if (action->cleanup)
				action->cleanup(action);
		}
	} while (1);
}

struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
	struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);

	if (unlikely(!fman))
		return NULL;

	fman->dev_priv = dev_priv;
	spin_lock_init(&fman->lock);
	INIT_LIST_HEAD(&fman->fence_list);
	INIT_LIST_HEAD(&fman->cleanup_list);
	INIT_WORK(&fman->work, &vmw_fence_work_func);
	fman->fifo_down = true;
	fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
	fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
	fman->event_fence_action_size =
		ttm_round_pot(sizeof(struct vmw_event_fence_action));
	mutex_init(&fman->goal_irq_mutex);
	fman->ctx = dma_fence_context_alloc(1);

	return fman;
}

void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
	bool lists_empty;

	(void) cancel_work_sync(&fman->work);

	spin_lock(&fman->lock);
	lists_empty = list_empty(&fman->fence_list) &&
		list_empty(&fman->cleanup_list);
	spin_unlock(&fman->lock);

	BUG_ON(!lists_empty);
	kfree(fman);
}

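/*
 * vmw_fence_obj_init - Initialize a fence object and add it to the fence
 * manager's fence list.
 *
 * Fails with -EBUSY if the fifo has been taken down (see
 * vmw_fence_fifo_down()), since such a fence would never signal.
 */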
static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
			      struct vmw_fence_obj *fence, u32 seqno,
			      void (*destroy) (struct vmw_fence_obj *fence))
{
	int ret = 0;

	dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
		       fman->ctx, seqno);
	INIT_LIST_HEAD(&fence->seq_passed_actions);
	fence->destroy = destroy;

	spin_lock(&fman->lock);
	if (unlikely(fman->fifo_down)) {
		ret = -EBUSY;
		goto out_unlock;
	}
	list_add_tail(&fence->head, &fman->fence_list);
	++fman->num_fence_objects;

out_unlock:
	spin_unlock(&fman->lock);
	return ret;
}

static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
				struct list_head *list)
{
	struct vmw_fence_action *action, *next_action;

	list_for_each_entry_safe(action, next_action, list, head) {
		list_del_init(&action->head);
		fman->pending_actions[action->type]--;
		if (action->seq_passed != NULL)
			action->seq_passed(action);

		/*
		 * Add the cleanup action to the cleanup list so that
		 * it will be performed by a worker task.
		 */

		list_add_tail(&action->head, &fman->cleanup_list);
	}
}

/**
 * vmw_fence_goal_new_locked - Figure out a new device fence goal
 * seqno if needed.
 *
 * @fman: Pointer to a fence manager.
 * @passed_seqno: The seqno the device currently signals as passed.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when we have a new passed_seqno, and
 * we might need to update the fence goal. It checks to see whether
 * the current fence goal has already passed, and, in that case,
 * scans through all unsignaled fences to get the next fence object with an
 * action attached, and sets the seqno of that fence as a new fence goal.
 *
 * Return: true if the device goal seqno was updated, false otherwise.
 */
static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
				      u32 passed_seqno)
{
	u32 goal_seqno;
	u32 *fifo_mem;
	struct vmw_fence_obj *fence;

	if (likely(!fman->seqno_valid))
		return false;

	fifo_mem = fman->dev_priv->mmio_virt;
	goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
	if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
		return false;

	fman->seqno_valid = false;
	list_for_each_entry(fence, &fman->fence_list, head) {
		if (!list_empty(&fence->seq_passed_actions)) {
			fman->seqno_valid = true;
			vmw_mmio_write(fence->base.seqno,
				       fifo_mem + SVGA_FIFO_FENCE_GOAL);
			break;
		}
	}

	return true;
}

/**
 * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
 * needed.
 *
 * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
 * considered as a device fence goal.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when an action has been attached to a fence to
 * check whether the seqno of that fence should be used for a fence
 * goal interrupt. This is typically needed if the current fence goal is
 * invalid, or has a higher seqno than that of the current fence object.
 *
 * Return: true if the device goal seqno was updated, false otherwise.
 */
static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	u32 goal_seqno;
	u32 *fifo_mem;

	if (dma_fence_is_signaled_locked(&fence->base))
		return false;

	fifo_mem = fman->dev_priv->mmio_virt;
	goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
	if (likely(fman->seqno_valid &&
		   goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
		return false;

	vmw_mmio_write(fence->base.seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
	fman->seqno_valid = true;

	return true;
}

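/*
 * __vmw_fences_update - Update the seqno and signal passed fence objects.
 *
 * Caller must hold fman->lock. Walks the fence list in submission order,
 * signaling every fence whose seqno has passed and running its attached
 * actions. If updating the fence goal may have raced with the hardware,
 * the scan is rerun with a freshly read seqno. Cleanup of performed
 * actions is deferred to the work item.
 */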
static void __vmw_fences_update(struct vmw_fence_manager *fman)
{
	struct vmw_fence_obj *fence, *next_fence;
	struct list_head action_list;
	bool needs_rerun;
	uint32_t seqno, new_seqno;
	u32 *fifo_mem = fman->dev_priv->mmio_virt;

	seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
rerun:
	list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
		if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
			list_del_init(&fence->head);
			dma_fence_signal_locked(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		} else
			break;
	}

	/*
	 * Rerun if the fence goal seqno was updated, and the
	 * hardware might have raced with that update, so that
	 * we missed a fence_goal irq.
	 */

	needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
	if (unlikely(needs_rerun)) {
		new_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
		if (new_seqno != seqno) {
			seqno = new_seqno;
			goto rerun;
		}
	}

	if (!list_empty(&fman->cleanup_list))
		(void) schedule_work(&fman->work);
}

void vmw_fences_update(struct vmw_fence_manager *fman)
{
	spin_lock(&fman->lock);
	__vmw_fences_update(fman);
	spin_unlock(&fman->lock);
}

bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
		return true;

	vmw_fences_update(fman);

	return dma_fence_is_signaled(&fence->base);
}

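/*
 * vmw_fence_obj_wait - Wait for a fence object to signal.
 *
 * Maps the dma_fence_wait_timeout() return value to an errno: 0 on
 * success, -EBUSY on timeout, or the error code (for example
 * -ERESTARTSYS) on failure. The @lazy argument is kept for API
 * compatibility but is not used by the dma_fence-based implementation.
 */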
int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
		       bool interruptible, unsigned long timeout)
{
	long ret = dma_fence_wait_timeout(&fence->base, interruptible, timeout);

	if (likely(ret > 0))
		return 0;
	else if (ret == 0)
		return -EBUSY;
	else
		return ret;
}

void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
{
	struct vmw_private *dev_priv = fman_from_fence(fence)->dev_priv;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
}

static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
	dma_fence_free(&fence->base);
}

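/*
 * vmw_fence_create - Create a kernel-only fence object, with no
 * user-space visible base object, for the given seqno.
 *
 * The fence is destroyed with dma_fence_free() once its last reference
 * is dropped; see vmw_fence_destroy() above.
 */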
int vmw_fence_create(struct vmw_fence_manager *fman,
		     uint32_t seqno,
		     struct vmw_fence_obj **p_fence)
{
	struct vmw_fence_obj *fence;
	int ret;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (unlikely(!fence))
		return -ENOMEM;

	ret = vmw_fence_obj_init(fman, fence, seqno,
				 vmw_fence_destroy);
	if (unlikely(ret != 0))
		goto out_err_init;

	*p_fence = fence;
	return 0;

out_err_init:
	kfree(fence);
	return ret;
}

static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
{
	struct vmw_user_fence *ufence =
		container_of(fence, struct vmw_user_fence, fence);
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	ttm_base_object_kfree(ufence, base);
	/*
	 * Free kernel space accounting.
	 */
	ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
			    fman->user_fence_size);
}

static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_fence *ufence =
		container_of(base, struct vmw_user_fence, base);
	struct vmw_fence_obj *fence = &ufence->fence;

	*p_base = NULL;
	vmw_fence_obj_unreference(&fence);
}

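/*
 * vmw_user_fence_create - Create a fence exposed to user-space through a
 * ttm base object.
 *
 * On success, *p_fence holds a reference to the new fence and *p_handle
 * is the handle user-space uses to look it up. The base object holds its
 * own fence reference, which is dropped in vmw_user_fence_base_release().
 */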
int vmw_user_fence_create(struct drm_file *file_priv,
			  struct vmw_fence_manager *fman,
			  uint32_t seqno,
			  struct vmw_fence_obj **p_fence,
			  uint32_t *p_handle)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_user_fence *ufence;
	struct vmw_fence_obj *tmp;
	struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	int ret;

	/*
	 * Kernel memory space accounting, since this object may
	 * be created by a user-space request.
	 */

	ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
				   &ctx);
	if (unlikely(ret != 0))
		return ret;

	ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
	if (unlikely(!ufence)) {
		ret = -ENOMEM;
		goto out_no_object;
	}

	ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
				 vmw_user_fence_destroy);
	if (unlikely(ret != 0)) {
		kfree(ufence);
		goto out_no_object;
	}

	/*
	 * The base object holds a reference which is freed in
	 * vmw_user_fence_base_release.
	 */
	tmp = vmw_fence_obj_reference(&ufence->fence);
	ret = ttm_base_object_init(tfile, &ufence->base, false,
				   VMW_RES_FENCE,
				   &vmw_user_fence_base_release, NULL);

	if (unlikely(ret != 0)) {
		/*
		 * Free the base object's reference.
		 */
		vmw_fence_obj_unreference(&tmp);
		goto out_err;
	}

	*p_fence = &ufence->fence;
	*p_handle = ufence->base.hash.key;

	return 0;
out_err:
	tmp = &ufence->fence;
	vmw_fence_obj_unreference(&tmp);
out_no_object:
	ttm_mem_global_free(mem_glob, fman->user_fence_size);
	return ret;
}

/**
 * vmw_wait_dma_fence - Wait for a dma fence
 *
 * @fman: pointer to a fence manager
 * @fence: DMA fence to wait on
 *
 * This function handles the case when the fence is actually a fence
 * array. If that's the case, it'll wait on each of the child fences.
 */
int vmw_wait_dma_fence(struct vmw_fence_manager *fman,
		       struct dma_fence *fence)
{
	struct dma_fence_array *fence_array;
	int ret = 0;
	int i;

	if (dma_fence_is_signaled(fence))
		return 0;

	if (!dma_fence_is_array(fence))
		return dma_fence_wait(fence, true);

	/* From i915: Note that if the fence-array was created in
	 * signal-on-any mode, we should *not* decompose it into its individual
	 * fences. However, we don't currently store which mode the fence-array
	 * is operating in. Fortunately, the only user of signal-on-any is
	 * private to amdgpu and we should not see any incoming fence-array
	 * from sync-file being in signal-on-any mode.
	 */

	fence_array = to_dma_fence_array(fence);
	for (i = 0; i < fence_array->num_fences; i++) {
		struct dma_fence *child = fence_array->fences[i];

		ret = dma_fence_wait(child, true);

		if (ret < 0)
			return ret;
	}

	return 0;
}

/**
 * vmw_fence_fifo_down - Signal all unsignaled fence objects.
 *
 * @fman: Pointer to a fence manager.
 */
void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
{
	struct list_head action_list;
	int ret;

	/*
	 * The list may be altered while we traverse it, so always
	 * restart when we've released the fman->lock.
	 */

	spin_lock(&fman->lock);
	fman->fifo_down = true;
	while (!list_empty(&fman->fence_list)) {
		struct vmw_fence_obj *fence =
			list_entry(fman->fence_list.prev, struct vmw_fence_obj,
				   head);
		dma_fence_get(&fence->base);
		spin_unlock(&fman->lock);

		ret = vmw_fence_obj_wait(fence, false, false,
					 VMW_FENCE_WAIT_TIMEOUT);

		if (unlikely(ret != 0)) {
			list_del_init(&fence->head);
			dma_fence_signal(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		}

		BUG_ON(!list_empty(&fence->head));
		dma_fence_put(&fence->base);
		spin_lock(&fman->lock);
	}
	spin_unlock(&fman->lock);
}

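/*
 * vmw_fence_fifo_up - Allow new fence objects to be created again once
 * the fifo is back up.
 */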
void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
{
	spin_lock(&fman->lock);
	fman->fifo_down = false;
	spin_unlock(&fman->lock);
}

/**
 * vmw_fence_obj_lookup - Look up a user-space fence object
 *
 * @tfile: A struct ttm_object_file identifying the caller.
 * @handle: A handle identifying the fence object.
 *
 * The fence object is looked up and type-checked. The caller needs
 * to have opened the fence object first, but since that happens on
 * creation and fence objects aren't shareable, that's not an
 * issue currently.
 *
 * Return: A struct vmw_user_fence base ttm object on success or
 * an error pointer on failure.
 */
static struct ttm_base_object *
vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
{
	struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);

	if (!base) {
		pr_err("Invalid fence object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return ERR_PTR(-EINVAL);
	}

	if (base->refcount_release != vmw_user_fence_base_release) {
		pr_err("Invalid fence object handle 0x%08lx.\n",
		       (unsigned long)handle);
		ttm_base_object_unref(&base);
		return ERR_PTR(-EINVAL);
	}

	return base;
}

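/*
 * vmw_fence_obj_wait_ioctl - Ioctl entry point for waiting on a user
 * fence.
 *
 * The first call computes an absolute timeout cookie from the
 * user-supplied microsecond timeout, so that a wait restarted after a
 * signal does not start its full timeout over again.
 */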
int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_vmw_fence_wait_arg *arg =
	    (struct drm_vmw_fence_wait_arg *)data;
	unsigned long timeout;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;
	uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);

	/*
	 * 64-bit division not present on 32-bit systems, so do an
	 * approximation. (Divide by 1000000). The shifts below compute
	 * x * (2^-20 + 2^-24 - 2^-26), i.e. a division by roughly
	 * 1001625, which is within about 0.2% of the exact result.
	 */

	wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
	  (wait_timeout >> 26);

	if (!arg->cookie_valid) {
		arg->cookie_valid = 1;
		arg->kernel_cookie = jiffies + wait_timeout;
	}

	base = vmw_fence_obj_lookup(tfile, arg->handle);
	if (IS_ERR(base))
		return PTR_ERR(base);

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);

	timeout = jiffies;
	if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
		ret = ((vmw_fence_obj_signaled(fence)) ?
		       0 : -EBUSY);
		goto out;
	}

	timeout = (unsigned long)arg->kernel_cookie - timeout;

	ret = vmw_fence_obj_wait(fence, arg->lazy, true, timeout);

out:
	ttm_base_object_unref(&base);

	/*
	 * Optionally unref the fence object.
	 */

	if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
		return ttm_ref_object_base_unref(tfile, arg->handle,
						 TTM_REF_USAGE);
	return ret;
}

int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_vmw_fence_signaled_arg *arg =
		(struct drm_vmw_fence_signaled_arg *) data;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct vmw_fence_manager *fman;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);

	base = vmw_fence_obj_lookup(tfile, arg->handle);
	if (IS_ERR(base))
		return PTR_ERR(base);

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
	fman = fman_from_fence(fence);

	arg->signaled = vmw_fence_obj_signaled(fence);

	arg->signaled_flags = arg->flags;
	spin_lock(&fman->lock);
	arg->passed_seqno = dev_priv->last_read_seqno;
	spin_unlock(&fman->lock);

	ttm_base_object_unref(&base);

	return 0;
}

int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_fence_arg *arg =
		(struct drm_vmw_fence_arg *) data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

/**
 * vmw_event_fence_action_seq_passed
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is called when the seqno of the fence where @action is
 * attached has passed. It queues the event on the submitter's event list.
 * This function is always called from atomic context.
 */
static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);
	struct drm_device *dev = eaction->dev;
	struct drm_pending_event *event = eaction->event;

	if (unlikely(event == NULL))
		return;

	spin_lock_irq(&dev->event_lock);

	if (likely(eaction->tv_sec != NULL)) {
		struct timespec64 ts;

		ktime_get_ts64(&ts);
		/* monotonic time, so no y2038 overflow */
		*eaction->tv_sec = ts.tv_sec;
		*eaction->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
	}

	drm_send_event_locked(dev, eaction->event);
	eaction->event = NULL;
	spin_unlock_irq(&dev->event_lock);
}

/**
 * vmw_event_fence_action_cleanup
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is the struct vmw_fence_action destructor. It's typically
 * called from a workqueue.
 */
static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);

	vmw_fence_obj_unreference(&eaction->fence);
	kfree(eaction);
}

/**
 * vmw_fence_obj_add_action - Add an action to a fence object.
 *
 * @fence: The fence object.
 * @action: The action to add.
 *
 * Note that the action callbacks may be executed before this function
 * returns.
 */
static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
			      struct vmw_fence_action *action)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	bool run_update = false;

	mutex_lock(&fman->goal_irq_mutex);
	spin_lock(&fman->lock);

	fman->pending_actions[action->type]++;
	if (dma_fence_is_signaled_locked(&fence->base)) {
		struct list_head action_list;

		INIT_LIST_HEAD(&action_list);
		list_add_tail(&action->head, &action_list);
		vmw_fences_perform_actions(fman, &action_list);
	} else {
		list_add_tail(&action->head, &fence->seq_passed_actions);

		/*
		 * This function may set fman::seqno_valid, so it must
		 * be run with the goal_irq_mutex held.
		 */
		run_update = vmw_fence_goal_check_locked(fence);
	}

	spin_unlock(&fman->lock);

	if (run_update) {
		if (!fman->goal_irq_on) {
			fman->goal_irq_on = true;
			vmw_goal_waiter_add(fman->dev_priv);
		}
		vmw_fences_update(fman);
	}
	mutex_unlock(&fman->goal_irq_mutex);
}

/**
 * vmw_event_fence_action_queue - Queue an event for sending when a fence
 * object seqno has passed.
 *
 * @file_priv: The file connection on which the event should be posted.
 * @fence: The fence object on which to post the event.
 * @event: Event to be posted. This event should've been alloced
 * using k[mz]alloc, and should've been completely initialized.
 * @tv_sec: If non-NULL, the variable pointed to will be assigned the
 * current time's tv_sec value when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set; will be assigned the current
 * time's tv_usec value when the fence signals.
 * @interruptible: Interruptible waits if possible.
 *
 * As a side effect, the object pointed to by @event may have been
 * freed when this function returns. If this function returns with
 * an error code, the caller needs to free that object.
 */
int vmw_event_fence_action_queue(struct drm_file *file_priv,
				 struct vmw_fence_obj *fence,
				 struct drm_pending_event *event,
				 uint32_t *tv_sec,
				 uint32_t *tv_usec,
				 bool interruptible)
{
	struct vmw_event_fence_action *eaction;
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
	if (unlikely(!eaction))
		return -ENOMEM;

	eaction->event = event;

	eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
	eaction->action.cleanup = vmw_event_fence_action_cleanup;
	eaction->action.type = VMW_ACTION_EVENT;

	eaction->fence = vmw_fence_obj_reference(fence);
	eaction->dev = fman->dev_priv->dev;
	eaction->tv_sec = tv_sec;
	eaction->tv_usec = tv_usec;

	vmw_fence_obj_add_action(fence, &eaction->action);

	return 0;
}

struct vmw_event_fence_pending {
	struct drm_pending_event base;
	struct drm_vmw_event_fence event;
};

static int vmw_event_fence_action_create(struct drm_file *file_priv,
				  struct vmw_fence_obj *fence,
				  uint32_t flags,
				  uint64_t user_data,
				  bool interruptible)
{
	struct vmw_event_fence_pending *event;
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct drm_device *dev = fman->dev_priv->dev;
	int ret;

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (unlikely(!event)) {
		DRM_ERROR("Failed to allocate an event.\n");
		ret = -ENOMEM;
		goto out_no_space;
	}

	event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
	event->event.base.length = sizeof(*event);
	event->event.user_data = user_data;

	ret = drm_event_reserve_init(dev, file_priv, &event->base,
				     &event->event.base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate event space for this file.\n");
		kfree(event);
		goto out_no_space;
	}

	if (flags & DRM_VMW_FE_FLAG_REQ_TIME)
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   &event->event.tv_sec,
						   &event->event.tv_usec,
						   interruptible);
	else
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   NULL,
						   NULL,
						   interruptible);
	if (ret != 0)
		goto out_no_queue;

	return 0;

out_no_queue:
	drm_event_cancel_free(dev, &event->base);
out_no_space:
	return ret;
}

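/*
 * vmw_fence_event_ioctl - Ioctl entry point for attaching a drm event to
 * a fence.
 *
 * Looks up the fence identified by @arg->handle, or, if the handle is
 * zero, creates a new fence at the current command stream position. A
 * DRM_VMW_EVENT_FENCE_SIGNALED event is then queued for delivery when the
 * fence signals, optionally copying fence info back to user-space.
 */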
int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_fence_event_arg *arg =
		(struct drm_vmw_fence_event_arg *) data;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct ttm_object_file *tfile = vmw_fp->tfile;
	struct drm_vmw_fence_rep __user *user_fence_rep =
		(struct drm_vmw_fence_rep __user *)(unsigned long)
		arg->fence_rep;
	uint32_t handle;
	int ret;

	/*
	 * Look up an existing fence object,
	 * and if user-space wants a new reference,
	 * add one.
	 */
	if (arg->handle) {
		struct ttm_base_object *base =
			vmw_fence_obj_lookup(tfile, arg->handle);

		if (IS_ERR(base))
			return PTR_ERR(base);

		fence = &(container_of(base, struct vmw_user_fence,
				       base)->fence);
		(void) vmw_fence_obj_reference(fence);

		if (user_fence_rep != NULL) {
			ret = ttm_ref_object_add(vmw_fp->tfile, base,
						 TTM_REF_USAGE, NULL, false);
			if (unlikely(ret != 0)) {
				DRM_ERROR("Failed to reference a fence "
					  "object.\n");
				goto out_no_ref_obj;
			}
			handle = base->hash.key;
		}
		ttm_base_object_unref(&base);
	}

	/*
	 * Create a new fence object.
	 */
	if (!fence) {
		ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
						 &fence,
						 (user_fence_rep) ?
						 &handle : NULL);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Fence event failed to create fence.\n");
			return ret;
		}
	}

	BUG_ON(fence == NULL);

	ret = vmw_event_fence_action_create(file_priv, fence,
					    arg->flags,
					    arg->user_data,
					    true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Failed to attach event to fence.\n");
		goto out_no_create;
	}

	vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
				    handle, -1, NULL);
	vmw_fence_obj_unreference(&fence);
	return 0;
out_no_create:
	if (user_fence_rep != NULL)
		ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
out_no_ref_obj:
	vmw_fence_obj_unreference(&fence);
	return ret;
}