// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2011-2014 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/sched/signal.h>

#include "vmwgfx_drv.h"

#define VMW_FENCE_WRAP (1 << 31)

struct vmw_fence_manager {
	int num_fence_objects;
	struct vmw_private *dev_priv;
	spinlock_t lock;
	struct list_head fence_list;
	struct work_struct work;
	u32 user_fence_size;
	u32 fence_size;
	u32 event_fence_action_size;
	bool fifo_down;
	struct list_head cleanup_list;
	uint32_t pending_actions[VMW_ACTION_MAX];
	struct mutex goal_irq_mutex;
	bool goal_irq_on; /* Protected by @goal_irq_mutex */
	bool seqno_valid; /* Protected by @lock, and may not be set to true
			     without the @goal_irq_mutex held. */
	u64 ctx;
};

struct vmw_user_fence {
	struct ttm_base_object base;
	struct vmw_fence_obj fence;
};

/**
 * struct vmw_event_fence_action - fence action that delivers a drm event.
 *
 * @action: A struct vmw_fence_action to hook up to a fence.
 * @event: A pointer to the pending event.
 * @fence: A referenced pointer to the fence to keep it alive while @action
 * hangs on it.
 * @dev: Pointer to a struct drm_device so we can access the event stuff.
 * @tv_sec: If non-null, the variable pointed to will be assigned the
 * current time tv_sec val when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
 * be assigned the current time tv_usec val when the fence signals.
 */
struct vmw_event_fence_action {
	struct vmw_fence_action action;

	struct drm_pending_event *event;
	struct vmw_fence_obj *fence;
	struct drm_device *dev;

	uint32_t *tv_sec;
	uint32_t *tv_usec;
};

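/**
 * fman_from_fence - Return the fence manager owning a fence object.
 *
 * @fence: The fence object.
 *
 * The fence's embedded dma_fence lock points at the manager's @lock member,
 * so the manager can be recovered with container_of().
 */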
static struct vmw_fence_manager *
fman_from_fence(struct vmw_fence_obj *fence)
{
	return container_of(fence->base.lock, struct vmw_fence_manager, lock);
}

/*
 * Note on fencing subsystem usage of irqs:
 * Typically the vmw_fences_update function is called
 *
 * a) When a new fence seqno has been submitted by the fifo code.
 * b) On-demand when we have waiters. Sleeping waiters will switch on the
 * ANY_FENCE irq and call the vmw_fences_update function each time an
 * ANY_FENCE irq is received. When the last fence waiter is gone, that IRQ
 * is masked away.
 *
 * In situations where there are no waiters and we don't submit any new fences,
 * fence objects may not be signaled. This is perfectly OK, since there are
 * no consumers of the signaled data, but that is NOT OK when there are fence
 * actions attached to a fence. The fencing subsystem then makes use of the
 * FENCE_GOAL irq and sets the fence goal seqno to that of the next fence
 * which has an action attached, and each time vmw_fences_update is called,
 * the subsystem makes sure the fence goal seqno is updated.
 *
 * The fence goal seqno irq is on as long as there are unsignaled fence
 * objects with actions attached to them.
 */

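/**
 * vmw_fence_obj_destroy - dma_fence release callback.
 *
 * @f: The dma_fence embedded in a struct vmw_fence_obj.
 *
 * Unlinks the fence from the fence manager's list and calls the fence's
 * destroy callback.
 */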
static void vmw_fence_obj_destroy(struct dma_fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);

	spin_lock(&fman->lock);
	list_del_init(&fence->head);
	--fman->num_fence_objects;
	spin_unlock(&fman->lock);
	fence->destroy(fence);
}

static const char *vmw_fence_get_driver_name(struct dma_fence *f)
{
	return "vmwgfx";
}

static const char *vmw_fence_get_timeline_name(struct dma_fence *f)
{
	return "svga";
}

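/**
 * vmw_fence_enable_signaling - dma_fence enable_signaling callback.
 *
 * @f: The dma_fence embedded in a struct vmw_fence_obj.
 *
 * Returns false if the device seqno has already passed this fence's seqno,
 * in which case the dma_fence core will signal the fence. Otherwise pings
 * the host so the fence will eventually signal, and returns true.
 */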
static bool vmw_fence_enable_signaling(struct dma_fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;

	u32 seqno = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_FENCE);
	if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
		return false;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);

	return true;
}

struct vmwgfx_wait_cb {
	struct dma_fence_cb base;
	struct task_struct *task;
};

static void
vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct vmwgfx_wait_cb *wait =
		container_of(cb, struct vmwgfx_wait_cb, base);

	wake_up_process(wait->task);
}

static void __vmw_fences_update(struct vmw_fence_manager *fman);

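/**
 * vmw_fence_wait - dma_fence wait callback.
 *
 * @f: The dma_fence embedded in a struct vmw_fence_obj.
 * @intr: Whether the wait is interruptible.
 * @timeout: Timeout in jiffies.
 *
 * Open-coded version of the default dma_fence wait that registers a seqno
 * waiter with the device for the duration of the wait and re-checks the
 * device seqno via __vmw_fences_update() each time the task wakes up.
 */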
static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;
	struct vmwgfx_wait_cb cb;
	long ret = timeout;

	if (likely(vmw_fence_obj_signaled(fence)))
		return timeout;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	vmw_seqno_waiter_add(dev_priv);

	spin_lock(f->lock);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
		goto out;

	if (intr && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	cb.base.func = vmwgfx_wait_cb;
	cb.task = current;
	list_add(&cb.base.node, &f->cb_list);

	for (;;) {
		__vmw_fences_update(fman);

		/*
		 * We can use the barrier free __set_current_state() since
		 * DMA_FENCE_FLAG_SIGNALED_BIT + wakeup is protected by the
		 * fence spinlock.
		 */
		if (intr)
			__set_current_state(TASK_INTERRUPTIBLE);
		else
			__set_current_state(TASK_UNINTERRUPTIBLE);

		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) {
			if (ret == 0 && timeout > 0)
				ret = 1;
			break;
		}

		if (intr && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (ret == 0)
			break;

		spin_unlock(f->lock);

		ret = schedule_timeout(ret);

		spin_lock(f->lock);
	}
	__set_current_state(TASK_RUNNING);
	if (!list_empty(&cb.base.node))
		list_del(&cb.base.node);

out:
	spin_unlock(f->lock);

	vmw_seqno_waiter_remove(dev_priv);

	return ret;
}

static const struct dma_fence_ops vmw_fence_ops = {
	.get_driver_name = vmw_fence_get_driver_name,
	.get_timeline_name = vmw_fence_get_timeline_name,
	.enable_signaling = vmw_fence_enable_signaling,
	.wait = vmw_fence_wait,
	.release = vmw_fence_obj_destroy,
};

/*
 * Execute signal actions on fences recently signaled.
 * This is done from a workqueue so we don't have to execute
 * signal actions from atomic context.
 */

static void vmw_fence_work_func(struct work_struct *work)
{
	struct vmw_fence_manager *fman =
		container_of(work, struct vmw_fence_manager, work);
	struct list_head list;
	struct vmw_fence_action *action, *next_action;
	bool seqno_valid;

	do {
		INIT_LIST_HEAD(&list);
		mutex_lock(&fman->goal_irq_mutex);

		spin_lock(&fman->lock);
		list_splice_init(&fman->cleanup_list, &list);
		seqno_valid = fman->seqno_valid;
		spin_unlock(&fman->lock);

		if (!seqno_valid && fman->goal_irq_on) {
			fman->goal_irq_on = false;
			vmw_goal_waiter_remove(fman->dev_priv);
		}
		mutex_unlock(&fman->goal_irq_mutex);

		if (list_empty(&list))
			return;

		/*
		 * At this point, only we should be able to manipulate the
		 * list heads of the actions we have on the private list,
		 * so fman::lock need not be held.
		 */

		list_for_each_entry_safe(action, next_action, &list, head) {
			list_del_init(&action->head);
			if (action->cleanup)
				action->cleanup(action);
		}
	} while (1);
}

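/**
 * vmw_fence_manager_init - Allocate and initialize a fence manager.
 *
 * @dev_priv: Pointer to the device private structure.
 *
 * Returns the new fence manager on success, or NULL on allocation failure.
 * The manager is created with @fifo_down set, so fence object creation
 * fails until vmw_fence_fifo_up() is called.
 */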
struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
	struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);

	if (unlikely(!fman))
		return NULL;

	fman->dev_priv = dev_priv;
	spin_lock_init(&fman->lock);
	INIT_LIST_HEAD(&fman->fence_list);
	INIT_LIST_HEAD(&fman->cleanup_list);
	INIT_WORK(&fman->work, &vmw_fence_work_func);
	fman->fifo_down = true;
	fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)) +
		TTM_OBJ_EXTRA_SIZE;
	fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
	fman->event_fence_action_size =
		ttm_round_pot(sizeof(struct vmw_event_fence_action));
	mutex_init(&fman->goal_irq_mutex);
	fman->ctx = dma_fence_context_alloc(1);

	return fman;
}

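/**
 * vmw_fence_manager_takedown - Free a fence manager.
 *
 * @fman: Pointer to the fence manager to destroy.
 *
 * Flushes the cleanup work and frees the manager. All fence objects must
 * already have been removed from the manager's lists.
 */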
void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
	bool lists_empty;

	(void) cancel_work_sync(&fman->work);

	spin_lock(&fman->lock);
	lists_empty = list_empty(&fman->fence_list) &&
		list_empty(&fman->cleanup_list);
	spin_unlock(&fman->lock);

	BUG_ON(!lists_empty);
	kfree(fman);
}

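/**
 * vmw_fence_obj_init - Initialize a fence object and add it to the manager.
 *
 * @fman: Pointer to the fence manager.
 * @fence: The fence object to initialize.
 * @seqno: The seqno the fence will signal on.
 * @destroy: Destructor called when the last fence reference is dropped.
 *
 * Returns 0 on success, or -EBUSY if the fifo is down and no new fence
 * objects may be created.
 */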
static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
			      struct vmw_fence_obj *fence, u32 seqno,
			      void (*destroy) (struct vmw_fence_obj *fence))
{
	int ret = 0;

	dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
		       fman->ctx, seqno);
	INIT_LIST_HEAD(&fence->seq_passed_actions);
	fence->destroy = destroy;

	spin_lock(&fman->lock);
	if (unlikely(fman->fifo_down)) {
		ret = -EBUSY;
		goto out_unlock;
	}
	list_add_tail(&fence->head, &fman->fence_list);
	++fman->num_fence_objects;

out_unlock:
	spin_unlock(&fman->lock);
	return ret;
}

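/**
 * vmw_fences_perform_actions - Run the seq_passed callbacks of fence actions.
 *
 * @fman: Pointer to the fence manager.
 * @list: List of actions whose fences have signaled.
 *
 * Called with the fence manager lock held. The actions are then moved to
 * the cleanup list so that their cleanup callbacks run from the fence
 * manager worker rather than from atomic context.
 */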
static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
				struct list_head *list)
{
	struct vmw_fence_action *action, *next_action;

	list_for_each_entry_safe(action, next_action, list, head) {
		list_del_init(&action->head);
		fman->pending_actions[action->type]--;
		if (action->seq_passed != NULL)
			action->seq_passed(action);

		/*
		 * Add the cleanup action to the cleanup list so that
		 * it will be performed by a worker task.
		 */

		list_add_tail(&action->head, &fman->cleanup_list);
	}
}

/**
 * vmw_fence_goal_new_locked - Figure out a new device fence goal
 * seqno if needed.
 *
 * @fman: Pointer to a fence manager.
 * @passed_seqno: The seqno the device currently signals as passed.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when we have a new passed_seqno, and
 * we might need to update the fence goal. It checks to see whether
 * the current fence goal has already passed, and, in that case,
 * scans through all unsignaled fences to get the next fence object with an
 * action attached, and sets the seqno of that fence as a new fence goal.
 *
 * Returns true if the device goal seqno was updated, false otherwise.
 */
static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
				      u32 passed_seqno)
{
	u32 goal_seqno;
	struct vmw_fence_obj *fence;

	if (likely(!fman->seqno_valid))
		return false;

	goal_seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE_GOAL);
	if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
		return false;

	fman->seqno_valid = false;
	list_for_each_entry(fence, &fman->fence_list, head) {
		if (!list_empty(&fence->seq_passed_actions)) {
			fman->seqno_valid = true;
			vmw_fifo_mem_write(fman->dev_priv,
					   SVGA_FIFO_FENCE_GOAL,
					   fence->base.seqno);
			break;
		}
	}

	return true;
}

/**
 * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
 * needed.
 *
 * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
 * considered as a device fence goal.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when an action has been attached to a fence to
 * check whether the seqno of that fence should be used for a fence
 * goal interrupt. This is typically needed if the current fence goal is
 * invalid, or has a higher seqno than that of the current fence object.
 *
 * Returns true if the device goal seqno was updated, false otherwise.
 */
static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	u32 goal_seqno;

	if (dma_fence_is_signaled_locked(&fence->base))
		return false;

	goal_seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE_GOAL);
	if (likely(fman->seqno_valid &&
		   goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
		return false;

	vmw_fifo_mem_write(fman->dev_priv, SVGA_FIFO_FENCE_GOAL,
			   fence->base.seqno);
	fman->seqno_valid = true;

	return true;
}

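/**
 * __vmw_fences_update - Signal all fences passed by the device seqno.
 *
 * @fman: Pointer to the fence manager.
 *
 * Called with the fence manager lock held. Reads the current device seqno,
 * signals every fence that has been passed, runs their actions, updates the
 * fence goal if needed and schedules the cleanup worker.
 */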
static void __vmw_fences_update(struct vmw_fence_manager *fman)
{
	struct vmw_fence_obj *fence, *next_fence;
	struct list_head action_list;
	bool needs_rerun;
	uint32_t seqno, new_seqno;

	seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE);
rerun:
	list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
		if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
			list_del_init(&fence->head);
			dma_fence_signal_locked(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		} else
			break;
	}

	/*
	 * Rerun if the fence goal seqno was updated, and the
	 * hardware might have raced with that update, so that
	 * we missed a fence_goal irq.
	 */

	needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
	if (unlikely(needs_rerun)) {
		new_seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE);
		if (new_seqno != seqno) {
			seqno = new_seqno;
			goto rerun;
		}
	}

	if (!list_empty(&fman->cleanup_list))
		(void) schedule_work(&fman->work);
}

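/**
 * vmw_fences_update - Signal all fences passed by the device seqno.
 *
 * @fman: Pointer to the fence manager.
 *
 * Locked wrapper around __vmw_fences_update().
 */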
void vmw_fences_update(struct vmw_fence_manager *fman)
{
	spin_lock(&fman->lock);
	__vmw_fences_update(fman);
	spin_unlock(&fman->lock);
}

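/**
 * vmw_fence_obj_signaled - Check whether a fence object has signaled.
 *
 * @fence: The fence object.
 *
 * Updates the fence manager from the current device seqno first, so that a
 * fence whose seqno has passed is reported as signaled even if no irq was
 * received. Returns true if the fence has signaled.
 */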
bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
		return true;

	vmw_fences_update(fman);

	return dma_fence_is_signaled(&fence->base);
}

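/**
 * vmw_fence_obj_wait - Wait for a fence object to signal.
 *
 * @fence: The fence object.
 * @lazy: Unused by this implementation.
 * @interruptible: Whether the wait is interruptible.
 * @timeout: Timeout in jiffies.
 *
 * Returns 0 if the fence signaled within the timeout, -EBUSY if it did not,
 * or a negative error code such as -ERESTARTSYS if the wait was interrupted.
 */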
int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
		       bool interruptible, unsigned long timeout)
{
	long ret = dma_fence_wait_timeout(&fence->base, interruptible, timeout);

	if (likely(ret > 0))
		return 0;
	else if (ret == 0)
		return -EBUSY;
	else
		return ret;
}

void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
{
	struct vmw_private *dev_priv = fman_from_fence(fence)->dev_priv;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
}

static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
	dma_fence_free(&fence->base);
}

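/**
 * vmw_fence_create - Create a kernel-only fence object.
 *
 * @fman: Pointer to the fence manager.
 * @seqno: The seqno the fence will signal on.
 * @p_fence: Receives a pointer to the new fence on success.
 *
 * Returns 0 on success, or a negative error code on failure.
 */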
int vmw_fence_create(struct vmw_fence_manager *fman,
		     uint32_t seqno,
		     struct vmw_fence_obj **p_fence)
{
	struct vmw_fence_obj *fence;
	int ret;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (unlikely(!fence))
		return -ENOMEM;

	ret = vmw_fence_obj_init(fman, fence, seqno,
				 vmw_fence_destroy);
	if (unlikely(ret != 0))
		goto out_err_init;

	*p_fence = fence;
	return 0;

out_err_init:
	kfree(fence);
	return ret;
}

static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
{
	struct vmw_user_fence *ufence =
		container_of(fence, struct vmw_user_fence, fence);
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	ttm_base_object_kfree(ufence, base);
	/*
	 * Free kernel space accounting.
	 */
	ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
			    fman->user_fence_size);
}

static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_fence *ufence =
		container_of(base, struct vmw_user_fence, base);
	struct vmw_fence_obj *fence = &ufence->fence;

	*p_base = NULL;
	vmw_fence_obj_unreference(&fence);
}

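/**
 * vmw_user_fence_create - Create a fence object with a user-space handle.
 *
 * @file_priv: The calling file.
 * @fman: Pointer to the fence manager.
 * @seqno: The seqno the fence will signal on.
 * @p_fence: Receives a pointer to the new fence on success.
 * @p_handle: Receives the TTM handle identifying the fence to user-space.
 *
 * Like vmw_fence_create(), but also sets up a TTM base object so that the
 * fence can be looked up and referenced from user-space.
 */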
int vmw_user_fence_create(struct drm_file *file_priv,
			  struct vmw_fence_manager *fman,
			  uint32_t seqno,
			  struct vmw_fence_obj **p_fence,
			  uint32_t *p_handle)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_user_fence *ufence;
	struct vmw_fence_obj *tmp;
	struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	int ret;

	/*
	 * Kernel memory space accounting, since this object may
	 * be created by a user-space request.
	 */

	ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
				   &ctx);
	if (unlikely(ret != 0))
		return ret;

	ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
	if (unlikely(!ufence)) {
		ret = -ENOMEM;
		goto out_no_object;
	}

	ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
				 vmw_user_fence_destroy);
	if (unlikely(ret != 0)) {
		kfree(ufence);
		goto out_no_object;
	}

	/*
	 * The base object holds a reference which is freed in
	 * vmw_user_fence_base_release.
	 */
	tmp = vmw_fence_obj_reference(&ufence->fence);
	ret = ttm_base_object_init(tfile, &ufence->base, false,
				   VMW_RES_FENCE,
				   &vmw_user_fence_base_release, NULL);

	if (unlikely(ret != 0)) {
		/*
		 * Free the base object's reference
		 */
		vmw_fence_obj_unreference(&tmp);
		goto out_err;
	}

	*p_fence = &ufence->fence;
	*p_handle = ufence->base.handle;

	return 0;
out_err:
	tmp = &ufence->fence;
	vmw_fence_obj_unreference(&tmp);
out_no_object:
	ttm_mem_global_free(mem_glob, fman->user_fence_size);
	return ret;
}

/**
 * vmw_wait_dma_fence - Wait for a dma fence
 *
 * @fman: pointer to a fence manager
 * @fence: DMA fence to wait on
 *
 * This function handles the case when the fence is actually a fence
 * array. If that's the case, it'll wait on each of the child fences.
 */
int vmw_wait_dma_fence(struct vmw_fence_manager *fman,
		       struct dma_fence *fence)
{
	struct dma_fence_array *fence_array;
	int ret = 0;
	int i;

	if (dma_fence_is_signaled(fence))
		return 0;

	if (!dma_fence_is_array(fence))
		return dma_fence_wait(fence, true);

	/* From i915: Note that if the fence-array was created in
	 * signal-on-any mode, we should *not* decompose it into its individual
	 * fences. However, we don't currently store which mode the fence-array
	 * is operating in. Fortunately, the only user of signal-on-any is
	 * private to amdgpu and we should not see any incoming fence-array
	 * from sync-file being in signal-on-any mode.
	 */

	fence_array = to_dma_fence_array(fence);
	for (i = 0; i < fence_array->num_fences; i++) {
		struct dma_fence *child = fence_array->fences[i];

		ret = dma_fence_wait(child, true);

		if (ret < 0)
			return ret;
	}

	return 0;
}

/*
 * vmw_fence_fifo_down - signal all unsignaled fence objects.
 */

void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
{
	struct list_head action_list;
	int ret;

	/*
	 * The list may be altered while we traverse it, so always
	 * restart when we've released the fman->lock.
	 */

	spin_lock(&fman->lock);
	fman->fifo_down = true;
	while (!list_empty(&fman->fence_list)) {
		struct vmw_fence_obj *fence =
			list_entry(fman->fence_list.prev, struct vmw_fence_obj,
				   head);
		dma_fence_get(&fence->base);
		spin_unlock(&fman->lock);

		ret = vmw_fence_obj_wait(fence, false, false,
					 VMW_FENCE_WAIT_TIMEOUT);

		if (unlikely(ret != 0)) {
			list_del_init(&fence->head);
			dma_fence_signal(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		}

		BUG_ON(!list_empty(&fence->head));
		dma_fence_put(&fence->base);
		spin_lock(&fman->lock);
	}
	spin_unlock(&fman->lock);
}

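/**
 * vmw_fence_fifo_up - Allow fence objects to be created again.
 *
 * @fman: Pointer to the fence manager.
 *
 * Clears the fifo_down flag so that new fence objects can be created.
 */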
void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
{
	spin_lock(&fman->lock);
	fman->fifo_down = false;
	spin_unlock(&fman->lock);
}

/**
 * vmw_fence_obj_lookup - Look up a user-space fence object
 *
 * @tfile: A struct ttm_object_file identifying the caller.
 * @handle: A handle identifying the fence object.
 * @return: A struct vmw_user_fence base ttm object on success or
 * an error pointer on failure.
 *
 * The fence object is looked up and type-checked. The caller needs
 * to have opened the fence object first, but since that happens on
 * creation and fence objects aren't shareable, that's not an
 * issue currently.
 */
static struct ttm_base_object *
vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
{
	struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);

	if (!base) {
		pr_err("Invalid fence object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return ERR_PTR(-EINVAL);
	}

	if (base->refcount_release != vmw_user_fence_base_release) {
		pr_err("Invalid fence object handle 0x%08lx.\n",
		       (unsigned long)handle);
		ttm_base_object_unref(&base);
		return ERR_PTR(-EINVAL);
	}

	return base;
}

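/**
 * vmw_fence_obj_wait_ioctl - Wait on a user-space fence object.
 *
 * @dev: The drm device.
 * @data: Pointer to a struct drm_vmw_fence_wait_arg.
 * @file_priv: The calling file.
 *
 * Implements the DRM_VMW_FENCE_WAIT ioctl. Converts the user-supplied
 * timeout from microseconds to jiffies, waits for the fence and optionally
 * drops the user-space reference on success.
 */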
int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_vmw_fence_wait_arg *arg =
	    (struct drm_vmw_fence_wait_arg *)data;
	unsigned long timeout;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;
	uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);

	/*
	 * 64-bit division is not present on 32-bit systems, so do an
	 * approximation. (Divide by 1000000).
	 */

	wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
	  (wait_timeout >> 26);

	if (!arg->cookie_valid) {
		arg->cookie_valid = 1;
		arg->kernel_cookie = jiffies + wait_timeout;
	}

	base = vmw_fence_obj_lookup(tfile, arg->handle);
	if (IS_ERR(base))
		return PTR_ERR(base);

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);

	timeout = jiffies;
	if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
		ret = ((vmw_fence_obj_signaled(fence)) ?
		       0 : -EBUSY);
		goto out;
	}

	timeout = (unsigned long)arg->kernel_cookie - timeout;

	ret = vmw_fence_obj_wait(fence, arg->lazy, true, timeout);

out:
	ttm_base_object_unref(&base);

	/*
	 * Optionally unref the fence object.
	 */

	if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
		return ttm_ref_object_base_unref(tfile, arg->handle,
						 TTM_REF_USAGE);
	return ret;
}

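/**
 * vmw_fence_obj_signaled_ioctl - Report whether a fence object has signaled.
 *
 * @dev: The drm device.
 * @data: Pointer to a struct drm_vmw_fence_signaled_arg.
 * @file_priv: The calling file.
 *
 * Implements the DRM_VMW_FENCE_SIGNALED ioctl. Also reports the last seqno
 * the device is known to have passed.
 */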
int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_vmw_fence_signaled_arg *arg =
		(struct drm_vmw_fence_signaled_arg *) data;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct vmw_fence_manager *fman;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);

	base = vmw_fence_obj_lookup(tfile, arg->handle);
	if (IS_ERR(base))
		return PTR_ERR(base);

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
	fman = fman_from_fence(fence);

	arg->signaled = vmw_fence_obj_signaled(fence);

	arg->signaled_flags = arg->flags;
	spin_lock(&fman->lock);
	arg->passed_seqno = dev_priv->last_read_seqno;
	spin_unlock(&fman->lock);

	ttm_base_object_unref(&base);

	return 0;
}

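/**
 * vmw_fence_obj_unref_ioctl - Drop a user-space reference to a fence object.
 *
 * @dev: The drm device.
 * @data: Pointer to a struct drm_vmw_fence_arg.
 * @file_priv: The calling file.
 *
 * Implements the DRM_VMW_FENCE_UNREF ioctl.
 */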
int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_fence_arg *arg =
		(struct drm_vmw_fence_arg *) data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

/**
 * vmw_event_fence_action_seq_passed - Deliver the pending drm event.
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is called when the seqno of the fence where @action is
 * attached has passed. It queues the event on the submitter's event list.
 * This function is always called from atomic context.
 */
static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);
	struct drm_device *dev = eaction->dev;
	struct drm_pending_event *event = eaction->event;

	if (unlikely(event == NULL))
		return;

	spin_lock_irq(&dev->event_lock);

	if (likely(eaction->tv_sec != NULL)) {
		struct timespec64 ts;

		ktime_get_ts64(&ts);
		/* monotonic time, so no y2038 overflow */
		*eaction->tv_sec = ts.tv_sec;
		*eaction->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
	}

	drm_send_event_locked(dev, eaction->event);
	eaction->event = NULL;
	spin_unlock_irq(&dev->event_lock);
}

/**
 * vmw_event_fence_action_cleanup - Destroy an event fence action.
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is the struct vmw_fence_action destructor. It's typically
 * called from a workqueue.
 */
static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);

	vmw_fence_obj_unreference(&eaction->fence);
	kfree(eaction);
}

/**
 * vmw_fence_obj_add_action - Add an action to a fence object.
 *
 * @fence: The fence object.
 * @action: The action to add.
 *
 * Note that the action callbacks may be executed before this function
 * returns.
 */
static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
			      struct vmw_fence_action *action)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	bool run_update = false;

	mutex_lock(&fman->goal_irq_mutex);
	spin_lock(&fman->lock);

	fman->pending_actions[action->type]++;
	if (dma_fence_is_signaled_locked(&fence->base)) {
		struct list_head action_list;

		INIT_LIST_HEAD(&action_list);
		list_add_tail(&action->head, &action_list);
		vmw_fences_perform_actions(fman, &action_list);
	} else {
		list_add_tail(&action->head, &fence->seq_passed_actions);

		/*
		 * This function may set fman::seqno_valid, so it must
		 * be run with the goal_irq_mutex held.
		 */
		run_update = vmw_fence_goal_check_locked(fence);
	}

	spin_unlock(&fman->lock);

	if (run_update) {
		if (!fman->goal_irq_on) {
			fman->goal_irq_on = true;
			vmw_goal_waiter_add(fman->dev_priv);
		}
		vmw_fences_update(fman);
	}
	mutex_unlock(&fman->goal_irq_mutex);
}

/**
 * vmw_event_fence_action_queue - Queue an event for sending when a fence
 * object seqno has passed.
 *
 * @file_priv: The file connection on which the event should be posted.
 * @fence: The fence object on which to post the event.
 * @event: Event to be posted. This event should've been allocated
 * using k[mz]alloc, and should've been completely initialized.
 * @tv_sec: If non-null, the variable pointed to will be assigned the
 * current time tv_sec val when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
 * be assigned the current time tv_usec val when the fence signals.
 * @interruptible: Interruptible waits if possible.
 *
 * As a side effect, the object pointed to by @event may have been
 * freed when this function returns. If this function returns with
 * an error code, the caller needs to free that object.
 */
int vmw_event_fence_action_queue(struct drm_file *file_priv,
				 struct vmw_fence_obj *fence,
				 struct drm_pending_event *event,
				 uint32_t *tv_sec,
				 uint32_t *tv_usec,
				 bool interruptible)
{
	struct vmw_event_fence_action *eaction;
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
	if (unlikely(!eaction))
		return -ENOMEM;

	eaction->event = event;

	eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
	eaction->action.cleanup = vmw_event_fence_action_cleanup;
	eaction->action.type = VMW_ACTION_EVENT;

	eaction->fence = vmw_fence_obj_reference(fence);
	eaction->dev = &fman->dev_priv->drm;
	eaction->tv_sec = tv_sec;
	eaction->tv_usec = tv_usec;

	vmw_fence_obj_add_action(fence, &eaction->action);

	return 0;
}

struct vmw_event_fence_pending {
	struct drm_pending_event base;
	struct drm_vmw_event_fence event;
};

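/**
 * vmw_event_fence_action_create - Allocate a drm event and queue it on a
 * fence.
 *
 * @file_priv: The file connection on which the event should be posted.
 * @fence: The fence object on which to post the event.
 * @flags: Flags from the fence event ioctl. DRM_VMW_FE_FLAG_REQ_TIME
 * requests a timestamp in the event.
 * @user_data: User data returned with the event.
 * @interruptible: Interruptible waits if possible.
 *
 * Allocates a struct drm_vmw_event_fence, reserves event space for the file
 * and queues the event for delivery when the fence signals.
 */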
static int vmw_event_fence_action_create(struct drm_file *file_priv,
				  struct vmw_fence_obj *fence,
				  uint32_t flags,
				  uint64_t user_data,
				  bool interruptible)
{
	struct vmw_event_fence_pending *event;
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct drm_device *dev = &fman->dev_priv->drm;
	int ret;

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (unlikely(!event)) {
		DRM_ERROR("Failed to allocate an event.\n");
		ret = -ENOMEM;
		goto out_no_space;
	}

	event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
	event->event.base.length = sizeof(*event);
	event->event.user_data = user_data;

	ret = drm_event_reserve_init(dev, file_priv, &event->base, &event->event.base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate event space for this file.\n");
		kfree(event);
		goto out_no_space;
	}

	if (flags & DRM_VMW_FE_FLAG_REQ_TIME)
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   &event->event.tv_sec,
						   &event->event.tv_usec,
						   interruptible);
	else
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   NULL,
						   NULL,
						   interruptible);
	if (ret != 0)
		goto out_no_queue;

	return 0;

out_no_queue:
	drm_event_cancel_free(dev, &event->base);
out_no_space:
	return ret;
}

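/**
 * vmw_fence_event_ioctl - Attach a drm event to a fence.
 *
 * @dev: The drm device.
 * @data: Pointer to a struct drm_vmw_fence_event_arg.
 * @file_priv: The calling file.
 *
 * Implements the DRM_VMW_FENCE_EVENT ioctl. Looks up or creates a fence
 * object, attaches an event to be delivered when the fence signals and
 * optionally copies a fence representation back to user-space.
 */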
int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_fence_event_arg *arg =
		(struct drm_vmw_fence_event_arg *) data;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct ttm_object_file *tfile = vmw_fp->tfile;
	struct drm_vmw_fence_rep __user *user_fence_rep =
		(struct drm_vmw_fence_rep __user *)(unsigned long)
		arg->fence_rep;
	uint32_t handle;
	int ret;

	/*
	 * Look up an existing fence object, and if user-space
	 * wants a new reference, add one.
	 */
	if (arg->handle) {
		struct ttm_base_object *base =
			vmw_fence_obj_lookup(tfile, arg->handle);

		if (IS_ERR(base))
			return PTR_ERR(base);

		fence = &(container_of(base, struct vmw_user_fence,
				       base)->fence);
		(void) vmw_fence_obj_reference(fence);

		if (user_fence_rep != NULL) {
			ret = ttm_ref_object_add(vmw_fp->tfile, base,
						 TTM_REF_USAGE, NULL, false);
			if (unlikely(ret != 0)) {
				DRM_ERROR("Failed to reference a fence "
					  "object.\n");
				goto out_no_ref_obj;
			}
			handle = base->handle;
		}
		ttm_base_object_unref(&base);
	}

	/*
	 * Create a new fence object.
	 */
	if (!fence) {
		ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
						 &fence,
						 (user_fence_rep) ?
						 &handle : NULL);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Fence event failed to create fence.\n");
			return ret;
		}
	}

	BUG_ON(fence == NULL);

	ret = vmw_event_fence_action_create(file_priv, fence,
					    arg->flags,
					    arg->user_data,
					    true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Failed to attach event to fence.\n");
		goto out_no_create;
	}

	vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
				    handle, -1, NULL);
	vmw_fence_obj_unreference(&fence);
	return 0;
out_no_create:
	if (user_fence_rep != NULL)
		ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
out_no_ref_obj:
	vmw_fence_obj_unreference(&fence);
	return ret;
}