/**************************************************************************
 *
 * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/drmP.h>
#include "vmwgfx_drv.h"

#define VMW_FENCE_WRAP (1 << 31)

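/**
 * struct vmw_fence_manager - Per-device book-keeping for fence objects.
 *
 * @num_fence_objects: Number of fence objects on @fence_list. Protected by
 * @lock.
 * @dev_priv: Pointer to the device private structure.
 * @lock: Spinlock protecting the fence list, the cleanup list and per-fence
 * state.
 * @fence_list: List of fence objects not yet signaled, in submission order.
 * @work: Work item executing fence action cleanup outside atomic context.
 * @user_fence_size: Accounted size of a struct vmw_user_fence.
 * @fence_size: Accounted size of a struct vmw_fence_obj.
 * @event_fence_action_size: Accounted size of a struct vmw_event_fence_action.
 * @fifo_down: True while the fifo is being taken down and no new fence
 * objects may be created.
 * @cleanup_list: List of actions whose cleanup callbacks are pending.
 * @pending_actions: Number of attached actions per action type that have
 * not yet been executed.
 * @goal_irq_mutex: Serializes updates of @goal_irq_on and enabling of the
 * fence goal irq.
 * @goal_irq_on: Whether the fence goal irq is currently requested.
 * Protected by @goal_irq_mutex.
 * @seqno_valid: Whether the fence goal register currently holds a valid
 * goal seqno. Protected by @lock, and may not be set to true without the
 * @goal_irq_mutex held.
 */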
struct vmw_fence_manager {
	int num_fence_objects;
	struct vmw_private *dev_priv;
	spinlock_t lock;
	struct list_head fence_list;
	struct work_struct work;
	u32 user_fence_size;
	u32 fence_size;
	u32 event_fence_action_size;
	bool fifo_down;
	struct list_head cleanup_list;
	uint32_t pending_actions[VMW_ACTION_MAX];
	struct mutex goal_irq_mutex;
	bool goal_irq_on; /* Protected by @goal_irq_mutex */
	bool seqno_valid; /* Protected by @lock, and may not be set to true
			     without the @goal_irq_mutex held. */
};

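/**
 * struct vmw_user_fence - A fence object with a user-space visible
 * ttm base object.
 *
 * @base: The ttm base object through which user-space references the fence.
 * @fence: The embedded fence object.
 */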
struct vmw_user_fence {
	struct ttm_base_object base;
	struct vmw_fence_obj fence;
};

/**
 * struct vmw_event_fence_action - fence action that delivers a drm event.
 *
 * @action: A struct vmw_fence_action to hook up to a fence.
 * @fpriv_head: List head used to link the action into the submitting
 * file's list of pending fence events.
 * @event: A pointer to the pending drm event to deliver when the fence
 * signals. Set to NULL once ownership of the event has been handed over.
 * @fence: A referenced pointer to the fence to keep it alive while @action
 * hangs on it.
 * @dev: Pointer to a struct drm_device so we can access the event machinery.
 * @tv_sec: If non-NULL, the variable pointed to will be assigned the
 * current time tv_sec value when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
 * be assigned the current time tv_usec value when the fence signals.
 */
struct vmw_event_fence_action {
	struct vmw_fence_action action;
	struct list_head fpriv_head;

	struct drm_pending_event *event;
	struct vmw_fence_obj *fence;
	struct drm_device *dev;

	uint32_t *tv_sec;
	uint32_t *tv_usec;
};

/**
 * Note on the fencing subsystem's use of irqs:
 *
 * Typically the vmw_fences_update function is called
 *
 * a) When a new fence seqno has been submitted by the fifo code.
 * b) On-demand when we have waiters. Sleeping waiters will switch on the
 * ANY_FENCE irq and call vmw_fences_update each time an ANY_FENCE irq is
 * received. When the last fence waiter is gone, that irq is masked away.
 *
 * In situations where there are no waiters and we don't submit any new fences,
 * fence objects may not be signaled. This is perfectly OK, since there are
 * no consumers of the signaled data, but it is NOT OK when there are fence
 * actions attached to a fence. The fencing subsystem then makes use of the
 * FENCE_GOAL irq and sets the fence goal seqno to that of the next fence
 * which has an action attached, and each time vmw_fences_update is called,
 * the subsystem makes sure the fence goal seqno is updated.
 *
 * The fence goal seqno irq is on as long as there are unsignaled fence
 * objects with actions attached to them.
 */

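/**
 * vmw_fence_obj_destroy_locked - Final kref release function for a fence.
 *
 * @kref: The embedded struct kref of the fence object to destroy.
 *
 * Called with the fence manager lock held and irqs disabled. Removes the
 * fence from the manager's list and temporarily drops the lock while
 * invoking the fence's destructor.
 */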
static void vmw_fence_obj_destroy_locked(struct kref *kref)
{
	struct vmw_fence_obj *fence =
		container_of(kref, struct vmw_fence_obj, kref);

	struct vmw_fence_manager *fman = fence->fman;
	unsigned int num_fences;

	list_del_init(&fence->head);
	num_fences = --fman->num_fence_objects;
	spin_unlock_irq(&fman->lock);
	if (fence->destroy)
		fence->destroy(fence);
	else
		kfree(fence);

	spin_lock_irq(&fman->lock);
}

/**
 * Execute signal actions on fences recently signaled.
 * This is done from a workqueue so we don't have to execute
 * signal actions from atomic context.
 */

static void vmw_fence_work_func(struct work_struct *work)
{
	struct vmw_fence_manager *fman =
		container_of(work, struct vmw_fence_manager, work);
	struct list_head list;
	struct vmw_fence_action *action, *next_action;
	bool seqno_valid;

	do {
		INIT_LIST_HEAD(&list);
		mutex_lock(&fman->goal_irq_mutex);

		spin_lock_irq(&fman->lock);
		list_splice_init(&fman->cleanup_list, &list);
		seqno_valid = fman->seqno_valid;
		spin_unlock_irq(&fman->lock);

		if (!seqno_valid && fman->goal_irq_on) {
			fman->goal_irq_on = false;
			vmw_goal_waiter_remove(fman->dev_priv);
		}
		mutex_unlock(&fman->goal_irq_mutex);

		if (list_empty(&list))
			return;

		/*
		 * At this point, only we can manipulate the list heads of
		 * the actions we have moved to the private list, hence
		 * fman::lock need not be held.
		 */

		list_for_each_entry_safe(action, next_action, &list, head) {
			list_del_init(&action->head);
			if (action->cleanup)
				action->cleanup(action);
		}
	} while (1);
}

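/**
 * vmw_fence_manager_init - Allocate and initialize a fence manager.
 *
 * @dev_priv: Pointer to the device private structure.
 *
 * Returns a pointer to the new fence manager, or NULL on allocation failure.
 */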
struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
	struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);

	if (unlikely(fman == NULL))
		return NULL;

	fman->dev_priv = dev_priv;
	spin_lock_init(&fman->lock);
	INIT_LIST_HEAD(&fman->fence_list);
	INIT_LIST_HEAD(&fman->cleanup_list);
	INIT_WORK(&fman->work, &vmw_fence_work_func);
	fman->fifo_down = true;
	fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
	fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
	fman->event_fence_action_size =
		ttm_round_pot(sizeof(struct vmw_event_fence_action));
	mutex_init(&fman->goal_irq_mutex);

	return fman;
}

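/**
 * vmw_fence_manager_takedown - Tear down and free a fence manager.
 *
 * @fman: Pointer to the fence manager to take down.
 *
 * All fence objects must have been signaled and released before this
 * function is called.
 */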
void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
	unsigned long irq_flags;
	bool lists_empty;

	(void) cancel_work_sync(&fman->work);

	spin_lock_irqsave(&fman->lock, irq_flags);
	lists_empty = list_empty(&fman->fence_list) &&
		list_empty(&fman->cleanup_list);
	spin_unlock_irqrestore(&fman->lock, irq_flags);

	BUG_ON(!lists_empty);
	kfree(fman);
}

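/**
 * vmw_fence_obj_init - Initialize a fence object and add it to the manager.
 *
 * @fman: Pointer to the fence manager.
 * @fence: The fence object to initialize.
 * @seqno: The seqno the fence is associated with.
 * @mask: Bitmask of flags the fence may signal.
 * @destroy: Destructor called when the last reference is dropped.
 *
 * Returns -EBUSY if the fifo is being taken down, 0 otherwise.
 */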
static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
			      struct vmw_fence_obj *fence,
			      u32 seqno,
			      uint32_t mask,
			      void (*destroy) (struct vmw_fence_obj *fence))
{
	unsigned long irq_flags;
	unsigned int num_fences;
	int ret = 0;

	fence->seqno = seqno;
	INIT_LIST_HEAD(&fence->seq_passed_actions);
	fence->fman = fman;
	fence->signaled = 0;
	fence->signal_mask = mask;
	kref_init(&fence->kref);
	fence->destroy = destroy;
	init_waitqueue_head(&fence->queue);

	spin_lock_irqsave(&fman->lock, irq_flags);
	if (unlikely(fman->fifo_down)) {
		ret = -EBUSY;
		goto out_unlock;
	}
	list_add_tail(&fence->head, &fman->fence_list);
	num_fences = ++fman->num_fence_objects;

out_unlock:
	spin_unlock_irqrestore(&fman->lock, irq_flags);
	return ret;
}

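/**
 * vmw_fence_obj_reference - Take a reference on a fence object.
 *
 * @fence: The fence object, may be NULL.
 *
 * Returns @fence with its reference count incremented, or NULL if @fence
 * was NULL.
 */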
struct vmw_fence_obj *vmw_fence_obj_reference(struct vmw_fence_obj *fence)
{
	if (unlikely(fence == NULL))
		return NULL;

	kref_get(&fence->kref);
	return fence;
}

/**
 * vmw_fence_obj_unreference - Drop a reference on a fence object.
 *
 * @fence_p: Pointer to the fence pointer, which is set to NULL on return.
 *
 * Note that this function may not be entered with disabled irqs since
 * it may re-enable them in the destroy function.
 */
void vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p)
{
	struct vmw_fence_obj *fence = *fence_p;
	struct vmw_fence_manager *fman;

	if (unlikely(fence == NULL))
		return;

	fman = fence->fman;
	*fence_p = NULL;
	spin_lock_irq(&fman->lock);
	BUG_ON(atomic_read(&fence->kref.refcount) == 0);
	kref_put(&fence->kref, vmw_fence_obj_destroy_locked);
	spin_unlock_irq(&fman->lock);
}

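/**
 * vmw_fences_perform_actions - Execute the seq_passed callbacks of a list
 * of fence actions.
 *
 * @fman: Pointer to the fence manager.
 * @list: List of struct vmw_fence_action to execute.
 *
 * Called with the fence manager lock held. Each action is then moved to the
 * manager's cleanup list so that its cleanup callback can be run from a
 * worker.
 */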
void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
				struct list_head *list)
{
	struct vmw_fence_action *action, *next_action;

	list_for_each_entry_safe(action, next_action, list, head) {
		list_del_init(&action->head);
		fman->pending_actions[action->type]--;
		if (action->seq_passed != NULL)
			action->seq_passed(action);

		/*
		 * Add the cleanup action to the cleanup list so that
		 * it will be performed by a worker task.
		 */

		list_add_tail(&action->head, &fman->cleanup_list);
	}
}

/**
 * vmw_fence_goal_new_locked - Figure out a new device fence goal
 * seqno if needed.
 *
 * @fman: Pointer to a fence manager.
 * @passed_seqno: The seqno the device currently signals as passed.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when we have a new passed_seqno, and
 * we might need to update the fence goal. It checks to see whether
 * the current fence goal has already passed, and, in that case,
 * scans through all unsignaled fences to get the next fence object with an
 * action attached, and sets the seqno of that fence as a new fence goal.
 *
 * Returns true if the device goal seqno was updated, false otherwise.
 */
static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
				      u32 passed_seqno)
{
	u32 goal_seqno;
	__le32 __iomem *fifo_mem;
	struct vmw_fence_obj *fence;

	if (likely(!fman->seqno_valid))
		return false;

	fifo_mem = fman->dev_priv->mmio_virt;
	goal_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE_GOAL);
	if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
		return false;

	fman->seqno_valid = false;
	list_for_each_entry(fence, &fman->fence_list, head) {
		if (!list_empty(&fence->seq_passed_actions)) {
			fman->seqno_valid = true;
			iowrite32(fence->seqno,
				  fifo_mem + SVGA_FIFO_FENCE_GOAL);
			break;
		}
	}

	return true;
}

/**
 * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
 * needed.
 *
 * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
 * considered as a device fence goal.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when an action has been attached to a fence to
 * check whether the seqno of that fence should be used for a fence
 * goal interrupt. This is typically needed if the current fence goal is
 * invalid, or has a higher seqno than that of the current fence object.
 *
 * Returns true if the device goal seqno was updated, false otherwise.
 */
static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
{
	u32 goal_seqno;
	__le32 __iomem *fifo_mem;

	if (fence->signaled & DRM_VMW_FENCE_FLAG_EXEC)
		return false;

	fifo_mem = fence->fman->dev_priv->mmio_virt;
	goal_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE_GOAL);
	if (likely(fence->fman->seqno_valid &&
		   goal_seqno - fence->seqno < VMW_FENCE_WRAP))
		return false;

	iowrite32(fence->seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
	fence->fman->seqno_valid = true;

	return true;
}

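/**
 * vmw_fences_update - Signal all fence objects whose seqno has passed.
 *
 * @fman: Pointer to the fence manager.
 *
 * Reads the last passed seqno from the fifo, signals all fence objects with
 * an equal or lower seqno, runs their attached actions and, if needed,
 * updates the device fence goal.
 */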
void vmw_fences_update(struct vmw_fence_manager *fman)
{
	unsigned long flags;
	struct vmw_fence_obj *fence, *next_fence;
	struct list_head action_list;
	bool needs_rerun;
	uint32_t seqno, new_seqno;
	__le32 __iomem *fifo_mem = fman->dev_priv->mmio_virt;

	seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
rerun:
	spin_lock_irqsave(&fman->lock, flags);
	list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
		if (seqno - fence->seqno < VMW_FENCE_WRAP) {
			list_del_init(&fence->head);
			fence->signaled |= DRM_VMW_FENCE_FLAG_EXEC;
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
			wake_up_all(&fence->queue);
		} else
			break;
	}

	needs_rerun = vmw_fence_goal_new_locked(fman, seqno);

	if (!list_empty(&fman->cleanup_list))
		(void) schedule_work(&fman->work);
	spin_unlock_irqrestore(&fman->lock, flags);

	/*
	 * Rerun if the fence goal seqno was updated: the hardware might
	 * have raced with that update, in which case we may have missed
	 * a fence_goal irq.
	 */

	if (unlikely(needs_rerun)) {
		new_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
		if (new_seqno != seqno) {
			seqno = new_seqno;
			goto rerun;
		}
	}
}

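/**
 * vmw_fence_obj_signaled - Check whether a fence object has signaled.
 *
 * @fence: The fence object to check.
 * @flags: Bitmask of flags to check for, limited by the fence's signal_mask.
 *
 * Returns true if all requested flags have signaled. If the EXEC flag has
 * not yet signaled, the fence manager is polled first.
 */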
bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence,
			    uint32_t flags)
{
	struct vmw_fence_manager *fman = fence->fman;
	unsigned long irq_flags;
	uint32_t signaled;

	spin_lock_irqsave(&fman->lock, irq_flags);
	signaled = fence->signaled;
	spin_unlock_irqrestore(&fman->lock, irq_flags);

	flags &= fence->signal_mask;
	if ((signaled & flags) == flags)
		return true;

	if ((signaled & DRM_VMW_FENCE_FLAG_EXEC) == 0)
		vmw_fences_update(fman);

	spin_lock_irqsave(&fman->lock, irq_flags);
	signaled = fence->signaled;
	spin_unlock_irqrestore(&fman->lock, irq_flags);

	return ((signaled & flags) == flags);
}

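/**
 * vmw_fence_obj_wait - Wait for a fence object to signal.
 *
 * @fence: The fence object to wait on.
 * @flags: Bitmask of flags that must signal.
 * @lazy: Currently unused.
 * @interruptible: Whether the wait is interruptible.
 * @timeout: Timeout in jiffies.
 *
 * Returns 0 on success, -EBUSY on timeout, or -ERESTARTSYS if an
 * interruptible wait was interrupted by a signal.
 */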
int vmw_fence_obj_wait(struct vmw_fence_obj *fence,
		       uint32_t flags, bool lazy,
		       bool interruptible, unsigned long timeout)
{
	struct vmw_private *dev_priv = fence->fman->dev_priv;
	long ret;

	if (likely(vmw_fence_obj_signaled(fence, flags)))
		return 0;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	vmw_seqno_waiter_add(dev_priv);

	if (interruptible)
		ret = wait_event_interruptible_timeout
			(fence->queue,
			 vmw_fence_obj_signaled(fence, flags),
			 timeout);
	else
		ret = wait_event_timeout
			(fence->queue,
			 vmw_fence_obj_signaled(fence, flags),
			 timeout);

	vmw_seqno_waiter_remove(dev_priv);

	if (unlikely(ret == 0))
		ret = -EBUSY;
	else if (likely(ret > 0))
		ret = 0;

	return ret;
}

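/**
 * vmw_fence_obj_flush - Kick the host so that queued fence commands are
 * processed.
 *
 * @fence: The fence object whose submission should be flushed.
 */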
void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
{
	struct vmw_private *dev_priv = fence->fman->dev_priv;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
}

static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fence->fman;

	kfree(fence);
	/*
	 * Free kernel space accounting.
	 */
	ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
			    fman->fence_size);
}

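/**
 * vmw_fence_create - Allocate and initialize a kernel fence object.
 *
 * @fman: Pointer to the fence manager.
 * @seqno: The seqno the fence is associated with.
 * @mask: Bitmask of flags the fence may signal.
 * @p_fence: Assigned a pointer to the new fence object on success.
 *
 * Returns 0 on success, negative error code otherwise.
 */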
int vmw_fence_create(struct vmw_fence_manager *fman,
		     uint32_t seqno,
		     uint32_t mask,
		     struct vmw_fence_obj **p_fence)
{
	struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
	struct vmw_fence_obj *fence;
	int ret;

	ret = ttm_mem_global_alloc(mem_glob, fman->fence_size,
				   false, false);
	if (unlikely(ret != 0))
		return ret;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (unlikely(fence == NULL)) {
		ret = -ENOMEM;
		goto out_no_object;
	}

	ret = vmw_fence_obj_init(fman, fence, seqno, mask,
				 vmw_fence_destroy);
	if (unlikely(ret != 0))
		goto out_err_init;

	*p_fence = fence;
	return 0;

out_err_init:
	kfree(fence);
out_no_object:
	ttm_mem_global_free(mem_glob, fman->fence_size);
	return ret;
}

static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
{
	struct vmw_user_fence *ufence =
		container_of(fence, struct vmw_user_fence, fence);
	struct vmw_fence_manager *fman = fence->fman;

	ttm_base_object_kfree(ufence, base);
	/*
	 * Free kernel space accounting.
	 */
	ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
			    fman->user_fence_size);
}

static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_fence *ufence =
		container_of(base, struct vmw_user_fence, base);
	struct vmw_fence_obj *fence = &ufence->fence;

	*p_base = NULL;
	vmw_fence_obj_unreference(&fence);
}

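/**
 * vmw_user_fence_create - Create a fence object visible to user-space.
 *
 * @file_priv: File through which the fence object is made visible.
 * @fman: Pointer to the fence manager.
 * @seqno: The seqno the fence is associated with.
 * @mask: Bitmask of flags the fence may signal.
 * @p_fence: Assigned a pointer to the new fence object on success.
 * @p_handle: Assigned the user-space handle of the new fence object.
 *
 * Returns 0 on success, negative error code otherwise.
 */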
int vmw_user_fence_create(struct drm_file *file_priv,
			  struct vmw_fence_manager *fman,
			  uint32_t seqno,
			  uint32_t mask,
			  struct vmw_fence_obj **p_fence,
			  uint32_t *p_handle)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_user_fence *ufence;
	struct vmw_fence_obj *tmp;
	struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
	int ret;

	/*
	 * Kernel memory space accounting, since this object may
	 * be created by a user-space request.
	 */

	ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
				   false, false);
	if (unlikely(ret != 0))
		return ret;

	ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
	if (unlikely(ufence == NULL)) {
		ret = -ENOMEM;
		goto out_no_object;
	}

	ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
				 mask, vmw_user_fence_destroy);
	if (unlikely(ret != 0)) {
		kfree(ufence);
		goto out_no_object;
	}

	/*
	 * The base object holds a reference which is freed in
	 * vmw_user_fence_base_release.
	 */
	tmp = vmw_fence_obj_reference(&ufence->fence);
	ret = ttm_base_object_init(tfile, &ufence->base, false,
				   VMW_RES_FENCE,
				   &vmw_user_fence_base_release, NULL);

	if (unlikely(ret != 0)) {
		/*
		 * Free the base object's reference
		 */
		vmw_fence_obj_unreference(&tmp);
		goto out_err;
	}

	*p_fence = &ufence->fence;
	*p_handle = ufence->base.hash.key;

	return 0;
out_err:
	tmp = &ufence->fence;
	vmw_fence_obj_unreference(&tmp);
out_no_object:
	ttm_mem_global_free(mem_glob, fman->user_fence_size);
	return ret;
}

/**
 * vmw_fence_fifo_down - Signal all unsignaled fence objects.
 *
 * @fman: Pointer to the fence manager.
 */
void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
{
	unsigned long irq_flags;
	struct list_head action_list;
	int ret;

	/*
	 * The list may be altered while we traverse it, so always
	 * restart when we've released the fman->lock.
	 */

	spin_lock_irqsave(&fman->lock, irq_flags);
	fman->fifo_down = true;
	while (!list_empty(&fman->fence_list)) {
		struct vmw_fence_obj *fence =
			list_entry(fman->fence_list.prev, struct vmw_fence_obj,
				   head);
		kref_get(&fence->kref);
		spin_unlock_irq(&fman->lock);

		ret = vmw_fence_obj_wait(fence, fence->signal_mask,
					 false, false,
					 VMW_FENCE_WAIT_TIMEOUT);

		if (unlikely(ret != 0)) {
			list_del_init(&fence->head);
			fence->signaled |= DRM_VMW_FENCE_FLAG_EXEC;
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
			wake_up_all(&fence->queue);
		}

		spin_lock_irq(&fman->lock);

		BUG_ON(!list_empty(&fence->head));
		kref_put(&fence->kref, vmw_fence_obj_destroy_locked);
	}
	spin_unlock_irqrestore(&fman->lock, irq_flags);
}

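/**
 * vmw_fence_fifo_up - Re-enable fence object creation after a fifo takedown.
 *
 * @fman: Pointer to the fence manager.
 */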
void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&fman->lock, irq_flags);
	fman->fifo_down = false;
	spin_unlock_irqrestore(&fman->lock, irq_flags);
}

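/**
 * vmw_fence_obj_wait_ioctl - Ioctl waiting for a user-space fence object
 * to signal.
 *
 * @dev: The drm device.
 * @data: Pointer to a struct drm_vmw_fence_wait_arg.
 * @file_priv: The calling file.
 *
 * Converts the user-supplied timeout from microseconds to jiffies and waits
 * for the fence, optionally dropping the user-space reference on success.
 */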
int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_vmw_fence_wait_arg *arg =
	    (struct drm_vmw_fence_wait_arg *)data;
	unsigned long timeout;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;
	uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);

	/*
	 * 64-bit division is not present on 32-bit systems, so approximate
	 * the division by 1000000 with shifts:
	 * 2^-20 + 2^-24 - 2^-26 is roughly 1/1000000.
	 */

	wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
	  (wait_timeout >> 26);

	if (!arg->cookie_valid) {
		arg->cookie_valid = 1;
		arg->kernel_cookie = jiffies + wait_timeout;
	}

	base = ttm_base_object_lookup(tfile, arg->handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Wait invalid fence object handle "
		       "0x%08lx.\n",
		       (unsigned long)arg->handle);
		return -EINVAL;
	}

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);

	timeout = jiffies;
	if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
		ret = ((vmw_fence_obj_signaled(fence, arg->flags)) ?
		       0 : -EBUSY);
		goto out;
	}

	timeout = (unsigned long)arg->kernel_cookie - timeout;

	ret = vmw_fence_obj_wait(fence, arg->flags, arg->lazy, true, timeout);

out:
	ttm_base_object_unref(&base);

	/*
	 * Optionally unref the fence object.
	 */

	if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
		return ttm_ref_object_base_unref(tfile, arg->handle,
						 TTM_REF_USAGE);
	return ret;
}

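/**
 * vmw_fence_obj_signaled_ioctl - Ioctl reporting the signaled state of a
 * user-space fence object.
 *
 * @dev: The drm device.
 * @data: Pointer to a struct drm_vmw_fence_signaled_arg.
 * @file_priv: The calling file.
 */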
int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_vmw_fence_signaled_arg *arg =
		(struct drm_vmw_fence_signaled_arg *) data;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct vmw_fence_manager *fman;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);

	base = ttm_base_object_lookup(tfile, arg->handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Fence signaled invalid fence object handle "
		       "0x%08lx.\n",
		       (unsigned long)arg->handle);
		return -EINVAL;
	}

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
	fman = fence->fman;

	arg->signaled = vmw_fence_obj_signaled(fence, arg->flags);

	spin_lock_irq(&fman->lock);
	arg->signaled_flags = fence->signaled;
	arg->passed_seqno = dev_priv->last_read_seqno;
	spin_unlock_irq(&fman->lock);

	ttm_base_object_unref(&base);

	return 0;
}

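/**
 * vmw_fence_obj_unref_ioctl - Ioctl dropping a user-space reference on a
 * fence object.
 *
 * @dev: The drm device.
 * @data: Pointer to a struct drm_vmw_fence_arg.
 * @file_priv: The calling file.
 */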
int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_fence_arg *arg =
		(struct drm_vmw_fence_arg *) data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

/**
 * vmw_event_fence_fpriv_gone - Remove references to struct drm_file objects
 *
 * @fman: Pointer to a struct vmw_fence_manager
 * @event_list: Pointer to linked list of struct vmw_event_fence_action objects
 * with pointers to a struct drm_file object about to be closed.
 *
 * This function removes all pending fence events with references to a
 * specific struct drm_file object about to be closed. The caller is required
 * to pass a list of all struct vmw_event_fence_action objects with such
 * events attached. This function is typically called before the
 * struct drm_file object's event management is taken down.
 */
void vmw_event_fence_fpriv_gone(struct vmw_fence_manager *fman,
				struct list_head *event_list)
{
	struct vmw_event_fence_action *eaction;
	struct drm_pending_event *event;
	unsigned long irq_flags;

	while (1) {
		spin_lock_irqsave(&fman->lock, irq_flags);
		if (list_empty(event_list))
			goto out_unlock;
		eaction = list_first_entry(event_list,
					   struct vmw_event_fence_action,
					   fpriv_head);
		list_del_init(&eaction->fpriv_head);
		event = eaction->event;
		eaction->event = NULL;
		spin_unlock_irqrestore(&fman->lock, irq_flags);
		event->destroy(event);
	}
out_unlock:
	spin_unlock_irqrestore(&fman->lock, irq_flags);
}

824 
825 /**
826  * vmw_event_fence_action_seq_passed
827  *
828  * @action: The struct vmw_fence_action embedded in a struct
829  * vmw_event_fence_action.
830  *
831  * This function is called when the seqno of the fence where @action is
832  * attached has passed. It queues the event on the submitter's event list.
833  * This function is always called from atomic context, and may be called
834  * from irq context.
835  */
836 static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
837 {
838 	struct vmw_event_fence_action *eaction =
839 		container_of(action, struct vmw_event_fence_action, action);
840 	struct drm_device *dev = eaction->dev;
841 	struct drm_pending_event *event = eaction->event;
842 	struct drm_file *file_priv;
843 	unsigned long irq_flags;
844 
845 	if (unlikely(event == NULL))
846 		return;
847 
848 	file_priv = event->file_priv;
849 	spin_lock_irqsave(&dev->event_lock, irq_flags);
850 
851 	if (likely(eaction->tv_sec != NULL)) {
852 		struct timeval tv;
853 
854 		do_gettimeofday(&tv);
855 		*eaction->tv_sec = tv.tv_sec;
856 		*eaction->tv_usec = tv.tv_usec;
857 	}
858 
859 	list_del_init(&eaction->fpriv_head);
860 	list_add_tail(&eaction->event->link, &file_priv->event_list);
861 	eaction->event = NULL;
862 	wake_up_all(&file_priv->event_wait);
863 	spin_unlock_irqrestore(&dev->event_lock, irq_flags);
864 }
865 
866 /**
867  * vmw_event_fence_action_cleanup
868  *
869  * @action: The struct vmw_fence_action embedded in a struct
870  * vmw_event_fence_action.
871  *
872  * This function is the struct vmw_fence_action destructor. It's typically
873  * called from a workqueue.
874  */
875 static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
876 {
877 	struct vmw_event_fence_action *eaction =
878 		container_of(action, struct vmw_event_fence_action, action);
879 	struct vmw_fence_manager *fman = eaction->fence->fman;
880 	unsigned long irq_flags;
881 
882 	spin_lock_irqsave(&fman->lock, irq_flags);
883 	list_del(&eaction->fpriv_head);
884 	spin_unlock_irqrestore(&fman->lock, irq_flags);
885 
886 	vmw_fence_obj_unreference(&eaction->fence);
887 	kfree(eaction);
888 }
889 
890 
/**
 * vmw_fence_obj_add_action - Add an action to a fence object.
 *
 * @fence: The fence object.
 * @action: The action to add.
 *
 * Note that the action callbacks may be executed before this function
 * returns.
 */
void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
			      struct vmw_fence_action *action)
{
	struct vmw_fence_manager *fman = fence->fman;
	unsigned long irq_flags;
	bool run_update = false;

	mutex_lock(&fman->goal_irq_mutex);
	spin_lock_irqsave(&fman->lock, irq_flags);

	fman->pending_actions[action->type]++;
	if (fence->signaled & DRM_VMW_FENCE_FLAG_EXEC) {
		struct list_head action_list;

		INIT_LIST_HEAD(&action_list);
		list_add_tail(&action->head, &action_list);
		vmw_fences_perform_actions(fman, &action_list);
	} else {
		list_add_tail(&action->head, &fence->seq_passed_actions);

		/*
		 * This function may set fman::seqno_valid, so it must
		 * be run with the goal_irq_mutex held.
		 */
		run_update = vmw_fence_goal_check_locked(fence);
	}

	spin_unlock_irqrestore(&fman->lock, irq_flags);

	if (run_update) {
		if (!fman->goal_irq_on) {
			fman->goal_irq_on = true;
			vmw_goal_waiter_add(fman->dev_priv);
		}
		vmw_fences_update(fman);
	}
	mutex_unlock(&fman->goal_irq_mutex);
}

/**
 * vmw_event_fence_action_queue - Queue a drm event for delivery when a
 * fence object seqno has passed.
 *
 * @file_priv: The file connection on which the event should be posted.
 * @fence: The fence object on which to post the event.
 * @event: Event to be posted. This event should've been alloced
 * using k[mz]alloc, and should've been completely initialized.
 * @tv_sec: If non-NULL, the variable pointed to is assigned the tv_sec value
 * of the time at which the fence signaled.
 * @tv_usec: Must be set if @tv_sec is set; assigned the tv_usec value of the
 * time at which the fence signaled.
 * @interruptible: Interruptible waits if possible.
 *
 * As a side effect, the object pointed to by @event may have been
 * freed when this function returns. If this function returns with
 * an error code, the caller needs to free that object.
 */
int vmw_event_fence_action_queue(struct drm_file *file_priv,
				 struct vmw_fence_obj *fence,
				 struct drm_pending_event *event,
				 uint32_t *tv_sec,
				 uint32_t *tv_usec,
				 bool interruptible)
{
	struct vmw_event_fence_action *eaction;
	struct vmw_fence_manager *fman = fence->fman;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	unsigned long irq_flags;

	eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
	if (unlikely(eaction == NULL))
		return -ENOMEM;

	eaction->event = event;

	eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
	eaction->action.cleanup = vmw_event_fence_action_cleanup;
	eaction->action.type = VMW_ACTION_EVENT;

	eaction->fence = vmw_fence_obj_reference(fence);
	eaction->dev = fman->dev_priv->dev;
	eaction->tv_sec = tv_sec;
	eaction->tv_usec = tv_usec;

	spin_lock_irqsave(&fman->lock, irq_flags);
	list_add_tail(&eaction->fpriv_head, &vmw_fp->fence_events);
	spin_unlock_irqrestore(&fman->lock, irq_flags);

	vmw_fence_obj_add_action(fence, &eaction->action);

	return 0;
}

struct vmw_event_fence_pending {
	struct drm_pending_event base;
	struct drm_vmw_event_fence event;
};

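/**
 * vmw_event_fence_action_create - Allocate a fence-signaled drm event and
 * queue it on a fence object.
 *
 * @file_priv: The file connection on which the event should be posted.
 * @fence: The fence object on which to post the event.
 * @flags: If DRM_VMW_FE_FLAG_REQ_TIME is set, the event carries the time at
 * which the fence signaled.
 * @user_data: Opaque value copied back to user-space in the event.
 * @interruptible: Interruptible waits if possible.
 *
 * Returns 0 on success, negative error code otherwise. On failure, the
 * allocated event is freed and the reserved event space is returned.
 */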
int vmw_event_fence_action_create(struct drm_file *file_priv,
				  struct vmw_fence_obj *fence,
				  uint32_t flags,
				  uint64_t user_data,
				  bool interruptible)
{
	struct vmw_event_fence_pending *event;
	struct drm_device *dev = fence->fman->dev_priv->dev;
	unsigned long irq_flags;
	int ret;

	spin_lock_irqsave(&dev->event_lock, irq_flags);

	ret = (file_priv->event_space < sizeof(event->event)) ? -EBUSY : 0;
	if (likely(ret == 0))
		file_priv->event_space -= sizeof(event->event);

	spin_unlock_irqrestore(&dev->event_lock, irq_flags);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate event space for this file.\n");
		goto out_no_space;
	}

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (unlikely(event == NULL)) {
		DRM_ERROR("Failed to allocate an event.\n");
		ret = -ENOMEM;
		goto out_no_event;
	}

	event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
	event->event.base.length = sizeof(event->event);
	event->event.user_data = user_data;

	event->base.event = &event->event.base;
	event->base.file_priv = file_priv;
	event->base.destroy = (void (*) (struct drm_pending_event *)) kfree;

	if (flags & DRM_VMW_FE_FLAG_REQ_TIME)
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   &event->event.tv_sec,
						   &event->event.tv_usec,
						   interruptible);
	else
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   NULL,
						   NULL,
						   interruptible);
	if (ret != 0)
		goto out_no_queue;

	return 0;

out_no_queue:
	event->base.destroy(&event->base);
out_no_event:
	spin_lock_irqsave(&dev->event_lock, irq_flags);
	file_priv->event_space += sizeof(event->event);
	spin_unlock_irqrestore(&dev->event_lock, irq_flags);
out_no_space:
	return ret;
}

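/**
 * vmw_fence_event_ioctl - Ioctl attaching a drm event to a fence object.
 *
 * @dev: The drm device.
 * @data: Pointer to a struct drm_vmw_fence_event_arg.
 * @file_priv: The calling file.
 *
 * Looks up or creates a fence object, attaches a fence-signaled event to it
 * and optionally returns a fence representation to user-space.
 */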
int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_fence_event_arg *arg =
		(struct drm_vmw_fence_event_arg *) data;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct drm_vmw_fence_rep __user *user_fence_rep =
		(struct drm_vmw_fence_rep __user *)(unsigned long)
		arg->fence_rep;
	uint32_t handle;
	int ret;

	/*
	 * Look up an existing fence object,
	 * and if user-space wants a new reference,
	 * add one.
	 */
	if (arg->handle) {
		struct ttm_base_object *base =
			ttm_base_object_lookup(vmw_fp->tfile, arg->handle);

		if (unlikely(base == NULL)) {
			DRM_ERROR("Fence event invalid fence object handle "
				  "0x%08lx.\n",
				  (unsigned long)arg->handle);
			return -EINVAL;
		}
		fence = &(container_of(base, struct vmw_user_fence,
				       base)->fence);
		(void) vmw_fence_obj_reference(fence);

		if (user_fence_rep != NULL) {
			bool existed;

			ret = ttm_ref_object_add(vmw_fp->tfile, base,
						 TTM_REF_USAGE, &existed);
			if (unlikely(ret != 0)) {
				DRM_ERROR("Failed to reference a fence "
					  "object.\n");
				goto out_no_ref_obj;
			}
			handle = base->hash.key;
		}
		ttm_base_object_unref(&base);
	}

	/*
	 * Create a new fence object.
	 */
	if (!fence) {
		ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
						 &fence,
						 (user_fence_rep) ?
						 &handle : NULL);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Fence event failed to create fence.\n");
			return ret;
		}
	}

	BUG_ON(fence == NULL);

	ret = vmw_event_fence_action_create(file_priv, fence,
					    arg->flags,
					    arg->user_data,
					    true);

	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Failed to attach event to fence.\n");
		goto out_no_create;
	}

	vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
				    handle);
	vmw_fence_obj_unreference(&fence);
	return 0;
out_no_create:
	if (user_fence_rep != NULL)
		ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					  handle, TTM_REF_USAGE);
out_no_ref_obj:
	vmw_fence_obj_unreference(&fence);
	return ret;
}