1dff96888SDirk Hohndel (VMware) // SPDX-License-Identifier: GPL-2.0 OR MIT
2ae2a1040SThomas Hellstrom /**************************************************************************
3ae2a1040SThomas Hellstrom  *
409881d29SZack Rusin  * Copyright 2011-2023 VMware, Inc., Palo Alto, CA., USA
5ae2a1040SThomas Hellstrom  *
6ae2a1040SThomas Hellstrom  * Permission is hereby granted, free of charge, to any person obtaining a
7ae2a1040SThomas Hellstrom  * copy of this software and associated documentation files (the
8ae2a1040SThomas Hellstrom  * "Software"), to deal in the Software without restriction, including
9ae2a1040SThomas Hellstrom  * without limitation the rights to use, copy, modify, merge, publish,
10ae2a1040SThomas Hellstrom  * distribute, sub license, and/or sell copies of the Software, and to
11ae2a1040SThomas Hellstrom  * permit persons to whom the Software is furnished to do so, subject to
12ae2a1040SThomas Hellstrom  * the following conditions:
13ae2a1040SThomas Hellstrom  *
14ae2a1040SThomas Hellstrom  * The above copyright notice and this permission notice (including the
15ae2a1040SThomas Hellstrom  * next paragraph) shall be included in all copies or substantial portions
16ae2a1040SThomas Hellstrom  * of the Software.
17ae2a1040SThomas Hellstrom  *
18ae2a1040SThomas Hellstrom  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19ae2a1040SThomas Hellstrom  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20ae2a1040SThomas Hellstrom  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21ae2a1040SThomas Hellstrom  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22ae2a1040SThomas Hellstrom  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23ae2a1040SThomas Hellstrom  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24ae2a1040SThomas Hellstrom  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25ae2a1040SThomas Hellstrom  *
26ae2a1040SThomas Hellstrom  **************************************************************************/
27ae2a1040SThomas Hellstrom 
286ae8748bSSam Ravnborg #include <linux/sched/signal.h>
296ae8748bSSam Ravnborg 
30ae2a1040SThomas Hellstrom #include "vmwgfx_drv.h"
31ae2a1040SThomas Hellstrom 
32ae2a1040SThomas Hellstrom #define VMW_FENCE_WRAP (1 << 31)
33ae2a1040SThomas Hellstrom 
/**
 * struct vmw_fence_manager - Per-device fence bookkeeping.
 *
 * @num_fence_objects: Number of fence objects on @fence_list.
 *	Protected by @lock.
 * @dev_priv: Back-pointer to the device private structure.
 * @lock: Spinlock protecting the fence list; also used as the spinlock
 *	of every dma_fence created by this manager.
 * @fence_list: List of fence objects created by this manager.
 * @work: Worker executing deferred fence-action cleanup outside
 *	atomic context.
 * @fifo_down: True while the command FIFO is down; fence creation is
 *	then refused with -EBUSY.
 * @cleanup_list: Fence actions queued for cleanup by @work.
 * @pending_actions: Per-action-type count of attached, not yet executed
 *	actions.
 * @goal_irq_mutex: Serializes switching the fence goal irq on and off.
 * @goal_irq_on: Protected by @goal_irq_mutex.
 * @seqno_valid: Protected by @lock, and may not be set to true
 *	without the @goal_irq_mutex held.
 * @ctx: dma_fence context number allocated for this manager.
 */
struct vmw_fence_manager {
	int num_fence_objects;
	struct vmw_private *dev_priv;
	spinlock_t lock;
	struct list_head fence_list;
	struct work_struct work;
	bool fifo_down;
	struct list_head cleanup_list;
	uint32_t pending_actions[VMW_ACTION_MAX];
	struct mutex goal_irq_mutex;
	bool goal_irq_on; /* Protected by @goal_irq_mutex */
	bool seqno_valid; /* Protected by @lock, and may not be set to true
			     without the @goal_irq_mutex held. */
	u64 ctx;
};
49ae2a1040SThomas Hellstrom 
/**
 * struct vmw_user_fence - A fence object exposed to user-space.
 *
 * @base: TTM base object providing the user-space handle and refcounting.
 * @fence: The embedded fence object proper.
 */
struct vmw_user_fence {
	struct ttm_base_object base;
	struct vmw_fence_obj fence;
};
54ae2a1040SThomas Hellstrom 
/**
 * struct vmw_event_fence_action - fence action that delivers a drm event.
 *
 * @action: A struct vmw_fence_action to hook up to a fence.
 * @event: A pointer to the pending event to be delivered when the fence
 * signals.
 * @fence: A referenced pointer to the fence to keep it alive while @action
 * hangs on it.
 * @dev: Pointer to a struct drm_device so we can access the event stuff.
 * @tv_sec: If non-null, the variable pointed to will be assigned
 * current time tv_sec val when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
 * be assigned the current time tv_usec val when the fence signals.
 */
struct vmw_event_fence_action {
	struct vmw_fence_action action;

	struct drm_pending_event *event;
	struct vmw_fence_obj *fence;
	struct drm_device *dev;

	uint32_t *tv_sec;
	uint32_t *tv_usec;
};
7857c5ee79SThomas Hellstrom 
/*
 * fman_from_fence - Recover the fence manager owning @fence.
 *
 * Works because dma_fence_init() stores a pointer to the manager's
 * @lock member in the fence base object, so container_of() on that
 * lock pointer yields the enclosing manager.
 */
static struct vmw_fence_manager *
fman_from_fence(struct vmw_fence_obj *fence)
{
	return container_of(fence->base.lock, struct vmw_fence_manager, lock);
}
842298e804SMaarten Lankhorst 
vmw_fence_goal_read(struct vmw_private * vmw)85c593197bSZack Rusin static u32 vmw_fence_goal_read(struct vmw_private *vmw)
86c593197bSZack Rusin {
87c593197bSZack Rusin 	if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
88c593197bSZack Rusin 		return vmw_read(vmw, SVGA_REG_FENCE_GOAL);
89c593197bSZack Rusin 	else
90c593197bSZack Rusin 		return vmw_fifo_mem_read(vmw, SVGA_FIFO_FENCE_GOAL);
91c593197bSZack Rusin }
92c593197bSZack Rusin 
/*
 * vmw_fence_goal_write - Write a new fence goal seqno to the device.
 *
 * Mirrors vmw_fence_goal_read(): uses the dedicated register when the
 * SVGA_CAP2_EXTRA_REGS capability is present, FIFO memory otherwise.
 */
static void vmw_fence_goal_write(struct vmw_private *vmw, u32 value)
{
	if (vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) {
		vmw_write(vmw, SVGA_REG_FENCE_GOAL, value);
		return;
	}

	vmw_fifo_mem_write(vmw, SVGA_FIFO_FENCE_GOAL, value);
}
100c593197bSZack Rusin 
101c6771b63SLee Jones /*
10257c5ee79SThomas Hellstrom  * Note on fencing subsystem usage of irqs:
10357c5ee79SThomas Hellstrom  * Typically the vmw_fences_update function is called
10457c5ee79SThomas Hellstrom  *
10557c5ee79SThomas Hellstrom  * a) When a new fence seqno has been submitted by the fifo code.
10657c5ee79SThomas Hellstrom  * b) On-demand when we have waiters. Sleeping waiters will switch on the
10757c5ee79SThomas Hellstrom  * ANY_FENCE irq and call vmw_fences_update function each time an ANY_FENCE
10857c5ee79SThomas Hellstrom  * irq is received. When the last fence waiter is gone, that IRQ is masked
10957c5ee79SThomas Hellstrom  * away.
11057c5ee79SThomas Hellstrom  *
11157c5ee79SThomas Hellstrom  * In situations where there are no waiters and we don't submit any new fences,
11257c5ee79SThomas Hellstrom  * fence objects may not be signaled. This is perfectly OK, since there are
11357c5ee79SThomas Hellstrom  * no consumers of the signaled data, but that is NOT ok when there are fence
11457c5ee79SThomas Hellstrom  * actions attached to a fence. The fencing subsystem then makes use of the
11557c5ee79SThomas Hellstrom  * FENCE_GOAL irq and sets the fence goal seqno to that of the next fence
11657c5ee79SThomas Hellstrom  * which has an action attached, and each time vmw_fences_update is called,
11757c5ee79SThomas Hellstrom  * the subsystem makes sure the fence goal seqno is updated.
11857c5ee79SThomas Hellstrom  *
11957c5ee79SThomas Hellstrom  * The fence goal seqno irq is on as long as there are unsignaled fence
12057c5ee79SThomas Hellstrom  * objects with actions attached to them.
121ae2a1040SThomas Hellstrom  */
122ae2a1040SThomas Hellstrom 
/*
 * vmw_fence_obj_destroy - dma_fence .release callback.
 *
 * Unlinks the fence from the manager's fence list under the manager
 * lock, then hands the fence to its type-specific destructor.
 */
static void vmw_fence_obj_destroy(struct dma_fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);

	spin_lock(&fman->lock);
	list_del_init(&fence->head);
	--fman->num_fence_objects;
	spin_unlock(&fman->lock);
	/* Type-specific teardown (plain kfree or ttm base object release). */
	fence->destroy(fence);
}
136ae2a1040SThomas Hellstrom 
/* dma_fence .get_driver_name callback. */
static const char *vmw_fence_get_driver_name(struct dma_fence *f)
{
	return "vmwgfx";
}
1412298e804SMaarten Lankhorst 
/* dma_fence .get_timeline_name callback. */
static const char *vmw_fence_get_timeline_name(struct dma_fence *f)
{
	return "svga";
}
1462298e804SMaarten Lankhorst 
vmw_fence_enable_signaling(struct dma_fence * f)147f54d1867SChris Wilson static bool vmw_fence_enable_signaling(struct dma_fence *f)
1482298e804SMaarten Lankhorst {
1492298e804SMaarten Lankhorst 	struct vmw_fence_obj *fence =
1502298e804SMaarten Lankhorst 		container_of(f, struct vmw_fence_obj, base);
1512298e804SMaarten Lankhorst 
1522298e804SMaarten Lankhorst 	struct vmw_fence_manager *fman = fman_from_fence(fence);
1532298e804SMaarten Lankhorst 	struct vmw_private *dev_priv = fman->dev_priv;
1542298e804SMaarten Lankhorst 
1552cd80dbdSZack Rusin 	u32 seqno = vmw_fence_read(dev_priv);
1562298e804SMaarten Lankhorst 	if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
1572298e804SMaarten Lankhorst 		return false;
1582298e804SMaarten Lankhorst 
1592298e804SMaarten Lankhorst 	return true;
1602298e804SMaarten Lankhorst }
1612298e804SMaarten Lankhorst 
/*
 * struct vmwgfx_wait_cb - dma_fence callback used by vmw_fence_wait()
 * to wake the waiting task when the fence signals.
 */
struct vmwgfx_wait_cb {
	struct dma_fence_cb base;
	struct task_struct *task;
};
1662298e804SMaarten Lankhorst 
/* Fence signal callback: wake the task sleeping in vmw_fence_wait(). */
static void
vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct vmwgfx_wait_cb *wait =
		container_of(cb, struct vmwgfx_wait_cb, base);

	wake_up_process(wait->task);
}
1752298e804SMaarten Lankhorst 
1762298e804SMaarten Lankhorst static void __vmw_fences_update(struct vmw_fence_manager *fman);
1772298e804SMaarten Lankhorst 
/*
 * vmw_fence_wait - dma_fence .wait callback.
 *
 * Custom wait that, in addition to sleeping on the fence, polls the
 * device seqno via __vmw_fences_update() each time around the loop,
 * and registers a seqno waiter so the ANY_FENCE irq is enabled for the
 * duration of the wait.
 *
 * Returns the remaining timeout (> 0) on success, 0 on timeout, or
 * -ERESTARTSYS if interrupted while @intr is set.
 */
static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;
	struct vmwgfx_wait_cb cb;
	long ret = timeout;

	if (likely(vmw_fence_obj_signaled(fence)))
		return timeout;

	/* Keep the ANY_FENCE irq enabled while we wait. */
	vmw_seqno_waiter_add(dev_priv);

	spin_lock(f->lock);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
		goto out;

	if (intr && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	/* Stack-allocated callback; removed from cb_list before return. */
	cb.base.func = vmwgfx_wait_cb;
	cb.task = current;
	list_add(&cb.base.node, &f->cb_list);

	for (;;) {
		__vmw_fences_update(fman);

		/*
		 * We can use the barrier free __set_current_state() since
		 * DMA_FENCE_FLAG_SIGNALED_BIT + wakeup is protected by the
		 * fence spinlock.
		 */
		if (intr)
			__set_current_state(TASK_INTERRUPTIBLE);
		else
			__set_current_state(TASK_UNINTERRUPTIBLE);

		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) {
			/* Signaled on the last tick: report success, not timeout. */
			if (ret == 0 && timeout > 0)
				ret = 1;
			break;
		}

		if (intr && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (ret == 0)
			break;

		/* Drop the lock while sleeping so the irq path can signal. */
		spin_unlock(f->lock);

		ret = schedule_timeout(ret);

		spin_lock(f->lock);
	}
	__set_current_state(TASK_RUNNING);
	if (!list_empty(&cb.base.node))
		list_del(&cb.base.node);

out:
	spin_unlock(f->lock);

	vmw_seqno_waiter_remove(dev_priv);

	return ret;
}
2512298e804SMaarten Lankhorst 
/* dma_fence ops shared by all vmwgfx fence objects. */
static const struct dma_fence_ops vmw_fence_ops = {
	.get_driver_name = vmw_fence_get_driver_name,
	.get_timeline_name = vmw_fence_get_timeline_name,
	.enable_signaling = vmw_fence_enable_signaling,
	.wait = vmw_fence_wait,
	.release = vmw_fence_obj_destroy,
};
2592298e804SMaarten Lankhorst 
260ae2a1040SThomas Hellstrom 
/*
 * Execute signal actions on fences recently signaled.
 * This is done from a workqueue so we don't have to execute
 * signal actions from atomic context.
 */

static void vmw_fence_work_func(struct work_struct *work)
{
	struct vmw_fence_manager *fman =
		container_of(work, struct vmw_fence_manager, work);
	struct list_head list;
	struct vmw_fence_action *action, *next_action;
	bool seqno_valid;

	do {
		INIT_LIST_HEAD(&list);
		/* goal_irq_mutex taken before fman->lock per lock ordering. */
		mutex_lock(&fman->goal_irq_mutex);

		spin_lock(&fman->lock);
		list_splice_init(&fman->cleanup_list, &list);
		seqno_valid = fman->seqno_valid;
		spin_unlock(&fman->lock);

		/*
		 * No valid fence goal seqno means no unsignaled fence has
		 * actions attached; the goal irq can be turned off.
		 */
		if (!seqno_valid && fman->goal_irq_on) {
			fman->goal_irq_on = false;
			vmw_goal_waiter_remove(fman->dev_priv);
		}
		mutex_unlock(&fman->goal_irq_mutex);

		if (list_empty(&list))
			return;

		/*
		 * At this point, only we should be able to manipulate the
		 * list heads of the actions we have on the private list.
		 * hence fman::lock not held.
		 */

		list_for_each_entry_safe(action, next_action, &list, head) {
			list_del_init(&action->head);
			if (action->cleanup)
				action->cleanup(action);
		}
	} while (1);
}
306ae2a1040SThomas Hellstrom 
/*
 * vmw_fence_manager_init - Allocate and initialize a fence manager.
 *
 * Returns the new manager, or NULL on allocation failure. The manager
 * starts with @fifo_down true; fence creation is refused until the
 * fifo-up path clears it.
 */
struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
	struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);

	if (unlikely(!fman))
		return NULL;

	fman->dev_priv = dev_priv;
	spin_lock_init(&fman->lock);
	INIT_LIST_HEAD(&fman->fence_list);
	INIT_LIST_HEAD(&fman->cleanup_list);
	INIT_WORK(&fman->work, &vmw_fence_work_func);
	fman->fifo_down = true;
	mutex_init(&fman->goal_irq_mutex);
	fman->ctx = dma_fence_context_alloc(1);

	return fman;
}
325ae2a1040SThomas Hellstrom 
/*
 * vmw_fence_manager_takedown - Tear down and free a fence manager.
 *
 * All fences must already be signaled and released; both the fence list
 * and the cleanup list must be empty (BUG otherwise).
 */
void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
	bool lists_empty;

	/* Make sure no cleanup work is still running or queued. */
	(void) cancel_work_sync(&fman->work);

	spin_lock(&fman->lock);
	lists_empty = list_empty(&fman->fence_list) &&
		list_empty(&fman->cleanup_list);
	spin_unlock(&fman->lock);

	BUG_ON(!lists_empty);
	kfree(fman);
}
340ae2a1040SThomas Hellstrom 
/*
 * vmw_fence_obj_init - Initialize a pre-allocated fence object and add
 * it to the manager's fence list.
 *
 * @fman: The fence manager.
 * @fence: Zeroed, caller-allocated fence object to initialize.
 * @seqno: Device seqno this fence signals on.
 * @destroy: Type-specific destructor invoked on final fence release.
 *
 * Returns 0 on success, -EBUSY if the fifo is down. On failure the
 * caller retains ownership of @fence.
 */
static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
			      struct vmw_fence_obj *fence, u32 seqno,
			      void (*destroy) (struct vmw_fence_obj *fence))
{
	int ret = 0;

	dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
		       fman->ctx, seqno);
	INIT_LIST_HEAD(&fence->seq_passed_actions);
	fence->destroy = destroy;

	spin_lock(&fman->lock);
	if (unlikely(fman->fifo_down)) {
		ret = -EBUSY;
		goto out_unlock;
	}
	list_add_tail(&fence->head, &fman->fence_list);
	++fman->num_fence_objects;

out_unlock:
	spin_unlock(&fman->lock);
	return ret;

}
365ae2a1040SThomas Hellstrom 
/*
 * vmw_fences_perform_actions - Run seq_passed on a list of fence actions
 * and queue their cleanup.
 *
 * Called with the fence manager lock held. The cleanup callbacks are
 * deferred to the manager's worker since they may sleep.
 */
static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
				struct list_head *list)
{
	struct vmw_fence_action *action, *next_action;

	list_for_each_entry_safe(action, next_action, list, head) {
		list_del_init(&action->head);
		fman->pending_actions[action->type]--;
		if (action->seq_passed != NULL)
			action->seq_passed(action);

		/*
		 * Add the cleanup action to the cleanup list so that
		 * it will be performed by a worker task.
		 */

		list_add_tail(&action->head, &fman->cleanup_list);
	}
}
385ae2a1040SThomas Hellstrom 
/**
 * vmw_fence_goal_new_locked - Figure out a new device fence goal
 * seqno if needed.
 *
 * @fman: Pointer to a fence manager.
 * @passed_seqno: The seqno the device currently signals as passed.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when we have a new passed_seqno, and
 * we might need to update the fence goal. It checks to see whether
 * the current fence goal has already passed, and, in that case,
 * scans through all unsignaled fences to get the next fence object with an
 * action attached, and sets the seqno of that fence as a new fence goal.
 *
 * returns true if the device goal seqno was updated. False otherwise.
 */
static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
				      u32 passed_seqno)
{
	u32 goal_seqno;
	struct vmw_fence_obj *fence;

	/* No goal set: nothing to update. */
	if (likely(!fman->seqno_valid))
		return false;

	goal_seqno = vmw_fence_goal_read(fman->dev_priv);
	/* Current goal still in the future (wrap-aware): keep it. */
	if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
		return false;

	fman->seqno_valid = false;
	/* fence_list is seqno-ordered; first fence with actions is the goal. */
	list_for_each_entry(fence, &fman->fence_list, head) {
		if (!list_empty(&fence->seq_passed_actions)) {
			fman->seqno_valid = true;
			vmw_fence_goal_write(fman->dev_priv,
					     fence->base.seqno);
			break;
		}
	}

	return true;
}
42757c5ee79SThomas Hellstrom 
42857c5ee79SThomas Hellstrom 
/**
 * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
 * needed.
 *
 * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
 * considered as a device fence goal.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when an action has been attached to a fence to
 * check whether the seqno of that fence should be used for a fence
 * goal interrupt. This is typically needed if the current fence goal is
 * invalid, or has a higher seqno than that of the current fence object.
 *
 * returns true if the device goal seqno was updated. False otherwise.
 */
static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	u32 goal_seqno;

	/* Already signaled fences never need a goal irq. */
	if (dma_fence_is_signaled_locked(&fence->base))
		return false;

	goal_seqno = vmw_fence_goal_read(fman->dev_priv);
	/* Existing goal is valid and not later than this fence: keep it. */
	if (likely(fman->seqno_valid &&
		   goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
		return false;

	vmw_fence_goal_write(fman->dev_priv, fence->base.seqno);
	fman->seqno_valid = true;

	return true;
}
46257c5ee79SThomas Hellstrom 
__vmw_fences_update(struct vmw_fence_manager * fman)4632298e804SMaarten Lankhorst static void __vmw_fences_update(struct vmw_fence_manager *fman)
464ae2a1040SThomas Hellstrom {
465ae2a1040SThomas Hellstrom 	struct vmw_fence_obj *fence, *next_fence;
466ae2a1040SThomas Hellstrom 	struct list_head action_list;
46757c5ee79SThomas Hellstrom 	bool needs_rerun;
46857c5ee79SThomas Hellstrom 	uint32_t seqno, new_seqno;
469ae2a1040SThomas Hellstrom 
4702cd80dbdSZack Rusin 	seqno = vmw_fence_read(fman->dev_priv);
47157c5ee79SThomas Hellstrom rerun:
472ae2a1040SThomas Hellstrom 	list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
4732298e804SMaarten Lankhorst 		if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
474ae2a1040SThomas Hellstrom 			list_del_init(&fence->head);
475f54d1867SChris Wilson 			dma_fence_signal_locked(&fence->base);
476ae2a1040SThomas Hellstrom 			INIT_LIST_HEAD(&action_list);
477ae2a1040SThomas Hellstrom 			list_splice_init(&fence->seq_passed_actions,
478ae2a1040SThomas Hellstrom 					 &action_list);
479ae2a1040SThomas Hellstrom 			vmw_fences_perform_actions(fman, &action_list);
48057c5ee79SThomas Hellstrom 		} else
48157c5ee79SThomas Hellstrom 			break;
482ae2a1040SThomas Hellstrom 	}
483ae2a1040SThomas Hellstrom 
48457c5ee79SThomas Hellstrom 	/*
48557c5ee79SThomas Hellstrom 	 * Rerun if the fence goal seqno was updated, and the
48657c5ee79SThomas Hellstrom 	 * hardware might have raced with that update, so that
48757c5ee79SThomas Hellstrom 	 * we missed a fence_goal irq.
48857c5ee79SThomas Hellstrom 	 */
48957c5ee79SThomas Hellstrom 
4902298e804SMaarten Lankhorst 	needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
49157c5ee79SThomas Hellstrom 	if (unlikely(needs_rerun)) {
4922cd80dbdSZack Rusin 		new_seqno = vmw_fence_read(fman->dev_priv);
49357c5ee79SThomas Hellstrom 		if (new_seqno != seqno) {
49457c5ee79SThomas Hellstrom 			seqno = new_seqno;
49557c5ee79SThomas Hellstrom 			goto rerun;
49657c5ee79SThomas Hellstrom 		}
49757c5ee79SThomas Hellstrom 	}
4982298e804SMaarten Lankhorst 
4992298e804SMaarten Lankhorst 	if (!list_empty(&fman->cleanup_list))
5002298e804SMaarten Lankhorst 		(void) schedule_work(&fman->work);
5012298e804SMaarten Lankhorst }
5022298e804SMaarten Lankhorst 
/* Locked wrapper around __vmw_fences_update(). */
void vmw_fences_update(struct vmw_fence_manager *fman)
{
	spin_lock(&fman->lock);
	__vmw_fences_update(fman);
	spin_unlock(&fman->lock);
}
509ae2a1040SThomas Hellstrom 
/*
 * vmw_fence_obj_signaled - Check whether a fence has signaled,
 * polling the device seqno if it has not yet been marked signaled.
 */
bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	/* Fast path: already marked signaled. */
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
		return true;

	/* Poll the device and signal any newly-passed fences. */
	vmw_fences_update(fman);

	return dma_fence_is_signaled(&fence->base);
}
521ae2a1040SThomas Hellstrom 
/*
 * vmw_fence_obj_wait - Wait for a fence with a timeout.
 *
 * @fence: The fence to wait on.
 * @lazy: Unused; kept for interface compatibility.
 * @interruptible: Whether the wait may be interrupted by signals.
 * @timeout: Timeout in jiffies.
 *
 * Returns 0 if the fence signaled, -EBUSY on timeout, or the negative
 * error returned by dma_fence_wait_timeout() (e.g. -ERESTARTSYS).
 */
int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
		       bool interruptible, unsigned long timeout)
{
	long ret = dma_fence_wait_timeout(&fence->base, interruptible, timeout);

	if (ret > 0)
		return 0;

	return ret == 0 ? -EBUSY : ret;
}
534ae2a1040SThomas Hellstrom 
/* Destructor for plain (non user-space) fences: free via RCU. */
static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
	dma_fence_free(&fence->base);
}
539ae2a1040SThomas Hellstrom 
/*
 * vmw_fence_create - Allocate and initialize a kernel-internal fence.
 *
 * @fman: The fence manager.
 * @seqno: Device seqno the fence signals on.
 * @p_fence: On success, assigned the new fence (caller owns a reference).
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the error
 * from vmw_fence_obj_init() (e.g. -EBUSY while the fifo is down).
 */
int vmw_fence_create(struct vmw_fence_manager *fman,
		     uint32_t seqno,
		     struct vmw_fence_obj **p_fence)
{
	struct vmw_fence_obj *fence;
	int ret;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return -ENOMEM;

	ret = vmw_fence_obj_init(fman, fence, seqno, vmw_fence_destroy);
	if (ret != 0) {
		kfree(fence);
		return ret;
	}

	*p_fence = fence;
	return 0;
}
563ae2a1040SThomas Hellstrom 
564ae2a1040SThomas Hellstrom 
/*
 * dma_fence destroy callback for user fences; frees the embedding
 * ttm base object together with the fence.
 */
static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
{
	struct vmw_user_fence *ufence =
		container_of(fence, struct vmw_user_fence, fence);

	ttm_base_object_kfree(ufence, base);
}
572ae2a1040SThomas Hellstrom 
vmw_user_fence_base_release(struct ttm_base_object ** p_base)573ae2a1040SThomas Hellstrom static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
574ae2a1040SThomas Hellstrom {
575ae2a1040SThomas Hellstrom 	struct ttm_base_object *base = *p_base;
576ae2a1040SThomas Hellstrom 	struct vmw_user_fence *ufence =
577ae2a1040SThomas Hellstrom 		container_of(base, struct vmw_user_fence, base);
578ae2a1040SThomas Hellstrom 	struct vmw_fence_obj *fence = &ufence->fence;
579ae2a1040SThomas Hellstrom 
580ae2a1040SThomas Hellstrom 	*p_base = NULL;
581ae2a1040SThomas Hellstrom 	vmw_fence_obj_unreference(&fence);
582ae2a1040SThomas Hellstrom }
583ae2a1040SThomas Hellstrom 
/*
 * vmw_user_fence_create - Create a fence object visible to user-space.
 * @file_priv: The file the fence handle is created for.
 * @fman: The fence manager the fence belongs to.
 * @seqno: The seqno the fence will signal on.
 * @p_fence: Assigned the new fence on success.
 * @p_handle: Assigned the user-space handle on success.
 *
 * Creates a fence wrapped in a ttm base object so that user-space can
 * reference it by handle. On success the fence carries two references:
 * one owned by the base object and one returned through @p_fence.
 */
int vmw_user_fence_create(struct drm_file *file_priv,
			  struct vmw_fence_manager *fman,
			  uint32_t seqno,
			  struct vmw_fence_obj **p_fence,
			  uint32_t *p_handle)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_user_fence *ufence;
	struct vmw_fence_obj *tmp;
	int ret;

	ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
	if (unlikely(!ufence)) {
		ret = -ENOMEM;
		goto out_no_object;
	}

	ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
				 vmw_user_fence_destroy);
	if (unlikely(ret != 0)) {
		kfree(ufence);
		goto out_no_object;
	}

	/*
	 * The base object holds a reference which is freed in
	 * vmw_user_fence_base_release.
	 */
	tmp = vmw_fence_obj_reference(&ufence->fence);

	ret = ttm_base_object_init(tfile, &ufence->base, false,
				   VMW_RES_FENCE,
				   &vmw_user_fence_base_release);


	if (unlikely(ret != 0)) {
		/*
		 * Free the base object's reference
		 */
		vmw_fence_obj_unreference(&tmp);
		goto out_err;
	}

	*p_fence = &ufence->fence;
	*p_handle = ufence->base.handle;

	return 0;

	/* Also drop the creator's reference; this frees ufence. */
out_err:
	tmp = &ufence->fence;
	vmw_fence_obj_unreference(&tmp);
out_no_object:
	return ret;
}
637ae2a1040SThomas Hellstrom 
638c6771b63SLee Jones /*
639ae2a1040SThomas Hellstrom  * vmw_fence_fifo_down - signal all unsignaled fence objects.
640ae2a1040SThomas Hellstrom  */
641ae2a1040SThomas Hellstrom 
void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
{
	struct list_head action_list;
	int ret;

	/*
	 * The list may be altered while we traverse it, so always
	 * restart when we've released the fman->lock.
	 */

	spin_lock(&fman->lock);
	fman->fifo_down = true;
	while (!list_empty(&fman->fence_list)) {
		struct vmw_fence_obj *fence =
			list_entry(fman->fence_list.prev, struct vmw_fence_obj,
				   head);
		/* Hold the fence across the unlocked wait below. */
		dma_fence_get(&fence->base);
		spin_unlock(&fman->lock);

		ret = vmw_fence_obj_wait(fence, false, false,
					 VMW_FENCE_WAIT_TIMEOUT);

		if (unlikely(ret != 0)) {
			/*
			 * The wait failed or timed out: force-signal the
			 * fence and run its pending actions so nothing is
			 * left waiting on a fifo that is going away.
			 */
			list_del_init(&fence->head);
			dma_fence_signal(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		}

		BUG_ON(!list_empty(&fence->head));
		dma_fence_put(&fence->base);
		spin_lock(&fman->lock);
	}
	spin_unlock(&fman->lock);
}
679ae2a1040SThomas Hellstrom 
/* Re-enable normal fence handling after the command fifo comes back up. */
void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
{
	spin_lock(&fman->lock);
	fman->fifo_down = false;
	spin_unlock(&fman->lock);
}
686ae2a1040SThomas Hellstrom 
687ae2a1040SThomas Hellstrom 
688f7652afaSThomas Hellstrom /**
689f7652afaSThomas Hellstrom  * vmw_fence_obj_lookup - Look up a user-space fence object
690f7652afaSThomas Hellstrom  *
691f7652afaSThomas Hellstrom  * @tfile: A struct ttm_object_file identifying the caller.
692f7652afaSThomas Hellstrom  * @handle: A handle identifying the fence object.
693f7652afaSThomas Hellstrom  * @return: A struct vmw_user_fence base ttm object on success or
694f7652afaSThomas Hellstrom  * an error pointer on failure.
695f7652afaSThomas Hellstrom  *
696f7652afaSThomas Hellstrom  * The fence object is looked up and type-checked. The caller needs
697f7652afaSThomas Hellstrom  * to have opened the fence object first, but since that happens on
698f7652afaSThomas Hellstrom  * creation and fence objects aren't shareable, that's not an
699f7652afaSThomas Hellstrom  * issue currently.
700f7652afaSThomas Hellstrom  */
701f7652afaSThomas Hellstrom static struct ttm_base_object *
vmw_fence_obj_lookup(struct ttm_object_file * tfile,u32 handle)702f7652afaSThomas Hellstrom vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
703f7652afaSThomas Hellstrom {
704f7652afaSThomas Hellstrom 	struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);
705f7652afaSThomas Hellstrom 
706f7652afaSThomas Hellstrom 	if (!base) {
707f7652afaSThomas Hellstrom 		pr_err("Invalid fence object handle 0x%08lx.\n",
708f7652afaSThomas Hellstrom 		       (unsigned long)handle);
709f7652afaSThomas Hellstrom 		return ERR_PTR(-EINVAL);
710f7652afaSThomas Hellstrom 	}
711f7652afaSThomas Hellstrom 
712f7652afaSThomas Hellstrom 	if (base->refcount_release != vmw_user_fence_base_release) {
713f7652afaSThomas Hellstrom 		pr_err("Invalid fence object handle 0x%08lx.\n",
714f7652afaSThomas Hellstrom 		       (unsigned long)handle);
715f7652afaSThomas Hellstrom 		ttm_base_object_unref(&base);
716f7652afaSThomas Hellstrom 		return ERR_PTR(-EINVAL);
717f7652afaSThomas Hellstrom 	}
718f7652afaSThomas Hellstrom 
719f7652afaSThomas Hellstrom 	return base;
720f7652afaSThomas Hellstrom }
721f7652afaSThomas Hellstrom 
722f7652afaSThomas Hellstrom 
vmw_fence_obj_wait_ioctl(struct drm_device * dev,void * data,struct drm_file * file_priv)723ae2a1040SThomas Hellstrom int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
724ae2a1040SThomas Hellstrom 			     struct drm_file *file_priv)
725ae2a1040SThomas Hellstrom {
726ae2a1040SThomas Hellstrom 	struct drm_vmw_fence_wait_arg *arg =
727ae2a1040SThomas Hellstrom 	    (struct drm_vmw_fence_wait_arg *)data;
728ae2a1040SThomas Hellstrom 	unsigned long timeout;
729ae2a1040SThomas Hellstrom 	struct ttm_base_object *base;
730ae2a1040SThomas Hellstrom 	struct vmw_fence_obj *fence;
731ae2a1040SThomas Hellstrom 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
732ae2a1040SThomas Hellstrom 	int ret;
733ae2a1040SThomas Hellstrom 	uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);
734ae2a1040SThomas Hellstrom 
735ae2a1040SThomas Hellstrom 	/*
736ae2a1040SThomas Hellstrom 	 * 64-bit division not present on 32-bit systems, so do an
737ae2a1040SThomas Hellstrom 	 * approximation. (Divide by 1000000).
738ae2a1040SThomas Hellstrom 	 */
739ae2a1040SThomas Hellstrom 
740ae2a1040SThomas Hellstrom 	wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
741ae2a1040SThomas Hellstrom 	  (wait_timeout >> 26);
742ae2a1040SThomas Hellstrom 
743ae2a1040SThomas Hellstrom 	if (!arg->cookie_valid) {
744ae2a1040SThomas Hellstrom 		arg->cookie_valid = 1;
745ae2a1040SThomas Hellstrom 		arg->kernel_cookie = jiffies + wait_timeout;
746ae2a1040SThomas Hellstrom 	}
747ae2a1040SThomas Hellstrom 
748f7652afaSThomas Hellstrom 	base = vmw_fence_obj_lookup(tfile, arg->handle);
749f7652afaSThomas Hellstrom 	if (IS_ERR(base))
750f7652afaSThomas Hellstrom 		return PTR_ERR(base);
751ae2a1040SThomas Hellstrom 
752ae2a1040SThomas Hellstrom 	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
753ae2a1040SThomas Hellstrom 
754ae2a1040SThomas Hellstrom 	timeout = jiffies;
755ae2a1040SThomas Hellstrom 	if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
756c060a4e1SMaarten Lankhorst 		ret = ((vmw_fence_obj_signaled(fence)) ?
757ae2a1040SThomas Hellstrom 		       0 : -EBUSY);
758ae2a1040SThomas Hellstrom 		goto out;
759ae2a1040SThomas Hellstrom 	}
760ae2a1040SThomas Hellstrom 
761ae2a1040SThomas Hellstrom 	timeout = (unsigned long)arg->kernel_cookie - timeout;
762ae2a1040SThomas Hellstrom 
763c060a4e1SMaarten Lankhorst 	ret = vmw_fence_obj_wait(fence, arg->lazy, true, timeout);
764ae2a1040SThomas Hellstrom 
765ae2a1040SThomas Hellstrom out:
766ae2a1040SThomas Hellstrom 	ttm_base_object_unref(&base);
767ae2a1040SThomas Hellstrom 
768ae2a1040SThomas Hellstrom 	/*
769ae2a1040SThomas Hellstrom 	 * Optionally unref the fence object.
770ae2a1040SThomas Hellstrom 	 */
771ae2a1040SThomas Hellstrom 
772ae2a1040SThomas Hellstrom 	if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
7738afa13a0SZack Rusin 		return ttm_ref_object_base_unref(tfile, arg->handle);
774ae2a1040SThomas Hellstrom 	return ret;
775ae2a1040SThomas Hellstrom }
776ae2a1040SThomas Hellstrom 
vmw_fence_obj_signaled_ioctl(struct drm_device * dev,void * data,struct drm_file * file_priv)777ae2a1040SThomas Hellstrom int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
778ae2a1040SThomas Hellstrom 				 struct drm_file *file_priv)
779ae2a1040SThomas Hellstrom {
780ae2a1040SThomas Hellstrom 	struct drm_vmw_fence_signaled_arg *arg =
781ae2a1040SThomas Hellstrom 		(struct drm_vmw_fence_signaled_arg *) data;
782ae2a1040SThomas Hellstrom 	struct ttm_base_object *base;
783ae2a1040SThomas Hellstrom 	struct vmw_fence_obj *fence;
784ae2a1040SThomas Hellstrom 	struct vmw_fence_manager *fman;
785ae2a1040SThomas Hellstrom 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
786ae2a1040SThomas Hellstrom 	struct vmw_private *dev_priv = vmw_priv(dev);
787ae2a1040SThomas Hellstrom 
788f7652afaSThomas Hellstrom 	base = vmw_fence_obj_lookup(tfile, arg->handle);
789f7652afaSThomas Hellstrom 	if (IS_ERR(base))
790f7652afaSThomas Hellstrom 		return PTR_ERR(base);
791ae2a1040SThomas Hellstrom 
792ae2a1040SThomas Hellstrom 	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
7932298e804SMaarten Lankhorst 	fman = fman_from_fence(fence);
794ae2a1040SThomas Hellstrom 
795c060a4e1SMaarten Lankhorst 	arg->signaled = vmw_fence_obj_signaled(fence);
796ae2a1040SThomas Hellstrom 
797c060a4e1SMaarten Lankhorst 	arg->signaled_flags = arg->flags;
798ef369904SThomas Hellstrom 	spin_lock(&fman->lock);
799ae2a1040SThomas Hellstrom 	arg->passed_seqno = dev_priv->last_read_seqno;
800ef369904SThomas Hellstrom 	spin_unlock(&fman->lock);
801ae2a1040SThomas Hellstrom 
802ae2a1040SThomas Hellstrom 	ttm_base_object_unref(&base);
803ae2a1040SThomas Hellstrom 
804ae2a1040SThomas Hellstrom 	return 0;
805ae2a1040SThomas Hellstrom }
806ae2a1040SThomas Hellstrom 
807ae2a1040SThomas Hellstrom 
/* Drop the caller's user-space reference on a fence object handle. */
int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_fence_arg *arg =
		(struct drm_vmw_fence_arg *) data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle);
}
81757c5ee79SThomas Hellstrom 
8186b82ef50SThomas Hellstrom /**
81957c5ee79SThomas Hellstrom  * vmw_event_fence_action_seq_passed
82057c5ee79SThomas Hellstrom  *
82157c5ee79SThomas Hellstrom  * @action: The struct vmw_fence_action embedded in a struct
82257c5ee79SThomas Hellstrom  * vmw_event_fence_action.
82357c5ee79SThomas Hellstrom  *
82457c5ee79SThomas Hellstrom  * This function is called when the seqno of the fence where @action is
82557c5ee79SThomas Hellstrom  * attached has passed. It queues the event on the submitter's event list.
826ef369904SThomas Hellstrom  * This function is always called from atomic context.
82757c5ee79SThomas Hellstrom  */
vmw_event_fence_action_seq_passed(struct vmw_fence_action * action)82857c5ee79SThomas Hellstrom static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
82957c5ee79SThomas Hellstrom {
83057c5ee79SThomas Hellstrom 	struct vmw_event_fence_action *eaction =
83157c5ee79SThomas Hellstrom 		container_of(action, struct vmw_event_fence_action, action);
83257c5ee79SThomas Hellstrom 	struct drm_device *dev = eaction->dev;
8336b82ef50SThomas Hellstrom 	struct drm_pending_event *event = eaction->event;
83457c5ee79SThomas Hellstrom 
8356b82ef50SThomas Hellstrom 	if (unlikely(event == NULL))
8366b82ef50SThomas Hellstrom 		return;
8376b82ef50SThomas Hellstrom 
838ef369904SThomas Hellstrom 	spin_lock_irq(&dev->event_lock);
83957c5ee79SThomas Hellstrom 
84057c5ee79SThomas Hellstrom 	if (likely(eaction->tv_sec != NULL)) {
84137efe80cSArnd Bergmann 		struct timespec64 ts;
84257c5ee79SThomas Hellstrom 
84337efe80cSArnd Bergmann 		ktime_get_ts64(&ts);
84437efe80cSArnd Bergmann 		/* monotonic time, so no y2038 overflow */
84537efe80cSArnd Bergmann 		*eaction->tv_sec = ts.tv_sec;
84637efe80cSArnd Bergmann 		*eaction->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
84757c5ee79SThomas Hellstrom 	}
84857c5ee79SThomas Hellstrom 
849fb740cf2SDaniel Vetter 	drm_send_event_locked(dev, eaction->event);
85015b6b804SDan Carpenter 	eaction->event = NULL;
851ef369904SThomas Hellstrom 	spin_unlock_irq(&dev->event_lock);
85257c5ee79SThomas Hellstrom }
85357c5ee79SThomas Hellstrom 
85457c5ee79SThomas Hellstrom /**
85557c5ee79SThomas Hellstrom  * vmw_event_fence_action_cleanup
85657c5ee79SThomas Hellstrom  *
85757c5ee79SThomas Hellstrom  * @action: The struct vmw_fence_action embedded in a struct
85857c5ee79SThomas Hellstrom  * vmw_event_fence_action.
85957c5ee79SThomas Hellstrom  *
86057c5ee79SThomas Hellstrom  * This function is the struct vmw_fence_action destructor. It's typically
86157c5ee79SThomas Hellstrom  * called from a workqueue.
86257c5ee79SThomas Hellstrom  */
static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);

	/* Drop the action's fence reference before freeing the action. */
	vmw_fence_obj_unreference(&eaction->fence);
	kfree(eaction);
}
87157c5ee79SThomas Hellstrom 
87257c5ee79SThomas Hellstrom 
87357c5ee79SThomas Hellstrom /**
87457c5ee79SThomas Hellstrom  * vmw_fence_obj_add_action - Add an action to a fence object.
87557c5ee79SThomas Hellstrom  *
876c6771b63SLee Jones  * @fence: The fence object.
877c6771b63SLee Jones  * @action: The action to add.
87857c5ee79SThomas Hellstrom  *
87957c5ee79SThomas Hellstrom  * Note that the action callbacks may be executed before this function
88057c5ee79SThomas Hellstrom  * returns.
88157c5ee79SThomas Hellstrom  */
static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
			      struct vmw_fence_action *action)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	bool run_update = false;

	/* Lock order: goal_irq_mutex outside fman->lock. */
	mutex_lock(&fman->goal_irq_mutex);
	spin_lock(&fman->lock);

	fman->pending_actions[action->type]++;
	if (dma_fence_is_signaled_locked(&fence->base)) {
		/* Fence already signaled: run the action right away. */
		struct list_head action_list;

		INIT_LIST_HEAD(&action_list);
		list_add_tail(&action->head, &action_list);
		vmw_fences_perform_actions(fman, &action_list);
	} else {
		list_add_tail(&action->head, &fence->seq_passed_actions);

		/*
		 * This function may set fman::seqno_valid, so it must
		 * be run with the goal_irq_mutex held.
		 */
		run_update = vmw_fence_goal_check_locked(fence);
	}

	spin_unlock(&fman->lock);

	if (run_update) {
		if (!fman->goal_irq_on) {
			fman->goal_irq_on = true;
			vmw_goal_waiter_add(fman->dev_priv);
		}
		/* Catch fences that passed before the goal irq was enabled. */
		vmw_fences_update(fman);
	}
	mutex_unlock(&fman->goal_irq_mutex);

}
92057c5ee79SThomas Hellstrom 
92157c5ee79SThomas Hellstrom /**
9222cd80dbdSZack Rusin  * vmw_event_fence_action_queue - Post an event for sending when a fence
92357c5ee79SThomas Hellstrom  * object seqno has passed.
92457c5ee79SThomas Hellstrom  *
92557c5ee79SThomas Hellstrom  * @file_priv: The file connection on which the event should be posted.
92657c5ee79SThomas Hellstrom  * @fence: The fence object on which to post the event.
92757c5ee79SThomas Hellstrom  * @event: Event to be posted. This event should've been alloced
92857c5ee79SThomas Hellstrom  * using k[mz]alloc, and should've been completely initialized.
929c6771b63SLee Jones  * @tv_sec: If non-null, the variable pointed to will be assigned
930c6771b63SLee Jones  * current time tv_sec val when the fence signals.
931c6771b63SLee Jones  * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
932c6771b63SLee Jones  * be assigned the current time tv_usec val when the fence signals.
93357c5ee79SThomas Hellstrom  * @interruptible: Interruptible waits if possible.
93457c5ee79SThomas Hellstrom  *
93557c5ee79SThomas Hellstrom  * As a side effect, the object pointed to by @event may have been
93657c5ee79SThomas Hellstrom  * freed when this function returns. If this function returns with
93757c5ee79SThomas Hellstrom  * an error code, the caller needs to free that object.
93857c5ee79SThomas Hellstrom  */
93957c5ee79SThomas Hellstrom 
int vmw_event_fence_action_queue(struct drm_file *file_priv,
				 struct vmw_fence_obj *fence,
				 struct drm_pending_event *event,
				 uint32_t *tv_sec,
				 uint32_t *tv_usec,
				 bool interruptible)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_event_fence_action *eaction =
		kzalloc(sizeof(*eaction), GFP_KERNEL);

	if (!eaction)
		return -ENOMEM;

	/* Wire up the generic action callbacks. */
	eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
	eaction->action.cleanup = vmw_event_fence_action_cleanup;
	eaction->action.type = VMW_ACTION_EVENT;

	/* The action holds a fence reference until cleanup. */
	eaction->event = event;
	eaction->fence = vmw_fence_obj_reference(fence);
	eaction->dev = &fman->dev_priv->drm;
	eaction->tv_sec = tv_sec;
	eaction->tv_usec = tv_usec;

	/* May fire immediately if the fence has already signaled. */
	vmw_fence_obj_add_action(fence, &eaction->action);

	return 0;
}
96957c5ee79SThomas Hellstrom 
/* A pending DRM event with the vmw fence event payload appended. */
struct vmw_event_fence_pending {
	struct drm_pending_event base;
	struct drm_vmw_event_fence event;
};
9748b7de6aaSJakob Bornecrantz 
/*
 * Allocate and reserve a DRM pending event and attach it to @fence so it
 * is delivered to @file_priv when the fence signals. When
 * DRM_VMW_FE_FLAG_REQ_TIME is set in @flags, the signal time is filled
 * into the event. On failure the event is cancelled/freed here; the
 * caller owns nothing.
 */
static int vmw_event_fence_action_create(struct drm_file *file_priv,
				  struct vmw_fence_obj *fence,
				  uint32_t flags,
				  uint64_t user_data,
				  bool interruptible)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct drm_device *dev = &fman->dev_priv->drm;
	struct vmw_event_fence_pending *event;
	uint32_t *tv_sec = NULL;
	uint32_t *tv_usec = NULL;
	int ret;

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (!event) {
		DRM_ERROR("Failed to allocate an event.\n");
		return -ENOMEM;
	}

	event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
	event->event.base.length = sizeof(event->event);
	event->event.user_data = user_data;

	ret = drm_event_reserve_init(dev, file_priv, &event->base,
				     &event->event.base);
	if (ret) {
		DRM_ERROR("Failed to allocate event space for this file.\n");
		kfree(event);
		return ret;
	}

	/* Only report the signal time if user-space asked for it. */
	if (flags & DRM_VMW_FE_FLAG_REQ_TIME) {
		tv_sec = &event->event.tv_sec;
		tv_usec = &event->event.tv_usec;
	}

	ret = vmw_event_fence_action_queue(file_priv, fence, &event->base,
					   tv_sec, tv_usec, interruptible);
	if (ret != 0)
		drm_event_cancel_free(dev, &event->base);

	return ret;
}
10278b7de6aaSJakob Bornecrantz 
/*
 * vmw_fence_event_ioctl - Attach a DRM event to a fence object.
 *
 * Looks up the fence given by @arg->handle (adding a user-space
 * reference if a fence_rep is requested), or creates a new fence if no
 * handle was given, then queues a DRM_VMW_EVENT_FENCE_SIGNALED event
 * that is delivered when the fence signals.
 */
int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_fence_event_arg *arg =
		(struct drm_vmw_fence_event_arg *) data;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct ttm_object_file *tfile = vmw_fp->tfile;
	struct drm_vmw_fence_rep __user *user_fence_rep =
		(struct drm_vmw_fence_rep __user *)(unsigned long)
		arg->fence_rep;
	/* NOTE: handle is only assigned when user_fence_rep != NULL. */
	uint32_t handle;
	int ret;

	/*
	 * Look up an existing fence object,
	 * and if user-space wants a new reference,
	 * add one.
	 */
	if (arg->handle) {
		struct ttm_base_object *base =
			vmw_fence_obj_lookup(tfile, arg->handle);

		if (IS_ERR(base))
			return PTR_ERR(base);

		fence = &(container_of(base, struct vmw_user_fence,
				       base)->fence);
		/* Local reference, dropped on exit from this function. */
		(void) vmw_fence_obj_reference(fence);

		if (user_fence_rep != NULL) {
			ret = ttm_ref_object_add(vmw_fp->tfile, base,
						 NULL, false);
			if (unlikely(ret != 0)) {
				DRM_ERROR("Failed to reference a fence "
					  "object.\n");
				goto out_no_ref_obj;
			}
			handle = base->handle;
		}
		ttm_base_object_unref(&base);
	}

	/*
	 * Create a new fence object.
	 */
	if (!fence) {
		ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
						 &fence,
						 (user_fence_rep) ?
						 &handle : NULL);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Fence event failed to create fence.\n");
			return ret;
		}
	}

	BUG_ON(fence == NULL);

	ret = vmw_event_fence_action_create(file_priv, fence,
					    arg->flags,
					    arg->user_data,
					    true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Failed to attach event to fence.\n");
		goto out_no_create;
	}

	vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
				    handle, -1);
	vmw_fence_obj_unreference(&fence);
	return 0;
out_no_create:
	/* Undo the user-space reference taken/created above, if any. */
	if (user_fence_rep != NULL)
		ttm_ref_object_base_unref(tfile, handle);
out_no_ref_obj:
	vmw_fence_obj_unreference(&fence);
	return ret;
}
1109