xref: /openbmc/linux/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c (revision 1ac731c529cd4d6adbce134754b51ff7d822b145)
18426ed9cSZack Rusin // SPDX-License-Identifier: GPL-2.0 OR MIT
28426ed9cSZack Rusin /**************************************************************************
38426ed9cSZack Rusin  *
409881d29SZack Rusin  * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
58426ed9cSZack Rusin  *
68426ed9cSZack Rusin  * Permission is hereby granted, free of charge, to any person obtaining a
78426ed9cSZack Rusin  * copy of this software and associated documentation files (the
88426ed9cSZack Rusin  * "Software"), to deal in the Software without restriction, including
98426ed9cSZack Rusin  * without limitation the rights to use, copy, modify, merge, publish,
108426ed9cSZack Rusin  * distribute, sub license, and/or sell copies of the Software, and to
118426ed9cSZack Rusin  * permit persons to whom the Software is furnished to do so, subject to
128426ed9cSZack Rusin  * the following conditions:
138426ed9cSZack Rusin  *
148426ed9cSZack Rusin  * The above copyright notice and this permission notice (including the
158426ed9cSZack Rusin  * next paragraph) shall be included in all copies or substantial portions
168426ed9cSZack Rusin  * of the Software.
178426ed9cSZack Rusin  *
188426ed9cSZack Rusin  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
198426ed9cSZack Rusin  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
208426ed9cSZack Rusin  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
218426ed9cSZack Rusin  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
228426ed9cSZack Rusin  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
238426ed9cSZack Rusin  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
248426ed9cSZack Rusin  * USE OR OTHER DEALINGS IN THE SOFTWARE.
258426ed9cSZack Rusin  *
268426ed9cSZack Rusin  **************************************************************************/
2709881d29SZack Rusin #include "vmwgfx_bo.h"
2809881d29SZack Rusin #include "vmwgfx_drv.h"
2909881d29SZack Rusin #include "vmwgfx_devcaps.h"
308426ed9cSZack Rusin 
318426ed9cSZack Rusin #include <drm/ttm/ttm_placement.h>
328426ed9cSZack Rusin 
3309881d29SZack Rusin #include <linux/sched/signal.h>
348426ed9cSZack Rusin 
vmw_supports_3d(struct vmw_private * dev_priv)358426ed9cSZack Rusin bool vmw_supports_3d(struct vmw_private *dev_priv)
368426ed9cSZack Rusin {
378426ed9cSZack Rusin 	uint32_t fifo_min, hwversion;
382cd80dbdSZack Rusin 	const struct vmw_fifo_state *fifo = dev_priv->fifo;
398426ed9cSZack Rusin 
408426ed9cSZack Rusin 	if (!(dev_priv->capabilities & SVGA_CAP_3D))
418426ed9cSZack Rusin 		return false;
428426ed9cSZack Rusin 
438426ed9cSZack Rusin 	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
448426ed9cSZack Rusin 		uint32_t result;
458426ed9cSZack Rusin 
468426ed9cSZack Rusin 		if (!dev_priv->has_mob)
478426ed9cSZack Rusin 			return false;
488426ed9cSZack Rusin 
49d92223eaSZack Rusin 		result = vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_3D);
508426ed9cSZack Rusin 
518426ed9cSZack Rusin 		return (result != 0);
528426ed9cSZack Rusin 	}
538426ed9cSZack Rusin 
548426ed9cSZack Rusin 	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
558426ed9cSZack Rusin 		return false;
568426ed9cSZack Rusin 
572cd80dbdSZack Rusin 	BUG_ON(vmw_is_svga_v3(dev_priv));
582cd80dbdSZack Rusin 
598426ed9cSZack Rusin 	fifo_min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
608426ed9cSZack Rusin 	if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
618426ed9cSZack Rusin 		return false;
628426ed9cSZack Rusin 
638426ed9cSZack Rusin 	hwversion = vmw_fifo_mem_read(dev_priv,
648426ed9cSZack Rusin 				      ((fifo->capabilities &
658426ed9cSZack Rusin 					SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
668426ed9cSZack Rusin 					       SVGA_FIFO_3D_HWVERSION_REVISED :
678426ed9cSZack Rusin 					       SVGA_FIFO_3D_HWVERSION));
688426ed9cSZack Rusin 
698426ed9cSZack Rusin 	if (hwversion == 0)
708426ed9cSZack Rusin 		return false;
718426ed9cSZack Rusin 
728426ed9cSZack Rusin 	if (hwversion < SVGA3D_HWVERSION_WS8_B1)
738426ed9cSZack Rusin 		return false;
748426ed9cSZack Rusin 
758426ed9cSZack Rusin 	/* Legacy Display Unit does not support surfaces */
768426ed9cSZack Rusin 	if (dev_priv->active_display_unit == vmw_du_legacy)
778426ed9cSZack Rusin 		return false;
788426ed9cSZack Rusin 
798426ed9cSZack Rusin 	return true;
808426ed9cSZack Rusin }
818426ed9cSZack Rusin 
vmw_fifo_have_pitchlock(struct vmw_private * dev_priv)828426ed9cSZack Rusin bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
838426ed9cSZack Rusin {
848426ed9cSZack Rusin 	uint32_t caps;
858426ed9cSZack Rusin 
868426ed9cSZack Rusin 	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
878426ed9cSZack Rusin 		return false;
888426ed9cSZack Rusin 
898426ed9cSZack Rusin 	caps = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CAPABILITIES);
908426ed9cSZack Rusin 	if (caps & SVGA_FIFO_CAP_PITCHLOCK)
918426ed9cSZack Rusin 		return true;
928426ed9cSZack Rusin 
938426ed9cSZack Rusin 	return false;
948426ed9cSZack Rusin }
958426ed9cSZack Rusin 
vmw_fifo_create(struct vmw_private * dev_priv)962cd80dbdSZack Rusin struct vmw_fifo_state *vmw_fifo_create(struct vmw_private *dev_priv)
978426ed9cSZack Rusin {
982cd80dbdSZack Rusin 	struct vmw_fifo_state *fifo;
998426ed9cSZack Rusin 	uint32_t max;
1008426ed9cSZack Rusin 	uint32_t min;
1018426ed9cSZack Rusin 
1022cd80dbdSZack Rusin 	if (!dev_priv->fifo_mem)
1032cd80dbdSZack Rusin 		return NULL;
1042cd80dbdSZack Rusin 
1052cd80dbdSZack Rusin 	fifo = kzalloc(sizeof(*fifo), GFP_KERNEL);
1062f70cbf7SColin Ian King 	if (!fifo)
1072f70cbf7SColin Ian King 		return ERR_PTR(-ENOMEM);
1088426ed9cSZack Rusin 	fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
1098426ed9cSZack Rusin 	fifo->static_buffer = vmalloc(fifo->static_buffer_size);
1102f70cbf7SColin Ian King 	if (unlikely(fifo->static_buffer == NULL)) {
1112f70cbf7SColin Ian King 		kfree(fifo);
1122cd80dbdSZack Rusin 		return ERR_PTR(-ENOMEM);
1132f70cbf7SColin Ian King 	}
1148426ed9cSZack Rusin 
1158426ed9cSZack Rusin 	fifo->dynamic_buffer = NULL;
1168426ed9cSZack Rusin 	fifo->reserved_size = 0;
1178426ed9cSZack Rusin 	fifo->using_bounce_buffer = false;
1188426ed9cSZack Rusin 
1198426ed9cSZack Rusin 	mutex_init(&fifo->fifo_mutex);
1208426ed9cSZack Rusin 	init_rwsem(&fifo->rwsem);
1218426ed9cSZack Rusin 	min = 4;
1228426ed9cSZack Rusin 	if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
1238426ed9cSZack Rusin 		min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
1248426ed9cSZack Rusin 	min <<= 2;
1258426ed9cSZack Rusin 
1268426ed9cSZack Rusin 	if (min < PAGE_SIZE)
1278426ed9cSZack Rusin 		min = PAGE_SIZE;
1288426ed9cSZack Rusin 
1298426ed9cSZack Rusin 	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_MIN, min);
1308426ed9cSZack Rusin 	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_MAX, dev_priv->fifo_mem_size);
1318426ed9cSZack Rusin 	wmb();
1328426ed9cSZack Rusin 	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_NEXT_CMD, min);
1338426ed9cSZack Rusin 	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_STOP, min);
1348426ed9cSZack Rusin 	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_BUSY, 0);
1358426ed9cSZack Rusin 	mb();
1368426ed9cSZack Rusin 
1378426ed9cSZack Rusin 	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
1388426ed9cSZack Rusin 
1398426ed9cSZack Rusin 	max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
1408426ed9cSZack Rusin 	min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
1418426ed9cSZack Rusin 	fifo->capabilities = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CAPABILITIES);
1428426ed9cSZack Rusin 
1432b273544SZack Rusin 	drm_info(&dev_priv->drm,
1442b273544SZack Rusin 		 "Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
1458426ed9cSZack Rusin 		 (unsigned int) max,
1468426ed9cSZack Rusin 		 (unsigned int) min,
1478426ed9cSZack Rusin 		 (unsigned int) fifo->capabilities);
148c451af78SZack Rusin 
149c451af78SZack Rusin 	if (unlikely(min >= max)) {
150c451af78SZack Rusin 		drm_warn(&dev_priv->drm,
151c451af78SZack Rusin 			 "FIFO memory is not usable. Driver failed to initialize.");
152c451af78SZack Rusin 		return ERR_PTR(-ENXIO);
153c451af78SZack Rusin 	}
154c451af78SZack Rusin 
1552cd80dbdSZack Rusin 	return fifo;
1568426ed9cSZack Rusin }
1578426ed9cSZack Rusin 
/*
 * vmw_fifo_ping_host - Kick the device to start processing FIFO commands.
 *
 * Only issues the (expensive) sync register write when the BUSY flag
 * transitions from 0 to 1, i.e. the device was previously idle.
 */
void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
	u32 *fifo_mem = dev_priv->fifo_mem;

	if (!fifo_mem)
		return;

	if (cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0)
		vmw_write(dev_priv, SVGA_REG_SYNC, reason);
}
1658426ed9cSZack Rusin 
vmw_fifo_destroy(struct vmw_private * dev_priv)1662cd80dbdSZack Rusin void vmw_fifo_destroy(struct vmw_private *dev_priv)
1678426ed9cSZack Rusin {
1682cd80dbdSZack Rusin 	struct vmw_fifo_state *fifo = dev_priv->fifo;
1698426ed9cSZack Rusin 
1702cd80dbdSZack Rusin 	if (!fifo)
1712cd80dbdSZack Rusin 		return;
1728426ed9cSZack Rusin 
1738426ed9cSZack Rusin 	if (likely(fifo->static_buffer != NULL)) {
1748426ed9cSZack Rusin 		vfree(fifo->static_buffer);
1758426ed9cSZack Rusin 		fifo->static_buffer = NULL;
1768426ed9cSZack Rusin 	}
1778426ed9cSZack Rusin 
1788426ed9cSZack Rusin 	if (likely(fifo->dynamic_buffer != NULL)) {
1798426ed9cSZack Rusin 		vfree(fifo->dynamic_buffer);
1808426ed9cSZack Rusin 		fifo->dynamic_buffer = NULL;
1818426ed9cSZack Rusin 	}
1822cd80dbdSZack Rusin 	kfree(fifo);
1832cd80dbdSZack Rusin 	dev_priv->fifo = NULL;
1848426ed9cSZack Rusin }
1858426ed9cSZack Rusin 
vmw_fifo_is_full(struct vmw_private * dev_priv,uint32_t bytes)1868426ed9cSZack Rusin static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
1878426ed9cSZack Rusin {
1888426ed9cSZack Rusin 	uint32_t max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
1898426ed9cSZack Rusin 	uint32_t next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD);
1908426ed9cSZack Rusin 	uint32_t min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
1918426ed9cSZack Rusin 	uint32_t stop = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_STOP);
1928426ed9cSZack Rusin 
1938426ed9cSZack Rusin 	return ((max - next_cmd) + (stop - min) <= bytes);
1948426ed9cSZack Rusin }
1958426ed9cSZack Rusin 
/*
 * vmw_fifo_wait_noirq - Poll for FIFO space without device interrupts.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes of FIFO space to wait for.
 * @interruptible: Whether signals may abort the wait.
 * @timeout: Maximum wait in jiffies.
 *
 * Used when the device lacks SVGA_CAP_IRQMASK. Polls vmw_fifo_is_full()
 * once per schedule_timeout(1) tick until space appears, the timeout
 * expires (-EBUSY) or a signal arrives (-ERESTARTSYS, interruptible only).
 */
static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
			       uint32_t bytes, bool interruptible,
			       unsigned long timeout)
{
	int ret = 0;
	unsigned long end_jiffies = jiffies + timeout;
	DEFINE_WAIT(__wait);

	DRM_INFO("Fifo wait noirq.\n");

	for (;;) {
		/*
		 * Set the task state before re-checking the condition so a
		 * concurrent wake-up between the check and the sleep is not
		 * lost (standard prepare_to_wait() protocol).
		 */
		prepare_to_wait(&dev_priv->fifo_queue, &__wait,
				(interruptible) ?
				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (!vmw_fifo_is_full(dev_priv, bytes))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			ret = -EBUSY;
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
		schedule_timeout(1);
		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&dev_priv->fifo_queue, &__wait);
	/* Give other waiters a chance to re-evaluate the condition. */
	wake_up_all(&dev_priv->fifo_queue);
	DRM_INFO("Fifo noirq exit.\n");
	return ret;
}
2288426ed9cSZack Rusin 
/*
 * vmw_fifo_wait - Wait for @bytes of FIFO command space to become free.
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes of FIFO space to wait for.
 * @interruptible: Whether signals may abort the wait.
 * @timeout: Maximum wait in jiffies.
 *
 * Returns 0 on success, -EBUSY on timeout, or -ERESTARTSYS if an
 * interruptible wait was aborted by a signal.
 */
static int vmw_fifo_wait(struct vmw_private *dev_priv,
			 uint32_t bytes, bool interruptible,
			 unsigned long timeout)
{
	long ret = 1L;

	/* Fast path: space is already available. */
	if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
		return 0;

	/* Tell the device we're stalled so it drains the FIFO. */
	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL);
	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return vmw_fifo_wait_noirq(dev_priv, bytes,
					   interruptible, timeout);

	/* Enable FIFO-progress interrupts while we sleep. */
	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
			       &dev_priv->fifo_queue_waiters);

	if (interruptible)
		ret = wait_event_interruptible_timeout
		    (dev_priv->fifo_queue,
		     !vmw_fifo_is_full(dev_priv, bytes), timeout);
	else
		ret = wait_event_timeout
		    (dev_priv->fifo_queue,
		     !vmw_fifo_is_full(dev_priv, bytes), timeout);

	/* wait_event_*_timeout: 0 = timed out, >0 = condition met. */
	if (unlikely(ret == 0))
		ret = -EBUSY;
	else if (likely(ret > 0))
		ret = 0;

	vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
				  &dev_priv->fifo_queue_waiters);

	return ret;
}
2658426ed9cSZack Rusin 
266b8441a4dSLee Jones /*
2678426ed9cSZack Rusin  * Reserve @bytes number of bytes in the fifo.
2688426ed9cSZack Rusin  *
2698426ed9cSZack Rusin  * This function will return NULL (error) on two conditions:
2708426ed9cSZack Rusin  *  If it timeouts waiting for fifo space, or if @bytes is larger than the
2718426ed9cSZack Rusin  *   available fifo space.
2728426ed9cSZack Rusin  *
2738426ed9cSZack Rusin  * Returns:
2748426ed9cSZack Rusin  *   Pointer to the fifo, or null on error (possible hardware hang).
2758426ed9cSZack Rusin  */
vmw_local_fifo_reserve(struct vmw_private * dev_priv,uint32_t bytes)2768426ed9cSZack Rusin static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
2778426ed9cSZack Rusin 				    uint32_t bytes)
2788426ed9cSZack Rusin {
2792cd80dbdSZack Rusin 	struct vmw_fifo_state *fifo_state = dev_priv->fifo;
2808426ed9cSZack Rusin 	u32  *fifo_mem = dev_priv->fifo_mem;
2818426ed9cSZack Rusin 	uint32_t max;
2828426ed9cSZack Rusin 	uint32_t min;
2838426ed9cSZack Rusin 	uint32_t next_cmd;
2848426ed9cSZack Rusin 	uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
2858426ed9cSZack Rusin 	int ret;
2868426ed9cSZack Rusin 
2878426ed9cSZack Rusin 	mutex_lock(&fifo_state->fifo_mutex);
2888426ed9cSZack Rusin 	max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
2898426ed9cSZack Rusin 	min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
2908426ed9cSZack Rusin 	next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD);
2918426ed9cSZack Rusin 
2928426ed9cSZack Rusin 	if (unlikely(bytes >= (max - min)))
2938426ed9cSZack Rusin 		goto out_err;
2948426ed9cSZack Rusin 
2958426ed9cSZack Rusin 	BUG_ON(fifo_state->reserved_size != 0);
2968426ed9cSZack Rusin 	BUG_ON(fifo_state->dynamic_buffer != NULL);
2978426ed9cSZack Rusin 
2988426ed9cSZack Rusin 	fifo_state->reserved_size = bytes;
2998426ed9cSZack Rusin 
3008426ed9cSZack Rusin 	while (1) {
3018426ed9cSZack Rusin 		uint32_t stop = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_STOP);
3028426ed9cSZack Rusin 		bool need_bounce = false;
3038426ed9cSZack Rusin 		bool reserve_in_place = false;
3048426ed9cSZack Rusin 
3058426ed9cSZack Rusin 		if (next_cmd >= stop) {
3068426ed9cSZack Rusin 			if (likely((next_cmd + bytes < max ||
3078426ed9cSZack Rusin 				    (next_cmd + bytes == max && stop > min))))
3088426ed9cSZack Rusin 				reserve_in_place = true;
3098426ed9cSZack Rusin 
3108426ed9cSZack Rusin 			else if (vmw_fifo_is_full(dev_priv, bytes)) {
3118426ed9cSZack Rusin 				ret = vmw_fifo_wait(dev_priv, bytes,
3128426ed9cSZack Rusin 						    false, 3 * HZ);
3138426ed9cSZack Rusin 				if (unlikely(ret != 0))
3148426ed9cSZack Rusin 					goto out_err;
3158426ed9cSZack Rusin 			} else
3168426ed9cSZack Rusin 				need_bounce = true;
3178426ed9cSZack Rusin 
3188426ed9cSZack Rusin 		} else {
3198426ed9cSZack Rusin 
3208426ed9cSZack Rusin 			if (likely((next_cmd + bytes < stop)))
3218426ed9cSZack Rusin 				reserve_in_place = true;
3228426ed9cSZack Rusin 			else {
3238426ed9cSZack Rusin 				ret = vmw_fifo_wait(dev_priv, bytes,
3248426ed9cSZack Rusin 						    false, 3 * HZ);
3258426ed9cSZack Rusin 				if (unlikely(ret != 0))
3268426ed9cSZack Rusin 					goto out_err;
3278426ed9cSZack Rusin 			}
3288426ed9cSZack Rusin 		}
3298426ed9cSZack Rusin 
3308426ed9cSZack Rusin 		if (reserve_in_place) {
3318426ed9cSZack Rusin 			if (reserveable || bytes <= sizeof(uint32_t)) {
3328426ed9cSZack Rusin 				fifo_state->using_bounce_buffer = false;
3338426ed9cSZack Rusin 
3348426ed9cSZack Rusin 				if (reserveable)
3358426ed9cSZack Rusin 					vmw_fifo_mem_write(dev_priv,
3368426ed9cSZack Rusin 							   SVGA_FIFO_RESERVED,
3378426ed9cSZack Rusin 							   bytes);
3388426ed9cSZack Rusin 				return (void __force *) (fifo_mem +
3398426ed9cSZack Rusin 							 (next_cmd >> 2));
3408426ed9cSZack Rusin 			} else {
3418426ed9cSZack Rusin 				need_bounce = true;
3428426ed9cSZack Rusin 			}
3438426ed9cSZack Rusin 		}
3448426ed9cSZack Rusin 
3458426ed9cSZack Rusin 		if (need_bounce) {
3468426ed9cSZack Rusin 			fifo_state->using_bounce_buffer = true;
3478426ed9cSZack Rusin 			if (bytes < fifo_state->static_buffer_size)
3488426ed9cSZack Rusin 				return fifo_state->static_buffer;
3498426ed9cSZack Rusin 			else {
3508426ed9cSZack Rusin 				fifo_state->dynamic_buffer = vmalloc(bytes);
3518426ed9cSZack Rusin 				if (!fifo_state->dynamic_buffer)
3528426ed9cSZack Rusin 					goto out_err;
3538426ed9cSZack Rusin 				return fifo_state->dynamic_buffer;
3548426ed9cSZack Rusin 			}
3558426ed9cSZack Rusin 		}
3568426ed9cSZack Rusin 	}
3578426ed9cSZack Rusin out_err:
3588426ed9cSZack Rusin 	fifo_state->reserved_size = 0;
3598426ed9cSZack Rusin 	mutex_unlock(&fifo_state->fifo_mutex);
3608426ed9cSZack Rusin 
3618426ed9cSZack Rusin 	return NULL;
3628426ed9cSZack Rusin }
3638426ed9cSZack Rusin 
/*
 * vmw_cmd_ctx_reserve - Reserve command space for a given context.
 *
 * Prefers the command-buffer manager when present; otherwise falls back
 * to the local FIFO, which only supports the invalid (global) context.
 * Returns NULL on any failure.
 */
void *vmw_cmd_ctx_reserve(struct vmw_private *dev_priv, uint32_t bytes,
			  int ctx_id)
{
	void *ret;

	if (dev_priv->cman) {
		ret = vmw_cmdbuf_reserve(dev_priv->cman, bytes,
					 ctx_id, false, NULL);
	} else if (ctx_id != SVGA3D_INVALID_ID) {
		/* Per-context reservation requires a command buffer manager. */
		WARN(1, "Command buffer has not been allocated.\n");
		ret = NULL;
	} else {
		ret = vmw_local_fifo_reserve(dev_priv, bytes);
	}

	return IS_ERR_OR_NULL(ret) ? NULL : ret;
}
3838426ed9cSZack Rusin 
/*
 * vmw_fifo_res_copy - Copy a bounce-buffered command into the FIFO using
 * the RESERVE capability.
 *
 * @fifo_state: FIFO state holding the bounce buffer.
 * @vmw: Pointer to device private structure.
 * @next_cmd: Current FIFO write offset in bytes.
 * @max: FIFO end offset (exclusive) in bytes.
 * @min: FIFO start offset in bytes.
 * @bytes: Command size in bytes.
 *
 * Marks @bytes as reserved so the device won't read the region while it
 * is being filled, then copies in up to two chunks (the copy may wrap
 * from max back to min).
 */
static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
			      struct vmw_private *vmw,
			      uint32_t next_cmd,
			      uint32_t max, uint32_t min, uint32_t bytes)
{
	u32 *fifo_mem = vmw->fifo_mem;
	uint32_t chunk_size = max - next_cmd;
	uint32_t rest;
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
	    fifo_state->dynamic_buffer : fifo_state->static_buffer;

	if (bytes < chunk_size)
		chunk_size = bytes;

	/* The reservation must be visible before the payload is written. */
	vmw_fifo_mem_write(vmw, SVGA_FIFO_RESERVED, bytes);
	mb();
	memcpy(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
	rest = bytes - chunk_size;
	/* Wrapped portion, if any, continues at the start of the ring. */
	if (rest)
		memcpy(fifo_mem + (min >> 2), buffer + (chunk_size >> 2), rest);
}
4058426ed9cSZack Rusin 
/*
 * vmw_fifo_slow_copy - Copy a bounce-buffered command into the FIFO one
 * dword at a time.
 *
 * @fifo_state: FIFO state holding the bounce buffer.
 * @vmw: Pointer to device private structure.
 * @next_cmd: Current FIFO write offset in bytes.
 * @max: FIFO end offset (exclusive) in bytes.
 * @min: FIFO start offset in bytes.
 * @bytes: Command size in bytes (must be a multiple of 4).
 *
 * Fallback for devices without SVGA_FIFO_CAP_RESERVE: NEXT_CMD is
 * advanced after every single dword, with barriers, so the device never
 * observes a partially written command word.
 */
static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
			       struct vmw_private *vmw,
			       uint32_t next_cmd,
			       uint32_t max, uint32_t min, uint32_t bytes)
{
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
	    fifo_state->dynamic_buffer : fifo_state->static_buffer;

	while (bytes > 0) {
		vmw_fifo_mem_write(vmw, (next_cmd >> 2), *buffer++);
		next_cmd += sizeof(uint32_t);
		/* Wrap the write pointer at the end of the ring. */
		if (unlikely(next_cmd == max))
			next_cmd = min;
		/* Data must be visible before NEXT_CMD advances past it. */
		mb();
		vmw_fifo_mem_write(vmw, SVGA_FIFO_NEXT_CMD, next_cmd);
		mb();
		bytes -= sizeof(uint32_t);
	}
}
4258426ed9cSZack Rusin 
/*
 * vmw_local_fifo_commit - Commit @bytes of a prior vmw_local_fifo_reserve().
 *
 * @dev_priv: Pointer to device private structure.
 * @bytes: Number of bytes actually used (may be less than reserved).
 *
 * Copies out any bounce buffer, advances NEXT_CMD, clears the
 * reservation, pings the device and releases the fifo_mutex taken by
 * vmw_local_fifo_reserve().
 */
static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = dev_priv->fifo;
	uint32_t next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD);
	uint32_t max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
	uint32_t min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
	bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;

	/* Commands are dword-aligned and can't exceed the reservation. */
	BUG_ON((bytes & 3) != 0);
	BUG_ON(bytes > fifo_state->reserved_size);

	fifo_state->reserved_size = 0;

	if (fifo_state->using_bounce_buffer) {
		if (reserveable)
			vmw_fifo_res_copy(fifo_state, dev_priv,
					  next_cmd, max, min, bytes);
		else
			vmw_fifo_slow_copy(fifo_state, dev_priv,
					   next_cmd, max, min, bytes);

		if (fifo_state->dynamic_buffer) {
			vfree(fifo_state->dynamic_buffer);
			fifo_state->dynamic_buffer = NULL;
		}

	}

	down_write(&fifo_state->rwsem);
	/*
	 * For slow (non-reserveable, non-bounce) copies NEXT_CMD has
	 * already been advanced dword by dword; skip it here.
	 */
	if (fifo_state->using_bounce_buffer || reserveable) {
		next_cmd += bytes;
		if (next_cmd >= max)
			next_cmd -= max - min;
		/* Command data must be visible before NEXT_CMD moves. */
		mb();
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_NEXT_CMD, next_cmd);
	}

	if (reserveable)
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_RESERVED, 0);
	mb();
	up_write(&fifo_state->rwsem);
	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	mutex_unlock(&fifo_state->fifo_mutex);
}
4708426ed9cSZack Rusin 
/*
 * vmw_cmd_commit - Commit previously reserved command space without an
 * explicit flush.
 */
void vmw_cmd_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
	if (!dev_priv->cman) {
		vmw_local_fifo_commit(dev_priv, bytes);
		return;
	}

	vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, false);
}
4788426ed9cSZack Rusin 
4798426ed9cSZack Rusin 
4808426ed9cSZack Rusin /**
4812cd80dbdSZack Rusin  * vmw_cmd_commit_flush - Commit fifo space and flush any buffered commands.
4828426ed9cSZack Rusin  *
4838426ed9cSZack Rusin  * @dev_priv: Pointer to device private structure.
4848426ed9cSZack Rusin  * @bytes: Number of bytes to commit.
4858426ed9cSZack Rusin  */
vmw_cmd_commit_flush(struct vmw_private * dev_priv,uint32_t bytes)4868426ed9cSZack Rusin void vmw_cmd_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
4878426ed9cSZack Rusin {
4888426ed9cSZack Rusin 	if (dev_priv->cman)
4898426ed9cSZack Rusin 		vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, true);
4908426ed9cSZack Rusin 	else
4918426ed9cSZack Rusin 		vmw_local_fifo_commit(dev_priv, bytes);
4928426ed9cSZack Rusin }
4938426ed9cSZack Rusin 
4948426ed9cSZack Rusin /**
4952cd80dbdSZack Rusin  * vmw_cmd_flush - Flush any buffered commands and make sure command processing
4968426ed9cSZack Rusin  * starts.
4978426ed9cSZack Rusin  *
4988426ed9cSZack Rusin  * @dev_priv: Pointer to device private structure.
4998426ed9cSZack Rusin  * @interruptible: Whether to wait interruptible if function needs to sleep.
5008426ed9cSZack Rusin  */
vmw_cmd_flush(struct vmw_private * dev_priv,bool interruptible)5018426ed9cSZack Rusin int vmw_cmd_flush(struct vmw_private *dev_priv, bool interruptible)
5028426ed9cSZack Rusin {
5038426ed9cSZack Rusin 	might_sleep();
5048426ed9cSZack Rusin 
5058426ed9cSZack Rusin 	if (dev_priv->cman)
5068426ed9cSZack Rusin 		return vmw_cmdbuf_cur_flush(dev_priv->cman, interruptible);
5078426ed9cSZack Rusin 	else
5088426ed9cSZack Rusin 		return 0;
5098426ed9cSZack Rusin }
5108426ed9cSZack Rusin 
/*
 * vmw_cmd_send_fence - Emit a fence command and return its sequence number.
 *
 * @dev_priv: Pointer to device private structure.
 * @seqno: Outputs the sequence number associated with the fence.
 *
 * Returns 0 on success or -ENOMEM if command space could not be
 * reserved; in that case a fallback wait on the current seqno is
 * attempted before returning.
 */
int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
{
	struct svga_fifo_cmd_fence *cmd_fence;
	u32 *fm;
	int ret = 0;
	uint32_t bytes = sizeof(u32) + sizeof(*cmd_fence);

	fm = VMW_CMD_RESERVE(dev_priv, bytes);
	if (unlikely(fm == NULL)) {
		/*
		 * No command space: report the last issued seqno and try a
		 * best-effort wait for it so callers don't run ahead.
		 */
		*seqno = atomic_read(&dev_priv->marker_seq);
		ret = -ENOMEM;
		(void)vmw_fallback_wait(dev_priv, false, true, *seqno,
					false, 3*HZ);
		goto out_err;
	}

	/* Seqno 0 is reserved as "no fence"; skip over it on wrap. */
	do {
		*seqno = atomic_add_return(1, &dev_priv->marker_seq);
	} while (*seqno == 0);

	if (!vmw_has_fences(dev_priv)) {

		/*
		 * Don't request hardware to send a fence. The
		 * waiting code in vmwgfx_irq.c will emulate this.
		 */

		vmw_cmd_commit(dev_priv, 0);
		return 0;
	}

	*fm++ = SVGA_CMD_FENCE;
	cmd_fence = (struct svga_fifo_cmd_fence *) fm;
	cmd_fence->fence = *seqno;
	vmw_cmd_commit_flush(dev_priv, bytes);
	vmw_update_seqno(dev_priv);

out_err:
	return ret;
}
5518426ed9cSZack Rusin 
5528426ed9cSZack Rusin /**
5532cd80dbdSZack Rusin  * vmw_cmd_emit_dummy_legacy_query - emits a dummy query to the fifo using
5548426ed9cSZack Rusin  * legacy query commands.
5558426ed9cSZack Rusin  *
5568426ed9cSZack Rusin  * @dev_priv: The device private structure.
5578426ed9cSZack Rusin  * @cid: The hardware context id used for the query.
5588426ed9cSZack Rusin  *
5592cd80dbdSZack Rusin  * See the vmw_cmd_emit_dummy_query documentation.
5608426ed9cSZack Rusin  */
vmw_cmd_emit_dummy_legacy_query(struct vmw_private * dev_priv,uint32_t cid)5612cd80dbdSZack Rusin static int vmw_cmd_emit_dummy_legacy_query(struct vmw_private *dev_priv,
5628426ed9cSZack Rusin 					    uint32_t cid)
5638426ed9cSZack Rusin {
5648426ed9cSZack Rusin 	/*
5658426ed9cSZack Rusin 	 * A query wait without a preceding query end will
5668426ed9cSZack Rusin 	 * actually finish all queries for this cid
5678426ed9cSZack Rusin 	 * without writing to the query result structure.
5688426ed9cSZack Rusin 	 */
5698426ed9cSZack Rusin 
570*668b2066SZack Rusin 	struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->tbo;
5718426ed9cSZack Rusin 	struct {
5728426ed9cSZack Rusin 		SVGA3dCmdHeader header;
5738426ed9cSZack Rusin 		SVGA3dCmdWaitForQuery body;
5748426ed9cSZack Rusin 	} *cmd;
5758426ed9cSZack Rusin 
5768426ed9cSZack Rusin 	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
5778426ed9cSZack Rusin 	if (unlikely(cmd == NULL))
5788426ed9cSZack Rusin 		return -ENOMEM;
5798426ed9cSZack Rusin 
5808426ed9cSZack Rusin 	cmd->header.id = SVGA_3D_CMD_WAIT_FOR_QUERY;
5818426ed9cSZack Rusin 	cmd->header.size = sizeof(cmd->body);
5828426ed9cSZack Rusin 	cmd->body.cid = cid;
5838426ed9cSZack Rusin 	cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
5848426ed9cSZack Rusin 
585d3116756SChristian König 	if (bo->resource->mem_type == TTM_PL_VRAM) {
5868426ed9cSZack Rusin 		cmd->body.guestResult.gmrId = SVGA_GMR_FRAMEBUFFER;
587d3116756SChristian König 		cmd->body.guestResult.offset = bo->resource->start << PAGE_SHIFT;
5888426ed9cSZack Rusin 	} else {
589d3116756SChristian König 		cmd->body.guestResult.gmrId = bo->resource->start;
5908426ed9cSZack Rusin 		cmd->body.guestResult.offset = 0;
5918426ed9cSZack Rusin 	}
5928426ed9cSZack Rusin 
5938426ed9cSZack Rusin 	vmw_cmd_commit(dev_priv, sizeof(*cmd));
5948426ed9cSZack Rusin 
5958426ed9cSZack Rusin 	return 0;
5968426ed9cSZack Rusin }
5978426ed9cSZack Rusin 
5988426ed9cSZack Rusin /**
5992cd80dbdSZack Rusin  * vmw_cmd_emit_dummy_gb_query - emits a dummy query to the fifo using
6008426ed9cSZack Rusin  * guest-backed resource query commands.
6018426ed9cSZack Rusin  *
6028426ed9cSZack Rusin  * @dev_priv: The device private structure.
6038426ed9cSZack Rusin  * @cid: The hardware context id used for the query.
6048426ed9cSZack Rusin  *
6052cd80dbdSZack Rusin  * See the vmw_cmd_emit_dummy_query documentation.
6068426ed9cSZack Rusin  */
vmw_cmd_emit_dummy_gb_query(struct vmw_private * dev_priv,uint32_t cid)6072cd80dbdSZack Rusin static int vmw_cmd_emit_dummy_gb_query(struct vmw_private *dev_priv,
6088426ed9cSZack Rusin 				       uint32_t cid)
6098426ed9cSZack Rusin {
6108426ed9cSZack Rusin 	/*
6118426ed9cSZack Rusin 	 * A query wait without a preceding query end will
6128426ed9cSZack Rusin 	 * actually finish all queries for this cid
6138426ed9cSZack Rusin 	 * without writing to the query result structure.
6148426ed9cSZack Rusin 	 */
6158426ed9cSZack Rusin 
616*668b2066SZack Rusin 	struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->tbo;
6178426ed9cSZack Rusin 	struct {
6188426ed9cSZack Rusin 		SVGA3dCmdHeader header;
6198426ed9cSZack Rusin 		SVGA3dCmdWaitForGBQuery body;
6208426ed9cSZack Rusin 	} *cmd;
6218426ed9cSZack Rusin 
6228426ed9cSZack Rusin 	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
6238426ed9cSZack Rusin 	if (unlikely(cmd == NULL))
6248426ed9cSZack Rusin 		return -ENOMEM;
6258426ed9cSZack Rusin 
6268426ed9cSZack Rusin 	cmd->header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
6278426ed9cSZack Rusin 	cmd->header.size = sizeof(cmd->body);
6288426ed9cSZack Rusin 	cmd->body.cid = cid;
6298426ed9cSZack Rusin 	cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
630d3116756SChristian König 	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
631d3116756SChristian König 	cmd->body.mobid = bo->resource->start;
6328426ed9cSZack Rusin 	cmd->body.offset = 0;
6338426ed9cSZack Rusin 
6348426ed9cSZack Rusin 	vmw_cmd_commit(dev_priv, sizeof(*cmd));
6358426ed9cSZack Rusin 
6368426ed9cSZack Rusin 	return 0;
6378426ed9cSZack Rusin }
6388426ed9cSZack Rusin 
6398426ed9cSZack Rusin 
6408426ed9cSZack Rusin /**
6412cd80dbdSZack Rusin  * vmw_cmd_emit_dummy_query - emits a dummy query to the fifo using
6428426ed9cSZack Rusin  * appropriate resource query commands.
6438426ed9cSZack Rusin  *
6448426ed9cSZack Rusin  * @dev_priv: The device private structure.
6458426ed9cSZack Rusin  * @cid: The hardware context id used for the query.
6468426ed9cSZack Rusin  *
6478426ed9cSZack Rusin  * This function is used to emit a dummy occlusion query with
6488426ed9cSZack Rusin  * no primitives rendered between query begin and query end.
6498426ed9cSZack Rusin  * It's used to provide a query barrier, in order to know that when
6508426ed9cSZack Rusin  * this query is finished, all preceding queries are also finished.
6518426ed9cSZack Rusin  *
6528426ed9cSZack Rusin  * A Query results structure should have been initialized at the start
6538426ed9cSZack Rusin  * of the dev_priv->dummy_query_bo buffer object. And that buffer object
6548426ed9cSZack Rusin  * must also be either reserved or pinned when this function is called.
6558426ed9cSZack Rusin  *
6568426ed9cSZack Rusin  * Returns -ENOMEM on failure to reserve fifo space.
6578426ed9cSZack Rusin  */
vmw_cmd_emit_dummy_query(struct vmw_private * dev_priv,uint32_t cid)6588426ed9cSZack Rusin int vmw_cmd_emit_dummy_query(struct vmw_private *dev_priv,
6598426ed9cSZack Rusin 			      uint32_t cid)
6608426ed9cSZack Rusin {
6618426ed9cSZack Rusin 	if (dev_priv->has_mob)
6622cd80dbdSZack Rusin 		return vmw_cmd_emit_dummy_gb_query(dev_priv, cid);
6638426ed9cSZack Rusin 
6642cd80dbdSZack Rusin 	return vmw_cmd_emit_dummy_legacy_query(dev_priv, cid);
6652cd80dbdSZack Rusin }
6662cd80dbdSZack Rusin 
6672cd80dbdSZack Rusin 
6682cd80dbdSZack Rusin /**
6692cd80dbdSZack Rusin  * vmw_cmd_supported - returns true if the given device supports
6702cd80dbdSZack Rusin  * command queues.
6712cd80dbdSZack Rusin  *
6722cd80dbdSZack Rusin  * @vmw: The device private structure.
6732cd80dbdSZack Rusin  *
6742cd80dbdSZack Rusin  * Returns true if we can issue commands.
6752cd80dbdSZack Rusin  */
vmw_cmd_supported(struct vmw_private * vmw)6762cd80dbdSZack Rusin bool vmw_cmd_supported(struct vmw_private *vmw)
6772cd80dbdSZack Rusin {
6786f6f9788SZack Rusin 	bool has_cmdbufs =
6796f6f9788SZack Rusin 		(vmw->capabilities & (SVGA_CAP_COMMAND_BUFFERS |
6806f6f9788SZack Rusin 				      SVGA_CAP_CMD_BUFFERS_2)) != 0;
6816f6f9788SZack Rusin 	if (vmw_is_svga_v3(vmw))
6826f6f9788SZack Rusin 		return (has_cmdbufs &&
6836f6f9788SZack Rusin 			(vmw->capabilities & SVGA_CAP_GBOBJECTS) != 0);
6842cd80dbdSZack Rusin 	/*
6852cd80dbdSZack Rusin 	 * We have FIFO cmd's
6862cd80dbdSZack Rusin 	 */
6876f6f9788SZack Rusin 	return has_cmdbufs || vmw->fifo_mem != NULL;
6888426ed9cSZack Rusin }
689