1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /**************************************************************************
3  *
4  * Copyright 2009-2020 VMware, Inc., Palo Alto, CA., USA
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 
28 #include <linux/sched/signal.h>
29 
30 #include <drm/ttm/ttm_placement.h>
31 
32 #include "vmwgfx_drv.h"
33 #include "vmwgfx_devcaps.h"
34 
35 bool vmw_supports_3d(struct vmw_private *dev_priv)
36 {
37 	uint32_t fifo_min, hwversion;
38 	const struct vmw_fifo_state *fifo = dev_priv->fifo;
39 
40 	if (!(dev_priv->capabilities & SVGA_CAP_3D))
41 		return false;
42 
43 	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
44 		uint32_t result;
45 
46 		if (!dev_priv->has_mob)
47 			return false;
48 
49 		result = vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_3D);
50 
51 		return (result != 0);
52 	}
53 
54 	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
55 		return false;
56 
57 	BUG_ON(vmw_is_svga_v3(dev_priv));
58 
59 	fifo_min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
60 	if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
61 		return false;
62 
63 	hwversion = vmw_fifo_mem_read(dev_priv,
64 				      ((fifo->capabilities &
65 					SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
66 					       SVGA_FIFO_3D_HWVERSION_REVISED :
67 					       SVGA_FIFO_3D_HWVERSION));
68 
69 	if (hwversion == 0)
70 		return false;
71 
72 	if (hwversion < SVGA3D_HWVERSION_WS8_B1)
73 		return false;
74 
75 	/* Legacy Display Unit does not support surfaces */
76 	if (dev_priv->active_display_unit == vmw_du_legacy)
77 		return false;
78 
79 	return true;
80 }
81 
82 bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
83 {
84 	uint32_t caps;
85 
86 	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
87 		return false;
88 
89 	caps = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CAPABILITIES);
90 	if (caps & SVGA_FIFO_CAP_PITCHLOCK)
91 		return true;
92 
93 	return false;
94 }
95 
/*
 * vmw_fifo_create - Allocate and initialize the software FIFO state.
 *
 * @dev_priv: Pointer to the device private structure.
 *
 * Allocates the bounce buffers used for staged command reservations,
 * programs the FIFO registers in device FIFO memory and signals
 * config-done to the device.
 *
 * Returns: the new fifo state, NULL if the device exposes no FIFO
 * memory, or ERR_PTR(-ENOMEM) on allocation failure.
 */
struct vmw_fifo_state *vmw_fifo_create(struct vmw_private *dev_priv)
{
	struct vmw_fifo_state *fifo;
	uint32_t max;
	uint32_t min;

	/* No FIFO memory means this device has no command FIFO. */
	if (!dev_priv->fifo_mem)
		return NULL;

	fifo = kzalloc(sizeof(*fifo), GFP_KERNEL);
	if (!fifo)
		return ERR_PTR(-ENOMEM);
	/* Pre-allocated bounce buffer used for small reservations. */
	fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
	fifo->static_buffer = vmalloc(fifo->static_buffer_size);
	if (unlikely(fifo->static_buffer == NULL)) {
		kfree(fifo);
		return ERR_PTR(-ENOMEM);
	}

	fifo->dynamic_buffer = NULL;
	fifo->reserved_size = 0;
	fifo->using_bounce_buffer = false;

	mutex_init(&fifo->fifo_mutex);
	init_rwsem(&fifo->rwsem);
	/*
	 * The register area occupies the start of FIFO memory; the
	 * command area begins after it. Extended-FIFO devices report
	 * how many 32-bit registers they use, others get the minimum.
	 */
	min = 4;
	if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
		min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
	min <<= 2;

	if (min < PAGE_SIZE)
		min = PAGE_SIZE;

	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_MIN, min);
	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_MAX, dev_priv->fifo_mem_size);
	/* Publish min/max before the queue pointers that depend on them. */
	wmb();
	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_NEXT_CMD, min);
	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_STOP, min);
	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_BUSY, 0);
	/* All FIFO setup must be visible before enabling the device. */
	mb();

	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);

	/* Read back the values the device actually accepted. */
	max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
	min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
	fifo->capabilities = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CAPABILITIES);

	DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
		 (unsigned int) max,
		 (unsigned int) min,
		 (unsigned int) fifo->capabilities);
	return fifo;
}
149 
150 void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
151 {
152 	u32 *fifo_mem = dev_priv->fifo_mem;
153 	if (fifo_mem && cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0)
154 		vmw_write(dev_priv, SVGA_REG_SYNC, reason);
155 
156 }
157 
158 void vmw_fifo_destroy(struct vmw_private *dev_priv)
159 {
160 	struct vmw_fifo_state *fifo = dev_priv->fifo;
161 
162 	if (!fifo)
163 		return;
164 
165 	if (likely(fifo->static_buffer != NULL)) {
166 		vfree(fifo->static_buffer);
167 		fifo->static_buffer = NULL;
168 	}
169 
170 	if (likely(fifo->dynamic_buffer != NULL)) {
171 		vfree(fifo->dynamic_buffer);
172 		fifo->dynamic_buffer = NULL;
173 	}
174 	kfree(fifo);
175 	dev_priv->fifo = NULL;
176 }
177 
178 static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
179 {
180 	uint32_t max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
181 	uint32_t next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD);
182 	uint32_t min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
183 	uint32_t stop = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_STOP);
184 
185 	return ((max - next_cmd) + (stop - min) <= bytes);
186 }
187 
/*
 * vmw_fifo_wait_noirq - Poll for FIFO space without device interrupts.
 *
 * @dev_priv: Pointer to the device private structure.
 * @bytes: Number of bytes of FIFO space to wait for.
 * @interruptible: Whether the wait may be interrupted by signals.
 * @timeout: Timeout in jiffies.
 *
 * Fallback used when the device lacks SVGA_CAP_IRQMASK: sleeps one
 * tick at a time and re-checks the FIFO fill level.
 *
 * Returns: 0 on success, -EBUSY on timeout (possible device lockup),
 * or -ERESTARTSYS if interrupted by a signal.
 */
static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
			       uint32_t bytes, bool interruptible,
			       unsigned long timeout)
{
	int ret = 0;
	unsigned long end_jiffies = jiffies + timeout;
	DEFINE_WAIT(__wait);

	DRM_INFO("Fifo wait noirq.\n");

	for (;;) {
		prepare_to_wait(&dev_priv->fifo_queue, &__wait,
				(interruptible) ?
				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (!vmw_fifo_is_full(dev_priv, bytes))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			ret = -EBUSY;
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
		/* No progress interrupt will wake us; poll every tick. */
		schedule_timeout(1);
		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&dev_priv->fifo_queue, &__wait);
	/* Let any other waiters re-check for space as well. */
	wake_up_all(&dev_priv->fifo_queue);
	DRM_INFO("Fifo noirq exit.\n");
	return ret;
}
220 
/*
 * vmw_fifo_wait - Wait until at least @bytes of FIFO space is free.
 *
 * @dev_priv: Pointer to the device private structure.
 * @bytes: Number of bytes of FIFO space to wait for.
 * @interruptible: Whether the wait may be interrupted by signals.
 * @timeout: Timeout in jiffies.
 *
 * Pings the device so it drains the FIFO, then sleeps on FIFO-progress
 * interrupts, falling back to polling if the device cannot raise them.
 *
 * Returns: 0 on success, -EBUSY on timeout, or a negative error code
 * (e.g. -ERESTARTSYS) if the wait was interrupted.
 */
static int vmw_fifo_wait(struct vmw_private *dev_priv,
			 uint32_t bytes, bool interruptible,
			 unsigned long timeout)
{
	long ret = 1L;

	if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
		return 0;

	/* Kick the device so it starts consuming commands. */
	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL);
	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return vmw_fifo_wait_noirq(dev_priv, bytes,
					   interruptible, timeout);

	/* Enable FIFO-progress interrupt delivery while we sleep. */
	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
			       &dev_priv->fifo_queue_waiters);

	if (interruptible)
		ret = wait_event_interruptible_timeout
		    (dev_priv->fifo_queue,
		     !vmw_fifo_is_full(dev_priv, bytes), timeout);
	else
		ret = wait_event_timeout
		    (dev_priv->fifo_queue,
		     !vmw_fifo_is_full(dev_priv, bytes), timeout);

	/* 0 means timeout, > 0 means the condition became true. */
	if (unlikely(ret == 0))
		ret = -EBUSY;
	else if (likely(ret > 0))
		ret = 0;

	vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
				  &dev_priv->fifo_queue_waiters);

	return ret;
}
257 
258 /*
259  * Reserve @bytes number of bytes in the fifo.
260  *
261  * This function will return NULL (error) on two conditions:
262  *  If it timeouts waiting for fifo space, or if @bytes is larger than the
263  *   available fifo space.
264  *
265  * Returns:
266  *   Pointer to the fifo, or null on error (possible hardware hang).
267  */
268 static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
269 				    uint32_t bytes)
270 {
271 	struct vmw_fifo_state *fifo_state = dev_priv->fifo;
272 	u32  *fifo_mem = dev_priv->fifo_mem;
273 	uint32_t max;
274 	uint32_t min;
275 	uint32_t next_cmd;
276 	uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
277 	int ret;
278 
279 	mutex_lock(&fifo_state->fifo_mutex);
280 	max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
281 	min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
282 	next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD);
283 
284 	if (unlikely(bytes >= (max - min)))
285 		goto out_err;
286 
287 	BUG_ON(fifo_state->reserved_size != 0);
288 	BUG_ON(fifo_state->dynamic_buffer != NULL);
289 
290 	fifo_state->reserved_size = bytes;
291 
292 	while (1) {
293 		uint32_t stop = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_STOP);
294 		bool need_bounce = false;
295 		bool reserve_in_place = false;
296 
297 		if (next_cmd >= stop) {
298 			if (likely((next_cmd + bytes < max ||
299 				    (next_cmd + bytes == max && stop > min))))
300 				reserve_in_place = true;
301 
302 			else if (vmw_fifo_is_full(dev_priv, bytes)) {
303 				ret = vmw_fifo_wait(dev_priv, bytes,
304 						    false, 3 * HZ);
305 				if (unlikely(ret != 0))
306 					goto out_err;
307 			} else
308 				need_bounce = true;
309 
310 		} else {
311 
312 			if (likely((next_cmd + bytes < stop)))
313 				reserve_in_place = true;
314 			else {
315 				ret = vmw_fifo_wait(dev_priv, bytes,
316 						    false, 3 * HZ);
317 				if (unlikely(ret != 0))
318 					goto out_err;
319 			}
320 		}
321 
322 		if (reserve_in_place) {
323 			if (reserveable || bytes <= sizeof(uint32_t)) {
324 				fifo_state->using_bounce_buffer = false;
325 
326 				if (reserveable)
327 					vmw_fifo_mem_write(dev_priv,
328 							   SVGA_FIFO_RESERVED,
329 							   bytes);
330 				return (void __force *) (fifo_mem +
331 							 (next_cmd >> 2));
332 			} else {
333 				need_bounce = true;
334 			}
335 		}
336 
337 		if (need_bounce) {
338 			fifo_state->using_bounce_buffer = true;
339 			if (bytes < fifo_state->static_buffer_size)
340 				return fifo_state->static_buffer;
341 			else {
342 				fifo_state->dynamic_buffer = vmalloc(bytes);
343 				if (!fifo_state->dynamic_buffer)
344 					goto out_err;
345 				return fifo_state->dynamic_buffer;
346 			}
347 		}
348 	}
349 out_err:
350 	fifo_state->reserved_size = 0;
351 	mutex_unlock(&fifo_state->fifo_mutex);
352 
353 	return NULL;
354 }
355 
356 void *vmw_cmd_ctx_reserve(struct vmw_private *dev_priv, uint32_t bytes,
357 			  int ctx_id)
358 {
359 	void *ret;
360 
361 	if (dev_priv->cman)
362 		ret = vmw_cmdbuf_reserve(dev_priv->cman, bytes,
363 					 ctx_id, false, NULL);
364 	else if (ctx_id == SVGA3D_INVALID_ID)
365 		ret = vmw_local_fifo_reserve(dev_priv, bytes);
366 	else {
367 		WARN(1, "Command buffer has not been allocated.\n");
368 		ret = NULL;
369 	}
370 	if (IS_ERR_OR_NULL(ret))
371 		return NULL;
372 
373 	return ret;
374 }
375 
/*
 * vmw_fifo_res_copy - Copy a bounce buffer into reserved FIFO space.
 *
 * @fifo_state: The software fifo state holding the bounce buffers.
 * @vmw: Pointer to the device private structure.
 * @next_cmd: Current FIFO head (byte offset).
 * @max: End of the FIFO command area (byte offset).
 * @min: Start of the FIFO command area (byte offset).
 * @bytes: Number of bytes to copy.
 *
 * Used when SVGA_FIFO_CAP_RESERVE is available: the whole reservation
 * is published at once and the copy may wrap from @max back to @min.
 */
static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
			      struct vmw_private *vmw,
			      uint32_t next_cmd,
			      uint32_t max, uint32_t min, uint32_t bytes)
{
	u32 *fifo_mem = vmw->fifo_mem;
	uint32_t chunk_size = max - next_cmd;
	uint32_t rest;
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
	    fifo_state->dynamic_buffer : fifo_state->static_buffer;

	if (bytes < chunk_size)
		chunk_size = bytes;

	vmw_fifo_mem_write(vmw, SVGA_FIFO_RESERVED, bytes);
	/* The reservation must be visible before the command data. */
	mb();
	memcpy(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
	rest = bytes - chunk_size;
	/* Wrap-around: place the remainder at the start of the ring. */
	if (rest)
		memcpy(fifo_mem + (min >> 2), buffer + (chunk_size >> 2), rest);
}
397 
/*
 * vmw_fifo_slow_copy - Copy a bounce buffer into the FIFO word by word.
 *
 * @fifo_state: The software fifo state holding the bounce buffers.
 * @vmw: Pointer to the device private structure.
 * @next_cmd: Current FIFO head (byte offset).
 * @max: End of the FIFO command area (byte offset).
 * @min: Start of the FIFO command area (byte offset).
 * @bytes: Number of bytes to copy.
 *
 * Fallback for devices without SVGA_FIFO_CAP_RESERVE: each 32-bit word
 * is written and the head pointer advanced with full barriers in
 * between, so the device never sees a partially written word.
 */
static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
			       struct vmw_private *vmw,
			       uint32_t next_cmd,
			       uint32_t max, uint32_t min, uint32_t bytes)
{
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
	    fifo_state->dynamic_buffer : fifo_state->static_buffer;

	while (bytes > 0) {
		vmw_fifo_mem_write(vmw, (next_cmd >> 2), *buffer++);
		next_cmd += sizeof(uint32_t);
		/* Wrap the head back to the start of the command area. */
		if (unlikely(next_cmd == max))
			next_cmd = min;
		/* Word must be visible before the head pointer moves. */
		mb();
		vmw_fifo_mem_write(vmw, SVGA_FIFO_NEXT_CMD, next_cmd);
		mb();
		bytes -= sizeof(uint32_t);
	}
}
417 
/*
 * vmw_local_fifo_commit - Commit @bytes of previously reserved FIFO space.
 *
 * @dev_priv: Pointer to the device private structure.
 * @bytes: Number of bytes to commit; must not exceed the reservation.
 *
 * Flushes any bounce buffer into the ring, advances SVGA_FIFO_NEXT_CMD,
 * clears the reservation, pings the device and releases the fifo_mutex
 * acquired by vmw_local_fifo_reserve().
 */
static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = dev_priv->fifo;
	uint32_t next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD);
	uint32_t max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
	uint32_t min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
	bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;

	/* Commits must be 32-bit aligned and within the reservation. */
	BUG_ON((bytes & 3) != 0);
	BUG_ON(bytes > fifo_state->reserved_size);

	fifo_state->reserved_size = 0;

	if (fifo_state->using_bounce_buffer) {
		if (reserveable)
			vmw_fifo_res_copy(fifo_state, dev_priv,
					  next_cmd, max, min, bytes);
		else
			vmw_fifo_slow_copy(fifo_state, dev_priv,
					   next_cmd, max, min, bytes);

		if (fifo_state->dynamic_buffer) {
			vfree(fifo_state->dynamic_buffer);
			fifo_state->dynamic_buffer = NULL;
		}

	}

	down_write(&fifo_state->rwsem);
	if (fifo_state->using_bounce_buffer || reserveable) {
		/* Advance the head pointer, wrapping if necessary. */
		next_cmd += bytes;
		if (next_cmd >= max)
			next_cmd -= max - min;
		/* Command data must be visible before the head moves. */
		mb();
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_NEXT_CMD, next_cmd);
	}

	if (reserveable)
		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_RESERVED, 0);
	mb();
	up_write(&fifo_state->rwsem);
	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	mutex_unlock(&fifo_state->fifo_mutex);
}
462 
463 void vmw_cmd_commit(struct vmw_private *dev_priv, uint32_t bytes)
464 {
465 	if (dev_priv->cman)
466 		vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, false);
467 	else
468 		vmw_local_fifo_commit(dev_priv, bytes);
469 }
470 
471 
472 /**
473  * vmw_cmd_commit_flush - Commit fifo space and flush any buffered commands.
474  *
475  * @dev_priv: Pointer to device private structure.
476  * @bytes: Number of bytes to commit.
477  */
478 void vmw_cmd_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
479 {
480 	if (dev_priv->cman)
481 		vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, true);
482 	else
483 		vmw_local_fifo_commit(dev_priv, bytes);
484 }
485 
486 /**
487  * vmw_cmd_flush - Flush any buffered commands and make sure command processing
488  * starts.
489  *
490  * @dev_priv: Pointer to device private structure.
491  * @interruptible: Whether to wait interruptible if function needs to sleep.
492  */
493 int vmw_cmd_flush(struct vmw_private *dev_priv, bool interruptible)
494 {
495 	might_sleep();
496 
497 	if (dev_priv->cman)
498 		return vmw_cmdbuf_cur_flush(dev_priv->cman, interruptible);
499 	else
500 		return 0;
501 }
502 
/*
 * vmw_cmd_send_fence - Emit a fence command and assign its sequence number.
 *
 * @dev_priv: Pointer to the device private structure.
 * @seqno: Out: the sequence number associated with the fence.
 *
 * Returns: 0 on success. -ENOMEM if command space could not be
 * reserved, in which case @seqno is set to the last issued sequence
 * number and a best-effort fallback wait is performed.
 */
int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
{
	struct svga_fifo_cmd_fence *cmd_fence;
	u32 *fm;
	int ret = 0;
	uint32_t bytes = sizeof(u32) + sizeof(*cmd_fence);

	fm = VMW_CMD_RESERVE(dev_priv, bytes);
	if (unlikely(fm == NULL)) {
		/* Can't emit a fence; wait on the last seqno instead. */
		*seqno = atomic_read(&dev_priv->marker_seq);
		ret = -ENOMEM;
		(void)vmw_fallback_wait(dev_priv, false, true, *seqno,
					false, 3*HZ);
		goto out_err;
	}

	/* Zero is not a valid seqno; skip it when the counter wraps. */
	do {
		*seqno = atomic_add_return(1, &dev_priv->marker_seq);
	} while (*seqno == 0);

	if (!(vmw_fifo_caps(dev_priv) & SVGA_FIFO_CAP_FENCE)) {

		/*
		 * Don't request hardware to send a fence. The
		 * waiting code in vmwgfx_irq.c will emulate this.
		 * Committing zero bytes releases the reservation.
		 */

		vmw_cmd_commit(dev_priv, 0);
		return 0;
	}

	*fm++ = SVGA_CMD_FENCE;
	cmd_fence = (struct svga_fifo_cmd_fence *) fm;
	cmd_fence->fence = *seqno;
	vmw_cmd_commit_flush(dev_priv, bytes);
	vmw_update_seqno(dev_priv);

out_err:
	return ret;
}
543 
544 /**
545  * vmw_cmd_emit_dummy_legacy_query - emits a dummy query to the fifo using
546  * legacy query commands.
547  *
548  * @dev_priv: The device private structure.
549  * @cid: The hardware context id used for the query.
550  *
551  * See the vmw_cmd_emit_dummy_query documentation.
552  */
553 static int vmw_cmd_emit_dummy_legacy_query(struct vmw_private *dev_priv,
554 					    uint32_t cid)
555 {
556 	/*
557 	 * A query wait without a preceding query end will
558 	 * actually finish all queries for this cid
559 	 * without writing to the query result structure.
560 	 */
561 
562 	struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
563 	struct {
564 		SVGA3dCmdHeader header;
565 		SVGA3dCmdWaitForQuery body;
566 	} *cmd;
567 
568 	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
569 	if (unlikely(cmd == NULL))
570 		return -ENOMEM;
571 
572 	cmd->header.id = SVGA_3D_CMD_WAIT_FOR_QUERY;
573 	cmd->header.size = sizeof(cmd->body);
574 	cmd->body.cid = cid;
575 	cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
576 
577 	if (bo->resource->mem_type == TTM_PL_VRAM) {
578 		cmd->body.guestResult.gmrId = SVGA_GMR_FRAMEBUFFER;
579 		cmd->body.guestResult.offset = bo->resource->start << PAGE_SHIFT;
580 	} else {
581 		cmd->body.guestResult.gmrId = bo->resource->start;
582 		cmd->body.guestResult.offset = 0;
583 	}
584 
585 	vmw_cmd_commit(dev_priv, sizeof(*cmd));
586 
587 	return 0;
588 }
589 
590 /**
591  * vmw_cmd_emit_dummy_gb_query - emits a dummy query to the fifo using
592  * guest-backed resource query commands.
593  *
594  * @dev_priv: The device private structure.
595  * @cid: The hardware context id used for the query.
596  *
597  * See the vmw_cmd_emit_dummy_query documentation.
598  */
599 static int vmw_cmd_emit_dummy_gb_query(struct vmw_private *dev_priv,
600 				       uint32_t cid)
601 {
602 	/*
603 	 * A query wait without a preceding query end will
604 	 * actually finish all queries for this cid
605 	 * without writing to the query result structure.
606 	 */
607 
608 	struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
609 	struct {
610 		SVGA3dCmdHeader header;
611 		SVGA3dCmdWaitForGBQuery body;
612 	} *cmd;
613 
614 	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
615 	if (unlikely(cmd == NULL))
616 		return -ENOMEM;
617 
618 	cmd->header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
619 	cmd->header.size = sizeof(cmd->body);
620 	cmd->body.cid = cid;
621 	cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
622 	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
623 	cmd->body.mobid = bo->resource->start;
624 	cmd->body.offset = 0;
625 
626 	vmw_cmd_commit(dev_priv, sizeof(*cmd));
627 
628 	return 0;
629 }
630 
631 
632 /**
633  * vmw_cmd_emit_dummy_query - emits a dummy query to the fifo using
634  * appropriate resource query commands.
635  *
636  * @dev_priv: The device private structure.
637  * @cid: The hardware context id used for the query.
638  *
639  * This function is used to emit a dummy occlusion query with
640  * no primitives rendered between query begin and query end.
641  * It's used to provide a query barrier, in order to know that when
642  * this query is finished, all preceding queries are also finished.
643  *
644  * A Query results structure should have been initialized at the start
645  * of the dev_priv->dummy_query_bo buffer object. And that buffer object
646  * must also be either reserved or pinned when this function is called.
647  *
648  * Returns -ENOMEM on failure to reserve fifo space.
649  */
650 int vmw_cmd_emit_dummy_query(struct vmw_private *dev_priv,
651 			      uint32_t cid)
652 {
653 	if (dev_priv->has_mob)
654 		return vmw_cmd_emit_dummy_gb_query(dev_priv, cid);
655 
656 	return vmw_cmd_emit_dummy_legacy_query(dev_priv, cid);
657 }
658 
659 
660 /**
661  * vmw_cmd_supported - returns true if the given device supports
662  * command queues.
663  *
664  * @vmw: The device private structure.
665  *
666  * Returns true if we can issue commands.
667  */
668 bool vmw_cmd_supported(struct vmw_private *vmw)
669 {
670 	if ((vmw->capabilities & (SVGA_CAP_COMMAND_BUFFERS |
671 				  SVGA_CAP_CMD_BUFFERS_2)) != 0)
672 		return true;
673 	/*
674 	 * We have FIFO cmd's
675 	 */
676 	return vmw->fifo_mem != NULL;
677 }
678