1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /**************************************************************************
3  *
4  * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 #include "vmwgfx_kms.h"
28 
29 #include "vmwgfx_bo.h"
30 #include "vmw_surface_cache.h"
31 
32 #include <drm/drm_atomic.h>
33 #include <drm/drm_atomic_helper.h>
34 #include <drm/drm_damage_helper.h>
35 #include <drm/drm_fourcc.h>
36 #include <drm/drm_rect.h>
37 #include <drm/drm_sysfs.h>
38 
39 void vmw_du_cleanup(struct vmw_display_unit *du)
40 {
41 	struct vmw_private *dev_priv = vmw_priv(du->primary.dev);
42 	drm_plane_cleanup(&du->primary);
43 	if (vmw_cmd_supported(dev_priv))
44 		drm_plane_cleanup(&du->cursor.base);
45 
46 	drm_connector_unregister(&du->connector);
47 	drm_crtc_cleanup(&du->crtc);
48 	drm_encoder_cleanup(&du->encoder);
49 	drm_connector_cleanup(&du->connector);
50 }
51 
52 /*
53  * Display Unit Cursor functions
54  */
55 
56 static int vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps);
57 static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
58 				  struct vmw_plane_state *vps,
59 				  u32 *image, u32 width, u32 height,
60 				  u32 hotspotX, u32 hotspotY);
61 
62 struct vmw_svga_fifo_cmd_define_cursor {
63 	u32 cmd;
64 	SVGAFifoCmdDefineAlphaCursor cursor;
65 };
66 
67 /**
68  * vmw_send_define_cursor_cmd - queue a define cursor command
69  * @dev_priv: the private driver struct
70  * @image: buffer which holds the cursor image
71  * @width: width of the mouse cursor image
72  * @height: height of the mouse cursor image
73  * @hotspotX: the horizontal position of mouse hotspot
74  * @hotspotY: the vertical position of mouse hotspot
75  */
76 static void vmw_send_define_cursor_cmd(struct vmw_private *dev_priv,
77 				       u32 *image, u32 width, u32 height,
78 				       u32 hotspotX, u32 hotspotY)
79 {
80 	struct vmw_svga_fifo_cmd_define_cursor *cmd;
81 	const u32 image_size = width * height * sizeof(*image);
82 	const u32 cmd_size = sizeof(*cmd) + image_size;
83 
84 	/* Try to reserve fifocmd space and swallow any failures;
85 	   such reservations cannot be left unconsumed for long
86 	   under the risk of clogging other fifocmd users, so
87 	   we treat reservations separtely from the way we treat
88 	   other fallible KMS-atomic resources at prepare_fb */
89 	cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);
90 
91 	if (unlikely(!cmd))
92 		return;
93 
94 	memset(cmd, 0, sizeof(*cmd));
95 
96 	memcpy(&cmd[1], image, image_size);
97 
98 	cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
99 	cmd->cursor.id = 0;
100 	cmd->cursor.width = width;
101 	cmd->cursor.height = height;
102 	cmd->cursor.hotspotX = hotspotX;
103 	cmd->cursor.hotspotY = hotspotY;
104 
105 	vmw_cmd_commit_flush(dev_priv, cmd_size);
106 }
107 
108 /**
109  * vmw_cursor_update_image - update the cursor image on the provided plane
110  * @dev_priv: the private driver struct
111  * @vps: the plane state of the cursor plane
112  * @image: buffer which holds the cursor image
113  * @width: width of the mouse cursor image
114  * @height: height of the mouse cursor image
115  * @hotspotX: the horizontal position of mouse hotspot
116  * @hotspotY: the vertical position of mouse hotspot
117  */
118 static void vmw_cursor_update_image(struct vmw_private *dev_priv,
119 				    struct vmw_plane_state *vps,
120 				    u32 *image, u32 width, u32 height,
121 				    u32 hotspotX, u32 hotspotY)
122 {
123 	if (vps->cursor.bo)
124 		vmw_cursor_update_mob(dev_priv, vps, image,
125 				      vps->base.crtc_w, vps->base.crtc_h,
126 				      hotspotX, hotspotY);
127 
128 	else
129 		vmw_send_define_cursor_cmd(dev_priv, image, width, height,
130 					   hotspotX, hotspotY);
131 }
132 
133 
134 /**
135  * vmw_cursor_update_mob - Update cursor vis CursorMob mechanism
136  *
137  * Called from inside vmw_du_cursor_plane_atomic_update to actually
138  * make the cursor-image live.
139  *
140  * @dev_priv: device to work with
141  * @vps: the plane state of the cursor plane
142  * @image: cursor source data to fill the MOB with
143  * @width: source data width
144  * @height: source data height
145  * @hotspotX: cursor hotspot x
146  * @hotspotY: cursor hotspot Y
147  */
148 static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
149 				  struct vmw_plane_state *vps,
150 				  u32 *image, u32 width, u32 height,
151 				  u32 hotspotX, u32 hotspotY)
152 {
153 	SVGAGBCursorHeader *header;
154 	SVGAGBAlphaCursorHeader *alpha_header;
155 	const u32 image_size = width * height * sizeof(*image);
156 
157 	header = vmw_bo_map_and_cache(vps->cursor.bo);
158 	alpha_header = &header->header.alphaHeader;
159 
160 	memset(header, 0, sizeof(*header));
161 
162 	header->type = SVGA_ALPHA_CURSOR;
163 	header->sizeInBytes = image_size;
164 
165 	alpha_header->hotspotX = hotspotX;
166 	alpha_header->hotspotY = hotspotY;
167 	alpha_header->width = width;
168 	alpha_header->height = height;
169 
170 	memcpy(header + 1, image, image_size);
171 	vmw_write(dev_priv, SVGA_REG_CURSOR_MOBID,
172 		  vps->cursor.bo->tbo.resource->start);
173 }
174 
175 
176 static u32 vmw_du_cursor_mob_size(u32 w, u32 h)
177 {
178 	return w * h * sizeof(u32) + sizeof(SVGAGBCursorHeader);
179 }
180 
181 /**
182  * vmw_du_cursor_plane_acquire_image -- Acquire the image data
183  * @vps: cursor plane state
184  */
185 static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps)
186 {
187 	if (vps->surf) {
188 		if (vps->surf_mapped)
189 			return vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo);
190 		return vps->surf->snooper.image;
191 	} else if (vps->bo)
192 		return vmw_bo_map_and_cache(vps->bo);
193 	return NULL;
194 }
195 
196 static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps,
197 					    struct vmw_plane_state *new_vps)
198 {
199 	void *old_image;
200 	void *new_image;
201 	u32 size;
202 	bool changed;
203 
204 	if (old_vps->base.crtc_w != new_vps->base.crtc_w ||
205 	    old_vps->base.crtc_h != new_vps->base.crtc_h)
206 	    return true;
207 
208 	if (old_vps->cursor.hotspot_x != new_vps->cursor.hotspot_x ||
209 	    old_vps->cursor.hotspot_y != new_vps->cursor.hotspot_y)
210 	    return true;
211 
212 	size = new_vps->base.crtc_w * new_vps->base.crtc_h * sizeof(u32);
213 
214 	old_image = vmw_du_cursor_plane_acquire_image(old_vps);
215 	new_image = vmw_du_cursor_plane_acquire_image(new_vps);
216 
217 	changed = false;
218 	if (old_image && new_image)
219 		changed = memcmp(old_image, new_image, size) != 0;
220 
221 	return changed;
222 }
223 
224 static void vmw_du_destroy_cursor_mob(struct vmw_bo **vbo)
225 {
226 	if (!(*vbo))
227 		return;
228 
229 	ttm_bo_unpin(&(*vbo)->tbo);
230 	vmw_bo_unreference(vbo);
231 }
232 
233 static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp,
234 				  struct vmw_plane_state *vps)
235 {
236 	u32 i;
237 
238 	if (!vps->cursor.bo)
239 		return;
240 
241 	vmw_du_cursor_plane_unmap_cm(vps);
242 
243 	/* Look for a free slot to return this mob to the cache. */
244 	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
245 		if (!vcp->cursor_mobs[i]) {
246 			vcp->cursor_mobs[i] = vps->cursor.bo;
247 			vps->cursor.bo = NULL;
248 			return;
249 		}
250 	}
251 
252 	/* Cache is full: See if this mob is bigger than an existing mob. */
253 	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
254 		if (vcp->cursor_mobs[i]->tbo.base.size <
255 		    vps->cursor.bo->tbo.base.size) {
256 			vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
257 			vcp->cursor_mobs[i] = vps->cursor.bo;
258 			vps->cursor.bo = NULL;
259 			return;
260 		}
261 	}
262 
263 	/* Destroy it if it's not worth caching. */
264 	vmw_du_destroy_cursor_mob(&vps->cursor.bo);
265 }
266 
267 static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp,
268 				 struct vmw_plane_state *vps)
269 {
270 	struct vmw_private *dev_priv = vcp->base.dev->dev_private;
271 	u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
272 	u32 i;
273 	u32 cursor_max_dim, mob_max_size;
274 	struct vmw_fence_obj *fence = NULL;
275 	int ret;
276 
277 	if (!dev_priv->has_mob ||
278 	    (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0)
279 		return -EINVAL;
280 
281 	mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
282 	cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION);
283 
284 	if (size > mob_max_size || vps->base.crtc_w > cursor_max_dim ||
285 	    vps->base.crtc_h > cursor_max_dim)
286 		return -EINVAL;
287 
288 	if (vps->cursor.bo) {
289 		if (vps->cursor.bo->tbo.base.size >= size)
290 			return 0;
291 		vmw_du_put_cursor_mob(vcp, vps);
292 	}
293 
294 	/* Look for an unused mob in the cache. */
295 	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
296 		if (vcp->cursor_mobs[i] &&
297 		    vcp->cursor_mobs[i]->tbo.base.size >= size) {
298 			vps->cursor.bo = vcp->cursor_mobs[i];
299 			vcp->cursor_mobs[i] = NULL;
300 			return 0;
301 		}
302 	}
303 	/* Create a new mob if we can't find an existing one. */
304 	ret = vmw_bo_create_and_populate(dev_priv, size,
305 					 VMW_BO_DOMAIN_MOB,
306 					 &vps->cursor.bo);
307 
308 	if (ret != 0)
309 		return ret;
310 
311 	/* Fence the mob creation so we are guarateed to have the mob */
312 	ret = ttm_bo_reserve(&vps->cursor.bo->tbo, false, false, NULL);
313 	if (ret != 0)
314 		goto teardown;
315 
316 	ret = vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
317 	if (ret != 0) {
318 		ttm_bo_unreserve(&vps->cursor.bo->tbo);
319 		goto teardown;
320 	}
321 
322 	dma_fence_wait(&fence->base, false);
323 	dma_fence_put(&fence->base);
324 
325 	ttm_bo_unreserve(&vps->cursor.bo->tbo);
326 	return 0;
327 
328 teardown:
329 	vmw_du_destroy_cursor_mob(&vps->cursor.bo);
330 	return ret;
331 }
332 
333 
334 static void vmw_cursor_update_position(struct vmw_private *dev_priv,
335 				       bool show, int x, int y)
336 {
337 	const uint32_t svga_cursor_on = show ? SVGA_CURSOR_ON_SHOW
338 					     : SVGA_CURSOR_ON_HIDE;
339 	uint32_t count;
340 
341 	spin_lock(&dev_priv->cursor_lock);
342 	if (dev_priv->capabilities2 & SVGA_CAP2_EXTRA_REGS) {
343 		vmw_write(dev_priv, SVGA_REG_CURSOR4_X, x);
344 		vmw_write(dev_priv, SVGA_REG_CURSOR4_Y, y);
345 		vmw_write(dev_priv, SVGA_REG_CURSOR4_SCREEN_ID, SVGA3D_INVALID_ID);
346 		vmw_write(dev_priv, SVGA_REG_CURSOR4_ON, svga_cursor_on);
347 		vmw_write(dev_priv, SVGA_REG_CURSOR4_SUBMIT, 1);
348 	} else if (vmw_is_cursor_bypass3_enabled(dev_priv)) {
349 		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, svga_cursor_on);
350 		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
351 		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
352 		count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
353 		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
354 	} else {
355 		vmw_write(dev_priv, SVGA_REG_CURSOR_X, x);
356 		vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y);
357 		vmw_write(dev_priv, SVGA_REG_CURSOR_ON, svga_cursor_on);
358 	}
359 	spin_unlock(&dev_priv->cursor_lock);
360 }
361 
362 void vmw_kms_cursor_snoop(struct vmw_surface *srf,
363 			  struct ttm_object_file *tfile,
364 			  struct ttm_buffer_object *bo,
365 			  SVGA3dCmdHeader *header)
366 {
367 	struct ttm_bo_kmap_obj map;
368 	unsigned long kmap_offset;
369 	unsigned long kmap_num;
370 	SVGA3dCopyBox *box;
371 	unsigned box_count;
372 	void *virtual;
373 	bool is_iomem;
374 	struct vmw_dma_cmd {
375 		SVGA3dCmdHeader header;
376 		SVGA3dCmdSurfaceDMA dma;
377 	} *cmd;
378 	int i, ret;
379 	const struct SVGA3dSurfaceDesc *desc =
380 		vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
381 	const u32 image_pitch = VMW_CURSOR_SNOOP_WIDTH * desc->pitchBytesPerBlock;
382 
383 	cmd = container_of(header, struct vmw_dma_cmd, header);
384 
385 	/* No snooper installed, nothing to copy */
386 	if (!srf->snooper.image)
387 		return;
388 
389 	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
390 		DRM_ERROR("face and mipmap for cursors should never != 0\n");
391 		return;
392 	}
393 
394 	if (cmd->header.size < 64) {
395 		DRM_ERROR("at least one full copy box must be given\n");
396 		return;
397 	}
398 
399 	box = (SVGA3dCopyBox *)&cmd[1];
400 	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
401 			sizeof(SVGA3dCopyBox);
402 
403 	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
404 	    box->x != 0    || box->y != 0    || box->z != 0    ||
405 	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
406 	    box->d != 1    || box_count != 1 ||
407 	    box->w > VMW_CURSOR_SNOOP_WIDTH || box->h > VMW_CURSOR_SNOOP_HEIGHT) {
408 		/* TODO handle none page aligned offsets */
409 		/* TODO handle more dst & src != 0 */
410 		/* TODO handle more then one copy */
411 		DRM_ERROR("Can't snoop dma request for cursor!\n");
412 		DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
413 			  box->srcx, box->srcy, box->srcz,
414 			  box->x, box->y, box->z,
415 			  box->w, box->h, box->d, box_count,
416 			  cmd->dma.guest.ptr.offset);
417 		return;
418 	}
419 
420 	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
421 	kmap_num = (VMW_CURSOR_SNOOP_HEIGHT*image_pitch) >> PAGE_SHIFT;
422 
423 	ret = ttm_bo_reserve(bo, true, false, NULL);
424 	if (unlikely(ret != 0)) {
425 		DRM_ERROR("reserve failed\n");
426 		return;
427 	}
428 
429 	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
430 	if (unlikely(ret != 0))
431 		goto err_unreserve;
432 
433 	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
434 
435 	if (box->w == VMW_CURSOR_SNOOP_WIDTH && cmd->dma.guest.pitch == image_pitch) {
436 		memcpy(srf->snooper.image, virtual,
437 		       VMW_CURSOR_SNOOP_HEIGHT*image_pitch);
438 	} else {
439 		/* Image is unsigned pointer. */
440 		for (i = 0; i < box->h; i++)
441 			memcpy(srf->snooper.image + i * image_pitch,
442 			       virtual + i * cmd->dma.guest.pitch,
443 			       box->w * desc->pitchBytesPerBlock);
444 	}
445 
446 	srf->snooper.age++;
447 
448 	ttm_bo_kunmap(&map);
449 err_unreserve:
450 	ttm_bo_unreserve(bo);
451 }
452 
453 /**
454  * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
455  *
456  * @dev_priv: Pointer to the device private struct.
457  *
458  * Clears all legacy hotspots.
459  */
460 void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
461 {
462 	struct drm_device *dev = &dev_priv->drm;
463 	struct vmw_display_unit *du;
464 	struct drm_crtc *crtc;
465 
466 	drm_modeset_lock_all(dev);
467 	drm_for_each_crtc(crtc, dev) {
468 		du = vmw_crtc_to_du(crtc);
469 
470 		du->hotspot_x = 0;
471 		du->hotspot_y = 0;
472 	}
473 	drm_modeset_unlock_all(dev);
474 }
475 
476 void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
477 {
478 	struct drm_device *dev = &dev_priv->drm;
479 	struct vmw_display_unit *du;
480 	struct drm_crtc *crtc;
481 
482 	mutex_lock(&dev->mode_config.mutex);
483 
484 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
485 		du = vmw_crtc_to_du(crtc);
486 		if (!du->cursor_surface ||
487 		    du->cursor_age == du->cursor_surface->snooper.age ||
488 		    !du->cursor_surface->snooper.image)
489 			continue;
490 
491 		du->cursor_age = du->cursor_surface->snooper.age;
492 		vmw_send_define_cursor_cmd(dev_priv,
493 					   du->cursor_surface->snooper.image,
494 					   VMW_CURSOR_SNOOP_WIDTH,
495 					   VMW_CURSOR_SNOOP_HEIGHT,
496 					   du->hotspot_x + du->core_hotspot_x,
497 					   du->hotspot_y + du->core_hotspot_y);
498 	}
499 
500 	mutex_unlock(&dev->mode_config.mutex);
501 }
502 
503 
504 void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
505 {
506 	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
507 	u32 i;
508 
509 	vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);
510 
511 	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++)
512 		vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
513 
514 	drm_plane_cleanup(plane);
515 }
516 
517 
518 void vmw_du_primary_plane_destroy(struct drm_plane *plane)
519 {
520 	drm_plane_cleanup(plane);
521 
522 	/* Planes are static in our case so we don't free it */
523 }
524 
525 
526 /**
527  * vmw_du_plane_unpin_surf - unpins resource associated with a framebuffer surface
528  *
529  * @vps: plane state associated with the display surface
530  * @unreference: true if we also want to unreference the display.
531  */
532 void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps,
533 			     bool unreference)
534 {
535 	if (vps->surf) {
536 		if (vps->pinned) {
537 			vmw_resource_unpin(&vps->surf->res);
538 			vps->pinned--;
539 		}
540 
541 		if (unreference) {
542 			if (vps->pinned)
543 				DRM_ERROR("Surface still pinned\n");
544 			vmw_surface_unreference(&vps->surf);
545 		}
546 	}
547 }
548 
549 
550 /**
551  * vmw_du_plane_cleanup_fb - Unpins the plane surface
552  *
553  * @plane:  display plane
554  * @old_state: Contains the FB to clean up
555  *
556  * Unpins the framebuffer surface
557  *
558  * Returns 0 on success
559  */
560 void
561 vmw_du_plane_cleanup_fb(struct drm_plane *plane,
562 			struct drm_plane_state *old_state)
563 {
564 	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
565 
566 	vmw_du_plane_unpin_surf(vps, false);
567 }
568 
569 
570 /**
571  * vmw_du_cursor_plane_map_cm - Maps the cursor mobs.
572  *
573  * @vps: plane_state
574  *
575  * Returns 0 on success
576  */
577 
578 static int
579 vmw_du_cursor_plane_map_cm(struct vmw_plane_state *vps)
580 {
581 	int ret;
582 	u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
583 	struct ttm_buffer_object *bo;
584 
585 	if (!vps->cursor.bo)
586 		return -EINVAL;
587 
588 	bo = &vps->cursor.bo->tbo;
589 
590 	if (bo->base.size < size)
591 		return -EINVAL;
592 
593 	if (vps->cursor.bo->map.virtual)
594 		return 0;
595 
596 	ret = ttm_bo_reserve(bo, false, false, NULL);
597 	if (unlikely(ret != 0))
598 		return -ENOMEM;
599 
600 	vmw_bo_map_and_cache(vps->cursor.bo);
601 
602 	ttm_bo_unreserve(bo);
603 
604 	if (unlikely(ret != 0))
605 		return -ENOMEM;
606 
607 	return 0;
608 }
609 
610 
611 /**
612  * vmw_du_cursor_plane_unmap_cm - Unmaps the cursor mobs.
613  *
614  * @vps: state of the cursor plane
615  *
616  * Returns 0 on success
617  */
618 
619 static int
620 vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps)
621 {
622 	int ret = 0;
623 	struct vmw_bo *vbo = vps->cursor.bo;
624 
625 	if (!vbo || !vbo->map.virtual)
626 		return 0;
627 
628 	ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL);
629 	if (likely(ret == 0)) {
630 		vmw_bo_unmap(vbo);
631 		ttm_bo_unreserve(&vbo->tbo);
632 	}
633 
634 	return ret;
635 }
636 
637 
638 /**
639  * vmw_du_cursor_plane_cleanup_fb - Unpins the plane surface
640  *
641  * @plane: cursor plane
642  * @old_state: contains the state to clean up
643  *
644  * Unmaps all cursor bo mappings and unpins the cursor surface
645  *
646  * Returns 0 on success
647  */
648 void
649 vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
650 			       struct drm_plane_state *old_state)
651 {
652 	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
653 	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
654 
655 	if (vps->surf_mapped) {
656 		vmw_bo_unmap(vps->surf->res.guest_memory_bo);
657 		vps->surf_mapped = false;
658 	}
659 
660 	vmw_du_cursor_plane_unmap_cm(vps);
661 	vmw_du_put_cursor_mob(vcp, vps);
662 
663 	vmw_du_plane_unpin_surf(vps, false);
664 
665 	if (vps->surf) {
666 		vmw_surface_unreference(&vps->surf);
667 		vps->surf = NULL;
668 	}
669 
670 	if (vps->bo) {
671 		vmw_bo_unreference(&vps->bo);
672 		vps->bo = NULL;
673 	}
674 }
675 
676 
677 /**
678  * vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it
679  *
680  * @plane:  display plane
681  * @new_state: info on the new plane state, including the FB
682  *
683  * Returns 0 on success
684  */
685 int
686 vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
687 			       struct drm_plane_state *new_state)
688 {
689 	struct drm_framebuffer *fb = new_state->fb;
690 	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
691 	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
692 	int ret = 0;
693 
694 	if (vps->surf) {
695 		if (vps->surf_mapped) {
696 			vmw_bo_unmap(vps->surf->res.guest_memory_bo);
697 			vps->surf_mapped = false;
698 		}
699 		vmw_surface_unreference(&vps->surf);
700 		vps->surf = NULL;
701 	}
702 
703 	if (vps->bo) {
704 		vmw_bo_unreference(&vps->bo);
705 		vps->bo = NULL;
706 	}
707 
708 	if (fb) {
709 		if (vmw_framebuffer_to_vfb(fb)->bo) {
710 			vps->bo = vmw_framebuffer_to_vfbd(fb)->buffer;
711 			vmw_bo_reference(vps->bo);
712 		} else {
713 			vps->surf = vmw_framebuffer_to_vfbs(fb)->surface;
714 			vmw_surface_reference(vps->surf);
715 		}
716 	}
717 
718 	if (!vps->surf && vps->bo) {
719 		const u32 size = new_state->crtc_w * new_state->crtc_h * sizeof(u32);
720 
721 		/*
722 		 * Not using vmw_bo_map_and_cache() helper here as we need to
723 		 * reserve the ttm_buffer_object first which
724 		 * vmw_bo_map_and_cache() omits.
725 		 */
726 		ret = ttm_bo_reserve(&vps->bo->tbo, true, false, NULL);
727 
728 		if (unlikely(ret != 0))
729 			return -ENOMEM;
730 
731 		ret = ttm_bo_kmap(&vps->bo->tbo, 0, PFN_UP(size), &vps->bo->map);
732 
733 		ttm_bo_unreserve(&vps->bo->tbo);
734 
735 		if (unlikely(ret != 0))
736 			return -ENOMEM;
737 	} else if (vps->surf && !vps->bo && vps->surf->res.guest_memory_bo) {
738 
739 		WARN_ON(vps->surf->snooper.image);
740 		ret = ttm_bo_reserve(&vps->surf->res.guest_memory_bo->tbo, true, false,
741 				     NULL);
742 		if (unlikely(ret != 0))
743 			return -ENOMEM;
744 		vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo);
745 		ttm_bo_unreserve(&vps->surf->res.guest_memory_bo->tbo);
746 		vps->surf_mapped = true;
747 	}
748 
749 	if (vps->surf || vps->bo) {
750 		vmw_du_get_cursor_mob(vcp, vps);
751 		vmw_du_cursor_plane_map_cm(vps);
752 	}
753 
754 	return 0;
755 }
756 
757 
758 void
759 vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
760 				  struct drm_atomic_state *state)
761 {
762 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
763 									   plane);
764 	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
765 									   plane);
766 	struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc;
767 	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
768 	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
769 	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
770 	struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
771 	s32 hotspot_x, hotspot_y;
772 
773 	hotspot_x = du->hotspot_x;
774 	hotspot_y = du->hotspot_y;
775 
776 	if (new_state->fb) {
777 		hotspot_x += new_state->fb->hot_x;
778 		hotspot_y += new_state->fb->hot_y;
779 	}
780 
781 	du->cursor_surface = vps->surf;
782 	du->cursor_bo = vps->bo;
783 
784 	if (!vps->surf && !vps->bo) {
785 		vmw_cursor_update_position(dev_priv, false, 0, 0);
786 		return;
787 	}
788 
789 	vps->cursor.hotspot_x = hotspot_x;
790 	vps->cursor.hotspot_y = hotspot_y;
791 
792 	if (vps->surf) {
793 		du->cursor_age = du->cursor_surface->snooper.age;
794 	}
795 
796 	if (!vmw_du_cursor_plane_has_changed(old_vps, vps)) {
797 		/*
798 		 * If it hasn't changed, avoid making the device do extra
799 		 * work by keeping the old cursor active.
800 		 */
801 		struct vmw_cursor_plane_state tmp = old_vps->cursor;
802 		old_vps->cursor = vps->cursor;
803 		vps->cursor = tmp;
804 	} else {
805 		void *image = vmw_du_cursor_plane_acquire_image(vps);
806 		if (image)
807 			vmw_cursor_update_image(dev_priv, vps, image,
808 						new_state->crtc_w,
809 						new_state->crtc_h,
810 						hotspot_x, hotspot_y);
811 	}
812 
813 	du->cursor_x = new_state->crtc_x + du->set_gui_x;
814 	du->cursor_y = new_state->crtc_y + du->set_gui_y;
815 
816 	vmw_cursor_update_position(dev_priv, true,
817 				   du->cursor_x + hotspot_x,
818 				   du->cursor_y + hotspot_y);
819 
820 	du->core_hotspot_x = hotspot_x - du->hotspot_x;
821 	du->core_hotspot_y = hotspot_y - du->hotspot_y;
822 }
823 
824 
825 /**
826  * vmw_du_primary_plane_atomic_check - check if the new state is okay
827  *
828  * @plane: display plane
829  * @state: info on the new plane state, including the FB
830  *
831  * Check if the new state is settable given the current state.  Other
832  * than what the atomic helper checks, we care about crtc fitting
833  * the FB and maintaining one active framebuffer.
834  *
835  * Returns 0 on success
836  */
837 int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
838 				      struct drm_atomic_state *state)
839 {
840 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
841 									   plane);
842 	struct drm_crtc_state *crtc_state = NULL;
843 	struct drm_framebuffer *new_fb = new_state->fb;
844 	int ret;
845 
846 	if (new_state->crtc)
847 		crtc_state = drm_atomic_get_new_crtc_state(state,
848 							   new_state->crtc);
849 
850 	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
851 						  DRM_PLANE_NO_SCALING,
852 						  DRM_PLANE_NO_SCALING,
853 						  false, true);
854 
855 	if (!ret && new_fb) {
856 		struct drm_crtc *crtc = new_state->crtc;
857 		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
858 
859 		vmw_connector_state_to_vcs(du->connector.state);
860 	}
861 
862 
863 	return ret;
864 }
865 
866 
867 /**
868  * vmw_du_cursor_plane_atomic_check - check if the new state is okay
869  *
870  * @plane: cursor plane
871  * @state: info on the new plane state
872  *
873  * This is a chance to fail if the new cursor state does not fit
874  * our requirements.
875  *
876  * Returns 0 on success
877  */
878 int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
879 				     struct drm_atomic_state *state)
880 {
881 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
882 									   plane);
883 	int ret = 0;
884 	struct drm_crtc_state *crtc_state = NULL;
885 	struct vmw_surface *surface = NULL;
886 	struct drm_framebuffer *fb = new_state->fb;
887 
888 	if (new_state->crtc)
889 		crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
890 							   new_state->crtc);
891 
892 	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
893 						  DRM_PLANE_NO_SCALING,
894 						  DRM_PLANE_NO_SCALING,
895 						  true, true);
896 	if (ret)
897 		return ret;
898 
899 	/* Turning off */
900 	if (!fb)
901 		return 0;
902 
903 	/* A lot of the code assumes this */
904 	if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
905 		DRM_ERROR("Invalid cursor dimensions (%d, %d)\n",
906 			  new_state->crtc_w, new_state->crtc_h);
907 		return -EINVAL;
908 	}
909 
910 	if (!vmw_framebuffer_to_vfb(fb)->bo) {
911 		surface = vmw_framebuffer_to_vfbs(fb)->surface;
912 
913 		WARN_ON(!surface);
914 
915 		if (!surface ||
916 		    (!surface->snooper.image && !surface->res.guest_memory_bo)) {
917 			DRM_ERROR("surface not suitable for cursor\n");
918 			return -EINVAL;
919 		}
920 	}
921 
922 	return 0;
923 }
924 
925 
926 int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
927 			     struct drm_atomic_state *state)
928 {
929 	struct vmw_private *vmw = vmw_priv(crtc->dev);
930 	struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
931 									 crtc);
932 	struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
933 	int connector_mask = drm_connector_mask(&du->connector);
934 	bool has_primary = new_state->plane_mask &
935 			   drm_plane_mask(crtc->primary);
936 
937 	/*
938 	 * This is fine in general, but broken userspace might expect
939 	 * some actual rendering so give a clue as why it's blank.
940 	 */
941 	if (new_state->enable && !has_primary)
942 		drm_dbg_driver(&vmw->drm,
943 			       "CRTC without a primary plane will be blank.\n");
944 
945 
946 	if (new_state->connector_mask != connector_mask &&
947 	    new_state->connector_mask != 0) {
948 		DRM_ERROR("Invalid connectors configuration\n");
949 		return -EINVAL;
950 	}
951 
952 	/*
953 	 * Our virtual device does not have a dot clock, so use the logical
954 	 * clock value as the dot clock.
955 	 */
956 	if (new_state->mode.crtc_clock == 0)
957 		new_state->adjusted_mode.crtc_clock = new_state->mode.clock;
958 
959 	return 0;
960 }
961 
962 
963 void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
964 			      struct drm_atomic_state *state)
965 {
966 }
967 
968 
969 void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc,
970 			      struct drm_atomic_state *state)
971 {
972 }
973 
974 
975 /**
976  * vmw_du_crtc_duplicate_state - duplicate crtc state
977  * @crtc: DRM crtc
978  *
979  * Allocates and returns a copy of the crtc state (both common and
980  * vmw-specific) for the specified crtc.
981  *
982  * Returns: The newly allocated crtc state, or NULL on failure.
983  */
984 struct drm_crtc_state *
985 vmw_du_crtc_duplicate_state(struct drm_crtc *crtc)
986 {
987 	struct drm_crtc_state *state;
988 	struct vmw_crtc_state *vcs;
989 
990 	if (WARN_ON(!crtc->state))
991 		return NULL;
992 
993 	vcs = kmemdup(crtc->state, sizeof(*vcs), GFP_KERNEL);
994 
995 	if (!vcs)
996 		return NULL;
997 
998 	state = &vcs->base;
999 
1000 	__drm_atomic_helper_crtc_duplicate_state(crtc, state);
1001 
1002 	return state;
1003 }
1004 
1005 
1006 /**
1007  * vmw_du_crtc_reset - creates a blank vmw crtc state
1008  * @crtc: DRM crtc
1009  *
1010  * Resets the atomic state for @crtc by freeing the state pointer (which
1011  * might be NULL, e.g. at driver load time) and allocating a new empty state
1012  * object.
1013  */
1014 void vmw_du_crtc_reset(struct drm_crtc *crtc)
1015 {
1016 	struct vmw_crtc_state *vcs;
1017 
1018 
1019 	if (crtc->state) {
1020 		__drm_atomic_helper_crtc_destroy_state(crtc->state);
1021 
1022 		kfree(vmw_crtc_state_to_vcs(crtc->state));
1023 	}
1024 
1025 	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
1026 
1027 	if (!vcs) {
1028 		DRM_ERROR("Cannot allocate vmw_crtc_state\n");
1029 		return;
1030 	}
1031 
1032 	__drm_atomic_helper_crtc_reset(crtc, &vcs->base);
1033 }
1034 
1035 
1036 /**
1037  * vmw_du_crtc_destroy_state - destroy crtc state
1038  * @crtc: DRM crtc
1039  * @state: state object to destroy
1040  *
1041  * Destroys the crtc state (both common and vmw-specific) for the
1042  * specified plane.
1043  */
1044 void
1045 vmw_du_crtc_destroy_state(struct drm_crtc *crtc,
1046 			  struct drm_crtc_state *state)
1047 {
1048 	drm_atomic_helper_crtc_destroy_state(crtc, state);
1049 }
1050 
1051 
1052 /**
1053  * vmw_du_plane_duplicate_state - duplicate plane state
1054  * @plane: drm plane
1055  *
1056  * Allocates and returns a copy of the plane state (both common and
1057  * vmw-specific) for the specified plane.
1058  *
1059  * Returns: The newly allocated plane state, or NULL on failure.
1060  */
1061 struct drm_plane_state *
1062 vmw_du_plane_duplicate_state(struct drm_plane *plane)
1063 {
1064 	struct drm_plane_state *state;
1065 	struct vmw_plane_state *vps;
1066 
1067 	vps = kmemdup(plane->state, sizeof(*vps), GFP_KERNEL);
1068 
1069 	if (!vps)
1070 		return NULL;
1071 
1072 	vps->pinned = 0;
1073 	vps->cpp = 0;
1074 
1075 	memset(&vps->cursor, 0, sizeof(vps->cursor));
1076 
1077 	/* Each ref counted resource needs to be acquired again */
1078 	if (vps->surf)
1079 		(void) vmw_surface_reference(vps->surf);
1080 
1081 	if (vps->bo)
1082 		(void) vmw_bo_reference(vps->bo);
1083 
1084 	state = &vps->base;
1085 
1086 	__drm_atomic_helper_plane_duplicate_state(plane, state);
1087 
1088 	return state;
1089 }
1090 
1091 
1092 /**
1093  * vmw_du_plane_reset - creates a blank vmw plane state
1094  * @plane: drm plane
1095  *
1096  * Resets the atomic state for @plane by freeing the state pointer (which might
1097  * be NULL, e.g. at driver load time) and allocating a new empty state object.
1098  */
1099 void vmw_du_plane_reset(struct drm_plane *plane)
1100 {
1101 	struct vmw_plane_state *vps;
1102 
1103 	if (plane->state)
1104 		vmw_du_plane_destroy_state(plane, plane->state);
1105 
1106 	vps = kzalloc(sizeof(*vps), GFP_KERNEL);
1107 
1108 	if (!vps) {
1109 		DRM_ERROR("Cannot allocate vmw_plane_state\n");
1110 		return;
1111 	}
1112 
1113 	__drm_atomic_helper_plane_reset(plane, &vps->base);
1114 }
1115 
1116 
1117 /**
1118  * vmw_du_plane_destroy_state - destroy plane state
1119  * @plane: DRM plane
1120  * @state: state object to destroy
1121  *
1122  * Destroys the plane state (both common and vmw-specific) for the
1123  * specified plane.
1124  */
1125 void
1126 vmw_du_plane_destroy_state(struct drm_plane *plane,
1127 			   struct drm_plane_state *state)
1128 {
1129 	struct vmw_plane_state *vps = vmw_plane_state_to_vps(state);
1130 
1131 	/* Should have been freed by cleanup_fb */
1132 	if (vps->surf)
1133 		vmw_surface_unreference(&vps->surf);
1134 
1135 	if (vps->bo)
1136 		vmw_bo_unreference(&vps->bo);
1137 
1138 	drm_atomic_helper_plane_destroy_state(plane, state);
1139 }
1140 
1141 
1142 /**
1143  * vmw_du_connector_duplicate_state - duplicate connector state
1144  * @connector: DRM connector
1145  *
1146  * Allocates and returns a copy of the connector state (both common and
1147  * vmw-specific) for the specified connector.
1148  *
1149  * Returns: The newly allocated connector state, or NULL on failure.
1150  */
1151 struct drm_connector_state *
1152 vmw_du_connector_duplicate_state(struct drm_connector *connector)
1153 {
1154 	struct drm_connector_state *state;
1155 	struct vmw_connector_state *vcs;
1156 
1157 	if (WARN_ON(!connector->state))
1158 		return NULL;
1159 
1160 	vcs = kmemdup(connector->state, sizeof(*vcs), GFP_KERNEL);
1161 
1162 	if (!vcs)
1163 		return NULL;
1164 
1165 	state = &vcs->base;
1166 
1167 	__drm_atomic_helper_connector_duplicate_state(connector, state);
1168 
1169 	return state;
1170 }
1171 
1172 
1173 /**
1174  * vmw_du_connector_reset - creates a blank vmw connector state
1175  * @connector: DRM connector
1176  *
1177  * Resets the atomic state for @connector by freeing the state pointer (which
1178  * might be NULL, e.g. at driver load time) and allocating a new empty state
1179  * object.
1180  */
1181 void vmw_du_connector_reset(struct drm_connector *connector)
1182 {
1183 	struct vmw_connector_state *vcs;
1184 
1185 
1186 	if (connector->state) {
1187 		__drm_atomic_helper_connector_destroy_state(connector->state);
1188 
1189 		kfree(vmw_connector_state_to_vcs(connector->state));
1190 	}
1191 
1192 	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
1193 
1194 	if (!vcs) {
1195 		DRM_ERROR("Cannot allocate vmw_connector_state\n");
1196 		return;
1197 	}
1198 
1199 	__drm_atomic_helper_connector_reset(connector, &vcs->base);
1200 }
1201 
1202 
1203 /**
1204  * vmw_du_connector_destroy_state - destroy connector state
1205  * @connector: DRM connector
1206  * @state: state object to destroy
1207  *
1208  * Destroys the connector state (both common and vmw-specific) for the
1209  * specified plane.
1210  */
1211 void
1212 vmw_du_connector_destroy_state(struct drm_connector *connector,
1213 			  struct drm_connector_state *state)
1214 {
1215 	drm_atomic_helper_connector_destroy_state(connector, state);
1216 }
1217 /*
1218  * Generic framebuffer code
1219  */
1220 
1221 /*
1222  * Surface framebuffer code
1223  */
1224 
1225 static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
1226 {
1227 	struct vmw_framebuffer_surface *vfbs =
1228 		vmw_framebuffer_to_vfbs(framebuffer);
1229 
1230 	drm_framebuffer_cleanup(framebuffer);
1231 	vmw_surface_unreference(&vfbs->surface);
1232 
1233 	kfree(vfbs);
1234 }
1235 
1236 /**
1237  * vmw_kms_readback - Perform a readback from the screen system to
1238  * a buffer-object backed framebuffer.
1239  *
1240  * @dev_priv: Pointer to the device private structure.
1241  * @file_priv: Pointer to a struct drm_file identifying the caller.
1242  * Must be set to NULL if @user_fence_rep is NULL.
1243  * @vfb: Pointer to the buffer-object backed framebuffer.
1244  * @user_fence_rep: User-space provided structure for fence information.
1245  * Must be set to non-NULL if @file_priv is non-NULL.
1246  * @vclips: Array of clip rects.
1247  * @num_clips: Number of clip rects in @vclips.
1248  *
1249  * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
1250  * interrupted.
1251  */
1252 int vmw_kms_readback(struct vmw_private *dev_priv,
1253 		     struct drm_file *file_priv,
1254 		     struct vmw_framebuffer *vfb,
1255 		     struct drm_vmw_fence_rep __user *user_fence_rep,
1256 		     struct drm_vmw_rect *vclips,
1257 		     uint32_t num_clips)
1258 {
1259 	switch (dev_priv->active_display_unit) {
1260 	case vmw_du_screen_object:
1261 		return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
1262 					    user_fence_rep, vclips, num_clips,
1263 					    NULL);
1264 	case vmw_du_screen_target:
1265 		return vmw_kms_stdu_readback(dev_priv, file_priv, vfb,
1266 					     user_fence_rep, NULL, vclips, num_clips,
1267 					     1, NULL);
1268 	default:
1269 		WARN_ONCE(true,
1270 			  "Readback called with invalid display system.\n");
1271 }
1272 
1273 	return -ENOSYS;
1274 }
1275 
1276 
1277 static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
1278 	.destroy = vmw_framebuffer_surface_destroy,
1279 	.dirty = drm_atomic_helper_dirtyfb,
1280 };
1281 
1282 static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
1283 					   struct vmw_surface *surface,
1284 					   struct vmw_framebuffer **out,
1285 					   const struct drm_mode_fb_cmd2
1286 					   *mode_cmd,
1287 					   bool is_bo_proxy)
1288 
1289 {
1290 	struct drm_device *dev = &dev_priv->drm;
1291 	struct vmw_framebuffer_surface *vfbs;
1292 	enum SVGA3dSurfaceFormat format;
1293 	int ret;
1294 
1295 	/* 3D is only supported on HWv8 and newer hosts */
1296 	if (dev_priv->active_display_unit == vmw_du_legacy)
1297 		return -ENOSYS;
1298 
1299 	/*
1300 	 * Sanity checks.
1301 	 */
1302 
1303 	if (!drm_any_plane_has_format(&dev_priv->drm,
1304 				      mode_cmd->pixel_format,
1305 				      mode_cmd->modifier[0])) {
1306 		drm_dbg(&dev_priv->drm,
1307 			"unsupported pixel format %p4cc / modifier 0x%llx\n",
1308 			&mode_cmd->pixel_format, mode_cmd->modifier[0]);
1309 		return -EINVAL;
1310 	}
1311 
1312 	/* Surface must be marked as a scanout. */
1313 	if (unlikely(!surface->metadata.scanout))
1314 		return -EINVAL;
1315 
1316 	if (unlikely(surface->metadata.mip_levels[0] != 1 ||
1317 		     surface->metadata.num_sizes != 1 ||
1318 		     surface->metadata.base_size.width < mode_cmd->width ||
1319 		     surface->metadata.base_size.height < mode_cmd->height ||
1320 		     surface->metadata.base_size.depth != 1)) {
1321 		DRM_ERROR("Incompatible surface dimensions "
1322 			  "for requested mode.\n");
1323 		return -EINVAL;
1324 	}
1325 
1326 	switch (mode_cmd->pixel_format) {
1327 	case DRM_FORMAT_ARGB8888:
1328 		format = SVGA3D_A8R8G8B8;
1329 		break;
1330 	case DRM_FORMAT_XRGB8888:
1331 		format = SVGA3D_X8R8G8B8;
1332 		break;
1333 	case DRM_FORMAT_RGB565:
1334 		format = SVGA3D_R5G6B5;
1335 		break;
1336 	case DRM_FORMAT_XRGB1555:
1337 		format = SVGA3D_A1R5G5B5;
1338 		break;
1339 	default:
1340 		DRM_ERROR("Invalid pixel format: %p4cc\n",
1341 			  &mode_cmd->pixel_format);
1342 		return -EINVAL;
1343 	}
1344 
1345 	/*
1346 	 * For DX, surface format validation is done when surface->scanout
1347 	 * is set.
1348 	 */
1349 	if (!has_sm4_context(dev_priv) && format != surface->metadata.format) {
1350 		DRM_ERROR("Invalid surface format for requested mode.\n");
1351 		return -EINVAL;
1352 	}
1353 
1354 	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
1355 	if (!vfbs) {
1356 		ret = -ENOMEM;
1357 		goto out_err1;
1358 	}
1359 
1360 	drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
1361 	vfbs->surface = vmw_surface_reference(surface);
1362 	vfbs->base.user_handle = mode_cmd->handles[0];
1363 	vfbs->is_bo_proxy = is_bo_proxy;
1364 
1365 	*out = &vfbs->base;
1366 
1367 	ret = drm_framebuffer_init(dev, &vfbs->base.base,
1368 				   &vmw_framebuffer_surface_funcs);
1369 	if (ret)
1370 		goto out_err2;
1371 
1372 	return 0;
1373 
1374 out_err2:
1375 	vmw_surface_unreference(&surface);
1376 	kfree(vfbs);
1377 out_err1:
1378 	return ret;
1379 }
1380 
1381 /*
1382  * Buffer-object framebuffer code
1383  */
1384 
1385 static int vmw_framebuffer_bo_create_handle(struct drm_framebuffer *fb,
1386 					    struct drm_file *file_priv,
1387 					    unsigned int *handle)
1388 {
1389 	struct vmw_framebuffer_bo *vfbd =
1390 			vmw_framebuffer_to_vfbd(fb);
1391 
1392 	return drm_gem_handle_create(file_priv, &vfbd->buffer->tbo.base, handle);
1393 }
1394 
1395 static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
1396 {
1397 	struct vmw_framebuffer_bo *vfbd =
1398 		vmw_framebuffer_to_vfbd(framebuffer);
1399 
1400 	drm_framebuffer_cleanup(framebuffer);
1401 	vmw_bo_unreference(&vfbd->buffer);
1402 
1403 	kfree(vfbd);
1404 }
1405 
1406 static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
1407 	.create_handle = vmw_framebuffer_bo_create_handle,
1408 	.destroy = vmw_framebuffer_bo_destroy,
1409 	.dirty = drm_atomic_helper_dirtyfb,
1410 };
1411 
1412 /**
1413  * vmw_create_bo_proxy - create a proxy surface for the buffer object
1414  *
1415  * @dev: DRM device
1416  * @mode_cmd: parameters for the new surface
1417  * @bo_mob: MOB backing the buffer object
1418  * @srf_out: newly created surface
1419  *
1420  * When the content FB is a buffer object, we create a surface as a proxy to the
1421  * same buffer.  This way we can do a surface copy rather than a surface DMA.
1422  * This is a more efficient approach
1423  *
1424  * RETURNS:
1425  * 0 on success, error code otherwise
1426  */
1427 static int vmw_create_bo_proxy(struct drm_device *dev,
1428 			       const struct drm_mode_fb_cmd2 *mode_cmd,
1429 			       struct vmw_bo *bo_mob,
1430 			       struct vmw_surface **srf_out)
1431 {
1432 	struct vmw_surface_metadata metadata = {0};
1433 	uint32_t format;
1434 	struct vmw_resource *res;
1435 	unsigned int bytes_pp;
1436 	int ret;
1437 
1438 	switch (mode_cmd->pixel_format) {
1439 	case DRM_FORMAT_ARGB8888:
1440 	case DRM_FORMAT_XRGB8888:
1441 		format = SVGA3D_X8R8G8B8;
1442 		bytes_pp = 4;
1443 		break;
1444 
1445 	case DRM_FORMAT_RGB565:
1446 	case DRM_FORMAT_XRGB1555:
1447 		format = SVGA3D_R5G6B5;
1448 		bytes_pp = 2;
1449 		break;
1450 
1451 	case 8:
1452 		format = SVGA3D_P8;
1453 		bytes_pp = 1;
1454 		break;
1455 
1456 	default:
1457 		DRM_ERROR("Invalid framebuffer format %p4cc\n",
1458 			  &mode_cmd->pixel_format);
1459 		return -EINVAL;
1460 	}
1461 
1462 	metadata.format = format;
1463 	metadata.mip_levels[0] = 1;
1464 	metadata.num_sizes = 1;
1465 	metadata.base_size.width = mode_cmd->pitches[0] / bytes_pp;
1466 	metadata.base_size.height =  mode_cmd->height;
1467 	metadata.base_size.depth = 1;
1468 	metadata.scanout = true;
1469 
1470 	ret = vmw_gb_surface_define(vmw_priv(dev), &metadata, srf_out);
1471 	if (ret) {
1472 		DRM_ERROR("Failed to allocate proxy content buffer\n");
1473 		return ret;
1474 	}
1475 
1476 	res = &(*srf_out)->res;
1477 
1478 	/* Reserve and switch the backing mob. */
1479 	mutex_lock(&res->dev_priv->cmdbuf_mutex);
1480 	(void) vmw_resource_reserve(res, false, true);
1481 	vmw_user_bo_unref(&res->guest_memory_bo);
1482 	res->guest_memory_bo = vmw_user_bo_ref(bo_mob);
1483 	res->guest_memory_offset = 0;
1484 	vmw_resource_unreserve(res, false, false, false, NULL, 0);
1485 	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
1486 
1487 	return 0;
1488 }
1489 
1490 
1491 
1492 static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
1493 				      struct vmw_bo *bo,
1494 				      struct vmw_framebuffer **out,
1495 				      const struct drm_mode_fb_cmd2
1496 				      *mode_cmd)
1497 
1498 {
1499 	struct drm_device *dev = &dev_priv->drm;
1500 	struct vmw_framebuffer_bo *vfbd;
1501 	unsigned int requested_size;
1502 	int ret;
1503 
1504 	requested_size = mode_cmd->height * mode_cmd->pitches[0];
1505 	if (unlikely(requested_size > bo->tbo.base.size)) {
1506 		DRM_ERROR("Screen buffer object size is too small "
1507 			  "for requested mode.\n");
1508 		return -EINVAL;
1509 	}
1510 
1511 	if (!drm_any_plane_has_format(&dev_priv->drm,
1512 				      mode_cmd->pixel_format,
1513 				      mode_cmd->modifier[0])) {
1514 		drm_dbg(&dev_priv->drm,
1515 			"unsupported pixel format %p4cc / modifier 0x%llx\n",
1516 			&mode_cmd->pixel_format, mode_cmd->modifier[0]);
1517 		return -EINVAL;
1518 	}
1519 
1520 	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
1521 	if (!vfbd) {
1522 		ret = -ENOMEM;
1523 		goto out_err1;
1524 	}
1525 
1526 	vfbd->base.base.obj[0] = &bo->tbo.base;
1527 	drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
1528 	vfbd->base.bo = true;
1529 	vfbd->buffer = vmw_bo_reference(bo);
1530 	vfbd->base.user_handle = mode_cmd->handles[0];
1531 	*out = &vfbd->base;
1532 
1533 	ret = drm_framebuffer_init(dev, &vfbd->base.base,
1534 				   &vmw_framebuffer_bo_funcs);
1535 	if (ret)
1536 		goto out_err2;
1537 
1538 	return 0;
1539 
1540 out_err2:
1541 	vmw_bo_unreference(&bo);
1542 	kfree(vfbd);
1543 out_err1:
1544 	return ret;
1545 }
1546 
1547 
1548 /**
1549  * vmw_kms_srf_ok - check if a surface can be created
1550  *
1551  * @dev_priv: Pointer to device private struct.
1552  * @width: requested width
1553  * @height: requested height
1554  *
1555  * Surfaces need to be less than texture size
1556  */
1557 static bool
1558 vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
1559 {
1560 	if (width  > dev_priv->texture_max_width ||
1561 	    height > dev_priv->texture_max_height)
1562 		return false;
1563 
1564 	return true;
1565 }
1566 
1567 /**
1568  * vmw_kms_new_framebuffer - Create a new framebuffer.
1569  *
1570  * @dev_priv: Pointer to device private struct.
1571  * @bo: Pointer to buffer object to wrap the kms framebuffer around.
1572  * Either @bo or @surface must be NULL.
1573  * @surface: Pointer to a surface to wrap the kms framebuffer around.
1574  * Either @bo or @surface must be NULL.
1575  * @only_2d: No presents will occur to this buffer object based framebuffer.
1576  * This helps the code to do some important optimizations.
1577  * @mode_cmd: Frame-buffer metadata.
1578  */
1579 struct vmw_framebuffer *
1580 vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
1581 			struct vmw_bo *bo,
1582 			struct vmw_surface *surface,
1583 			bool only_2d,
1584 			const struct drm_mode_fb_cmd2 *mode_cmd)
1585 {
1586 	struct vmw_framebuffer *vfb = NULL;
1587 	bool is_bo_proxy = false;
1588 	int ret;
1589 
1590 	/*
1591 	 * We cannot use the SurfaceDMA command in an non-accelerated VM,
1592 	 * therefore, wrap the buffer object in a surface so we can use the
1593 	 * SurfaceCopy command.
1594 	 */
1595 	if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)  &&
1596 	    bo && only_2d &&
1597 	    mode_cmd->width > 64 &&  /* Don't create a proxy for cursor */
1598 	    dev_priv->active_display_unit == vmw_du_screen_target) {
1599 		ret = vmw_create_bo_proxy(&dev_priv->drm, mode_cmd,
1600 					  bo, &surface);
1601 		if (ret)
1602 			return ERR_PTR(ret);
1603 
1604 		is_bo_proxy = true;
1605 	}
1606 
1607 	/* Create the new framebuffer depending one what we have */
1608 	if (surface) {
1609 		ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
1610 						      mode_cmd,
1611 						      is_bo_proxy);
1612 		/*
1613 		 * vmw_create_bo_proxy() adds a reference that is no longer
1614 		 * needed
1615 		 */
1616 		if (is_bo_proxy)
1617 			vmw_surface_unreference(&surface);
1618 	} else if (bo) {
1619 		ret = vmw_kms_new_framebuffer_bo(dev_priv, bo, &vfb,
1620 						 mode_cmd);
1621 	} else {
1622 		BUG();
1623 	}
1624 
1625 	if (ret)
1626 		return ERR_PTR(ret);
1627 
1628 	return vfb;
1629 }
1630 
1631 /*
1632  * Generic Kernel modesetting functions
1633  */
1634 
1635 static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
1636 						 struct drm_file *file_priv,
1637 						 const struct drm_mode_fb_cmd2 *mode_cmd)
1638 {
1639 	struct vmw_private *dev_priv = vmw_priv(dev);
1640 	struct vmw_framebuffer *vfb = NULL;
1641 	struct vmw_surface *surface = NULL;
1642 	struct vmw_bo *bo = NULL;
1643 	int ret;
1644 
1645 	/* returns either a bo or surface */
1646 	ret = vmw_user_lookup_handle(dev_priv, file_priv,
1647 				     mode_cmd->handles[0],
1648 				     &surface, &bo);
1649 	if (ret) {
1650 		DRM_ERROR("Invalid buffer object handle %u (0x%x).\n",
1651 			  mode_cmd->handles[0], mode_cmd->handles[0]);
1652 		goto err_out;
1653 	}
1654 
1655 
1656 	if (!bo &&
1657 	    !vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
1658 		DRM_ERROR("Surface size cannot exceed %dx%d\n",
1659 			dev_priv->texture_max_width,
1660 			dev_priv->texture_max_height);
1661 		goto err_out;
1662 	}
1663 
1664 
1665 	vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
1666 				      !(dev_priv->capabilities & SVGA_CAP_3D),
1667 				      mode_cmd);
1668 	if (IS_ERR(vfb)) {
1669 		ret = PTR_ERR(vfb);
1670 		goto err_out;
1671 	}
1672 
1673 err_out:
1674 	/* vmw_user_lookup_handle takes one ref so does new_fb */
1675 	if (bo)
1676 		vmw_user_bo_unref(&bo);
1677 	if (surface)
1678 		vmw_surface_unreference(&surface);
1679 
1680 	if (ret) {
1681 		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
1682 		return ERR_PTR(ret);
1683 	}
1684 
1685 	return &vfb->base;
1686 }
1687 
1688 /**
1689  * vmw_kms_check_display_memory - Validates display memory required for a
1690  * topology
1691  * @dev: DRM device
1692  * @num_rects: number of drm_rect in rects
1693  * @rects: array of drm_rect representing the topology to validate indexed by
1694  * crtc index.
1695  *
1696  * Returns:
1697  * 0 on success otherwise negative error code
1698  */
1699 static int vmw_kms_check_display_memory(struct drm_device *dev,
1700 					uint32_t num_rects,
1701 					struct drm_rect *rects)
1702 {
1703 	struct vmw_private *dev_priv = vmw_priv(dev);
1704 	struct drm_rect bounding_box = {0};
1705 	u64 total_pixels = 0, pixel_mem, bb_mem;
1706 	int i;
1707 
1708 	for (i = 0; i < num_rects; i++) {
1709 		/*
1710 		 * For STDU only individual screen (screen target) is limited by
1711 		 * SCREENTARGET_MAX_WIDTH/HEIGHT registers.
1712 		 */
1713 		if (dev_priv->active_display_unit == vmw_du_screen_target &&
1714 		    (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width ||
1715 		     drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) {
1716 			VMW_DEBUG_KMS("Screen size not supported.\n");
1717 			return -EINVAL;
1718 		}
1719 
1720 		/* Bounding box upper left is at (0,0). */
1721 		if (rects[i].x2 > bounding_box.x2)
1722 			bounding_box.x2 = rects[i].x2;
1723 
1724 		if (rects[i].y2 > bounding_box.y2)
1725 			bounding_box.y2 = rects[i].y2;
1726 
1727 		total_pixels += (u64) drm_rect_width(&rects[i]) *
1728 			(u64) drm_rect_height(&rects[i]);
1729 	}
1730 
1731 	/* Virtual svga device primary limits are always in 32-bpp. */
1732 	pixel_mem = total_pixels * 4;
1733 
1734 	/*
1735 	 * For HV10 and below prim_bb_mem is vram size. When
1736 	 * SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM is not present vram size is
1737 	 * limit on primary bounding box
1738 	 */
1739 	if (pixel_mem > dev_priv->max_primary_mem) {
1740 		VMW_DEBUG_KMS("Combined output size too large.\n");
1741 		return -EINVAL;
1742 	}
1743 
1744 	/* SVGA_CAP_NO_BB_RESTRICTION is available for STDU only. */
1745 	if (dev_priv->active_display_unit != vmw_du_screen_target ||
1746 	    !(dev_priv->capabilities & SVGA_CAP_NO_BB_RESTRICTION)) {
1747 		bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4;
1748 
1749 		if (bb_mem > dev_priv->max_primary_mem) {
1750 			VMW_DEBUG_KMS("Topology is beyond supported limits.\n");
1751 			return -EINVAL;
1752 		}
1753 	}
1754 
1755 	return 0;
1756 }
1757 
1758 /**
1759  * vmw_crtc_state_and_lock - Return new or current crtc state with locked
1760  * crtc mutex
1761  * @state: The atomic state pointer containing the new atomic state
1762  * @crtc: The crtc
1763  *
1764  * This function returns the new crtc state if it's part of the state update.
1765  * Otherwise returns the current crtc state. It also makes sure that the
1766  * crtc mutex is locked.
1767  *
1768  * Returns: A valid crtc state pointer or NULL. It may also return a
1769  * pointer error, in particular -EDEADLK if locking needs to be rerun.
1770  */
1771 static struct drm_crtc_state *
1772 vmw_crtc_state_and_lock(struct drm_atomic_state *state, struct drm_crtc *crtc)
1773 {
1774 	struct drm_crtc_state *crtc_state;
1775 
1776 	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
1777 	if (crtc_state) {
1778 		lockdep_assert_held(&crtc->mutex.mutex.base);
1779 	} else {
1780 		int ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
1781 
1782 		if (ret != 0 && ret != -EALREADY)
1783 			return ERR_PTR(ret);
1784 
1785 		crtc_state = crtc->state;
1786 	}
1787 
1788 	return crtc_state;
1789 }
1790 
1791 /**
1792  * vmw_kms_check_implicit - Verify that all implicit display units scan out
1793  * from the same fb after the new state is committed.
1794  * @dev: The drm_device.
1795  * @state: The new state to be checked.
1796  *
1797  * Returns:
1798  *   Zero on success,
1799  *   -EINVAL on invalid state,
1800  *   -EDEADLK if modeset locking needs to be rerun.
1801  */
1802 static int vmw_kms_check_implicit(struct drm_device *dev,
1803 				  struct drm_atomic_state *state)
1804 {
1805 	struct drm_framebuffer *implicit_fb = NULL;
1806 	struct drm_crtc *crtc;
1807 	struct drm_crtc_state *crtc_state;
1808 	struct drm_plane_state *plane_state;
1809 
1810 	drm_for_each_crtc(crtc, dev) {
1811 		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1812 
1813 		if (!du->is_implicit)
1814 			continue;
1815 
1816 		crtc_state = vmw_crtc_state_and_lock(state, crtc);
1817 		if (IS_ERR(crtc_state))
1818 			return PTR_ERR(crtc_state);
1819 
1820 		if (!crtc_state || !crtc_state->enable)
1821 			continue;
1822 
1823 		/*
1824 		 * Can't move primary planes across crtcs, so this is OK.
1825 		 * It also means we don't need to take the plane mutex.
1826 		 */
1827 		plane_state = du->primary.state;
1828 		if (plane_state->crtc != crtc)
1829 			continue;
1830 
1831 		if (!implicit_fb)
1832 			implicit_fb = plane_state->fb;
1833 		else if (implicit_fb != plane_state->fb)
1834 			return -EINVAL;
1835 	}
1836 
1837 	return 0;
1838 }
1839 
1840 /**
1841  * vmw_kms_check_topology - Validates topology in drm_atomic_state
1842  * @dev: DRM device
1843  * @state: the driver state object
1844  *
1845  * Returns:
1846  * 0 on success otherwise negative error code
1847  */
1848 static int vmw_kms_check_topology(struct drm_device *dev,
1849 				  struct drm_atomic_state *state)
1850 {
1851 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1852 	struct drm_rect *rects;
1853 	struct drm_crtc *crtc;
1854 	uint32_t i;
1855 	int ret = 0;
1856 
1857 	rects = kcalloc(dev->mode_config.num_crtc, sizeof(struct drm_rect),
1858 			GFP_KERNEL);
1859 	if (!rects)
1860 		return -ENOMEM;
1861 
1862 	drm_for_each_crtc(crtc, dev) {
1863 		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1864 		struct drm_crtc_state *crtc_state;
1865 
1866 		i = drm_crtc_index(crtc);
1867 
1868 		crtc_state = vmw_crtc_state_and_lock(state, crtc);
1869 		if (IS_ERR(crtc_state)) {
1870 			ret = PTR_ERR(crtc_state);
1871 			goto clean;
1872 		}
1873 
1874 		if (!crtc_state)
1875 			continue;
1876 
1877 		if (crtc_state->enable) {
1878 			rects[i].x1 = du->gui_x;
1879 			rects[i].y1 = du->gui_y;
1880 			rects[i].x2 = du->gui_x + crtc_state->mode.hdisplay;
1881 			rects[i].y2 = du->gui_y + crtc_state->mode.vdisplay;
1882 		} else {
1883 			rects[i].x1 = 0;
1884 			rects[i].y1 = 0;
1885 			rects[i].x2 = 0;
1886 			rects[i].y2 = 0;
1887 		}
1888 	}
1889 
1890 	/* Determine change to topology due to new atomic state */
1891 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
1892 				      new_crtc_state, i) {
1893 		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1894 		struct drm_connector *connector;
1895 		struct drm_connector_state *conn_state;
1896 		struct vmw_connector_state *vmw_conn_state;
1897 
1898 		if (!du->pref_active && new_crtc_state->enable) {
1899 			VMW_DEBUG_KMS("Enabling a disabled display unit\n");
1900 			ret = -EINVAL;
1901 			goto clean;
1902 		}
1903 
1904 		/*
1905 		 * For vmwgfx each crtc has only one connector attached and it
1906 		 * is not changed so don't really need to check the
1907 		 * crtc->connector_mask and iterate over it.
1908 		 */
1909 		connector = &du->connector;
1910 		conn_state = drm_atomic_get_connector_state(state, connector);
1911 		if (IS_ERR(conn_state)) {
1912 			ret = PTR_ERR(conn_state);
1913 			goto clean;
1914 		}
1915 
1916 		vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
1917 		vmw_conn_state->gui_x = du->gui_x;
1918 		vmw_conn_state->gui_y = du->gui_y;
1919 	}
1920 
1921 	ret = vmw_kms_check_display_memory(dev, dev->mode_config.num_crtc,
1922 					   rects);
1923 
1924 clean:
1925 	kfree(rects);
1926 	return ret;
1927 }
1928 
1929 /**
1930  * vmw_kms_atomic_check_modeset- validate state object for modeset changes
1931  *
1932  * @dev: DRM device
1933  * @state: the driver state object
1934  *
1935  * This is a simple wrapper around drm_atomic_helper_check_modeset() for
1936  * us to assign a value to mode->crtc_clock so that
1937  * drm_calc_timestamping_constants() won't throw an error message
1938  *
1939  * Returns:
1940  * Zero for success or -errno
1941  */
1942 static int
1943 vmw_kms_atomic_check_modeset(struct drm_device *dev,
1944 			     struct drm_atomic_state *state)
1945 {
1946 	struct drm_crtc *crtc;
1947 	struct drm_crtc_state *crtc_state;
1948 	bool need_modeset = false;
1949 	int i, ret;
1950 
1951 	ret = drm_atomic_helper_check(dev, state);
1952 	if (ret)
1953 		return ret;
1954 
1955 	ret = vmw_kms_check_implicit(dev, state);
1956 	if (ret) {
1957 		VMW_DEBUG_KMS("Invalid implicit state\n");
1958 		return ret;
1959 	}
1960 
1961 	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
1962 		if (drm_atomic_crtc_needs_modeset(crtc_state))
1963 			need_modeset = true;
1964 	}
1965 
1966 	if (need_modeset)
1967 		return vmw_kms_check_topology(dev, state);
1968 
1969 	return ret;
1970 }
1971 
1972 static const struct drm_mode_config_funcs vmw_kms_funcs = {
1973 	.fb_create = vmw_kms_fb_create,
1974 	.atomic_check = vmw_kms_atomic_check_modeset,
1975 	.atomic_commit = drm_atomic_helper_commit,
1976 };
1977 
1978 static int vmw_kms_generic_present(struct vmw_private *dev_priv,
1979 				   struct drm_file *file_priv,
1980 				   struct vmw_framebuffer *vfb,
1981 				   struct vmw_surface *surface,
1982 				   uint32_t sid,
1983 				   int32_t destX, int32_t destY,
1984 				   struct drm_vmw_rect *clips,
1985 				   uint32_t num_clips)
1986 {
1987 	return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
1988 					    &surface->res, destX, destY,
1989 					    num_clips, 1, NULL, NULL);
1990 }
1991 
1992 
1993 int vmw_kms_present(struct vmw_private *dev_priv,
1994 		    struct drm_file *file_priv,
1995 		    struct vmw_framebuffer *vfb,
1996 		    struct vmw_surface *surface,
1997 		    uint32_t sid,
1998 		    int32_t destX, int32_t destY,
1999 		    struct drm_vmw_rect *clips,
2000 		    uint32_t num_clips)
2001 {
2002 	int ret;
2003 
2004 	switch (dev_priv->active_display_unit) {
2005 	case vmw_du_screen_target:
2006 		ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
2007 						 &surface->res, destX, destY,
2008 						 num_clips, 1, NULL, NULL);
2009 		break;
2010 	case vmw_du_screen_object:
2011 		ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
2012 					      sid, destX, destY, clips,
2013 					      num_clips);
2014 		break;
2015 	default:
2016 		WARN_ONCE(true,
2017 			  "Present called with invalid display system.\n");
2018 		ret = -ENOSYS;
2019 		break;
2020 	}
2021 	if (ret)
2022 		return ret;
2023 
2024 	vmw_cmd_flush(dev_priv, false);
2025 
2026 	return 0;
2027 }
2028 
2029 static void
2030 vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
2031 {
2032 	if (dev_priv->hotplug_mode_update_property)
2033 		return;
2034 
2035 	dev_priv->hotplug_mode_update_property =
2036 		drm_property_create_range(&dev_priv->drm,
2037 					  DRM_MODE_PROP_IMMUTABLE,
2038 					  "hotplug_mode_update", 0, 1);
2039 }
2040 
2041 int vmw_kms_init(struct vmw_private *dev_priv)
2042 {
2043 	struct drm_device *dev = &dev_priv->drm;
2044 	int ret;
2045 	static const char *display_unit_names[] = {
2046 		"Invalid",
2047 		"Legacy",
2048 		"Screen Object",
2049 		"Screen Target",
2050 		"Invalid (max)"
2051 	};
2052 
2053 	drm_mode_config_init(dev);
2054 	dev->mode_config.funcs = &vmw_kms_funcs;
2055 	dev->mode_config.min_width = 1;
2056 	dev->mode_config.min_height = 1;
2057 	dev->mode_config.max_width = dev_priv->texture_max_width;
2058 	dev->mode_config.max_height = dev_priv->texture_max_height;
2059 	dev->mode_config.preferred_depth = dev_priv->assume_16bpp ? 16 : 32;
2060 
2061 	drm_mode_create_suggested_offset_properties(dev);
2062 	vmw_kms_create_hotplug_mode_update_property(dev_priv);
2063 
2064 	ret = vmw_kms_stdu_init_display(dev_priv);
2065 	if (ret) {
2066 		ret = vmw_kms_sou_init_display(dev_priv);
2067 		if (ret) /* Fallback */
2068 			ret = vmw_kms_ldu_init_display(dev_priv);
2069 	}
2070 	BUILD_BUG_ON(ARRAY_SIZE(display_unit_names) != (vmw_du_max + 1));
2071 	drm_info(&dev_priv->drm, "%s display unit initialized\n",
2072 		 display_unit_names[dev_priv->active_display_unit]);
2073 
2074 	return ret;
2075 }
2076 
2077 int vmw_kms_close(struct vmw_private *dev_priv)
2078 {
2079 	int ret = 0;
2080 
2081 	/*
2082 	 * Docs says we should take the lock before calling this function
2083 	 * but since it destroys encoders and our destructor calls
2084 	 * drm_encoder_cleanup which takes the lock we deadlock.
2085 	 */
2086 	drm_mode_config_cleanup(&dev_priv->drm);
2087 	if (dev_priv->active_display_unit == vmw_du_legacy)
2088 		ret = vmw_kms_ldu_close_display(dev_priv);
2089 
2090 	return ret;
2091 }
2092 
2093 int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
2094 				struct drm_file *file_priv)
2095 {
2096 	struct drm_vmw_cursor_bypass_arg *arg = data;
2097 	struct vmw_display_unit *du;
2098 	struct drm_crtc *crtc;
2099 	int ret = 0;
2100 
2101 	mutex_lock(&dev->mode_config.mutex);
2102 	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
2103 
2104 		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2105 			du = vmw_crtc_to_du(crtc);
2106 			du->hotspot_x = arg->xhot;
2107 			du->hotspot_y = arg->yhot;
2108 		}
2109 
2110 		mutex_unlock(&dev->mode_config.mutex);
2111 		return 0;
2112 	}
2113 
2114 	crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
2115 	if (!crtc) {
2116 		ret = -ENOENT;
2117 		goto out;
2118 	}
2119 
2120 	du = vmw_crtc_to_du(crtc);
2121 
2122 	du->hotspot_x = arg->xhot;
2123 	du->hotspot_y = arg->yhot;
2124 
2125 out:
2126 	mutex_unlock(&dev->mode_config.mutex);
2127 
2128 	return ret;
2129 }
2130 
2131 int vmw_kms_write_svga(struct vmw_private *vmw_priv,
2132 			unsigned width, unsigned height, unsigned pitch,
2133 			unsigned bpp, unsigned depth)
2134 {
2135 	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
2136 		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
2137 	else if (vmw_fifo_have_pitchlock(vmw_priv))
2138 		vmw_fifo_mem_write(vmw_priv, SVGA_FIFO_PITCHLOCK, pitch);
2139 	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
2140 	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
2141 	if ((vmw_priv->capabilities & SVGA_CAP_8BIT_EMULATION) != 0)
2142 		vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
2143 
2144 	if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
2145 		DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
2146 			  depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
2147 		return -EINVAL;
2148 	}
2149 
2150 	return 0;
2151 }
2152 
2153 bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
2154 				uint32_t pitch,
2155 				uint32_t height)
2156 {
2157 	return ((u64) pitch * (u64) height) < (u64)
2158 		((dev_priv->active_display_unit == vmw_du_screen_target) ?
2159 		 dev_priv->max_primary_mem : dev_priv->vram_size);
2160 }
2161 
2162 /**
2163  * vmw_du_update_layout - Update the display unit with topology from resolution
2164  * plugin and generate DRM uevent
2165  * @dev_priv: device private
2166  * @num_rects: number of drm_rect in rects
2167  * @rects: toplogy to update
2168  */
2169 static int vmw_du_update_layout(struct vmw_private *dev_priv,
2170 				unsigned int num_rects, struct drm_rect *rects)
2171 {
2172 	struct drm_device *dev = &dev_priv->drm;
2173 	struct vmw_display_unit *du;
2174 	struct drm_connector *con;
2175 	struct drm_connector_list_iter conn_iter;
2176 	struct drm_modeset_acquire_ctx ctx;
2177 	struct drm_crtc *crtc;
2178 	int ret;
2179 
2180 	/* Currently gui_x/y is protected with the crtc mutex */
2181 	mutex_lock(&dev->mode_config.mutex);
2182 	drm_modeset_acquire_init(&ctx, 0);
2183 retry:
2184 	drm_for_each_crtc(crtc, dev) {
2185 		ret = drm_modeset_lock(&crtc->mutex, &ctx);
2186 		if (ret < 0) {
2187 			if (ret == -EDEADLK) {
2188 				drm_modeset_backoff(&ctx);
2189 				goto retry;
2190 		}
2191 			goto out_fini;
2192 		}
2193 	}
2194 
2195 	drm_connector_list_iter_begin(dev, &conn_iter);
2196 	drm_for_each_connector_iter(con, &conn_iter) {
2197 		du = vmw_connector_to_du(con);
2198 		if (num_rects > du->unit) {
2199 			du->pref_width = drm_rect_width(&rects[du->unit]);
2200 			du->pref_height = drm_rect_height(&rects[du->unit]);
2201 			du->pref_active = true;
2202 			du->gui_x = rects[du->unit].x1;
2203 			du->gui_y = rects[du->unit].y1;
2204 		} else {
2205 			du->pref_width  = VMWGFX_MIN_INITIAL_WIDTH;
2206 			du->pref_height = VMWGFX_MIN_INITIAL_HEIGHT;
2207 			du->pref_active = false;
2208 			du->gui_x = 0;
2209 			du->gui_y = 0;
2210 		}
2211 	}
2212 	drm_connector_list_iter_end(&conn_iter);
2213 
2214 	list_for_each_entry(con, &dev->mode_config.connector_list, head) {
2215 		du = vmw_connector_to_du(con);
2216 		if (num_rects > du->unit) {
2217 			drm_object_property_set_value
2218 			  (&con->base, dev->mode_config.suggested_x_property,
2219 			   du->gui_x);
2220 			drm_object_property_set_value
2221 			  (&con->base, dev->mode_config.suggested_y_property,
2222 			   du->gui_y);
2223 		} else {
2224 			drm_object_property_set_value
2225 			  (&con->base, dev->mode_config.suggested_x_property,
2226 			   0);
2227 			drm_object_property_set_value
2228 			  (&con->base, dev->mode_config.suggested_y_property,
2229 			   0);
2230 		}
2231 		con->status = vmw_du_connector_detect(con, true);
2232 	}
2233 out_fini:
2234 	drm_modeset_drop_locks(&ctx);
2235 	drm_modeset_acquire_fini(&ctx);
2236 	mutex_unlock(&dev->mode_config.mutex);
2237 
2238 	drm_sysfs_hotplug_event(dev);
2239 
2240 	return 0;
2241 }
2242 
2243 int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
2244 			  u16 *r, u16 *g, u16 *b,
2245 			  uint32_t size,
2246 			  struct drm_modeset_acquire_ctx *ctx)
2247 {
2248 	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
2249 	int i;
2250 
2251 	for (i = 0; i < size; i++) {
2252 		DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
2253 			  r[i], g[i], b[i]);
2254 		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
2255 		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
2256 		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
2257 	}
2258 
2259 	return 0;
2260 }
2261 
2262 int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
2263 {
2264 	return 0;
2265 }
2266 
2267 enum drm_connector_status
2268 vmw_du_connector_detect(struct drm_connector *connector, bool force)
2269 {
2270 	uint32_t num_displays;
2271 	struct drm_device *dev = connector->dev;
2272 	struct vmw_private *dev_priv = vmw_priv(dev);
2273 	struct vmw_display_unit *du = vmw_connector_to_du(connector);
2274 
2275 	num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
2276 
2277 	return ((vmw_connector_to_du(connector)->unit < num_displays &&
2278 		 du->pref_active) ?
2279 		connector_status_connected : connector_status_disconnected);
2280 }
2281 
2282 static struct drm_display_mode vmw_kms_connector_builtin[] = {
2283 	/* 640x480@60Hz */
2284 	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
2285 		   752, 800, 0, 480, 489, 492, 525, 0,
2286 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
2287 	/* 800x600@60Hz */
2288 	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
2289 		   968, 1056, 0, 600, 601, 605, 628, 0,
2290 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2291 	/* 1024x768@60Hz */
2292 	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
2293 		   1184, 1344, 0, 768, 771, 777, 806, 0,
2294 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
2295 	/* 1152x864@75Hz */
2296 	{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
2297 		   1344, 1600, 0, 864, 865, 868, 900, 0,
2298 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2299 	/* 1280x720@60Hz */
2300 	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74500, 1280, 1344,
2301 		   1472, 1664, 0, 720, 723, 728, 748, 0,
2302 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2303 	/* 1280x768@60Hz */
2304 	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
2305 		   1472, 1664, 0, 768, 771, 778, 798, 0,
2306 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2307 	/* 1280x800@60Hz */
2308 	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
2309 		   1480, 1680, 0, 800, 803, 809, 831, 0,
2310 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2311 	/* 1280x960@60Hz */
2312 	{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
2313 		   1488, 1800, 0, 960, 961, 964, 1000, 0,
2314 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2315 	/* 1280x1024@60Hz */
2316 	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
2317 		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
2318 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2319 	/* 1360x768@60Hz */
2320 	{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
2321 		   1536, 1792, 0, 768, 771, 777, 795, 0,
2322 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2323 	/* 1440x1050@60Hz */
2324 	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
2325 		   1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
2326 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2327 	/* 1440x900@60Hz */
2328 	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
2329 		   1672, 1904, 0, 900, 903, 909, 934, 0,
2330 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2331 	/* 1600x1200@60Hz */
2332 	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
2333 		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
2334 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2335 	/* 1680x1050@60Hz */
2336 	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
2337 		   1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
2338 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2339 	/* 1792x1344@60Hz */
2340 	{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
2341 		   2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
2342 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2343 	/* 1853x1392@60Hz */
2344 	{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
2345 		   2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
2346 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2347 	/* 1920x1080@60Hz */
2348 	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 173000, 1920, 2048,
2349 		   2248, 2576, 0, 1080, 1083, 1088, 1120, 0,
2350 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2351 	/* 1920x1200@60Hz */
2352 	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
2353 		   2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
2354 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2355 	/* 1920x1440@60Hz */
2356 	{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
2357 		   2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
2358 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2359 	/* 2560x1440@60Hz */
2360 	{ DRM_MODE("2560x1440", DRM_MODE_TYPE_DRIVER, 241500, 2560, 2608,
2361 		   2640, 2720, 0, 1440, 1443, 1448, 1481, 0,
2362 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2363 	/* 2560x1600@60Hz */
2364 	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
2365 		   3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
2366 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2367 	/* 2880x1800@60Hz */
2368 	{ DRM_MODE("2880x1800", DRM_MODE_TYPE_DRIVER, 337500, 2880, 2928,
2369 		   2960, 3040, 0, 1800, 1803, 1809, 1852, 0,
2370 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2371 	/* 3840x2160@60Hz */
2372 	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 533000, 3840, 3888,
2373 		   3920, 4000, 0, 2160, 2163, 2168, 2222, 0,
2374 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2375 	/* 3840x2400@60Hz */
2376 	{ DRM_MODE("3840x2400", DRM_MODE_TYPE_DRIVER, 592250, 3840, 3888,
2377 		   3920, 4000, 0, 2400, 2403, 2409, 2469, 0,
2378 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2379 	/* Terminate */
2380 	{ DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
2381 };
2382 
2383 /**
2384  * vmw_guess_mode_timing - Provide fake timings for a
2385  * 60Hz vrefresh mode.
2386  *
2387  * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
2388  * members filled in.
2389  */
2390 void vmw_guess_mode_timing(struct drm_display_mode *mode)
2391 {
2392 	mode->hsync_start = mode->hdisplay + 50;
2393 	mode->hsync_end = mode->hsync_start + 50;
2394 	mode->htotal = mode->hsync_end + 50;
2395 
2396 	mode->vsync_start = mode->vdisplay + 50;
2397 	mode->vsync_end = mode->vsync_start + 50;
2398 	mode->vtotal = mode->vsync_end + 50;
2399 
2400 	mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
2401 }
2402 
2403 
2404 int vmw_du_connector_fill_modes(struct drm_connector *connector,
2405 				uint32_t max_width, uint32_t max_height)
2406 {
2407 	struct vmw_display_unit *du = vmw_connector_to_du(connector);
2408 	struct drm_device *dev = connector->dev;
2409 	struct vmw_private *dev_priv = vmw_priv(dev);
2410 	struct drm_display_mode *mode = NULL;
2411 	struct drm_display_mode *bmode;
2412 	struct drm_display_mode prefmode = { DRM_MODE("preferred",
2413 		DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
2414 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2415 		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
2416 	};
2417 	int i;
2418 	u32 assumed_bpp = 4;
2419 
2420 	if (dev_priv->assume_16bpp)
2421 		assumed_bpp = 2;
2422 
2423 	max_width  = min(max_width,  dev_priv->texture_max_width);
2424 	max_height = min(max_height, dev_priv->texture_max_height);
2425 
2426 	/*
2427 	 * For STDU extra limit for a mode on SVGA_REG_SCREENTARGET_MAX_WIDTH/
2428 	 * HEIGHT registers.
2429 	 */
2430 	if (dev_priv->active_display_unit == vmw_du_screen_target) {
2431 		max_width  = min(max_width,  dev_priv->stdu_max_width);
2432 		max_height = min(max_height, dev_priv->stdu_max_height);
2433 	}
2434 
2435 	/* Add preferred mode */
2436 	mode = drm_mode_duplicate(dev, &prefmode);
2437 	if (!mode)
2438 		return 0;
2439 	mode->hdisplay = du->pref_width;
2440 	mode->vdisplay = du->pref_height;
2441 	vmw_guess_mode_timing(mode);
2442 	drm_mode_set_name(mode);
2443 
2444 	if (vmw_kms_validate_mode_vram(dev_priv,
2445 					mode->hdisplay * assumed_bpp,
2446 					mode->vdisplay)) {
2447 		drm_mode_probed_add(connector, mode);
2448 	} else {
2449 		drm_mode_destroy(dev, mode);
2450 		mode = NULL;
2451 	}
2452 
2453 	if (du->pref_mode) {
2454 		list_del_init(&du->pref_mode->head);
2455 		drm_mode_destroy(dev, du->pref_mode);
2456 	}
2457 
2458 	/* mode might be null here, this is intended */
2459 	du->pref_mode = mode;
2460 
2461 	for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
2462 		bmode = &vmw_kms_connector_builtin[i];
2463 		if (bmode->hdisplay > max_width ||
2464 		    bmode->vdisplay > max_height)
2465 			continue;
2466 
2467 		if (!vmw_kms_validate_mode_vram(dev_priv,
2468 						bmode->hdisplay * assumed_bpp,
2469 						bmode->vdisplay))
2470 			continue;
2471 
2472 		mode = drm_mode_duplicate(dev, bmode);
2473 		if (!mode)
2474 			return 0;
2475 
2476 		drm_mode_probed_add(connector, mode);
2477 	}
2478 
2479 	drm_connector_list_update(connector);
2480 	/* Move the prefered mode first, help apps pick the right mode. */
2481 	drm_mode_sort(&connector->modes);
2482 
2483 	return 1;
2484 }
2485 
2486 /**
2487  * vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
2488  * @dev: drm device for the ioctl
2489  * @data: data pointer for the ioctl
2490  * @file_priv: drm file for the ioctl call
2491  *
2492  * Update preferred topology of display unit as per ioctl request. The topology
2493  * is expressed as array of drm_vmw_rect.
2494  * e.g.
2495  * [0 0 640 480] [640 0 800 600] [0 480 640 480]
2496  *
2497  * NOTE:
2498  * The x and y offset (upper left) in drm_vmw_rect cannot be less than 0. Beside
2499  * device limit on topology, x + w and y + h (lower right) cannot be greater
2500  * than INT_MAX. So topology beyond these limits will return with error.
2501  *
2502  * Returns:
2503  * Zero on success, negative errno on failure.
2504  */
2505 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
2506 				struct drm_file *file_priv)
2507 {
2508 	struct vmw_private *dev_priv = vmw_priv(dev);
2509 	struct drm_mode_config *mode_config = &dev->mode_config;
2510 	struct drm_vmw_update_layout_arg *arg =
2511 		(struct drm_vmw_update_layout_arg *)data;
2512 	void __user *user_rects;
2513 	struct drm_vmw_rect *rects;
2514 	struct drm_rect *drm_rects;
2515 	unsigned rects_size;
2516 	int ret, i;
2517 
2518 	if (!arg->num_outputs) {
2519 		struct drm_rect def_rect = {0, 0,
2520 					    VMWGFX_MIN_INITIAL_WIDTH,
2521 					    VMWGFX_MIN_INITIAL_HEIGHT};
2522 		vmw_du_update_layout(dev_priv, 1, &def_rect);
2523 		return 0;
2524 	}
2525 
2526 	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
2527 	rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
2528 			GFP_KERNEL);
2529 	if (unlikely(!rects))
2530 		return -ENOMEM;
2531 
2532 	user_rects = (void __user *)(unsigned long)arg->rects;
2533 	ret = copy_from_user(rects, user_rects, rects_size);
2534 	if (unlikely(ret != 0)) {
2535 		DRM_ERROR("Failed to get rects.\n");
2536 		ret = -EFAULT;
2537 		goto out_free;
2538 	}
2539 
2540 	drm_rects = (struct drm_rect *)rects;
2541 
2542 	VMW_DEBUG_KMS("Layout count = %u\n", arg->num_outputs);
2543 	for (i = 0; i < arg->num_outputs; i++) {
2544 		struct drm_vmw_rect curr_rect;
2545 
2546 		/* Verify user-space for overflow as kernel use drm_rect */
2547 		if ((rects[i].x + rects[i].w > INT_MAX) ||
2548 		    (rects[i].y + rects[i].h > INT_MAX)) {
2549 			ret = -ERANGE;
2550 			goto out_free;
2551 		}
2552 
2553 		curr_rect = rects[i];
2554 		drm_rects[i].x1 = curr_rect.x;
2555 		drm_rects[i].y1 = curr_rect.y;
2556 		drm_rects[i].x2 = curr_rect.x + curr_rect.w;
2557 		drm_rects[i].y2 = curr_rect.y + curr_rect.h;
2558 
2559 		VMW_DEBUG_KMS("  x1 = %d y1 = %d x2 = %d y2 = %d\n",
2560 			      drm_rects[i].x1, drm_rects[i].y1,
2561 			      drm_rects[i].x2, drm_rects[i].y2);
2562 
2563 		/*
2564 		 * Currently this check is limiting the topology within
2565 		 * mode_config->max (which actually is max texture size
2566 		 * supported by virtual device). This limit is here to address
2567 		 * window managers that create a big framebuffer for whole
2568 		 * topology.
2569 		 */
2570 		if (drm_rects[i].x1 < 0 ||  drm_rects[i].y1 < 0 ||
2571 		    drm_rects[i].x2 > mode_config->max_width ||
2572 		    drm_rects[i].y2 > mode_config->max_height) {
2573 			VMW_DEBUG_KMS("Invalid layout %d %d %d %d\n",
2574 				      drm_rects[i].x1, drm_rects[i].y1,
2575 				      drm_rects[i].x2, drm_rects[i].y2);
2576 			ret = -EINVAL;
2577 			goto out_free;
2578 		}
2579 	}
2580 
2581 	ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);
2582 
2583 	if (ret == 0)
2584 		vmw_du_update_layout(dev_priv, arg->num_outputs, drm_rects);
2585 
2586 out_free:
2587 	kfree(rects);
2588 	return ret;
2589 }
2590 
2591 /**
2592  * vmw_kms_helper_dirty - Helper to build commands and perform actions based
2593  * on a set of cliprects and a set of display units.
2594  *
2595  * @dev_priv: Pointer to a device private structure.
2596  * @framebuffer: Pointer to the framebuffer on which to perform the actions.
2597  * @clips: A set of struct drm_clip_rect. Either this os @vclips must be NULL.
2598  * Cliprects are given in framebuffer coordinates.
2599  * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
2600  * be NULL. Cliprects are given in source coordinates.
2601  * @dest_x: X coordinate offset for the crtc / destination clip rects.
2602  * @dest_y: Y coordinate offset for the crtc / destination clip rects.
2603  * @num_clips: Number of cliprects in the @clips or @vclips array.
2604  * @increment: Integer with which to increment the clip counter when looping.
2605  * Used to skip a predetermined number of clip rects.
2606  * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
2607  */
2608 int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
2609 			 struct vmw_framebuffer *framebuffer,
2610 			 const struct drm_clip_rect *clips,
2611 			 const struct drm_vmw_rect *vclips,
2612 			 s32 dest_x, s32 dest_y,
2613 			 int num_clips,
2614 			 int increment,
2615 			 struct vmw_kms_dirty *dirty)
2616 {
2617 	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
2618 	struct drm_crtc *crtc;
2619 	u32 num_units = 0;
2620 	u32 i, k;
2621 
2622 	dirty->dev_priv = dev_priv;
2623 
2624 	/* If crtc is passed, no need to iterate over other display units */
2625 	if (dirty->crtc) {
2626 		units[num_units++] = vmw_crtc_to_du(dirty->crtc);
2627 	} else {
2628 		list_for_each_entry(crtc, &dev_priv->drm.mode_config.crtc_list,
2629 				    head) {
2630 			struct drm_plane *plane = crtc->primary;
2631 
2632 			if (plane->state->fb == &framebuffer->base)
2633 				units[num_units++] = vmw_crtc_to_du(crtc);
2634 		}
2635 	}
2636 
2637 	for (k = 0; k < num_units; k++) {
2638 		struct vmw_display_unit *unit = units[k];
2639 		s32 crtc_x = unit->crtc.x;
2640 		s32 crtc_y = unit->crtc.y;
2641 		s32 crtc_width = unit->crtc.mode.hdisplay;
2642 		s32 crtc_height = unit->crtc.mode.vdisplay;
2643 		const struct drm_clip_rect *clips_ptr = clips;
2644 		const struct drm_vmw_rect *vclips_ptr = vclips;
2645 
2646 		dirty->unit = unit;
2647 		if (dirty->fifo_reserve_size > 0) {
2648 			dirty->cmd = VMW_CMD_RESERVE(dev_priv,
2649 						      dirty->fifo_reserve_size);
2650 			if (!dirty->cmd)
2651 				return -ENOMEM;
2652 
2653 			memset(dirty->cmd, 0, dirty->fifo_reserve_size);
2654 		}
2655 		dirty->num_hits = 0;
2656 		for (i = 0; i < num_clips; i++, clips_ptr += increment,
2657 		       vclips_ptr += increment) {
2658 			s32 clip_left;
2659 			s32 clip_top;
2660 
2661 			/*
2662 			 * Select clip array type. Note that integer type
2663 			 * in @clips is unsigned short, whereas in @vclips
2664 			 * it's 32-bit.
2665 			 */
2666 			if (clips) {
2667 				dirty->fb_x = (s32) clips_ptr->x1;
2668 				dirty->fb_y = (s32) clips_ptr->y1;
2669 				dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
2670 					crtc_x;
2671 				dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
2672 					crtc_y;
2673 			} else {
2674 				dirty->fb_x = vclips_ptr->x;
2675 				dirty->fb_y = vclips_ptr->y;
2676 				dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
2677 					dest_x - crtc_x;
2678 				dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
2679 					dest_y - crtc_y;
2680 			}
2681 
2682 			dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
2683 			dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;
2684 
2685 			/* Skip this clip if it's outside the crtc region */
2686 			if (dirty->unit_x1 >= crtc_width ||
2687 			    dirty->unit_y1 >= crtc_height ||
2688 			    dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
2689 				continue;
2690 
2691 			/* Clip right and bottom to crtc limits */
2692 			dirty->unit_x2 = min_t(s32, dirty->unit_x2,
2693 					       crtc_width);
2694 			dirty->unit_y2 = min_t(s32, dirty->unit_y2,
2695 					       crtc_height);
2696 
2697 			/* Clip left and top to crtc limits */
2698 			clip_left = min_t(s32, dirty->unit_x1, 0);
2699 			clip_top = min_t(s32, dirty->unit_y1, 0);
2700 			dirty->unit_x1 -= clip_left;
2701 			dirty->unit_y1 -= clip_top;
2702 			dirty->fb_x -= clip_left;
2703 			dirty->fb_y -= clip_top;
2704 
2705 			dirty->clip(dirty);
2706 		}
2707 
2708 		dirty->fifo_commit(dirty);
2709 	}
2710 
2711 	return 0;
2712 }
2713 
2714 /**
2715  * vmw_kms_helper_validation_finish - Helper for post KMS command submission
2716  * cleanup and fencing
2717  * @dev_priv: Pointer to the device-private struct
2718  * @file_priv: Pointer identifying the client when user-space fencing is used
2719  * @ctx: Pointer to the validation context
2720  * @out_fence: If non-NULL, returned refcounted fence-pointer
2721  * @user_fence_rep: If non-NULL, pointer to user-space address area
2722  * in which to copy user-space fence info
2723  */
2724 void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
2725 				      struct drm_file *file_priv,
2726 				      struct vmw_validation_context *ctx,
2727 				      struct vmw_fence_obj **out_fence,
2728 				      struct drm_vmw_fence_rep __user *
2729 				      user_fence_rep)
2730 {
2731 	struct vmw_fence_obj *fence = NULL;
2732 	uint32_t handle = 0;
2733 	int ret = 0;
2734 
2735 	if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
2736 	    out_fence)
2737 		ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
2738 						 file_priv ? &handle : NULL);
2739 	vmw_validation_done(ctx, fence);
2740 	if (file_priv)
2741 		vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
2742 					    ret, user_fence_rep, fence,
2743 					    handle, -1);
2744 	if (out_fence)
2745 		*out_fence = fence;
2746 	else
2747 		vmw_fence_obj_unreference(&fence);
2748 }
2749 
2750 /**
2751  * vmw_kms_update_proxy - Helper function to update a proxy surface from
2752  * its backing MOB.
2753  *
2754  * @res: Pointer to the surface resource
2755  * @clips: Clip rects in framebuffer (surface) space.
2756  * @num_clips: Number of clips in @clips.
2757  * @increment: Integer with which to increment the clip counter when looping.
2758  * Used to skip a predetermined number of clip rects.
2759  *
2760  * This function makes sure the proxy surface is updated from its backing MOB
2761  * using the region given by @clips. The surface resource @res and its backing
2762  * MOB needs to be reserved and validated on call.
2763  */
2764 int vmw_kms_update_proxy(struct vmw_resource *res,
2765 			 const struct drm_clip_rect *clips,
2766 			 unsigned num_clips,
2767 			 int increment)
2768 {
2769 	struct vmw_private *dev_priv = res->dev_priv;
2770 	struct drm_vmw_size *size = &vmw_res_to_srf(res)->metadata.base_size;
2771 	struct {
2772 		SVGA3dCmdHeader header;
2773 		SVGA3dCmdUpdateGBImage body;
2774 	} *cmd;
2775 	SVGA3dBox *box;
2776 	size_t copy_size = 0;
2777 	int i;
2778 
2779 	if (!clips)
2780 		return 0;
2781 
2782 	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd) * num_clips);
2783 	if (!cmd)
2784 		return -ENOMEM;
2785 
2786 	for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
2787 		box = &cmd->body.box;
2788 
2789 		cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
2790 		cmd->header.size = sizeof(cmd->body);
2791 		cmd->body.image.sid = res->id;
2792 		cmd->body.image.face = 0;
2793 		cmd->body.image.mipmap = 0;
2794 
2795 		if (clips->x1 > size->width || clips->x2 > size->width ||
2796 		    clips->y1 > size->height || clips->y2 > size->height) {
2797 			DRM_ERROR("Invalid clips outsize of framebuffer.\n");
2798 			return -EINVAL;
2799 		}
2800 
2801 		box->x = clips->x1;
2802 		box->y = clips->y1;
2803 		box->z = 0;
2804 		box->w = clips->x2 - clips->x1;
2805 		box->h = clips->y2 - clips->y1;
2806 		box->d = 1;
2807 
2808 		copy_size += sizeof(*cmd);
2809 	}
2810 
2811 	vmw_cmd_commit(dev_priv, copy_size);
2812 
2813 	return 0;
2814 }
2815 
2816 /**
2817  * vmw_kms_create_implicit_placement_property - Set up the implicit placement
2818  * property.
2819  *
2820  * @dev_priv: Pointer to a device private struct.
2821  *
2822  * Sets up the implicit placement property unless it's already set up.
2823  */
2824 void
2825 vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv)
2826 {
2827 	if (dev_priv->implicit_placement_property)
2828 		return;
2829 
2830 	dev_priv->implicit_placement_property =
2831 		drm_property_create_range(&dev_priv->drm,
2832 					  DRM_MODE_PROP_IMMUTABLE,
2833 					  "implicit_placement", 0, 1);
2834 }
2835 
2836 /**
2837  * vmw_kms_suspend - Save modesetting state and turn modesetting off.
2838  *
2839  * @dev: Pointer to the drm device
2840  * Return: 0 on success. Negative error code on failure.
2841  */
2842 int vmw_kms_suspend(struct drm_device *dev)
2843 {
2844 	struct vmw_private *dev_priv = vmw_priv(dev);
2845 
2846 	dev_priv->suspend_state = drm_atomic_helper_suspend(dev);
2847 	if (IS_ERR(dev_priv->suspend_state)) {
2848 		int ret = PTR_ERR(dev_priv->suspend_state);
2849 
2850 		DRM_ERROR("Failed kms suspend: %d\n", ret);
2851 		dev_priv->suspend_state = NULL;
2852 
2853 		return ret;
2854 	}
2855 
2856 	return 0;
2857 }
2858 
2859 
2860 /**
2861  * vmw_kms_resume - Re-enable modesetting and restore state
2862  *
2863  * @dev: Pointer to the drm device
2864  * Return: 0 on success. Negative error code on failure.
2865  *
2866  * State is resumed from a previous vmw_kms_suspend(). It's illegal
2867  * to call this function without a previous vmw_kms_suspend().
2868  */
2869 int vmw_kms_resume(struct drm_device *dev)
2870 {
2871 	struct vmw_private *dev_priv = vmw_priv(dev);
2872 	int ret;
2873 
2874 	if (WARN_ON(!dev_priv->suspend_state))
2875 		return 0;
2876 
2877 	ret = drm_atomic_helper_resume(dev, dev_priv->suspend_state);
2878 	dev_priv->suspend_state = NULL;
2879 
2880 	return ret;
2881 }
2882 
2883 /**
2884  * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost
2885  *
2886  * @dev: Pointer to the drm device
2887  */
2888 void vmw_kms_lost_device(struct drm_device *dev)
2889 {
2890 	drm_atomic_helper_shutdown(dev);
2891 }
2892 
2893 /**
2894  * vmw_du_helper_plane_update - Helper to do plane update on a display unit.
2895  * @update: The closure structure.
2896  *
2897  * Call this helper after setting callbacks in &vmw_du_update_plane to do plane
2898  * update on display unit.
2899  *
2900  * Return: 0 on success or a negative error code on failure.
2901  */
2902 int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
2903 {
2904 	struct drm_plane_state *state = update->plane->state;
2905 	struct drm_plane_state *old_state = update->old_state;
2906 	struct drm_atomic_helper_damage_iter iter;
2907 	struct drm_rect clip;
2908 	struct drm_rect bb;
2909 	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
2910 	uint32_t reserved_size = 0;
2911 	uint32_t submit_size = 0;
2912 	uint32_t curr_size = 0;
2913 	uint32_t num_hits = 0;
2914 	void *cmd_start;
2915 	char *cmd_next;
2916 	int ret;
2917 
2918 	/*
2919 	 * Iterate in advance to check if really need plane update and find the
2920 	 * number of clips that actually are in plane src for fifo allocation.
2921 	 */
2922 	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
2923 	drm_atomic_for_each_plane_damage(&iter, &clip)
2924 		num_hits++;
2925 
2926 	if (num_hits == 0)
2927 		return 0;
2928 
2929 	if (update->vfb->bo) {
2930 		struct vmw_framebuffer_bo *vfbbo =
2931 			container_of(update->vfb, typeof(*vfbbo), base);
2932 
2933 		/*
2934 		 * For screen targets we want a mappable bo, for everything else we want
2935 		 * accelerated i.e. host backed (vram or gmr) bo. If the display unit
2936 		 * is not screen target then mob's shouldn't be available.
2937 		 */
2938 		if (update->dev_priv->active_display_unit == vmw_du_screen_target) {
2939 			vmw_bo_placement_set(vfbbo->buffer,
2940 					     VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR,
2941 					     VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR);
2942 		} else {
2943 			WARN_ON(update->dev_priv->has_mob);
2944 			vmw_bo_placement_set_default_accelerated(vfbbo->buffer);
2945 		}
2946 		ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer);
2947 	} else {
2948 		struct vmw_framebuffer_surface *vfbs =
2949 			container_of(update->vfb, typeof(*vfbs), base);
2950 
2951 		ret = vmw_validation_add_resource(&val_ctx, &vfbs->surface->res,
2952 						  0, VMW_RES_DIRTY_NONE, NULL,
2953 						  NULL);
2954 	}
2955 
2956 	if (ret)
2957 		return ret;
2958 
2959 	ret = vmw_validation_prepare(&val_ctx, update->mutex, update->intr);
2960 	if (ret)
2961 		goto out_unref;
2962 
2963 	reserved_size = update->calc_fifo_size(update, num_hits);
2964 	cmd_start = VMW_CMD_RESERVE(update->dev_priv, reserved_size);
2965 	if (!cmd_start) {
2966 		ret = -ENOMEM;
2967 		goto out_revert;
2968 	}
2969 
2970 	cmd_next = cmd_start;
2971 
2972 	if (update->post_prepare) {
2973 		curr_size = update->post_prepare(update, cmd_next);
2974 		cmd_next += curr_size;
2975 		submit_size += curr_size;
2976 	}
2977 
2978 	if (update->pre_clip) {
2979 		curr_size = update->pre_clip(update, cmd_next, num_hits);
2980 		cmd_next += curr_size;
2981 		submit_size += curr_size;
2982 	}
2983 
2984 	bb.x1 = INT_MAX;
2985 	bb.y1 = INT_MAX;
2986 	bb.x2 = INT_MIN;
2987 	bb.y2 = INT_MIN;
2988 
2989 	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
2990 	drm_atomic_for_each_plane_damage(&iter, &clip) {
2991 		uint32_t fb_x = clip.x1;
2992 		uint32_t fb_y = clip.y1;
2993 
2994 		vmw_du_translate_to_crtc(state, &clip);
2995 		if (update->clip) {
2996 			curr_size = update->clip(update, cmd_next, &clip, fb_x,
2997 						 fb_y);
2998 			cmd_next += curr_size;
2999 			submit_size += curr_size;
3000 		}
3001 		bb.x1 = min_t(int, bb.x1, clip.x1);
3002 		bb.y1 = min_t(int, bb.y1, clip.y1);
3003 		bb.x2 = max_t(int, bb.x2, clip.x2);
3004 		bb.y2 = max_t(int, bb.y2, clip.y2);
3005 	}
3006 
3007 	curr_size = update->post_clip(update, cmd_next, &bb);
3008 	submit_size += curr_size;
3009 
3010 	if (reserved_size < submit_size)
3011 		submit_size = 0;
3012 
3013 	vmw_cmd_commit(update->dev_priv, submit_size);
3014 
3015 	vmw_kms_helper_validation_finish(update->dev_priv, NULL, &val_ctx,
3016 					 update->out_fence, NULL);
3017 	return ret;
3018 
3019 out_revert:
3020 	vmw_validation_revert(&val_ctx);
3021 
3022 out_unref:
3023 	vmw_validation_unref_lists(&val_ctx);
3024 	return ret;
3025 }
3026