1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /**************************************************************************
3  *
4  * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 #include "vmwgfx_kms.h"
28 
29 #include "vmwgfx_bo.h"
30 #include "vmw_surface_cache.h"
31 
32 #include <drm/drm_atomic.h>
33 #include <drm/drm_atomic_helper.h>
34 #include <drm/drm_damage_helper.h>
35 #include <drm/drm_fourcc.h>
36 #include <drm/drm_rect.h>
37 #include <drm/drm_sysfs.h>
38 
39 void vmw_du_cleanup(struct vmw_display_unit *du)
40 {
41 	struct vmw_private *dev_priv = vmw_priv(du->primary.dev);
42 	drm_plane_cleanup(&du->primary);
43 	if (vmw_cmd_supported(dev_priv))
44 		drm_plane_cleanup(&du->cursor.base);
45 
46 	drm_connector_unregister(&du->connector);
47 	drm_crtc_cleanup(&du->crtc);
48 	drm_encoder_cleanup(&du->encoder);
49 	drm_connector_cleanup(&du->connector);
50 }
51 
52 /*
53  * Display Unit Cursor functions
54  */
55 
56 static int vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps);
57 static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
58 				  struct vmw_plane_state *vps,
59 				  u32 *image, u32 width, u32 height,
60 				  u32 hotspotX, u32 hotspotY);
61 
62 struct vmw_svga_fifo_cmd_define_cursor {
63 	u32 cmd;
64 	SVGAFifoCmdDefineAlphaCursor cursor;
65 };
66 
67 /**
68  * vmw_send_define_cursor_cmd - queue a define cursor command
69  * @dev_priv: the private driver struct
70  * @image: buffer which holds the cursor image
71  * @width: width of the mouse cursor image
72  * @height: height of the mouse cursor image
73  * @hotspotX: the horizontal position of mouse hotspot
74  * @hotspotY: the vertical position of mouse hotspot
75  */
76 static void vmw_send_define_cursor_cmd(struct vmw_private *dev_priv,
77 				       u32 *image, u32 width, u32 height,
78 				       u32 hotspotX, u32 hotspotY)
79 {
80 	struct vmw_svga_fifo_cmd_define_cursor *cmd;
81 	const u32 image_size = width * height * sizeof(*image);
82 	const u32 cmd_size = sizeof(*cmd) + image_size;
83 
84 	/* Try to reserve fifocmd space and swallow any failures;
85 	   such reservations cannot be left unconsumed for long
86 	   under the risk of clogging other fifocmd users, so
87 	   we treat reservations separtely from the way we treat
88 	   other fallible KMS-atomic resources at prepare_fb */
89 	cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);
90 
91 	if (unlikely(!cmd))
92 		return;
93 
94 	memset(cmd, 0, sizeof(*cmd));
95 
96 	memcpy(&cmd[1], image, image_size);
97 
98 	cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
99 	cmd->cursor.id = 0;
100 	cmd->cursor.width = width;
101 	cmd->cursor.height = height;
102 	cmd->cursor.hotspotX = hotspotX;
103 	cmd->cursor.hotspotY = hotspotY;
104 
105 	vmw_cmd_commit_flush(dev_priv, cmd_size);
106 }
107 
108 /**
109  * vmw_cursor_update_image - update the cursor image on the provided plane
110  * @dev_priv: the private driver struct
111  * @vps: the plane state of the cursor plane
112  * @image: buffer which holds the cursor image
113  * @width: width of the mouse cursor image
114  * @height: height of the mouse cursor image
115  * @hotspotX: the horizontal position of mouse hotspot
116  * @hotspotY: the vertical position of mouse hotspot
117  */
118 static void vmw_cursor_update_image(struct vmw_private *dev_priv,
119 				    struct vmw_plane_state *vps,
120 				    u32 *image, u32 width, u32 height,
121 				    u32 hotspotX, u32 hotspotY)
122 {
123 	if (vps->cursor.bo)
124 		vmw_cursor_update_mob(dev_priv, vps, image,
125 				      vps->base.crtc_w, vps->base.crtc_h,
126 				      hotspotX, hotspotY);
127 
128 	else
129 		vmw_send_define_cursor_cmd(dev_priv, image, width, height,
130 					   hotspotX, hotspotY);
131 }
132 
133 
134 /**
135  * vmw_cursor_update_mob - Update cursor vis CursorMob mechanism
136  *
137  * Called from inside vmw_du_cursor_plane_atomic_update to actually
138  * make the cursor-image live.
139  *
140  * @dev_priv: device to work with
141  * @vps: the plane state of the cursor plane
142  * @image: cursor source data to fill the MOB with
143  * @width: source data width
144  * @height: source data height
145  * @hotspotX: cursor hotspot x
146  * @hotspotY: cursor hotspot Y
147  */
148 static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
149 				  struct vmw_plane_state *vps,
150 				  u32 *image, u32 width, u32 height,
151 				  u32 hotspotX, u32 hotspotY)
152 {
153 	SVGAGBCursorHeader *header;
154 	SVGAGBAlphaCursorHeader *alpha_header;
155 	const u32 image_size = width * height * sizeof(*image);
156 
157 	header = vmw_bo_map_and_cache(vps->cursor.bo);
158 	alpha_header = &header->header.alphaHeader;
159 
160 	memset(header, 0, sizeof(*header));
161 
162 	header->type = SVGA_ALPHA_CURSOR;
163 	header->sizeInBytes = image_size;
164 
165 	alpha_header->hotspotX = hotspotX;
166 	alpha_header->hotspotY = hotspotY;
167 	alpha_header->width = width;
168 	alpha_header->height = height;
169 
170 	memcpy(header + 1, image, image_size);
171 	vmw_write(dev_priv, SVGA_REG_CURSOR_MOBID,
172 		  vps->cursor.bo->tbo.resource->start);
173 }
174 
175 
176 static u32 vmw_du_cursor_mob_size(u32 w, u32 h)
177 {
178 	return w * h * sizeof(u32) + sizeof(SVGAGBCursorHeader);
179 }
180 
181 /**
182  * vmw_du_cursor_plane_acquire_image -- Acquire the image data
183  * @vps: cursor plane state
184  */
185 static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps)
186 {
187 	bool is_iomem;
188 	if (vps->surf) {
189 		if (vps->surf_mapped)
190 			return vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo);
191 		return vps->surf->snooper.image;
192 	} else if (vps->bo)
193 		return ttm_kmap_obj_virtual(&vps->bo->map, &is_iomem);
194 	return NULL;
195 }
196 
197 static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps,
198 					    struct vmw_plane_state *new_vps)
199 {
200 	void *old_image;
201 	void *new_image;
202 	u32 size;
203 	bool changed;
204 
205 	if (old_vps->base.crtc_w != new_vps->base.crtc_w ||
206 	    old_vps->base.crtc_h != new_vps->base.crtc_h)
207 	    return true;
208 
209 	if (old_vps->cursor.hotspot_x != new_vps->cursor.hotspot_x ||
210 	    old_vps->cursor.hotspot_y != new_vps->cursor.hotspot_y)
211 	    return true;
212 
213 	size = new_vps->base.crtc_w * new_vps->base.crtc_h * sizeof(u32);
214 
215 	old_image = vmw_du_cursor_plane_acquire_image(old_vps);
216 	new_image = vmw_du_cursor_plane_acquire_image(new_vps);
217 
218 	changed = false;
219 	if (old_image && new_image)
220 		changed = memcmp(old_image, new_image, size) != 0;
221 
222 	return changed;
223 }
224 
225 static void vmw_du_destroy_cursor_mob(struct vmw_bo **vbo)
226 {
227 	if (!(*vbo))
228 		return;
229 
230 	ttm_bo_unpin(&(*vbo)->tbo);
231 	vmw_bo_unreference(vbo);
232 }
233 
234 static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp,
235 				  struct vmw_plane_state *vps)
236 {
237 	u32 i;
238 
239 	if (!vps->cursor.bo)
240 		return;
241 
242 	vmw_du_cursor_plane_unmap_cm(vps);
243 
244 	/* Look for a free slot to return this mob to the cache. */
245 	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
246 		if (!vcp->cursor_mobs[i]) {
247 			vcp->cursor_mobs[i] = vps->cursor.bo;
248 			vps->cursor.bo = NULL;
249 			return;
250 		}
251 	}
252 
253 	/* Cache is full: See if this mob is bigger than an existing mob. */
254 	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
255 		if (vcp->cursor_mobs[i]->tbo.base.size <
256 		    vps->cursor.bo->tbo.base.size) {
257 			vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
258 			vcp->cursor_mobs[i] = vps->cursor.bo;
259 			vps->cursor.bo = NULL;
260 			return;
261 		}
262 	}
263 
264 	/* Destroy it if it's not worth caching. */
265 	vmw_du_destroy_cursor_mob(&vps->cursor.bo);
266 }
267 
268 static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp,
269 				 struct vmw_plane_state *vps)
270 {
271 	struct vmw_private *dev_priv = vcp->base.dev->dev_private;
272 	u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
273 	u32 i;
274 	u32 cursor_max_dim, mob_max_size;
275 	struct vmw_fence_obj *fence = NULL;
276 	int ret;
277 
278 	if (!dev_priv->has_mob ||
279 	    (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0)
280 		return -EINVAL;
281 
282 	mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
283 	cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION);
284 
285 	if (size > mob_max_size || vps->base.crtc_w > cursor_max_dim ||
286 	    vps->base.crtc_h > cursor_max_dim)
287 		return -EINVAL;
288 
289 	if (vps->cursor.bo) {
290 		if (vps->cursor.bo->tbo.base.size >= size)
291 			return 0;
292 		vmw_du_put_cursor_mob(vcp, vps);
293 	}
294 
295 	/* Look for an unused mob in the cache. */
296 	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
297 		if (vcp->cursor_mobs[i] &&
298 		    vcp->cursor_mobs[i]->tbo.base.size >= size) {
299 			vps->cursor.bo = vcp->cursor_mobs[i];
300 			vcp->cursor_mobs[i] = NULL;
301 			return 0;
302 		}
303 	}
304 	/* Create a new mob if we can't find an existing one. */
305 	ret = vmw_bo_create_and_populate(dev_priv, size,
306 					 VMW_BO_DOMAIN_MOB,
307 					 &vps->cursor.bo);
308 
309 	if (ret != 0)
310 		return ret;
311 
312 	/* Fence the mob creation so we are guarateed to have the mob */
313 	ret = ttm_bo_reserve(&vps->cursor.bo->tbo, false, false, NULL);
314 	if (ret != 0)
315 		goto teardown;
316 
317 	ret = vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
318 	if (ret != 0) {
319 		ttm_bo_unreserve(&vps->cursor.bo->tbo);
320 		goto teardown;
321 	}
322 
323 	dma_fence_wait(&fence->base, false);
324 	dma_fence_put(&fence->base);
325 
326 	ttm_bo_unreserve(&vps->cursor.bo->tbo);
327 	return 0;
328 
329 teardown:
330 	vmw_du_destroy_cursor_mob(&vps->cursor.bo);
331 	return ret;
332 }
333 
334 
335 static void vmw_cursor_update_position(struct vmw_private *dev_priv,
336 				       bool show, int x, int y)
337 {
338 	const uint32_t svga_cursor_on = show ? SVGA_CURSOR_ON_SHOW
339 					     : SVGA_CURSOR_ON_HIDE;
340 	uint32_t count;
341 
342 	spin_lock(&dev_priv->cursor_lock);
343 	if (dev_priv->capabilities2 & SVGA_CAP2_EXTRA_REGS) {
344 		vmw_write(dev_priv, SVGA_REG_CURSOR4_X, x);
345 		vmw_write(dev_priv, SVGA_REG_CURSOR4_Y, y);
346 		vmw_write(dev_priv, SVGA_REG_CURSOR4_SCREEN_ID, SVGA3D_INVALID_ID);
347 		vmw_write(dev_priv, SVGA_REG_CURSOR4_ON, svga_cursor_on);
348 		vmw_write(dev_priv, SVGA_REG_CURSOR4_SUBMIT, 1);
349 	} else if (vmw_is_cursor_bypass3_enabled(dev_priv)) {
350 		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, svga_cursor_on);
351 		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
352 		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
353 		count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
354 		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
355 	} else {
356 		vmw_write(dev_priv, SVGA_REG_CURSOR_X, x);
357 		vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y);
358 		vmw_write(dev_priv, SVGA_REG_CURSOR_ON, svga_cursor_on);
359 	}
360 	spin_unlock(&dev_priv->cursor_lock);
361 }
362 
363 void vmw_kms_cursor_snoop(struct vmw_surface *srf,
364 			  struct ttm_object_file *tfile,
365 			  struct ttm_buffer_object *bo,
366 			  SVGA3dCmdHeader *header)
367 {
368 	struct ttm_bo_kmap_obj map;
369 	unsigned long kmap_offset;
370 	unsigned long kmap_num;
371 	SVGA3dCopyBox *box;
372 	unsigned box_count;
373 	void *virtual;
374 	bool is_iomem;
375 	struct vmw_dma_cmd {
376 		SVGA3dCmdHeader header;
377 		SVGA3dCmdSurfaceDMA dma;
378 	} *cmd;
379 	int i, ret;
380 	const struct SVGA3dSurfaceDesc *desc =
381 		vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
382 	const u32 image_pitch = VMW_CURSOR_SNOOP_WIDTH * desc->pitchBytesPerBlock;
383 
384 	cmd = container_of(header, struct vmw_dma_cmd, header);
385 
386 	/* No snooper installed, nothing to copy */
387 	if (!srf->snooper.image)
388 		return;
389 
390 	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
391 		DRM_ERROR("face and mipmap for cursors should never != 0\n");
392 		return;
393 	}
394 
395 	if (cmd->header.size < 64) {
396 		DRM_ERROR("at least one full copy box must be given\n");
397 		return;
398 	}
399 
400 	box = (SVGA3dCopyBox *)&cmd[1];
401 	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
402 			sizeof(SVGA3dCopyBox);
403 
404 	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
405 	    box->x != 0    || box->y != 0    || box->z != 0    ||
406 	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
407 	    box->d != 1    || box_count != 1 ||
408 	    box->w > VMW_CURSOR_SNOOP_WIDTH || box->h > VMW_CURSOR_SNOOP_HEIGHT) {
409 		/* TODO handle none page aligned offsets */
410 		/* TODO handle more dst & src != 0 */
411 		/* TODO handle more then one copy */
412 		DRM_ERROR("Can't snoop dma request for cursor!\n");
413 		DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
414 			  box->srcx, box->srcy, box->srcz,
415 			  box->x, box->y, box->z,
416 			  box->w, box->h, box->d, box_count,
417 			  cmd->dma.guest.ptr.offset);
418 		return;
419 	}
420 
421 	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
422 	kmap_num = (VMW_CURSOR_SNOOP_HEIGHT*image_pitch) >> PAGE_SHIFT;
423 
424 	ret = ttm_bo_reserve(bo, true, false, NULL);
425 	if (unlikely(ret != 0)) {
426 		DRM_ERROR("reserve failed\n");
427 		return;
428 	}
429 
430 	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
431 	if (unlikely(ret != 0))
432 		goto err_unreserve;
433 
434 	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
435 
436 	if (box->w == VMW_CURSOR_SNOOP_WIDTH && cmd->dma.guest.pitch == image_pitch) {
437 		memcpy(srf->snooper.image, virtual,
438 		       VMW_CURSOR_SNOOP_HEIGHT*image_pitch);
439 	} else {
440 		/* Image is unsigned pointer. */
441 		for (i = 0; i < box->h; i++)
442 			memcpy(srf->snooper.image + i * image_pitch,
443 			       virtual + i * cmd->dma.guest.pitch,
444 			       box->w * desc->pitchBytesPerBlock);
445 	}
446 
447 	srf->snooper.age++;
448 
449 	ttm_bo_kunmap(&map);
450 err_unreserve:
451 	ttm_bo_unreserve(bo);
452 }
453 
454 /**
455  * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
456  *
457  * @dev_priv: Pointer to the device private struct.
458  *
459  * Clears all legacy hotspots.
460  */
461 void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
462 {
463 	struct drm_device *dev = &dev_priv->drm;
464 	struct vmw_display_unit *du;
465 	struct drm_crtc *crtc;
466 
467 	drm_modeset_lock_all(dev);
468 	drm_for_each_crtc(crtc, dev) {
469 		du = vmw_crtc_to_du(crtc);
470 
471 		du->hotspot_x = 0;
472 		du->hotspot_y = 0;
473 	}
474 	drm_modeset_unlock_all(dev);
475 }
476 
477 void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
478 {
479 	struct drm_device *dev = &dev_priv->drm;
480 	struct vmw_display_unit *du;
481 	struct drm_crtc *crtc;
482 
483 	mutex_lock(&dev->mode_config.mutex);
484 
485 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
486 		du = vmw_crtc_to_du(crtc);
487 		if (!du->cursor_surface ||
488 		    du->cursor_age == du->cursor_surface->snooper.age ||
489 		    !du->cursor_surface->snooper.image)
490 			continue;
491 
492 		du->cursor_age = du->cursor_surface->snooper.age;
493 		vmw_send_define_cursor_cmd(dev_priv,
494 					   du->cursor_surface->snooper.image,
495 					   VMW_CURSOR_SNOOP_WIDTH,
496 					   VMW_CURSOR_SNOOP_HEIGHT,
497 					   du->hotspot_x + du->core_hotspot_x,
498 					   du->hotspot_y + du->core_hotspot_y);
499 	}
500 
501 	mutex_unlock(&dev->mode_config.mutex);
502 }
503 
504 
505 void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
506 {
507 	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
508 	u32 i;
509 
510 	vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);
511 
512 	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++)
513 		vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
514 
515 	drm_plane_cleanup(plane);
516 }
517 
518 
519 void vmw_du_primary_plane_destroy(struct drm_plane *plane)
520 {
521 	drm_plane_cleanup(plane);
522 
523 	/* Planes are static in our case so we don't free it */
524 }
525 
526 
527 /**
528  * vmw_du_plane_unpin_surf - unpins resource associated with a framebuffer surface
529  *
530  * @vps: plane state associated with the display surface
531  * @unreference: true if we also want to unreference the display.
532  */
533 void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps,
534 			     bool unreference)
535 {
536 	if (vps->surf) {
537 		if (vps->pinned) {
538 			vmw_resource_unpin(&vps->surf->res);
539 			vps->pinned--;
540 		}
541 
542 		if (unreference) {
543 			if (vps->pinned)
544 				DRM_ERROR("Surface still pinned\n");
545 			vmw_surface_unreference(&vps->surf);
546 		}
547 	}
548 }
549 
550 
551 /**
552  * vmw_du_plane_cleanup_fb - Unpins the plane surface
553  *
554  * @plane:  display plane
555  * @old_state: Contains the FB to clean up
556  *
557  * Unpins the framebuffer surface
558  *
559  * Returns 0 on success
560  */
561 void
562 vmw_du_plane_cleanup_fb(struct drm_plane *plane,
563 			struct drm_plane_state *old_state)
564 {
565 	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
566 
567 	vmw_du_plane_unpin_surf(vps, false);
568 }
569 
570 
571 /**
572  * vmw_du_cursor_plane_map_cm - Maps the cursor mobs.
573  *
574  * @vps: plane_state
575  *
576  * Returns 0 on success
577  */
578 
579 static int
580 vmw_du_cursor_plane_map_cm(struct vmw_plane_state *vps)
581 {
582 	int ret;
583 	u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
584 	struct ttm_buffer_object *bo;
585 
586 	if (!vps->cursor.bo)
587 		return -EINVAL;
588 
589 	bo = &vps->cursor.bo->tbo;
590 
591 	if (bo->base.size < size)
592 		return -EINVAL;
593 
594 	if (vps->cursor.bo->map.virtual)
595 		return 0;
596 
597 	ret = ttm_bo_reserve(bo, false, false, NULL);
598 	if (unlikely(ret != 0))
599 		return -ENOMEM;
600 
601 	vmw_bo_map_and_cache(vps->cursor.bo);
602 
603 	ttm_bo_unreserve(bo);
604 
605 	if (unlikely(ret != 0))
606 		return -ENOMEM;
607 
608 	return 0;
609 }
610 
611 
612 /**
613  * vmw_du_cursor_plane_unmap_cm - Unmaps the cursor mobs.
614  *
615  * @vps: state of the cursor plane
616  *
617  * Returns 0 on success
618  */
619 
620 static int
621 vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps)
622 {
623 	int ret = 0;
624 	struct vmw_bo *vbo = vps->cursor.bo;
625 
626 	if (!vbo || !vbo->map.virtual)
627 		return 0;
628 
629 	ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL);
630 	if (likely(ret == 0)) {
631 		vmw_bo_unmap(vbo);
632 		ttm_bo_unreserve(&vbo->tbo);
633 	}
634 
635 	return ret;
636 }
637 
638 
639 /**
640  * vmw_du_cursor_plane_cleanup_fb - Unpins the plane surface
641  *
642  * @plane: cursor plane
643  * @old_state: contains the state to clean up
644  *
645  * Unmaps all cursor bo mappings and unpins the cursor surface
646  *
647  * Returns 0 on success
648  */
649 void
650 vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
651 			       struct drm_plane_state *old_state)
652 {
653 	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
654 	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
655 	bool is_iomem;
656 
657 	if (vps->surf_mapped) {
658 		vmw_bo_unmap(vps->surf->res.guest_memory_bo);
659 		vps->surf_mapped = false;
660 	}
661 
662 	if (vps->bo && ttm_kmap_obj_virtual(&vps->bo->map, &is_iomem)) {
663 		const int ret = ttm_bo_reserve(&vps->bo->tbo, true, false, NULL);
664 
665 		if (likely(ret == 0)) {
666 			ttm_bo_kunmap(&vps->bo->map);
667 			ttm_bo_unreserve(&vps->bo->tbo);
668 		}
669 	}
670 
671 	vmw_du_cursor_plane_unmap_cm(vps);
672 	vmw_du_put_cursor_mob(vcp, vps);
673 
674 	vmw_du_plane_unpin_surf(vps, false);
675 
676 	if (vps->surf) {
677 		vmw_surface_unreference(&vps->surf);
678 		vps->surf = NULL;
679 	}
680 
681 	if (vps->bo) {
682 		vmw_bo_unreference(&vps->bo);
683 		vps->bo = NULL;
684 	}
685 }
686 
687 
688 /**
689  * vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it
690  *
691  * @plane:  display plane
692  * @new_state: info on the new plane state, including the FB
693  *
694  * Returns 0 on success
695  */
696 int
697 vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
698 			       struct drm_plane_state *new_state)
699 {
700 	struct drm_framebuffer *fb = new_state->fb;
701 	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
702 	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
703 	int ret = 0;
704 
705 	if (vps->surf) {
706 		vmw_surface_unreference(&vps->surf);
707 		vps->surf = NULL;
708 	}
709 
710 	if (vps->bo) {
711 		vmw_bo_unreference(&vps->bo);
712 		vps->bo = NULL;
713 	}
714 
715 	if (fb) {
716 		if (vmw_framebuffer_to_vfb(fb)->bo) {
717 			vps->bo = vmw_framebuffer_to_vfbd(fb)->buffer;
718 			vmw_bo_reference(vps->bo);
719 		} else {
720 			vps->surf = vmw_framebuffer_to_vfbs(fb)->surface;
721 			vmw_surface_reference(vps->surf);
722 		}
723 	}
724 
725 	if (!vps->surf && vps->bo) {
726 		const u32 size = new_state->crtc_w * new_state->crtc_h * sizeof(u32);
727 
728 		/*
729 		 * Not using vmw_bo_map_and_cache() helper here as we need to
730 		 * reserve the ttm_buffer_object first which
731 		 * vmw_bo_map_and_cache() omits.
732 		 */
733 		ret = ttm_bo_reserve(&vps->bo->tbo, true, false, NULL);
734 
735 		if (unlikely(ret != 0))
736 			return -ENOMEM;
737 
738 		ret = ttm_bo_kmap(&vps->bo->tbo, 0, PFN_UP(size), &vps->bo->map);
739 
740 		ttm_bo_unreserve(&vps->bo->tbo);
741 
742 		if (unlikely(ret != 0))
743 			return -ENOMEM;
744 	} else if (vps->surf && !vps->bo && vps->surf->res.guest_memory_bo) {
745 
746 		WARN_ON(vps->surf->snooper.image);
747 		ret = ttm_bo_reserve(&vps->surf->res.guest_memory_bo->tbo, true, false,
748 				     NULL);
749 		if (unlikely(ret != 0))
750 			return -ENOMEM;
751 		vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo);
752 		ttm_bo_unreserve(&vps->surf->res.guest_memory_bo->tbo);
753 		vps->surf_mapped = true;
754 	}
755 
756 	if (vps->surf || vps->bo) {
757 		vmw_du_get_cursor_mob(vcp, vps);
758 		vmw_du_cursor_plane_map_cm(vps);
759 	}
760 
761 	return 0;
762 }
763 
764 
765 void
766 vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
767 				  struct drm_atomic_state *state)
768 {
769 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
770 									   plane);
771 	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
772 									   plane);
773 	struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc;
774 	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
775 	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
776 	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
777 	struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
778 	s32 hotspot_x, hotspot_y;
779 
780 	hotspot_x = du->hotspot_x;
781 	hotspot_y = du->hotspot_y;
782 
783 	if (new_state->fb) {
784 		hotspot_x += new_state->fb->hot_x;
785 		hotspot_y += new_state->fb->hot_y;
786 	}
787 
788 	du->cursor_surface = vps->surf;
789 	du->cursor_bo = vps->bo;
790 
791 	if (!vps->surf && !vps->bo) {
792 		vmw_cursor_update_position(dev_priv, false, 0, 0);
793 		return;
794 	}
795 
796 	vps->cursor.hotspot_x = hotspot_x;
797 	vps->cursor.hotspot_y = hotspot_y;
798 
799 	if (vps->surf) {
800 		du->cursor_age = du->cursor_surface->snooper.age;
801 	}
802 
803 	if (!vmw_du_cursor_plane_has_changed(old_vps, vps)) {
804 		/*
805 		 * If it hasn't changed, avoid making the device do extra
806 		 * work by keeping the old cursor active.
807 		 */
808 		struct vmw_cursor_plane_state tmp = old_vps->cursor;
809 		old_vps->cursor = vps->cursor;
810 		vps->cursor = tmp;
811 	} else {
812 		void *image = vmw_du_cursor_plane_acquire_image(vps);
813 		if (image)
814 			vmw_cursor_update_image(dev_priv, vps, image,
815 						new_state->crtc_w,
816 						new_state->crtc_h,
817 						hotspot_x, hotspot_y);
818 	}
819 
820 	du->cursor_x = new_state->crtc_x + du->set_gui_x;
821 	du->cursor_y = new_state->crtc_y + du->set_gui_y;
822 
823 	vmw_cursor_update_position(dev_priv, true,
824 				   du->cursor_x + hotspot_x,
825 				   du->cursor_y + hotspot_y);
826 
827 	du->core_hotspot_x = hotspot_x - du->hotspot_x;
828 	du->core_hotspot_y = hotspot_y - du->hotspot_y;
829 }
830 
831 
832 /**
833  * vmw_du_primary_plane_atomic_check - check if the new state is okay
834  *
835  * @plane: display plane
836  * @state: info on the new plane state, including the FB
837  *
838  * Check if the new state is settable given the current state.  Other
839  * than what the atomic helper checks, we care about crtc fitting
840  * the FB and maintaining one active framebuffer.
841  *
842  * Returns 0 on success
843  */
844 int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
845 				      struct drm_atomic_state *state)
846 {
847 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
848 									   plane);
849 	struct drm_crtc_state *crtc_state = NULL;
850 	struct drm_framebuffer *new_fb = new_state->fb;
851 	int ret;
852 
853 	if (new_state->crtc)
854 		crtc_state = drm_atomic_get_new_crtc_state(state,
855 							   new_state->crtc);
856 
857 	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
858 						  DRM_PLANE_NO_SCALING,
859 						  DRM_PLANE_NO_SCALING,
860 						  false, true);
861 
862 	if (!ret && new_fb) {
863 		struct drm_crtc *crtc = new_state->crtc;
864 		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
865 
866 		vmw_connector_state_to_vcs(du->connector.state);
867 	}
868 
869 
870 	return ret;
871 }
872 
873 
874 /**
875  * vmw_du_cursor_plane_atomic_check - check if the new state is okay
876  *
877  * @plane: cursor plane
878  * @state: info on the new plane state
879  *
880  * This is a chance to fail if the new cursor state does not fit
881  * our requirements.
882  *
883  * Returns 0 on success
884  */
885 int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
886 				     struct drm_atomic_state *state)
887 {
888 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
889 									   plane);
890 	int ret = 0;
891 	struct drm_crtc_state *crtc_state = NULL;
892 	struct vmw_surface *surface = NULL;
893 	struct drm_framebuffer *fb = new_state->fb;
894 
895 	if (new_state->crtc)
896 		crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
897 							   new_state->crtc);
898 
899 	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
900 						  DRM_PLANE_NO_SCALING,
901 						  DRM_PLANE_NO_SCALING,
902 						  true, true);
903 	if (ret)
904 		return ret;
905 
906 	/* Turning off */
907 	if (!fb)
908 		return 0;
909 
910 	/* A lot of the code assumes this */
911 	if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
912 		DRM_ERROR("Invalid cursor dimensions (%d, %d)\n",
913 			  new_state->crtc_w, new_state->crtc_h);
914 		return -EINVAL;
915 	}
916 
917 	if (!vmw_framebuffer_to_vfb(fb)->bo) {
918 		surface = vmw_framebuffer_to_vfbs(fb)->surface;
919 
920 		WARN_ON(!surface);
921 
922 		if (!surface ||
923 		    (!surface->snooper.image && !surface->res.guest_memory_bo)) {
924 			DRM_ERROR("surface not suitable for cursor\n");
925 			return -EINVAL;
926 		}
927 	}
928 
929 	return 0;
930 }
931 
932 
933 int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
934 			     struct drm_atomic_state *state)
935 {
936 	struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
937 									 crtc);
938 	struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
939 	int connector_mask = drm_connector_mask(&du->connector);
940 	bool has_primary = new_state->plane_mask &
941 			   drm_plane_mask(crtc->primary);
942 
943 	/* We always want to have an active plane with an active CRTC */
944 	if (has_primary != new_state->enable)
945 		return -EINVAL;
946 
947 
948 	if (new_state->connector_mask != connector_mask &&
949 	    new_state->connector_mask != 0) {
950 		DRM_ERROR("Invalid connectors configuration\n");
951 		return -EINVAL;
952 	}
953 
954 	/*
955 	 * Our virtual device does not have a dot clock, so use the logical
956 	 * clock value as the dot clock.
957 	 */
958 	if (new_state->mode.crtc_clock == 0)
959 		new_state->adjusted_mode.crtc_clock = new_state->mode.clock;
960 
961 	return 0;
962 }
963 
964 
965 void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
966 			      struct drm_atomic_state *state)
967 {
968 }
969 
970 
971 void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc,
972 			      struct drm_atomic_state *state)
973 {
974 }
975 
976 
977 /**
978  * vmw_du_crtc_duplicate_state - duplicate crtc state
979  * @crtc: DRM crtc
980  *
981  * Allocates and returns a copy of the crtc state (both common and
982  * vmw-specific) for the specified crtc.
983  *
984  * Returns: The newly allocated crtc state, or NULL on failure.
985  */
986 struct drm_crtc_state *
987 vmw_du_crtc_duplicate_state(struct drm_crtc *crtc)
988 {
989 	struct drm_crtc_state *state;
990 	struct vmw_crtc_state *vcs;
991 
992 	if (WARN_ON(!crtc->state))
993 		return NULL;
994 
995 	vcs = kmemdup(crtc->state, sizeof(*vcs), GFP_KERNEL);
996 
997 	if (!vcs)
998 		return NULL;
999 
1000 	state = &vcs->base;
1001 
1002 	__drm_atomic_helper_crtc_duplicate_state(crtc, state);
1003 
1004 	return state;
1005 }
1006 
1007 
1008 /**
1009  * vmw_du_crtc_reset - creates a blank vmw crtc state
1010  * @crtc: DRM crtc
1011  *
1012  * Resets the atomic state for @crtc by freeing the state pointer (which
1013  * might be NULL, e.g. at driver load time) and allocating a new empty state
1014  * object.
1015  */
1016 void vmw_du_crtc_reset(struct drm_crtc *crtc)
1017 {
1018 	struct vmw_crtc_state *vcs;
1019 
1020 
1021 	if (crtc->state) {
1022 		__drm_atomic_helper_crtc_destroy_state(crtc->state);
1023 
1024 		kfree(vmw_crtc_state_to_vcs(crtc->state));
1025 	}
1026 
1027 	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
1028 
1029 	if (!vcs) {
1030 		DRM_ERROR("Cannot allocate vmw_crtc_state\n");
1031 		return;
1032 	}
1033 
1034 	__drm_atomic_helper_crtc_reset(crtc, &vcs->base);
1035 }
1036 
1037 
1038 /**
1039  * vmw_du_crtc_destroy_state - destroy crtc state
1040  * @crtc: DRM crtc
1041  * @state: state object to destroy
1042  *
1043  * Destroys the crtc state (both common and vmw-specific) for the
1044  * specified plane.
1045  */
1046 void
1047 vmw_du_crtc_destroy_state(struct drm_crtc *crtc,
1048 			  struct drm_crtc_state *state)
1049 {
1050 	drm_atomic_helper_crtc_destroy_state(crtc, state);
1051 }
1052 
1053 
1054 /**
1055  * vmw_du_plane_duplicate_state - duplicate plane state
1056  * @plane: drm plane
1057  *
1058  * Allocates and returns a copy of the plane state (both common and
1059  * vmw-specific) for the specified plane.
1060  *
1061  * Returns: The newly allocated plane state, or NULL on failure.
1062  */
1063 struct drm_plane_state *
1064 vmw_du_plane_duplicate_state(struct drm_plane *plane)
1065 {
1066 	struct drm_plane_state *state;
1067 	struct vmw_plane_state *vps;
1068 
1069 	vps = kmemdup(plane->state, sizeof(*vps), GFP_KERNEL);
1070 
1071 	if (!vps)
1072 		return NULL;
1073 
1074 	vps->pinned = 0;
1075 	vps->cpp = 0;
1076 
1077 	memset(&vps->cursor, 0, sizeof(vps->cursor));
1078 
1079 	/* Each ref counted resource needs to be acquired again */
1080 	if (vps->surf)
1081 		(void) vmw_surface_reference(vps->surf);
1082 
1083 	if (vps->bo)
1084 		(void) vmw_bo_reference(vps->bo);
1085 
1086 	state = &vps->base;
1087 
1088 	__drm_atomic_helper_plane_duplicate_state(plane, state);
1089 
1090 	return state;
1091 }
1092 
1093 
1094 /**
1095  * vmw_du_plane_reset - creates a blank vmw plane state
1096  * @plane: drm plane
1097  *
1098  * Resets the atomic state for @plane by freeing the state pointer (which might
1099  * be NULL, e.g. at driver load time) and allocating a new empty state object.
1100  */
1101 void vmw_du_plane_reset(struct drm_plane *plane)
1102 {
1103 	struct vmw_plane_state *vps;
1104 
1105 	if (plane->state)
1106 		vmw_du_plane_destroy_state(plane, plane->state);
1107 
1108 	vps = kzalloc(sizeof(*vps), GFP_KERNEL);
1109 
1110 	if (!vps) {
1111 		DRM_ERROR("Cannot allocate vmw_plane_state\n");
1112 		return;
1113 	}
1114 
1115 	__drm_atomic_helper_plane_reset(plane, &vps->base);
1116 }
1117 
1118 
1119 /**
1120  * vmw_du_plane_destroy_state - destroy plane state
1121  * @plane: DRM plane
1122  * @state: state object to destroy
1123  *
1124  * Destroys the plane state (both common and vmw-specific) for the
1125  * specified plane.
1126  */
1127 void
1128 vmw_du_plane_destroy_state(struct drm_plane *plane,
1129 			   struct drm_plane_state *state)
1130 {
1131 	struct vmw_plane_state *vps = vmw_plane_state_to_vps(state);
1132 
1133 	/* Should have been freed by cleanup_fb */
1134 	if (vps->surf)
1135 		vmw_surface_unreference(&vps->surf);
1136 
1137 	if (vps->bo)
1138 		vmw_bo_unreference(&vps->bo);
1139 
1140 	drm_atomic_helper_plane_destroy_state(plane, state);
1141 }
1142 
1143 
1144 /**
1145  * vmw_du_connector_duplicate_state - duplicate connector state
1146  * @connector: DRM connector
1147  *
1148  * Allocates and returns a copy of the connector state (both common and
1149  * vmw-specific) for the specified connector.
1150  *
1151  * Returns: The newly allocated connector state, or NULL on failure.
1152  */
1153 struct drm_connector_state *
1154 vmw_du_connector_duplicate_state(struct drm_connector *connector)
1155 {
1156 	struct drm_connector_state *state;
1157 	struct vmw_connector_state *vcs;
1158 
1159 	if (WARN_ON(!connector->state))
1160 		return NULL;
1161 
1162 	vcs = kmemdup(connector->state, sizeof(*vcs), GFP_KERNEL);
1163 
1164 	if (!vcs)
1165 		return NULL;
1166 
1167 	state = &vcs->base;
1168 
1169 	__drm_atomic_helper_connector_duplicate_state(connector, state);
1170 
1171 	return state;
1172 }
1173 
1174 
1175 /**
1176  * vmw_du_connector_reset - creates a blank vmw connector state
1177  * @connector: DRM connector
1178  *
1179  * Resets the atomic state for @connector by freeing the state pointer (which
1180  * might be NULL, e.g. at driver load time) and allocating a new empty state
1181  * object.
1182  */
1183 void vmw_du_connector_reset(struct drm_connector *connector)
1184 {
1185 	struct vmw_connector_state *vcs;
1186 
1187 
1188 	if (connector->state) {
1189 		__drm_atomic_helper_connector_destroy_state(connector->state);
1190 
1191 		kfree(vmw_connector_state_to_vcs(connector->state));
1192 	}
1193 
1194 	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
1195 
1196 	if (!vcs) {
1197 		DRM_ERROR("Cannot allocate vmw_connector_state\n");
1198 		return;
1199 	}
1200 
1201 	__drm_atomic_helper_connector_reset(connector, &vcs->base);
1202 }
1203 
1204 
1205 /**
1206  * vmw_du_connector_destroy_state - destroy connector state
1207  * @connector: DRM connector
1208  * @state: state object to destroy
1209  *
1210  * Destroys the connector state (both common and vmw-specific) for the
1211  * specified plane.
1212  */
1213 void
1214 vmw_du_connector_destroy_state(struct drm_connector *connector,
1215 			  struct drm_connector_state *state)
1216 {
1217 	drm_atomic_helper_connector_destroy_state(connector, state);
1218 }
1219 /*
1220  * Generic framebuffer code
1221  */
1222 
1223 /*
1224  * Surface framebuffer code
1225  */
1226 
1227 static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
1228 {
1229 	struct vmw_framebuffer_surface *vfbs =
1230 		vmw_framebuffer_to_vfbs(framebuffer);
1231 
1232 	drm_framebuffer_cleanup(framebuffer);
1233 	vmw_surface_unreference(&vfbs->surface);
1234 
1235 	kfree(vfbs);
1236 }
1237 
1238 /**
1239  * vmw_kms_readback - Perform a readback from the screen system to
1240  * a buffer-object backed framebuffer.
1241  *
1242  * @dev_priv: Pointer to the device private structure.
1243  * @file_priv: Pointer to a struct drm_file identifying the caller.
1244  * Must be set to NULL if @user_fence_rep is NULL.
1245  * @vfb: Pointer to the buffer-object backed framebuffer.
1246  * @user_fence_rep: User-space provided structure for fence information.
1247  * Must be set to non-NULL if @file_priv is non-NULL.
1248  * @vclips: Array of clip rects.
1249  * @num_clips: Number of clip rects in @vclips.
1250  *
1251  * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
1252  * interrupted.
1253  */
1254 int vmw_kms_readback(struct vmw_private *dev_priv,
1255 		     struct drm_file *file_priv,
1256 		     struct vmw_framebuffer *vfb,
1257 		     struct drm_vmw_fence_rep __user *user_fence_rep,
1258 		     struct drm_vmw_rect *vclips,
1259 		     uint32_t num_clips)
1260 {
1261 	switch (dev_priv->active_display_unit) {
1262 	case vmw_du_screen_object:
1263 		return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
1264 					    user_fence_rep, vclips, num_clips,
1265 					    NULL);
1266 	case vmw_du_screen_target:
1267 		return vmw_kms_stdu_readback(dev_priv, file_priv, vfb,
1268 					     user_fence_rep, NULL, vclips, num_clips,
1269 					     1, NULL);
1270 	default:
1271 		WARN_ONCE(true,
1272 			  "Readback called with invalid display system.\n");
1273 }
1274 
1275 	return -ENOSYS;
1276 }
1277 
1278 
1279 static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
1280 	.destroy = vmw_framebuffer_surface_destroy,
1281 	.dirty = drm_atomic_helper_dirtyfb,
1282 };
1283 
1284 static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
1285 					   struct vmw_surface *surface,
1286 					   struct vmw_framebuffer **out,
1287 					   const struct drm_mode_fb_cmd2
1288 					   *mode_cmd,
1289 					   bool is_bo_proxy)
1290 
1291 {
1292 	struct drm_device *dev = &dev_priv->drm;
1293 	struct vmw_framebuffer_surface *vfbs;
1294 	enum SVGA3dSurfaceFormat format;
1295 	int ret;
1296 
1297 	/* 3D is only supported on HWv8 and newer hosts */
1298 	if (dev_priv->active_display_unit == vmw_du_legacy)
1299 		return -ENOSYS;
1300 
1301 	/*
1302 	 * Sanity checks.
1303 	 */
1304 
1305 	if (!drm_any_plane_has_format(&dev_priv->drm,
1306 				      mode_cmd->pixel_format,
1307 				      mode_cmd->modifier[0])) {
1308 		drm_dbg(&dev_priv->drm,
1309 			"unsupported pixel format %p4cc / modifier 0x%llx\n",
1310 			&mode_cmd->pixel_format, mode_cmd->modifier[0]);
1311 		return -EINVAL;
1312 	}
1313 
1314 	/* Surface must be marked as a scanout. */
1315 	if (unlikely(!surface->metadata.scanout))
1316 		return -EINVAL;
1317 
1318 	if (unlikely(surface->metadata.mip_levels[0] != 1 ||
1319 		     surface->metadata.num_sizes != 1 ||
1320 		     surface->metadata.base_size.width < mode_cmd->width ||
1321 		     surface->metadata.base_size.height < mode_cmd->height ||
1322 		     surface->metadata.base_size.depth != 1)) {
1323 		DRM_ERROR("Incompatible surface dimensions "
1324 			  "for requested mode.\n");
1325 		return -EINVAL;
1326 	}
1327 
1328 	switch (mode_cmd->pixel_format) {
1329 	case DRM_FORMAT_ARGB8888:
1330 		format = SVGA3D_A8R8G8B8;
1331 		break;
1332 	case DRM_FORMAT_XRGB8888:
1333 		format = SVGA3D_X8R8G8B8;
1334 		break;
1335 	case DRM_FORMAT_RGB565:
1336 		format = SVGA3D_R5G6B5;
1337 		break;
1338 	case DRM_FORMAT_XRGB1555:
1339 		format = SVGA3D_A1R5G5B5;
1340 		break;
1341 	default:
1342 		DRM_ERROR("Invalid pixel format: %p4cc\n",
1343 			  &mode_cmd->pixel_format);
1344 		return -EINVAL;
1345 	}
1346 
1347 	/*
1348 	 * For DX, surface format validation is done when surface->scanout
1349 	 * is set.
1350 	 */
1351 	if (!has_sm4_context(dev_priv) && format != surface->metadata.format) {
1352 		DRM_ERROR("Invalid surface format for requested mode.\n");
1353 		return -EINVAL;
1354 	}
1355 
1356 	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
1357 	if (!vfbs) {
1358 		ret = -ENOMEM;
1359 		goto out_err1;
1360 	}
1361 
1362 	drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
1363 	vfbs->surface = vmw_surface_reference(surface);
1364 	vfbs->base.user_handle = mode_cmd->handles[0];
1365 	vfbs->is_bo_proxy = is_bo_proxy;
1366 
1367 	*out = &vfbs->base;
1368 
1369 	ret = drm_framebuffer_init(dev, &vfbs->base.base,
1370 				   &vmw_framebuffer_surface_funcs);
1371 	if (ret)
1372 		goto out_err2;
1373 
1374 	return 0;
1375 
1376 out_err2:
1377 	vmw_surface_unreference(&surface);
1378 	kfree(vfbs);
1379 out_err1:
1380 	return ret;
1381 }
1382 
1383 /*
1384  * Buffer-object framebuffer code
1385  */
1386 
1387 static int vmw_framebuffer_bo_create_handle(struct drm_framebuffer *fb,
1388 					    struct drm_file *file_priv,
1389 					    unsigned int *handle)
1390 {
1391 	struct vmw_framebuffer_bo *vfbd =
1392 			vmw_framebuffer_to_vfbd(fb);
1393 
1394 	return drm_gem_handle_create(file_priv, &vfbd->buffer->tbo.base, handle);
1395 }
1396 
1397 static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
1398 {
1399 	struct vmw_framebuffer_bo *vfbd =
1400 		vmw_framebuffer_to_vfbd(framebuffer);
1401 
1402 	drm_framebuffer_cleanup(framebuffer);
1403 	vmw_bo_unreference(&vfbd->buffer);
1404 
1405 	kfree(vfbd);
1406 }
1407 
1408 static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
1409 	.create_handle = vmw_framebuffer_bo_create_handle,
1410 	.destroy = vmw_framebuffer_bo_destroy,
1411 	.dirty = drm_atomic_helper_dirtyfb,
1412 };
1413 
1414 /**
1415  * vmw_create_bo_proxy - create a proxy surface for the buffer object
1416  *
1417  * @dev: DRM device
1418  * @mode_cmd: parameters for the new surface
1419  * @bo_mob: MOB backing the buffer object
1420  * @srf_out: newly created surface
1421  *
1422  * When the content FB is a buffer object, we create a surface as a proxy to the
1423  * same buffer.  This way we can do a surface copy rather than a surface DMA.
1424  * This is a more efficient approach
1425  *
1426  * RETURNS:
1427  * 0 on success, error code otherwise
1428  */
1429 static int vmw_create_bo_proxy(struct drm_device *dev,
1430 			       const struct drm_mode_fb_cmd2 *mode_cmd,
1431 			       struct vmw_bo *bo_mob,
1432 			       struct vmw_surface **srf_out)
1433 {
1434 	struct vmw_surface_metadata metadata = {0};
1435 	uint32_t format;
1436 	struct vmw_resource *res;
1437 	unsigned int bytes_pp;
1438 	int ret;
1439 
1440 	switch (mode_cmd->pixel_format) {
1441 	case DRM_FORMAT_ARGB8888:
1442 	case DRM_FORMAT_XRGB8888:
1443 		format = SVGA3D_X8R8G8B8;
1444 		bytes_pp = 4;
1445 		break;
1446 
1447 	case DRM_FORMAT_RGB565:
1448 	case DRM_FORMAT_XRGB1555:
1449 		format = SVGA3D_R5G6B5;
1450 		bytes_pp = 2;
1451 		break;
1452 
1453 	case 8:
1454 		format = SVGA3D_P8;
1455 		bytes_pp = 1;
1456 		break;
1457 
1458 	default:
1459 		DRM_ERROR("Invalid framebuffer format %p4cc\n",
1460 			  &mode_cmd->pixel_format);
1461 		return -EINVAL;
1462 	}
1463 
1464 	metadata.format = format;
1465 	metadata.mip_levels[0] = 1;
1466 	metadata.num_sizes = 1;
1467 	metadata.base_size.width = mode_cmd->pitches[0] / bytes_pp;
1468 	metadata.base_size.height =  mode_cmd->height;
1469 	metadata.base_size.depth = 1;
1470 	metadata.scanout = true;
1471 
1472 	ret = vmw_gb_surface_define(vmw_priv(dev), &metadata, srf_out);
1473 	if (ret) {
1474 		DRM_ERROR("Failed to allocate proxy content buffer\n");
1475 		return ret;
1476 	}
1477 
1478 	res = &(*srf_out)->res;
1479 
1480 	/* Reserve and switch the backing mob. */
1481 	mutex_lock(&res->dev_priv->cmdbuf_mutex);
1482 	(void) vmw_resource_reserve(res, false, true);
1483 	vmw_user_bo_unref(&res->guest_memory_bo);
1484 	res->guest_memory_bo = vmw_user_bo_ref(bo_mob);
1485 	res->guest_memory_offset = 0;
1486 	vmw_resource_unreserve(res, false, false, false, NULL, 0);
1487 	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
1488 
1489 	return 0;
1490 }
1491 
1492 
1493 
1494 static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
1495 				      struct vmw_bo *bo,
1496 				      struct vmw_framebuffer **out,
1497 				      const struct drm_mode_fb_cmd2
1498 				      *mode_cmd)
1499 
1500 {
1501 	struct drm_device *dev = &dev_priv->drm;
1502 	struct vmw_framebuffer_bo *vfbd;
1503 	unsigned int requested_size;
1504 	int ret;
1505 
1506 	requested_size = mode_cmd->height * mode_cmd->pitches[0];
1507 	if (unlikely(requested_size > bo->tbo.base.size)) {
1508 		DRM_ERROR("Screen buffer object size is too small "
1509 			  "for requested mode.\n");
1510 		return -EINVAL;
1511 	}
1512 
1513 	if (!drm_any_plane_has_format(&dev_priv->drm,
1514 				      mode_cmd->pixel_format,
1515 				      mode_cmd->modifier[0])) {
1516 		drm_dbg(&dev_priv->drm,
1517 			"unsupported pixel format %p4cc / modifier 0x%llx\n",
1518 			&mode_cmd->pixel_format, mode_cmd->modifier[0]);
1519 		return -EINVAL;
1520 	}
1521 
1522 	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
1523 	if (!vfbd) {
1524 		ret = -ENOMEM;
1525 		goto out_err1;
1526 	}
1527 
1528 	vfbd->base.base.obj[0] = &bo->tbo.base;
1529 	drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
1530 	vfbd->base.bo = true;
1531 	vfbd->buffer = vmw_bo_reference(bo);
1532 	vfbd->base.user_handle = mode_cmd->handles[0];
1533 	*out = &vfbd->base;
1534 
1535 	ret = drm_framebuffer_init(dev, &vfbd->base.base,
1536 				   &vmw_framebuffer_bo_funcs);
1537 	if (ret)
1538 		goto out_err2;
1539 
1540 	return 0;
1541 
1542 out_err2:
1543 	vmw_bo_unreference(&bo);
1544 	kfree(vfbd);
1545 out_err1:
1546 	return ret;
1547 }
1548 
1549 
1550 /**
1551  * vmw_kms_srf_ok - check if a surface can be created
1552  *
1553  * @dev_priv: Pointer to device private struct.
1554  * @width: requested width
1555  * @height: requested height
1556  *
1557  * Surfaces need to be less than texture size
1558  */
1559 static bool
1560 vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
1561 {
1562 	if (width  > dev_priv->texture_max_width ||
1563 	    height > dev_priv->texture_max_height)
1564 		return false;
1565 
1566 	return true;
1567 }
1568 
1569 /**
1570  * vmw_kms_new_framebuffer - Create a new framebuffer.
1571  *
1572  * @dev_priv: Pointer to device private struct.
1573  * @bo: Pointer to buffer object to wrap the kms framebuffer around.
1574  * Either @bo or @surface must be NULL.
1575  * @surface: Pointer to a surface to wrap the kms framebuffer around.
1576  * Either @bo or @surface must be NULL.
1577  * @only_2d: No presents will occur to this buffer object based framebuffer.
1578  * This helps the code to do some important optimizations.
1579  * @mode_cmd: Frame-buffer metadata.
1580  */
1581 struct vmw_framebuffer *
1582 vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
1583 			struct vmw_bo *bo,
1584 			struct vmw_surface *surface,
1585 			bool only_2d,
1586 			const struct drm_mode_fb_cmd2 *mode_cmd)
1587 {
1588 	struct vmw_framebuffer *vfb = NULL;
1589 	bool is_bo_proxy = false;
1590 	int ret;
1591 
1592 	/*
1593 	 * We cannot use the SurfaceDMA command in an non-accelerated VM,
1594 	 * therefore, wrap the buffer object in a surface so we can use the
1595 	 * SurfaceCopy command.
1596 	 */
1597 	if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)  &&
1598 	    bo && only_2d &&
1599 	    mode_cmd->width > 64 &&  /* Don't create a proxy for cursor */
1600 	    dev_priv->active_display_unit == vmw_du_screen_target) {
1601 		ret = vmw_create_bo_proxy(&dev_priv->drm, mode_cmd,
1602 					  bo, &surface);
1603 		if (ret)
1604 			return ERR_PTR(ret);
1605 
1606 		is_bo_proxy = true;
1607 	}
1608 
1609 	/* Create the new framebuffer depending one what we have */
1610 	if (surface) {
1611 		ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
1612 						      mode_cmd,
1613 						      is_bo_proxy);
1614 		/*
1615 		 * vmw_create_bo_proxy() adds a reference that is no longer
1616 		 * needed
1617 		 */
1618 		if (is_bo_proxy)
1619 			vmw_surface_unreference(&surface);
1620 	} else if (bo) {
1621 		ret = vmw_kms_new_framebuffer_bo(dev_priv, bo, &vfb,
1622 						 mode_cmd);
1623 	} else {
1624 		BUG();
1625 	}
1626 
1627 	if (ret)
1628 		return ERR_PTR(ret);
1629 
1630 	return vfb;
1631 }
1632 
1633 /*
1634  * Generic Kernel modesetting functions
1635  */
1636 
1637 static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
1638 						 struct drm_file *file_priv,
1639 						 const struct drm_mode_fb_cmd2 *mode_cmd)
1640 {
1641 	struct vmw_private *dev_priv = vmw_priv(dev);
1642 	struct vmw_framebuffer *vfb = NULL;
1643 	struct vmw_surface *surface = NULL;
1644 	struct vmw_bo *bo = NULL;
1645 	int ret;
1646 
1647 	/* returns either a bo or surface */
1648 	ret = vmw_user_lookup_handle(dev_priv, file_priv,
1649 				     mode_cmd->handles[0],
1650 				     &surface, &bo);
1651 	if (ret) {
1652 		DRM_ERROR("Invalid buffer object handle %u (0x%x).\n",
1653 			  mode_cmd->handles[0], mode_cmd->handles[0]);
1654 		goto err_out;
1655 	}
1656 
1657 
1658 	if (!bo &&
1659 	    !vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
1660 		DRM_ERROR("Surface size cannot exceed %dx%d\n",
1661 			dev_priv->texture_max_width,
1662 			dev_priv->texture_max_height);
1663 		goto err_out;
1664 	}
1665 
1666 
1667 	vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
1668 				      !(dev_priv->capabilities & SVGA_CAP_3D),
1669 				      mode_cmd);
1670 	if (IS_ERR(vfb)) {
1671 		ret = PTR_ERR(vfb);
1672 		goto err_out;
1673 	}
1674 
1675 err_out:
1676 	/* vmw_user_lookup_handle takes one ref so does new_fb */
1677 	if (bo)
1678 		vmw_user_bo_unref(&bo);
1679 	if (surface)
1680 		vmw_surface_unreference(&surface);
1681 
1682 	if (ret) {
1683 		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
1684 		return ERR_PTR(ret);
1685 	}
1686 
1687 	return &vfb->base;
1688 }
1689 
1690 /**
1691  * vmw_kms_check_display_memory - Validates display memory required for a
1692  * topology
1693  * @dev: DRM device
1694  * @num_rects: number of drm_rect in rects
1695  * @rects: array of drm_rect representing the topology to validate indexed by
1696  * crtc index.
1697  *
1698  * Returns:
1699  * 0 on success otherwise negative error code
1700  */
1701 static int vmw_kms_check_display_memory(struct drm_device *dev,
1702 					uint32_t num_rects,
1703 					struct drm_rect *rects)
1704 {
1705 	struct vmw_private *dev_priv = vmw_priv(dev);
1706 	struct drm_rect bounding_box = {0};
1707 	u64 total_pixels = 0, pixel_mem, bb_mem;
1708 	int i;
1709 
1710 	for (i = 0; i < num_rects; i++) {
1711 		/*
1712 		 * For STDU only individual screen (screen target) is limited by
1713 		 * SCREENTARGET_MAX_WIDTH/HEIGHT registers.
1714 		 */
1715 		if (dev_priv->active_display_unit == vmw_du_screen_target &&
1716 		    (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width ||
1717 		     drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) {
1718 			VMW_DEBUG_KMS("Screen size not supported.\n");
1719 			return -EINVAL;
1720 		}
1721 
1722 		/* Bounding box upper left is at (0,0). */
1723 		if (rects[i].x2 > bounding_box.x2)
1724 			bounding_box.x2 = rects[i].x2;
1725 
1726 		if (rects[i].y2 > bounding_box.y2)
1727 			bounding_box.y2 = rects[i].y2;
1728 
1729 		total_pixels += (u64) drm_rect_width(&rects[i]) *
1730 			(u64) drm_rect_height(&rects[i]);
1731 	}
1732 
1733 	/* Virtual svga device primary limits are always in 32-bpp. */
1734 	pixel_mem = total_pixels * 4;
1735 
1736 	/*
1737 	 * For HV10 and below prim_bb_mem is vram size. When
1738 	 * SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM is not present vram size is
1739 	 * limit on primary bounding box
1740 	 */
1741 	if (pixel_mem > dev_priv->max_primary_mem) {
1742 		VMW_DEBUG_KMS("Combined output size too large.\n");
1743 		return -EINVAL;
1744 	}
1745 
1746 	/* SVGA_CAP_NO_BB_RESTRICTION is available for STDU only. */
1747 	if (dev_priv->active_display_unit != vmw_du_screen_target ||
1748 	    !(dev_priv->capabilities & SVGA_CAP_NO_BB_RESTRICTION)) {
1749 		bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4;
1750 
1751 		if (bb_mem > dev_priv->max_primary_mem) {
1752 			VMW_DEBUG_KMS("Topology is beyond supported limits.\n");
1753 			return -EINVAL;
1754 		}
1755 	}
1756 
1757 	return 0;
1758 }
1759 
1760 /**
1761  * vmw_crtc_state_and_lock - Return new or current crtc state with locked
1762  * crtc mutex
1763  * @state: The atomic state pointer containing the new atomic state
1764  * @crtc: The crtc
1765  *
1766  * This function returns the new crtc state if it's part of the state update.
1767  * Otherwise returns the current crtc state. It also makes sure that the
1768  * crtc mutex is locked.
1769  *
1770  * Returns: A valid crtc state pointer or NULL. It may also return a
1771  * pointer error, in particular -EDEADLK if locking needs to be rerun.
1772  */
1773 static struct drm_crtc_state *
1774 vmw_crtc_state_and_lock(struct drm_atomic_state *state, struct drm_crtc *crtc)
1775 {
1776 	struct drm_crtc_state *crtc_state;
1777 
1778 	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
1779 	if (crtc_state) {
1780 		lockdep_assert_held(&crtc->mutex.mutex.base);
1781 	} else {
1782 		int ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
1783 
1784 		if (ret != 0 && ret != -EALREADY)
1785 			return ERR_PTR(ret);
1786 
1787 		crtc_state = crtc->state;
1788 	}
1789 
1790 	return crtc_state;
1791 }
1792 
1793 /**
1794  * vmw_kms_check_implicit - Verify that all implicit display units scan out
1795  * from the same fb after the new state is committed.
1796  * @dev: The drm_device.
1797  * @state: The new state to be checked.
1798  *
1799  * Returns:
1800  *   Zero on success,
1801  *   -EINVAL on invalid state,
1802  *   -EDEADLK if modeset locking needs to be rerun.
1803  */
1804 static int vmw_kms_check_implicit(struct drm_device *dev,
1805 				  struct drm_atomic_state *state)
1806 {
1807 	struct drm_framebuffer *implicit_fb = NULL;
1808 	struct drm_crtc *crtc;
1809 	struct drm_crtc_state *crtc_state;
1810 	struct drm_plane_state *plane_state;
1811 
1812 	drm_for_each_crtc(crtc, dev) {
1813 		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1814 
1815 		if (!du->is_implicit)
1816 			continue;
1817 
1818 		crtc_state = vmw_crtc_state_and_lock(state, crtc);
1819 		if (IS_ERR(crtc_state))
1820 			return PTR_ERR(crtc_state);
1821 
1822 		if (!crtc_state || !crtc_state->enable)
1823 			continue;
1824 
1825 		/*
1826 		 * Can't move primary planes across crtcs, so this is OK.
1827 		 * It also means we don't need to take the plane mutex.
1828 		 */
1829 		plane_state = du->primary.state;
1830 		if (plane_state->crtc != crtc)
1831 			continue;
1832 
1833 		if (!implicit_fb)
1834 			implicit_fb = plane_state->fb;
1835 		else if (implicit_fb != plane_state->fb)
1836 			return -EINVAL;
1837 	}
1838 
1839 	return 0;
1840 }
1841 
1842 /**
1843  * vmw_kms_check_topology - Validates topology in drm_atomic_state
1844  * @dev: DRM device
1845  * @state: the driver state object
1846  *
1847  * Returns:
1848  * 0 on success otherwise negative error code
1849  */
1850 static int vmw_kms_check_topology(struct drm_device *dev,
1851 				  struct drm_atomic_state *state)
1852 {
1853 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1854 	struct drm_rect *rects;
1855 	struct drm_crtc *crtc;
1856 	uint32_t i;
1857 	int ret = 0;
1858 
1859 	rects = kcalloc(dev->mode_config.num_crtc, sizeof(struct drm_rect),
1860 			GFP_KERNEL);
1861 	if (!rects)
1862 		return -ENOMEM;
1863 
1864 	drm_for_each_crtc(crtc, dev) {
1865 		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1866 		struct drm_crtc_state *crtc_state;
1867 
1868 		i = drm_crtc_index(crtc);
1869 
1870 		crtc_state = vmw_crtc_state_and_lock(state, crtc);
1871 		if (IS_ERR(crtc_state)) {
1872 			ret = PTR_ERR(crtc_state);
1873 			goto clean;
1874 		}
1875 
1876 		if (!crtc_state)
1877 			continue;
1878 
1879 		if (crtc_state->enable) {
1880 			rects[i].x1 = du->gui_x;
1881 			rects[i].y1 = du->gui_y;
1882 			rects[i].x2 = du->gui_x + crtc_state->mode.hdisplay;
1883 			rects[i].y2 = du->gui_y + crtc_state->mode.vdisplay;
1884 		} else {
1885 			rects[i].x1 = 0;
1886 			rects[i].y1 = 0;
1887 			rects[i].x2 = 0;
1888 			rects[i].y2 = 0;
1889 		}
1890 	}
1891 
1892 	/* Determine change to topology due to new atomic state */
1893 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
1894 				      new_crtc_state, i) {
1895 		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1896 		struct drm_connector *connector;
1897 		struct drm_connector_state *conn_state;
1898 		struct vmw_connector_state *vmw_conn_state;
1899 
1900 		if (!du->pref_active && new_crtc_state->enable) {
1901 			VMW_DEBUG_KMS("Enabling a disabled display unit\n");
1902 			ret = -EINVAL;
1903 			goto clean;
1904 		}
1905 
1906 		/*
1907 		 * For vmwgfx each crtc has only one connector attached and it
1908 		 * is not changed so don't really need to check the
1909 		 * crtc->connector_mask and iterate over it.
1910 		 */
1911 		connector = &du->connector;
1912 		conn_state = drm_atomic_get_connector_state(state, connector);
1913 		if (IS_ERR(conn_state)) {
1914 			ret = PTR_ERR(conn_state);
1915 			goto clean;
1916 		}
1917 
1918 		vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
1919 		vmw_conn_state->gui_x = du->gui_x;
1920 		vmw_conn_state->gui_y = du->gui_y;
1921 	}
1922 
1923 	ret = vmw_kms_check_display_memory(dev, dev->mode_config.num_crtc,
1924 					   rects);
1925 
1926 clean:
1927 	kfree(rects);
1928 	return ret;
1929 }
1930 
1931 /**
1932  * vmw_kms_atomic_check_modeset- validate state object for modeset changes
1933  *
1934  * @dev: DRM device
1935  * @state: the driver state object
1936  *
1937  * This is a simple wrapper around drm_atomic_helper_check_modeset() for
1938  * us to assign a value to mode->crtc_clock so that
1939  * drm_calc_timestamping_constants() won't throw an error message
1940  *
1941  * Returns:
1942  * Zero for success or -errno
1943  */
1944 static int
1945 vmw_kms_atomic_check_modeset(struct drm_device *dev,
1946 			     struct drm_atomic_state *state)
1947 {
1948 	struct drm_crtc *crtc;
1949 	struct drm_crtc_state *crtc_state;
1950 	bool need_modeset = false;
1951 	int i, ret;
1952 
1953 	ret = drm_atomic_helper_check(dev, state);
1954 	if (ret)
1955 		return ret;
1956 
1957 	ret = vmw_kms_check_implicit(dev, state);
1958 	if (ret) {
1959 		VMW_DEBUG_KMS("Invalid implicit state\n");
1960 		return ret;
1961 	}
1962 
1963 	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
1964 		if (drm_atomic_crtc_needs_modeset(crtc_state))
1965 			need_modeset = true;
1966 	}
1967 
1968 	if (need_modeset)
1969 		return vmw_kms_check_topology(dev, state);
1970 
1971 	return ret;
1972 }
1973 
1974 static const struct drm_mode_config_funcs vmw_kms_funcs = {
1975 	.fb_create = vmw_kms_fb_create,
1976 	.atomic_check = vmw_kms_atomic_check_modeset,
1977 	.atomic_commit = drm_atomic_helper_commit,
1978 };
1979 
1980 static int vmw_kms_generic_present(struct vmw_private *dev_priv,
1981 				   struct drm_file *file_priv,
1982 				   struct vmw_framebuffer *vfb,
1983 				   struct vmw_surface *surface,
1984 				   uint32_t sid,
1985 				   int32_t destX, int32_t destY,
1986 				   struct drm_vmw_rect *clips,
1987 				   uint32_t num_clips)
1988 {
1989 	return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
1990 					    &surface->res, destX, destY,
1991 					    num_clips, 1, NULL, NULL);
1992 }
1993 
1994 
1995 int vmw_kms_present(struct vmw_private *dev_priv,
1996 		    struct drm_file *file_priv,
1997 		    struct vmw_framebuffer *vfb,
1998 		    struct vmw_surface *surface,
1999 		    uint32_t sid,
2000 		    int32_t destX, int32_t destY,
2001 		    struct drm_vmw_rect *clips,
2002 		    uint32_t num_clips)
2003 {
2004 	int ret;
2005 
2006 	switch (dev_priv->active_display_unit) {
2007 	case vmw_du_screen_target:
2008 		ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
2009 						 &surface->res, destX, destY,
2010 						 num_clips, 1, NULL, NULL);
2011 		break;
2012 	case vmw_du_screen_object:
2013 		ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
2014 					      sid, destX, destY, clips,
2015 					      num_clips);
2016 		break;
2017 	default:
2018 		WARN_ONCE(true,
2019 			  "Present called with invalid display system.\n");
2020 		ret = -ENOSYS;
2021 		break;
2022 	}
2023 	if (ret)
2024 		return ret;
2025 
2026 	vmw_cmd_flush(dev_priv, false);
2027 
2028 	return 0;
2029 }
2030 
2031 static void
2032 vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
2033 {
2034 	if (dev_priv->hotplug_mode_update_property)
2035 		return;
2036 
2037 	dev_priv->hotplug_mode_update_property =
2038 		drm_property_create_range(&dev_priv->drm,
2039 					  DRM_MODE_PROP_IMMUTABLE,
2040 					  "hotplug_mode_update", 0, 1);
2041 }
2042 
2043 int vmw_kms_init(struct vmw_private *dev_priv)
2044 {
2045 	struct drm_device *dev = &dev_priv->drm;
2046 	int ret;
2047 	static const char *display_unit_names[] = {
2048 		"Invalid",
2049 		"Legacy",
2050 		"Screen Object",
2051 		"Screen Target",
2052 		"Invalid (max)"
2053 	};
2054 
2055 	drm_mode_config_init(dev);
2056 	dev->mode_config.funcs = &vmw_kms_funcs;
2057 	dev->mode_config.min_width = 1;
2058 	dev->mode_config.min_height = 1;
2059 	dev->mode_config.max_width = dev_priv->texture_max_width;
2060 	dev->mode_config.max_height = dev_priv->texture_max_height;
2061 	dev->mode_config.preferred_depth = dev_priv->assume_16bpp ? 16 : 32;
2062 
2063 	drm_mode_create_suggested_offset_properties(dev);
2064 	vmw_kms_create_hotplug_mode_update_property(dev_priv);
2065 
2066 	ret = vmw_kms_stdu_init_display(dev_priv);
2067 	if (ret) {
2068 		ret = vmw_kms_sou_init_display(dev_priv);
2069 		if (ret) /* Fallback */
2070 			ret = vmw_kms_ldu_init_display(dev_priv);
2071 	}
2072 	BUILD_BUG_ON(ARRAY_SIZE(display_unit_names) != (vmw_du_max + 1));
2073 	drm_info(&dev_priv->drm, "%s display unit initialized\n",
2074 		 display_unit_names[dev_priv->active_display_unit]);
2075 
2076 	return ret;
2077 }
2078 
2079 int vmw_kms_close(struct vmw_private *dev_priv)
2080 {
2081 	int ret = 0;
2082 
2083 	/*
2084 	 * Docs says we should take the lock before calling this function
2085 	 * but since it destroys encoders and our destructor calls
2086 	 * drm_encoder_cleanup which takes the lock we deadlock.
2087 	 */
2088 	drm_mode_config_cleanup(&dev_priv->drm);
2089 	if (dev_priv->active_display_unit == vmw_du_legacy)
2090 		ret = vmw_kms_ldu_close_display(dev_priv);
2091 
2092 	return ret;
2093 }
2094 
2095 int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
2096 				struct drm_file *file_priv)
2097 {
2098 	struct drm_vmw_cursor_bypass_arg *arg = data;
2099 	struct vmw_display_unit *du;
2100 	struct drm_crtc *crtc;
2101 	int ret = 0;
2102 
2103 	mutex_lock(&dev->mode_config.mutex);
2104 	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
2105 
2106 		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2107 			du = vmw_crtc_to_du(crtc);
2108 			du->hotspot_x = arg->xhot;
2109 			du->hotspot_y = arg->yhot;
2110 		}
2111 
2112 		mutex_unlock(&dev->mode_config.mutex);
2113 		return 0;
2114 	}
2115 
2116 	crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
2117 	if (!crtc) {
2118 		ret = -ENOENT;
2119 		goto out;
2120 	}
2121 
2122 	du = vmw_crtc_to_du(crtc);
2123 
2124 	du->hotspot_x = arg->xhot;
2125 	du->hotspot_y = arg->yhot;
2126 
2127 out:
2128 	mutex_unlock(&dev->mode_config.mutex);
2129 
2130 	return ret;
2131 }
2132 
2133 int vmw_kms_write_svga(struct vmw_private *vmw_priv,
2134 			unsigned width, unsigned height, unsigned pitch,
2135 			unsigned bpp, unsigned depth)
2136 {
2137 	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
2138 		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
2139 	else if (vmw_fifo_have_pitchlock(vmw_priv))
2140 		vmw_fifo_mem_write(vmw_priv, SVGA_FIFO_PITCHLOCK, pitch);
2141 	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
2142 	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
2143 	if ((vmw_priv->capabilities & SVGA_CAP_8BIT_EMULATION) != 0)
2144 		vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
2145 
2146 	if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
2147 		DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
2148 			  depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
2149 		return -EINVAL;
2150 	}
2151 
2152 	return 0;
2153 }
2154 
2155 bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
2156 				uint32_t pitch,
2157 				uint32_t height)
2158 {
2159 	return ((u64) pitch * (u64) height) < (u64)
2160 		((dev_priv->active_display_unit == vmw_du_screen_target) ?
2161 		 dev_priv->max_primary_mem : dev_priv->vram_size);
2162 }
2163 
2164 /**
2165  * vmw_du_update_layout - Update the display unit with topology from resolution
2166  * plugin and generate DRM uevent
2167  * @dev_priv: device private
2168  * @num_rects: number of drm_rect in rects
2169  * @rects: toplogy to update
2170  */
2171 static int vmw_du_update_layout(struct vmw_private *dev_priv,
2172 				unsigned int num_rects, struct drm_rect *rects)
2173 {
2174 	struct drm_device *dev = &dev_priv->drm;
2175 	struct vmw_display_unit *du;
2176 	struct drm_connector *con;
2177 	struct drm_connector_list_iter conn_iter;
2178 	struct drm_modeset_acquire_ctx ctx;
2179 	struct drm_crtc *crtc;
2180 	int ret;
2181 
2182 	/* Currently gui_x/y is protected with the crtc mutex */
2183 	mutex_lock(&dev->mode_config.mutex);
2184 	drm_modeset_acquire_init(&ctx, 0);
2185 retry:
2186 	drm_for_each_crtc(crtc, dev) {
2187 		ret = drm_modeset_lock(&crtc->mutex, &ctx);
2188 		if (ret < 0) {
2189 			if (ret == -EDEADLK) {
2190 				drm_modeset_backoff(&ctx);
2191 				goto retry;
2192 		}
2193 			goto out_fini;
2194 		}
2195 	}
2196 
2197 	drm_connector_list_iter_begin(dev, &conn_iter);
2198 	drm_for_each_connector_iter(con, &conn_iter) {
2199 		du = vmw_connector_to_du(con);
2200 		if (num_rects > du->unit) {
2201 			du->pref_width = drm_rect_width(&rects[du->unit]);
2202 			du->pref_height = drm_rect_height(&rects[du->unit]);
2203 			du->pref_active = true;
2204 			du->gui_x = rects[du->unit].x1;
2205 			du->gui_y = rects[du->unit].y1;
2206 		} else {
2207 			du->pref_width  = VMWGFX_MIN_INITIAL_WIDTH;
2208 			du->pref_height = VMWGFX_MIN_INITIAL_HEIGHT;
2209 			du->pref_active = false;
2210 			du->gui_x = 0;
2211 			du->gui_y = 0;
2212 		}
2213 	}
2214 	drm_connector_list_iter_end(&conn_iter);
2215 
2216 	list_for_each_entry(con, &dev->mode_config.connector_list, head) {
2217 		du = vmw_connector_to_du(con);
2218 		if (num_rects > du->unit) {
2219 			drm_object_property_set_value
2220 			  (&con->base, dev->mode_config.suggested_x_property,
2221 			   du->gui_x);
2222 			drm_object_property_set_value
2223 			  (&con->base, dev->mode_config.suggested_y_property,
2224 			   du->gui_y);
2225 		} else {
2226 			drm_object_property_set_value
2227 			  (&con->base, dev->mode_config.suggested_x_property,
2228 			   0);
2229 			drm_object_property_set_value
2230 			  (&con->base, dev->mode_config.suggested_y_property,
2231 			   0);
2232 		}
2233 		con->status = vmw_du_connector_detect(con, true);
2234 	}
2235 out_fini:
2236 	drm_modeset_drop_locks(&ctx);
2237 	drm_modeset_acquire_fini(&ctx);
2238 	mutex_unlock(&dev->mode_config.mutex);
2239 
2240 	drm_sysfs_hotplug_event(dev);
2241 
2242 	return 0;
2243 }
2244 
2245 int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
2246 			  u16 *r, u16 *g, u16 *b,
2247 			  uint32_t size,
2248 			  struct drm_modeset_acquire_ctx *ctx)
2249 {
2250 	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
2251 	int i;
2252 
2253 	for (i = 0; i < size; i++) {
2254 		DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
2255 			  r[i], g[i], b[i]);
2256 		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
2257 		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
2258 		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
2259 	}
2260 
2261 	return 0;
2262 }
2263 
2264 int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
2265 {
2266 	return 0;
2267 }
2268 
2269 enum drm_connector_status
2270 vmw_du_connector_detect(struct drm_connector *connector, bool force)
2271 {
2272 	uint32_t num_displays;
2273 	struct drm_device *dev = connector->dev;
2274 	struct vmw_private *dev_priv = vmw_priv(dev);
2275 	struct vmw_display_unit *du = vmw_connector_to_du(connector);
2276 
2277 	num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
2278 
2279 	return ((vmw_connector_to_du(connector)->unit < num_displays &&
2280 		 du->pref_active) ?
2281 		connector_status_connected : connector_status_disconnected);
2282 }
2283 
2284 static struct drm_display_mode vmw_kms_connector_builtin[] = {
2285 	/* 640x480@60Hz */
2286 	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
2287 		   752, 800, 0, 480, 489, 492, 525, 0,
2288 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
2289 	/* 800x600@60Hz */
2290 	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
2291 		   968, 1056, 0, 600, 601, 605, 628, 0,
2292 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2293 	/* 1024x768@60Hz */
2294 	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
2295 		   1184, 1344, 0, 768, 771, 777, 806, 0,
2296 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
2297 	/* 1152x864@75Hz */
2298 	{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
2299 		   1344, 1600, 0, 864, 865, 868, 900, 0,
2300 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2301 	/* 1280x720@60Hz */
2302 	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74500, 1280, 1344,
2303 		   1472, 1664, 0, 720, 723, 728, 748, 0,
2304 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2305 	/* 1280x768@60Hz */
2306 	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
2307 		   1472, 1664, 0, 768, 771, 778, 798, 0,
2308 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2309 	/* 1280x800@60Hz */
2310 	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
2311 		   1480, 1680, 0, 800, 803, 809, 831, 0,
2312 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2313 	/* 1280x960@60Hz */
2314 	{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
2315 		   1488, 1800, 0, 960, 961, 964, 1000, 0,
2316 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2317 	/* 1280x1024@60Hz */
2318 	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
2319 		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
2320 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2321 	/* 1360x768@60Hz */
2322 	{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
2323 		   1536, 1792, 0, 768, 771, 777, 795, 0,
2324 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2325 	/* 1440x1050@60Hz */
2326 	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
2327 		   1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
2328 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2329 	/* 1440x900@60Hz */
2330 	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
2331 		   1672, 1904, 0, 900, 903, 909, 934, 0,
2332 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2333 	/* 1600x1200@60Hz */
2334 	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
2335 		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
2336 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2337 	/* 1680x1050@60Hz */
2338 	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
2339 		   1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
2340 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2341 	/* 1792x1344@60Hz */
2342 	{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
2343 		   2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
2344 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2345 	/* 1853x1392@60Hz */
2346 	{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
2347 		   2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
2348 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2349 	/* 1920x1080@60Hz */
2350 	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 173000, 1920, 2048,
2351 		   2248, 2576, 0, 1080, 1083, 1088, 1120, 0,
2352 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2353 	/* 1920x1200@60Hz */
2354 	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
2355 		   2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
2356 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2357 	/* 1920x1440@60Hz */
2358 	{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
2359 		   2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
2360 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2361 	/* 2560x1440@60Hz */
2362 	{ DRM_MODE("2560x1440", DRM_MODE_TYPE_DRIVER, 241500, 2560, 2608,
2363 		   2640, 2720, 0, 1440, 1443, 1448, 1481, 0,
2364 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2365 	/* 2560x1600@60Hz */
2366 	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
2367 		   3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
2368 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2369 	/* 2880x1800@60Hz */
2370 	{ DRM_MODE("2880x1800", DRM_MODE_TYPE_DRIVER, 337500, 2880, 2928,
2371 		   2960, 3040, 0, 1800, 1803, 1809, 1852, 0,
2372 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2373 	/* 3840x2160@60Hz */
2374 	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 533000, 3840, 3888,
2375 		   3920, 4000, 0, 2160, 2163, 2168, 2222, 0,
2376 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2377 	/* 3840x2400@60Hz */
2378 	{ DRM_MODE("3840x2400", DRM_MODE_TYPE_DRIVER, 592250, 3840, 3888,
2379 		   3920, 4000, 0, 2400, 2403, 2409, 2469, 0,
2380 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2381 	/* Terminate */
2382 	{ DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
2383 };
2384 
2385 /**
2386  * vmw_guess_mode_timing - Provide fake timings for a
2387  * 60Hz vrefresh mode.
2388  *
2389  * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
2390  * members filled in.
2391  */
2392 void vmw_guess_mode_timing(struct drm_display_mode *mode)
2393 {
2394 	mode->hsync_start = mode->hdisplay + 50;
2395 	mode->hsync_end = mode->hsync_start + 50;
2396 	mode->htotal = mode->hsync_end + 50;
2397 
2398 	mode->vsync_start = mode->vdisplay + 50;
2399 	mode->vsync_end = mode->vsync_start + 50;
2400 	mode->vtotal = mode->vsync_end + 50;
2401 
2402 	mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
2403 }
2404 
2405 
2406 int vmw_du_connector_fill_modes(struct drm_connector *connector,
2407 				uint32_t max_width, uint32_t max_height)
2408 {
2409 	struct vmw_display_unit *du = vmw_connector_to_du(connector);
2410 	struct drm_device *dev = connector->dev;
2411 	struct vmw_private *dev_priv = vmw_priv(dev);
2412 	struct drm_display_mode *mode = NULL;
2413 	struct drm_display_mode *bmode;
2414 	struct drm_display_mode prefmode = { DRM_MODE("preferred",
2415 		DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
2416 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2417 		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
2418 	};
2419 	int i;
2420 	u32 assumed_bpp = 4;
2421 
2422 	if (dev_priv->assume_16bpp)
2423 		assumed_bpp = 2;
2424 
2425 	max_width  = min(max_width,  dev_priv->texture_max_width);
2426 	max_height = min(max_height, dev_priv->texture_max_height);
2427 
2428 	/*
2429 	 * For STDU extra limit for a mode on SVGA_REG_SCREENTARGET_MAX_WIDTH/
2430 	 * HEIGHT registers.
2431 	 */
2432 	if (dev_priv->active_display_unit == vmw_du_screen_target) {
2433 		max_width  = min(max_width,  dev_priv->stdu_max_width);
2434 		max_height = min(max_height, dev_priv->stdu_max_height);
2435 	}
2436 
2437 	/* Add preferred mode */
2438 	mode = drm_mode_duplicate(dev, &prefmode);
2439 	if (!mode)
2440 		return 0;
2441 	mode->hdisplay = du->pref_width;
2442 	mode->vdisplay = du->pref_height;
2443 	vmw_guess_mode_timing(mode);
2444 	drm_mode_set_name(mode);
2445 
2446 	if (vmw_kms_validate_mode_vram(dev_priv,
2447 					mode->hdisplay * assumed_bpp,
2448 					mode->vdisplay)) {
2449 		drm_mode_probed_add(connector, mode);
2450 	} else {
2451 		drm_mode_destroy(dev, mode);
2452 		mode = NULL;
2453 	}
2454 
2455 	if (du->pref_mode) {
2456 		list_del_init(&du->pref_mode->head);
2457 		drm_mode_destroy(dev, du->pref_mode);
2458 	}
2459 
2460 	/* mode might be null here, this is intended */
2461 	du->pref_mode = mode;
2462 
2463 	for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
2464 		bmode = &vmw_kms_connector_builtin[i];
2465 		if (bmode->hdisplay > max_width ||
2466 		    bmode->vdisplay > max_height)
2467 			continue;
2468 
2469 		if (!vmw_kms_validate_mode_vram(dev_priv,
2470 						bmode->hdisplay * assumed_bpp,
2471 						bmode->vdisplay))
2472 			continue;
2473 
2474 		mode = drm_mode_duplicate(dev, bmode);
2475 		if (!mode)
2476 			return 0;
2477 
2478 		drm_mode_probed_add(connector, mode);
2479 	}
2480 
2481 	drm_connector_list_update(connector);
2482 	/* Move the prefered mode first, help apps pick the right mode. */
2483 	drm_mode_sort(&connector->modes);
2484 
2485 	return 1;
2486 }
2487 
2488 /**
2489  * vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
2490  * @dev: drm device for the ioctl
2491  * @data: data pointer for the ioctl
2492  * @file_priv: drm file for the ioctl call
2493  *
2494  * Update preferred topology of display unit as per ioctl request. The topology
2495  * is expressed as array of drm_vmw_rect.
2496  * e.g.
2497  * [0 0 640 480] [640 0 800 600] [0 480 640 480]
2498  *
2499  * NOTE:
2500  * The x and y offset (upper left) in drm_vmw_rect cannot be less than 0. Beside
2501  * device limit on topology, x + w and y + h (lower right) cannot be greater
2502  * than INT_MAX. So topology beyond these limits will return with error.
2503  *
2504  * Returns:
2505  * Zero on success, negative errno on failure.
2506  */
2507 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
2508 				struct drm_file *file_priv)
2509 {
2510 	struct vmw_private *dev_priv = vmw_priv(dev);
2511 	struct drm_mode_config *mode_config = &dev->mode_config;
2512 	struct drm_vmw_update_layout_arg *arg =
2513 		(struct drm_vmw_update_layout_arg *)data;
2514 	void __user *user_rects;
2515 	struct drm_vmw_rect *rects;
2516 	struct drm_rect *drm_rects;
2517 	unsigned rects_size;
2518 	int ret, i;
2519 
2520 	if (!arg->num_outputs) {
2521 		struct drm_rect def_rect = {0, 0,
2522 					    VMWGFX_MIN_INITIAL_WIDTH,
2523 					    VMWGFX_MIN_INITIAL_HEIGHT};
2524 		vmw_du_update_layout(dev_priv, 1, &def_rect);
2525 		return 0;
2526 	}
2527 
2528 	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
2529 	rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
2530 			GFP_KERNEL);
2531 	if (unlikely(!rects))
2532 		return -ENOMEM;
2533 
2534 	user_rects = (void __user *)(unsigned long)arg->rects;
2535 	ret = copy_from_user(rects, user_rects, rects_size);
2536 	if (unlikely(ret != 0)) {
2537 		DRM_ERROR("Failed to get rects.\n");
2538 		ret = -EFAULT;
2539 		goto out_free;
2540 	}
2541 
2542 	drm_rects = (struct drm_rect *)rects;
2543 
2544 	VMW_DEBUG_KMS("Layout count = %u\n", arg->num_outputs);
2545 	for (i = 0; i < arg->num_outputs; i++) {
2546 		struct drm_vmw_rect curr_rect;
2547 
2548 		/* Verify user-space for overflow as kernel use drm_rect */
2549 		if ((rects[i].x + rects[i].w > INT_MAX) ||
2550 		    (rects[i].y + rects[i].h > INT_MAX)) {
2551 			ret = -ERANGE;
2552 			goto out_free;
2553 		}
2554 
2555 		curr_rect = rects[i];
2556 		drm_rects[i].x1 = curr_rect.x;
2557 		drm_rects[i].y1 = curr_rect.y;
2558 		drm_rects[i].x2 = curr_rect.x + curr_rect.w;
2559 		drm_rects[i].y2 = curr_rect.y + curr_rect.h;
2560 
2561 		VMW_DEBUG_KMS("  x1 = %d y1 = %d x2 = %d y2 = %d\n",
2562 			      drm_rects[i].x1, drm_rects[i].y1,
2563 			      drm_rects[i].x2, drm_rects[i].y2);
2564 
2565 		/*
2566 		 * Currently this check is limiting the topology within
2567 		 * mode_config->max (which actually is max texture size
2568 		 * supported by virtual device). This limit is here to address
2569 		 * window managers that create a big framebuffer for whole
2570 		 * topology.
2571 		 */
2572 		if (drm_rects[i].x1 < 0 ||  drm_rects[i].y1 < 0 ||
2573 		    drm_rects[i].x2 > mode_config->max_width ||
2574 		    drm_rects[i].y2 > mode_config->max_height) {
2575 			VMW_DEBUG_KMS("Invalid layout %d %d %d %d\n",
2576 				      drm_rects[i].x1, drm_rects[i].y1,
2577 				      drm_rects[i].x2, drm_rects[i].y2);
2578 			ret = -EINVAL;
2579 			goto out_free;
2580 		}
2581 	}
2582 
2583 	ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);
2584 
2585 	if (ret == 0)
2586 		vmw_du_update_layout(dev_priv, arg->num_outputs, drm_rects);
2587 
2588 out_free:
2589 	kfree(rects);
2590 	return ret;
2591 }
2592 
2593 /**
2594  * vmw_kms_helper_dirty - Helper to build commands and perform actions based
2595  * on a set of cliprects and a set of display units.
2596  *
2597  * @dev_priv: Pointer to a device private structure.
2598  * @framebuffer: Pointer to the framebuffer on which to perform the actions.
2599  * @clips: A set of struct drm_clip_rect. Either this os @vclips must be NULL.
2600  * Cliprects are given in framebuffer coordinates.
2601  * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
2602  * be NULL. Cliprects are given in source coordinates.
2603  * @dest_x: X coordinate offset for the crtc / destination clip rects.
2604  * @dest_y: Y coordinate offset for the crtc / destination clip rects.
2605  * @num_clips: Number of cliprects in the @clips or @vclips array.
2606  * @increment: Integer with which to increment the clip counter when looping.
2607  * Used to skip a predetermined number of clip rects.
2608  * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
2609  */
2610 int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
2611 			 struct vmw_framebuffer *framebuffer,
2612 			 const struct drm_clip_rect *clips,
2613 			 const struct drm_vmw_rect *vclips,
2614 			 s32 dest_x, s32 dest_y,
2615 			 int num_clips,
2616 			 int increment,
2617 			 struct vmw_kms_dirty *dirty)
2618 {
2619 	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
2620 	struct drm_crtc *crtc;
2621 	u32 num_units = 0;
2622 	u32 i, k;
2623 
2624 	dirty->dev_priv = dev_priv;
2625 
2626 	/* If crtc is passed, no need to iterate over other display units */
2627 	if (dirty->crtc) {
2628 		units[num_units++] = vmw_crtc_to_du(dirty->crtc);
2629 	} else {
2630 		list_for_each_entry(crtc, &dev_priv->drm.mode_config.crtc_list,
2631 				    head) {
2632 			struct drm_plane *plane = crtc->primary;
2633 
2634 			if (plane->state->fb == &framebuffer->base)
2635 				units[num_units++] = vmw_crtc_to_du(crtc);
2636 		}
2637 	}
2638 
2639 	for (k = 0; k < num_units; k++) {
2640 		struct vmw_display_unit *unit = units[k];
2641 		s32 crtc_x = unit->crtc.x;
2642 		s32 crtc_y = unit->crtc.y;
2643 		s32 crtc_width = unit->crtc.mode.hdisplay;
2644 		s32 crtc_height = unit->crtc.mode.vdisplay;
2645 		const struct drm_clip_rect *clips_ptr = clips;
2646 		const struct drm_vmw_rect *vclips_ptr = vclips;
2647 
2648 		dirty->unit = unit;
2649 		if (dirty->fifo_reserve_size > 0) {
2650 			dirty->cmd = VMW_CMD_RESERVE(dev_priv,
2651 						      dirty->fifo_reserve_size);
2652 			if (!dirty->cmd)
2653 				return -ENOMEM;
2654 
2655 			memset(dirty->cmd, 0, dirty->fifo_reserve_size);
2656 		}
2657 		dirty->num_hits = 0;
2658 		for (i = 0; i < num_clips; i++, clips_ptr += increment,
2659 		       vclips_ptr += increment) {
2660 			s32 clip_left;
2661 			s32 clip_top;
2662 
2663 			/*
2664 			 * Select clip array type. Note that integer type
2665 			 * in @clips is unsigned short, whereas in @vclips
2666 			 * it's 32-bit.
2667 			 */
2668 			if (clips) {
2669 				dirty->fb_x = (s32) clips_ptr->x1;
2670 				dirty->fb_y = (s32) clips_ptr->y1;
2671 				dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
2672 					crtc_x;
2673 				dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
2674 					crtc_y;
2675 			} else {
2676 				dirty->fb_x = vclips_ptr->x;
2677 				dirty->fb_y = vclips_ptr->y;
2678 				dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
2679 					dest_x - crtc_x;
2680 				dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
2681 					dest_y - crtc_y;
2682 			}
2683 
2684 			dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
2685 			dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;
2686 
2687 			/* Skip this clip if it's outside the crtc region */
2688 			if (dirty->unit_x1 >= crtc_width ||
2689 			    dirty->unit_y1 >= crtc_height ||
2690 			    dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
2691 				continue;
2692 
2693 			/* Clip right and bottom to crtc limits */
2694 			dirty->unit_x2 = min_t(s32, dirty->unit_x2,
2695 					       crtc_width);
2696 			dirty->unit_y2 = min_t(s32, dirty->unit_y2,
2697 					       crtc_height);
2698 
2699 			/* Clip left and top to crtc limits */
2700 			clip_left = min_t(s32, dirty->unit_x1, 0);
2701 			clip_top = min_t(s32, dirty->unit_y1, 0);
2702 			dirty->unit_x1 -= clip_left;
2703 			dirty->unit_y1 -= clip_top;
2704 			dirty->fb_x -= clip_left;
2705 			dirty->fb_y -= clip_top;
2706 
2707 			dirty->clip(dirty);
2708 		}
2709 
2710 		dirty->fifo_commit(dirty);
2711 	}
2712 
2713 	return 0;
2714 }
2715 
2716 /**
2717  * vmw_kms_helper_validation_finish - Helper for post KMS command submission
2718  * cleanup and fencing
2719  * @dev_priv: Pointer to the device-private struct
2720  * @file_priv: Pointer identifying the client when user-space fencing is used
2721  * @ctx: Pointer to the validation context
2722  * @out_fence: If non-NULL, returned refcounted fence-pointer
2723  * @user_fence_rep: If non-NULL, pointer to user-space address area
2724  * in which to copy user-space fence info
2725  */
2726 void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
2727 				      struct drm_file *file_priv,
2728 				      struct vmw_validation_context *ctx,
2729 				      struct vmw_fence_obj **out_fence,
2730 				      struct drm_vmw_fence_rep __user *
2731 				      user_fence_rep)
2732 {
2733 	struct vmw_fence_obj *fence = NULL;
2734 	uint32_t handle = 0;
2735 	int ret = 0;
2736 
2737 	if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
2738 	    out_fence)
2739 		ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
2740 						 file_priv ? &handle : NULL);
2741 	vmw_validation_done(ctx, fence);
2742 	if (file_priv)
2743 		vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
2744 					    ret, user_fence_rep, fence,
2745 					    handle, -1);
2746 	if (out_fence)
2747 		*out_fence = fence;
2748 	else
2749 		vmw_fence_obj_unreference(&fence);
2750 }
2751 
2752 /**
2753  * vmw_kms_update_proxy - Helper function to update a proxy surface from
2754  * its backing MOB.
2755  *
2756  * @res: Pointer to the surface resource
2757  * @clips: Clip rects in framebuffer (surface) space.
2758  * @num_clips: Number of clips in @clips.
2759  * @increment: Integer with which to increment the clip counter when looping.
2760  * Used to skip a predetermined number of clip rects.
2761  *
2762  * This function makes sure the proxy surface is updated from its backing MOB
2763  * using the region given by @clips. The surface resource @res and its backing
2764  * MOB needs to be reserved and validated on call.
2765  */
2766 int vmw_kms_update_proxy(struct vmw_resource *res,
2767 			 const struct drm_clip_rect *clips,
2768 			 unsigned num_clips,
2769 			 int increment)
2770 {
2771 	struct vmw_private *dev_priv = res->dev_priv;
2772 	struct drm_vmw_size *size = &vmw_res_to_srf(res)->metadata.base_size;
2773 	struct {
2774 		SVGA3dCmdHeader header;
2775 		SVGA3dCmdUpdateGBImage body;
2776 	} *cmd;
2777 	SVGA3dBox *box;
2778 	size_t copy_size = 0;
2779 	int i;
2780 
2781 	if (!clips)
2782 		return 0;
2783 
2784 	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd) * num_clips);
2785 	if (!cmd)
2786 		return -ENOMEM;
2787 
2788 	for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
2789 		box = &cmd->body.box;
2790 
2791 		cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
2792 		cmd->header.size = sizeof(cmd->body);
2793 		cmd->body.image.sid = res->id;
2794 		cmd->body.image.face = 0;
2795 		cmd->body.image.mipmap = 0;
2796 
2797 		if (clips->x1 > size->width || clips->x2 > size->width ||
2798 		    clips->y1 > size->height || clips->y2 > size->height) {
2799 			DRM_ERROR("Invalid clips outsize of framebuffer.\n");
2800 			return -EINVAL;
2801 		}
2802 
2803 		box->x = clips->x1;
2804 		box->y = clips->y1;
2805 		box->z = 0;
2806 		box->w = clips->x2 - clips->x1;
2807 		box->h = clips->y2 - clips->y1;
2808 		box->d = 1;
2809 
2810 		copy_size += sizeof(*cmd);
2811 	}
2812 
2813 	vmw_cmd_commit(dev_priv, copy_size);
2814 
2815 	return 0;
2816 }
2817 
2818 /**
2819  * vmw_kms_create_implicit_placement_property - Set up the implicit placement
2820  * property.
2821  *
2822  * @dev_priv: Pointer to a device private struct.
2823  *
2824  * Sets up the implicit placement property unless it's already set up.
2825  */
2826 void
2827 vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv)
2828 {
2829 	if (dev_priv->implicit_placement_property)
2830 		return;
2831 
2832 	dev_priv->implicit_placement_property =
2833 		drm_property_create_range(&dev_priv->drm,
2834 					  DRM_MODE_PROP_IMMUTABLE,
2835 					  "implicit_placement", 0, 1);
2836 }
2837 
2838 /**
2839  * vmw_kms_suspend - Save modesetting state and turn modesetting off.
2840  *
2841  * @dev: Pointer to the drm device
2842  * Return: 0 on success. Negative error code on failure.
2843  */
2844 int vmw_kms_suspend(struct drm_device *dev)
2845 {
2846 	struct vmw_private *dev_priv = vmw_priv(dev);
2847 
2848 	dev_priv->suspend_state = drm_atomic_helper_suspend(dev);
2849 	if (IS_ERR(dev_priv->suspend_state)) {
2850 		int ret = PTR_ERR(dev_priv->suspend_state);
2851 
2852 		DRM_ERROR("Failed kms suspend: %d\n", ret);
2853 		dev_priv->suspend_state = NULL;
2854 
2855 		return ret;
2856 	}
2857 
2858 	return 0;
2859 }
2860 
2861 
2862 /**
2863  * vmw_kms_resume - Re-enable modesetting and restore state
2864  *
2865  * @dev: Pointer to the drm device
2866  * Return: 0 on success. Negative error code on failure.
2867  *
2868  * State is resumed from a previous vmw_kms_suspend(). It's illegal
2869  * to call this function without a previous vmw_kms_suspend().
2870  */
2871 int vmw_kms_resume(struct drm_device *dev)
2872 {
2873 	struct vmw_private *dev_priv = vmw_priv(dev);
2874 	int ret;
2875 
2876 	if (WARN_ON(!dev_priv->suspend_state))
2877 		return 0;
2878 
2879 	ret = drm_atomic_helper_resume(dev, dev_priv->suspend_state);
2880 	dev_priv->suspend_state = NULL;
2881 
2882 	return ret;
2883 }
2884 
2885 /**
2886  * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost
2887  *
2888  * @dev: Pointer to the drm device
2889  */
2890 void vmw_kms_lost_device(struct drm_device *dev)
2891 {
2892 	drm_atomic_helper_shutdown(dev);
2893 }
2894 
2895 /**
2896  * vmw_du_helper_plane_update - Helper to do plane update on a display unit.
2897  * @update: The closure structure.
2898  *
2899  * Call this helper after setting callbacks in &vmw_du_update_plane to do plane
2900  * update on display unit.
2901  *
2902  * Return: 0 on success or a negative error code on failure.
2903  */
2904 int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
2905 {
2906 	struct drm_plane_state *state = update->plane->state;
2907 	struct drm_plane_state *old_state = update->old_state;
2908 	struct drm_atomic_helper_damage_iter iter;
2909 	struct drm_rect clip;
2910 	struct drm_rect bb;
2911 	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
2912 	uint32_t reserved_size = 0;
2913 	uint32_t submit_size = 0;
2914 	uint32_t curr_size = 0;
2915 	uint32_t num_hits = 0;
2916 	void *cmd_start;
2917 	char *cmd_next;
2918 	int ret;
2919 
2920 	/*
2921 	 * Iterate in advance to check if really need plane update and find the
2922 	 * number of clips that actually are in plane src for fifo allocation.
2923 	 */
2924 	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
2925 	drm_atomic_for_each_plane_damage(&iter, &clip)
2926 		num_hits++;
2927 
2928 	if (num_hits == 0)
2929 		return 0;
2930 
2931 	if (update->vfb->bo) {
2932 		struct vmw_framebuffer_bo *vfbbo =
2933 			container_of(update->vfb, typeof(*vfbbo), base);
2934 
2935 		/*
2936 		 * For screen targets we want a mappable bo, for everything else we want
2937 		 * accelerated i.e. host backed (vram or gmr) bo. If the display unit
2938 		 * is not screen target then mob's shouldn't be available.
2939 		 */
2940 		if (update->dev_priv->active_display_unit == vmw_du_screen_target) {
2941 			vmw_bo_placement_set(vfbbo->buffer,
2942 					     VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR,
2943 					     VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR);
2944 		} else {
2945 			WARN_ON(update->dev_priv->has_mob);
2946 			vmw_bo_placement_set_default_accelerated(vfbbo->buffer);
2947 		}
2948 		ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer);
2949 	} else {
2950 		struct vmw_framebuffer_surface *vfbs =
2951 			container_of(update->vfb, typeof(*vfbs), base);
2952 
2953 		ret = vmw_validation_add_resource(&val_ctx, &vfbs->surface->res,
2954 						  0, VMW_RES_DIRTY_NONE, NULL,
2955 						  NULL);
2956 	}
2957 
2958 	if (ret)
2959 		return ret;
2960 
2961 	ret = vmw_validation_prepare(&val_ctx, update->mutex, update->intr);
2962 	if (ret)
2963 		goto out_unref;
2964 
2965 	reserved_size = update->calc_fifo_size(update, num_hits);
2966 	cmd_start = VMW_CMD_RESERVE(update->dev_priv, reserved_size);
2967 	if (!cmd_start) {
2968 		ret = -ENOMEM;
2969 		goto out_revert;
2970 	}
2971 
2972 	cmd_next = cmd_start;
2973 
2974 	if (update->post_prepare) {
2975 		curr_size = update->post_prepare(update, cmd_next);
2976 		cmd_next += curr_size;
2977 		submit_size += curr_size;
2978 	}
2979 
2980 	if (update->pre_clip) {
2981 		curr_size = update->pre_clip(update, cmd_next, num_hits);
2982 		cmd_next += curr_size;
2983 		submit_size += curr_size;
2984 	}
2985 
2986 	bb.x1 = INT_MAX;
2987 	bb.y1 = INT_MAX;
2988 	bb.x2 = INT_MIN;
2989 	bb.y2 = INT_MIN;
2990 
2991 	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
2992 	drm_atomic_for_each_plane_damage(&iter, &clip) {
2993 		uint32_t fb_x = clip.x1;
2994 		uint32_t fb_y = clip.y1;
2995 
2996 		vmw_du_translate_to_crtc(state, &clip);
2997 		if (update->clip) {
2998 			curr_size = update->clip(update, cmd_next, &clip, fb_x,
2999 						 fb_y);
3000 			cmd_next += curr_size;
3001 			submit_size += curr_size;
3002 		}
3003 		bb.x1 = min_t(int, bb.x1, clip.x1);
3004 		bb.y1 = min_t(int, bb.y1, clip.y1);
3005 		bb.x2 = max_t(int, bb.x2, clip.x2);
3006 		bb.y2 = max_t(int, bb.y2, clip.y2);
3007 	}
3008 
3009 	curr_size = update->post_clip(update, cmd_next, &bb);
3010 	submit_size += curr_size;
3011 
3012 	if (reserved_size < submit_size)
3013 		submit_size = 0;
3014 
3015 	vmw_cmd_commit(update->dev_priv, submit_size);
3016 
3017 	vmw_kms_helper_validation_finish(update->dev_priv, NULL, &val_ctx,
3018 					 update->out_fence, NULL);
3019 	return ret;
3020 
3021 out_revert:
3022 	vmw_validation_revert(&val_ctx);
3023 
3024 out_unref:
3025 	vmw_validation_unref_lists(&val_ctx);
3026 	return ret;
3027 }
3028