1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /**************************************************************************
3  *
4  * Copyright 2009-2022 VMware, Inc., Palo Alto, CA., USA
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 
28 #include "vmwgfx_kms.h"
29 #include "vmw_surface_cache.h"
30 
31 #include <drm/drm_atomic.h>
32 #include <drm/drm_atomic_helper.h>
33 #include <drm/drm_damage_helper.h>
34 #include <drm/drm_fourcc.h>
35 #include <drm/drm_rect.h>
36 #include <drm/drm_sysfs.h>
37 
38 void vmw_du_cleanup(struct vmw_display_unit *du)
39 {
40 	struct vmw_private *dev_priv = vmw_priv(du->primary.dev);
41 	drm_plane_cleanup(&du->primary);
42 	if (vmw_cmd_supported(dev_priv))
43 		drm_plane_cleanup(&du->cursor.base);
44 
45 	drm_connector_unregister(&du->connector);
46 	drm_crtc_cleanup(&du->crtc);
47 	drm_encoder_cleanup(&du->encoder);
48 	drm_connector_cleanup(&du->connector);
49 }
50 
51 /*
52  * Display Unit Cursor functions
53  */
54 
55 static int vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps);
56 static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
57 				  struct vmw_plane_state *vps,
58 				  u32 *image, u32 width, u32 height,
59 				  u32 hotspotX, u32 hotspotY);
60 
61 struct vmw_svga_fifo_cmd_define_cursor {
62 	u32 cmd;
63 	SVGAFifoCmdDefineAlphaCursor cursor;
64 };
65 
66 /**
67  * vmw_send_define_cursor_cmd - queue a define cursor command
68  * @dev_priv: the private driver struct
69  * @image: buffer which holds the cursor image
70  * @width: width of the mouse cursor image
71  * @height: height of the mouse cursor image
72  * @hotspotX: the horizontal position of mouse hotspot
73  * @hotspotY: the vertical position of mouse hotspot
74  */
75 static void vmw_send_define_cursor_cmd(struct vmw_private *dev_priv,
76 				       u32 *image, u32 width, u32 height,
77 				       u32 hotspotX, u32 hotspotY)
78 {
79 	struct vmw_svga_fifo_cmd_define_cursor *cmd;
80 	const u32 image_size = width * height * sizeof(*image);
81 	const u32 cmd_size = sizeof(*cmd) + image_size;
82 
83 	/* Try to reserve fifocmd space and swallow any failures;
84 	   such reservations cannot be left unconsumed for long
85 	   under the risk of clogging other fifocmd users, so
86 	   we treat reservations separtely from the way we treat
87 	   other fallible KMS-atomic resources at prepare_fb */
88 	cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);
89 
90 	if (unlikely(!cmd))
91 		return;
92 
93 	memset(cmd, 0, sizeof(*cmd));
94 
95 	memcpy(&cmd[1], image, image_size);
96 
97 	cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
98 	cmd->cursor.id = 0;
99 	cmd->cursor.width = width;
100 	cmd->cursor.height = height;
101 	cmd->cursor.hotspotX = hotspotX;
102 	cmd->cursor.hotspotY = hotspotY;
103 
104 	vmw_cmd_commit_flush(dev_priv, cmd_size);
105 }
106 
107 /**
108  * vmw_cursor_update_image - update the cursor image on the provided plane
109  * @dev_priv: the private driver struct
110  * @vps: the plane state of the cursor plane
111  * @image: buffer which holds the cursor image
112  * @width: width of the mouse cursor image
113  * @height: height of the mouse cursor image
114  * @hotspotX: the horizontal position of mouse hotspot
115  * @hotspotY: the vertical position of mouse hotspot
116  */
117 static void vmw_cursor_update_image(struct vmw_private *dev_priv,
118 				    struct vmw_plane_state *vps,
119 				    u32 *image, u32 width, u32 height,
120 				    u32 hotspotX, u32 hotspotY)
121 {
122 	if (vps->cursor.bo)
123 		vmw_cursor_update_mob(dev_priv, vps, image,
124 				      vps->base.crtc_w, vps->base.crtc_h,
125 				      hotspotX, hotspotY);
126 
127 	else
128 		vmw_send_define_cursor_cmd(dev_priv, image, width, height,
129 					   hotspotX, hotspotY);
130 }
131 
132 
133 /**
134  * vmw_cursor_update_mob - Update cursor vis CursorMob mechanism
135  *
136  * Called from inside vmw_du_cursor_plane_atomic_update to actually
137  * make the cursor-image live.
138  *
139  * @dev_priv: device to work with
140  * @vps: the plane state of the cursor plane
141  * @image: cursor source data to fill the MOB with
142  * @width: source data width
143  * @height: source data height
144  * @hotspotX: cursor hotspot x
145  * @hotspotY: cursor hotspot Y
146  */
147 static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
148 				  struct vmw_plane_state *vps,
149 				  u32 *image, u32 width, u32 height,
150 				  u32 hotspotX, u32 hotspotY)
151 {
152 	SVGAGBCursorHeader *header;
153 	SVGAGBAlphaCursorHeader *alpha_header;
154 	const u32 image_size = width * height * sizeof(*image);
155 	bool dummy;
156 
157 	header = ttm_kmap_obj_virtual(&vps->cursor.map, &dummy);
158 	alpha_header = &header->header.alphaHeader;
159 
160 	memset(header, 0, sizeof(*header));
161 
162 	header->type = SVGA_ALPHA_CURSOR;
163 	header->sizeInBytes = image_size;
164 
165 	alpha_header->hotspotX = hotspotX;
166 	alpha_header->hotspotY = hotspotY;
167 	alpha_header->width = width;
168 	alpha_header->height = height;
169 
170 	memcpy(header + 1, image, image_size);
171 	vmw_write(dev_priv, SVGA_REG_CURSOR_MOBID,
172 		  vps->cursor.bo->resource->start);
173 }
174 
175 
176 static u32 vmw_du_cursor_mob_size(u32 w, u32 h)
177 {
178 	return w * h * sizeof(u32) + sizeof(SVGAGBCursorHeader);
179 }
180 
181 /**
182  * vmw_du_cursor_plane_acquire_image -- Acquire the image data
183  * @vps: cursor plane state
184  */
185 static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps)
186 {
187 	bool dummy;
188 	if (vps->surf) {
189 		if (vps->surf_mapped)
190 			return vmw_bo_map_and_cache(vps->surf->res.backup);
191 		return vps->surf->snooper.image;
192 	} else if (vps->bo)
193 		return ttm_kmap_obj_virtual(&vps->bo->map, &dummy);
194 	return NULL;
195 }
196 
197 static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps,
198 					    struct vmw_plane_state *new_vps)
199 {
200 	void *old_image;
201 	void *new_image;
202 	u32 size;
203 	bool changed;
204 
205 	if (old_vps->base.crtc_w != new_vps->base.crtc_w ||
206 	    old_vps->base.crtc_h != new_vps->base.crtc_h)
207 	    return true;
208 
209 	if (old_vps->cursor.hotspot_x != new_vps->cursor.hotspot_x ||
210 	    old_vps->cursor.hotspot_y != new_vps->cursor.hotspot_y)
211 	    return true;
212 
213 	size = new_vps->base.crtc_w * new_vps->base.crtc_h * sizeof(u32);
214 
215 	old_image = vmw_du_cursor_plane_acquire_image(old_vps);
216 	new_image = vmw_du_cursor_plane_acquire_image(new_vps);
217 
218 	changed = false;
219 	if (old_image && new_image)
220 		changed = memcmp(old_image, new_image, size) != 0;
221 
222 	return changed;
223 }
224 
225 static void vmw_du_destroy_cursor_mob(struct ttm_buffer_object **bo)
226 {
227 	if (!(*bo))
228 		return;
229 
230 	ttm_bo_unpin(*bo);
231 	ttm_bo_put(*bo);
232 	kfree(*bo);
233 	*bo = NULL;
234 }
235 
236 static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp,
237 				  struct vmw_plane_state *vps)
238 {
239 	u32 i;
240 
241 	if (!vps->cursor.bo)
242 		return;
243 
244 	vmw_du_cursor_plane_unmap_cm(vps);
245 
246 	/* Look for a free slot to return this mob to the cache. */
247 	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
248 		if (!vcp->cursor_mobs[i]) {
249 			vcp->cursor_mobs[i] = vps->cursor.bo;
250 			vps->cursor.bo = NULL;
251 			return;
252 		}
253 	}
254 
255 	/* Cache is full: See if this mob is bigger than an existing mob. */
256 	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
257 		if (vcp->cursor_mobs[i]->base.size <
258 		    vps->cursor.bo->base.size) {
259 			vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
260 			vcp->cursor_mobs[i] = vps->cursor.bo;
261 			vps->cursor.bo = NULL;
262 			return;
263 		}
264 	}
265 
266 	/* Destroy it if it's not worth caching. */
267 	vmw_du_destroy_cursor_mob(&vps->cursor.bo);
268 }
269 
270 static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp,
271 				 struct vmw_plane_state *vps)
272 {
273 	struct vmw_private *dev_priv = vcp->base.dev->dev_private;
274 	u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
275 	u32 i;
276 	u32 cursor_max_dim, mob_max_size;
277 	int ret;
278 
279 	if (!dev_priv->has_mob ||
280 	    (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0)
281 		return -EINVAL;
282 
283 	mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
284 	cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION);
285 
286 	if (size > mob_max_size || vps->base.crtc_w > cursor_max_dim ||
287 	    vps->base.crtc_h > cursor_max_dim)
288 		return -EINVAL;
289 
290 	if (vps->cursor.bo) {
291 		if (vps->cursor.bo->base.size >= size)
292 			return 0;
293 		vmw_du_put_cursor_mob(vcp, vps);
294 	}
295 
296 	/* Look for an unused mob in the cache. */
297 	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
298 		if (vcp->cursor_mobs[i] &&
299 		    vcp->cursor_mobs[i]->base.size >= size) {
300 			vps->cursor.bo = vcp->cursor_mobs[i];
301 			vcp->cursor_mobs[i] = NULL;
302 			return 0;
303 		}
304 	}
305 	/* Create a new mob if we can't find an existing one. */
306 	ret = vmw_bo_create_kernel(dev_priv, size, &vmw_mob_placement,
307 				   &vps->cursor.bo);
308 
309 	if (ret != 0)
310 		return ret;
311 
312 	/* Fence the mob creation so we are guarateed to have the mob */
313 	ret = ttm_bo_reserve(vps->cursor.bo, false, false, NULL);
314 	if (ret != 0)
315 		goto teardown;
316 
317 	vmw_bo_fence_single(vps->cursor.bo, NULL);
318 	ttm_bo_unreserve(vps->cursor.bo);
319 	return 0;
320 
321 teardown:
322 	vmw_du_destroy_cursor_mob(&vps->cursor.bo);
323 	return ret;
324 }
325 
326 
327 static void vmw_cursor_update_position(struct vmw_private *dev_priv,
328 				       bool show, int x, int y)
329 {
330 	const uint32_t svga_cursor_on = show ? SVGA_CURSOR_ON_SHOW
331 					     : SVGA_CURSOR_ON_HIDE;
332 	uint32_t count;
333 
334 	spin_lock(&dev_priv->cursor_lock);
335 	if (dev_priv->capabilities2 & SVGA_CAP2_EXTRA_REGS) {
336 		vmw_write(dev_priv, SVGA_REG_CURSOR4_X, x);
337 		vmw_write(dev_priv, SVGA_REG_CURSOR4_Y, y);
338 		vmw_write(dev_priv, SVGA_REG_CURSOR4_SCREEN_ID, SVGA3D_INVALID_ID);
339 		vmw_write(dev_priv, SVGA_REG_CURSOR4_ON, svga_cursor_on);
340 		vmw_write(dev_priv, SVGA_REG_CURSOR4_SUBMIT, 1);
341 	} else if (vmw_is_cursor_bypass3_enabled(dev_priv)) {
342 		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, svga_cursor_on);
343 		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
344 		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
345 		count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
346 		vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
347 	} else {
348 		vmw_write(dev_priv, SVGA_REG_CURSOR_X, x);
349 		vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y);
350 		vmw_write(dev_priv, SVGA_REG_CURSOR_ON, svga_cursor_on);
351 	}
352 	spin_unlock(&dev_priv->cursor_lock);
353 }
354 
355 void vmw_kms_cursor_snoop(struct vmw_surface *srf,
356 			  struct ttm_object_file *tfile,
357 			  struct ttm_buffer_object *bo,
358 			  SVGA3dCmdHeader *header)
359 {
360 	struct ttm_bo_kmap_obj map;
361 	unsigned long kmap_offset;
362 	unsigned long kmap_num;
363 	SVGA3dCopyBox *box;
364 	unsigned box_count;
365 	void *virtual;
366 	bool dummy;
367 	struct vmw_dma_cmd {
368 		SVGA3dCmdHeader header;
369 		SVGA3dCmdSurfaceDMA dma;
370 	} *cmd;
371 	int i, ret;
372 	const struct SVGA3dSurfaceDesc *desc =
373 		vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
374 	const u32 image_pitch = VMW_CURSOR_SNOOP_WIDTH * desc->pitchBytesPerBlock;
375 
376 	cmd = container_of(header, struct vmw_dma_cmd, header);
377 
378 	/* No snooper installed, nothing to copy */
379 	if (!srf->snooper.image)
380 		return;
381 
382 	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
383 		DRM_ERROR("face and mipmap for cursors should never != 0\n");
384 		return;
385 	}
386 
387 	if (cmd->header.size < 64) {
388 		DRM_ERROR("at least one full copy box must be given\n");
389 		return;
390 	}
391 
392 	box = (SVGA3dCopyBox *)&cmd[1];
393 	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
394 			sizeof(SVGA3dCopyBox);
395 
396 	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
397 	    box->x != 0    || box->y != 0    || box->z != 0    ||
398 	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
399 	    box->d != 1    || box_count != 1 ||
400 	    box->w > VMW_CURSOR_SNOOP_WIDTH || box->h > VMW_CURSOR_SNOOP_HEIGHT) {
401 		/* TODO handle none page aligned offsets */
402 		/* TODO handle more dst & src != 0 */
403 		/* TODO handle more then one copy */
404 		DRM_ERROR("Can't snoop dma request for cursor!\n");
405 		DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
406 			  box->srcx, box->srcy, box->srcz,
407 			  box->x, box->y, box->z,
408 			  box->w, box->h, box->d, box_count,
409 			  cmd->dma.guest.ptr.offset);
410 		return;
411 	}
412 
413 	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
414 	kmap_num = (VMW_CURSOR_SNOOP_HEIGHT*image_pitch) >> PAGE_SHIFT;
415 
416 	ret = ttm_bo_reserve(bo, true, false, NULL);
417 	if (unlikely(ret != 0)) {
418 		DRM_ERROR("reserve failed\n");
419 		return;
420 	}
421 
422 	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
423 	if (unlikely(ret != 0))
424 		goto err_unreserve;
425 
426 	virtual = ttm_kmap_obj_virtual(&map, &dummy);
427 
428 	if (box->w == VMW_CURSOR_SNOOP_WIDTH && cmd->dma.guest.pitch == image_pitch) {
429 		memcpy(srf->snooper.image, virtual,
430 		       VMW_CURSOR_SNOOP_HEIGHT*image_pitch);
431 	} else {
432 		/* Image is unsigned pointer. */
433 		for (i = 0; i < box->h; i++)
434 			memcpy(srf->snooper.image + i * image_pitch,
435 			       virtual + i * cmd->dma.guest.pitch,
436 			       box->w * desc->pitchBytesPerBlock);
437 	}
438 
439 	srf->snooper.age++;
440 
441 	ttm_bo_kunmap(&map);
442 err_unreserve:
443 	ttm_bo_unreserve(bo);
444 }
445 
446 /**
447  * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
448  *
449  * @dev_priv: Pointer to the device private struct.
450  *
451  * Clears all legacy hotspots.
452  */
453 void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
454 {
455 	struct drm_device *dev = &dev_priv->drm;
456 	struct vmw_display_unit *du;
457 	struct drm_crtc *crtc;
458 
459 	drm_modeset_lock_all(dev);
460 	drm_for_each_crtc(crtc, dev) {
461 		du = vmw_crtc_to_du(crtc);
462 
463 		du->hotspot_x = 0;
464 		du->hotspot_y = 0;
465 	}
466 	drm_modeset_unlock_all(dev);
467 }
468 
469 void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
470 {
471 	struct drm_device *dev = &dev_priv->drm;
472 	struct vmw_display_unit *du;
473 	struct drm_crtc *crtc;
474 
475 	mutex_lock(&dev->mode_config.mutex);
476 
477 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
478 		du = vmw_crtc_to_du(crtc);
479 		if (!du->cursor_surface ||
480 		    du->cursor_age == du->cursor_surface->snooper.age ||
481 		    !du->cursor_surface->snooper.image)
482 			continue;
483 
484 		du->cursor_age = du->cursor_surface->snooper.age;
485 		vmw_send_define_cursor_cmd(dev_priv,
486 					   du->cursor_surface->snooper.image,
487 					   VMW_CURSOR_SNOOP_WIDTH,
488 					   VMW_CURSOR_SNOOP_HEIGHT,
489 					   du->hotspot_x + du->core_hotspot_x,
490 					   du->hotspot_y + du->core_hotspot_y);
491 	}
492 
493 	mutex_unlock(&dev->mode_config.mutex);
494 }
495 
496 
497 void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
498 {
499 	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
500 	u32 i;
501 
502 	vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);
503 
504 	for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++)
505 		vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
506 
507 	drm_plane_cleanup(plane);
508 }
509 
510 
511 void vmw_du_primary_plane_destroy(struct drm_plane *plane)
512 {
513 	drm_plane_cleanup(plane);
514 
515 	/* Planes are static in our case so we don't free it */
516 }
517 
518 
519 /**
520  * vmw_du_plane_unpin_surf - unpins resource associated with a framebuffer surface
521  *
522  * @vps: plane state associated with the display surface
523  * @unreference: true if we also want to unreference the display.
524  */
525 void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps,
526 			     bool unreference)
527 {
528 	if (vps->surf) {
529 		if (vps->pinned) {
530 			vmw_resource_unpin(&vps->surf->res);
531 			vps->pinned--;
532 		}
533 
534 		if (unreference) {
535 			if (vps->pinned)
536 				DRM_ERROR("Surface still pinned\n");
537 			vmw_surface_unreference(&vps->surf);
538 		}
539 	}
540 }
541 
542 
543 /**
544  * vmw_du_plane_cleanup_fb - Unpins the plane surface
545  *
546  * @plane:  display plane
547  * @old_state: Contains the FB to clean up
548  *
549  * Unpins the framebuffer surface
550  *
551  * Returns 0 on success
552  */
553 void
554 vmw_du_plane_cleanup_fb(struct drm_plane *plane,
555 			struct drm_plane_state *old_state)
556 {
557 	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
558 
559 	vmw_du_plane_unpin_surf(vps, false);
560 }
561 
562 
563 /**
564  * vmw_du_cursor_plane_map_cm - Maps the cursor mobs.
565  *
566  * @vps: plane_state
567  *
568  * Returns 0 on success
569  */
570 
571 static int
572 vmw_du_cursor_plane_map_cm(struct vmw_plane_state *vps)
573 {
574 	int ret;
575 	u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
576 	struct ttm_buffer_object *bo = vps->cursor.bo;
577 
578 	if (!bo)
579 		return -EINVAL;
580 
581 	if (bo->base.size < size)
582 		return -EINVAL;
583 
584 	if (vps->cursor.mapped)
585 		return 0;
586 
587 	ret = ttm_bo_reserve(bo, false, false, NULL);
588 
589 	if (unlikely(ret != 0))
590 		return -ENOMEM;
591 
592 	ret = ttm_bo_kmap(bo, 0, PFN_UP(size), &vps->cursor.map);
593 
594 	/*
595 	 * We just want to try to get mob bind to finish
596 	 * so that the first write to SVGA_REG_CURSOR_MOBID
597 	 * is done with a buffer that the device has already
598 	 * seen
599 	 */
600 	(void) ttm_bo_wait(bo, false, false);
601 
602 	ttm_bo_unreserve(bo);
603 
604 	if (unlikely(ret != 0))
605 		return -ENOMEM;
606 
607 	vps->cursor.mapped = true;
608 
609 	return 0;
610 }
611 
612 
613 /**
614  * vmw_du_cursor_plane_unmap_cm - Unmaps the cursor mobs.
615  *
616  * @vps: state of the cursor plane
617  *
618  * Returns 0 on success
619  */
620 
621 static int
622 vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps)
623 {
624 	int ret = 0;
625 	struct ttm_buffer_object *bo = vps->cursor.bo;
626 
627 	if (!vps->cursor.mapped)
628 		return 0;
629 
630 	if (!bo)
631 		return 0;
632 
633 	ret = ttm_bo_reserve(bo, true, false, NULL);
634 	if (likely(ret == 0)) {
635 		ttm_bo_kunmap(&vps->cursor.map);
636 		ttm_bo_unreserve(bo);
637 		vps->cursor.mapped = false;
638 	}
639 
640 	return ret;
641 }
642 
643 
644 /**
645  * vmw_du_cursor_plane_cleanup_fb - Unpins the plane surface
646  *
647  * @plane: cursor plane
648  * @old_state: contains the state to clean up
649  *
650  * Unmaps all cursor bo mappings and unpins the cursor surface
651  *
652  * Returns 0 on success
653  */
654 void
655 vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
656 			       struct drm_plane_state *old_state)
657 {
658 	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
659 	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
660 	bool dummy;
661 
662 	if (vps->surf_mapped) {
663 		vmw_bo_unmap(vps->surf->res.backup);
664 		vps->surf_mapped = false;
665 	}
666 
667 	if (vps->bo && ttm_kmap_obj_virtual(&vps->bo->map, &dummy)) {
668 		const int ret = ttm_bo_reserve(&vps->bo->base, true, false, NULL);
669 
670 		if (likely(ret == 0)) {
671 			if (atomic_read(&vps->bo->base_mapped_count) == 0)
672 			    ttm_bo_kunmap(&vps->bo->map);
673 			ttm_bo_unreserve(&vps->bo->base);
674 		}
675 	}
676 
677 	vmw_du_cursor_plane_unmap_cm(vps);
678 	vmw_du_put_cursor_mob(vcp, vps);
679 
680 	vmw_du_plane_unpin_surf(vps, false);
681 
682 	if (vps->surf) {
683 		vmw_surface_unreference(&vps->surf);
684 		vps->surf = NULL;
685 	}
686 
687 	if (vps->bo) {
688 		vmw_bo_unreference(&vps->bo);
689 		vps->bo = NULL;
690 	}
691 }
692 
693 
694 /**
695  * vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it
696  *
697  * @plane:  display plane
698  * @new_state: info on the new plane state, including the FB
699  *
700  * Returns 0 on success
701  */
702 int
703 vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
704 			       struct drm_plane_state *new_state)
705 {
706 	struct drm_framebuffer *fb = new_state->fb;
707 	struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
708 	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
709 	int ret = 0;
710 
711 	if (vps->surf) {
712 		vmw_surface_unreference(&vps->surf);
713 		vps->surf = NULL;
714 	}
715 
716 	if (vps->bo) {
717 		vmw_bo_unreference(&vps->bo);
718 		vps->bo = NULL;
719 	}
720 
721 	if (fb) {
722 		if (vmw_framebuffer_to_vfb(fb)->bo) {
723 			vps->bo = vmw_framebuffer_to_vfbd(fb)->buffer;
724 			vmw_bo_reference(vps->bo);
725 		} else {
726 			vps->surf = vmw_framebuffer_to_vfbs(fb)->surface;
727 			vmw_surface_reference(vps->surf);
728 		}
729 	}
730 
731 	if (!vps->surf && vps->bo) {
732 		const u32 size = new_state->crtc_w * new_state->crtc_h * sizeof(u32);
733 
734 		/*
735 		 * Not using vmw_bo_map_and_cache() helper here as we need to
736 		 * reserve the ttm_buffer_object first which
737 		 * vmw_bo_map_and_cache() omits.
738 		 */
739 		ret = ttm_bo_reserve(&vps->bo->base, true, false, NULL);
740 
741 		if (unlikely(ret != 0))
742 			return -ENOMEM;
743 
744 		ret = ttm_bo_kmap(&vps->bo->base, 0, PFN_UP(size), &vps->bo->map);
745 
746 		if (likely(ret == 0))
747 			atomic_inc(&vps->bo->base_mapped_count);
748 
749 		ttm_bo_unreserve(&vps->bo->base);
750 
751 		if (unlikely(ret != 0))
752 			return -ENOMEM;
753 	} else if (vps->surf && !vps->bo && vps->surf->res.backup) {
754 
755 		WARN_ON(vps->surf->snooper.image);
756 		ret = ttm_bo_reserve(&vps->surf->res.backup->base, true, false,
757 				     NULL);
758 		if (unlikely(ret != 0))
759 			return -ENOMEM;
760 		vmw_bo_map_and_cache(vps->surf->res.backup);
761 		ttm_bo_unreserve(&vps->surf->res.backup->base);
762 		vps->surf_mapped = true;
763 	}
764 
765 	if (vps->surf || vps->bo) {
766 		vmw_du_get_cursor_mob(vcp, vps);
767 		vmw_du_cursor_plane_map_cm(vps);
768 	}
769 
770 	return 0;
771 }
772 
773 
774 void
775 vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
776 				  struct drm_atomic_state *state)
777 {
778 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
779 									   plane);
780 	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
781 									   plane);
782 	struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc;
783 	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
784 	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
785 	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
786 	struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
787 	s32 hotspot_x, hotspot_y;
788 	bool dummy;
789 
790 	hotspot_x = du->hotspot_x;
791 	hotspot_y = du->hotspot_y;
792 
793 	if (new_state->fb) {
794 		hotspot_x += new_state->fb->hot_x;
795 		hotspot_y += new_state->fb->hot_y;
796 	}
797 
798 	du->cursor_surface = vps->surf;
799 	du->cursor_bo = vps->bo;
800 
801 	if (!vps->surf && !vps->bo) {
802 		vmw_cursor_update_position(dev_priv, false, 0, 0);
803 		return;
804 	}
805 
806 	vps->cursor.hotspot_x = hotspot_x;
807 	vps->cursor.hotspot_y = hotspot_y;
808 
809 	if (vps->surf) {
810 		du->cursor_age = du->cursor_surface->snooper.age;
811 	}
812 
813 	if (!vmw_du_cursor_plane_has_changed(old_vps, vps)) {
814 		/*
815 		 * If it hasn't changed, avoid making the device do extra
816 		 * work by keeping the old cursor active.
817 		 */
818 		struct vmw_cursor_plane_state tmp = old_vps->cursor;
819 		old_vps->cursor = vps->cursor;
820 		vps->cursor = tmp;
821 	} else {
822 		void *image = vmw_du_cursor_plane_acquire_image(vps);
823 		if (image)
824 			vmw_cursor_update_image(dev_priv, vps, image,
825 						new_state->crtc_w,
826 						new_state->crtc_h,
827 						hotspot_x, hotspot_y);
828 	}
829 
830 	if (vps->bo) {
831 		if (ttm_kmap_obj_virtual(&vps->bo->map, &dummy))
832 			atomic_dec(&vps->bo->base_mapped_count);
833 	}
834 
835 	du->cursor_x = new_state->crtc_x + du->set_gui_x;
836 	du->cursor_y = new_state->crtc_y + du->set_gui_y;
837 
838 	vmw_cursor_update_position(dev_priv, true,
839 				   du->cursor_x + hotspot_x,
840 				   du->cursor_y + hotspot_y);
841 
842 	du->core_hotspot_x = hotspot_x - du->hotspot_x;
843 	du->core_hotspot_y = hotspot_y - du->hotspot_y;
844 }
845 
846 
847 /**
848  * vmw_du_primary_plane_atomic_check - check if the new state is okay
849  *
850  * @plane: display plane
851  * @state: info on the new plane state, including the FB
852  *
853  * Check if the new state is settable given the current state.  Other
854  * than what the atomic helper checks, we care about crtc fitting
855  * the FB and maintaining one active framebuffer.
856  *
857  * Returns 0 on success
858  */
859 int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
860 				      struct drm_atomic_state *state)
861 {
862 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
863 									   plane);
864 	struct drm_crtc_state *crtc_state = NULL;
865 	struct drm_framebuffer *new_fb = new_state->fb;
866 	int ret;
867 
868 	if (new_state->crtc)
869 		crtc_state = drm_atomic_get_new_crtc_state(state,
870 							   new_state->crtc);
871 
872 	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
873 						  DRM_PLANE_NO_SCALING,
874 						  DRM_PLANE_NO_SCALING,
875 						  false, true);
876 
877 	if (!ret && new_fb) {
878 		struct drm_crtc *crtc = new_state->crtc;
879 		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
880 
881 		vmw_connector_state_to_vcs(du->connector.state);
882 	}
883 
884 
885 	return ret;
886 }
887 
888 
889 /**
890  * vmw_du_cursor_plane_atomic_check - check if the new state is okay
891  *
892  * @plane: cursor plane
893  * @state: info on the new plane state
894  *
895  * This is a chance to fail if the new cursor state does not fit
896  * our requirements.
897  *
898  * Returns 0 on success
899  */
900 int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
901 				     struct drm_atomic_state *state)
902 {
903 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
904 									   plane);
905 	int ret = 0;
906 	struct drm_crtc_state *crtc_state = NULL;
907 	struct vmw_surface *surface = NULL;
908 	struct drm_framebuffer *fb = new_state->fb;
909 
910 	if (new_state->crtc)
911 		crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
912 							   new_state->crtc);
913 
914 	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
915 						  DRM_PLANE_NO_SCALING,
916 						  DRM_PLANE_NO_SCALING,
917 						  true, true);
918 	if (ret)
919 		return ret;
920 
921 	/* Turning off */
922 	if (!fb)
923 		return 0;
924 
925 	/* A lot of the code assumes this */
926 	if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
927 		DRM_ERROR("Invalid cursor dimensions (%d, %d)\n",
928 			  new_state->crtc_w, new_state->crtc_h);
929 		return -EINVAL;
930 	}
931 
932 	if (!vmw_framebuffer_to_vfb(fb)->bo) {
933 		surface = vmw_framebuffer_to_vfbs(fb)->surface;
934 
935 		WARN_ON(!surface);
936 
937 		if (!surface ||
938 		    (!surface->snooper.image && !surface->res.backup)) {
939 			DRM_ERROR("surface not suitable for cursor\n");
940 			return -EINVAL;
941 		}
942 	}
943 
944 	return 0;
945 }
946 
947 
948 int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
949 			     struct drm_atomic_state *state)
950 {
951 	struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
952 									 crtc);
953 	struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
954 	int connector_mask = drm_connector_mask(&du->connector);
955 	bool has_primary = new_state->plane_mask &
956 			   drm_plane_mask(crtc->primary);
957 
958 	/* We always want to have an active plane with an active CRTC */
959 	if (has_primary != new_state->enable)
960 		return -EINVAL;
961 
962 
963 	if (new_state->connector_mask != connector_mask &&
964 	    new_state->connector_mask != 0) {
965 		DRM_ERROR("Invalid connectors configuration\n");
966 		return -EINVAL;
967 	}
968 
969 	/*
970 	 * Our virtual device does not have a dot clock, so use the logical
971 	 * clock value as the dot clock.
972 	 */
973 	if (new_state->mode.crtc_clock == 0)
974 		new_state->adjusted_mode.crtc_clock = new_state->mode.clock;
975 
976 	return 0;
977 }
978 
979 
980 void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
981 			      struct drm_atomic_state *state)
982 {
983 }
984 
985 
986 void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc,
987 			      struct drm_atomic_state *state)
988 {
989 }
990 
991 
992 /**
993  * vmw_du_crtc_duplicate_state - duplicate crtc state
994  * @crtc: DRM crtc
995  *
996  * Allocates and returns a copy of the crtc state (both common and
997  * vmw-specific) for the specified crtc.
998  *
999  * Returns: The newly allocated crtc state, or NULL on failure.
1000  */
1001 struct drm_crtc_state *
1002 vmw_du_crtc_duplicate_state(struct drm_crtc *crtc)
1003 {
1004 	struct drm_crtc_state *state;
1005 	struct vmw_crtc_state *vcs;
1006 
1007 	if (WARN_ON(!crtc->state))
1008 		return NULL;
1009 
1010 	vcs = kmemdup(crtc->state, sizeof(*vcs), GFP_KERNEL);
1011 
1012 	if (!vcs)
1013 		return NULL;
1014 
1015 	state = &vcs->base;
1016 
1017 	__drm_atomic_helper_crtc_duplicate_state(crtc, state);
1018 
1019 	return state;
1020 }
1021 
1022 
1023 /**
1024  * vmw_du_crtc_reset - creates a blank vmw crtc state
1025  * @crtc: DRM crtc
1026  *
1027  * Resets the atomic state for @crtc by freeing the state pointer (which
1028  * might be NULL, e.g. at driver load time) and allocating a new empty state
1029  * object.
1030  */
1031 void vmw_du_crtc_reset(struct drm_crtc *crtc)
1032 {
1033 	struct vmw_crtc_state *vcs;
1034 
1035 
1036 	if (crtc->state) {
1037 		__drm_atomic_helper_crtc_destroy_state(crtc->state);
1038 
1039 		kfree(vmw_crtc_state_to_vcs(crtc->state));
1040 	}
1041 
1042 	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
1043 
1044 	if (!vcs) {
1045 		DRM_ERROR("Cannot allocate vmw_crtc_state\n");
1046 		return;
1047 	}
1048 
1049 	__drm_atomic_helper_crtc_reset(crtc, &vcs->base);
1050 }
1051 
1052 
1053 /**
1054  * vmw_du_crtc_destroy_state - destroy crtc state
1055  * @crtc: DRM crtc
1056  * @state: state object to destroy
1057  *
1058  * Destroys the crtc state (both common and vmw-specific) for the
1059  * specified plane.
1060  */
1061 void
1062 vmw_du_crtc_destroy_state(struct drm_crtc *crtc,
1063 			  struct drm_crtc_state *state)
1064 {
1065 	drm_atomic_helper_crtc_destroy_state(crtc, state);
1066 }
1067 
1068 
1069 /**
1070  * vmw_du_plane_duplicate_state - duplicate plane state
1071  * @plane: drm plane
1072  *
1073  * Allocates and returns a copy of the plane state (both common and
1074  * vmw-specific) for the specified plane.
1075  *
1076  * Returns: The newly allocated plane state, or NULL on failure.
1077  */
1078 struct drm_plane_state *
1079 vmw_du_plane_duplicate_state(struct drm_plane *plane)
1080 {
1081 	struct drm_plane_state *state;
1082 	struct vmw_plane_state *vps;
1083 
1084 	vps = kmemdup(plane->state, sizeof(*vps), GFP_KERNEL);
1085 
1086 	if (!vps)
1087 		return NULL;
1088 
1089 	vps->pinned = 0;
1090 	vps->cpp = 0;
1091 
1092 	memset(&vps->cursor, 0, sizeof(vps->cursor));
1093 
1094 	/* Each ref counted resource needs to be acquired again */
1095 	if (vps->surf)
1096 		(void) vmw_surface_reference(vps->surf);
1097 
1098 	if (vps->bo)
1099 		(void) vmw_bo_reference(vps->bo);
1100 
1101 	state = &vps->base;
1102 
1103 	__drm_atomic_helper_plane_duplicate_state(plane, state);
1104 
1105 	return state;
1106 }
1107 
1108 
1109 /**
1110  * vmw_du_plane_reset - creates a blank vmw plane state
1111  * @plane: drm plane
1112  *
1113  * Resets the atomic state for @plane by freeing the state pointer (which might
1114  * be NULL, e.g. at driver load time) and allocating a new empty state object.
1115  */
1116 void vmw_du_plane_reset(struct drm_plane *plane)
1117 {
1118 	struct vmw_plane_state *vps;
1119 
1120 	if (plane->state)
1121 		vmw_du_plane_destroy_state(plane, plane->state);
1122 
1123 	vps = kzalloc(sizeof(*vps), GFP_KERNEL);
1124 
1125 	if (!vps) {
1126 		DRM_ERROR("Cannot allocate vmw_plane_state\n");
1127 		return;
1128 	}
1129 
1130 	__drm_atomic_helper_plane_reset(plane, &vps->base);
1131 }
1132 
1133 
1134 /**
1135  * vmw_du_plane_destroy_state - destroy plane state
1136  * @plane: DRM plane
1137  * @state: state object to destroy
1138  *
1139  * Destroys the plane state (both common and vmw-specific) for the
1140  * specified plane.
1141  */
1142 void
1143 vmw_du_plane_destroy_state(struct drm_plane *plane,
1144 			   struct drm_plane_state *state)
1145 {
1146 	struct vmw_plane_state *vps = vmw_plane_state_to_vps(state);
1147 
1148 	/* Should have been freed by cleanup_fb */
1149 	if (vps->surf)
1150 		vmw_surface_unreference(&vps->surf);
1151 
1152 	if (vps->bo)
1153 		vmw_bo_unreference(&vps->bo);
1154 
1155 	drm_atomic_helper_plane_destroy_state(plane, state);
1156 }
1157 
1158 
1159 /**
1160  * vmw_du_connector_duplicate_state - duplicate connector state
1161  * @connector: DRM connector
1162  *
1163  * Allocates and returns a copy of the connector state (both common and
1164  * vmw-specific) for the specified connector.
1165  *
1166  * Returns: The newly allocated connector state, or NULL on failure.
1167  */
1168 struct drm_connector_state *
1169 vmw_du_connector_duplicate_state(struct drm_connector *connector)
1170 {
1171 	struct drm_connector_state *state;
1172 	struct vmw_connector_state *vcs;
1173 
1174 	if (WARN_ON(!connector->state))
1175 		return NULL;
1176 
1177 	vcs = kmemdup(connector->state, sizeof(*vcs), GFP_KERNEL);
1178 
1179 	if (!vcs)
1180 		return NULL;
1181 
1182 	state = &vcs->base;
1183 
1184 	__drm_atomic_helper_connector_duplicate_state(connector, state);
1185 
1186 	return state;
1187 }
1188 
1189 
1190 /**
1191  * vmw_du_connector_reset - creates a blank vmw connector state
1192  * @connector: DRM connector
1193  *
1194  * Resets the atomic state for @connector by freeing the state pointer (which
1195  * might be NULL, e.g. at driver load time) and allocating a new empty state
1196  * object.
1197  */
1198 void vmw_du_connector_reset(struct drm_connector *connector)
1199 {
1200 	struct vmw_connector_state *vcs;
1201 
1202 
1203 	if (connector->state) {
1204 		__drm_atomic_helper_connector_destroy_state(connector->state);
1205 
1206 		kfree(vmw_connector_state_to_vcs(connector->state));
1207 	}
1208 
1209 	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
1210 
1211 	if (!vcs) {
1212 		DRM_ERROR("Cannot allocate vmw_connector_state\n");
1213 		return;
1214 	}
1215 
1216 	__drm_atomic_helper_connector_reset(connector, &vcs->base);
1217 }
1218 
1219 
1220 /**
1221  * vmw_du_connector_destroy_state - destroy connector state
1222  * @connector: DRM connector
1223  * @state: state object to destroy
1224  *
1225  * Destroys the connector state (both common and vmw-specific) for the
1226  * specified plane.
1227  */
1228 void
1229 vmw_du_connector_destroy_state(struct drm_connector *connector,
1230 			  struct drm_connector_state *state)
1231 {
1232 	drm_atomic_helper_connector_destroy_state(connector, state);
1233 }
1234 /*
1235  * Generic framebuffer code
1236  */
1237 
1238 /*
1239  * Surface framebuffer code
1240  */
1241 
1242 static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
1243 {
1244 	struct vmw_framebuffer_surface *vfbs =
1245 		vmw_framebuffer_to_vfbs(framebuffer);
1246 
1247 	drm_framebuffer_cleanup(framebuffer);
1248 	vmw_surface_unreference(&vfbs->surface);
1249 
1250 	kfree(vfbs);
1251 }
1252 
1253 /**
1254  * vmw_kms_readback - Perform a readback from the screen system to
1255  * a buffer-object backed framebuffer.
1256  *
1257  * @dev_priv: Pointer to the device private structure.
1258  * @file_priv: Pointer to a struct drm_file identifying the caller.
1259  * Must be set to NULL if @user_fence_rep is NULL.
1260  * @vfb: Pointer to the buffer-object backed framebuffer.
1261  * @user_fence_rep: User-space provided structure for fence information.
1262  * Must be set to non-NULL if @file_priv is non-NULL.
1263  * @vclips: Array of clip rects.
1264  * @num_clips: Number of clip rects in @vclips.
1265  *
1266  * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
1267  * interrupted.
1268  */
1269 int vmw_kms_readback(struct vmw_private *dev_priv,
1270 		     struct drm_file *file_priv,
1271 		     struct vmw_framebuffer *vfb,
1272 		     struct drm_vmw_fence_rep __user *user_fence_rep,
1273 		     struct drm_vmw_rect *vclips,
1274 		     uint32_t num_clips)
1275 {
1276 	switch (dev_priv->active_display_unit) {
1277 	case vmw_du_screen_object:
1278 		return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
1279 					    user_fence_rep, vclips, num_clips,
1280 					    NULL);
1281 	case vmw_du_screen_target:
1282 		return vmw_kms_stdu_dma(dev_priv, file_priv, vfb,
1283 					user_fence_rep, NULL, vclips, num_clips,
1284 					1, false, true, NULL);
1285 	default:
1286 		WARN_ONCE(true,
1287 			  "Readback called with invalid display system.\n");
1288 }
1289 
1290 	return -ENOSYS;
1291 }
1292 
1293 
1294 static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
1295 	.destroy = vmw_framebuffer_surface_destroy,
1296 	.dirty = drm_atomic_helper_dirtyfb,
1297 };
1298 
1299 static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
1300 					   struct vmw_surface *surface,
1301 					   struct vmw_framebuffer **out,
1302 					   const struct drm_mode_fb_cmd2
1303 					   *mode_cmd,
1304 					   bool is_bo_proxy)
1305 
1306 {
1307 	struct drm_device *dev = &dev_priv->drm;
1308 	struct vmw_framebuffer_surface *vfbs;
1309 	enum SVGA3dSurfaceFormat format;
1310 	int ret;
1311 
1312 	/* 3D is only supported on HWv8 and newer hosts */
1313 	if (dev_priv->active_display_unit == vmw_du_legacy)
1314 		return -ENOSYS;
1315 
1316 	/*
1317 	 * Sanity checks.
1318 	 */
1319 
1320 	if (!drm_any_plane_has_format(&dev_priv->drm,
1321 				      mode_cmd->pixel_format,
1322 				      mode_cmd->modifier[0])) {
1323 		drm_dbg(&dev_priv->drm,
1324 			"unsupported pixel format %p4cc / modifier 0x%llx\n",
1325 			&mode_cmd->pixel_format, mode_cmd->modifier[0]);
1326 		return -EINVAL;
1327 	}
1328 
1329 	/* Surface must be marked as a scanout. */
1330 	if (unlikely(!surface->metadata.scanout))
1331 		return -EINVAL;
1332 
1333 	if (unlikely(surface->metadata.mip_levels[0] != 1 ||
1334 		     surface->metadata.num_sizes != 1 ||
1335 		     surface->metadata.base_size.width < mode_cmd->width ||
1336 		     surface->metadata.base_size.height < mode_cmd->height ||
1337 		     surface->metadata.base_size.depth != 1)) {
1338 		DRM_ERROR("Incompatible surface dimensions "
1339 			  "for requested mode.\n");
1340 		return -EINVAL;
1341 	}
1342 
1343 	switch (mode_cmd->pixel_format) {
1344 	case DRM_FORMAT_ARGB8888:
1345 		format = SVGA3D_A8R8G8B8;
1346 		break;
1347 	case DRM_FORMAT_XRGB8888:
1348 		format = SVGA3D_X8R8G8B8;
1349 		break;
1350 	case DRM_FORMAT_RGB565:
1351 		format = SVGA3D_R5G6B5;
1352 		break;
1353 	case DRM_FORMAT_XRGB1555:
1354 		format = SVGA3D_A1R5G5B5;
1355 		break;
1356 	default:
1357 		DRM_ERROR("Invalid pixel format: %p4cc\n",
1358 			  &mode_cmd->pixel_format);
1359 		return -EINVAL;
1360 	}
1361 
1362 	/*
1363 	 * For DX, surface format validation is done when surface->scanout
1364 	 * is set.
1365 	 */
1366 	if (!has_sm4_context(dev_priv) && format != surface->metadata.format) {
1367 		DRM_ERROR("Invalid surface format for requested mode.\n");
1368 		return -EINVAL;
1369 	}
1370 
1371 	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
1372 	if (!vfbs) {
1373 		ret = -ENOMEM;
1374 		goto out_err1;
1375 	}
1376 
1377 	drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
1378 	vfbs->surface = vmw_surface_reference(surface);
1379 	vfbs->base.user_handle = mode_cmd->handles[0];
1380 	vfbs->is_bo_proxy = is_bo_proxy;
1381 
1382 	*out = &vfbs->base;
1383 
1384 	ret = drm_framebuffer_init(dev, &vfbs->base.base,
1385 				   &vmw_framebuffer_surface_funcs);
1386 	if (ret)
1387 		goto out_err2;
1388 
1389 	return 0;
1390 
1391 out_err2:
1392 	vmw_surface_unreference(&surface);
1393 	kfree(vfbs);
1394 out_err1:
1395 	return ret;
1396 }
1397 
1398 /*
1399  * Buffer-object framebuffer code
1400  */
1401 
1402 static int vmw_framebuffer_bo_create_handle(struct drm_framebuffer *fb,
1403 					    struct drm_file *file_priv,
1404 					    unsigned int *handle)
1405 {
1406 	struct vmw_framebuffer_bo *vfbd =
1407 			vmw_framebuffer_to_vfbd(fb);
1408 
1409 	return drm_gem_handle_create(file_priv, &vfbd->buffer->base.base, handle);
1410 }
1411 
1412 static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
1413 {
1414 	struct vmw_framebuffer_bo *vfbd =
1415 		vmw_framebuffer_to_vfbd(framebuffer);
1416 
1417 	drm_framebuffer_cleanup(framebuffer);
1418 	vmw_bo_unreference(&vfbd->buffer);
1419 
1420 	kfree(vfbd);
1421 }
1422 
1423 static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer,
1424 				    struct drm_file *file_priv,
1425 				    unsigned int flags, unsigned int color,
1426 				    struct drm_clip_rect *clips,
1427 				    unsigned int num_clips)
1428 {
1429 	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
1430 	struct vmw_framebuffer_bo *vfbd =
1431 		vmw_framebuffer_to_vfbd(framebuffer);
1432 	struct drm_clip_rect norect;
1433 	int ret, increment = 1;
1434 
1435 	drm_modeset_lock_all(&dev_priv->drm);
1436 
1437 	if (!num_clips) {
1438 		num_clips = 1;
1439 		clips = &norect;
1440 		norect.x1 = norect.y1 = 0;
1441 		norect.x2 = framebuffer->width;
1442 		norect.y2 = framebuffer->height;
1443 	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
1444 		num_clips /= 2;
1445 		increment = 2;
1446 	}
1447 
1448 	switch (dev_priv->active_display_unit) {
1449 	case vmw_du_legacy:
1450 		ret = vmw_kms_ldu_do_bo_dirty(dev_priv, &vfbd->base, 0, 0,
1451 					      clips, num_clips, increment);
1452 		break;
1453 	default:
1454 		ret = -EINVAL;
1455 		WARN_ONCE(true, "Dirty called with invalid display system.\n");
1456 		break;
1457 	}
1458 
1459 	vmw_cmd_flush(dev_priv, false);
1460 
1461 	drm_modeset_unlock_all(&dev_priv->drm);
1462 
1463 	return ret;
1464 }
1465 
1466 static int vmw_framebuffer_bo_dirty_ext(struct drm_framebuffer *framebuffer,
1467 					struct drm_file *file_priv,
1468 					unsigned int flags, unsigned int color,
1469 					struct drm_clip_rect *clips,
1470 					unsigned int num_clips)
1471 {
1472 	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
1473 
1474 	if (dev_priv->active_display_unit == vmw_du_legacy &&
1475 	    vmw_cmd_supported(dev_priv))
1476 		return vmw_framebuffer_bo_dirty(framebuffer, file_priv, flags,
1477 						color, clips, num_clips);
1478 
1479 	return drm_atomic_helper_dirtyfb(framebuffer, file_priv, flags, color,
1480 					 clips, num_clips);
1481 }
1482 
1483 static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
1484 	.create_handle = vmw_framebuffer_bo_create_handle,
1485 	.destroy = vmw_framebuffer_bo_destroy,
1486 	.dirty = vmw_framebuffer_bo_dirty_ext,
1487 };
1488 
1489 /*
1490  * Pin the bofer in a location suitable for access by the
1491  * display system.
1492  */
1493 static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
1494 {
1495 	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
1496 	struct vmw_buffer_object *buf;
1497 	struct ttm_placement *placement;
1498 	int ret;
1499 
1500 	buf = vfb->bo ?  vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
1501 		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
1502 
1503 	if (!buf)
1504 		return 0;
1505 
1506 	switch (dev_priv->active_display_unit) {
1507 	case vmw_du_legacy:
1508 		vmw_overlay_pause_all(dev_priv);
1509 		ret = vmw_bo_pin_in_start_of_vram(dev_priv, buf, false);
1510 		vmw_overlay_resume_all(dev_priv);
1511 		break;
1512 	case vmw_du_screen_object:
1513 	case vmw_du_screen_target:
1514 		if (vfb->bo) {
1515 			if (dev_priv->capabilities & SVGA_CAP_3D) {
1516 				/*
1517 				 * Use surface DMA to get content to
1518 				 * sreen target surface.
1519 				 */
1520 				placement = &vmw_vram_gmr_placement;
1521 			} else {
1522 				/* Use CPU blit. */
1523 				placement = &vmw_sys_placement;
1524 			}
1525 		} else {
1526 			/* Use surface / image update */
1527 			placement = &vmw_mob_placement;
1528 		}
1529 
1530 		return vmw_bo_pin_in_placement(dev_priv, buf, placement, false);
1531 	default:
1532 		return -EINVAL;
1533 	}
1534 
1535 	return ret;
1536 }
1537 
1538 static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
1539 {
1540 	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
1541 	struct vmw_buffer_object *buf;
1542 
1543 	buf = vfb->bo ?  vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
1544 		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
1545 
1546 	if (WARN_ON(!buf))
1547 		return 0;
1548 
1549 	return vmw_bo_unpin(dev_priv, buf, false);
1550 }
1551 
1552 /**
1553  * vmw_create_bo_proxy - create a proxy surface for the buffer object
1554  *
1555  * @dev: DRM device
1556  * @mode_cmd: parameters for the new surface
1557  * @bo_mob: MOB backing the buffer object
1558  * @srf_out: newly created surface
1559  *
1560  * When the content FB is a buffer object, we create a surface as a proxy to the
1561  * same buffer.  This way we can do a surface copy rather than a surface DMA.
1562  * This is a more efficient approach
1563  *
1564  * RETURNS:
1565  * 0 on success, error code otherwise
1566  */
1567 static int vmw_create_bo_proxy(struct drm_device *dev,
1568 			       const struct drm_mode_fb_cmd2 *mode_cmd,
1569 			       struct vmw_buffer_object *bo_mob,
1570 			       struct vmw_surface **srf_out)
1571 {
1572 	struct vmw_surface_metadata metadata = {0};
1573 	uint32_t format;
1574 	struct vmw_resource *res;
1575 	unsigned int bytes_pp;
1576 	int ret;
1577 
1578 	switch (mode_cmd->pixel_format) {
1579 	case DRM_FORMAT_ARGB8888:
1580 	case DRM_FORMAT_XRGB8888:
1581 		format = SVGA3D_X8R8G8B8;
1582 		bytes_pp = 4;
1583 		break;
1584 
1585 	case DRM_FORMAT_RGB565:
1586 	case DRM_FORMAT_XRGB1555:
1587 		format = SVGA3D_R5G6B5;
1588 		bytes_pp = 2;
1589 		break;
1590 
1591 	case 8:
1592 		format = SVGA3D_P8;
1593 		bytes_pp = 1;
1594 		break;
1595 
1596 	default:
1597 		DRM_ERROR("Invalid framebuffer format %p4cc\n",
1598 			  &mode_cmd->pixel_format);
1599 		return -EINVAL;
1600 	}
1601 
1602 	metadata.format = format;
1603 	metadata.mip_levels[0] = 1;
1604 	metadata.num_sizes = 1;
1605 	metadata.base_size.width = mode_cmd->pitches[0] / bytes_pp;
1606 	metadata.base_size.height =  mode_cmd->height;
1607 	metadata.base_size.depth = 1;
1608 	metadata.scanout = true;
1609 
1610 	ret = vmw_gb_surface_define(vmw_priv(dev), &metadata, srf_out);
1611 	if (ret) {
1612 		DRM_ERROR("Failed to allocate proxy content buffer\n");
1613 		return ret;
1614 	}
1615 
1616 	res = &(*srf_out)->res;
1617 
1618 	/* Reserve and switch the backing mob. */
1619 	mutex_lock(&res->dev_priv->cmdbuf_mutex);
1620 	(void) vmw_resource_reserve(res, false, true);
1621 	vmw_bo_unreference(&res->backup);
1622 	res->backup = vmw_bo_reference(bo_mob);
1623 	res->backup_offset = 0;
1624 	vmw_resource_unreserve(res, false, false, false, NULL, 0);
1625 	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
1626 
1627 	return 0;
1628 }
1629 
1630 
1631 
1632 static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
1633 				      struct vmw_buffer_object *bo,
1634 				      struct vmw_framebuffer **out,
1635 				      const struct drm_mode_fb_cmd2
1636 				      *mode_cmd)
1637 
1638 {
1639 	struct drm_device *dev = &dev_priv->drm;
1640 	struct vmw_framebuffer_bo *vfbd;
1641 	unsigned int requested_size;
1642 	int ret;
1643 
1644 	requested_size = mode_cmd->height * mode_cmd->pitches[0];
1645 	if (unlikely(requested_size > bo->base.base.size)) {
1646 		DRM_ERROR("Screen buffer object size is too small "
1647 			  "for requested mode.\n");
1648 		return -EINVAL;
1649 	}
1650 
1651 	if (!drm_any_plane_has_format(&dev_priv->drm,
1652 				      mode_cmd->pixel_format,
1653 				      mode_cmd->modifier[0])) {
1654 		drm_dbg(&dev_priv->drm,
1655 			"unsupported pixel format %p4cc / modifier 0x%llx\n",
1656 			&mode_cmd->pixel_format, mode_cmd->modifier[0]);
1657 		return -EINVAL;
1658 	}
1659 
1660 	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
1661 	if (!vfbd) {
1662 		ret = -ENOMEM;
1663 		goto out_err1;
1664 	}
1665 
1666 	vfbd->base.base.obj[0] = &bo->base.base;
1667 	drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
1668 	vfbd->base.bo = true;
1669 	vfbd->buffer = vmw_bo_reference(bo);
1670 	vfbd->base.user_handle = mode_cmd->handles[0];
1671 	*out = &vfbd->base;
1672 
1673 	ret = drm_framebuffer_init(dev, &vfbd->base.base,
1674 				   &vmw_framebuffer_bo_funcs);
1675 	if (ret)
1676 		goto out_err2;
1677 
1678 	return 0;
1679 
1680 out_err2:
1681 	vmw_bo_unreference(&bo);
1682 	kfree(vfbd);
1683 out_err1:
1684 	return ret;
1685 }
1686 
1687 
1688 /**
1689  * vmw_kms_srf_ok - check if a surface can be created
1690  *
1691  * @dev_priv: Pointer to device private struct.
1692  * @width: requested width
1693  * @height: requested height
1694  *
1695  * Surfaces need to be less than texture size
1696  */
1697 static bool
1698 vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
1699 {
1700 	if (width  > dev_priv->texture_max_width ||
1701 	    height > dev_priv->texture_max_height)
1702 		return false;
1703 
1704 	return true;
1705 }
1706 
1707 /**
1708  * vmw_kms_new_framebuffer - Create a new framebuffer.
1709  *
1710  * @dev_priv: Pointer to device private struct.
1711  * @bo: Pointer to buffer object to wrap the kms framebuffer around.
1712  * Either @bo or @surface must be NULL.
1713  * @surface: Pointer to a surface to wrap the kms framebuffer around.
1714  * Either @bo or @surface must be NULL.
1715  * @only_2d: No presents will occur to this buffer object based framebuffer.
1716  * This helps the code to do some important optimizations.
1717  * @mode_cmd: Frame-buffer metadata.
1718  */
1719 struct vmw_framebuffer *
1720 vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
1721 			struct vmw_buffer_object *bo,
1722 			struct vmw_surface *surface,
1723 			bool only_2d,
1724 			const struct drm_mode_fb_cmd2 *mode_cmd)
1725 {
1726 	struct vmw_framebuffer *vfb = NULL;
1727 	bool is_bo_proxy = false;
1728 	int ret;
1729 
1730 	/*
1731 	 * We cannot use the SurfaceDMA command in an non-accelerated VM,
1732 	 * therefore, wrap the buffer object in a surface so we can use the
1733 	 * SurfaceCopy command.
1734 	 */
1735 	if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)  &&
1736 	    bo && only_2d &&
1737 	    mode_cmd->width > 64 &&  /* Don't create a proxy for cursor */
1738 	    dev_priv->active_display_unit == vmw_du_screen_target) {
1739 		ret = vmw_create_bo_proxy(&dev_priv->drm, mode_cmd,
1740 					  bo, &surface);
1741 		if (ret)
1742 			return ERR_PTR(ret);
1743 
1744 		is_bo_proxy = true;
1745 	}
1746 
1747 	/* Create the new framebuffer depending one what we have */
1748 	if (surface) {
1749 		ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
1750 						      mode_cmd,
1751 						      is_bo_proxy);
1752 		/*
1753 		 * vmw_create_bo_proxy() adds a reference that is no longer
1754 		 * needed
1755 		 */
1756 		if (is_bo_proxy)
1757 			vmw_surface_unreference(&surface);
1758 	} else if (bo) {
1759 		ret = vmw_kms_new_framebuffer_bo(dev_priv, bo, &vfb,
1760 						 mode_cmd);
1761 	} else {
1762 		BUG();
1763 	}
1764 
1765 	if (ret)
1766 		return ERR_PTR(ret);
1767 
1768 	vfb->pin = vmw_framebuffer_pin;
1769 	vfb->unpin = vmw_framebuffer_unpin;
1770 
1771 	return vfb;
1772 }
1773 
1774 /*
1775  * Generic Kernel modesetting functions
1776  */
1777 
1778 static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
1779 						 struct drm_file *file_priv,
1780 						 const struct drm_mode_fb_cmd2 *mode_cmd)
1781 {
1782 	struct vmw_private *dev_priv = vmw_priv(dev);
1783 	struct vmw_framebuffer *vfb = NULL;
1784 	struct vmw_surface *surface = NULL;
1785 	struct vmw_buffer_object *bo = NULL;
1786 	int ret;
1787 
1788 	/* returns either a bo or surface */
1789 	ret = vmw_user_lookup_handle(dev_priv, file_priv,
1790 				     mode_cmd->handles[0],
1791 				     &surface, &bo);
1792 	if (ret) {
1793 		DRM_ERROR("Invalid buffer object handle %u (0x%x).\n",
1794 			  mode_cmd->handles[0], mode_cmd->handles[0]);
1795 		goto err_out;
1796 	}
1797 
1798 
1799 	if (!bo &&
1800 	    !vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
1801 		DRM_ERROR("Surface size cannot exceed %dx%d\n",
1802 			dev_priv->texture_max_width,
1803 			dev_priv->texture_max_height);
1804 		goto err_out;
1805 	}
1806 
1807 
1808 	vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
1809 				      !(dev_priv->capabilities & SVGA_CAP_3D),
1810 				      mode_cmd);
1811 	if (IS_ERR(vfb)) {
1812 		ret = PTR_ERR(vfb);
1813 		goto err_out;
1814 	}
1815 
1816 err_out:
1817 	/* vmw_user_lookup_handle takes one ref so does new_fb */
1818 	if (bo)
1819 		vmw_bo_unreference(&bo);
1820 	if (surface)
1821 		vmw_surface_unreference(&surface);
1822 
1823 	if (ret) {
1824 		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
1825 		return ERR_PTR(ret);
1826 	}
1827 
1828 	return &vfb->base;
1829 }
1830 
1831 /**
1832  * vmw_kms_check_display_memory - Validates display memory required for a
1833  * topology
1834  * @dev: DRM device
1835  * @num_rects: number of drm_rect in rects
1836  * @rects: array of drm_rect representing the topology to validate indexed by
1837  * crtc index.
1838  *
1839  * Returns:
1840  * 0 on success otherwise negative error code
1841  */
1842 static int vmw_kms_check_display_memory(struct drm_device *dev,
1843 					uint32_t num_rects,
1844 					struct drm_rect *rects)
1845 {
1846 	struct vmw_private *dev_priv = vmw_priv(dev);
1847 	struct drm_rect bounding_box = {0};
1848 	u64 total_pixels = 0, pixel_mem, bb_mem;
1849 	int i;
1850 
1851 	for (i = 0; i < num_rects; i++) {
1852 		/*
1853 		 * For STDU only individual screen (screen target) is limited by
1854 		 * SCREENTARGET_MAX_WIDTH/HEIGHT registers.
1855 		 */
1856 		if (dev_priv->active_display_unit == vmw_du_screen_target &&
1857 		    (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width ||
1858 		     drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) {
1859 			VMW_DEBUG_KMS("Screen size not supported.\n");
1860 			return -EINVAL;
1861 		}
1862 
1863 		/* Bounding box upper left is at (0,0). */
1864 		if (rects[i].x2 > bounding_box.x2)
1865 			bounding_box.x2 = rects[i].x2;
1866 
1867 		if (rects[i].y2 > bounding_box.y2)
1868 			bounding_box.y2 = rects[i].y2;
1869 
1870 		total_pixels += (u64) drm_rect_width(&rects[i]) *
1871 			(u64) drm_rect_height(&rects[i]);
1872 	}
1873 
1874 	/* Virtual svga device primary limits are always in 32-bpp. */
1875 	pixel_mem = total_pixels * 4;
1876 
1877 	/*
1878 	 * For HV10 and below prim_bb_mem is vram size. When
1879 	 * SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM is not present vram size is
1880 	 * limit on primary bounding box
1881 	 */
1882 	if (pixel_mem > dev_priv->max_primary_mem) {
1883 		VMW_DEBUG_KMS("Combined output size too large.\n");
1884 		return -EINVAL;
1885 	}
1886 
1887 	/* SVGA_CAP_NO_BB_RESTRICTION is available for STDU only. */
1888 	if (dev_priv->active_display_unit != vmw_du_screen_target ||
1889 	    !(dev_priv->capabilities & SVGA_CAP_NO_BB_RESTRICTION)) {
1890 		bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4;
1891 
1892 		if (bb_mem > dev_priv->max_primary_mem) {
1893 			VMW_DEBUG_KMS("Topology is beyond supported limits.\n");
1894 			return -EINVAL;
1895 		}
1896 	}
1897 
1898 	return 0;
1899 }
1900 
1901 /**
1902  * vmw_crtc_state_and_lock - Return new or current crtc state with locked
1903  * crtc mutex
1904  * @state: The atomic state pointer containing the new atomic state
1905  * @crtc: The crtc
1906  *
1907  * This function returns the new crtc state if it's part of the state update.
1908  * Otherwise returns the current crtc state. It also makes sure that the
1909  * crtc mutex is locked.
1910  *
1911  * Returns: A valid crtc state pointer or NULL. It may also return a
1912  * pointer error, in particular -EDEADLK if locking needs to be rerun.
1913  */
1914 static struct drm_crtc_state *
1915 vmw_crtc_state_and_lock(struct drm_atomic_state *state, struct drm_crtc *crtc)
1916 {
1917 	struct drm_crtc_state *crtc_state;
1918 
1919 	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
1920 	if (crtc_state) {
1921 		lockdep_assert_held(&crtc->mutex.mutex.base);
1922 	} else {
1923 		int ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
1924 
1925 		if (ret != 0 && ret != -EALREADY)
1926 			return ERR_PTR(ret);
1927 
1928 		crtc_state = crtc->state;
1929 	}
1930 
1931 	return crtc_state;
1932 }
1933 
1934 /**
1935  * vmw_kms_check_implicit - Verify that all implicit display units scan out
1936  * from the same fb after the new state is committed.
1937  * @dev: The drm_device.
1938  * @state: The new state to be checked.
1939  *
1940  * Returns:
1941  *   Zero on success,
1942  *   -EINVAL on invalid state,
1943  *   -EDEADLK if modeset locking needs to be rerun.
1944  */
1945 static int vmw_kms_check_implicit(struct drm_device *dev,
1946 				  struct drm_atomic_state *state)
1947 {
1948 	struct drm_framebuffer *implicit_fb = NULL;
1949 	struct drm_crtc *crtc;
1950 	struct drm_crtc_state *crtc_state;
1951 	struct drm_plane_state *plane_state;
1952 
1953 	drm_for_each_crtc(crtc, dev) {
1954 		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1955 
1956 		if (!du->is_implicit)
1957 			continue;
1958 
1959 		crtc_state = vmw_crtc_state_and_lock(state, crtc);
1960 		if (IS_ERR(crtc_state))
1961 			return PTR_ERR(crtc_state);
1962 
1963 		if (!crtc_state || !crtc_state->enable)
1964 			continue;
1965 
1966 		/*
1967 		 * Can't move primary planes across crtcs, so this is OK.
1968 		 * It also means we don't need to take the plane mutex.
1969 		 */
1970 		plane_state = du->primary.state;
1971 		if (plane_state->crtc != crtc)
1972 			continue;
1973 
1974 		if (!implicit_fb)
1975 			implicit_fb = plane_state->fb;
1976 		else if (implicit_fb != plane_state->fb)
1977 			return -EINVAL;
1978 	}
1979 
1980 	return 0;
1981 }
1982 
1983 /**
1984  * vmw_kms_check_topology - Validates topology in drm_atomic_state
1985  * @dev: DRM device
1986  * @state: the driver state object
1987  *
1988  * Returns:
1989  * 0 on success otherwise negative error code
1990  */
1991 static int vmw_kms_check_topology(struct drm_device *dev,
1992 				  struct drm_atomic_state *state)
1993 {
1994 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1995 	struct drm_rect *rects;
1996 	struct drm_crtc *crtc;
1997 	uint32_t i;
1998 	int ret = 0;
1999 
2000 	rects = kcalloc(dev->mode_config.num_crtc, sizeof(struct drm_rect),
2001 			GFP_KERNEL);
2002 	if (!rects)
2003 		return -ENOMEM;
2004 
2005 	drm_for_each_crtc(crtc, dev) {
2006 		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
2007 		struct drm_crtc_state *crtc_state;
2008 
2009 		i = drm_crtc_index(crtc);
2010 
2011 		crtc_state = vmw_crtc_state_and_lock(state, crtc);
2012 		if (IS_ERR(crtc_state)) {
2013 			ret = PTR_ERR(crtc_state);
2014 			goto clean;
2015 		}
2016 
2017 		if (!crtc_state)
2018 			continue;
2019 
2020 		if (crtc_state->enable) {
2021 			rects[i].x1 = du->gui_x;
2022 			rects[i].y1 = du->gui_y;
2023 			rects[i].x2 = du->gui_x + crtc_state->mode.hdisplay;
2024 			rects[i].y2 = du->gui_y + crtc_state->mode.vdisplay;
2025 		} else {
2026 			rects[i].x1 = 0;
2027 			rects[i].y1 = 0;
2028 			rects[i].x2 = 0;
2029 			rects[i].y2 = 0;
2030 		}
2031 	}
2032 
2033 	/* Determine change to topology due to new atomic state */
2034 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
2035 				      new_crtc_state, i) {
2036 		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
2037 		struct drm_connector *connector;
2038 		struct drm_connector_state *conn_state;
2039 		struct vmw_connector_state *vmw_conn_state;
2040 
2041 		if (!du->pref_active && new_crtc_state->enable) {
2042 			VMW_DEBUG_KMS("Enabling a disabled display unit\n");
2043 			ret = -EINVAL;
2044 			goto clean;
2045 		}
2046 
2047 		/*
2048 		 * For vmwgfx each crtc has only one connector attached and it
2049 		 * is not changed so don't really need to check the
2050 		 * crtc->connector_mask and iterate over it.
2051 		 */
2052 		connector = &du->connector;
2053 		conn_state = drm_atomic_get_connector_state(state, connector);
2054 		if (IS_ERR(conn_state)) {
2055 			ret = PTR_ERR(conn_state);
2056 			goto clean;
2057 		}
2058 
2059 		vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
2060 		vmw_conn_state->gui_x = du->gui_x;
2061 		vmw_conn_state->gui_y = du->gui_y;
2062 	}
2063 
2064 	ret = vmw_kms_check_display_memory(dev, dev->mode_config.num_crtc,
2065 					   rects);
2066 
2067 clean:
2068 	kfree(rects);
2069 	return ret;
2070 }
2071 
2072 /**
2073  * vmw_kms_atomic_check_modeset- validate state object for modeset changes
2074  *
2075  * @dev: DRM device
2076  * @state: the driver state object
2077  *
2078  * This is a simple wrapper around drm_atomic_helper_check_modeset() for
2079  * us to assign a value to mode->crtc_clock so that
2080  * drm_calc_timestamping_constants() won't throw an error message
2081  *
2082  * Returns:
2083  * Zero for success or -errno
2084  */
2085 static int
2086 vmw_kms_atomic_check_modeset(struct drm_device *dev,
2087 			     struct drm_atomic_state *state)
2088 {
2089 	struct drm_crtc *crtc;
2090 	struct drm_crtc_state *crtc_state;
2091 	bool need_modeset = false;
2092 	int i, ret;
2093 
2094 	ret = drm_atomic_helper_check(dev, state);
2095 	if (ret)
2096 		return ret;
2097 
2098 	ret = vmw_kms_check_implicit(dev, state);
2099 	if (ret) {
2100 		VMW_DEBUG_KMS("Invalid implicit state\n");
2101 		return ret;
2102 	}
2103 
2104 	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
2105 		if (drm_atomic_crtc_needs_modeset(crtc_state))
2106 			need_modeset = true;
2107 	}
2108 
2109 	if (need_modeset)
2110 		return vmw_kms_check_topology(dev, state);
2111 
2112 	return ret;
2113 }
2114 
2115 static const struct drm_mode_config_funcs vmw_kms_funcs = {
2116 	.fb_create = vmw_kms_fb_create,
2117 	.atomic_check = vmw_kms_atomic_check_modeset,
2118 	.atomic_commit = drm_atomic_helper_commit,
2119 };
2120 
2121 static int vmw_kms_generic_present(struct vmw_private *dev_priv,
2122 				   struct drm_file *file_priv,
2123 				   struct vmw_framebuffer *vfb,
2124 				   struct vmw_surface *surface,
2125 				   uint32_t sid,
2126 				   int32_t destX, int32_t destY,
2127 				   struct drm_vmw_rect *clips,
2128 				   uint32_t num_clips)
2129 {
2130 	return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
2131 					    &surface->res, destX, destY,
2132 					    num_clips, 1, NULL, NULL);
2133 }
2134 
2135 
2136 int vmw_kms_present(struct vmw_private *dev_priv,
2137 		    struct drm_file *file_priv,
2138 		    struct vmw_framebuffer *vfb,
2139 		    struct vmw_surface *surface,
2140 		    uint32_t sid,
2141 		    int32_t destX, int32_t destY,
2142 		    struct drm_vmw_rect *clips,
2143 		    uint32_t num_clips)
2144 {
2145 	int ret;
2146 
2147 	switch (dev_priv->active_display_unit) {
2148 	case vmw_du_screen_target:
2149 		ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
2150 						 &surface->res, destX, destY,
2151 						 num_clips, 1, NULL, NULL);
2152 		break;
2153 	case vmw_du_screen_object:
2154 		ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
2155 					      sid, destX, destY, clips,
2156 					      num_clips);
2157 		break;
2158 	default:
2159 		WARN_ONCE(true,
2160 			  "Present called with invalid display system.\n");
2161 		ret = -ENOSYS;
2162 		break;
2163 	}
2164 	if (ret)
2165 		return ret;
2166 
2167 	vmw_cmd_flush(dev_priv, false);
2168 
2169 	return 0;
2170 }
2171 
2172 static void
2173 vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
2174 {
2175 	if (dev_priv->hotplug_mode_update_property)
2176 		return;
2177 
2178 	dev_priv->hotplug_mode_update_property =
2179 		drm_property_create_range(&dev_priv->drm,
2180 					  DRM_MODE_PROP_IMMUTABLE,
2181 					  "hotplug_mode_update", 0, 1);
2182 }
2183 
2184 int vmw_kms_init(struct vmw_private *dev_priv)
2185 {
2186 	struct drm_device *dev = &dev_priv->drm;
2187 	int ret;
2188 	static const char *display_unit_names[] = {
2189 		"Invalid",
2190 		"Legacy",
2191 		"Screen Object",
2192 		"Screen Target",
2193 		"Invalid (max)"
2194 	};
2195 
2196 	drm_mode_config_init(dev);
2197 	dev->mode_config.funcs = &vmw_kms_funcs;
2198 	dev->mode_config.min_width = 1;
2199 	dev->mode_config.min_height = 1;
2200 	dev->mode_config.max_width = dev_priv->texture_max_width;
2201 	dev->mode_config.max_height = dev_priv->texture_max_height;
2202 	dev->mode_config.preferred_depth = dev_priv->assume_16bpp ? 16 : 32;
2203 	dev->mode_config.prefer_shadow_fbdev = !dev_priv->has_mob;
2204 
2205 	drm_mode_create_suggested_offset_properties(dev);
2206 	vmw_kms_create_hotplug_mode_update_property(dev_priv);
2207 
2208 	ret = vmw_kms_stdu_init_display(dev_priv);
2209 	if (ret) {
2210 		ret = vmw_kms_sou_init_display(dev_priv);
2211 		if (ret) /* Fallback */
2212 			ret = vmw_kms_ldu_init_display(dev_priv);
2213 	}
2214 	BUILD_BUG_ON(ARRAY_SIZE(display_unit_names) != (vmw_du_max + 1));
2215 	drm_info(&dev_priv->drm, "%s display unit initialized\n",
2216 		 display_unit_names[dev_priv->active_display_unit]);
2217 
2218 	return ret;
2219 }
2220 
2221 int vmw_kms_close(struct vmw_private *dev_priv)
2222 {
2223 	int ret = 0;
2224 
2225 	/*
2226 	 * Docs says we should take the lock before calling this function
2227 	 * but since it destroys encoders and our destructor calls
2228 	 * drm_encoder_cleanup which takes the lock we deadlock.
2229 	 */
2230 	drm_mode_config_cleanup(&dev_priv->drm);
2231 	if (dev_priv->active_display_unit == vmw_du_legacy)
2232 		ret = vmw_kms_ldu_close_display(dev_priv);
2233 
2234 	return ret;
2235 }
2236 
2237 int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
2238 				struct drm_file *file_priv)
2239 {
2240 	struct drm_vmw_cursor_bypass_arg *arg = data;
2241 	struct vmw_display_unit *du;
2242 	struct drm_crtc *crtc;
2243 	int ret = 0;
2244 
2245 	mutex_lock(&dev->mode_config.mutex);
2246 	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
2247 
2248 		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2249 			du = vmw_crtc_to_du(crtc);
2250 			du->hotspot_x = arg->xhot;
2251 			du->hotspot_y = arg->yhot;
2252 		}
2253 
2254 		mutex_unlock(&dev->mode_config.mutex);
2255 		return 0;
2256 	}
2257 
2258 	crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
2259 	if (!crtc) {
2260 		ret = -ENOENT;
2261 		goto out;
2262 	}
2263 
2264 	du = vmw_crtc_to_du(crtc);
2265 
2266 	du->hotspot_x = arg->xhot;
2267 	du->hotspot_y = arg->yhot;
2268 
2269 out:
2270 	mutex_unlock(&dev->mode_config.mutex);
2271 
2272 	return ret;
2273 }
2274 
2275 int vmw_kms_write_svga(struct vmw_private *vmw_priv,
2276 			unsigned width, unsigned height, unsigned pitch,
2277 			unsigned bpp, unsigned depth)
2278 {
2279 	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
2280 		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
2281 	else if (vmw_fifo_have_pitchlock(vmw_priv))
2282 		vmw_fifo_mem_write(vmw_priv, SVGA_FIFO_PITCHLOCK, pitch);
2283 	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
2284 	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
2285 	if ((vmw_priv->capabilities & SVGA_CAP_8BIT_EMULATION) != 0)
2286 		vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
2287 
2288 	if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
2289 		DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
2290 			  depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
2291 		return -EINVAL;
2292 	}
2293 
2294 	return 0;
2295 }
2296 
2297 bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
2298 				uint32_t pitch,
2299 				uint32_t height)
2300 {
2301 	return ((u64) pitch * (u64) height) < (u64)
2302 		((dev_priv->active_display_unit == vmw_du_screen_target) ?
2303 		 dev_priv->max_primary_mem : dev_priv->vram_size);
2304 }
2305 
2306 /**
2307  * vmw_du_update_layout - Update the display unit with topology from resolution
2308  * plugin and generate DRM uevent
2309  * @dev_priv: device private
2310  * @num_rects: number of drm_rect in rects
2311  * @rects: toplogy to update
2312  */
2313 static int vmw_du_update_layout(struct vmw_private *dev_priv,
2314 				unsigned int num_rects, struct drm_rect *rects)
2315 {
2316 	struct drm_device *dev = &dev_priv->drm;
2317 	struct vmw_display_unit *du;
2318 	struct drm_connector *con;
2319 	struct drm_connector_list_iter conn_iter;
2320 	struct drm_modeset_acquire_ctx ctx;
2321 	struct drm_crtc *crtc;
2322 	int ret;
2323 
2324 	/* Currently gui_x/y is protected with the crtc mutex */
2325 	mutex_lock(&dev->mode_config.mutex);
2326 	drm_modeset_acquire_init(&ctx, 0);
2327 retry:
2328 	drm_for_each_crtc(crtc, dev) {
2329 		ret = drm_modeset_lock(&crtc->mutex, &ctx);
2330 		if (ret < 0) {
2331 			if (ret == -EDEADLK) {
2332 				drm_modeset_backoff(&ctx);
2333 				goto retry;
2334 		}
2335 			goto out_fini;
2336 		}
2337 	}
2338 
2339 	drm_connector_list_iter_begin(dev, &conn_iter);
2340 	drm_for_each_connector_iter(con, &conn_iter) {
2341 		du = vmw_connector_to_du(con);
2342 		if (num_rects > du->unit) {
2343 			du->pref_width = drm_rect_width(&rects[du->unit]);
2344 			du->pref_height = drm_rect_height(&rects[du->unit]);
2345 			du->pref_active = true;
2346 			du->gui_x = rects[du->unit].x1;
2347 			du->gui_y = rects[du->unit].y1;
2348 		} else {
2349 			du->pref_width  = VMWGFX_MIN_INITIAL_WIDTH;
2350 			du->pref_height = VMWGFX_MIN_INITIAL_HEIGHT;
2351 			du->pref_active = false;
2352 			du->gui_x = 0;
2353 			du->gui_y = 0;
2354 		}
2355 	}
2356 	drm_connector_list_iter_end(&conn_iter);
2357 
2358 	list_for_each_entry(con, &dev->mode_config.connector_list, head) {
2359 		du = vmw_connector_to_du(con);
2360 		if (num_rects > du->unit) {
2361 			drm_object_property_set_value
2362 			  (&con->base, dev->mode_config.suggested_x_property,
2363 			   du->gui_x);
2364 			drm_object_property_set_value
2365 			  (&con->base, dev->mode_config.suggested_y_property,
2366 			   du->gui_y);
2367 		} else {
2368 			drm_object_property_set_value
2369 			  (&con->base, dev->mode_config.suggested_x_property,
2370 			   0);
2371 			drm_object_property_set_value
2372 			  (&con->base, dev->mode_config.suggested_y_property,
2373 			   0);
2374 		}
2375 		con->status = vmw_du_connector_detect(con, true);
2376 	}
2377 out_fini:
2378 	drm_modeset_drop_locks(&ctx);
2379 	drm_modeset_acquire_fini(&ctx);
2380 	mutex_unlock(&dev->mode_config.mutex);
2381 
2382 	drm_sysfs_hotplug_event(dev);
2383 
2384 	return 0;
2385 }
2386 
2387 int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
2388 			  u16 *r, u16 *g, u16 *b,
2389 			  uint32_t size,
2390 			  struct drm_modeset_acquire_ctx *ctx)
2391 {
2392 	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
2393 	int i;
2394 
2395 	for (i = 0; i < size; i++) {
2396 		DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
2397 			  r[i], g[i], b[i]);
2398 		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
2399 		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
2400 		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
2401 	}
2402 
2403 	return 0;
2404 }
2405 
2406 int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
2407 {
2408 	return 0;
2409 }
2410 
2411 enum drm_connector_status
2412 vmw_du_connector_detect(struct drm_connector *connector, bool force)
2413 {
2414 	uint32_t num_displays;
2415 	struct drm_device *dev = connector->dev;
2416 	struct vmw_private *dev_priv = vmw_priv(dev);
2417 	struct vmw_display_unit *du = vmw_connector_to_du(connector);
2418 
2419 	num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
2420 
2421 	return ((vmw_connector_to_du(connector)->unit < num_displays &&
2422 		 du->pref_active) ?
2423 		connector_status_connected : connector_status_disconnected);
2424 }
2425 
2426 static struct drm_display_mode vmw_kms_connector_builtin[] = {
2427 	/* 640x480@60Hz */
2428 	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
2429 		   752, 800, 0, 480, 489, 492, 525, 0,
2430 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
2431 	/* 800x600@60Hz */
2432 	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
2433 		   968, 1056, 0, 600, 601, 605, 628, 0,
2434 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2435 	/* 1024x768@60Hz */
2436 	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
2437 		   1184, 1344, 0, 768, 771, 777, 806, 0,
2438 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
2439 	/* 1152x864@75Hz */
2440 	{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
2441 		   1344, 1600, 0, 864, 865, 868, 900, 0,
2442 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2443 	/* 1280x720@60Hz */
2444 	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74500, 1280, 1344,
2445 		   1472, 1664, 0, 720, 723, 728, 748, 0,
2446 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2447 	/* 1280x768@60Hz */
2448 	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
2449 		   1472, 1664, 0, 768, 771, 778, 798, 0,
2450 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2451 	/* 1280x800@60Hz */
2452 	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
2453 		   1480, 1680, 0, 800, 803, 809, 831, 0,
2454 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2455 	/* 1280x960@60Hz */
2456 	{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
2457 		   1488, 1800, 0, 960, 961, 964, 1000, 0,
2458 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2459 	/* 1280x1024@60Hz */
2460 	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
2461 		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
2462 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2463 	/* 1360x768@60Hz */
2464 	{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
2465 		   1536, 1792, 0, 768, 771, 777, 795, 0,
2466 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2467 	/* 1440x1050@60Hz */
2468 	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
2469 		   1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
2470 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2471 	/* 1440x900@60Hz */
2472 	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
2473 		   1672, 1904, 0, 900, 903, 909, 934, 0,
2474 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2475 	/* 1600x1200@60Hz */
2476 	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
2477 		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
2478 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2479 	/* 1680x1050@60Hz */
2480 	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
2481 		   1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
2482 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2483 	/* 1792x1344@60Hz */
2484 	{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
2485 		   2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
2486 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2487 	/* 1853x1392@60Hz */
2488 	{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
2489 		   2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
2490 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2491 	/* 1920x1080@60Hz */
2492 	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 173000, 1920, 2048,
2493 		   2248, 2576, 0, 1080, 1083, 1088, 1120, 0,
2494 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2495 	/* 1920x1200@60Hz */
2496 	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
2497 		   2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
2498 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2499 	/* 1920x1440@60Hz */
2500 	{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
2501 		   2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
2502 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2503 	/* 2560x1440@60Hz */
2504 	{ DRM_MODE("2560x1440", DRM_MODE_TYPE_DRIVER, 241500, 2560, 2608,
2505 		   2640, 2720, 0, 1440, 1443, 1448, 1481, 0,
2506 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2507 	/* 2560x1600@60Hz */
2508 	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
2509 		   3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
2510 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2511 	/* 2880x1800@60Hz */
2512 	{ DRM_MODE("2880x1800", DRM_MODE_TYPE_DRIVER, 337500, 2880, 2928,
2513 		   2960, 3040, 0, 1800, 1803, 1809, 1852, 0,
2514 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2515 	/* 3840x2160@60Hz */
2516 	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 533000, 3840, 3888,
2517 		   3920, 4000, 0, 2160, 2163, 2168, 2222, 0,
2518 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2519 	/* 3840x2400@60Hz */
2520 	{ DRM_MODE("3840x2400", DRM_MODE_TYPE_DRIVER, 592250, 3840, 3888,
2521 		   3920, 4000, 0, 2400, 2403, 2409, 2469, 0,
2522 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2523 	/* Terminate */
2524 	{ DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
2525 };
2526 
2527 /**
2528  * vmw_guess_mode_timing - Provide fake timings for a
2529  * 60Hz vrefresh mode.
2530  *
2531  * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
2532  * members filled in.
2533  */
2534 void vmw_guess_mode_timing(struct drm_display_mode *mode)
2535 {
2536 	mode->hsync_start = mode->hdisplay + 50;
2537 	mode->hsync_end = mode->hsync_start + 50;
2538 	mode->htotal = mode->hsync_end + 50;
2539 
2540 	mode->vsync_start = mode->vdisplay + 50;
2541 	mode->vsync_end = mode->vsync_start + 50;
2542 	mode->vtotal = mode->vsync_end + 50;
2543 
2544 	mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
2545 }
2546 
2547 
2548 int vmw_du_connector_fill_modes(struct drm_connector *connector,
2549 				uint32_t max_width, uint32_t max_height)
2550 {
2551 	struct vmw_display_unit *du = vmw_connector_to_du(connector);
2552 	struct drm_device *dev = connector->dev;
2553 	struct vmw_private *dev_priv = vmw_priv(dev);
2554 	struct drm_display_mode *mode = NULL;
2555 	struct drm_display_mode *bmode;
2556 	struct drm_display_mode prefmode = { DRM_MODE("preferred",
2557 		DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
2558 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2559 		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
2560 	};
2561 	int i;
2562 	u32 assumed_bpp = 4;
2563 
2564 	if (dev_priv->assume_16bpp)
2565 		assumed_bpp = 2;
2566 
2567 	max_width  = min(max_width,  dev_priv->texture_max_width);
2568 	max_height = min(max_height, dev_priv->texture_max_height);
2569 
2570 	/*
2571 	 * For STDU extra limit for a mode on SVGA_REG_SCREENTARGET_MAX_WIDTH/
2572 	 * HEIGHT registers.
2573 	 */
2574 	if (dev_priv->active_display_unit == vmw_du_screen_target) {
2575 		max_width  = min(max_width,  dev_priv->stdu_max_width);
2576 		max_height = min(max_height, dev_priv->stdu_max_height);
2577 	}
2578 
2579 	/* Add preferred mode */
2580 	mode = drm_mode_duplicate(dev, &prefmode);
2581 	if (!mode)
2582 		return 0;
2583 	mode->hdisplay = du->pref_width;
2584 	mode->vdisplay = du->pref_height;
2585 	vmw_guess_mode_timing(mode);
2586 	drm_mode_set_name(mode);
2587 
2588 	if (vmw_kms_validate_mode_vram(dev_priv,
2589 					mode->hdisplay * assumed_bpp,
2590 					mode->vdisplay)) {
2591 		drm_mode_probed_add(connector, mode);
2592 	} else {
2593 		drm_mode_destroy(dev, mode);
2594 		mode = NULL;
2595 	}
2596 
2597 	if (du->pref_mode) {
2598 		list_del_init(&du->pref_mode->head);
2599 		drm_mode_destroy(dev, du->pref_mode);
2600 	}
2601 
2602 	/* mode might be null here, this is intended */
2603 	du->pref_mode = mode;
2604 
2605 	for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
2606 		bmode = &vmw_kms_connector_builtin[i];
2607 		if (bmode->hdisplay > max_width ||
2608 		    bmode->vdisplay > max_height)
2609 			continue;
2610 
2611 		if (!vmw_kms_validate_mode_vram(dev_priv,
2612 						bmode->hdisplay * assumed_bpp,
2613 						bmode->vdisplay))
2614 			continue;
2615 
2616 		mode = drm_mode_duplicate(dev, bmode);
2617 		if (!mode)
2618 			return 0;
2619 
2620 		drm_mode_probed_add(connector, mode);
2621 	}
2622 
2623 	drm_connector_list_update(connector);
2624 	/* Move the prefered mode first, help apps pick the right mode. */
2625 	drm_mode_sort(&connector->modes);
2626 
2627 	return 1;
2628 }
2629 
2630 /**
2631  * vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
2632  * @dev: drm device for the ioctl
2633  * @data: data pointer for the ioctl
2634  * @file_priv: drm file for the ioctl call
2635  *
2636  * Update preferred topology of display unit as per ioctl request. The topology
2637  * is expressed as array of drm_vmw_rect.
2638  * e.g.
2639  * [0 0 640 480] [640 0 800 600] [0 480 640 480]
2640  *
2641  * NOTE:
2642  * The x and y offset (upper left) in drm_vmw_rect cannot be less than 0. Beside
2643  * device limit on topology, x + w and y + h (lower right) cannot be greater
2644  * than INT_MAX. So topology beyond these limits will return with error.
2645  *
2646  * Returns:
2647  * Zero on success, negative errno on failure.
2648  */
2649 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
2650 				struct drm_file *file_priv)
2651 {
2652 	struct vmw_private *dev_priv = vmw_priv(dev);
2653 	struct drm_mode_config *mode_config = &dev->mode_config;
2654 	struct drm_vmw_update_layout_arg *arg =
2655 		(struct drm_vmw_update_layout_arg *)data;
2656 	void __user *user_rects;
2657 	struct drm_vmw_rect *rects;
2658 	struct drm_rect *drm_rects;
2659 	unsigned rects_size;
2660 	int ret, i;
2661 
2662 	if (!arg->num_outputs) {
2663 		struct drm_rect def_rect = {0, 0,
2664 					    VMWGFX_MIN_INITIAL_WIDTH,
2665 					    VMWGFX_MIN_INITIAL_HEIGHT};
2666 		vmw_du_update_layout(dev_priv, 1, &def_rect);
2667 		return 0;
2668 	}
2669 
2670 	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
2671 	rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
2672 			GFP_KERNEL);
2673 	if (unlikely(!rects))
2674 		return -ENOMEM;
2675 
2676 	user_rects = (void __user *)(unsigned long)arg->rects;
2677 	ret = copy_from_user(rects, user_rects, rects_size);
2678 	if (unlikely(ret != 0)) {
2679 		DRM_ERROR("Failed to get rects.\n");
2680 		ret = -EFAULT;
2681 		goto out_free;
2682 	}
2683 
2684 	drm_rects = (struct drm_rect *)rects;
2685 
2686 	VMW_DEBUG_KMS("Layout count = %u\n", arg->num_outputs);
2687 	for (i = 0; i < arg->num_outputs; i++) {
2688 		struct drm_vmw_rect curr_rect;
2689 
2690 		/* Verify user-space for overflow as kernel use drm_rect */
2691 		if ((rects[i].x + rects[i].w > INT_MAX) ||
2692 		    (rects[i].y + rects[i].h > INT_MAX)) {
2693 			ret = -ERANGE;
2694 			goto out_free;
2695 		}
2696 
2697 		curr_rect = rects[i];
2698 		drm_rects[i].x1 = curr_rect.x;
2699 		drm_rects[i].y1 = curr_rect.y;
2700 		drm_rects[i].x2 = curr_rect.x + curr_rect.w;
2701 		drm_rects[i].y2 = curr_rect.y + curr_rect.h;
2702 
2703 		VMW_DEBUG_KMS("  x1 = %d y1 = %d x2 = %d y2 = %d\n",
2704 			      drm_rects[i].x1, drm_rects[i].y1,
2705 			      drm_rects[i].x2, drm_rects[i].y2);
2706 
2707 		/*
2708 		 * Currently this check is limiting the topology within
2709 		 * mode_config->max (which actually is max texture size
2710 		 * supported by virtual device). This limit is here to address
2711 		 * window managers that create a big framebuffer for whole
2712 		 * topology.
2713 		 */
2714 		if (drm_rects[i].x1 < 0 ||  drm_rects[i].y1 < 0 ||
2715 		    drm_rects[i].x2 > mode_config->max_width ||
2716 		    drm_rects[i].y2 > mode_config->max_height) {
2717 			VMW_DEBUG_KMS("Invalid layout %d %d %d %d\n",
2718 				      drm_rects[i].x1, drm_rects[i].y1,
2719 				      drm_rects[i].x2, drm_rects[i].y2);
2720 			ret = -EINVAL;
2721 			goto out_free;
2722 		}
2723 	}
2724 
2725 	ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);
2726 
2727 	if (ret == 0)
2728 		vmw_du_update_layout(dev_priv, arg->num_outputs, drm_rects);
2729 
2730 out_free:
2731 	kfree(rects);
2732 	return ret;
2733 }
2734 
2735 /**
2736  * vmw_kms_helper_dirty - Helper to build commands and perform actions based
2737  * on a set of cliprects and a set of display units.
2738  *
2739  * @dev_priv: Pointer to a device private structure.
2740  * @framebuffer: Pointer to the framebuffer on which to perform the actions.
2741  * @clips: A set of struct drm_clip_rect. Either this os @vclips must be NULL.
2742  * Cliprects are given in framebuffer coordinates.
2743  * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
2744  * be NULL. Cliprects are given in source coordinates.
2745  * @dest_x: X coordinate offset for the crtc / destination clip rects.
2746  * @dest_y: Y coordinate offset for the crtc / destination clip rects.
2747  * @num_clips: Number of cliprects in the @clips or @vclips array.
2748  * @increment: Integer with which to increment the clip counter when looping.
2749  * Used to skip a predetermined number of clip rects.
2750  * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
2751  */
2752 int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
2753 			 struct vmw_framebuffer *framebuffer,
2754 			 const struct drm_clip_rect *clips,
2755 			 const struct drm_vmw_rect *vclips,
2756 			 s32 dest_x, s32 dest_y,
2757 			 int num_clips,
2758 			 int increment,
2759 			 struct vmw_kms_dirty *dirty)
2760 {
2761 	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
2762 	struct drm_crtc *crtc;
2763 	u32 num_units = 0;
2764 	u32 i, k;
2765 
2766 	dirty->dev_priv = dev_priv;
2767 
2768 	/* If crtc is passed, no need to iterate over other display units */
2769 	if (dirty->crtc) {
2770 		units[num_units++] = vmw_crtc_to_du(dirty->crtc);
2771 	} else {
2772 		list_for_each_entry(crtc, &dev_priv->drm.mode_config.crtc_list,
2773 				    head) {
2774 			struct drm_plane *plane = crtc->primary;
2775 
2776 			if (plane->state->fb == &framebuffer->base)
2777 				units[num_units++] = vmw_crtc_to_du(crtc);
2778 		}
2779 	}
2780 
2781 	for (k = 0; k < num_units; k++) {
2782 		struct vmw_display_unit *unit = units[k];
2783 		s32 crtc_x = unit->crtc.x;
2784 		s32 crtc_y = unit->crtc.y;
2785 		s32 crtc_width = unit->crtc.mode.hdisplay;
2786 		s32 crtc_height = unit->crtc.mode.vdisplay;
2787 		const struct drm_clip_rect *clips_ptr = clips;
2788 		const struct drm_vmw_rect *vclips_ptr = vclips;
2789 
2790 		dirty->unit = unit;
2791 		if (dirty->fifo_reserve_size > 0) {
2792 			dirty->cmd = VMW_CMD_RESERVE(dev_priv,
2793 						      dirty->fifo_reserve_size);
2794 			if (!dirty->cmd)
2795 				return -ENOMEM;
2796 
2797 			memset(dirty->cmd, 0, dirty->fifo_reserve_size);
2798 		}
2799 		dirty->num_hits = 0;
2800 		for (i = 0; i < num_clips; i++, clips_ptr += increment,
2801 		       vclips_ptr += increment) {
2802 			s32 clip_left;
2803 			s32 clip_top;
2804 
2805 			/*
2806 			 * Select clip array type. Note that integer type
2807 			 * in @clips is unsigned short, whereas in @vclips
2808 			 * it's 32-bit.
2809 			 */
2810 			if (clips) {
2811 				dirty->fb_x = (s32) clips_ptr->x1;
2812 				dirty->fb_y = (s32) clips_ptr->y1;
2813 				dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
2814 					crtc_x;
2815 				dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
2816 					crtc_y;
2817 			} else {
2818 				dirty->fb_x = vclips_ptr->x;
2819 				dirty->fb_y = vclips_ptr->y;
2820 				dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
2821 					dest_x - crtc_x;
2822 				dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
2823 					dest_y - crtc_y;
2824 			}
2825 
2826 			dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
2827 			dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;
2828 
2829 			/* Skip this clip if it's outside the crtc region */
2830 			if (dirty->unit_x1 >= crtc_width ||
2831 			    dirty->unit_y1 >= crtc_height ||
2832 			    dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
2833 				continue;
2834 
2835 			/* Clip right and bottom to crtc limits */
2836 			dirty->unit_x2 = min_t(s32, dirty->unit_x2,
2837 					       crtc_width);
2838 			dirty->unit_y2 = min_t(s32, dirty->unit_y2,
2839 					       crtc_height);
2840 
2841 			/* Clip left and top to crtc limits */
2842 			clip_left = min_t(s32, dirty->unit_x1, 0);
2843 			clip_top = min_t(s32, dirty->unit_y1, 0);
2844 			dirty->unit_x1 -= clip_left;
2845 			dirty->unit_y1 -= clip_top;
2846 			dirty->fb_x -= clip_left;
2847 			dirty->fb_y -= clip_top;
2848 
2849 			dirty->clip(dirty);
2850 		}
2851 
2852 		dirty->fifo_commit(dirty);
2853 	}
2854 
2855 	return 0;
2856 }
2857 
2858 /**
2859  * vmw_kms_helper_validation_finish - Helper for post KMS command submission
2860  * cleanup and fencing
2861  * @dev_priv: Pointer to the device-private struct
2862  * @file_priv: Pointer identifying the client when user-space fencing is used
2863  * @ctx: Pointer to the validation context
2864  * @out_fence: If non-NULL, returned refcounted fence-pointer
2865  * @user_fence_rep: If non-NULL, pointer to user-space address area
2866  * in which to copy user-space fence info
2867  */
2868 void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
2869 				      struct drm_file *file_priv,
2870 				      struct vmw_validation_context *ctx,
2871 				      struct vmw_fence_obj **out_fence,
2872 				      struct drm_vmw_fence_rep __user *
2873 				      user_fence_rep)
2874 {
2875 	struct vmw_fence_obj *fence = NULL;
2876 	uint32_t handle = 0;
2877 	int ret = 0;
2878 
2879 	if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
2880 	    out_fence)
2881 		ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
2882 						 file_priv ? &handle : NULL);
2883 	vmw_validation_done(ctx, fence);
2884 	if (file_priv)
2885 		vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
2886 					    ret, user_fence_rep, fence,
2887 					    handle, -1);
2888 	if (out_fence)
2889 		*out_fence = fence;
2890 	else
2891 		vmw_fence_obj_unreference(&fence);
2892 }
2893 
2894 /**
2895  * vmw_kms_update_proxy - Helper function to update a proxy surface from
2896  * its backing MOB.
2897  *
2898  * @res: Pointer to the surface resource
2899  * @clips: Clip rects in framebuffer (surface) space.
2900  * @num_clips: Number of clips in @clips.
2901  * @increment: Integer with which to increment the clip counter when looping.
2902  * Used to skip a predetermined number of clip rects.
2903  *
2904  * This function makes sure the proxy surface is updated from its backing MOB
2905  * using the region given by @clips. The surface resource @res and its backing
2906  * MOB needs to be reserved and validated on call.
2907  */
2908 int vmw_kms_update_proxy(struct vmw_resource *res,
2909 			 const struct drm_clip_rect *clips,
2910 			 unsigned num_clips,
2911 			 int increment)
2912 {
2913 	struct vmw_private *dev_priv = res->dev_priv;
2914 	struct drm_vmw_size *size = &vmw_res_to_srf(res)->metadata.base_size;
2915 	struct {
2916 		SVGA3dCmdHeader header;
2917 		SVGA3dCmdUpdateGBImage body;
2918 	} *cmd;
2919 	SVGA3dBox *box;
2920 	size_t copy_size = 0;
2921 	int i;
2922 
2923 	if (!clips)
2924 		return 0;
2925 
2926 	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd) * num_clips);
2927 	if (!cmd)
2928 		return -ENOMEM;
2929 
2930 	for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
2931 		box = &cmd->body.box;
2932 
2933 		cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
2934 		cmd->header.size = sizeof(cmd->body);
2935 		cmd->body.image.sid = res->id;
2936 		cmd->body.image.face = 0;
2937 		cmd->body.image.mipmap = 0;
2938 
2939 		if (clips->x1 > size->width || clips->x2 > size->width ||
2940 		    clips->y1 > size->height || clips->y2 > size->height) {
2941 			DRM_ERROR("Invalid clips outsize of framebuffer.\n");
2942 			return -EINVAL;
2943 		}
2944 
2945 		box->x = clips->x1;
2946 		box->y = clips->y1;
2947 		box->z = 0;
2948 		box->w = clips->x2 - clips->x1;
2949 		box->h = clips->y2 - clips->y1;
2950 		box->d = 1;
2951 
2952 		copy_size += sizeof(*cmd);
2953 	}
2954 
2955 	vmw_cmd_commit(dev_priv, copy_size);
2956 
2957 	return 0;
2958 }
2959 
2960 /**
2961  * vmw_kms_create_implicit_placement_property - Set up the implicit placement
2962  * property.
2963  *
2964  * @dev_priv: Pointer to a device private struct.
2965  *
2966  * Sets up the implicit placement property unless it's already set up.
2967  */
2968 void
2969 vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv)
2970 {
2971 	if (dev_priv->implicit_placement_property)
2972 		return;
2973 
2974 	dev_priv->implicit_placement_property =
2975 		drm_property_create_range(&dev_priv->drm,
2976 					  DRM_MODE_PROP_IMMUTABLE,
2977 					  "implicit_placement", 0, 1);
2978 }
2979 
2980 /**
2981  * vmw_kms_suspend - Save modesetting state and turn modesetting off.
2982  *
2983  * @dev: Pointer to the drm device
2984  * Return: 0 on success. Negative error code on failure.
2985  */
2986 int vmw_kms_suspend(struct drm_device *dev)
2987 {
2988 	struct vmw_private *dev_priv = vmw_priv(dev);
2989 
2990 	dev_priv->suspend_state = drm_atomic_helper_suspend(dev);
2991 	if (IS_ERR(dev_priv->suspend_state)) {
2992 		int ret = PTR_ERR(dev_priv->suspend_state);
2993 
2994 		DRM_ERROR("Failed kms suspend: %d\n", ret);
2995 		dev_priv->suspend_state = NULL;
2996 
2997 		return ret;
2998 	}
2999 
3000 	return 0;
3001 }
3002 
3003 
3004 /**
3005  * vmw_kms_resume - Re-enable modesetting and restore state
3006  *
3007  * @dev: Pointer to the drm device
3008  * Return: 0 on success. Negative error code on failure.
3009  *
3010  * State is resumed from a previous vmw_kms_suspend(). It's illegal
3011  * to call this function without a previous vmw_kms_suspend().
3012  */
3013 int vmw_kms_resume(struct drm_device *dev)
3014 {
3015 	struct vmw_private *dev_priv = vmw_priv(dev);
3016 	int ret;
3017 
3018 	if (WARN_ON(!dev_priv->suspend_state))
3019 		return 0;
3020 
3021 	ret = drm_atomic_helper_resume(dev, dev_priv->suspend_state);
3022 	dev_priv->suspend_state = NULL;
3023 
3024 	return ret;
3025 }
3026 
3027 /**
3028  * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost
3029  *
3030  * @dev: Pointer to the drm device
3031  */
3032 void vmw_kms_lost_device(struct drm_device *dev)
3033 {
3034 	drm_atomic_helper_shutdown(dev);
3035 }
3036 
3037 /**
3038  * vmw_du_helper_plane_update - Helper to do plane update on a display unit.
3039  * @update: The closure structure.
3040  *
3041  * Call this helper after setting callbacks in &vmw_du_update_plane to do plane
3042  * update on display unit.
3043  *
3044  * Return: 0 on success or a negative error code on failure.
3045  */
3046 int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
3047 {
3048 	struct drm_plane_state *state = update->plane->state;
3049 	struct drm_plane_state *old_state = update->old_state;
3050 	struct drm_atomic_helper_damage_iter iter;
3051 	struct drm_rect clip;
3052 	struct drm_rect bb;
3053 	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
3054 	uint32_t reserved_size = 0;
3055 	uint32_t submit_size = 0;
3056 	uint32_t curr_size = 0;
3057 	uint32_t num_hits = 0;
3058 	void *cmd_start;
3059 	char *cmd_next;
3060 	int ret;
3061 
3062 	/*
3063 	 * Iterate in advance to check if really need plane update and find the
3064 	 * number of clips that actually are in plane src for fifo allocation.
3065 	 */
3066 	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
3067 	drm_atomic_for_each_plane_damage(&iter, &clip)
3068 		num_hits++;
3069 
3070 	if (num_hits == 0)
3071 		return 0;
3072 
3073 	if (update->vfb->bo) {
3074 		struct vmw_framebuffer_bo *vfbbo =
3075 			container_of(update->vfb, typeof(*vfbbo), base);
3076 
3077 		ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer, false,
3078 					    update->cpu_blit);
3079 	} else {
3080 		struct vmw_framebuffer_surface *vfbs =
3081 			container_of(update->vfb, typeof(*vfbs), base);
3082 
3083 		ret = vmw_validation_add_resource(&val_ctx, &vfbs->surface->res,
3084 						  0, VMW_RES_DIRTY_NONE, NULL,
3085 						  NULL);
3086 	}
3087 
3088 	if (ret)
3089 		return ret;
3090 
3091 	ret = vmw_validation_prepare(&val_ctx, update->mutex, update->intr);
3092 	if (ret)
3093 		goto out_unref;
3094 
3095 	reserved_size = update->calc_fifo_size(update, num_hits);
3096 	cmd_start = VMW_CMD_RESERVE(update->dev_priv, reserved_size);
3097 	if (!cmd_start) {
3098 		ret = -ENOMEM;
3099 		goto out_revert;
3100 	}
3101 
3102 	cmd_next = cmd_start;
3103 
3104 	if (update->post_prepare) {
3105 		curr_size = update->post_prepare(update, cmd_next);
3106 		cmd_next += curr_size;
3107 		submit_size += curr_size;
3108 	}
3109 
3110 	if (update->pre_clip) {
3111 		curr_size = update->pre_clip(update, cmd_next, num_hits);
3112 		cmd_next += curr_size;
3113 		submit_size += curr_size;
3114 	}
3115 
3116 	bb.x1 = INT_MAX;
3117 	bb.y1 = INT_MAX;
3118 	bb.x2 = INT_MIN;
3119 	bb.y2 = INT_MIN;
3120 
3121 	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
3122 	drm_atomic_for_each_plane_damage(&iter, &clip) {
3123 		uint32_t fb_x = clip.x1;
3124 		uint32_t fb_y = clip.y1;
3125 
3126 		vmw_du_translate_to_crtc(state, &clip);
3127 		if (update->clip) {
3128 			curr_size = update->clip(update, cmd_next, &clip, fb_x,
3129 						 fb_y);
3130 			cmd_next += curr_size;
3131 			submit_size += curr_size;
3132 		}
3133 		bb.x1 = min_t(int, bb.x1, clip.x1);
3134 		bb.y1 = min_t(int, bb.y1, clip.y1);
3135 		bb.x2 = max_t(int, bb.x2, clip.x2);
3136 		bb.y2 = max_t(int, bb.y2, clip.y2);
3137 	}
3138 
3139 	curr_size = update->post_clip(update, cmd_next, &bb);
3140 	submit_size += curr_size;
3141 
3142 	if (reserved_size < submit_size)
3143 		submit_size = 0;
3144 
3145 	vmw_cmd_commit(update->dev_priv, submit_size);
3146 
3147 	vmw_kms_helper_validation_finish(update->dev_priv, NULL, &val_ctx,
3148 					 update->out_fence, NULL);
3149 	return ret;
3150 
3151 out_revert:
3152 	vmw_validation_revert(&val_ctx);
3153 
3154 out_unref:
3155 	vmw_validation_unref_lists(&val_ctx);
3156 	return ret;
3157 }
3158