1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /**************************************************************************
3  *
4  * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 
28 #include <drm/drm_atomic.h>
29 #include <drm/drm_atomic_helper.h>
30 #include <drm/drm_damage_helper.h>
31 #include <drm/drm_fourcc.h>
32 #include <drm/drm_plane_helper.h>
33 #include <drm/drm_rect.h>
34 #include <drm/drm_sysfs.h>
35 #include <drm/drm_vblank.h>
36 
37 #include "vmwgfx_kms.h"
38 
39 void vmw_du_cleanup(struct vmw_display_unit *du)
40 {
41 	drm_plane_cleanup(&du->primary);
42 	drm_plane_cleanup(&du->cursor);
43 
44 	drm_connector_unregister(&du->connector);
45 	drm_crtc_cleanup(&du->crtc);
46 	drm_encoder_cleanup(&du->encoder);
47 	drm_connector_cleanup(&du->connector);
48 }
49 
50 /*
51  * Display Unit Cursor functions
52  */
53 
54 static int vmw_cursor_update_image(struct vmw_private *dev_priv,
55 				   u32 *image, u32 width, u32 height,
56 				   u32 hotspotX, u32 hotspotY)
57 {
58 	struct {
59 		u32 cmd;
60 		SVGAFifoCmdDefineAlphaCursor cursor;
61 	} *cmd;
62 	u32 image_size = width * height * 4;
63 	u32 cmd_size = sizeof(*cmd) + image_size;
64 
65 	if (!image)
66 		return -EINVAL;
67 
68 	cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);
69 	if (unlikely(cmd == NULL))
70 		return -ENOMEM;
71 
72 	memset(cmd, 0, sizeof(*cmd));
73 
74 	memcpy(&cmd[1], image, image_size);
75 
76 	cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
77 	cmd->cursor.id = 0;
78 	cmd->cursor.width = width;
79 	cmd->cursor.height = height;
80 	cmd->cursor.hotspotX = hotspotX;
81 	cmd->cursor.hotspotY = hotspotY;
82 
83 	vmw_cmd_commit_flush(dev_priv, cmd_size);
84 
85 	return 0;
86 }
87 
88 static int vmw_cursor_update_bo(struct vmw_private *dev_priv,
89 				struct vmw_buffer_object *bo,
90 				u32 width, u32 height,
91 				u32 hotspotX, u32 hotspotY)
92 {
93 	struct ttm_bo_kmap_obj map;
94 	unsigned long kmap_offset;
95 	unsigned long kmap_num;
96 	void *virtual;
97 	bool dummy;
98 	int ret;
99 
100 	kmap_offset = 0;
101 	kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;
102 
103 	ret = ttm_bo_reserve(&bo->base, true, false, NULL);
104 	if (unlikely(ret != 0)) {
105 		DRM_ERROR("reserve failed\n");
106 		return -EINVAL;
107 	}
108 
109 	ret = ttm_bo_kmap(&bo->base, kmap_offset, kmap_num, &map);
110 	if (unlikely(ret != 0))
111 		goto err_unreserve;
112 
113 	virtual = ttm_kmap_obj_virtual(&map, &dummy);
114 	ret = vmw_cursor_update_image(dev_priv, virtual, width, height,
115 				      hotspotX, hotspotY);
116 
117 	ttm_bo_kunmap(&map);
118 err_unreserve:
119 	ttm_bo_unreserve(&bo->base);
120 
121 	return ret;
122 }
123 
124 
125 static void vmw_cursor_update_position(struct vmw_private *dev_priv,
126 				       bool show, int x, int y)
127 {
128 	uint32_t count;
129 
130 	spin_lock(&dev_priv->cursor_lock);
131 	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, show ? 1 : 0);
132 	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
133 	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
134 	count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
135 	vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
136 	spin_unlock(&dev_priv->cursor_lock);
137 }
138 
139 
140 void vmw_kms_cursor_snoop(struct vmw_surface *srf,
141 			  struct ttm_object_file *tfile,
142 			  struct ttm_buffer_object *bo,
143 			  SVGA3dCmdHeader *header)
144 {
145 	struct ttm_bo_kmap_obj map;
146 	unsigned long kmap_offset;
147 	unsigned long kmap_num;
148 	SVGA3dCopyBox *box;
149 	unsigned box_count;
150 	void *virtual;
151 	bool dummy;
152 	struct vmw_dma_cmd {
153 		SVGA3dCmdHeader header;
154 		SVGA3dCmdSurfaceDMA dma;
155 	} *cmd;
156 	int i, ret;
157 
158 	cmd = container_of(header, struct vmw_dma_cmd, header);
159 
160 	/* No snooper installed */
161 	if (!srf->snooper.image)
162 		return;
163 
164 	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
165 		DRM_ERROR("face and mipmap for cursors should never != 0\n");
166 		return;
167 	}
168 
169 	if (cmd->header.size < 64) {
170 		DRM_ERROR("at least one full copy box must be given\n");
171 		return;
172 	}
173 
174 	box = (SVGA3dCopyBox *)&cmd[1];
175 	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
176 			sizeof(SVGA3dCopyBox);
177 
178 	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
179 	    box->x != 0    || box->y != 0    || box->z != 0    ||
180 	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
181 	    box->d != 1    || box_count != 1) {
182 		/* TODO handle none page aligned offsets */
183 		/* TODO handle more dst & src != 0 */
184 		/* TODO handle more then one copy */
185 		DRM_ERROR("Can't snoop dma request for cursor!\n");
186 		DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
187 			  box->srcx, box->srcy, box->srcz,
188 			  box->x, box->y, box->z,
189 			  box->w, box->h, box->d, box_count,
190 			  cmd->dma.guest.ptr.offset);
191 		return;
192 	}
193 
194 	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
195 	kmap_num = (64*64*4) >> PAGE_SHIFT;
196 
197 	ret = ttm_bo_reserve(bo, true, false, NULL);
198 	if (unlikely(ret != 0)) {
199 		DRM_ERROR("reserve failed\n");
200 		return;
201 	}
202 
203 	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
204 	if (unlikely(ret != 0))
205 		goto err_unreserve;
206 
207 	virtual = ttm_kmap_obj_virtual(&map, &dummy);
208 
209 	if (box->w == 64 && cmd->dma.guest.pitch == 64*4) {
210 		memcpy(srf->snooper.image, virtual, 64*64*4);
211 	} else {
212 		/* Image is unsigned pointer. */
213 		for (i = 0; i < box->h; i++)
214 			memcpy(srf->snooper.image + i * 64,
215 			       virtual + i * cmd->dma.guest.pitch,
216 			       box->w * 4);
217 	}
218 
219 	srf->snooper.age++;
220 
221 	ttm_bo_kunmap(&map);
222 err_unreserve:
223 	ttm_bo_unreserve(bo);
224 }
225 
226 /**
227  * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
228  *
229  * @dev_priv: Pointer to the device private struct.
230  *
231  * Clears all legacy hotspots.
232  */
233 void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
234 {
235 	struct drm_device *dev = &dev_priv->drm;
236 	struct vmw_display_unit *du;
237 	struct drm_crtc *crtc;
238 
239 	drm_modeset_lock_all(dev);
240 	drm_for_each_crtc(crtc, dev) {
241 		du = vmw_crtc_to_du(crtc);
242 
243 		du->hotspot_x = 0;
244 		du->hotspot_y = 0;
245 	}
246 	drm_modeset_unlock_all(dev);
247 }
248 
249 void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
250 {
251 	struct drm_device *dev = &dev_priv->drm;
252 	struct vmw_display_unit *du;
253 	struct drm_crtc *crtc;
254 
255 	mutex_lock(&dev->mode_config.mutex);
256 
257 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
258 		du = vmw_crtc_to_du(crtc);
259 		if (!du->cursor_surface ||
260 		    du->cursor_age == du->cursor_surface->snooper.age)
261 			continue;
262 
263 		du->cursor_age = du->cursor_surface->snooper.age;
264 		vmw_cursor_update_image(dev_priv,
265 					du->cursor_surface->snooper.image,
266 					64, 64,
267 					du->hotspot_x + du->core_hotspot_x,
268 					du->hotspot_y + du->core_hotspot_y);
269 	}
270 
271 	mutex_unlock(&dev->mode_config.mutex);
272 }
273 
274 
275 void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
276 {
277 	vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);
278 
279 	drm_plane_cleanup(plane);
280 }
281 
282 
283 void vmw_du_primary_plane_destroy(struct drm_plane *plane)
284 {
285 	drm_plane_cleanup(plane);
286 
287 	/* Planes are static in our case so we don't free it */
288 }
289 
290 
291 /**
292  * vmw_du_vps_unpin_surf - unpins resource associated with a framebuffer surface
293  *
294  * @vps: plane state associated with the display surface
295  * @unreference: true if we also want to unreference the display.
296  */
297 void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps,
298 			     bool unreference)
299 {
300 	if (vps->surf) {
301 		if (vps->pinned) {
302 			vmw_resource_unpin(&vps->surf->res);
303 			vps->pinned--;
304 		}
305 
306 		if (unreference) {
307 			if (vps->pinned)
308 				DRM_ERROR("Surface still pinned\n");
309 			vmw_surface_unreference(&vps->surf);
310 		}
311 	}
312 }
313 
314 
315 /**
316  * vmw_du_plane_cleanup_fb - Unpins the cursor
317  *
318  * @plane:  display plane
319  * @old_state: Contains the FB to clean up
320  *
321  * Unpins the framebuffer surface
322  *
323  * Returns 0 on success
324  */
325 void
326 vmw_du_plane_cleanup_fb(struct drm_plane *plane,
327 			struct drm_plane_state *old_state)
328 {
329 	struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
330 
331 	vmw_du_plane_unpin_surf(vps, false);
332 }
333 
334 
335 /**
336  * vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it
337  *
338  * @plane:  display plane
339  * @new_state: info on the new plane state, including the FB
340  *
341  * Returns 0 on success
342  */
343 int
344 vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
345 			       struct drm_plane_state *new_state)
346 {
347 	struct drm_framebuffer *fb = new_state->fb;
348 	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
349 
350 
351 	if (vps->surf)
352 		vmw_surface_unreference(&vps->surf);
353 
354 	if (vps->bo)
355 		vmw_bo_unreference(&vps->bo);
356 
357 	if (fb) {
358 		if (vmw_framebuffer_to_vfb(fb)->bo) {
359 			vps->bo = vmw_framebuffer_to_vfbd(fb)->buffer;
360 			vmw_bo_reference(vps->bo);
361 		} else {
362 			vps->surf = vmw_framebuffer_to_vfbs(fb)->surface;
363 			vmw_surface_reference(vps->surf);
364 		}
365 	}
366 
367 	return 0;
368 }
369 
370 
371 void
372 vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
373 				  struct drm_atomic_state *state)
374 {
375 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
376 									   plane);
377 	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
378 									   plane);
379 	struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc;
380 	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
381 	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
382 	struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
383 	s32 hotspot_x, hotspot_y;
384 	int ret = 0;
385 
386 
387 	hotspot_x = du->hotspot_x;
388 	hotspot_y = du->hotspot_y;
389 
390 	if (new_state->fb) {
391 		hotspot_x += new_state->fb->hot_x;
392 		hotspot_y += new_state->fb->hot_y;
393 	}
394 
395 	du->cursor_surface = vps->surf;
396 	du->cursor_bo = vps->bo;
397 
398 	if (vps->surf) {
399 		du->cursor_age = du->cursor_surface->snooper.age;
400 
401 		ret = vmw_cursor_update_image(dev_priv,
402 					      vps->surf->snooper.image,
403 					      64, 64, hotspot_x,
404 					      hotspot_y);
405 	} else if (vps->bo) {
406 		ret = vmw_cursor_update_bo(dev_priv, vps->bo,
407 					   new_state->crtc_w,
408 					   new_state->crtc_h,
409 					   hotspot_x, hotspot_y);
410 	} else {
411 		vmw_cursor_update_position(dev_priv, false, 0, 0);
412 		return;
413 	}
414 
415 	if (!ret) {
416 		du->cursor_x = new_state->crtc_x + du->set_gui_x;
417 		du->cursor_y = new_state->crtc_y + du->set_gui_y;
418 
419 		vmw_cursor_update_position(dev_priv, true,
420 					   du->cursor_x + hotspot_x,
421 					   du->cursor_y + hotspot_y);
422 
423 		du->core_hotspot_x = hotspot_x - du->hotspot_x;
424 		du->core_hotspot_y = hotspot_y - du->hotspot_y;
425 	} else {
426 		DRM_ERROR("Failed to update cursor image\n");
427 	}
428 }
429 
430 
431 /**
432  * vmw_du_primary_plane_atomic_check - check if the new state is okay
433  *
434  * @plane: display plane
435  * @state: info on the new plane state, including the FB
436  *
437  * Check if the new state is settable given the current state.  Other
438  * than what the atomic helper checks, we care about crtc fitting
439  * the FB and maintaining one active framebuffer.
440  *
441  * Returns 0 on success
442  */
443 int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
444 				      struct drm_atomic_state *state)
445 {
446 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
447 									   plane);
448 	struct drm_crtc_state *crtc_state = NULL;
449 	struct drm_framebuffer *new_fb = new_state->fb;
450 	int ret;
451 
452 	if (new_state->crtc)
453 		crtc_state = drm_atomic_get_new_crtc_state(state,
454 							   new_state->crtc);
455 
456 	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
457 						  DRM_PLANE_HELPER_NO_SCALING,
458 						  DRM_PLANE_HELPER_NO_SCALING,
459 						  false, true);
460 
461 	if (!ret && new_fb) {
462 		struct drm_crtc *crtc = new_state->crtc;
463 		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
464 
465 		vmw_connector_state_to_vcs(du->connector.state);
466 	}
467 
468 
469 	return ret;
470 }
471 
472 
473 /**
474  * vmw_du_cursor_plane_atomic_check - check if the new state is okay
475  *
476  * @plane: cursor plane
477  * @new_state: info on the new plane state
478  *
479  * This is a chance to fail if the new cursor state does not fit
480  * our requirements.
481  *
482  * Returns 0 on success
483  */
484 int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
485 				     struct drm_atomic_state *state)
486 {
487 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
488 									   plane);
489 	int ret = 0;
490 	struct drm_crtc_state *crtc_state = NULL;
491 	struct vmw_surface *surface = NULL;
492 	struct drm_framebuffer *fb = new_state->fb;
493 
494 	if (new_state->crtc)
495 		crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
496 							   new_state->crtc);
497 
498 	ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
499 						  DRM_PLANE_HELPER_NO_SCALING,
500 						  DRM_PLANE_HELPER_NO_SCALING,
501 						  true, true);
502 	if (ret)
503 		return ret;
504 
505 	/* Turning off */
506 	if (!fb)
507 		return 0;
508 
509 	/* A lot of the code assumes this */
510 	if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
511 		DRM_ERROR("Invalid cursor dimensions (%d, %d)\n",
512 			  new_state->crtc_w, new_state->crtc_h);
513 		ret = -EINVAL;
514 	}
515 
516 	if (!vmw_framebuffer_to_vfb(fb)->bo)
517 		surface = vmw_framebuffer_to_vfbs(fb)->surface;
518 
519 	if (surface && !surface->snooper.image) {
520 		DRM_ERROR("surface not suitable for cursor\n");
521 		ret = -EINVAL;
522 	}
523 
524 	return ret;
525 }
526 
527 
528 int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
529 			     struct drm_atomic_state *state)
530 {
531 	struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
532 									 crtc);
533 	struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
534 	int connector_mask = drm_connector_mask(&du->connector);
535 	bool has_primary = new_state->plane_mask &
536 			   drm_plane_mask(crtc->primary);
537 
538 	/* We always want to have an active plane with an active CRTC */
539 	if (has_primary != new_state->enable)
540 		return -EINVAL;
541 
542 
543 	if (new_state->connector_mask != connector_mask &&
544 	    new_state->connector_mask != 0) {
545 		DRM_ERROR("Invalid connectors configuration\n");
546 		return -EINVAL;
547 	}
548 
549 	/*
550 	 * Our virtual device does not have a dot clock, so use the logical
551 	 * clock value as the dot clock.
552 	 */
553 	if (new_state->mode.crtc_clock == 0)
554 		new_state->adjusted_mode.crtc_clock = new_state->mode.clock;
555 
556 	return 0;
557 }
558 
559 
560 void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
561 			      struct drm_atomic_state *state)
562 {
563 }
564 
565 
566 void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc,
567 			      struct drm_atomic_state *state)
568 {
569 	struct drm_pending_vblank_event *event = crtc->state->event;
570 
571 	if (event) {
572 		crtc->state->event = NULL;
573 
574 		spin_lock_irq(&crtc->dev->event_lock);
575 		drm_crtc_send_vblank_event(crtc, event);
576 		spin_unlock_irq(&crtc->dev->event_lock);
577 	}
578 }
579 
580 
581 /**
582  * vmw_du_crtc_duplicate_state - duplicate crtc state
583  * @crtc: DRM crtc
584  *
585  * Allocates and returns a copy of the crtc state (both common and
586  * vmw-specific) for the specified crtc.
587  *
588  * Returns: The newly allocated crtc state, or NULL on failure.
589  */
590 struct drm_crtc_state *
591 vmw_du_crtc_duplicate_state(struct drm_crtc *crtc)
592 {
593 	struct drm_crtc_state *state;
594 	struct vmw_crtc_state *vcs;
595 
596 	if (WARN_ON(!crtc->state))
597 		return NULL;
598 
599 	vcs = kmemdup(crtc->state, sizeof(*vcs), GFP_KERNEL);
600 
601 	if (!vcs)
602 		return NULL;
603 
604 	state = &vcs->base;
605 
606 	__drm_atomic_helper_crtc_duplicate_state(crtc, state);
607 
608 	return state;
609 }
610 
611 
612 /**
613  * vmw_du_crtc_reset - creates a blank vmw crtc state
614  * @crtc: DRM crtc
615  *
616  * Resets the atomic state for @crtc by freeing the state pointer (which
617  * might be NULL, e.g. at driver load time) and allocating a new empty state
618  * object.
619  */
620 void vmw_du_crtc_reset(struct drm_crtc *crtc)
621 {
622 	struct vmw_crtc_state *vcs;
623 
624 
625 	if (crtc->state) {
626 		__drm_atomic_helper_crtc_destroy_state(crtc->state);
627 
628 		kfree(vmw_crtc_state_to_vcs(crtc->state));
629 	}
630 
631 	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
632 
633 	if (!vcs) {
634 		DRM_ERROR("Cannot allocate vmw_crtc_state\n");
635 		return;
636 	}
637 
638 	__drm_atomic_helper_crtc_reset(crtc, &vcs->base);
639 }
640 
641 
642 /**
643  * vmw_du_crtc_destroy_state - destroy crtc state
644  * @crtc: DRM crtc
645  * @state: state object to destroy
646  *
647  * Destroys the crtc state (both common and vmw-specific) for the
648  * specified plane.
649  */
650 void
651 vmw_du_crtc_destroy_state(struct drm_crtc *crtc,
652 			  struct drm_crtc_state *state)
653 {
654 	drm_atomic_helper_crtc_destroy_state(crtc, state);
655 }
656 
657 
658 /**
659  * vmw_du_plane_duplicate_state - duplicate plane state
660  * @plane: drm plane
661  *
662  * Allocates and returns a copy of the plane state (both common and
663  * vmw-specific) for the specified plane.
664  *
665  * Returns: The newly allocated plane state, or NULL on failure.
666  */
667 struct drm_plane_state *
668 vmw_du_plane_duplicate_state(struct drm_plane *plane)
669 {
670 	struct drm_plane_state *state;
671 	struct vmw_plane_state *vps;
672 
673 	vps = kmemdup(plane->state, sizeof(*vps), GFP_KERNEL);
674 
675 	if (!vps)
676 		return NULL;
677 
678 	vps->pinned = 0;
679 	vps->cpp = 0;
680 
681 	/* Each ref counted resource needs to be acquired again */
682 	if (vps->surf)
683 		(void) vmw_surface_reference(vps->surf);
684 
685 	if (vps->bo)
686 		(void) vmw_bo_reference(vps->bo);
687 
688 	state = &vps->base;
689 
690 	__drm_atomic_helper_plane_duplicate_state(plane, state);
691 
692 	return state;
693 }
694 
695 
696 /**
697  * vmw_du_plane_reset - creates a blank vmw plane state
698  * @plane: drm plane
699  *
700  * Resets the atomic state for @plane by freeing the state pointer (which might
701  * be NULL, e.g. at driver load time) and allocating a new empty state object.
702  */
703 void vmw_du_plane_reset(struct drm_plane *plane)
704 {
705 	struct vmw_plane_state *vps;
706 
707 
708 	if (plane->state)
709 		vmw_du_plane_destroy_state(plane, plane->state);
710 
711 	vps = kzalloc(sizeof(*vps), GFP_KERNEL);
712 
713 	if (!vps) {
714 		DRM_ERROR("Cannot allocate vmw_plane_state\n");
715 		return;
716 	}
717 
718 	__drm_atomic_helper_plane_reset(plane, &vps->base);
719 }
720 
721 
722 /**
723  * vmw_du_plane_destroy_state - destroy plane state
724  * @plane: DRM plane
725  * @state: state object to destroy
726  *
727  * Destroys the plane state (both common and vmw-specific) for the
728  * specified plane.
729  */
730 void
731 vmw_du_plane_destroy_state(struct drm_plane *plane,
732 			   struct drm_plane_state *state)
733 {
734 	struct vmw_plane_state *vps = vmw_plane_state_to_vps(state);
735 
736 
737 	/* Should have been freed by cleanup_fb */
738 	if (vps->surf)
739 		vmw_surface_unreference(&vps->surf);
740 
741 	if (vps->bo)
742 		vmw_bo_unreference(&vps->bo);
743 
744 	drm_atomic_helper_plane_destroy_state(plane, state);
745 }
746 
747 
748 /**
749  * vmw_du_connector_duplicate_state - duplicate connector state
750  * @connector: DRM connector
751  *
752  * Allocates and returns a copy of the connector state (both common and
753  * vmw-specific) for the specified connector.
754  *
755  * Returns: The newly allocated connector state, or NULL on failure.
756  */
757 struct drm_connector_state *
758 vmw_du_connector_duplicate_state(struct drm_connector *connector)
759 {
760 	struct drm_connector_state *state;
761 	struct vmw_connector_state *vcs;
762 
763 	if (WARN_ON(!connector->state))
764 		return NULL;
765 
766 	vcs = kmemdup(connector->state, sizeof(*vcs), GFP_KERNEL);
767 
768 	if (!vcs)
769 		return NULL;
770 
771 	state = &vcs->base;
772 
773 	__drm_atomic_helper_connector_duplicate_state(connector, state);
774 
775 	return state;
776 }
777 
778 
779 /**
780  * vmw_du_connector_reset - creates a blank vmw connector state
781  * @connector: DRM connector
782  *
783  * Resets the atomic state for @connector by freeing the state pointer (which
784  * might be NULL, e.g. at driver load time) and allocating a new empty state
785  * object.
786  */
787 void vmw_du_connector_reset(struct drm_connector *connector)
788 {
789 	struct vmw_connector_state *vcs;
790 
791 
792 	if (connector->state) {
793 		__drm_atomic_helper_connector_destroy_state(connector->state);
794 
795 		kfree(vmw_connector_state_to_vcs(connector->state));
796 	}
797 
798 	vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
799 
800 	if (!vcs) {
801 		DRM_ERROR("Cannot allocate vmw_connector_state\n");
802 		return;
803 	}
804 
805 	__drm_atomic_helper_connector_reset(connector, &vcs->base);
806 }
807 
808 
809 /**
810  * vmw_du_connector_destroy_state - destroy connector state
811  * @connector: DRM connector
812  * @state: state object to destroy
813  *
814  * Destroys the connector state (both common and vmw-specific) for the
815  * specified plane.
816  */
817 void
818 vmw_du_connector_destroy_state(struct drm_connector *connector,
819 			  struct drm_connector_state *state)
820 {
821 	drm_atomic_helper_connector_destroy_state(connector, state);
822 }
823 /*
824  * Generic framebuffer code
825  */
826 
827 /*
828  * Surface framebuffer code
829  */
830 
831 static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
832 {
833 	struct vmw_framebuffer_surface *vfbs =
834 		vmw_framebuffer_to_vfbs(framebuffer);
835 
836 	drm_framebuffer_cleanup(framebuffer);
837 	vmw_surface_unreference(&vfbs->surface);
838 	if (vfbs->base.user_obj)
839 		ttm_base_object_unref(&vfbs->base.user_obj);
840 
841 	kfree(vfbs);
842 }
843 
844 /**
845  * vmw_kms_readback - Perform a readback from the screen system to
846  * a buffer-object backed framebuffer.
847  *
848  * @dev_priv: Pointer to the device private structure.
849  * @file_priv: Pointer to a struct drm_file identifying the caller.
850  * Must be set to NULL if @user_fence_rep is NULL.
851  * @vfb: Pointer to the buffer-object backed framebuffer.
852  * @user_fence_rep: User-space provided structure for fence information.
853  * Must be set to non-NULL if @file_priv is non-NULL.
854  * @vclips: Array of clip rects.
855  * @num_clips: Number of clip rects in @vclips.
856  *
857  * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
858  * interrupted.
859  */
860 int vmw_kms_readback(struct vmw_private *dev_priv,
861 		     struct drm_file *file_priv,
862 		     struct vmw_framebuffer *vfb,
863 		     struct drm_vmw_fence_rep __user *user_fence_rep,
864 		     struct drm_vmw_rect *vclips,
865 		     uint32_t num_clips)
866 {
867 	switch (dev_priv->active_display_unit) {
868 	case vmw_du_screen_object:
869 		return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
870 					    user_fence_rep, vclips, num_clips,
871 					    NULL);
872 	case vmw_du_screen_target:
873 		return vmw_kms_stdu_dma(dev_priv, file_priv, vfb,
874 					user_fence_rep, NULL, vclips, num_clips,
875 					1, false, true, NULL);
876 	default:
877 		WARN_ONCE(true,
878 			  "Readback called with invalid display system.\n");
879 }
880 
881 	return -ENOSYS;
882 }
883 
884 
885 static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
886 	.destroy = vmw_framebuffer_surface_destroy,
887 	.dirty = drm_atomic_helper_dirtyfb,
888 };
889 
890 static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
891 					   struct vmw_surface *surface,
892 					   struct vmw_framebuffer **out,
893 					   const struct drm_mode_fb_cmd2
894 					   *mode_cmd,
895 					   bool is_bo_proxy)
896 
897 {
898 	struct drm_device *dev = &dev_priv->drm;
899 	struct vmw_framebuffer_surface *vfbs;
900 	enum SVGA3dSurfaceFormat format;
901 	int ret;
902 
903 	/* 3D is only supported on HWv8 and newer hosts */
904 	if (dev_priv->active_display_unit == vmw_du_legacy)
905 		return -ENOSYS;
906 
907 	/*
908 	 * Sanity checks.
909 	 */
910 
911 	/* Surface must be marked as a scanout. */
912 	if (unlikely(!surface->metadata.scanout))
913 		return -EINVAL;
914 
915 	if (unlikely(surface->metadata.mip_levels[0] != 1 ||
916 		     surface->metadata.num_sizes != 1 ||
917 		     surface->metadata.base_size.width < mode_cmd->width ||
918 		     surface->metadata.base_size.height < mode_cmd->height ||
919 		     surface->metadata.base_size.depth != 1)) {
920 		DRM_ERROR("Incompatible surface dimensions "
921 			  "for requested mode.\n");
922 		return -EINVAL;
923 	}
924 
925 	switch (mode_cmd->pixel_format) {
926 	case DRM_FORMAT_ARGB8888:
927 		format = SVGA3D_A8R8G8B8;
928 		break;
929 	case DRM_FORMAT_XRGB8888:
930 		format = SVGA3D_X8R8G8B8;
931 		break;
932 	case DRM_FORMAT_RGB565:
933 		format = SVGA3D_R5G6B5;
934 		break;
935 	case DRM_FORMAT_XRGB1555:
936 		format = SVGA3D_A1R5G5B5;
937 		break;
938 	default:
939 		DRM_ERROR("Invalid pixel format: %p4cc\n",
940 			  &mode_cmd->pixel_format);
941 		return -EINVAL;
942 	}
943 
944 	/*
945 	 * For DX, surface format validation is done when surface->scanout
946 	 * is set.
947 	 */
948 	if (!has_sm4_context(dev_priv) && format != surface->metadata.format) {
949 		DRM_ERROR("Invalid surface format for requested mode.\n");
950 		return -EINVAL;
951 	}
952 
953 	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
954 	if (!vfbs) {
955 		ret = -ENOMEM;
956 		goto out_err1;
957 	}
958 
959 	drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
960 	vfbs->surface = vmw_surface_reference(surface);
961 	vfbs->base.user_handle = mode_cmd->handles[0];
962 	vfbs->is_bo_proxy = is_bo_proxy;
963 
964 	*out = &vfbs->base;
965 
966 	ret = drm_framebuffer_init(dev, &vfbs->base.base,
967 				   &vmw_framebuffer_surface_funcs);
968 	if (ret)
969 		goto out_err2;
970 
971 	return 0;
972 
973 out_err2:
974 	vmw_surface_unreference(&surface);
975 	kfree(vfbs);
976 out_err1:
977 	return ret;
978 }
979 
980 /*
981  * Buffer-object framebuffer code
982  */
983 
984 static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
985 {
986 	struct vmw_framebuffer_bo *vfbd =
987 		vmw_framebuffer_to_vfbd(framebuffer);
988 
989 	drm_framebuffer_cleanup(framebuffer);
990 	vmw_bo_unreference(&vfbd->buffer);
991 	if (vfbd->base.user_obj)
992 		ttm_base_object_unref(&vfbd->base.user_obj);
993 
994 	kfree(vfbd);
995 }
996 
997 static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer,
998 				    struct drm_file *file_priv,
999 				    unsigned int flags, unsigned int color,
1000 				    struct drm_clip_rect *clips,
1001 				    unsigned int num_clips)
1002 {
1003 	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
1004 	struct vmw_framebuffer_bo *vfbd =
1005 		vmw_framebuffer_to_vfbd(framebuffer);
1006 	struct drm_clip_rect norect;
1007 	int ret, increment = 1;
1008 
1009 	drm_modeset_lock_all(&dev_priv->drm);
1010 
1011 	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
1012 	if (unlikely(ret != 0)) {
1013 		drm_modeset_unlock_all(&dev_priv->drm);
1014 		return ret;
1015 	}
1016 
1017 	if (!num_clips) {
1018 		num_clips = 1;
1019 		clips = &norect;
1020 		norect.x1 = norect.y1 = 0;
1021 		norect.x2 = framebuffer->width;
1022 		norect.y2 = framebuffer->height;
1023 	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
1024 		num_clips /= 2;
1025 		increment = 2;
1026 	}
1027 
1028 	switch (dev_priv->active_display_unit) {
1029 	case vmw_du_legacy:
1030 		ret = vmw_kms_ldu_do_bo_dirty(dev_priv, &vfbd->base, 0, 0,
1031 					      clips, num_clips, increment);
1032 		break;
1033 	default:
1034 		ret = -EINVAL;
1035 		WARN_ONCE(true, "Dirty called with invalid display system.\n");
1036 		break;
1037 	}
1038 
1039 	vmw_cmd_flush(dev_priv, false);
1040 	ttm_read_unlock(&dev_priv->reservation_sem);
1041 
1042 	drm_modeset_unlock_all(&dev_priv->drm);
1043 
1044 	return ret;
1045 }
1046 
1047 static int vmw_framebuffer_bo_dirty_ext(struct drm_framebuffer *framebuffer,
1048 					struct drm_file *file_priv,
1049 					unsigned int flags, unsigned int color,
1050 					struct drm_clip_rect *clips,
1051 					unsigned int num_clips)
1052 {
1053 	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
1054 
1055 	if (dev_priv->active_display_unit == vmw_du_legacy)
1056 		return vmw_framebuffer_bo_dirty(framebuffer, file_priv, flags,
1057 						color, clips, num_clips);
1058 
1059 	return drm_atomic_helper_dirtyfb(framebuffer, file_priv, flags, color,
1060 					 clips, num_clips);
1061 }
1062 
1063 static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
1064 	.destroy = vmw_framebuffer_bo_destroy,
1065 	.dirty = vmw_framebuffer_bo_dirty_ext,
1066 };
1067 
1068 /*
1069  * Pin the bofer in a location suitable for access by the
1070  * display system.
1071  */
1072 static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
1073 {
1074 	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
1075 	struct vmw_buffer_object *buf;
1076 	struct ttm_placement *placement;
1077 	int ret;
1078 
1079 	buf = vfb->bo ?  vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
1080 		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
1081 
1082 	if (!buf)
1083 		return 0;
1084 
1085 	switch (dev_priv->active_display_unit) {
1086 	case vmw_du_legacy:
1087 		vmw_overlay_pause_all(dev_priv);
1088 		ret = vmw_bo_pin_in_start_of_vram(dev_priv, buf, false);
1089 		vmw_overlay_resume_all(dev_priv);
1090 		break;
1091 	case vmw_du_screen_object:
1092 	case vmw_du_screen_target:
1093 		if (vfb->bo) {
1094 			if (dev_priv->capabilities & SVGA_CAP_3D) {
1095 				/*
1096 				 * Use surface DMA to get content to
1097 				 * sreen target surface.
1098 				 */
1099 				placement = &vmw_vram_gmr_placement;
1100 			} else {
1101 				/* Use CPU blit. */
1102 				placement = &vmw_sys_placement;
1103 			}
1104 		} else {
1105 			/* Use surface / image update */
1106 			placement = &vmw_mob_placement;
1107 		}
1108 
1109 		return vmw_bo_pin_in_placement(dev_priv, buf, placement, false);
1110 	default:
1111 		return -EINVAL;
1112 	}
1113 
1114 	return ret;
1115 }
1116 
1117 static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
1118 {
1119 	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
1120 	struct vmw_buffer_object *buf;
1121 
1122 	buf = vfb->bo ?  vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
1123 		vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
1124 
1125 	if (WARN_ON(!buf))
1126 		return 0;
1127 
1128 	return vmw_bo_unpin(dev_priv, buf, false);
1129 }
1130 
1131 /**
1132  * vmw_create_bo_proxy - create a proxy surface for the buffer object
1133  *
1134  * @dev: DRM device
1135  * @mode_cmd: parameters for the new surface
1136  * @bo_mob: MOB backing the buffer object
1137  * @srf_out: newly created surface
1138  *
1139  * When the content FB is a buffer object, we create a surface as a proxy to the
1140  * same buffer.  This way we can do a surface copy rather than a surface DMA.
1141  * This is a more efficient approach
1142  *
1143  * RETURNS:
1144  * 0 on success, error code otherwise
1145  */
1146 static int vmw_create_bo_proxy(struct drm_device *dev,
1147 			       const struct drm_mode_fb_cmd2 *mode_cmd,
1148 			       struct vmw_buffer_object *bo_mob,
1149 			       struct vmw_surface **srf_out)
1150 {
1151 	struct vmw_surface_metadata metadata = {0};
1152 	uint32_t format;
1153 	struct vmw_resource *res;
1154 	unsigned int bytes_pp;
1155 	int ret;
1156 
1157 	switch (mode_cmd->pixel_format) {
1158 	case DRM_FORMAT_ARGB8888:
1159 	case DRM_FORMAT_XRGB8888:
1160 		format = SVGA3D_X8R8G8B8;
1161 		bytes_pp = 4;
1162 		break;
1163 
1164 	case DRM_FORMAT_RGB565:
1165 	case DRM_FORMAT_XRGB1555:
1166 		format = SVGA3D_R5G6B5;
1167 		bytes_pp = 2;
1168 		break;
1169 
1170 	case 8:
1171 		format = SVGA3D_P8;
1172 		bytes_pp = 1;
1173 		break;
1174 
1175 	default:
1176 		DRM_ERROR("Invalid framebuffer format %p4cc\n",
1177 			  &mode_cmd->pixel_format);
1178 		return -EINVAL;
1179 	}
1180 
1181 	metadata.format = format;
1182 	metadata.mip_levels[0] = 1;
1183 	metadata.num_sizes = 1;
1184 	metadata.base_size.width = mode_cmd->pitches[0] / bytes_pp;
1185 	metadata.base_size.height =  mode_cmd->height;
1186 	metadata.base_size.depth = 1;
1187 	metadata.scanout = true;
1188 
1189 	ret = vmw_gb_surface_define(vmw_priv(dev), 0, &metadata, srf_out);
1190 	if (ret) {
1191 		DRM_ERROR("Failed to allocate proxy content buffer\n");
1192 		return ret;
1193 	}
1194 
1195 	res = &(*srf_out)->res;
1196 
1197 	/* Reserve and switch the backing mob. */
1198 	mutex_lock(&res->dev_priv->cmdbuf_mutex);
1199 	(void) vmw_resource_reserve(res, false, true);
1200 	vmw_bo_unreference(&res->backup);
1201 	res->backup = vmw_bo_reference(bo_mob);
1202 	res->backup_offset = 0;
1203 	vmw_resource_unreserve(res, false, false, false, NULL, 0);
1204 	mutex_unlock(&res->dev_priv->cmdbuf_mutex);
1205 
1206 	return 0;
1207 }
1208 
1209 
1210 
1211 static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
1212 				      struct vmw_buffer_object *bo,
1213 				      struct vmw_framebuffer **out,
1214 				      const struct drm_mode_fb_cmd2
1215 				      *mode_cmd)
1216 
1217 {
1218 	struct drm_device *dev = &dev_priv->drm;
1219 	struct vmw_framebuffer_bo *vfbd;
1220 	unsigned int requested_size;
1221 	int ret;
1222 
1223 	requested_size = mode_cmd->height * mode_cmd->pitches[0];
1224 	if (unlikely(requested_size > bo->base.base.size)) {
1225 		DRM_ERROR("Screen buffer object size is too small "
1226 			  "for requested mode.\n");
1227 		return -EINVAL;
1228 	}
1229 
1230 	/* Limited framebuffer color depth support for screen objects */
1231 	if (dev_priv->active_display_unit == vmw_du_screen_object) {
1232 		switch (mode_cmd->pixel_format) {
1233 		case DRM_FORMAT_XRGB8888:
1234 		case DRM_FORMAT_ARGB8888:
1235 			break;
1236 		case DRM_FORMAT_XRGB1555:
1237 		case DRM_FORMAT_RGB565:
1238 			break;
1239 		default:
1240 			DRM_ERROR("Invalid pixel format: %p4cc\n",
1241 				  &mode_cmd->pixel_format);
1242 			return -EINVAL;
1243 		}
1244 	}
1245 
1246 	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
1247 	if (!vfbd) {
1248 		ret = -ENOMEM;
1249 		goto out_err1;
1250 	}
1251 
1252 	drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
1253 	vfbd->base.bo = true;
1254 	vfbd->buffer = vmw_bo_reference(bo);
1255 	vfbd->base.user_handle = mode_cmd->handles[0];
1256 	*out = &vfbd->base;
1257 
1258 	ret = drm_framebuffer_init(dev, &vfbd->base.base,
1259 				   &vmw_framebuffer_bo_funcs);
1260 	if (ret)
1261 		goto out_err2;
1262 
1263 	return 0;
1264 
1265 out_err2:
1266 	vmw_bo_unreference(&bo);
1267 	kfree(vfbd);
1268 out_err1:
1269 	return ret;
1270 }
1271 
1272 
1273 /**
1274  * vmw_kms_srf_ok - check if a surface can be created
1275  *
1276  * @dev_priv: Pointer to device private struct.
1277  * @width: requested width
1278  * @height: requested height
1279  *
1280  * Surfaces need to be less than texture size
1281  */
1282 static bool
1283 vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
1284 {
1285 	if (width  > dev_priv->texture_max_width ||
1286 	    height > dev_priv->texture_max_height)
1287 		return false;
1288 
1289 	return true;
1290 }
1291 
1292 /**
1293  * vmw_kms_new_framebuffer - Create a new framebuffer.
1294  *
1295  * @dev_priv: Pointer to device private struct.
1296  * @bo: Pointer to buffer object to wrap the kms framebuffer around.
1297  * Either @bo or @surface must be NULL.
1298  * @surface: Pointer to a surface to wrap the kms framebuffer around.
1299  * Either @bo or @surface must be NULL.
1300  * @only_2d: No presents will occur to this buffer object based framebuffer.
1301  * This helps the code to do some important optimizations.
1302  * @mode_cmd: Frame-buffer metadata.
1303  */
1304 struct vmw_framebuffer *
1305 vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
1306 			struct vmw_buffer_object *bo,
1307 			struct vmw_surface *surface,
1308 			bool only_2d,
1309 			const struct drm_mode_fb_cmd2 *mode_cmd)
1310 {
1311 	struct vmw_framebuffer *vfb = NULL;
1312 	bool is_bo_proxy = false;
1313 	int ret;
1314 
1315 	/*
1316 	 * We cannot use the SurfaceDMA command in an non-accelerated VM,
1317 	 * therefore, wrap the buffer object in a surface so we can use the
1318 	 * SurfaceCopy command.
1319 	 */
1320 	if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)  &&
1321 	    bo && only_2d &&
1322 	    mode_cmd->width > 64 &&  /* Don't create a proxy for cursor */
1323 	    dev_priv->active_display_unit == vmw_du_screen_target) {
1324 		ret = vmw_create_bo_proxy(&dev_priv->drm, mode_cmd,
1325 					  bo, &surface);
1326 		if (ret)
1327 			return ERR_PTR(ret);
1328 
1329 		is_bo_proxy = true;
1330 	}
1331 
1332 	/* Create the new framebuffer depending one what we have */
1333 	if (surface) {
1334 		ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
1335 						      mode_cmd,
1336 						      is_bo_proxy);
1337 
1338 		/*
1339 		 * vmw_create_bo_proxy() adds a reference that is no longer
1340 		 * needed
1341 		 */
1342 		if (is_bo_proxy)
1343 			vmw_surface_unreference(&surface);
1344 	} else if (bo) {
1345 		ret = vmw_kms_new_framebuffer_bo(dev_priv, bo, &vfb,
1346 						 mode_cmd);
1347 	} else {
1348 		BUG();
1349 	}
1350 
1351 	if (ret)
1352 		return ERR_PTR(ret);
1353 
1354 	vfb->pin = vmw_framebuffer_pin;
1355 	vfb->unpin = vmw_framebuffer_unpin;
1356 
1357 	return vfb;
1358 }
1359 
1360 /*
1361  * Generic Kernel modesetting functions
1362  */
1363 
1364 static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
1365 						 struct drm_file *file_priv,
1366 						 const struct drm_mode_fb_cmd2 *mode_cmd)
1367 {
1368 	struct vmw_private *dev_priv = vmw_priv(dev);
1369 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1370 	struct vmw_framebuffer *vfb = NULL;
1371 	struct vmw_surface *surface = NULL;
1372 	struct vmw_buffer_object *bo = NULL;
1373 	struct ttm_base_object *user_obj;
1374 	int ret;
1375 
1376 	/*
1377 	 * Take a reference on the user object of the resource
1378 	 * backing the kms fb. This ensures that user-space handle
1379 	 * lookups on that resource will always work as long as
1380 	 * it's registered with a kms framebuffer. This is important,
1381 	 * since vmw_execbuf_process identifies resources in the
1382 	 * command stream using user-space handles.
1383 	 */
1384 
1385 	user_obj = ttm_base_object_lookup(tfile, mode_cmd->handles[0]);
1386 	if (unlikely(user_obj == NULL)) {
1387 		DRM_ERROR("Could not locate requested kms frame buffer.\n");
1388 		return ERR_PTR(-ENOENT);
1389 	}
1390 
1391 	/**
1392 	 * End conditioned code.
1393 	 */
1394 
1395 	/* returns either a bo or surface */
1396 	ret = vmw_user_lookup_handle(dev_priv, tfile,
1397 				     mode_cmd->handles[0],
1398 				     &surface, &bo);
1399 	if (ret)
1400 		goto err_out;
1401 
1402 
1403 	if (!bo &&
1404 	    !vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
1405 		DRM_ERROR("Surface size cannot exceed %dx%d",
1406 			dev_priv->texture_max_width,
1407 			dev_priv->texture_max_height);
1408 		goto err_out;
1409 	}
1410 
1411 
1412 	vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
1413 				      !(dev_priv->capabilities & SVGA_CAP_3D),
1414 				      mode_cmd);
1415 	if (IS_ERR(vfb)) {
1416 		ret = PTR_ERR(vfb);
1417 		goto err_out;
1418  	}
1419 
1420 err_out:
1421 	/* vmw_user_lookup_handle takes one ref so does new_fb */
1422 	if (bo)
1423 		vmw_bo_unreference(&bo);
1424 	if (surface)
1425 		vmw_surface_unreference(&surface);
1426 
1427 	if (ret) {
1428 		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
1429 		ttm_base_object_unref(&user_obj);
1430 		return ERR_PTR(ret);
1431 	} else
1432 		vfb->user_obj = user_obj;
1433 
1434 	return &vfb->base;
1435 }
1436 
1437 /**
1438  * vmw_kms_check_display_memory - Validates display memory required for a
1439  * topology
1440  * @dev: DRM device
1441  * @num_rects: number of drm_rect in rects
1442  * @rects: array of drm_rect representing the topology to validate indexed by
1443  * crtc index.
1444  *
1445  * Returns:
1446  * 0 on success otherwise negative error code
1447  */
1448 static int vmw_kms_check_display_memory(struct drm_device *dev,
1449 					uint32_t num_rects,
1450 					struct drm_rect *rects)
1451 {
1452 	struct vmw_private *dev_priv = vmw_priv(dev);
1453 	struct drm_rect bounding_box = {0};
1454 	u64 total_pixels = 0, pixel_mem, bb_mem;
1455 	int i;
1456 
1457 	for (i = 0; i < num_rects; i++) {
1458 		/*
1459 		 * For STDU only individual screen (screen target) is limited by
1460 		 * SCREENTARGET_MAX_WIDTH/HEIGHT registers.
1461 		 */
1462 		if (dev_priv->active_display_unit == vmw_du_screen_target &&
1463 		    (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width ||
1464 		     drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) {
1465 			VMW_DEBUG_KMS("Screen size not supported.\n");
1466 			return -EINVAL;
1467 		}
1468 
1469 		/* Bounding box upper left is at (0,0). */
1470 		if (rects[i].x2 > bounding_box.x2)
1471 			bounding_box.x2 = rects[i].x2;
1472 
1473 		if (rects[i].y2 > bounding_box.y2)
1474 			bounding_box.y2 = rects[i].y2;
1475 
1476 		total_pixels += (u64) drm_rect_width(&rects[i]) *
1477 			(u64) drm_rect_height(&rects[i]);
1478 	}
1479 
1480 	/* Virtual svga device primary limits are always in 32-bpp. */
1481 	pixel_mem = total_pixels * 4;
1482 
1483 	/*
1484 	 * For HV10 and below prim_bb_mem is vram size. When
1485 	 * SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM is not present vram size is
1486 	 * limit on primary bounding box
1487 	 */
1488 	if (pixel_mem > dev_priv->prim_bb_mem) {
1489 		VMW_DEBUG_KMS("Combined output size too large.\n");
1490 		return -EINVAL;
1491 	}
1492 
1493 	/* SVGA_CAP_NO_BB_RESTRICTION is available for STDU only. */
1494 	if (dev_priv->active_display_unit != vmw_du_screen_target ||
1495 	    !(dev_priv->capabilities & SVGA_CAP_NO_BB_RESTRICTION)) {
1496 		bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4;
1497 
1498 		if (bb_mem > dev_priv->prim_bb_mem) {
1499 			VMW_DEBUG_KMS("Topology is beyond supported limits.\n");
1500 			return -EINVAL;
1501 		}
1502 	}
1503 
1504 	return 0;
1505 }
1506 
1507 /**
1508  * vmw_crtc_state_and_lock - Return new or current crtc state with locked
1509  * crtc mutex
1510  * @state: The atomic state pointer containing the new atomic state
1511  * @crtc: The crtc
1512  *
1513  * This function returns the new crtc state if it's part of the state update.
1514  * Otherwise returns the current crtc state. It also makes sure that the
1515  * crtc mutex is locked.
1516  *
1517  * Returns: A valid crtc state pointer or NULL. It may also return a
1518  * pointer error, in particular -EDEADLK if locking needs to be rerun.
1519  */
1520 static struct drm_crtc_state *
1521 vmw_crtc_state_and_lock(struct drm_atomic_state *state, struct drm_crtc *crtc)
1522 {
1523 	struct drm_crtc_state *crtc_state;
1524 
1525 	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
1526 	if (crtc_state) {
1527 		lockdep_assert_held(&crtc->mutex.mutex.base);
1528 	} else {
1529 		int ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
1530 
1531 		if (ret != 0 && ret != -EALREADY)
1532 			return ERR_PTR(ret);
1533 
1534 		crtc_state = crtc->state;
1535 	}
1536 
1537 	return crtc_state;
1538 }
1539 
1540 /**
1541  * vmw_kms_check_implicit - Verify that all implicit display units scan out
1542  * from the same fb after the new state is committed.
1543  * @dev: The drm_device.
1544  * @state: The new state to be checked.
1545  *
1546  * Returns:
1547  *   Zero on success,
1548  *   -EINVAL on invalid state,
1549  *   -EDEADLK if modeset locking needs to be rerun.
1550  */
1551 static int vmw_kms_check_implicit(struct drm_device *dev,
1552 				  struct drm_atomic_state *state)
1553 {
1554 	struct drm_framebuffer *implicit_fb = NULL;
1555 	struct drm_crtc *crtc;
1556 	struct drm_crtc_state *crtc_state;
1557 	struct drm_plane_state *plane_state;
1558 
1559 	drm_for_each_crtc(crtc, dev) {
1560 		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1561 
1562 		if (!du->is_implicit)
1563 			continue;
1564 
1565 		crtc_state = vmw_crtc_state_and_lock(state, crtc);
1566 		if (IS_ERR(crtc_state))
1567 			return PTR_ERR(crtc_state);
1568 
1569 		if (!crtc_state || !crtc_state->enable)
1570 			continue;
1571 
1572 		/*
1573 		 * Can't move primary planes across crtcs, so this is OK.
1574 		 * It also means we don't need to take the plane mutex.
1575 		 */
1576 		plane_state = du->primary.state;
1577 		if (plane_state->crtc != crtc)
1578 			continue;
1579 
1580 		if (!implicit_fb)
1581 			implicit_fb = plane_state->fb;
1582 		else if (implicit_fb != plane_state->fb)
1583 			return -EINVAL;
1584 	}
1585 
1586 	return 0;
1587 }
1588 
1589 /**
1590  * vmw_kms_check_topology - Validates topology in drm_atomic_state
1591  * @dev: DRM device
1592  * @state: the driver state object
1593  *
1594  * Returns:
1595  * 0 on success otherwise negative error code
1596  */
1597 static int vmw_kms_check_topology(struct drm_device *dev,
1598 				  struct drm_atomic_state *state)
1599 {
1600 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1601 	struct drm_rect *rects;
1602 	struct drm_crtc *crtc;
1603 	uint32_t i;
1604 	int ret = 0;
1605 
1606 	rects = kcalloc(dev->mode_config.num_crtc, sizeof(struct drm_rect),
1607 			GFP_KERNEL);
1608 	if (!rects)
1609 		return -ENOMEM;
1610 
1611 	drm_for_each_crtc(crtc, dev) {
1612 		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1613 		struct drm_crtc_state *crtc_state;
1614 
1615 		i = drm_crtc_index(crtc);
1616 
1617 		crtc_state = vmw_crtc_state_and_lock(state, crtc);
1618 		if (IS_ERR(crtc_state)) {
1619 			ret = PTR_ERR(crtc_state);
1620 			goto clean;
1621 		}
1622 
1623 		if (!crtc_state)
1624 			continue;
1625 
1626 		if (crtc_state->enable) {
1627 			rects[i].x1 = du->gui_x;
1628 			rects[i].y1 = du->gui_y;
1629 			rects[i].x2 = du->gui_x + crtc_state->mode.hdisplay;
1630 			rects[i].y2 = du->gui_y + crtc_state->mode.vdisplay;
1631 		} else {
1632 			rects[i].x1 = 0;
1633 			rects[i].y1 = 0;
1634 			rects[i].x2 = 0;
1635 			rects[i].y2 = 0;
1636 		}
1637 	}
1638 
1639 	/* Determine change to topology due to new atomic state */
1640 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
1641 				      new_crtc_state, i) {
1642 		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1643 		struct drm_connector *connector;
1644 		struct drm_connector_state *conn_state;
1645 		struct vmw_connector_state *vmw_conn_state;
1646 
1647 		if (!du->pref_active && new_crtc_state->enable) {
1648 			VMW_DEBUG_KMS("Enabling a disabled display unit\n");
1649 			ret = -EINVAL;
1650 			goto clean;
1651 		}
1652 
1653 		/*
1654 		 * For vmwgfx each crtc has only one connector attached and it
1655 		 * is not changed so don't really need to check the
1656 		 * crtc->connector_mask and iterate over it.
1657 		 */
1658 		connector = &du->connector;
1659 		conn_state = drm_atomic_get_connector_state(state, connector);
1660 		if (IS_ERR(conn_state)) {
1661 			ret = PTR_ERR(conn_state);
1662 			goto clean;
1663 		}
1664 
1665 		vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
1666 		vmw_conn_state->gui_x = du->gui_x;
1667 		vmw_conn_state->gui_y = du->gui_y;
1668 	}
1669 
1670 	ret = vmw_kms_check_display_memory(dev, dev->mode_config.num_crtc,
1671 					   rects);
1672 
1673 clean:
1674 	kfree(rects);
1675 	return ret;
1676 }
1677 
1678 /**
1679  * vmw_kms_atomic_check_modeset- validate state object for modeset changes
1680  *
1681  * @dev: DRM device
1682  * @state: the driver state object
1683  *
1684  * This is a simple wrapper around drm_atomic_helper_check_modeset() for
1685  * us to assign a value to mode->crtc_clock so that
1686  * drm_calc_timestamping_constants() won't throw an error message
1687  *
1688  * Returns:
1689  * Zero for success or -errno
1690  */
1691 static int
1692 vmw_kms_atomic_check_modeset(struct drm_device *dev,
1693 			     struct drm_atomic_state *state)
1694 {
1695 	struct drm_crtc *crtc;
1696 	struct drm_crtc_state *crtc_state;
1697 	bool need_modeset = false;
1698 	int i, ret;
1699 
1700 	ret = drm_atomic_helper_check(dev, state);
1701 	if (ret)
1702 		return ret;
1703 
1704 	ret = vmw_kms_check_implicit(dev, state);
1705 	if (ret) {
1706 		VMW_DEBUG_KMS("Invalid implicit state\n");
1707 		return ret;
1708 	}
1709 
1710 	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
1711 		if (drm_atomic_crtc_needs_modeset(crtc_state))
1712 			need_modeset = true;
1713 	}
1714 
1715 	if (need_modeset)
1716 		return vmw_kms_check_topology(dev, state);
1717 
1718 	return ret;
1719 }
1720 
1721 static const struct drm_mode_config_funcs vmw_kms_funcs = {
1722 	.fb_create = vmw_kms_fb_create,
1723 	.atomic_check = vmw_kms_atomic_check_modeset,
1724 	.atomic_commit = drm_atomic_helper_commit,
1725 };
1726 
1727 static int vmw_kms_generic_present(struct vmw_private *dev_priv,
1728 				   struct drm_file *file_priv,
1729 				   struct vmw_framebuffer *vfb,
1730 				   struct vmw_surface *surface,
1731 				   uint32_t sid,
1732 				   int32_t destX, int32_t destY,
1733 				   struct drm_vmw_rect *clips,
1734 				   uint32_t num_clips)
1735 {
1736 	return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
1737 					    &surface->res, destX, destY,
1738 					    num_clips, 1, NULL, NULL);
1739 }
1740 
1741 
1742 int vmw_kms_present(struct vmw_private *dev_priv,
1743 		    struct drm_file *file_priv,
1744 		    struct vmw_framebuffer *vfb,
1745 		    struct vmw_surface *surface,
1746 		    uint32_t sid,
1747 		    int32_t destX, int32_t destY,
1748 		    struct drm_vmw_rect *clips,
1749 		    uint32_t num_clips)
1750 {
1751 	int ret;
1752 
1753 	switch (dev_priv->active_display_unit) {
1754 	case vmw_du_screen_target:
1755 		ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
1756 						 &surface->res, destX, destY,
1757 						 num_clips, 1, NULL, NULL);
1758 		break;
1759 	case vmw_du_screen_object:
1760 		ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
1761 					      sid, destX, destY, clips,
1762 					      num_clips);
1763 		break;
1764 	default:
1765 		WARN_ONCE(true,
1766 			  "Present called with invalid display system.\n");
1767 		ret = -ENOSYS;
1768 		break;
1769 	}
1770 	if (ret)
1771 		return ret;
1772 
1773 	vmw_cmd_flush(dev_priv, false);
1774 
1775 	return 0;
1776 }
1777 
1778 static void
1779 vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
1780 {
1781 	if (dev_priv->hotplug_mode_update_property)
1782 		return;
1783 
1784 	dev_priv->hotplug_mode_update_property =
1785 		drm_property_create_range(&dev_priv->drm,
1786 					  DRM_MODE_PROP_IMMUTABLE,
1787 					  "hotplug_mode_update", 0, 1);
1788 }
1789 
1790 int vmw_kms_init(struct vmw_private *dev_priv)
1791 {
1792 	struct drm_device *dev = &dev_priv->drm;
1793 	int ret;
1794 
1795 	drm_mode_config_init(dev);
1796 	dev->mode_config.funcs = &vmw_kms_funcs;
1797 	dev->mode_config.min_width = 1;
1798 	dev->mode_config.min_height = 1;
1799 	dev->mode_config.max_width = dev_priv->texture_max_width;
1800 	dev->mode_config.max_height = dev_priv->texture_max_height;
1801 
1802 	drm_mode_create_suggested_offset_properties(dev);
1803 	vmw_kms_create_hotplug_mode_update_property(dev_priv);
1804 
1805 	ret = vmw_kms_stdu_init_display(dev_priv);
1806 	if (ret) {
1807 		ret = vmw_kms_sou_init_display(dev_priv);
1808 		if (ret) /* Fallback */
1809 			ret = vmw_kms_ldu_init_display(dev_priv);
1810 	}
1811 
1812 	return ret;
1813 }
1814 
1815 int vmw_kms_close(struct vmw_private *dev_priv)
1816 {
1817 	int ret = 0;
1818 
1819 	/*
1820 	 * Docs says we should take the lock before calling this function
1821 	 * but since it destroys encoders and our destructor calls
1822 	 * drm_encoder_cleanup which takes the lock we deadlock.
1823 	 */
1824 	drm_mode_config_cleanup(&dev_priv->drm);
1825 	if (dev_priv->active_display_unit == vmw_du_legacy)
1826 		ret = vmw_kms_ldu_close_display(dev_priv);
1827 
1828 	return ret;
1829 }
1830 
1831 int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
1832 				struct drm_file *file_priv)
1833 {
1834 	struct drm_vmw_cursor_bypass_arg *arg = data;
1835 	struct vmw_display_unit *du;
1836 	struct drm_crtc *crtc;
1837 	int ret = 0;
1838 
1839 
1840 	mutex_lock(&dev->mode_config.mutex);
1841 	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
1842 
1843 		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1844 			du = vmw_crtc_to_du(crtc);
1845 			du->hotspot_x = arg->xhot;
1846 			du->hotspot_y = arg->yhot;
1847 		}
1848 
1849 		mutex_unlock(&dev->mode_config.mutex);
1850 		return 0;
1851 	}
1852 
1853 	crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
1854 	if (!crtc) {
1855 		ret = -ENOENT;
1856 		goto out;
1857 	}
1858 
1859 	du = vmw_crtc_to_du(crtc);
1860 
1861 	du->hotspot_x = arg->xhot;
1862 	du->hotspot_y = arg->yhot;
1863 
1864 out:
1865 	mutex_unlock(&dev->mode_config.mutex);
1866 
1867 	return ret;
1868 }
1869 
1870 int vmw_kms_write_svga(struct vmw_private *vmw_priv,
1871 			unsigned width, unsigned height, unsigned pitch,
1872 			unsigned bpp, unsigned depth)
1873 {
1874 	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
1875 		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
1876 	else if (vmw_fifo_have_pitchlock(vmw_priv))
1877 		vmw_fifo_mem_write(vmw_priv, SVGA_FIFO_PITCHLOCK, pitch);
1878 	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
1879 	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
1880 	if ((vmw_priv->capabilities & SVGA_CAP_8BIT_EMULATION) != 0)
1881 		vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
1882 
1883 	if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
1884 		DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
1885 			  depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
1886 		return -EINVAL;
1887 	}
1888 
1889 	return 0;
1890 }
1891 
1892 bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
1893 				uint32_t pitch,
1894 				uint32_t height)
1895 {
1896 	return ((u64) pitch * (u64) height) < (u64)
1897 		((dev_priv->active_display_unit == vmw_du_screen_target) ?
1898 		 dev_priv->prim_bb_mem : dev_priv->vram_size);
1899 }
1900 
1901 
1902 /*
1903  * Function called by DRM code called with vbl_lock held.
1904  */
1905 u32 vmw_get_vblank_counter(struct drm_crtc *crtc)
1906 {
1907 	return 0;
1908 }
1909 
1910 /*
1911  * Function called by DRM code called with vbl_lock held.
1912  */
1913 int vmw_enable_vblank(struct drm_crtc *crtc)
1914 {
1915 	return -EINVAL;
1916 }
1917 
1918 /*
1919  * Function called by DRM code called with vbl_lock held.
1920  */
1921 void vmw_disable_vblank(struct drm_crtc *crtc)
1922 {
1923 }
1924 
1925 /**
1926  * vmw_du_update_layout - Update the display unit with topology from resolution
1927  * plugin and generate DRM uevent
1928  * @dev_priv: device private
1929  * @num_rects: number of drm_rect in rects
1930  * @rects: toplogy to update
1931  */
1932 static int vmw_du_update_layout(struct vmw_private *dev_priv,
1933 				unsigned int num_rects, struct drm_rect *rects)
1934 {
1935 	struct drm_device *dev = &dev_priv->drm;
1936 	struct vmw_display_unit *du;
1937 	struct drm_connector *con;
1938 	struct drm_connector_list_iter conn_iter;
1939 	struct drm_modeset_acquire_ctx ctx;
1940 	struct drm_crtc *crtc;
1941 	int ret;
1942 
1943 	/* Currently gui_x/y is protected with the crtc mutex */
1944 	mutex_lock(&dev->mode_config.mutex);
1945 	drm_modeset_acquire_init(&ctx, 0);
1946 retry:
1947 	drm_for_each_crtc(crtc, dev) {
1948 		ret = drm_modeset_lock(&crtc->mutex, &ctx);
1949 		if (ret < 0) {
1950 			if (ret == -EDEADLK) {
1951 				drm_modeset_backoff(&ctx);
1952 				goto retry;
1953       		}
1954 			goto out_fini;
1955 		}
1956 	}
1957 
1958 	drm_connector_list_iter_begin(dev, &conn_iter);
1959 	drm_for_each_connector_iter(con, &conn_iter) {
1960 		du = vmw_connector_to_du(con);
1961 		if (num_rects > du->unit) {
1962 			du->pref_width = drm_rect_width(&rects[du->unit]);
1963 			du->pref_height = drm_rect_height(&rects[du->unit]);
1964 			du->pref_active = true;
1965 			du->gui_x = rects[du->unit].x1;
1966 			du->gui_y = rects[du->unit].y1;
1967 		} else {
1968 			du->pref_width = 800;
1969 			du->pref_height = 600;
1970 			du->pref_active = false;
1971 			du->gui_x = 0;
1972 			du->gui_y = 0;
1973 		}
1974 	}
1975 	drm_connector_list_iter_end(&conn_iter);
1976 
1977 	list_for_each_entry(con, &dev->mode_config.connector_list, head) {
1978 		du = vmw_connector_to_du(con);
1979 		if (num_rects > du->unit) {
1980 			drm_object_property_set_value
1981 			  (&con->base, dev->mode_config.suggested_x_property,
1982 			   du->gui_x);
1983 			drm_object_property_set_value
1984 			  (&con->base, dev->mode_config.suggested_y_property,
1985 			   du->gui_y);
1986 		} else {
1987 			drm_object_property_set_value
1988 			  (&con->base, dev->mode_config.suggested_x_property,
1989 			   0);
1990 			drm_object_property_set_value
1991 			  (&con->base, dev->mode_config.suggested_y_property,
1992 			   0);
1993 		}
1994 		con->status = vmw_du_connector_detect(con, true);
1995 	}
1996 
1997 	drm_sysfs_hotplug_event(dev);
1998 out_fini:
1999 	drm_modeset_drop_locks(&ctx);
2000 	drm_modeset_acquire_fini(&ctx);
2001 	mutex_unlock(&dev->mode_config.mutex);
2002 
2003 	return 0;
2004 }
2005 
2006 int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
2007 			  u16 *r, u16 *g, u16 *b,
2008 			  uint32_t size,
2009 			  struct drm_modeset_acquire_ctx *ctx)
2010 {
2011 	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
2012 	int i;
2013 
2014 	for (i = 0; i < size; i++) {
2015 		DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
2016 			  r[i], g[i], b[i]);
2017 		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
2018 		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
2019 		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
2020 	}
2021 
2022 	return 0;
2023 }
2024 
2025 int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
2026 {
2027 	return 0;
2028 }
2029 
2030 enum drm_connector_status
2031 vmw_du_connector_detect(struct drm_connector *connector, bool force)
2032 {
2033 	uint32_t num_displays;
2034 	struct drm_device *dev = connector->dev;
2035 	struct vmw_private *dev_priv = vmw_priv(dev);
2036 	struct vmw_display_unit *du = vmw_connector_to_du(connector);
2037 
2038 	num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
2039 
2040 	return ((vmw_connector_to_du(connector)->unit < num_displays &&
2041 		 du->pref_active) ?
2042 		connector_status_connected : connector_status_disconnected);
2043 }
2044 
2045 static struct drm_display_mode vmw_kms_connector_builtin[] = {
2046 	/* 640x480@60Hz */
2047 	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
2048 		   752, 800, 0, 480, 489, 492, 525, 0,
2049 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
2050 	/* 800x600@60Hz */
2051 	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
2052 		   968, 1056, 0, 600, 601, 605, 628, 0,
2053 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2054 	/* 1024x768@60Hz */
2055 	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
2056 		   1184, 1344, 0, 768, 771, 777, 806, 0,
2057 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
2058 	/* 1152x864@75Hz */
2059 	{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
2060 		   1344, 1600, 0, 864, 865, 868, 900, 0,
2061 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2062 	/* 1280x720@60Hz */
2063 	{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74500, 1280, 1344,
2064 		   1472, 1664, 0, 720, 723, 728, 748, 0,
2065 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2066 	/* 1280x768@60Hz */
2067 	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
2068 		   1472, 1664, 0, 768, 771, 778, 798, 0,
2069 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2070 	/* 1280x800@60Hz */
2071 	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
2072 		   1480, 1680, 0, 800, 803, 809, 831, 0,
2073 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2074 	/* 1280x960@60Hz */
2075 	{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
2076 		   1488, 1800, 0, 960, 961, 964, 1000, 0,
2077 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2078 	/* 1280x1024@60Hz */
2079 	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
2080 		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
2081 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2082 	/* 1360x768@60Hz */
2083 	{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
2084 		   1536, 1792, 0, 768, 771, 777, 795, 0,
2085 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2086 	/* 1440x1050@60Hz */
2087 	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
2088 		   1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
2089 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2090 	/* 1440x900@60Hz */
2091 	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
2092 		   1672, 1904, 0, 900, 903, 909, 934, 0,
2093 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2094 	/* 1600x1200@60Hz */
2095 	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
2096 		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
2097 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
2098 	/* 1680x1050@60Hz */
2099 	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
2100 		   1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
2101 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2102 	/* 1792x1344@60Hz */
2103 	{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
2104 		   2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
2105 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2106 	/* 1853x1392@60Hz */
2107 	{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
2108 		   2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
2109 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2110 	/* 1920x1080@60Hz */
2111 	{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 173000, 1920, 2048,
2112 		   2248, 2576, 0, 1080, 1083, 1088, 1120, 0,
2113 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2114 	/* 1920x1200@60Hz */
2115 	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
2116 		   2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
2117 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2118 	/* 1920x1440@60Hz */
2119 	{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
2120 		   2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
2121 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2122 	/* 2560x1440@60Hz */
2123 	{ DRM_MODE("2560x1440", DRM_MODE_TYPE_DRIVER, 241500, 2560, 2608,
2124 		   2640, 2720, 0, 1440, 1443, 1448, 1481, 0,
2125 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2126 	/* 2560x1600@60Hz */
2127 	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
2128 		   3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
2129 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
2130 	/* 2880x1800@60Hz */
2131 	{ DRM_MODE("2880x1800", DRM_MODE_TYPE_DRIVER, 337500, 2880, 2928,
2132 		   2960, 3040, 0, 1800, 1803, 1809, 1852, 0,
2133 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2134 	/* 3840x2160@60Hz */
2135 	{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 533000, 3840, 3888,
2136 		   3920, 4000, 0, 2160, 2163, 2168, 2222, 0,
2137 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2138 	/* 3840x2400@60Hz */
2139 	{ DRM_MODE("3840x2400", DRM_MODE_TYPE_DRIVER, 592250, 3840, 3888,
2140 		   3920, 4000, 0, 2400, 2403, 2409, 2469, 0,
2141 		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
2142 	/* Terminate */
2143 	{ DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
2144 };
2145 
2146 /**
2147  * vmw_guess_mode_timing - Provide fake timings for a
2148  * 60Hz vrefresh mode.
2149  *
2150  * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
2151  * members filled in.
2152  */
2153 void vmw_guess_mode_timing(struct drm_display_mode *mode)
2154 {
2155 	mode->hsync_start = mode->hdisplay + 50;
2156 	mode->hsync_end = mode->hsync_start + 50;
2157 	mode->htotal = mode->hsync_end + 50;
2158 
2159 	mode->vsync_start = mode->vdisplay + 50;
2160 	mode->vsync_end = mode->vsync_start + 50;
2161 	mode->vtotal = mode->vsync_end + 50;
2162 
2163 	mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
2164 }
2165 
2166 
2167 int vmw_du_connector_fill_modes(struct drm_connector *connector,
2168 				uint32_t max_width, uint32_t max_height)
2169 {
2170 	struct vmw_display_unit *du = vmw_connector_to_du(connector);
2171 	struct drm_device *dev = connector->dev;
2172 	struct vmw_private *dev_priv = vmw_priv(dev);
2173 	struct drm_display_mode *mode = NULL;
2174 	struct drm_display_mode *bmode;
2175 	struct drm_display_mode prefmode = { DRM_MODE("preferred",
2176 		DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
2177 		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2178 		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
2179 	};
2180 	int i;
2181 	u32 assumed_bpp = 4;
2182 
2183 	if (dev_priv->assume_16bpp)
2184 		assumed_bpp = 2;
2185 
2186 	max_width  = min(max_width,  dev_priv->texture_max_width);
2187 	max_height = min(max_height, dev_priv->texture_max_height);
2188 
2189 	/*
2190 	 * For STDU extra limit for a mode on SVGA_REG_SCREENTARGET_MAX_WIDTH/
2191 	 * HEIGHT registers.
2192 	 */
2193 	if (dev_priv->active_display_unit == vmw_du_screen_target) {
2194 		max_width  = min(max_width,  dev_priv->stdu_max_width);
2195 		max_height = min(max_height, dev_priv->stdu_max_height);
2196 	}
2197 
2198 	/* Add preferred mode */
2199 	mode = drm_mode_duplicate(dev, &prefmode);
2200 	if (!mode)
2201 		return 0;
2202 	mode->hdisplay = du->pref_width;
2203 	mode->vdisplay = du->pref_height;
2204 	vmw_guess_mode_timing(mode);
2205 	drm_mode_set_name(mode);
2206 
2207 	if (vmw_kms_validate_mode_vram(dev_priv,
2208 					mode->hdisplay * assumed_bpp,
2209 					mode->vdisplay)) {
2210 		drm_mode_probed_add(connector, mode);
2211 	} else {
2212 		drm_mode_destroy(dev, mode);
2213 		mode = NULL;
2214 	}
2215 
2216 	if (du->pref_mode) {
2217 		list_del_init(&du->pref_mode->head);
2218 		drm_mode_destroy(dev, du->pref_mode);
2219 	}
2220 
2221 	/* mode might be null here, this is intended */
2222 	du->pref_mode = mode;
2223 
2224 	for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
2225 		bmode = &vmw_kms_connector_builtin[i];
2226 		if (bmode->hdisplay > max_width ||
2227 		    bmode->vdisplay > max_height)
2228 			continue;
2229 
2230 		if (!vmw_kms_validate_mode_vram(dev_priv,
2231 						bmode->hdisplay * assumed_bpp,
2232 						bmode->vdisplay))
2233 			continue;
2234 
2235 		mode = drm_mode_duplicate(dev, bmode);
2236 		if (!mode)
2237 			return 0;
2238 
2239 		drm_mode_probed_add(connector, mode);
2240 	}
2241 
2242 	drm_connector_list_update(connector);
2243 	/* Move the prefered mode first, help apps pick the right mode. */
2244 	drm_mode_sort(&connector->modes);
2245 
2246 	return 1;
2247 }
2248 
2249 /**
2250  * vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
2251  * @dev: drm device for the ioctl
2252  * @data: data pointer for the ioctl
2253  * @file_priv: drm file for the ioctl call
2254  *
2255  * Update preferred topology of display unit as per ioctl request. The topology
2256  * is expressed as array of drm_vmw_rect.
2257  * e.g.
2258  * [0 0 640 480] [640 0 800 600] [0 480 640 480]
2259  *
2260  * NOTE:
2261  * The x and y offset (upper left) in drm_vmw_rect cannot be less than 0. Beside
2262  * device limit on topology, x + w and y + h (lower right) cannot be greater
2263  * than INT_MAX. So topology beyond these limits will return with error.
2264  *
2265  * Returns:
2266  * Zero on success, negative errno on failure.
2267  */
2268 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
2269 				struct drm_file *file_priv)
2270 {
2271 	struct vmw_private *dev_priv = vmw_priv(dev);
2272 	struct drm_mode_config *mode_config = &dev->mode_config;
2273 	struct drm_vmw_update_layout_arg *arg =
2274 		(struct drm_vmw_update_layout_arg *)data;
2275 	void __user *user_rects;
2276 	struct drm_vmw_rect *rects;
2277 	struct drm_rect *drm_rects;
2278 	unsigned rects_size;
2279 	int ret, i;
2280 
2281 	if (!arg->num_outputs) {
2282 		struct drm_rect def_rect = {0, 0, 800, 600};
2283 		VMW_DEBUG_KMS("Default layout x1 = %d y1 = %d x2 = %d y2 = %d\n",
2284 			      def_rect.x1, def_rect.y1,
2285 			      def_rect.x2, def_rect.y2);
2286 		vmw_du_update_layout(dev_priv, 1, &def_rect);
2287 		return 0;
2288 	}
2289 
2290 	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
2291 	rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
2292 			GFP_KERNEL);
2293 	if (unlikely(!rects))
2294 		return -ENOMEM;
2295 
2296 	user_rects = (void __user *)(unsigned long)arg->rects;
2297 	ret = copy_from_user(rects, user_rects, rects_size);
2298 	if (unlikely(ret != 0)) {
2299 		DRM_ERROR("Failed to get rects.\n");
2300 		ret = -EFAULT;
2301 		goto out_free;
2302 	}
2303 
2304 	drm_rects = (struct drm_rect *)rects;
2305 
2306 	VMW_DEBUG_KMS("Layout count = %u\n", arg->num_outputs);
2307 	for (i = 0; i < arg->num_outputs; i++) {
2308 		struct drm_vmw_rect curr_rect;
2309 
2310 		/* Verify user-space for overflow as kernel use drm_rect */
2311 		if ((rects[i].x + rects[i].w > INT_MAX) ||
2312 		    (rects[i].y + rects[i].h > INT_MAX)) {
2313 			ret = -ERANGE;
2314 			goto out_free;
2315 		}
2316 
2317 		curr_rect = rects[i];
2318 		drm_rects[i].x1 = curr_rect.x;
2319 		drm_rects[i].y1 = curr_rect.y;
2320 		drm_rects[i].x2 = curr_rect.x + curr_rect.w;
2321 		drm_rects[i].y2 = curr_rect.y + curr_rect.h;
2322 
2323 		VMW_DEBUG_KMS("  x1 = %d y1 = %d x2 = %d y2 = %d\n",
2324 			      drm_rects[i].x1, drm_rects[i].y1,
2325 			      drm_rects[i].x2, drm_rects[i].y2);
2326 
2327 		/*
2328 		 * Currently this check is limiting the topology within
2329 		 * mode_config->max (which actually is max texture size
2330 		 * supported by virtual device). This limit is here to address
2331 		 * window managers that create a big framebuffer for whole
2332 		 * topology.
2333 		 */
2334 		if (drm_rects[i].x1 < 0 ||  drm_rects[i].y1 < 0 ||
2335 		    drm_rects[i].x2 > mode_config->max_width ||
2336 		    drm_rects[i].y2 > mode_config->max_height) {
2337 			VMW_DEBUG_KMS("Invalid layout %d %d %d %d\n",
2338 				      drm_rects[i].x1, drm_rects[i].y1,
2339 				      drm_rects[i].x2, drm_rects[i].y2);
2340 			ret = -EINVAL;
2341 			goto out_free;
2342 		}
2343 	}
2344 
2345 	ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);
2346 
2347 	if (ret == 0)
2348 		vmw_du_update_layout(dev_priv, arg->num_outputs, drm_rects);
2349 
2350 out_free:
2351 	kfree(rects);
2352 	return ret;
2353 }
2354 
2355 /**
2356  * vmw_kms_helper_dirty - Helper to build commands and perform actions based
2357  * on a set of cliprects and a set of display units.
2358  *
2359  * @dev_priv: Pointer to a device private structure.
2360  * @framebuffer: Pointer to the framebuffer on which to perform the actions.
2361  * @clips: A set of struct drm_clip_rect. Either this os @vclips must be NULL.
2362  * Cliprects are given in framebuffer coordinates.
2363  * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
2364  * be NULL. Cliprects are given in source coordinates.
2365  * @dest_x: X coordinate offset for the crtc / destination clip rects.
2366  * @dest_y: Y coordinate offset for the crtc / destination clip rects.
2367  * @num_clips: Number of cliprects in the @clips or @vclips array.
2368  * @increment: Integer with which to increment the clip counter when looping.
2369  * Used to skip a predetermined number of clip rects.
2370  * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
2371  */
2372 int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
2373 			 struct vmw_framebuffer *framebuffer,
2374 			 const struct drm_clip_rect *clips,
2375 			 const struct drm_vmw_rect *vclips,
2376 			 s32 dest_x, s32 dest_y,
2377 			 int num_clips,
2378 			 int increment,
2379 			 struct vmw_kms_dirty *dirty)
2380 {
2381 	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
2382 	struct drm_crtc *crtc;
2383 	u32 num_units = 0;
2384 	u32 i, k;
2385 
2386 	dirty->dev_priv = dev_priv;
2387 
2388 	/* If crtc is passed, no need to iterate over other display units */
2389 	if (dirty->crtc) {
2390 		units[num_units++] = vmw_crtc_to_du(dirty->crtc);
2391 	} else {
2392 		list_for_each_entry(crtc, &dev_priv->drm.mode_config.crtc_list,
2393 				    head) {
2394 			struct drm_plane *plane = crtc->primary;
2395 
2396 			if (plane->state->fb == &framebuffer->base)
2397 				units[num_units++] = vmw_crtc_to_du(crtc);
2398 		}
2399 	}
2400 
2401 	for (k = 0; k < num_units; k++) {
2402 		struct vmw_display_unit *unit = units[k];
2403 		s32 crtc_x = unit->crtc.x;
2404 		s32 crtc_y = unit->crtc.y;
2405 		s32 crtc_width = unit->crtc.mode.hdisplay;
2406 		s32 crtc_height = unit->crtc.mode.vdisplay;
2407 		const struct drm_clip_rect *clips_ptr = clips;
2408 		const struct drm_vmw_rect *vclips_ptr = vclips;
2409 
2410 		dirty->unit = unit;
2411 		if (dirty->fifo_reserve_size > 0) {
2412 			dirty->cmd = VMW_CMD_RESERVE(dev_priv,
2413 						      dirty->fifo_reserve_size);
2414 			if (!dirty->cmd)
2415 				return -ENOMEM;
2416 
2417 			memset(dirty->cmd, 0, dirty->fifo_reserve_size);
2418 		}
2419 		dirty->num_hits = 0;
2420 		for (i = 0; i < num_clips; i++, clips_ptr += increment,
2421 		       vclips_ptr += increment) {
2422 			s32 clip_left;
2423 			s32 clip_top;
2424 
2425 			/*
2426 			 * Select clip array type. Note that integer type
2427 			 * in @clips is unsigned short, whereas in @vclips
2428 			 * it's 32-bit.
2429 			 */
2430 			if (clips) {
2431 				dirty->fb_x = (s32) clips_ptr->x1;
2432 				dirty->fb_y = (s32) clips_ptr->y1;
2433 				dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
2434 					crtc_x;
2435 				dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
2436 					crtc_y;
2437 			} else {
2438 				dirty->fb_x = vclips_ptr->x;
2439 				dirty->fb_y = vclips_ptr->y;
2440 				dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
2441 					dest_x - crtc_x;
2442 				dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
2443 					dest_y - crtc_y;
2444 			}
2445 
2446 			dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
2447 			dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;
2448 
2449 			/* Skip this clip if it's outside the crtc region */
2450 			if (dirty->unit_x1 >= crtc_width ||
2451 			    dirty->unit_y1 >= crtc_height ||
2452 			    dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
2453 				continue;
2454 
2455 			/* Clip right and bottom to crtc limits */
2456 			dirty->unit_x2 = min_t(s32, dirty->unit_x2,
2457 					       crtc_width);
2458 			dirty->unit_y2 = min_t(s32, dirty->unit_y2,
2459 					       crtc_height);
2460 
2461 			/* Clip left and top to crtc limits */
2462 			clip_left = min_t(s32, dirty->unit_x1, 0);
2463 			clip_top = min_t(s32, dirty->unit_y1, 0);
2464 			dirty->unit_x1 -= clip_left;
2465 			dirty->unit_y1 -= clip_top;
2466 			dirty->fb_x -= clip_left;
2467 			dirty->fb_y -= clip_top;
2468 
2469 			dirty->clip(dirty);
2470 		}
2471 
2472 		dirty->fifo_commit(dirty);
2473 	}
2474 
2475 	return 0;
2476 }
2477 
2478 /**
2479  * vmw_kms_helper_validation_finish - Helper for post KMS command submission
2480  * cleanup and fencing
2481  * @dev_priv: Pointer to the device-private struct
2482  * @file_priv: Pointer identifying the client when user-space fencing is used
2483  * @ctx: Pointer to the validation context
2484  * @out_fence: If non-NULL, returned refcounted fence-pointer
2485  * @user_fence_rep: If non-NULL, pointer to user-space address area
2486  * in which to copy user-space fence info
2487  */
2488 void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
2489 				      struct drm_file *file_priv,
2490 				      struct vmw_validation_context *ctx,
2491 				      struct vmw_fence_obj **out_fence,
2492 				      struct drm_vmw_fence_rep __user *
2493 				      user_fence_rep)
2494 {
2495 	struct vmw_fence_obj *fence = NULL;
2496 	uint32_t handle = 0;
2497 	int ret = 0;
2498 
2499 	if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
2500 	    out_fence)
2501 		ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
2502 						 file_priv ? &handle : NULL);
2503 	vmw_validation_done(ctx, fence);
2504 	if (file_priv)
2505 		vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
2506 					    ret, user_fence_rep, fence,
2507 					    handle, -1, NULL);
2508 	if (out_fence)
2509 		*out_fence = fence;
2510 	else
2511 		vmw_fence_obj_unreference(&fence);
2512 }
2513 
2514 /**
2515  * vmw_kms_update_proxy - Helper function to update a proxy surface from
2516  * its backing MOB.
2517  *
2518  * @res: Pointer to the surface resource
2519  * @clips: Clip rects in framebuffer (surface) space.
2520  * @num_clips: Number of clips in @clips.
2521  * @increment: Integer with which to increment the clip counter when looping.
2522  * Used to skip a predetermined number of clip rects.
2523  *
2524  * This function makes sure the proxy surface is updated from its backing MOB
2525  * using the region given by @clips. The surface resource @res and its backing
2526  * MOB needs to be reserved and validated on call.
2527  */
2528 int vmw_kms_update_proxy(struct vmw_resource *res,
2529 			 const struct drm_clip_rect *clips,
2530 			 unsigned num_clips,
2531 			 int increment)
2532 {
2533 	struct vmw_private *dev_priv = res->dev_priv;
2534 	struct drm_vmw_size *size = &vmw_res_to_srf(res)->metadata.base_size;
2535 	struct {
2536 		SVGA3dCmdHeader header;
2537 		SVGA3dCmdUpdateGBImage body;
2538 	} *cmd;
2539 	SVGA3dBox *box;
2540 	size_t copy_size = 0;
2541 	int i;
2542 
2543 	if (!clips)
2544 		return 0;
2545 
2546 	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd) * num_clips);
2547 	if (!cmd)
2548 		return -ENOMEM;
2549 
2550 	for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
2551 		box = &cmd->body.box;
2552 
2553 		cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
2554 		cmd->header.size = sizeof(cmd->body);
2555 		cmd->body.image.sid = res->id;
2556 		cmd->body.image.face = 0;
2557 		cmd->body.image.mipmap = 0;
2558 
2559 		if (clips->x1 > size->width || clips->x2 > size->width ||
2560 		    clips->y1 > size->height || clips->y2 > size->height) {
2561 			DRM_ERROR("Invalid clips outsize of framebuffer.\n");
2562 			return -EINVAL;
2563 		}
2564 
2565 		box->x = clips->x1;
2566 		box->y = clips->y1;
2567 		box->z = 0;
2568 		box->w = clips->x2 - clips->x1;
2569 		box->h = clips->y2 - clips->y1;
2570 		box->d = 1;
2571 
2572 		copy_size += sizeof(*cmd);
2573 	}
2574 
2575 	vmw_cmd_commit(dev_priv, copy_size);
2576 
2577 	return 0;
2578 }
2579 
2580 int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
2581 			    unsigned unit,
2582 			    u32 max_width,
2583 			    u32 max_height,
2584 			    struct drm_connector **p_con,
2585 			    struct drm_crtc **p_crtc,
2586 			    struct drm_display_mode **p_mode)
2587 {
2588 	struct drm_connector *con;
2589 	struct vmw_display_unit *du;
2590 	struct drm_display_mode *mode;
2591 	int i = 0;
2592 	int ret = 0;
2593 
2594 	mutex_lock(&dev_priv->drm.mode_config.mutex);
2595 	list_for_each_entry(con, &dev_priv->drm.mode_config.connector_list,
2596 			    head) {
2597 		if (i == unit)
2598 			break;
2599 
2600 		++i;
2601 	}
2602 
2603 	if (&con->head == &dev_priv->drm.mode_config.connector_list) {
2604 		DRM_ERROR("Could not find initial display unit.\n");
2605 		ret = -EINVAL;
2606 		goto out_unlock;
2607 	}
2608 
2609 	if (list_empty(&con->modes))
2610 		(void) vmw_du_connector_fill_modes(con, max_width, max_height);
2611 
2612 	if (list_empty(&con->modes)) {
2613 		DRM_ERROR("Could not find initial display mode.\n");
2614 		ret = -EINVAL;
2615 		goto out_unlock;
2616 	}
2617 
2618 	du = vmw_connector_to_du(con);
2619 	*p_con = con;
2620 	*p_crtc = &du->crtc;
2621 
2622 	list_for_each_entry(mode, &con->modes, head) {
2623 		if (mode->type & DRM_MODE_TYPE_PREFERRED)
2624 			break;
2625 	}
2626 
2627 	if (&mode->head == &con->modes) {
2628 		WARN_ONCE(true, "Could not find initial preferred mode.\n");
2629 		*p_mode = list_first_entry(&con->modes,
2630 					   struct drm_display_mode,
2631 					   head);
2632 	} else {
2633 		*p_mode = mode;
2634 	}
2635 
2636  out_unlock:
2637 	mutex_unlock(&dev_priv->drm.mode_config.mutex);
2638 
2639 	return ret;
2640 }
2641 
2642 /**
2643  * vmw_kms_create_implicit_placement_proparty - Set up the implicit placement
2644  * property.
2645  *
2646  * @dev_priv: Pointer to a device private struct.
2647  *
2648  * Sets up the implicit placement property unless it's already set up.
2649  */
2650 void
2651 vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv)
2652 {
2653 	if (dev_priv->implicit_placement_property)
2654 		return;
2655 
2656 	dev_priv->implicit_placement_property =
2657 		drm_property_create_range(&dev_priv->drm,
2658 					  DRM_MODE_PROP_IMMUTABLE,
2659 					  "implicit_placement", 0, 1);
2660 }
2661 
2662 /**
2663  * vmw_kms_suspend - Save modesetting state and turn modesetting off.
2664  *
2665  * @dev: Pointer to the drm device
2666  * Return: 0 on success. Negative error code on failure.
2667  */
2668 int vmw_kms_suspend(struct drm_device *dev)
2669 {
2670 	struct vmw_private *dev_priv = vmw_priv(dev);
2671 
2672 	dev_priv->suspend_state = drm_atomic_helper_suspend(dev);
2673 	if (IS_ERR(dev_priv->suspend_state)) {
2674 		int ret = PTR_ERR(dev_priv->suspend_state);
2675 
2676 		DRM_ERROR("Failed kms suspend: %d\n", ret);
2677 		dev_priv->suspend_state = NULL;
2678 
2679 		return ret;
2680 	}
2681 
2682 	return 0;
2683 }
2684 
2685 
2686 /**
2687  * vmw_kms_resume - Re-enable modesetting and restore state
2688  *
2689  * @dev: Pointer to the drm device
2690  * Return: 0 on success. Negative error code on failure.
2691  *
2692  * State is resumed from a previous vmw_kms_suspend(). It's illegal
2693  * to call this function without a previous vmw_kms_suspend().
2694  */
2695 int vmw_kms_resume(struct drm_device *dev)
2696 {
2697 	struct vmw_private *dev_priv = vmw_priv(dev);
2698 	int ret;
2699 
2700 	if (WARN_ON(!dev_priv->suspend_state))
2701 		return 0;
2702 
2703 	ret = drm_atomic_helper_resume(dev, dev_priv->suspend_state);
2704 	dev_priv->suspend_state = NULL;
2705 
2706 	return ret;
2707 }
2708 
2709 /**
2710  * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost
2711  *
2712  * @dev: Pointer to the drm device
2713  */
2714 void vmw_kms_lost_device(struct drm_device *dev)
2715 {
2716 	drm_atomic_helper_shutdown(dev);
2717 }
2718 
2719 /**
2720  * vmw_du_helper_plane_update - Helper to do plane update on a display unit.
2721  * @update: The closure structure.
2722  *
2723  * Call this helper after setting callbacks in &vmw_du_update_plane to do plane
2724  * update on display unit.
2725  *
2726  * Return: 0 on success or a negative error code on failure.
2727  */
2728 int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
2729 {
2730 	struct drm_plane_state *state = update->plane->state;
2731 	struct drm_plane_state *old_state = update->old_state;
2732 	struct drm_atomic_helper_damage_iter iter;
2733 	struct drm_rect clip;
2734 	struct drm_rect bb;
2735 	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
2736 	uint32_t reserved_size = 0;
2737 	uint32_t submit_size = 0;
2738 	uint32_t curr_size = 0;
2739 	uint32_t num_hits = 0;
2740 	void *cmd_start;
2741 	char *cmd_next;
2742 	int ret;
2743 
2744 	/*
2745 	 * Iterate in advance to check if really need plane update and find the
2746 	 * number of clips that actually are in plane src for fifo allocation.
2747 	 */
2748 	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
2749 	drm_atomic_for_each_plane_damage(&iter, &clip)
2750 		num_hits++;
2751 
2752 	if (num_hits == 0)
2753 		return 0;
2754 
2755 	if (update->vfb->bo) {
2756 		struct vmw_framebuffer_bo *vfbbo =
2757 			container_of(update->vfb, typeof(*vfbbo), base);
2758 
2759 		ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer, false,
2760 					    update->cpu_blit);
2761 	} else {
2762 		struct vmw_framebuffer_surface *vfbs =
2763 			container_of(update->vfb, typeof(*vfbs), base);
2764 
2765 		ret = vmw_validation_add_resource(&val_ctx, &vfbs->surface->res,
2766 						  0, VMW_RES_DIRTY_NONE, NULL,
2767 						  NULL);
2768 	}
2769 
2770 	if (ret)
2771 		return ret;
2772 
2773 	ret = vmw_validation_prepare(&val_ctx, update->mutex, update->intr);
2774 	if (ret)
2775 		goto out_unref;
2776 
2777 	reserved_size = update->calc_fifo_size(update, num_hits);
2778 	cmd_start = VMW_CMD_RESERVE(update->dev_priv, reserved_size);
2779 	if (!cmd_start) {
2780 		ret = -ENOMEM;
2781 		goto out_revert;
2782 	}
2783 
2784 	cmd_next = cmd_start;
2785 
2786 	if (update->post_prepare) {
2787 		curr_size = update->post_prepare(update, cmd_next);
2788 		cmd_next += curr_size;
2789 		submit_size += curr_size;
2790 	}
2791 
2792 	if (update->pre_clip) {
2793 		curr_size = update->pre_clip(update, cmd_next, num_hits);
2794 		cmd_next += curr_size;
2795 		submit_size += curr_size;
2796 	}
2797 
2798 	bb.x1 = INT_MAX;
2799 	bb.y1 = INT_MAX;
2800 	bb.x2 = INT_MIN;
2801 	bb.y2 = INT_MIN;
2802 
2803 	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
2804 	drm_atomic_for_each_plane_damage(&iter, &clip) {
2805 		uint32_t fb_x = clip.x1;
2806 		uint32_t fb_y = clip.y1;
2807 
2808 		vmw_du_translate_to_crtc(state, &clip);
2809 		if (update->clip) {
2810 			curr_size = update->clip(update, cmd_next, &clip, fb_x,
2811 						 fb_y);
2812 			cmd_next += curr_size;
2813 			submit_size += curr_size;
2814 		}
2815 		bb.x1 = min_t(int, bb.x1, clip.x1);
2816 		bb.y1 = min_t(int, bb.y1, clip.y1);
2817 		bb.x2 = max_t(int, bb.x2, clip.x2);
2818 		bb.y2 = max_t(int, bb.y2, clip.y2);
2819 	}
2820 
2821 	curr_size = update->post_clip(update, cmd_next, &bb);
2822 	submit_size += curr_size;
2823 
2824 	if (reserved_size < submit_size)
2825 		submit_size = 0;
2826 
2827 	vmw_cmd_commit(update->dev_priv, submit_size);
2828 
2829 	vmw_kms_helper_validation_finish(update->dev_priv, NULL, &val_ctx,
2830 					 update->out_fence, NULL);
2831 	return ret;
2832 
2833 out_revert:
2834 	vmw_validation_revert(&val_ctx);
2835 
2836 out_unref:
2837 	vmw_validation_unref_lists(&val_ctx);
2838 	return ret;
2839 }
2840