/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_kms.h"

/* Might need a hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)

static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb);
static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb);

void vmw_display_unit_cleanup(struct vmw_display_unit *du)
{
	if (du->cursor_surface)
		vmw_surface_unreference(&du->cursor_surface);
	if (du->cursor_dmabuf)
		vmw_dmabuf_unreference(&du->cursor_dmabuf);
	drm_crtc_cleanup(&du->crtc);
	drm_encoder_cleanup(&du->encoder);
	drm_connector_cleanup(&du->connector);
}

/*
 * Display Unit Cursor functions
 */

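/*
 * Emit an SVGA_CMD_DEFINE_ALPHA_CURSOR command. @image is assumed to
 * point at a tightly packed 32 bits-per-pixel image with alpha
 * (width * height * 4 bytes), which is copied into the FIFO right after
 * the command struct.
 */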
int vmw_cursor_update_image(struct vmw_private *dev_priv,
			    u32 *image, u32 width, u32 height,
			    u32 hotspotX, u32 hotspotY)
{
	struct {
		u32 cmd;
		SVGAFifoCmdDefineAlphaCursor cursor;
	} *cmd;
	u32 image_size = width * height * 4;
	u32 cmd_size = sizeof(*cmd) + image_size;

	if (!image)
		return -EINVAL;

	cmd = vmw_fifo_reserve(dev_priv, cmd_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, sizeof(*cmd));

	memcpy(&cmd[1], image, image_size);

	cmd->cmd = cpu_to_le32(SVGA_CMD_DEFINE_ALPHA_CURSOR);
	cmd->cursor.id = cpu_to_le32(0);
	cmd->cursor.width = cpu_to_le32(width);
	cmd->cursor.height = cpu_to_le32(height);
	cmd->cursor.hotspotX = cpu_to_le32(hotspotX);
	cmd->cursor.hotspotY = cpu_to_le32(hotspotY);

	vmw_fifo_commit(dev_priv, cmd_size);

	return 0;
}

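/*
 * Update cursor visibility and position through the FIFO cursor
 * registers. Bumping SVGA_FIFO_CURSOR_COUNT appears to be what signals
 * the device that the other cursor fields have changed.
 */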
void vmw_cursor_update_position(struct vmw_private *dev_priv,
				bool show, int x, int y)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t count;

	iowrite32(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
	iowrite32(x, fifo_mem + SVGA_FIFO_CURSOR_X);
	iowrite32(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
	count = ioread32(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
	iowrite32(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
}

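/*
 * DRM cursor_set entry point. @handle may name either a cursor surface
 * (whose snooped image is used) or a dma buffer holding the raw image;
 * a handle of zero hides the cursor.
 */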
int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
			   uint32_t handle, uint32_t width, uint32_t height)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_surface *surface = NULL;
	struct vmw_dma_buffer *dmabuf = NULL;
	int ret;

	if (handle) {
		ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
						     handle, &surface);
		if (!ret) {
			if (!surface->snooper.image) {
				DRM_ERROR("surface not suitable for cursor\n");
				/* The lookup took a reference; drop it. */
				vmw_surface_unreference(&surface);
				return -EINVAL;
			}
		} else {
			ret = vmw_user_dmabuf_lookup(tfile,
						     handle, &dmabuf);
			if (ret) {
				DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
				return -EINVAL;
			}
		}
	}

	/* takedown old cursor */
	if (du->cursor_surface) {
		du->cursor_surface->snooper.crtc = NULL;
		vmw_surface_unreference(&du->cursor_surface);
	}
	if (du->cursor_dmabuf)
		vmw_dmabuf_unreference(&du->cursor_dmabuf);

	/* setup new image */
	if (surface) {
		/* vmw_user_surface_lookup takes one reference */
		du->cursor_surface = surface;

		du->cursor_surface->snooper.crtc = crtc;
		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_cursor_update_image(dev_priv, surface->snooper.image,
					64, 64, du->hotspot_x, du->hotspot_y);
	} else if (dmabuf) {
		struct ttm_bo_kmap_obj map;
		unsigned long kmap_offset;
		unsigned long kmap_num;
		void *virtual;
		bool dummy;

		/* vmw_user_dmabuf_lookup takes one reference */
		du->cursor_dmabuf = dmabuf;

		kmap_offset = 0;
		kmap_num = (64*64*4) >> PAGE_SHIFT;

		ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
		if (unlikely(ret != 0)) {
			DRM_ERROR("reserve failed\n");
			return -EINVAL;
		}

		ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
		if (unlikely(ret != 0))
			goto err_unreserve;

		virtual = ttm_kmap_obj_virtual(&map, &dummy);
		vmw_cursor_update_image(dev_priv, virtual, 64, 64,
					du->hotspot_x, du->hotspot_y);

		ttm_bo_kunmap(&map);
err_unreserve:
		ttm_bo_unreserve(&dmabuf->base);

	} else {
		vmw_cursor_update_position(dev_priv, false, 0, 0);
		return 0;
	}

	vmw_cursor_update_position(dev_priv, true, du->cursor_x, du->cursor_y);

	return 0;
}

int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	bool shown = du->cursor_surface || du->cursor_dmabuf;

	du->cursor_x = x + crtc->x;
	du->cursor_y = y + crtc->y;

	vmw_cursor_update_position(dev_priv, shown,
				   du->cursor_x, du->cursor_y);

	return 0;
}

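/*
 * Snoop SURFACE_DMA uploads to a cursor surface, keeping a copy of the
 * 64x64 image in srf->snooper.image so it can be replayed to the device
 * cursor after command submission. Only the simple full-image layout
 * checked below is handled.
 */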
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	SVGA3dCopyBox *box;
	unsigned box_count;
	void *virtual;
	bool dummy;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_dma_cmd, header);

	/* No snooper installed */
	if (!srf->snooper.image)
		return;

	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
		DRM_ERROR("face and mipmap for cursors must be 0\n");
		return;
	}

	if (cmd->header.size < 64) {
		DRM_ERROR("at least one full copy box must be given\n");
		return;
	}

	box = (SVGA3dCopyBox *)&cmd[1];
	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
			sizeof(SVGA3dCopyBox);

	if (cmd->dma.guest.pitch != (64 * 4) ||
	    cmd->dma.guest.ptr.offset % PAGE_SIZE ||
	    box->x != 0    || box->y != 0    || box->z != 0    ||
	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
	    box->w != 64   || box->h != 64   || box->d != 1    ||
	    box_count != 1) {
		/* TODO handle non-page-aligned offsets */
		/* TODO handle partial uploads and pitch != 256 */
		/* TODO handle more than one copy box (size != 64) */
		DRM_ERROR("can't snoop this cursor DMA layout yet\n");
		return;
	}

	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
	kmap_num = (64*64*4) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(bo, true, false, false, 0);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &dummy);

	memcpy(srf->snooper.image, virtual, 64*64*4);
	srf->snooper.age++;

	/* We can't update the device cursor from here, since execbuf has
	 * already reserved fifo space.
	 *
	 * if (srf->snooper.crtc)
	 *	vmw_ldu_crtc_cursor_update_image(dev_priv,
	 *					 srf->snooper.image, 64, 64,
	 *					 du->hotspot_x, du->hotspot_y);
	 */

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(bo);
}

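/*
 * Called after command submission: for every display unit whose snooped
 * cursor image is newer than the one last sent to the device, re-emit
 * the cursor definition.
 */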
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;

	mutex_lock(&dev->mode_config.mutex);

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		du = vmw_crtc_to_du(crtc);
		if (!du->cursor_surface ||
		    du->cursor_age == du->cursor_surface->snooper.age)
			continue;

		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_cursor_update_image(dev_priv,
					du->cursor_surface->snooper.image,
					64, 64, du->hotspot_x, du->hotspot_y);
	}

	mutex_unlock(&dev->mode_config.mutex);
}

/*
 * Generic framebuffer code
 */

int vmw_framebuffer_create_handle(struct drm_framebuffer *fb,
				  struct drm_file *file_priv,
				  unsigned int *handle)
{
	if (handle)
		*handle = 0;

	return 0;
}

/*
 * Surface framebuffer code
 */

#define vmw_framebuffer_to_vfbs(x) \
	container_of(x, struct vmw_framebuffer_surface, base.base)

struct vmw_framebuffer_surface {
	struct vmw_framebuffer base;
	struct vmw_surface *surface;
	struct vmw_dma_buffer *buffer;
	struct delayed_work d_work;
	struct mutex work_lock;
	bool present_fs;
};

void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_surface *vfb =
		vmw_framebuffer_to_vfbs(framebuffer);

	cancel_delayed_work_sync(&vfb->d_work);
	drm_framebuffer_cleanup(framebuffer);
	vmw_surface_unreference(&vfb->surface);

	kfree(framebuffer);
}

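/*
 * Delayed-work callback: emit a full-screen SVGA_3D_CMD_PRESENT of the
 * surface if present_fs was set by the dirty callback, then reschedule
 * once more; a run that finds present_fs clear does nothing and lets
 * the work expire.
 */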
static void vmw_framebuffer_present_fs_callback(struct work_struct *work)
{
	struct delayed_work *d_work =
		container_of(work, struct delayed_work, work);
	struct vmw_framebuffer_surface *vfbs =
		container_of(d_work, struct vmw_framebuffer_surface, d_work);
	struct vmw_surface *surf = vfbs->surface;
	struct drm_framebuffer *framebuffer = &vfbs->base.base;
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
		SVGA3dCopyRect cr;
	} *cmd;

	mutex_lock(&vfbs->work_lock);
	if (!vfbs->present_fs)
		goto out_unlock;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		goto out_resched;

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body) + sizeof(cmd->cr));
	cmd->body.sid = cpu_to_le32(surf->res.id);
	cmd->cr.x = cpu_to_le32(0);
	cmd->cr.y = cpu_to_le32(0);
	cmd->cr.srcx = cmd->cr.x;
	cmd->cr.srcy = cmd->cr.y;
	cmd->cr.w = cpu_to_le32(framebuffer->width);
	cmd->cr.h = cpu_to_le32(framebuffer->height);
	vfbs->present_fs = false;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
out_resched:
	/* Will not re-add if already pending. */
	schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE);
out_unlock:
	mutex_unlock(&vfbs->work_lock);
}
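/*
 * Dirty callback for surface-backed framebuffers. Without screen-object
 * FIFO support (or when called without clip rects) we fall back to the
 * throttled full-screen present above; otherwise a PRESENT command with
 * one SVGA3dCopyRect per clip rect is emitted.
 */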
int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
				  unsigned flags, unsigned color,
				  struct drm_clip_rect *clips,
				  unsigned num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);
	struct vmw_surface *surf = vfbs->surface;
	struct drm_clip_rect norect;
	SVGA3dCopyRect *cr;
	int i, inc = 1;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
		SVGA3dCopyRect cr;
	} *cmd;

	if (!num_clips ||
	    !(dev_priv->fifo.capabilities &
	      SVGA_FIFO_CAP_SCREEN_OBJECT)) {
		int ret;

		mutex_lock(&vfbs->work_lock);
		vfbs->present_fs = true;
		ret = schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE);
		mutex_unlock(&vfbs->work_lock);
		if (ret) {
			/*
			 * The work was not previously queued; present
			 * immediately rather than waiting for the timer.
			 */
			vmw_framebuffer_present_fs_callback(&vfbs->d_work.work);
		}
		return 0;
	}

	if (!num_clips) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = framebuffer->width;
		norect.y2 = framebuffer->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		num_clips /= 2;
		inc = 2; /* skip source rects */
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, sizeof(*cmd));

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body) + num_clips * sizeof(cmd->cr));
	cmd->body.sid = cpu_to_le32(surf->res.id);

	for (i = 0, cr = &cmd->cr; i < num_clips; i++, cr++, clips += inc) {
		cr->x = cpu_to_le16(clips->x1);
		cr->y = cpu_to_le16(clips->y1);
		cr->srcx = cr->x;
		cr->srcy = cr->y;
		cr->w = cpu_to_le16(clips->x2 - clips->x1);
		cr->h = cpu_to_le16(clips->y2 - clips->y1);
	}

	vmw_fifo_commit(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));

	return 0;
}

static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
	.destroy = vmw_framebuffer_surface_destroy,
	.dirty = vmw_framebuffer_surface_dirty,
	.create_handle = vmw_framebuffer_create_handle,
};

int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
				    struct vmw_surface *surface,
				    struct vmw_framebuffer **out,
				    unsigned width, unsigned height)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_framebuffer_surface *vfbs;
	int ret;

	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
	if (!vfbs) {
		ret = -ENOMEM;
		goto out_err1;
	}

	ret = drm_framebuffer_init(dev, &vfbs->base.base,
				   &vmw_framebuffer_surface_funcs);
	if (ret)
		goto out_err2;

	if (!vmw_surface_reference(surface)) {
		DRM_ERROR("failed to reference surface %p\n", surface);
		ret = -EINVAL;
		goto out_err3;
	}

	/* XXX get bpp, pitch and depth from the surface info */
	vfbs->base.base.bits_per_pixel = 32;
	vfbs->base.base.pitch = width * 32 / 8;
	vfbs->base.base.depth = 24;
	vfbs->base.base.width = width;
	vfbs->base.base.height = height;
	vfbs->base.pin = &vmw_surface_dmabuf_pin;
	vfbs->base.unpin = &vmw_surface_dmabuf_unpin;
	vfbs->surface = surface;
	mutex_init(&vfbs->work_lock);
	INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback);
	*out = &vfbs->base;

	return 0;

out_err3:
	drm_framebuffer_cleanup(&vfbs->base.base);
out_err2:
	kfree(vfbs);
out_err1:
	return ret;
}

/*
 * Dmabuf framebuffer code
 */

#define vmw_framebuffer_to_vfbd(x) \
	container_of(x, struct vmw_framebuffer_dmabuf, base.base)

struct vmw_framebuffer_dmabuf {
	struct vmw_framebuffer base;
	struct vmw_dma_buffer *buffer;
};

void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_dmabuf_unreference(&vfbd->buffer);

	kfree(vfbd);
}

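/*
 * Dirty callback for dmabuf-backed framebuffers: emit one SVGA_CMD_UPDATE
 * per clip rect to flush the damaged regions to the host.
 */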
int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
				 unsigned flags, unsigned color,
				 struct drm_clip_rect *clips,
				 unsigned num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
	struct drm_clip_rect norect;
	struct {
		uint32_t header;
		SVGAFifoCmdUpdate body;
	} *cmd;
	int i, increment = 1;

	if (!num_clips) {
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = framebuffer->width;
		norect.y2 = framebuffer->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		num_clips /= 2;
		increment = 2;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	for (i = 0; i < num_clips; i++, clips += increment) {
		cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE);
		cmd[i].body.x = cpu_to_le32(clips->x1);
		cmd[i].body.y = cpu_to_le32(clips->y1);
		cmd[i].body.width = cpu_to_le32(clips->x2 - clips->x1);
		cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1);
	}

	vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips);

	return 0;
}

static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
	.destroy = vmw_framebuffer_dmabuf_destroy,
	.dirty = vmw_framebuffer_dmabuf_dirty,
	.create_handle = vmw_framebuffer_create_handle,
};

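/*
 * Pin callback for surface-backed framebuffers: allocate a VRAM buffer
 * big enough to scan out of. Overlays are paused around the allocation,
 * presumably so their buffers don't compete for VRAM while the
 * framebuffer is being placed.
 */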
static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(&vfb->base);
	unsigned long size = vfbs->base.base.pitch * vfbs->base.base.height;
	int ret;

	vfbs->buffer = kzalloc(sizeof(*vfbs->buffer), GFP_KERNEL);
	if (unlikely(vfbs->buffer == NULL))
		return -ENOMEM;

	vmw_overlay_pause_all(dev_priv);
	ret = vmw_dmabuf_init(dev_priv, vfbs->buffer, size,
			       &vmw_vram_ne_placement,
			       false, &vmw_dmabuf_bo_free);
	vmw_overlay_resume_all(dev_priv);

	return ret;
}

static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb)
{
	struct ttm_buffer_object *bo;
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(&vfb->base);

	bo = &vfbs->buffer->base;
	ttm_bo_unref(&bo);
	vfbs->buffer = NULL;

	return 0;
}

static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(&vfb->base);
	int ret;

	vmw_overlay_pause_all(dev_priv);

	ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer);

	vmw_overlay_resume_all(dev_priv);

	WARN_ON(ret != 0);

	return 0;
}

static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(&vfb->base);

	if (WARN_ON(!vfbd->buffer))
		return 0;

	return vmw_dmabuf_from_vram(dev_priv, vfbd->buffer);
}

int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
				   struct vmw_dma_buffer *dmabuf,
				   struct vmw_framebuffer **out,
				   unsigned width, unsigned height)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_framebuffer_dmabuf *vfbd;
	int ret;

	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
	if (!vfbd) {
		ret = -ENOMEM;
		goto out_err1;
	}

	ret = drm_framebuffer_init(dev, &vfbd->base.base,
				   &vmw_framebuffer_dmabuf_funcs);
	if (ret)
		goto out_err2;

	if (!vmw_dmabuf_reference(dmabuf)) {
		DRM_ERROR("failed to reference dmabuf %p\n", dmabuf);
		ret = -EINVAL;
		goto out_err3;
	}

	/* XXX get bpp, pitch and depth from the buffer info */
	vfbd->base.base.bits_per_pixel = 32;
	vfbd->base.base.pitch = width * vfbd->base.base.bits_per_pixel / 8;
	vfbd->base.base.depth = 24;
	vfbd->base.base.width = width;
	vfbd->base.base.height = height;
	vfbd->base.pin = vmw_framebuffer_dmabuf_pin;
	vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin;
	vfbd->buffer = dmabuf;
	*out = &vfbd->base;

	return 0;

out_err3:
	drm_framebuffer_cleanup(&vfbd->base.base);
out_err2:
	kfree(vfbd);
out_err1:
	return ret;
}

/*
 * Generic Kernel modesetting functions
 */

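/*
 * fb_create hook: the user handle is tried as a surface first and,
 * failing that, as a dma buffer, and the matching framebuffer wrapper
 * is created.
 */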
static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
						 struct drm_file *file_priv,
						 struct drm_mode_fb_cmd *mode_cmd)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_framebuffer *vfb = NULL;
	struct vmw_surface *surface = NULL;
	struct vmw_dma_buffer *bo = NULL;
	int ret;

	ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
					     mode_cmd->handle, &surface);
	if (ret)
		goto try_dmabuf;

	if (!surface->scanout)
		goto err_not_scanout;

	ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
					      mode_cmd->width, mode_cmd->height);

	/* vmw_user_surface_lookup took one ref, and new_fb holds its own */
	vmw_surface_unreference(&surface);

	if (ret) {
		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
		return NULL;
	}
	return &vfb->base;

try_dmabuf:
	DRM_INFO("%s: trying buffer\n", __func__);

	ret = vmw_user_dmabuf_lookup(tfile, mode_cmd->handle, &bo);
	if (ret) {
		DRM_ERROR("failed to find buffer: %i\n", ret);
		return NULL;
	}

	ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
					     mode_cmd->width, mode_cmd->height);

	/* vmw_user_dmabuf_lookup took one ref, and new_fb holds its own */
	vmw_dmabuf_unreference(&bo);

	if (ret) {
		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
		return NULL;
	}

	return &vfb->base;

err_not_scanout:
	DRM_ERROR("surface not marked as scanout\n");
	/* vmw_user_surface_lookup takes one ref */
	vmw_surface_unreference(&surface);

	return NULL;
}

static struct drm_mode_config_funcs vmw_kms_funcs = {
	.fb_create = vmw_kms_fb_create,
};

int vmw_kms_init(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	drm_mode_config_init(dev);
	dev->mode_config.funcs = &vmw_kms_funcs;
	dev->mode_config.min_width = 1;
	dev->mode_config.min_height = 1;
	/* assumed largest fb size */
	dev->mode_config.max_width = 8192;
	dev->mode_config.max_height = 8192;

	return vmw_kms_init_legacy_display_system(dev_priv);
}

int vmw_kms_close(struct vmw_private *dev_priv)
{
	/*
	 * The docs say we should take the mode_config lock before calling
	 * this function, but drm_mode_config_cleanup() destroys encoders,
	 * and our encoder destructor calls drm_encoder_cleanup(), which
	 * itself takes the lock, so doing that would deadlock.
	 */
	drm_mode_config_cleanup(dev_priv->dev);
	vmw_kms_close_legacy_display_system(dev_priv);
	return 0;
}

int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_vmw_cursor_bypass_arg *arg = data;
	struct vmw_display_unit *du;
	struct drm_mode_object *obj;
	struct drm_crtc *crtc;
	int ret = 0;

	mutex_lock(&dev->mode_config.mutex);
	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {

		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			du = vmw_crtc_to_du(crtc);
			du->hotspot_x = arg->xhot;
			du->hotspot_y = arg->yhot;
		}

		mutex_unlock(&dev->mode_config.mutex);
		return 0;
	}

	obj = drm_mode_object_find(dev, arg->crtc_id, DRM_MODE_OBJECT_CRTC);
	if (!obj) {
		ret = -EINVAL;
		goto out;
	}

	crtc = obj_to_crtc(obj);
	du = vmw_crtc_to_du(crtc);

	du->hotspot_x = arg->xhot;
	du->hotspot_y = arg->yhot;

out:
	mutex_unlock(&dev->mode_config.mutex);

	return ret;
}

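/*
 * Program the legacy SVGA registers for a single-screen mode. Pitch goes
 * through either the PITCHLOCK register or the FIFO pitchlock field,
 * depending on device capabilities; the color masks written below assume
 * an XRGB8888 layout.
 */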
void vmw_kms_write_svga(struct vmw_private *vmw_priv,
			unsigned width, unsigned height, unsigned pitch,
			unsigned bpp, unsigned depth)
{
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		iowrite32(pitch, vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
	vmw_write(vmw_priv, SVGA_REG_DEPTH, depth);
	vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
	vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
	vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
}

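/*
 * Save the register state that vmw_kms_restore_vga() needs to bring the
 * console back, including the per-display geometry when the device
 * exposes a guest display topology.
 */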
int vmw_kms_save_vga(struct vmw_private *vmw_priv)
{
	struct vmw_vga_topology_state *save;
	uint32_t i;

	vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
	vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
	vmw_priv->vga_depth = vmw_read(vmw_priv, SVGA_REG_DEPTH);
	vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
	vmw_priv->vga_pseudo = vmw_read(vmw_priv, SVGA_REG_PSEUDOCOLOR);
	vmw_priv->vga_red_mask = vmw_read(vmw_priv, SVGA_REG_RED_MASK);
	vmw_priv->vga_blue_mask = vmw_read(vmw_priv, SVGA_REG_BLUE_MASK);
	vmw_priv->vga_green_mask = vmw_read(vmw_priv, SVGA_REG_GREEN_MASK);
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_priv->vga_pitchlock =
			vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		vmw_priv->vga_pitchlock = ioread32(vmw_priv->mmio_virt +
						   SVGA_FIFO_PITCHLOCK);

	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
		return 0;

	vmw_priv->num_displays = vmw_read(vmw_priv,
					  SVGA_REG_NUM_GUEST_DISPLAYS);

	for (i = 0; i < vmw_priv->num_displays; ++i) {
		save = &vmw_priv->vga_save[i];
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
		save->primary = vmw_read(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY);
		save->pos_x = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_X);
		save->pos_y = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y);
		save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
		save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
	}
	return 0;
}

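/*
 * Restore the register state captured by vmw_kms_save_vga(), including
 * the guest display topology if the device supports one.
 */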
int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
{
	struct vmw_vga_topology_state *save;
	uint32_t i;

	vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
	vmw_write(vmw_priv, SVGA_REG_DEPTH, vmw_priv->vga_depth);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
	vmw_write(vmw_priv, SVGA_REG_PSEUDOCOLOR, vmw_priv->vga_pseudo);
	vmw_write(vmw_priv, SVGA_REG_RED_MASK, vmw_priv->vga_red_mask);
	vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, vmw_priv->vga_green_mask);
	vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, vmw_priv->vga_blue_mask);
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
			  vmw_priv->vga_pitchlock);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		iowrite32(vmw_priv->vga_pitchlock,
			  vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);

	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
		return 0;

	for (i = 0; i < vmw_priv->num_displays; ++i) {
		save = &vmw_priv->vga_save[i];
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, save->primary);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, save->pos_x);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, save->pos_y);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, save->width);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, save->height);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
	}

	return 0;
}

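/*
 * Ioctl passing a user-supplied array of output rectangles on to the
 * legacy display unit layout code; with num_outputs == 0 a single
 * 800x600 default rect is used instead.
 */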
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_update_layout_arg *arg =
		(struct drm_vmw_update_layout_arg *)data;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	void __user *user_rects;
	struct drm_vmw_rect *rects;
	unsigned rects_size;
	int ret;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	if (!arg->num_outputs) {
		struct drm_vmw_rect def_rect = {0, 0, 800, 600};
		vmw_kms_ldu_update_layout(dev_priv, 1, &def_rect);
		goto out_unlock;
	}

	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
	rects = kzalloc(rects_size, GFP_KERNEL);
	if (unlikely(!rects)) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	user_rects = (void __user *)(unsigned long)arg->rects;
	ret = copy_from_user(rects, user_rects, rects_size);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to get rects.\n");
		/* copy_from_user() returns bytes not copied, not an errno. */
		ret = -EFAULT;
		goto out_free;
	}

	vmw_kms_ldu_update_layout(dev_priv, arg->num_outputs, rects);

out_free:
	kfree(rects);
out_unlock:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}