// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_kms.h"

#include "vmwgfx_bo.h"
#include "vmw_surface_cache.h"

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_rect.h>
#include <drm/drm_sysfs.h>
#include <drm/drm_edid.h>

void vmw_du_cleanup(struct vmw_display_unit *du)
{
        struct vmw_private *dev_priv = vmw_priv(du->primary.dev);

        drm_plane_cleanup(&du->primary);
        if (vmw_cmd_supported(dev_priv))
                drm_plane_cleanup(&du->cursor.base);

        drm_connector_unregister(&du->connector);
        drm_crtc_cleanup(&du->crtc);
        drm_encoder_cleanup(&du->encoder);
        drm_connector_cleanup(&du->connector);
}

/*
 * Display Unit Cursor functions
 */

static int vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps);
static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
                                  struct vmw_plane_state *vps,
                                  u32 *image, u32 width, u32 height,
                                  u32 hotspotX, u32 hotspotY);

struct vmw_svga_fifo_cmd_define_cursor {
        u32 cmd;
        SVGAFifoCmdDefineAlphaCursor cursor;
};
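
/*
 * Layout sketch of the define-cursor FIFO command as built below (field
 * widths assumed from the struct definitions, for illustration only):
 *
 *   u32 cmd                       SVGA_CMD_DEFINE_ALPHA_CURSOR
 *   SVGAFifoCmdDefineAlphaCursor  id, width, height, hotspotX, hotspotY
 *   u32 image[width * height]     32bpp cursor image, appended after *cmd
 *
 * vmw_send_define_cursor_cmd() reserves sizeof(*cmd) plus the image size
 * in one go and copies the pixels directly behind the struct (&cmd[1]).
 */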

/**
 * vmw_send_define_cursor_cmd - queue a define cursor command
 * @dev_priv: the private driver struct
 * @image: buffer which holds the cursor image
 * @width: width of the mouse cursor image
 * @height: height of the mouse cursor image
 * @hotspotX: the horizontal position of the mouse hotspot
 * @hotspotY: the vertical position of the mouse hotspot
 */
static void vmw_send_define_cursor_cmd(struct vmw_private *dev_priv,
                                       u32 *image, u32 width, u32 height,
                                       u32 hotspotX, u32 hotspotY)
{
        struct vmw_svga_fifo_cmd_define_cursor *cmd;
        const u32 image_size = width * height * sizeof(*image);
        const u32 cmd_size = sizeof(*cmd) + image_size;

        /*
         * Try to reserve fifocmd space and swallow any failures;
         * such reservations cannot be left unconsumed for long
         * under the risk of clogging other fifocmd users, so
         * we treat reservations separately from the way we treat
         * other fallible KMS-atomic resources at prepare_fb.
         */
        cmd = VMW_CMD_RESERVE(dev_priv, cmd_size);

        if (unlikely(!cmd))
                return;

        memset(cmd, 0, sizeof(*cmd));

        memcpy(&cmd[1], image, image_size);

        cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR;
        cmd->cursor.id = 0;
        cmd->cursor.width = width;
        cmd->cursor.height = height;
        cmd->cursor.hotspotX = hotspotX;
        cmd->cursor.hotspotY = hotspotY;

        vmw_cmd_commit_flush(dev_priv, cmd_size);
}

/**
 * vmw_cursor_update_image - update the cursor image on the provided plane
 * @dev_priv: the private driver struct
 * @vps: the plane state of the cursor plane
 * @image: buffer which holds the cursor image
 * @width: width of the mouse cursor image
 * @height: height of the mouse cursor image
 * @hotspotX: the horizontal position of the mouse hotspot
 * @hotspotY: the vertical position of the mouse hotspot
 */
static void vmw_cursor_update_image(struct vmw_private *dev_priv,
                                    struct vmw_plane_state *vps,
                                    u32 *image, u32 width, u32 height,
                                    u32 hotspotX, u32 hotspotY)
{
        if (vps->cursor.bo)
                vmw_cursor_update_mob(dev_priv, vps, image,
                                      vps->base.crtc_w, vps->base.crtc_h,
                                      hotspotX, hotspotY);
        else
                vmw_send_define_cursor_cmd(dev_priv, image, width, height,
                                           hotspotX, hotspotY);
}


/**
 * vmw_cursor_update_mob - Update the cursor via the CursorMob mechanism
 *
 * Called from inside vmw_du_cursor_plane_atomic_update to actually
 * make the cursor image live.
 *
 * @dev_priv: device to work with
 * @vps: the plane state of the cursor plane
 * @image: cursor source data to fill the MOB with
 * @width: source data width
 * @height: source data height
 * @hotspotX: cursor hotspot x
 * @hotspotY: cursor hotspot y
 */
static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
                                  struct vmw_plane_state *vps,
                                  u32 *image, u32 width, u32 height,
                                  u32 hotspotX, u32 hotspotY)
{
        SVGAGBCursorHeader *header;
        SVGAGBAlphaCursorHeader *alpha_header;
        const u32 image_size = width * height * sizeof(*image);

        header = vmw_bo_map_and_cache(vps->cursor.bo);
        alpha_header = &header->header.alphaHeader;

        memset(header, 0, sizeof(*header));

        header->type = SVGA_ALPHA_CURSOR;
        header->sizeInBytes = image_size;

        alpha_header->hotspotX = hotspotX;
        alpha_header->hotspotY = hotspotY;
        alpha_header->width = width;
        alpha_header->height = height;

        memcpy(header + 1, image, image_size);
        vmw_write(dev_priv, SVGA_REG_CURSOR_MOBID,
                  vps->cursor.bo->tbo.resource->start);
}
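
/*
 * The mapped cursor MOB written above thus holds an SVGAGBCursorHeader
 * (type, sizeInBytes and the alpha header carrying hotspot and dimensions)
 * followed immediately by width * height u32 pixels at header + 1. The
 * SVGA_REG_CURSOR_MOBID write then points the device at the MOB, making
 * the new image live.
 */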

static u32 vmw_du_cursor_mob_size(u32 w, u32 h)
{
        return w * h * sizeof(u32) + sizeof(SVGAGBCursorHeader);
}
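
/*
 * For example, the common 64x64 ARGB cursor needs
 * 64 * 64 * sizeof(u32) = 16384 bytes of image data plus
 * sizeof(SVGAGBCursorHeader) for the MOB.
 */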

/**
 * vmw_du_cursor_plane_acquire_image -- Acquire the image data
 * @vps: cursor plane state
 */
static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps)
{
        if (vps->surf) {
                if (vps->surf_mapped)
                        return vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo);
                return vps->surf->snooper.image;
        } else if (vps->bo)
                return vmw_bo_map_and_cache(vps->bo);
        return NULL;
}

static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps,
                                            struct vmw_plane_state *new_vps)
{
        void *old_image;
        void *new_image;
        u32 size;
        bool changed;

        if (old_vps->base.crtc_w != new_vps->base.crtc_w ||
            old_vps->base.crtc_h != new_vps->base.crtc_h)
                return true;

        if (old_vps->cursor.hotspot_x != new_vps->cursor.hotspot_x ||
            old_vps->cursor.hotspot_y != new_vps->cursor.hotspot_y)
                return true;

        size = new_vps->base.crtc_w * new_vps->base.crtc_h * sizeof(u32);

        old_image = vmw_du_cursor_plane_acquire_image(old_vps);
        new_image = vmw_du_cursor_plane_acquire_image(new_vps);

        changed = false;
        if (old_image && new_image && old_image != new_image)
                changed = memcmp(old_image, new_image, size) != 0;

        return changed;
}

static void vmw_du_destroy_cursor_mob(struct vmw_bo **vbo)
{
        if (!(*vbo))
                return;

        ttm_bo_unpin(&(*vbo)->tbo);
        vmw_bo_unreference(vbo);
}

static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp,
                                  struct vmw_plane_state *vps)
{
        u32 i;

        if (!vps->cursor.bo)
                return;

        vmw_du_cursor_plane_unmap_cm(vps);

        /* Look for a free slot to return this mob to the cache. */
        for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
                if (!vcp->cursor_mobs[i]) {
                        vcp->cursor_mobs[i] = vps->cursor.bo;
                        vps->cursor.bo = NULL;
                        return;
                }
        }

        /* Cache is full: See if this mob is bigger than an existing mob. */
        for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
                if (vcp->cursor_mobs[i]->tbo.base.size <
                    vps->cursor.bo->tbo.base.size) {
                        vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
                        vcp->cursor_mobs[i] = vps->cursor.bo;
                        vps->cursor.bo = NULL;
                        return;
                }
        }

        /* Destroy it if it's not worth caching. */
        vmw_du_destroy_cursor_mob(&vps->cursor.bo);
}

static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp,
                                 struct vmw_plane_state *vps)
{
        struct vmw_private *dev_priv = vcp->base.dev->dev_private;
        u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
        u32 i;
        u32 cursor_max_dim, mob_max_size;
        struct vmw_fence_obj *fence = NULL;
        int ret;

        if (!dev_priv->has_mob ||
            (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0)
                return -EINVAL;

        mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
        cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION);

        if (size > mob_max_size || vps->base.crtc_w > cursor_max_dim ||
            vps->base.crtc_h > cursor_max_dim)
                return -EINVAL;

        if (vps->cursor.bo) {
                if (vps->cursor.bo->tbo.base.size >= size)
                        return 0;
                vmw_du_put_cursor_mob(vcp, vps);
        }

        /* Look for an unused mob in the cache. */
        for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
                if (vcp->cursor_mobs[i] &&
                    vcp->cursor_mobs[i]->tbo.base.size >= size) {
                        vps->cursor.bo = vcp->cursor_mobs[i];
                        vcp->cursor_mobs[i] = NULL;
                        return 0;
                }
        }

        /* Create a new mob if we can't find an existing one. */
        ret = vmw_bo_create_and_populate(dev_priv, size,
                                         VMW_BO_DOMAIN_MOB,
                                         &vps->cursor.bo);
        if (ret != 0)
                return ret;

        /* Fence the mob creation so we are guaranteed to have the mob. */
        ret = ttm_bo_reserve(&vps->cursor.bo->tbo, false, false, NULL);
        if (ret != 0)
                goto teardown;

        ret = vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
        if (ret != 0) {
                ttm_bo_unreserve(&vps->cursor.bo->tbo);
                goto teardown;
        }

        dma_fence_wait(&fence->base, false);
        dma_fence_put(&fence->base);

        ttm_bo_unreserve(&vps->cursor.bo->tbo);
        return 0;

teardown:
        vmw_du_destroy_cursor_mob(&vps->cursor.bo);
        return ret;
}
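
/*
 * vmw_du_get_cursor_mob() and vmw_du_put_cursor_mob() together implement
 * a small fixed-size mob cache: get() reuses the mob already attached to
 * the plane state when it is large enough, then searches the cache for a
 * large-enough entry, and only allocates (and fences) a new mob as a last
 * resort; put() returns a mob to a free slot, replaces a smaller cached
 * entry when the cache is full, and destroys the mob otherwise.
 */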

static void vmw_cursor_update_position(struct vmw_private *dev_priv,
                                       bool show, int x, int y)
{
        const uint32_t svga_cursor_on = show ? SVGA_CURSOR_ON_SHOW
                                             : SVGA_CURSOR_ON_HIDE;
        uint32_t count;

        spin_lock(&dev_priv->cursor_lock);
        if (dev_priv->capabilities2 & SVGA_CAP2_EXTRA_REGS) {
                vmw_write(dev_priv, SVGA_REG_CURSOR4_X, x);
                vmw_write(dev_priv, SVGA_REG_CURSOR4_Y, y);
                vmw_write(dev_priv, SVGA_REG_CURSOR4_SCREEN_ID, SVGA3D_INVALID_ID);
                vmw_write(dev_priv, SVGA_REG_CURSOR4_ON, svga_cursor_on);
                vmw_write(dev_priv, SVGA_REG_CURSOR4_SUBMIT, 1);
        } else if (vmw_is_cursor_bypass3_enabled(dev_priv)) {
                vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, svga_cursor_on);
                vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
                vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
                count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
                vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
        } else {
                vmw_write(dev_priv, SVGA_REG_CURSOR_X, x);
                vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y);
                vmw_write(dev_priv, SVGA_REG_CURSOR_ON, svga_cursor_on);
        }
        spin_unlock(&dev_priv->cursor_lock);
}
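
/*
 * The three branches above are tried in order of preference: the
 * SVGA_REG_CURSOR4_* register set (SVGA_CAP2_EXTRA_REGS), the FIFO
 * cursor-bypass-3 fields, and finally the legacy SVGA_REG_CURSOR_*
 * registers. All of them only move or hide the cursor; the image itself
 * is supplied separately via the cursor MOB or the define-cursor command.
 */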

void vmw_kms_cursor_snoop(struct vmw_surface *srf,
                          struct ttm_object_file *tfile,
                          struct ttm_buffer_object *bo,
                          SVGA3dCmdHeader *header)
{
        struct ttm_bo_kmap_obj map;
        unsigned long kmap_offset;
        unsigned long kmap_num;
        SVGA3dCopyBox *box;
        unsigned int box_count;
        void *virtual;
        bool is_iomem;
        struct vmw_dma_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceDMA dma;
        } *cmd;
        int i, ret;
        const struct SVGA3dSurfaceDesc *desc =
                vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT);
        const u32 image_pitch = VMW_CURSOR_SNOOP_WIDTH * desc->pitchBytesPerBlock;

        cmd = container_of(header, struct vmw_dma_cmd, header);

        /* No snooper installed, nothing to copy */
        if (!srf->snooper.image)
                return;

        if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
                DRM_ERROR("face and mipmap for cursors should never != 0\n");
                return;
        }

        if (cmd->header.size < 64) {
                DRM_ERROR("at least one full copy box must be given\n");
                return;
        }

        box = (SVGA3dCopyBox *)&cmd[1];
        box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
                    sizeof(SVGA3dCopyBox);

        if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
            box->x != 0 || box->y != 0 || box->z != 0 ||
            box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
            box->d != 1 || box_count != 1 ||
            box->w > VMW_CURSOR_SNOOP_WIDTH || box->h > VMW_CURSOR_SNOOP_HEIGHT) {
                /* TODO handle non-page-aligned offsets */
                /* TODO handle more dst & src != 0 */
                /* TODO handle more than one copy */
                DRM_ERROR("Can't snoop dma request for cursor!\n");
                DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
                          box->srcx, box->srcy, box->srcz,
                          box->x, box->y, box->z,
                          box->w, box->h, box->d, box_count,
                          cmd->dma.guest.ptr.offset);
                return;
        }

        kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
        kmap_num = (VMW_CURSOR_SNOOP_HEIGHT * image_pitch) >> PAGE_SHIFT;

        ret = ttm_bo_reserve(bo, true, false, NULL);
        if (unlikely(ret != 0)) {
                DRM_ERROR("reserve failed\n");
                return;
        }

        ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
        if (unlikely(ret != 0))
                goto err_unreserve;

        virtual = ttm_kmap_obj_virtual(&map, &is_iomem);

        if (box->w == VMW_CURSOR_SNOOP_WIDTH && cmd->dma.guest.pitch == image_pitch) {
                memcpy(srf->snooper.image, virtual,
                       VMW_CURSOR_SNOOP_HEIGHT * image_pitch);
        } else {
                /* Image is unsigned pointer. */
                for (i = 0; i < box->h; i++)
                        memcpy(srf->snooper.image + i * image_pitch,
                               virtual + i * cmd->dma.guest.pitch,
                               box->w * desc->pitchBytesPerBlock);
        }

        srf->snooper.age++;

        ttm_bo_kunmap(&map);
err_unreserve:
        ttm_bo_unreserve(bo);
}
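
/*
 * Example of the pitch math in vmw_kms_cursor_snoop(), assuming for
 * illustration a 64x64 snoop surface at 4 bytes per block
 * (VMW_CURSOR_SNOOP_WIDTH == 64 and pitchBytesPerBlock == 4):
 * image_pitch = 64 * 4 = 256 bytes, so the fast path copies
 * VMW_CURSOR_SNOOP_HEIGHT * 256 bytes in a single memcpy, while the
 * fallback loop copies box->w * 4 bytes per row to cope with a guest
 * pitch that differs from the snooper image pitch.
 */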

/**
 * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots
 *
 * @dev_priv: Pointer to the device private struct.
 *
 * Clears all legacy hotspots.
 */
void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv)
{
        struct drm_device *dev = &dev_priv->drm;
        struct vmw_display_unit *du;
        struct drm_crtc *crtc;

        drm_modeset_lock_all(dev);
        drm_for_each_crtc(crtc, dev) {
                du = vmw_crtc_to_du(crtc);

                du->hotspot_x = 0;
                du->hotspot_y = 0;
        }
        drm_modeset_unlock_all(dev);
}

void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
{
        struct drm_device *dev = &dev_priv->drm;
        struct vmw_display_unit *du;
        struct drm_crtc *crtc;

        mutex_lock(&dev->mode_config.mutex);

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                du = vmw_crtc_to_du(crtc);
                if (!du->cursor_surface ||
                    du->cursor_age == du->cursor_surface->snooper.age ||
                    !du->cursor_surface->snooper.image)
                        continue;

                du->cursor_age = du->cursor_surface->snooper.age;
                vmw_send_define_cursor_cmd(dev_priv,
                                           du->cursor_surface->snooper.image,
                                           VMW_CURSOR_SNOOP_WIDTH,
                                           VMW_CURSOR_SNOOP_HEIGHT,
                                           du->hotspot_x + du->core_hotspot_x,
                                           du->hotspot_y + du->core_hotspot_y);
        }

        mutex_unlock(&dev->mode_config.mutex);
}


void vmw_du_cursor_plane_destroy(struct drm_plane *plane)
{
        struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
        u32 i;

        vmw_cursor_update_position(plane->dev->dev_private, false, 0, 0);

        for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++)
                vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);

        drm_plane_cleanup(plane);
}


void vmw_du_primary_plane_destroy(struct drm_plane *plane)
{
        drm_plane_cleanup(plane);

        /* Planes are static in our case so we don't free it. */
}


/**
 * vmw_du_plane_unpin_surf - unpins resource associated with a framebuffer surface
 *
 * @vps: plane state associated with the display surface
 * @unreference: true if we also want to unreference the display surface.
 */
void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps,
                             bool unreference)
{
        if (vps->surf) {
                if (vps->pinned) {
                        vmw_resource_unpin(&vps->surf->res);
                        vps->pinned--;
                }

                if (unreference) {
                        if (vps->pinned)
                                DRM_ERROR("Surface still pinned\n");
                        vmw_surface_unreference(&vps->surf);
                }
        }
}


/**
 * vmw_du_plane_cleanup_fb - Unpins the plane surface
 *
 * @plane: display plane
 * @old_state: Contains the FB to clean up
 *
 * Unpins the framebuffer surface.
 */
void
vmw_du_plane_cleanup_fb(struct drm_plane *plane,
                        struct drm_plane_state *old_state)
{
        struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);

        vmw_du_plane_unpin_surf(vps, false);
}


/**
 * vmw_du_cursor_plane_map_cm - Maps the cursor mobs.
 *
 * @vps: plane_state
 *
 * Returns 0 on success
 */
static int
vmw_du_cursor_plane_map_cm(struct vmw_plane_state *vps)
{
        int ret;
        u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
        struct ttm_buffer_object *bo;

        if (!vps->cursor.bo)
                return -EINVAL;

        bo = &vps->cursor.bo->tbo;

        if (bo->base.size < size)
                return -EINVAL;

        if (vps->cursor.bo->map.virtual)
                return 0;

        ret = ttm_bo_reserve(bo, false, false, NULL);
        if (unlikely(ret != 0))
                return -ENOMEM;

        vmw_bo_map_and_cache(vps->cursor.bo);

        ttm_bo_unreserve(bo);

        return 0;
}


/**
 * vmw_du_cursor_plane_unmap_cm - Unmaps the cursor mobs.
 *
 * @vps: state of the cursor plane
 *
 * Returns 0 on success
 */
static int
vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps)
{
        int ret = 0;
        struct vmw_bo *vbo = vps->cursor.bo;

        if (!vbo || !vbo->map.virtual)
                return 0;

        ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL);
        if (likely(ret == 0)) {
                vmw_bo_unmap(vbo);
                ttm_bo_unreserve(&vbo->tbo);
        }

        return ret;
}


/**
 * vmw_du_cursor_plane_cleanup_fb - Unpins the plane surface
 *
 * @plane: cursor plane
 * @old_state: contains the state to clean up
 *
 * Unmaps all cursor bo mappings and unpins the cursor surface.
 */
void
vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
                               struct drm_plane_state *old_state)
{
        struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
        struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);

        if (vps->surf_mapped) {
                vmw_bo_unmap(vps->surf->res.guest_memory_bo);
                vps->surf_mapped = false;
        }

        vmw_du_cursor_plane_unmap_cm(vps);
        vmw_du_put_cursor_mob(vcp, vps);

        vmw_du_plane_unpin_surf(vps, false);

        if (vps->surf) {
                vmw_surface_unreference(&vps->surf);
                vps->surf = NULL;
        }

        if (vps->bo) {
                vmw_bo_unreference(&vps->bo);
                vps->bo = NULL;
        }
}


/**
 * vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it
 *
 * @plane: display plane
 * @new_state: info on the new plane state, including the FB
 *
 * Returns 0 on success
 */
int
vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
                               struct drm_plane_state *new_state)
{
        struct drm_framebuffer *fb = new_state->fb;
        struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
        struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
        int ret = 0;

        if (vps->surf) {
                if (vps->surf_mapped) {
                        vmw_bo_unmap(vps->surf->res.guest_memory_bo);
                        vps->surf_mapped = false;
                }
                vmw_surface_unreference(&vps->surf);
                vps->surf = NULL;
        }

        if (vps->bo) {
                vmw_bo_unreference(&vps->bo);
                vps->bo = NULL;
        }

        if (fb) {
                if (vmw_framebuffer_to_vfb(fb)->bo) {
                        vps->bo = vmw_framebuffer_to_vfbd(fb)->buffer;
                        vmw_bo_reference(vps->bo);
                } else {
                        vps->surf = vmw_framebuffer_to_vfbs(fb)->surface;
                        vmw_surface_reference(vps->surf);
                }
        }

        if (!vps->surf && vps->bo) {
                const u32 size = new_state->crtc_w * new_state->crtc_h * sizeof(u32);

                /*
                 * Not using vmw_bo_map_and_cache() helper here as we need to
                 * reserve the ttm_buffer_object first which
                 * vmw_bo_map_and_cache() omits.
                 */
                ret = ttm_bo_reserve(&vps->bo->tbo, true, false, NULL);
                if (unlikely(ret != 0))
                        return -ENOMEM;

                ret = ttm_bo_kmap(&vps->bo->tbo, 0, PFN_UP(size), &vps->bo->map);

                ttm_bo_unreserve(&vps->bo->tbo);

                if (unlikely(ret != 0))
                        return -ENOMEM;
        } else if (vps->surf && !vps->bo && vps->surf->res.guest_memory_bo) {
                WARN_ON(vps->surf->snooper.image);
                ret = ttm_bo_reserve(&vps->surf->res.guest_memory_bo->tbo, true, false,
                                     NULL);
                if (unlikely(ret != 0))
                        return -ENOMEM;
                vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo);
                ttm_bo_unreserve(&vps->surf->res.guest_memory_bo->tbo);
                vps->surf_mapped = true;
        }

        if (vps->surf || vps->bo) {
                vmw_du_get_cursor_mob(vcp, vps);
                vmw_du_cursor_plane_map_cm(vps);
        }

        return 0;
}
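
/*
 * Note on the reference lifecycle: prepare_fb above first drops any
 * leftover surface/bo references and mappings from a previous state,
 * then takes a reference on the new framebuffer's backing object and
 * maps what it needs; the matching unmap/unreference for the old state
 * happens in vmw_du_cursor_plane_cleanup_fb() once the commit is done.
 */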

void
vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
                                  struct drm_atomic_state *state)
{
        struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
                                                                           plane);
        struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
                                                                           plane);
        struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc;
        struct vmw_private *dev_priv = vmw_priv(crtc->dev);
        struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
        struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
        struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
        s32 hotspot_x, hotspot_y;

        hotspot_x = du->hotspot_x;
        hotspot_y = du->hotspot_y;

        if (new_state->fb) {
                hotspot_x += new_state->fb->hot_x;
                hotspot_y += new_state->fb->hot_y;
        }

        du->cursor_surface = vps->surf;
        du->cursor_bo = vps->bo;

        if (!vps->surf && !vps->bo) {
                vmw_cursor_update_position(dev_priv, false, 0, 0);
                return;
        }

        vps->cursor.hotspot_x = hotspot_x;
        vps->cursor.hotspot_y = hotspot_y;

        if (vps->surf)
                du->cursor_age = du->cursor_surface->snooper.age;

        if (!vmw_du_cursor_plane_has_changed(old_vps, vps)) {
                /*
                 * If it hasn't changed, avoid making the device do extra
                 * work by keeping the old cursor active.
                 */
                struct vmw_cursor_plane_state tmp = old_vps->cursor;

                old_vps->cursor = vps->cursor;
                vps->cursor = tmp;
        } else {
                void *image = vmw_du_cursor_plane_acquire_image(vps);

                if (image)
                        vmw_cursor_update_image(dev_priv, vps, image,
                                                new_state->crtc_w,
                                                new_state->crtc_h,
                                                hotspot_x, hotspot_y);
        }

        du->cursor_x = new_state->crtc_x + du->set_gui_x;
        du->cursor_y = new_state->crtc_y + du->set_gui_y;

        vmw_cursor_update_position(dev_priv, true,
                                   du->cursor_x + hotspot_x,
                                   du->cursor_y + hotspot_y);

        du->core_hotspot_x = hotspot_x - du->hotspot_x;
        du->core_hotspot_y = hotspot_y - du->hotspot_y;
}


/**
 * vmw_du_primary_plane_atomic_check - check if the new state is okay
 *
 * @plane: display plane
 * @state: info on the new plane state, including the FB
 *
 * Check if the new state is settable given the current state. Other
 * than what the atomic helper checks, we care about the crtc fitting
 * the FB and maintaining one active framebuffer.
 *
 * Returns 0 on success
 */
int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
                                      struct drm_atomic_state *state)
{
        struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
                                                                           plane);
        struct drm_crtc_state *crtc_state = NULL;
        struct drm_framebuffer *new_fb = new_state->fb;
        int ret;

        if (new_state->crtc)
                crtc_state = drm_atomic_get_new_crtc_state(state,
                                                           new_state->crtc);

        ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
                                                  DRM_PLANE_NO_SCALING,
                                                  DRM_PLANE_NO_SCALING,
                                                  false, true);

        if (!ret && new_fb) {
                struct drm_crtc *crtc = new_state->crtc;
                struct vmw_display_unit *du = vmw_crtc_to_du(crtc);

                vmw_connector_state_to_vcs(du->connector.state);
        }

        return ret;
}


/**
 * vmw_du_cursor_plane_atomic_check - check if the new state is okay
 *
 * @plane: cursor plane
 * @state: info on the new plane state
 *
 * This is a chance to fail if the new cursor state does not fit
 * our requirements.
 *
 * Returns 0 on success
 */
int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
                                     struct drm_atomic_state *state)
{
        struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
                                                                           plane);
        int ret = 0;
        struct drm_crtc_state *crtc_state = NULL;
        struct vmw_surface *surface = NULL;
        struct drm_framebuffer *fb = new_state->fb;

        if (new_state->crtc)
                crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
                                                           new_state->crtc);

        ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
                                                  DRM_PLANE_NO_SCALING,
                                                  DRM_PLANE_NO_SCALING,
                                                  true, true);
        if (ret)
                return ret;

        /* Turning off */
        if (!fb)
                return 0;

        /* A lot of the code assumes this */
        if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
                DRM_ERROR("Invalid cursor dimensions (%d, %d)\n",
                          new_state->crtc_w, new_state->crtc_h);
                return -EINVAL;
        }

        if (!vmw_framebuffer_to_vfb(fb)->bo) {
                surface = vmw_framebuffer_to_vfbs(fb)->surface;

                WARN_ON(!surface);

                if (!surface ||
                    (!surface->snooper.image && !surface->res.guest_memory_bo)) {
                        DRM_ERROR("surface not suitable for cursor\n");
                        return -EINVAL;
                }
        }

        return 0;
}


int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
                             struct drm_atomic_state *state)
{
        struct vmw_private *vmw = vmw_priv(crtc->dev);
        struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
                                                                         crtc);
        struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
        int connector_mask = drm_connector_mask(&du->connector);
        bool has_primary = new_state->plane_mask &
                           drm_plane_mask(crtc->primary);

        /*
         * This is fine in general, but broken userspace might expect
         * some actual rendering so give a clue as to why it's blank.
         */
        if (new_state->enable && !has_primary)
                drm_dbg_driver(&vmw->drm,
                               "CRTC without a primary plane will be blank.\n");

        if (new_state->connector_mask != connector_mask &&
            new_state->connector_mask != 0) {
                DRM_ERROR("Invalid connectors configuration\n");
                return -EINVAL;
        }

        /*
         * Our virtual device does not have a dot clock, so use the logical
         * clock value as the dot clock.
         */
        if (new_state->mode.crtc_clock == 0)
                new_state->adjusted_mode.crtc_clock = new_state->mode.clock;

        return 0;
}


void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
                              struct drm_atomic_state *state)
{
}


void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc,
                              struct drm_atomic_state *state)
{
}


/**
 * vmw_du_crtc_duplicate_state - duplicate crtc state
 * @crtc: DRM crtc
 *
 * Allocates and returns a copy of the crtc state (both common and
 * vmw-specific) for the specified crtc.
 *
 * Returns: The newly allocated crtc state, or NULL on failure.
 */
struct drm_crtc_state *
vmw_du_crtc_duplicate_state(struct drm_crtc *crtc)
{
        struct drm_crtc_state *state;
        struct vmw_crtc_state *vcs;

        if (WARN_ON(!crtc->state))
                return NULL;

        vcs = kmemdup(crtc->state, sizeof(*vcs), GFP_KERNEL);
        if (!vcs)
                return NULL;

        state = &vcs->base;

        __drm_atomic_helper_crtc_duplicate_state(crtc, state);

        return state;
}


/**
 * vmw_du_crtc_reset - creates a blank vmw crtc state
 * @crtc: DRM crtc
 *
 * Resets the atomic state for @crtc by freeing the state pointer (which
 * might be NULL, e.g. at driver load time) and allocating a new empty state
 * object.
 */
void vmw_du_crtc_reset(struct drm_crtc *crtc)
{
        struct vmw_crtc_state *vcs;

        if (crtc->state) {
                __drm_atomic_helper_crtc_destroy_state(crtc->state);
                kfree(vmw_crtc_state_to_vcs(crtc->state));
        }

        vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
        if (!vcs) {
                DRM_ERROR("Cannot allocate vmw_crtc_state\n");
                return;
        }

        __drm_atomic_helper_crtc_reset(crtc, &vcs->base);
}


/**
 * vmw_du_crtc_destroy_state - destroy crtc state
 * @crtc: DRM crtc
 * @state: state object to destroy
 *
 * Destroys the crtc state (both common and vmw-specific) for the
 * specified crtc.
 */
void
vmw_du_crtc_destroy_state(struct drm_crtc *crtc,
                          struct drm_crtc_state *state)
{
        drm_atomic_helper_crtc_destroy_state(crtc, state);
}


/**
 * vmw_du_plane_duplicate_state - duplicate plane state
 * @plane: drm plane
 *
 * Allocates and returns a copy of the plane state (both common and
 * vmw-specific) for the specified plane.
 *
 * Returns: The newly allocated plane state, or NULL on failure.
 */
struct drm_plane_state *
vmw_du_plane_duplicate_state(struct drm_plane *plane)
{
        struct drm_plane_state *state;
        struct vmw_plane_state *vps;

        vps = kmemdup(plane->state, sizeof(*vps), GFP_KERNEL);
        if (!vps)
                return NULL;

        vps->pinned = 0;
        vps->cpp = 0;

        memset(&vps->cursor, 0, sizeof(vps->cursor));

        /* Each ref counted resource needs to be acquired again */
        if (vps->surf)
                (void) vmw_surface_reference(vps->surf);

        if (vps->bo)
                (void) vmw_bo_reference(vps->bo);

        state = &vps->base;

        __drm_atomic_helper_plane_duplicate_state(plane, state);

        return state;
}


/**
 * vmw_du_plane_reset - creates a blank vmw plane state
 * @plane: drm plane
 *
 * Resets the atomic state for @plane by freeing the state pointer (which might
 * be NULL, e.g. at driver load time) and allocating a new empty state object.
 */
void vmw_du_plane_reset(struct drm_plane *plane)
{
        struct vmw_plane_state *vps;

        if (plane->state)
                vmw_du_plane_destroy_state(plane, plane->state);

        vps = kzalloc(sizeof(*vps), GFP_KERNEL);
        if (!vps) {
                DRM_ERROR("Cannot allocate vmw_plane_state\n");
                return;
        }

        __drm_atomic_helper_plane_reset(plane, &vps->base);
}


/**
 * vmw_du_plane_destroy_state - destroy plane state
 * @plane: DRM plane
 * @state: state object to destroy
 *
 * Destroys the plane state (both common and vmw-specific) for the
 * specified plane.
 */
void
vmw_du_plane_destroy_state(struct drm_plane *plane,
                           struct drm_plane_state *state)
{
        struct vmw_plane_state *vps = vmw_plane_state_to_vps(state);

        /* Should have been freed by cleanup_fb */
        if (vps->surf)
                vmw_surface_unreference(&vps->surf);

        if (vps->bo)
                vmw_bo_unreference(&vps->bo);

        drm_atomic_helper_plane_destroy_state(plane, state);
}


/**
 * vmw_du_connector_duplicate_state - duplicate connector state
 * @connector: DRM connector
 *
 * Allocates and returns a copy of the connector state (both common and
 * vmw-specific) for the specified connector.
 *
 * Returns: The newly allocated connector state, or NULL on failure.
 */
struct drm_connector_state *
vmw_du_connector_duplicate_state(struct drm_connector *connector)
{
        struct drm_connector_state *state;
        struct vmw_connector_state *vcs;

        if (WARN_ON(!connector->state))
                return NULL;

        vcs = kmemdup(connector->state, sizeof(*vcs), GFP_KERNEL);
        if (!vcs)
                return NULL;

        state = &vcs->base;

        __drm_atomic_helper_connector_duplicate_state(connector, state);

        return state;
}


/**
 * vmw_du_connector_reset - creates a blank vmw connector state
 * @connector: DRM connector
 *
 * Resets the atomic state for @connector by freeing the state pointer (which
 * might be NULL, e.g. at driver load time) and allocating a new empty state
 * object.
 */
void vmw_du_connector_reset(struct drm_connector *connector)
{
        struct vmw_connector_state *vcs;

        if (connector->state) {
                __drm_atomic_helper_connector_destroy_state(connector->state);
                kfree(vmw_connector_state_to_vcs(connector->state));
        }

        vcs = kzalloc(sizeof(*vcs), GFP_KERNEL);
        if (!vcs) {
                DRM_ERROR("Cannot allocate vmw_connector_state\n");
                return;
        }

        __drm_atomic_helper_connector_reset(connector, &vcs->base);
}


/**
 * vmw_du_connector_destroy_state - destroy connector state
 * @connector: DRM connector
 * @state: state object to destroy
 *
 * Destroys the connector state (both common and vmw-specific) for the
 * specified connector.
 */
void
vmw_du_connector_destroy_state(struct drm_connector *connector,
                               struct drm_connector_state *state)
{
        drm_atomic_helper_connector_destroy_state(connector, state);
}

/*
 * Generic framebuffer code
 */

/*
 * Surface framebuffer code
 */

static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
        struct vmw_framebuffer_surface *vfbs =
                vmw_framebuffer_to_vfbs(framebuffer);

        drm_framebuffer_cleanup(framebuffer);
        vmw_surface_unreference(&vfbs->surface);

        kfree(vfbs);
}

/**
 * vmw_kms_readback - Perform a readback from the screen system to
 * a buffer-object backed framebuffer.
 *
 * @dev_priv: Pointer to the device private structure.
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * Must be set to NULL if @user_fence_rep is NULL.
 * @vfb: Pointer to the buffer-object backed framebuffer.
 * @user_fence_rep: User-space provided structure for fence information.
 * Must be set to non-NULL if @file_priv is non-NULL.
 * @vclips: Array of clip rects.
 * @num_clips: Number of clip rects in @vclips.
 *
 * Returns 0 on success, negative error code on failure. -ERESTARTSYS if
 * interrupted.
 */
int vmw_kms_readback(struct vmw_private *dev_priv,
                     struct drm_file *file_priv,
                     struct vmw_framebuffer *vfb,
                     struct drm_vmw_fence_rep __user *user_fence_rep,
                     struct drm_vmw_rect *vclips,
                     uint32_t num_clips)
{
        switch (dev_priv->active_display_unit) {
        case vmw_du_screen_object:
                return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
                                            user_fence_rep, vclips, num_clips,
                                            NULL);
        case vmw_du_screen_target:
                return vmw_kms_stdu_readback(dev_priv, file_priv, vfb,
                                             user_fence_rep, NULL, vclips, num_clips,
                                             1, NULL);
        default:
                WARN_ONCE(true,
                          "Readback called with invalid display system.\n");
        }

        return -ENOSYS;
}


static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
        .destroy = vmw_framebuffer_surface_destroy,
        .dirty = drm_atomic_helper_dirtyfb,
};

static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
                                           struct vmw_surface *surface,
                                           struct vmw_framebuffer **out,
                                           const struct drm_mode_fb_cmd2 *mode_cmd,
                                           bool is_bo_proxy)
{
        struct drm_device *dev = &dev_priv->drm;
        struct vmw_framebuffer_surface *vfbs;
        enum SVGA3dSurfaceFormat format;
        int ret;

        /* 3D is only supported on HWv8 and newer hosts */
        if (dev_priv->active_display_unit == vmw_du_legacy)
                return -ENOSYS;

        /*
         * Sanity checks.
         */

        if (!drm_any_plane_has_format(&dev_priv->drm,
                                      mode_cmd->pixel_format,
                                      mode_cmd->modifier[0])) {
                drm_dbg(&dev_priv->drm,
                        "unsupported pixel format %p4cc / modifier 0x%llx\n",
                        &mode_cmd->pixel_format, mode_cmd->modifier[0]);
                return -EINVAL;
        }

        /* Surface must be marked as a scanout. */
        if (unlikely(!surface->metadata.scanout))
                return -EINVAL;

        if (unlikely(surface->metadata.mip_levels[0] != 1 ||
                     surface->metadata.num_sizes != 1 ||
                     surface->metadata.base_size.width < mode_cmd->width ||
                     surface->metadata.base_size.height < mode_cmd->height ||
                     surface->metadata.base_size.depth != 1)) {
                DRM_ERROR("Incompatible surface dimensions "
                          "for requested mode.\n");
                return -EINVAL;
        }

        switch (mode_cmd->pixel_format) {
        case DRM_FORMAT_ARGB8888:
                format = SVGA3D_A8R8G8B8;
                break;
        case DRM_FORMAT_XRGB8888:
                format = SVGA3D_X8R8G8B8;
                break;
        case DRM_FORMAT_RGB565:
                format = SVGA3D_R5G6B5;
                break;
        case DRM_FORMAT_XRGB1555:
                format = SVGA3D_A1R5G5B5;
                break;
        default:
                DRM_ERROR("Invalid pixel format: %p4cc\n",
                          &mode_cmd->pixel_format);
                return -EINVAL;
        }

        /*
         * For DX, surface format validation is done when surface->scanout
         * is set.
         */
        if (!has_sm4_context(dev_priv) && format != surface->metadata.format) {
                DRM_ERROR("Invalid surface format for requested mode.\n");
                return -EINVAL;
        }

        vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
        if (!vfbs) {
                ret = -ENOMEM;
                goto out_err1;
        }

        drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd);
        vfbs->surface = vmw_surface_reference(surface);
        vfbs->base.user_handle = mode_cmd->handles[0];
        vfbs->is_bo_proxy = is_bo_proxy;

        *out = &vfbs->base;

        ret = drm_framebuffer_init(dev, &vfbs->base.base,
                                   &vmw_framebuffer_surface_funcs);
        if (ret)
                goto out_err2;

        return 0;

out_err2:
        vmw_surface_unreference(&surface);
        kfree(vfbs);
out_err1:
        return ret;
}

/*
 * Buffer-object framebuffer code
 */

static int vmw_framebuffer_bo_create_handle(struct drm_framebuffer *fb,
                                            struct drm_file *file_priv,
                                            unsigned int *handle)
{
        struct vmw_framebuffer_bo *vfbd =
                vmw_framebuffer_to_vfbd(fb);

        return drm_gem_handle_create(file_priv, &vfbd->buffer->tbo.base, handle);
}

static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
{
        struct vmw_framebuffer_bo *vfbd =
                vmw_framebuffer_to_vfbd(framebuffer);

        drm_framebuffer_cleanup(framebuffer);
        vmw_bo_unreference(&vfbd->buffer);

        kfree(vfbd);
}

static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
        .create_handle = vmw_framebuffer_bo_create_handle,
        .destroy = vmw_framebuffer_bo_destroy,
        .dirty = drm_atomic_helper_dirtyfb,
};

/**
 * vmw_create_bo_proxy - create a proxy surface for the buffer object
 *
 * @dev: DRM device
 * @mode_cmd: parameters for the new surface
 * @bo_mob: MOB backing the buffer object
 * @srf_out: newly created surface
 *
 * When the content FB is a buffer object, we create a surface as a proxy to
 * the same buffer. This way we can do a surface copy rather than a surface
 * DMA, which is a more efficient approach.
 *
 * RETURNS:
 * 0 on success, error code otherwise
 */
static int vmw_create_bo_proxy(struct drm_device *dev,
                               const struct drm_mode_fb_cmd2 *mode_cmd,
                               struct vmw_bo *bo_mob,
                               struct vmw_surface **srf_out)
{
        struct vmw_surface_metadata metadata = {0};
        uint32_t format;
        struct vmw_resource *res;
        unsigned int bytes_pp;
        int ret;

        switch (mode_cmd->pixel_format) {
        case DRM_FORMAT_ARGB8888:
        case DRM_FORMAT_XRGB8888:
                format = SVGA3D_X8R8G8B8;
                bytes_pp = 4;
                break;

        case DRM_FORMAT_RGB565:
        case DRM_FORMAT_XRGB1555:
                format = SVGA3D_R5G6B5;
                bytes_pp = 2;
                break;

        case 8:
                format = SVGA3D_P8;
                bytes_pp = 1;
                break;

        default:
                DRM_ERROR("Invalid framebuffer format %p4cc\n",
                          &mode_cmd->pixel_format);
                return -EINVAL;
        }

        metadata.format = format;
        metadata.mip_levels[0] = 1;
        metadata.num_sizes = 1;
        metadata.base_size.width = mode_cmd->pitches[0] / bytes_pp;
        metadata.base_size.height = mode_cmd->height;
        metadata.base_size.depth = 1;
        metadata.scanout = true;

        ret = vmw_gb_surface_define(vmw_priv(dev), &metadata, srf_out);
        if (ret) {
                DRM_ERROR("Failed to allocate proxy content buffer\n");
                return ret;
        }

        res = &(*srf_out)->res;

        /* Reserve and switch the backing mob. */
        mutex_lock(&res->dev_priv->cmdbuf_mutex);
        (void) vmw_resource_reserve(res, false, true);
        vmw_user_bo_unref(&res->guest_memory_bo);
        res->guest_memory_bo = vmw_user_bo_ref(bo_mob);
        res->guest_memory_offset = 0;
        vmw_resource_unreserve(res, false, false, false, NULL, 0);
        mutex_unlock(&res->dev_priv->cmdbuf_mutex);

        return 0;
}


static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
                                      struct vmw_bo *bo,
                                      struct vmw_framebuffer **out,
                                      const struct drm_mode_fb_cmd2 *mode_cmd)
{
        struct drm_device *dev = &dev_priv->drm;
        struct vmw_framebuffer_bo *vfbd;
        unsigned int requested_size;
        int ret;

        requested_size = mode_cmd->height * mode_cmd->pitches[0];
        if (unlikely(requested_size > bo->tbo.base.size)) {
                DRM_ERROR("Screen buffer object size is too small "
                          "for requested mode.\n");
                return -EINVAL;
        }

        if (!drm_any_plane_has_format(&dev_priv->drm,
                                      mode_cmd->pixel_format,
                                      mode_cmd->modifier[0])) {
                drm_dbg(&dev_priv->drm,
                        "unsupported pixel format %p4cc / modifier 0x%llx\n",
                        &mode_cmd->pixel_format, mode_cmd->modifier[0]);
                return -EINVAL;
        }

        vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
        if (!vfbd) {
                ret = -ENOMEM;
                goto out_err1;
        }

        vfbd->base.base.obj[0] = &bo->tbo.base;
        drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
        vfbd->base.bo = true;
        vfbd->buffer = vmw_bo_reference(bo);
        vfbd->base.user_handle = mode_cmd->handles[0];
        *out = &vfbd->base;

        ret = drm_framebuffer_init(dev, &vfbd->base.base,
                                   &vmw_framebuffer_bo_funcs);
        if (ret)
                goto out_err2;

        return 0;

out_err2:
        vmw_bo_unreference(&bo);
        kfree(vfbd);
out_err1:
        return ret;
}


/**
 * vmw_kms_srf_ok - check if a surface can be created
 *
 * @dev_priv: Pointer to device private struct.
 * @width: requested width
 * @height: requested height
 *
 * Surfaces need to fit within the maximum texture size.
 */
static bool
vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
{
        if (width > dev_priv->texture_max_width ||
            height > dev_priv->texture_max_height)
                return false;

        return true;
}

/**
 * vmw_kms_new_framebuffer - Create a new framebuffer.
 *
 * @dev_priv: Pointer to device private struct.
 * @bo: Pointer to buffer object to wrap the kms framebuffer around.
 * Either @bo or @surface must be NULL.
 * @surface: Pointer to a surface to wrap the kms framebuffer around.
 * Either @bo or @surface must be NULL.
 * @only_2d: No presents will occur to this buffer object based framebuffer.
 * This helps the code to do some important optimizations.
 * @mode_cmd: Frame-buffer metadata.
 */
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
                        struct vmw_bo *bo,
                        struct vmw_surface *surface,
                        bool only_2d,
                        const struct drm_mode_fb_cmd2 *mode_cmd)
{
        struct vmw_framebuffer *vfb = NULL;
        bool is_bo_proxy = false;
        int ret;

        /*
         * We cannot use the SurfaceDMA command in a non-accelerated VM,
         * therefore, wrap the buffer object in a surface so we can use the
         * SurfaceCopy command.
         */
        if (vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height) &&
            bo && only_2d &&
            mode_cmd->width > 64 &&  /* Don't create a proxy for cursor */
            dev_priv->active_display_unit == vmw_du_screen_target) {
                ret = vmw_create_bo_proxy(&dev_priv->drm, mode_cmd,
                                          bo, &surface);
                if (ret)
                        return ERR_PTR(ret);

                is_bo_proxy = true;
        }

        /* Create the new framebuffer depending on what we have. */
        if (surface) {
                ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
                                                      mode_cmd,
                                                      is_bo_proxy);
                /*
                 * vmw_create_bo_proxy() adds a reference that is no longer
                 * needed
                 */
                if (is_bo_proxy)
                        vmw_surface_unreference(&surface);
        } else if (bo) {
                ret = vmw_kms_new_framebuffer_bo(dev_priv, bo, &vfb,
                                                 mode_cmd);
        } else {
                BUG();
        }

        if (ret)
                return ERR_PTR(ret);

        return vfb;
}
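
/*
 * Summary of the choice above: a buffer-object fb that will never be
 * presented to (only_2d) on a screen-target display unit is wrapped in a
 * proxy surface so it can be kept up to date with surface copies;
 * otherwise the framebuffer is created directly around the surface or
 * buffer object the caller passed in.
 */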

/*
 * Generic kernel modesetting functions
 */

static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
                                                 struct drm_file *file_priv,
                                                 const struct drm_mode_fb_cmd2 *mode_cmd)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_framebuffer *vfb = NULL;
        struct vmw_surface *surface = NULL;
        struct vmw_bo *bo = NULL;
        int ret;

        /* Returns either a bo or a surface. */
        ret = vmw_user_lookup_handle(dev_priv, file_priv,
                                     mode_cmd->handles[0],
                                     &surface, &bo);
        if (ret) {
                DRM_ERROR("Invalid buffer object handle %u (0x%x).\n",
                          mode_cmd->handles[0], mode_cmd->handles[0]);
                goto err_out;
        }

        if (!bo &&
            !vmw_kms_srf_ok(dev_priv, mode_cmd->width, mode_cmd->height)) {
                DRM_ERROR("Surface size cannot exceed %dx%d\n",
                          dev_priv->texture_max_width,
                          dev_priv->texture_max_height);
                ret = -EINVAL;
                goto err_out;
        }

        vfb = vmw_kms_new_framebuffer(dev_priv, bo, surface,
                                      !(dev_priv->capabilities & SVGA_CAP_3D),
                                      mode_cmd);
        if (IS_ERR(vfb)) {
                ret = PTR_ERR(vfb);
                goto err_out;
        }

err_out:
        /* vmw_user_lookup_handle takes one ref; so does new_fb. */
        if (bo)
                vmw_user_bo_unref(&bo);
        if (surface)
                vmw_surface_unreference(&surface);

        if (ret) {
                DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
                return ERR_PTR(ret);
        }

        return &vfb->base;
}

/**
 * vmw_kms_check_display_memory - Validates display memory required for a
 * topology
 * @dev: DRM device
 * @num_rects: number of drm_rect in rects
 * @rects: array of drm_rect representing the topology to validate indexed by
 * crtc index.
 *
 * Returns:
 * 0 on success otherwise negative error code
 */
static int vmw_kms_check_display_memory(struct drm_device *dev,
                                        uint32_t num_rects,
                                        struct drm_rect *rects)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct drm_rect bounding_box = {0};
        u64 total_pixels = 0, pixel_mem, bb_mem;
        int i;

        for (i = 0; i < num_rects; i++) {
                /*
                 * For STDU, only an individual screen (screen target) is
                 * limited by the SCREENTARGET_MAX_WIDTH/HEIGHT registers.
                 */
                if (dev_priv->active_display_unit == vmw_du_screen_target &&
                    (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width ||
                     drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) {
                        VMW_DEBUG_KMS("Screen size not supported.\n");
                        return -EINVAL;
                }

                /* Bounding box upper left is at (0,0). */
                if (rects[i].x2 > bounding_box.x2)
                        bounding_box.x2 = rects[i].x2;

                if (rects[i].y2 > bounding_box.y2)
                        bounding_box.y2 = rects[i].y2;

                total_pixels += (u64) drm_rect_width(&rects[i]) *
                                (u64) drm_rect_height(&rects[i]);
        }

        /* The virtual svga device primary limits are always in 32-bpp. */
        pixel_mem = total_pixels * 4;

        /*
         * For HV10 and below, prim_bb_mem is the vram size. When
         * SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM is not present, the vram
         * size is the limit on the primary bounding box.
         */
        if (pixel_mem > dev_priv->max_primary_mem) {
                VMW_DEBUG_KMS("Combined output size too large.\n");
                return -EINVAL;
        }

        /* SVGA_CAP_NO_BB_RESTRICTION is available for STDU only. */
        if (dev_priv->active_display_unit != vmw_du_screen_target ||
            !(dev_priv->capabilities & SVGA_CAP_NO_BB_RESTRICTION)) {
                bb_mem = (u64) bounding_box.x2 * bounding_box.y2 * 4;

                if (bb_mem > dev_priv->max_primary_mem) {
                        VMW_DEBUG_KMS("Topology is beyond supported limits.\n");
                        return -EINVAL;
                }
        }

        return 0;
}
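
/*
 * Worked example of the checks above (numbers for illustration only):
 * two 1920x1080 outputs side by side give
 * pixel_mem = 2 * 1920 * 1080 * 4 = 16588800 bytes (~15.8 MiB) and a
 * bounding box of 3840x1080, so bb_mem = 3840 * 1080 * 4, the same
 * 16588800 bytes; both must fit in dev_priv->max_primary_mem unless
 * SVGA_CAP_NO_BB_RESTRICTION lifts the bounding-box check for STDU.
 */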

/**
 * vmw_crtc_state_and_lock - Return new or current crtc state with locked
 * crtc mutex
 * @state: The atomic state pointer containing the new atomic state
 * @crtc: The crtc
 *
 * This function returns the new crtc state if it's part of the state update.
 * Otherwise it returns the current crtc state. It also makes sure that the
 * crtc mutex is locked.
 *
 * Returns: A valid crtc state pointer or NULL. It may also return a
 * pointer error, in particular -EDEADLK if locking needs to be rerun.
 */
static struct drm_crtc_state *
vmw_crtc_state_and_lock(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
        struct drm_crtc_state *crtc_state;

        crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
        if (crtc_state) {
                lockdep_assert_held(&crtc->mutex.mutex.base);
        } else {
                int ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);

                if (ret != 0 && ret != -EALREADY)
                        return ERR_PTR(ret);

                crtc_state = crtc->state;
        }

        return crtc_state;
}
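
/*
 * Typical use of vmw_crtc_state_and_lock() from a check hook (sketch),
 * which is the pattern vmw_kms_check_implicit() and
 * vmw_kms_check_topology() below follow:
 *
 *	crtc_state = vmw_crtc_state_and_lock(state, crtc);
 *	if (IS_ERR(crtc_state))
 *		return PTR_ERR(crtc_state);   (may be -EDEADLK; rerun locking)
 *	if (!crtc_state)
 *		continue;                     (crtc untouched by this update)
 */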

/**
 * vmw_kms_check_implicit - Verify that all implicit display units scan out
 * from the same fb after the new state is committed.
 * @dev: The drm_device.
 * @state: The new state to be checked.
 *
 * Returns:
 * Zero on success,
 * -EINVAL on invalid state,
 * -EDEADLK if modeset locking needs to be rerun.
 */
static int vmw_kms_check_implicit(struct drm_device *dev,
                                  struct drm_atomic_state *state)
{
        struct drm_framebuffer *implicit_fb = NULL;
        struct drm_crtc *crtc;
        struct drm_crtc_state *crtc_state;
        struct drm_plane_state *plane_state;

        drm_for_each_crtc(crtc, dev) {
                struct vmw_display_unit *du = vmw_crtc_to_du(crtc);

                if (!du->is_implicit)
                        continue;

                crtc_state = vmw_crtc_state_and_lock(state, crtc);
                if (IS_ERR(crtc_state))
                        return PTR_ERR(crtc_state);

                if (!crtc_state || !crtc_state->enable)
                        continue;

                /*
                 * Can't move primary planes across crtcs, so this is OK.
                 * It also means we don't need to take the plane mutex.
                 */
                plane_state = du->primary.state;
                if (plane_state->crtc != crtc)
                        continue;

                if (!implicit_fb)
                        implicit_fb = plane_state->fb;
                else if (implicit_fb != plane_state->fb)
                        return -EINVAL;
        }

        return 0;
}
1841
1842 /**
1843 * vmw_kms_check_topology - Validates topology in drm_atomic_state
1844 * @dev: DRM device
1845 * @state: the driver state object
1846 *
1847 * Returns:
1848 * 0 on success otherwise negative error code
1849 */
1850 static int vmw_kms_check_topology(struct drm_device *dev,
1851 struct drm_atomic_state *state)
1852 {
1853 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1854 struct drm_rect *rects;
1855 struct drm_crtc *crtc;
1856 uint32_t i;
1857 int ret = 0;
1858
1859 rects = kcalloc(dev->mode_config.num_crtc, sizeof(struct drm_rect),
1860 GFP_KERNEL);
1861 if (!rects)
1862 return -ENOMEM;
1863
1864 drm_for_each_crtc(crtc, dev) {
1865 struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1866 struct drm_crtc_state *crtc_state;
1867
1868 i = drm_crtc_index(crtc);
1869
1870 crtc_state = vmw_crtc_state_and_lock(state, crtc);
1871 if (IS_ERR(crtc_state)) {
1872 ret = PTR_ERR(crtc_state);
1873 goto clean;
1874 }
1875
1876 if (!crtc_state)
1877 continue;
1878
1879 if (crtc_state->enable) {
1880 rects[i].x1 = du->gui_x;
1881 rects[i].y1 = du->gui_y;
1882 rects[i].x2 = du->gui_x + crtc_state->mode.hdisplay;
1883 rects[i].y2 = du->gui_y + crtc_state->mode.vdisplay;
1884 } else {
1885 rects[i].x1 = 0;
1886 rects[i].y1 = 0;
1887 rects[i].x2 = 0;
1888 rects[i].y2 = 0;
1889 }
1890 }
1891
1892 /* Determine change to topology due to new atomic state */
1893 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
1894 new_crtc_state, i) {
1895 struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
1896 struct drm_connector *connector;
1897 struct drm_connector_state *conn_state;
1898 struct vmw_connector_state *vmw_conn_state;
1899
1900 if (!du->pref_active && new_crtc_state->enable) {
1901 VMW_DEBUG_KMS("Enabling a disabled display unit\n");
1902 ret = -EINVAL;
1903 goto clean;
1904 }
1905
1906 /*
1907 * For vmwgfx each crtc has only one connector attached and it is not
1908 * changed, so there is no need to check crtc->connector_mask and
1909 * iterate over it.
1910 */
1911 connector = &du->connector;
1912 conn_state = drm_atomic_get_connector_state(state, connector);
1913 if (IS_ERR(conn_state)) {
1914 ret = PTR_ERR(conn_state);
1915 goto clean;
1916 }
1917
1918 vmw_conn_state = vmw_connector_state_to_vcs(conn_state);
1919 vmw_conn_state->gui_x = du->gui_x;
1920 vmw_conn_state->gui_y = du->gui_y;
1921 }
1922
1923 ret = vmw_kms_check_display_memory(dev, dev->mode_config.num_crtc,
1924 rects);
1925
1926 clean:
1927 kfree(rects);
1928 return ret;
1929 }
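
/*
 * Sketch of the per-crtc rect array built above (hypothetical values):
 * two enabled crtcs at gui offsets (0,0) and (1920,0), both in 1920x1080
 * modes, hand the following to vmw_kms_check_display_memory():
 *
 *	rects[0] = (struct drm_rect){ 0, 0, 1920, 1080 };
 *	rects[1] = (struct drm_rect){ 1920, 0, 3840, 1080 };
 *
 * Disabled crtcs contribute an all-zero rect and therefore no pixels.
 */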
1930
1931 /**
1932 * vmw_kms_atomic_check_modeset - validate state object for modeset changes
1933 *
1934 * @dev: DRM device
1935 * @state: the driver state object
1936 *
1937 * This is a simple wrapper around drm_atomic_helper_check() which lets
1938 * us assign a value to mode->crtc_clock so that
1939 * drm_calc_timestamping_constants() won't throw an error message.
1940 *
1941 * Returns:
1942 * Zero for success or -errno
1943 */
1944 static int
1945 vmw_kms_atomic_check_modeset(struct drm_device *dev,
1946 struct drm_atomic_state *state)
1947 {
1948 struct drm_crtc *crtc;
1949 struct drm_crtc_state *crtc_state;
1950 bool need_modeset = false;
1951 int i, ret;
1952
1953 ret = drm_atomic_helper_check(dev, state);
1954 if (ret)
1955 return ret;
1956
1957 ret = vmw_kms_check_implicit(dev, state);
1958 if (ret) {
1959 VMW_DEBUG_KMS("Invalid implicit state\n");
1960 return ret;
1961 }
1962
1963 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
1964 if (drm_atomic_crtc_needs_modeset(crtc_state))
1965 need_modeset = true;
1966 }
1967
1968 if (need_modeset)
1969 return vmw_kms_check_topology(dev, state);
1970
1971 return ret;
1972 }
1973
1974 static const struct drm_mode_config_funcs vmw_kms_funcs = {
1975 .fb_create = vmw_kms_fb_create,
1976 .atomic_check = vmw_kms_atomic_check_modeset,
1977 .atomic_commit = drm_atomic_helper_commit,
1978 };
1979
1980 static int vmw_kms_generic_present(struct vmw_private *dev_priv,
1981 struct drm_file *file_priv,
1982 struct vmw_framebuffer *vfb,
1983 struct vmw_surface *surface,
1984 uint32_t sid,
1985 int32_t destX, int32_t destY,
1986 struct drm_vmw_rect *clips,
1987 uint32_t num_clips)
1988 {
1989 return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
1990 &surface->res, destX, destY,
1991 num_clips, 1, NULL, NULL);
1992 }
1993
1994
1995 int vmw_kms_present(struct vmw_private *dev_priv,
1996 struct drm_file *file_priv,
1997 struct vmw_framebuffer *vfb,
1998 struct vmw_surface *surface,
1999 uint32_t sid,
2000 int32_t destX, int32_t destY,
2001 struct drm_vmw_rect *clips,
2002 uint32_t num_clips)
2003 {
2004 int ret;
2005
2006 switch (dev_priv->active_display_unit) {
2007 case vmw_du_screen_target:
2008 ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
2009 &surface->res, destX, destY,
2010 num_clips, 1, NULL, NULL);
2011 break;
2012 case vmw_du_screen_object:
2013 ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
2014 sid, destX, destY, clips,
2015 num_clips);
2016 break;
2017 default:
2018 WARN_ONCE(true,
2019 "Present called with invalid display system.\n");
2020 ret = -ENOSYS;
2021 break;
2022 }
2023 if (ret)
2024 return ret;
2025
2026 vmw_cmd_flush(dev_priv, false);
2027
2028 return 0;
2029 }
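
/*
 * Hedged example of a single-clip present (all values hypothetical):
 *
 *	struct drm_vmw_rect clip = { .x = 0, .y = 0, .w = 640, .h = 480 };
 *	int ret = vmw_kms_present(dev_priv, file_priv, vfb, surface, sid,
 *				  0, 0, &clip, 1);
 *
 * The explicit vmw_cmd_flush() above means the host sees the update
 * immediately rather than at the next natural flush point.
 */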
2030
2031 static void
2032 vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
2033 {
2034 if (dev_priv->hotplug_mode_update_property)
2035 return;
2036
2037 dev_priv->hotplug_mode_update_property =
2038 drm_property_create_range(&dev_priv->drm,
2039 DRM_MODE_PROP_IMMUTABLE,
2040 "hotplug_mode_update", 0, 1);
2041 }
2042
2043 int vmw_kms_init(struct vmw_private *dev_priv)
2044 {
2045 struct drm_device *dev = &dev_priv->drm;
2046 int ret;
2047 static const char *display_unit_names[] = {
2048 "Invalid",
2049 "Legacy",
2050 "Screen Object",
2051 "Screen Target",
2052 "Invalid (max)"
2053 };
2054
2055 drm_mode_config_init(dev);
2056 dev->mode_config.funcs = &vmw_kms_funcs;
2057 dev->mode_config.min_width = 1;
2058 dev->mode_config.min_height = 1;
2059 dev->mode_config.max_width = dev_priv->texture_max_width;
2060 dev->mode_config.max_height = dev_priv->texture_max_height;
2061 dev->mode_config.preferred_depth = dev_priv->assume_16bpp ? 16 : 32;
2062
2063 drm_mode_create_suggested_offset_properties(dev);
2064 vmw_kms_create_hotplug_mode_update_property(dev_priv);
2065
2066 ret = vmw_kms_stdu_init_display(dev_priv);
2067 if (ret) {
2068 ret = vmw_kms_sou_init_display(dev_priv);
2069 if (ret) /* Fallback */
2070 ret = vmw_kms_ldu_init_display(dev_priv);
2071 }
2072 BUILD_BUG_ON(ARRAY_SIZE(display_unit_names) != (vmw_du_max + 1));
2073 drm_info(&dev_priv->drm, "%s display unit initialized\n",
2074 display_unit_names[dev_priv->active_display_unit]);
2075
2076 return ret;
2077 }
2078
2079 int vmw_kms_close(struct vmw_private *dev_priv)
2080 {
2081 int ret = 0;
2082
2083 /*
2084 * The docs say we should take the lock before calling this function,
2085 * but since it destroys encoders, and our destructor calls
2086 * drm_encoder_cleanup(), which takes the lock, we would deadlock.
2087 */
2088 drm_mode_config_cleanup(&dev_priv->drm);
2089 if (dev_priv->active_display_unit == vmw_du_legacy)
2090 ret = vmw_kms_ldu_close_display(dev_priv);
2091
2092 return ret;
2093 }
2094
2095 int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
2096 struct drm_file *file_priv)
2097 {
2098 struct drm_vmw_cursor_bypass_arg *arg = data;
2099 struct vmw_display_unit *du;
2100 struct drm_crtc *crtc;
2101 int ret = 0;
2102
2103 mutex_lock(&dev->mode_config.mutex);
2104 if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
2105
2106 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2107 du = vmw_crtc_to_du(crtc);
2108 du->hotspot_x = arg->xhot;
2109 du->hotspot_y = arg->yhot;
2110 }
2111
2112 mutex_unlock(&dev->mode_config.mutex);
2113 return 0;
2114 }
2115
2116 crtc = drm_crtc_find(dev, file_priv, arg->crtc_id);
2117 if (!crtc) {
2118 ret = -ENOENT;
2119 goto out;
2120 }
2121
2122 du = vmw_crtc_to_du(crtc);
2123
2124 du->hotspot_x = arg->xhot;
2125 du->hotspot_y = arg->yhot;
2126
2127 out:
2128 mutex_unlock(&dev->mode_config.mutex);
2129
2130 return ret;
2131 }
2132
2133 int vmw_kms_write_svga(struct vmw_private *vmw_priv,
2134 unsigned width, unsigned height, unsigned pitch,
2135 unsigned bpp, unsigned depth)
2136 {
2137 if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
2138 vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
2139 else if (vmw_fifo_have_pitchlock(vmw_priv))
2140 vmw_fifo_mem_write(vmw_priv, SVGA_FIFO_PITCHLOCK, pitch);
2141 vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
2142 vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
2143 if ((vmw_priv->capabilities & SVGA_CAP_8BIT_EMULATION) != 0)
2144 vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);
2145
2146 if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
2147 DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
2148 depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
2149 return -EINVAL;
2150 }
2151
2152 return 0;
2153 }
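
/*
 * Illustrative call (hypothetical 1024x768 mode, assuming 32 bpp with a
 * depth of 24):
 *
 *	vmw_kms_write_svga(vmw_priv, 1024, 768, 1024 * 4, 32, 24);
 *
 * This writes the pitch via SVGA_REG_PITCHLOCK or the FIFO word
 * (depending on capabilities), then WIDTH/HEIGHT/BITS_PER_PIXEL, and
 * fails with -EINVAL if the host's SVGA_REG_DEPTH disagrees with the
 * requested depth.
 */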
2154
2155 static
2156 bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
2157 u64 pitch,
2158 u64 height)
2159 {
2160 return (pitch * height) < (u64)dev_priv->vram_size;
2161 }
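
/*
 * Worked example of the check above (hypothetical mode): a 1920x1080
 * mode at 4 bytes per pixel needs
 *
 *	pitch * height = (1920 * 4) * 1080 = 8,294,400 bytes (~7.9 MiB)
 *
 * which must be strictly less than dev_priv->vram_size for the mode to
 * be accepted.
 */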
2162
2163 /**
2164 * vmw_du_update_layout - Update the display units with the topology from
2165 * the resolution plugin and generate a DRM uevent
2166 * @dev_priv: device private
2167 * @num_rects: number of drm_rect entries in @rects
2168 * @rects: topology to update
2169 */
2170 static int vmw_du_update_layout(struct vmw_private *dev_priv,
2171 unsigned int num_rects, struct drm_rect *rects)
2172 {
2173 struct drm_device *dev = &dev_priv->drm;
2174 struct vmw_display_unit *du;
2175 struct drm_connector *con;
2176 struct drm_connector_list_iter conn_iter;
2177 struct drm_modeset_acquire_ctx ctx;
2178 struct drm_crtc *crtc;
2179 int ret;
2180
2181 /* Currently gui_x/y are protected by the crtc mutex */
2182 mutex_lock(&dev->mode_config.mutex);
2183 drm_modeset_acquire_init(&ctx, 0);
2184 retry:
2185 drm_for_each_crtc(crtc, dev) {
2186 ret = drm_modeset_lock(&crtc->mutex, &ctx);
2187 if (ret < 0) {
2188 if (ret == -EDEADLK) {
2189 drm_modeset_backoff(&ctx);
2190 goto retry;
2191 }
2192 goto out_fini;
2193 }
2194 }
2195
2196 drm_connector_list_iter_begin(dev, &conn_iter);
2197 drm_for_each_connector_iter(con, &conn_iter) {
2198 du = vmw_connector_to_du(con);
2199 if (num_rects > du->unit) {
2200 du->pref_width = drm_rect_width(&rects[du->unit]);
2201 du->pref_height = drm_rect_height(&rects[du->unit]);
2202 du->pref_active = true;
2203 du->gui_x = rects[du->unit].x1;
2204 du->gui_y = rects[du->unit].y1;
2205 } else {
2206 du->pref_width = VMWGFX_MIN_INITIAL_WIDTH;
2207 du->pref_height = VMWGFX_MIN_INITIAL_HEIGHT;
2208 du->pref_active = false;
2209 du->gui_x = 0;
2210 du->gui_y = 0;
2211 }
2212 }
2213 drm_connector_list_iter_end(&conn_iter);
2214
2215 list_for_each_entry(con, &dev->mode_config.connector_list, head) {
2216 du = vmw_connector_to_du(con);
2217 if (num_rects > du->unit) {
2218 drm_object_property_set_value
2219 (&con->base, dev->mode_config.suggested_x_property,
2220 du->gui_x);
2221 drm_object_property_set_value
2222 (&con->base, dev->mode_config.suggested_y_property,
2223 du->gui_y);
2224 } else {
2225 drm_object_property_set_value
2226 (&con->base, dev->mode_config.suggested_x_property,
2227 0);
2228 drm_object_property_set_value
2229 (&con->base, dev->mode_config.suggested_y_property,
2230 0);
2231 }
2232 con->status = vmw_du_connector_detect(con, true);
2233 }
2234 out_fini:
2235 drm_modeset_drop_locks(&ctx);
2236 drm_modeset_acquire_fini(&ctx);
2237 mutex_unlock(&dev->mode_config.mutex);
2238
2239 drm_sysfs_hotplug_event(dev);
2240
2241 return 0;
2242 }
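
/*
 * Illustrative call (hypothetical single-output topology): one 800x600
 * output at the origin would be applied as
 *
 *	struct drm_rect rect = { 0, 0, 800, 600 };
 *
 *	vmw_du_update_layout(dev_priv, 1, &rect);
 *
 * Units with unit >= num_rects fall back to the minimal initial size
 * and are marked inactive.
 */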
2243
2244 int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
2245 u16 *r, u16 *g, u16 *b,
2246 uint32_t size,
2247 struct drm_modeset_acquire_ctx *ctx)
2248 {
2249 struct vmw_private *dev_priv = vmw_priv(crtc->dev);
2250 int i;
2251
2252 for (i = 0; i < size; i++) {
2253 DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
2254 r[i], g[i], b[i]);
2255 vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
2256 vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
2257 vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
2258 }
2259
2260 return 0;
2261 }
2262
2263 int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
2264 {
2265 return 0;
2266 }
2267
2268 enum drm_connector_status
2269 vmw_du_connector_detect(struct drm_connector *connector, bool force)
2270 {
2271 uint32_t num_displays;
2272 struct drm_device *dev = connector->dev;
2273 struct vmw_private *dev_priv = vmw_priv(dev);
2274 struct vmw_display_unit *du = vmw_connector_to_du(connector);
2275
2276 num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
2277
2278 return ((vmw_connector_to_du(connector)->unit < num_displays &&
2279 du->pref_active) ?
2280 connector_status_connected : connector_status_disconnected);
2281 }
2282
2283 /**
2284 * vmw_guess_mode_timing - Provide fake timings for a
2285 * 60Hz vrefresh mode.
2286 *
2287 * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
2288 * members filled in.
2289 */
2290 void vmw_guess_mode_timing(struct drm_display_mode *mode)
2291 {
2292 mode->hsync_start = mode->hdisplay + 50;
2293 mode->hsync_end = mode->hsync_start + 50;
2294 mode->htotal = mode->hsync_end + 50;
2295
2296 mode->vsync_start = mode->vdisplay + 50;
2297 mode->vsync_end = mode->vsync_start + 50;
2298 mode->vtotal = mode->vsync_end + 50;
2299
2300 mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
2301 }
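
/*
 * Worked example (hypothetical 640x480 mode): the fixed blanking adds
 * 150 pixels/lines per axis, so
 *
 *	htotal = 640 + 150 = 790
 *	vtotal = 480 + 150 = 630
 *	clock  = 790 * 630 / 100 * 6 = 29862 (kHz)
 *
 * i.e. approximately htotal * vtotal * 60 Hz expressed in kHz, which is
 * what yields the nominal 60Hz vrefresh.
 */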
2302
2303
2304 /**
2305 * vmw_kms_update_layout_ioctl - Handler for DRM_VMW_UPDATE_LAYOUT ioctl
2306 * @dev: drm device for the ioctl
2307 * @data: data pointer for the ioctl
2308 * @file_priv: drm file for the ioctl call
2309 *
2310 * Update the preferred topology of the display units as per the ioctl request.
2311 * The topology is expressed as an array of drm_vmw_rect.
2312 * e.g.
2313 * [0 0 640 480] [640 0 800 600] [0 480 640 480]
2314 *
2315 * NOTE:
2316 * The x and y offset (upper left) in drm_vmw_rect cannot be less than 0.
2317 * Besides the device limit on topology, x + w and y + h (lower right)
2318 * cannot be greater than INT_MAX, so a topology beyond these limits is rejected with an error.
2319 *
2320 * Returns:
2321 * Zero on success, negative errno on failure.
2322 */
2323 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
2324 struct drm_file *file_priv)
2325 {
2326 struct vmw_private *dev_priv = vmw_priv(dev);
2327 struct drm_mode_config *mode_config = &dev->mode_config;
2328 struct drm_vmw_update_layout_arg *arg =
2329 (struct drm_vmw_update_layout_arg *)data;
2330 const void __user *user_rects;
2331 struct drm_vmw_rect *rects;
2332 struct drm_rect *drm_rects;
2333 unsigned rects_size;
2334 int ret, i;
2335
2336 if (!arg->num_outputs) {
2337 struct drm_rect def_rect = {0, 0,
2338 VMWGFX_MIN_INITIAL_WIDTH,
2339 VMWGFX_MIN_INITIAL_HEIGHT};
2340 vmw_du_update_layout(dev_priv, 1, &def_rect);
2341 return 0;
2342 } else if (arg->num_outputs > VMWGFX_NUM_DISPLAY_UNITS) {
2343 return -E2BIG;
2344 }
2345
2346 rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
2347 rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
2348 GFP_KERNEL);
2349 if (unlikely(!rects))
2350 return -ENOMEM;
2351
2352 user_rects = (void __user *)(unsigned long)arg->rects;
2353 ret = copy_from_user(rects, user_rects, rects_size);
2354 if (unlikely(ret != 0)) {
2355 DRM_ERROR("Failed to get rects.\n");
2356 ret = -EFAULT;
2357 goto out_free;
2358 }
2359
2360 drm_rects = (struct drm_rect *)rects;
2361
2362 VMW_DEBUG_KMS("Layout count = %u\n", arg->num_outputs);
2363 for (i = 0; i < arg->num_outputs; i++) {
2364 struct drm_vmw_rect curr_rect;
2365
2366 /* Verify user-space rects for overflow as the kernel uses drm_rect */
2367 if ((rects[i].x + rects[i].w > INT_MAX) ||
2368 (rects[i].y + rects[i].h > INT_MAX)) {
2369 ret = -ERANGE;
2370 goto out_free;
2371 }
2372
2373 curr_rect = rects[i];
2374 drm_rects[i].x1 = curr_rect.x;
2375 drm_rects[i].y1 = curr_rect.y;
2376 drm_rects[i].x2 = curr_rect.x + curr_rect.w;
2377 drm_rects[i].y2 = curr_rect.y + curr_rect.h;
2378
2379 VMW_DEBUG_KMS(" x1 = %d y1 = %d x2 = %d y2 = %d\n",
2380 drm_rects[i].x1, drm_rects[i].y1,
2381 drm_rects[i].x2, drm_rects[i].y2);
2382
2383 /*
2384 * Currently this check limits the topology to within
2385 * mode_config->max (which is actually the maximum texture size
2386 * supported by the virtual device). This limit is here to address
2387 * window managers that create a big framebuffer for the whole
2388 * topology.
2389 */
2390 if (drm_rects[i].x1 < 0 || drm_rects[i].y1 < 0 ||
2391 drm_rects[i].x2 > mode_config->max_width ||
2392 drm_rects[i].y2 > mode_config->max_height) {
2393 VMW_DEBUG_KMS("Invalid layout %d %d %d %d\n",
2394 drm_rects[i].x1, drm_rects[i].y1,
2395 drm_rects[i].x2, drm_rects[i].y2);
2396 ret = -EINVAL;
2397 goto out_free;
2398 }
2399 }
2400
2401 ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);
2402
2403 if (ret == 0)
2404 vmw_du_update_layout(dev_priv, arg->num_outputs, drm_rects);
2405
2406 out_free:
2407 kfree(rects);
2408 return ret;
2409 }
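
/*
 * Sketch of the conversion above (hypothetical input): a user rect of
 * { .x = 640, .y = 0, .w = 800, .h = 600 } becomes the drm_rect
 * { .x1 = 640, .y1 = 0, .x2 = 1440, .y2 = 600 }; width and height are
 * turned into an exclusive lower-right corner before the topology is
 * checked against mode_config->max_width/max_height and display memory.
 */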
2410
2411 /**
2412 * vmw_kms_helper_dirty - Helper to build commands and perform actions based
2413 * on a set of cliprects and a set of display units.
2414 *
2415 * @dev_priv: Pointer to a device private structure.
2416 * @framebuffer: Pointer to the framebuffer on which to perform the actions.
2417 * @clips: A set of struct drm_clip_rect. Either this or @vclips must be NULL.
2418 * Cliprects are given in framebuffer coordinates.
2419 * @vclips: A set of struct drm_vmw_rect cliprects. Either this or @clips must
2420 * be NULL. Cliprects are given in source coordinates.
2421 * @dest_x: X coordinate offset for the crtc / destination clip rects.
2422 * @dest_y: Y coordinate offset for the crtc / destination clip rects.
2423 * @num_clips: Number of cliprects in the @clips or @vclips array.
2424 * @increment: Integer with which to increment the clip counter when looping.
2425 * Used to skip a predetermined number of clip rects.
2426 * @dirty: Closure structure. See the description of struct vmw_kms_dirty.
2427 */
2428 int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
2429 struct vmw_framebuffer *framebuffer,
2430 const struct drm_clip_rect *clips,
2431 const struct drm_vmw_rect *vclips,
2432 s32 dest_x, s32 dest_y,
2433 int num_clips,
2434 int increment,
2435 struct vmw_kms_dirty *dirty)
2436 {
2437 struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
2438 struct drm_crtc *crtc;
2439 u32 num_units = 0;
2440 u32 i, k;
2441
2442 dirty->dev_priv = dev_priv;
2443
2444 /* If crtc is passed, no need to iterate over other display units */
2445 if (dirty->crtc) {
2446 units[num_units++] = vmw_crtc_to_du(dirty->crtc);
2447 } else {
2448 list_for_each_entry(crtc, &dev_priv->drm.mode_config.crtc_list,
2449 head) {
2450 struct drm_plane *plane = crtc->primary;
2451
2452 if (plane->state->fb == &framebuffer->base)
2453 units[num_units++] = vmw_crtc_to_du(crtc);
2454 }
2455 }
2456
2457 for (k = 0; k < num_units; k++) {
2458 struct vmw_display_unit *unit = units[k];
2459 s32 crtc_x = unit->crtc.x;
2460 s32 crtc_y = unit->crtc.y;
2461 s32 crtc_width = unit->crtc.mode.hdisplay;
2462 s32 crtc_height = unit->crtc.mode.vdisplay;
2463 const struct drm_clip_rect *clips_ptr = clips;
2464 const struct drm_vmw_rect *vclips_ptr = vclips;
2465
2466 dirty->unit = unit;
2467 if (dirty->fifo_reserve_size > 0) {
2468 dirty->cmd = VMW_CMD_RESERVE(dev_priv,
2469 dirty->fifo_reserve_size);
2470 if (!dirty->cmd)
2471 return -ENOMEM;
2472
2473 memset(dirty->cmd, 0, dirty->fifo_reserve_size);
2474 }
2475 dirty->num_hits = 0;
2476 for (i = 0; i < num_clips; i++, clips_ptr += increment,
2477 vclips_ptr += increment) {
2478 s32 clip_left;
2479 s32 clip_top;
2480
2481 /*
2482 * Select clip array type. Note that integer type
2483 * in @clips is unsigned short, whereas in @vclips
2484 * it's 32-bit.
2485 */
2486 if (clips) {
2487 dirty->fb_x = (s32) clips_ptr->x1;
2488 dirty->fb_y = (s32) clips_ptr->y1;
2489 dirty->unit_x2 = (s32) clips_ptr->x2 + dest_x -
2490 crtc_x;
2491 dirty->unit_y2 = (s32) clips_ptr->y2 + dest_y -
2492 crtc_y;
2493 } else {
2494 dirty->fb_x = vclips_ptr->x;
2495 dirty->fb_y = vclips_ptr->y;
2496 dirty->unit_x2 = dirty->fb_x + vclips_ptr->w +
2497 dest_x - crtc_x;
2498 dirty->unit_y2 = dirty->fb_y + vclips_ptr->h +
2499 dest_y - crtc_y;
2500 }
2501
2502 dirty->unit_x1 = dirty->fb_x + dest_x - crtc_x;
2503 dirty->unit_y1 = dirty->fb_y + dest_y - crtc_y;
2504
2505 /* Skip this clip if it's outside the crtc region */
2506 if (dirty->unit_x1 >= crtc_width ||
2507 dirty->unit_y1 >= crtc_height ||
2508 dirty->unit_x2 <= 0 || dirty->unit_y2 <= 0)
2509 continue;
2510
2511 /* Clip right and bottom to crtc limits */
2512 dirty->unit_x2 = min_t(s32, dirty->unit_x2,
2513 crtc_width);
2514 dirty->unit_y2 = min_t(s32, dirty->unit_y2,
2515 crtc_height);
2516
2517 /* Clip left and top to crtc limits */
2518 clip_left = min_t(s32, dirty->unit_x1, 0);
2519 clip_top = min_t(s32, dirty->unit_y1, 0);
2520 dirty->unit_x1 -= clip_left;
2521 dirty->unit_y1 -= clip_top;
2522 dirty->fb_x -= clip_left;
2523 dirty->fb_y -= clip_top;
2524
2525 dirty->clip(dirty);
2526 }
2527
2528 dirty->fifo_commit(dirty);
2529 }
2530
2531 return 0;
2532 }
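
/*
 * Clip translation sketch (hypothetical numbers): for a unit scanning
 * out at crtc_x = 1920, crtc_y = 0, a framebuffer clip spanning
 * x = 100..200 with dest_x = 1920 maps to
 *
 *	unit_x1 = 100 + 1920 - 1920 = 100
 *	unit_x2 = 200 + 1920 - 1920 = 200
 *
 * so the same framebuffer damage lands in unit-local coordinates on
 * each display unit and is then clipped to that unit's mode.
 */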
2533
2534 /**
2535 * vmw_kms_helper_validation_finish - Helper for post KMS command submission
2536 * cleanup and fencing
2537 * @dev_priv: Pointer to the device-private struct
2538 * @file_priv: Pointer identifying the client when user-space fencing is used
2539 * @ctx: Pointer to the validation context
2540 * @out_fence: If non-NULL, returned refcounted fence-pointer
2541 * @user_fence_rep: If non-NULL, pointer to user-space address area
2542 * in which to copy user-space fence info
2543 */
2544 void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
2545 struct drm_file *file_priv,
2546 struct vmw_validation_context *ctx,
2547 struct vmw_fence_obj **out_fence,
2548 struct drm_vmw_fence_rep __user *
2549 user_fence_rep)
2550 {
2551 struct vmw_fence_obj *fence = NULL;
2552 uint32_t handle = 0;
2553 int ret = 0;
2554
2555 if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) ||
2556 out_fence)
2557 ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
2558 file_priv ? &handle : NULL);
2559 vmw_validation_done(ctx, fence);
2560 if (file_priv)
2561 vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
2562 ret, user_fence_rep, fence,
2563 handle, -1);
2564 if (out_fence)
2565 *out_fence = fence;
2566 else
2567 vmw_fence_obj_unreference(&fence);
2568 }
2569
2570 /**
2571 * vmw_kms_update_proxy - Helper function to update a proxy surface from
2572 * its backing MOB.
2573 *
2574 * @res: Pointer to the surface resource
2575 * @clips: Clip rects in framebuffer (surface) space.
2576 * @num_clips: Number of clips in @clips.
2577 * @increment: Integer with which to increment the clip counter when looping.
2578 * Used to skip a predetermined number of clip rects.
2579 *
2580 * This function makes sure the proxy surface is updated from its backing MOB
2581 * using the region given by @clips. The surface resource @res and its backing
2582 * MOB need to be reserved and validated on call.
2583 */
2584 int vmw_kms_update_proxy(struct vmw_resource *res,
2585 const struct drm_clip_rect *clips,
2586 unsigned num_clips,
2587 int increment)
2588 {
2589 struct vmw_private *dev_priv = res->dev_priv;
2590 struct drm_vmw_size *size = &vmw_res_to_srf(res)->metadata.base_size;
2591 struct {
2592 SVGA3dCmdHeader header;
2593 SVGA3dCmdUpdateGBImage body;
2594 } *cmd;
2595 SVGA3dBox *box;
2596 size_t copy_size = 0;
2597 int i;
2598
2599 if (!clips)
2600 return 0;
2601
2602 cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd) * num_clips);
2603 if (!cmd)
2604 return -ENOMEM;
2605
2606 for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
2607 box = &cmd->body.box;
2608
2609 cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
2610 cmd->header.size = sizeof(cmd->body);
2611 cmd->body.image.sid = res->id;
2612 cmd->body.image.face = 0;
2613 cmd->body.image.mipmap = 0;
2614
2615 if (clips->x1 > size->width || clips->x2 > size->width ||
2616 clips->y1 > size->height || clips->y2 > size->height) {
2617 DRM_ERROR("Invalid clips outsize of framebuffer.\n");
2618 return -EINVAL;
2619 }
2620
2621 box->x = clips->x1;
2622 box->y = clips->y1;
2623 box->z = 0;
2624 box->w = clips->x2 - clips->x1;
2625 box->h = clips->y2 - clips->y1;
2626 box->d = 1;
2627
2628 copy_size += sizeof(*cmd);
2629 }
2630
2631 vmw_cmd_commit(dev_priv, copy_size);
2632
2633 return 0;
2634 }
2635
2636 /**
2637 * vmw_kms_create_implicit_placement_property - Set up the implicit placement
2638 * property.
2639 *
2640 * @dev_priv: Pointer to a device private struct.
2641 *
2642 * Sets up the implicit placement property unless it's already set up.
2643 */
2644 void
2645 vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv)
2646 {
2647 if (dev_priv->implicit_placement_property)
2648 return;
2649
2650 dev_priv->implicit_placement_property =
2651 drm_property_create_range(&dev_priv->drm,
2652 DRM_MODE_PROP_IMMUTABLE,
2653 "implicit_placement", 0, 1);
2654 }
2655
2656 /**
2657 * vmw_kms_suspend - Save modesetting state and turn modesetting off.
2658 *
2659 * @dev: Pointer to the drm device
2660 * Return: 0 on success. Negative error code on failure.
2661 */
2662 int vmw_kms_suspend(struct drm_device *dev)
2663 {
2664 struct vmw_private *dev_priv = vmw_priv(dev);
2665
2666 dev_priv->suspend_state = drm_atomic_helper_suspend(dev);
2667 if (IS_ERR(dev_priv->suspend_state)) {
2668 int ret = PTR_ERR(dev_priv->suspend_state);
2669
2670 DRM_ERROR("Failed kms suspend: %d\n", ret);
2671 dev_priv->suspend_state = NULL;
2672
2673 return ret;
2674 }
2675
2676 return 0;
2677 }
2678
2679
2680 /**
2681 * vmw_kms_resume - Re-enable modesetting and restore state
2682 *
2683 * @dev: Pointer to the drm device
2684 * Return: 0 on success. Negative error code on failure.
2685 *
2686 * State is resumed from a previous vmw_kms_suspend(). It's illegal
2687 * to call this function without a previous vmw_kms_suspend().
2688 */
2689 int vmw_kms_resume(struct drm_device *dev)
2690 {
2691 struct vmw_private *dev_priv = vmw_priv(dev);
2692 int ret;
2693
2694 if (WARN_ON(!dev_priv->suspend_state))
2695 return 0;
2696
2697 ret = drm_atomic_helper_resume(dev, dev_priv->suspend_state);
2698 dev_priv->suspend_state = NULL;
2699
2700 return ret;
2701 }
2702
2703 /**
2704 * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost
2705 *
2706 * @dev: Pointer to the drm device
2707 */
2708 void vmw_kms_lost_device(struct drm_device *dev)
2709 {
2710 drm_atomic_helper_shutdown(dev);
2711 }
2712
2713 /**
2714 * vmw_du_helper_plane_update - Helper to do plane update on a display unit.
2715 * @update: The closure structure.
2716 *
2717 * Call this helper after setting callbacks in &vmw_du_update_plane to do plane
2718 * update on display unit.
2719 *
2720 * Return: 0 on success or a negative error code on failure.
2721 */
2722 int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
2723 {
2724 struct drm_plane_state *state = update->plane->state;
2725 struct drm_plane_state *old_state = update->old_state;
2726 struct drm_atomic_helper_damage_iter iter;
2727 struct drm_rect clip;
2728 struct drm_rect bb;
2729 DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
2730 uint32_t reserved_size = 0;
2731 uint32_t submit_size = 0;
2732 uint32_t curr_size = 0;
2733 uint32_t num_hits = 0;
2734 void *cmd_start;
2735 char *cmd_next;
2736 int ret;
2737
2738 /*
2739 * Iterate in advance to check whether a plane update is really needed,
2740 * and to find the number of clips that actually lie in the plane src, for fifo allocation.
2741 */
2742 drm_atomic_helper_damage_iter_init(&iter, old_state, state);
2743 drm_atomic_for_each_plane_damage(&iter, &clip)
2744 num_hits++;
2745
2746 if (num_hits == 0)
2747 return 0;
2748
2749 if (update->vfb->bo) {
2750 struct vmw_framebuffer_bo *vfbbo =
2751 container_of(update->vfb, typeof(*vfbbo), base);
2752
2753 /*
2754 * For screen targets we want a mappable bo; for everything else we want
2755 * an accelerated, i.e. host backed (vram or gmr), bo. If the display
2756 * unit is not a screen target then MOBs shouldn't be available.
2757 */
2758 if (update->dev_priv->active_display_unit == vmw_du_screen_target) {
2759 vmw_bo_placement_set(vfbbo->buffer,
2760 VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR,
2761 VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR);
2762 } else {
2763 WARN_ON(update->dev_priv->has_mob);
2764 vmw_bo_placement_set_default_accelerated(vfbbo->buffer);
2765 }
2766 ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer);
2767 } else {
2768 struct vmw_framebuffer_surface *vfbs =
2769 container_of(update->vfb, typeof(*vfbs), base);
2770
2771 ret = vmw_validation_add_resource(&val_ctx, &vfbs->surface->res,
2772 0, VMW_RES_DIRTY_NONE, NULL,
2773 NULL);
2774 }
2775
2776 if (ret)
2777 return ret;
2778
2779 ret = vmw_validation_prepare(&val_ctx, update->mutex, update->intr);
2780 if (ret)
2781 goto out_unref;
2782
2783 reserved_size = update->calc_fifo_size(update, num_hits);
2784 cmd_start = VMW_CMD_RESERVE(update->dev_priv, reserved_size);
2785 if (!cmd_start) {
2786 ret = -ENOMEM;
2787 goto out_revert;
2788 }
2789
2790 cmd_next = cmd_start;
2791
2792 if (update->post_prepare) {
2793 curr_size = update->post_prepare(update, cmd_next);
2794 cmd_next += curr_size;
2795 submit_size += curr_size;
2796 }
2797
2798 if (update->pre_clip) {
2799 curr_size = update->pre_clip(update, cmd_next, num_hits);
2800 cmd_next += curr_size;
2801 submit_size += curr_size;
2802 }
2803
2804 bb.x1 = INT_MAX;
2805 bb.y1 = INT_MAX;
2806 bb.x2 = INT_MIN;
2807 bb.y2 = INT_MIN;
2808
2809 drm_atomic_helper_damage_iter_init(&iter, old_state, state);
2810 drm_atomic_for_each_plane_damage(&iter, &clip) {
2811 uint32_t fb_x = clip.x1;
2812 uint32_t fb_y = clip.y1;
2813
2814 vmw_du_translate_to_crtc(state, &clip);
2815 if (update->clip) {
2816 curr_size = update->clip(update, cmd_next, &clip, fb_x,
2817 fb_y);
2818 cmd_next += curr_size;
2819 submit_size += curr_size;
2820 }
2821 bb.x1 = min_t(int, bb.x1, clip.x1);
2822 bb.y1 = min_t(int, bb.y1, clip.y1);
2823 bb.x2 = max_t(int, bb.x2, clip.x2);
2824 bb.y2 = max_t(int, bb.y2, clip.y2);
2825 }
2826
2827 curr_size = update->post_clip(update, cmd_next, &bb);
2828 submit_size += curr_size;
2829
2830 if (reserved_size < submit_size)
2831 submit_size = 0;
2832
2833 vmw_cmd_commit(update->dev_priv, submit_size);
2834
2835 vmw_kms_helper_validation_finish(update->dev_priv, NULL, &val_ctx,
2836 update->out_fence, NULL);
2837 return ret;
2838
2839 out_revert:
2840 vmw_validation_revert(&val_ctx);
2841
2842 out_unref:
2843 vmw_validation_unref_lists(&val_ctx);
2844 return ret;
2845 }
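
/*
 * Caller sketch (callback names hypothetical; the fields are those used
 * above): a display unit fills in the closure and invokes the helper:
 *
 *	struct vmw_du_update_plane update = {
 *		.plane = plane,
 *		.old_state = old_state,
 *		.dev_priv = dev_priv,
 *		.vfb = vfb,
 *		.calc_fifo_size = my_du_calc_fifo_size,	(hypothetical)
 *		.clip = my_du_clip,			(hypothetical)
 *		.post_clip = my_du_post_clip,		(hypothetical)
 *	};
 *	int ret = vmw_du_helper_plane_update(&update);
 *
 * Per the flow above, .calc_fifo_size and .post_clip are mandatory;
 * .post_prepare, .pre_clip and .clip are optional.
 */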
2846
2847 /**
2848 * vmw_connector_mode_valid - implements drm_connector_helper_funcs.mode_valid callback
2849 *
2850 * @connector: the drm connector, part of a DU container
2851 * @mode: drm mode to check
2852 *
2853 * Returns MODE_OK on success, or a drm_mode_status error code.
2854 */
2855 enum drm_mode_status vmw_connector_mode_valid(struct drm_connector *connector,
2856 struct drm_display_mode *mode)
2857 {
2858 enum drm_mode_status ret;
2859 struct drm_device *dev = connector->dev;
2860 struct vmw_private *dev_priv = vmw_priv(dev);
2861 u32 assumed_cpp = 4;
2862
2863 if (dev_priv->assume_16bpp)
2864 assumed_cpp = 2;
2865
2866 ret = drm_mode_validate_size(mode, dev_priv->texture_max_width,
2867 dev_priv->texture_max_height);
2868 if (ret != MODE_OK)
2869 return ret;
2870
2871 if (!vmw_kms_validate_mode_vram(dev_priv,
2872 mode->hdisplay * assumed_cpp,
2873 mode->vdisplay))
2874 return MODE_MEM;
2875
2876 return MODE_OK;
2877 }
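
/*
 * Example of the VRAM check above (hypothetical device state): with
 * assume_16bpp set, a 2560x1440 mode is validated as
 *
 *	pitch * height = (2560 * 2) * 1440 = 7,372,800 bytes (~7 MiB)
 *
 * against dev_priv->vram_size; at the default 4 bytes per pixel the
 * same mode needs twice that.
 */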
2878
2879 /**
2880 * vmw_connector_get_modes - implements drm_connector_helper_funcs.get_modes callback
2881 *
2882 * @connector: the drm connector, part of a DU container
2883 *
2884 * Returns the number of added modes.
2885 */
2886 int vmw_connector_get_modes(struct drm_connector *connector)
2887 {
2888 struct vmw_display_unit *du = vmw_connector_to_du(connector);
2889 struct drm_device *dev = connector->dev;
2890 struct vmw_private *dev_priv = vmw_priv(dev);
2891 struct drm_display_mode *mode = NULL;
2892 struct drm_display_mode prefmode = { DRM_MODE("preferred",
2893 DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
2894 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2895 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
2896 };
2897 u32 max_width;
2898 u32 max_height;
2899 u32 num_modes;
2900
2901 /* Add preferred mode */
2902 mode = drm_mode_duplicate(dev, &prefmode);
2903 if (!mode)
2904 return 0;
2905
2906 mode->hdisplay = du->pref_width;
2907 mode->vdisplay = du->pref_height;
2908 vmw_guess_mode_timing(mode);
2909 drm_mode_set_name(mode);
2910
2911 drm_mode_probed_add(connector, mode);
2912 drm_dbg_kms(dev, "preferred mode " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
2913
2914 /* Probe connector for all modes not exceeding our geom limits */
2915 max_width = dev_priv->texture_max_width;
2916 max_height = dev_priv->texture_max_height;
2917
2918 if (dev_priv->active_display_unit == vmw_du_screen_target) {
2919 max_width = min(dev_priv->stdu_max_width, max_width);
2920 max_height = min(dev_priv->stdu_max_height, max_height);
2921 }
2922
2923 num_modes = 1 + drm_add_modes_noedid(connector, max_width, max_height);
2924
2925 return num_modes;
2926 }
2927