/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>

#include "virtgpu_drv.h"

/* DRM fourccs advertised for the primary plane. */
static const uint32_t virtio_gpu_formats[] = {
	DRM_FORMAT_HOST_XRGB8888,
};

/* DRM fourccs advertised for the cursor plane (needs alpha). */
static const uint32_t virtio_gpu_cursor_formats[] = {
	DRM_FORMAT_HOST_ARGB8888,
};

/*
 * Map a DRM fourcc onto the matching VIRTIO_GPU_FORMAT_* resource
 * format understood by the host.
 *
 * Returns 0 (and fires a WARN) for any fourcc outside the small set the
 * planes advertise above; callers are expected to pass only formats
 * reachable from virtio_gpu_formats[] / virtio_gpu_cursor_formats[].
 */
uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc)
{
	uint32_t format;

	switch (drm_fourcc) {
	case DRM_FORMAT_XRGB8888:
		format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
		break;
	case DRM_FORMAT_ARGB8888:
		format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
		break;
	case DRM_FORMAT_BGRX8888:
		format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM;
		break;
	case DRM_FORMAT_BGRA8888:
		format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
		break;
	default:
		/*
		 * This should not happen, we handle everything listed
		 * in virtio_gpu_formats[].
		 */
		format = 0;
		break;
	}
	WARN_ON(format == 0);
	return format;
}

/* Plain atomic helpers; no driver-specific plane-object state. */
static const struct drm_plane_funcs virtio_gpu_plane_funcs = {
	.update_plane		= drm_atomic_helper_update_plane,
	.disable_plane		= drm_atomic_helper_disable_plane,
	.destroy		= drm_plane_cleanup,
	.reset			= drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
};

/*
 * Validate a plane update: no scaling is supported (min == max ==
 * DRM_PLANE_NO_SCALING); only cursor planes may be positioned freely
 * ("is_cursor" feeds the can_position argument); updates on a disabled
 * CRTC are allowed (last argument true).
 *
 * A state with no fb (or, WARN, no crtc) is trivially accepted.
 */
static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
					 struct drm_atomic_state *state)
{
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
										 plane);
	bool is_cursor = plane->type == DRM_PLANE_TYPE_CURSOR;
	struct drm_crtc_state *crtc_state;
	int ret;

	if (!new_plane_state->fb || WARN_ON(!new_plane_state->crtc))
		return 0;

	crtc_state = drm_atomic_get_crtc_state(state,
					       new_plane_state->crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
						  DRM_PLANE_NO_SCALING,
						  DRM_PLANE_NO_SCALING,
						  is_cursor, true);
	return ret;
}

/*
 * Copy the damaged rectangle of a dumb (guest-backed, CPU-rendered) BO
 * to the host with a TRANSFER_TO_HOST_2D command.  "off" is the byte
 * offset of the rect's top-left pixel within the framebuffer.
 *
 * On array-allocation failure the update is silently dropped; the worst
 * case is stale host-side contents for this frame.
 */
static void virtio_gpu_update_dumb_bo(struct virtio_gpu_device *vgdev,
				      struct drm_plane_state *state,
				      struct drm_rect *rect)
{
	struct virtio_gpu_object *bo =
		gem_to_virtio_gpu_obj(state->fb->obj[0]);
	struct virtio_gpu_object_array *objs;
	uint32_t w = rect->x2 - rect->x1;
	uint32_t h = rect->y2 - rect->y1;
	uint32_t x = rect->x1;
	uint32_t y = rect->y1;
	uint32_t off = x * state->fb->format->cpp[0] +
		y * state->fb->pitches[0];

	objs = virtio_gpu_array_alloc(1);
	if (!objs)
		return;
	virtio_gpu_array_add_obj(objs, &bo->base.base);

	virtio_gpu_cmd_transfer_to_host_2d(vgdev, off, w, h, x, y,
					   objs, NULL);
}

/*
 * Send a RESOURCE_FLUSH for the plane's current fb so the host displays
 * the (already transferred) contents of the given rectangle.
 *
 * If prepare_fb armed a fence (dumb BO, new fb), flush with the BO's
 * reservation locked and the fence attached, then wait for completion —
 * bounded at 50ms so a stuck host cannot stall the commit indefinitely
 * (NOTE(review): timeout expiry is deliberately not treated as an
 * error).  The fence reference is dropped and cleared either way.
 */
static void virtio_gpu_resource_flush(struct drm_plane *plane,
				      uint32_t x, uint32_t y,
				      uint32_t width, uint32_t height)
{
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_object *bo;

	vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
	bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
	if (vgfb->fence) {
		struct virtio_gpu_object_array *objs;

		objs = virtio_gpu_array_alloc(1);
		if (!objs)
			return;
		virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
		virtio_gpu_array_lock_resv(objs);
		virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
					      width, height, objs, vgfb->fence);
		virtio_gpu_notify(vgdev);

		dma_fence_wait_timeout(&vgfb->fence->f, true,
				       msecs_to_jiffies(50));
		dma_fence_put(&vgfb->fence->f);
		vgfb->fence = NULL;
	} else {
		virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y,
					      width, height, NULL, NULL);
		virtio_gpu_notify(vgdev);
	}
}

/*
 * atomic_update for the primary plane.
 *
 * Order matters: (1) handle disable by pointing the scanout at resource
 * 0; (2) merge the accumulated damage; (3) for dumb BOs, transfer the
 * damaged rect to the host; (4) re-issue SET_SCANOUT (or the blob
 * variant) when the fb/viewport changed or a modeset was flagged;
 * (5) flush the damaged rect so the host presents it.
 */
static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
					    struct drm_atomic_state *state)
{
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
									   plane);
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_output *output = NULL;
	struct virtio_gpu_object *bo;
	struct drm_rect rect;

	/* Prefer the old state's crtc so a disable targets the output
	 * the plane is leaving. */
	if (plane->state->crtc)
		output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
	if (old_state->crtc)
		output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
	if (WARN_ON(!output))
		return;

	if (!plane->state->fb || !output->crtc.state->active) {
		DRM_DEBUG("nofb\n");
		/* Resource id 0 detaches the scanout on the host. */
		virtio_gpu_cmd_set_scanout(vgdev, output->index, 0,
					   plane->state->src_w >> 16,
					   plane->state->src_h >> 16,
					   0, 0);
		virtio_gpu_notify(vgdev);
		return;
	}

	/* Nothing changed -> nothing to transfer or flush. */
	if (!drm_atomic_helper_damage_merged(old_state, plane->state, &rect))
		return;

	bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);
	if (bo->dumb)
		virtio_gpu_update_dumb_bo(vgdev, plane->state, &rect);

	if (plane->state->fb != old_state->fb ||
	    plane->state->src_w != old_state->src_w ||
	    plane->state->src_h != old_state->src_h ||
	    plane->state->src_x != old_state->src_x ||
	    plane->state->src_y != old_state->src_y ||
	    output->needs_modeset) {
		output->needs_modeset = false;
		/* src_* values are 16.16 fixed point, hence the shifts. */
		DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d, src %dx%d+%d+%d\n",
			  bo->hw_res_handle,
			  plane->state->crtc_w, plane->state->crtc_h,
			  plane->state->crtc_x, plane->state->crtc_y,
			  plane->state->src_w >> 16,
			  plane->state->src_h >> 16,
			  plane->state->src_x >> 16,
			  plane->state->src_y >> 16);

		if (bo->host3d_blob || bo->guest_blob) {
			virtio_gpu_cmd_set_scanout_blob
						(vgdev, output->index, bo,
						 plane->state->fb,
						 plane->state->src_w >> 16,
						 plane->state->src_h >> 16,
						 plane->state->src_x >> 16,
						 plane->state->src_y >> 16);
		} else {
			virtio_gpu_cmd_set_scanout(vgdev, output->index,
						   bo->hw_res_handle,
						   plane->state->src_w >> 16,
						   plane->state->src_h >> 16,
						   plane->state->src_x >> 16,
						   plane->state->src_y >> 16);
		}
	}

	virtio_gpu_resource_flush(plane,
				  rect.x1,
				  rect.y1,
				  rect.x2 - rect.x1,
				  rect.y2 - rect.y1);
}

/*
 * prepare_fb: allocate a fence that the later transfer/flush will be
 * attached to, but only for dumb BOs and only when the fb actually
 * changes (plane->state here is still the *old* state).  Primary
 * planes backed by guest blobs, and non-dumb BOs, need no fence.
 *
 * Returns 0 on success, -ENOMEM if fence allocation fails.
 */
static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
				       struct drm_plane_state *new_state)
{
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_object *bo;

	if (!new_state->fb)
		return 0;

	vgfb = to_virtio_gpu_framebuffer(new_state->fb);
	bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
	if (!bo || (plane->type == DRM_PLANE_TYPE_PRIMARY && !bo->guest_blob))
		return 0;

	if (bo->dumb && (plane->state->fb != new_state->fb)) {
		vgfb->fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context,
						     0);
		if (!vgfb->fence)
			return -ENOMEM;
	}

	return 0;
}

/*
 * cleanup_fb: drop a fence that prepare_fb allocated but the update
 * path never consumed (e.g. the commit was aborted).
 */
static void virtio_gpu_plane_cleanup_fb(struct drm_plane *plane,
					struct drm_plane_state *state)
{
	struct virtio_gpu_framebuffer *vgfb;

	if (!state->fb)
		return;

	vgfb = to_virtio_gpu_framebuffer(state->fb);
	if (vgfb->fence) {
		dma_fence_put(&vgfb->fence->f);
		vgfb->fence = NULL;
	}
}

/*
 * atomic_update for the cursor plane.
 *
 * A new cursor image (fb changed, dumb BO) is first transferred to the
 * host and waited on synchronously — the fence armed by prepare_fb
 * exists whenever this branch is taken, since prepare_fb used the same
 * dumb/fb-changed condition.  Then either an UPDATE_CURSOR (new image,
 * carries hotspot + resource id) or a MOVE_CURSOR (position only) is
 * pushed through the dedicated cursor virtqueue via
 * virtio_gpu_cursor_ping().  A NULL fb hides the cursor (handle 0).
 */
static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
					   struct drm_atomic_state *state)
{
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
									   plane);
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_output *output = NULL;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_object *bo = NULL;
	uint32_t handle;

	if (plane->state->crtc)
		output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
	if (old_state->crtc)
		output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
	if (WARN_ON(!output))
		return;

	if (plane->state->fb) {
		vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
		bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
		handle = bo->hw_res_handle;
	} else {
		handle = 0;
	}

	if (bo && bo->dumb && (plane->state->fb != old_state->fb)) {
		/* new cursor -- update & wait */
		struct virtio_gpu_object_array *objs;

		objs = virtio_gpu_array_alloc(1);
		if (!objs)
			return;
		virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
		virtio_gpu_array_lock_resv(objs);
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, 0,
			 plane->state->crtc_w,
			 plane->state->crtc_h,
			 0, 0, objs, vgfb->fence);
		virtio_gpu_notify(vgdev);
		dma_fence_wait(&vgfb->fence->f, true);
		dma_fence_put(&vgfb->fence->f);
		vgfb->fence = NULL;
	}

	if (plane->state->fb != old_state->fb) {
		DRM_DEBUG("update, handle %d, pos +%d+%d, hot %d,%d\n", handle,
			  plane->state->crtc_x,
			  plane->state->crtc_y,
			  plane->state->fb ? plane->state->fb->hot_x : 0,
			  plane->state->fb ? plane->state->fb->hot_y : 0);
		output->cursor.hdr.type =
			cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
		output->cursor.resource_id = cpu_to_le32(handle);
		if (plane->state->fb) {
			output->cursor.hot_x =
				cpu_to_le32(plane->state->fb->hot_x);
			output->cursor.hot_y =
				cpu_to_le32(plane->state->fb->hot_y);
		} else {
			output->cursor.hot_x = cpu_to_le32(0);
			output->cursor.hot_y = cpu_to_le32(0);
		}
	} else {
		DRM_DEBUG("move +%d+%d\n",
			  plane->state->crtc_x,
			  plane->state->crtc_y);
		output->cursor.hdr.type =
			cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR);
	}
	output->cursor.pos.x = cpu_to_le32(plane->state->crtc_x);
	output->cursor.pos.y = cpu_to_le32(plane->state->crtc_y);
	virtio_gpu_cursor_ping(vgdev, output);
}

static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = {
	.prepare_fb		= virtio_gpu_plane_prepare_fb,
	.cleanup_fb		= virtio_gpu_plane_cleanup_fb,
	.atomic_check		= virtio_gpu_plane_atomic_check,
	.atomic_update		= virtio_gpu_primary_plane_update,
};

static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = {
	.prepare_fb		= virtio_gpu_plane_prepare_fb,
	.cleanup_fb		= virtio_gpu_plane_cleanup_fb,
	.atomic_check		= virtio_gpu_plane_atomic_check,
	.atomic_update		= virtio_gpu_cursor_plane_update,
};

/*
 * Allocate (drm-managed, so no explicit free) a primary or cursor plane
 * for scanout "index" and hook up the matching format list and helper
 * vtable.  Returns the plane or an ERR_PTR from the allocation helper.
 */
struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
					enum drm_plane_type type,
					int index)
{
	struct drm_device *dev = vgdev->ddev;
	const struct drm_plane_helper_funcs *funcs;
	struct drm_plane *plane;
	const uint32_t *formats;
	int nformats;

	if (type == DRM_PLANE_TYPE_CURSOR) {
		formats = virtio_gpu_cursor_formats;
		nformats = ARRAY_SIZE(virtio_gpu_cursor_formats);
		funcs = &virtio_gpu_cursor_helper_funcs;
	} else {
		formats = virtio_gpu_formats;
		nformats = ARRAY_SIZE(virtio_gpu_formats);
		funcs = &virtio_gpu_primary_helper_funcs;
	}

	plane = drmm_universal_plane_alloc(dev, struct drm_plane, dev,
					   1 << index, &virtio_gpu_plane_funcs,
					   formats, nformats, NULL, type, NULL);
	if (IS_ERR(plane))
		return plane;

	drm_plane_helper_add(plane, funcs);
	return plane;
}