/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>

#include "virtgpu_drv.h"

static const uint32_t virtio_gpu_formats[] = {
	DRM_FORMAT_HOST_XRGB8888,
};

static const uint32_t virtio_gpu_cursor_formats[] = {
	DRM_FORMAT_HOST_ARGB8888,
};

uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc)
{
	uint32_t format;

	switch (drm_fourcc) {
	case DRM_FORMAT_XRGB8888:
		format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
		break;
	case DRM_FORMAT_ARGB8888:
		format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
		break;
	case DRM_FORMAT_BGRX8888:
		format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM;
		break;
	case DRM_FORMAT_BGRA8888:
		format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
		break;
	default:
		/*
		 * This should not happen, we handle everything listed
		 * in virtio_gpu_formats[].
		 */
		format = 0;
		break;
	}
	WARN_ON(format == 0);
	return format;
}
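
/*
 * Planes are allocated with kzalloc() in virtio_gpu_plane_init() rather
 * than embedded in another structure, so the destroy hook has to free
 * the allocation itself once the DRM core is done with it.
 */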
static void virtio_gpu_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
	kfree(plane);
}

static const struct drm_plane_funcs virtio_gpu_plane_funcs = {
	.update_plane		= drm_atomic_helper_update_plane,
	.disable_plane		= drm_atomic_helper_disable_plane,
	.destroy		= virtio_gpu_plane_destroy,
	.reset			= drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
};

static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
					 struct drm_plane_state *state)
{
	bool is_cursor = plane->type == DRM_PLANE_TYPE_CURSOR;
	struct drm_crtc_state *crtc_state;
	int ret;

	if (!state->fb || WARN_ON(!state->crtc))
		return 0;

	crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	/*
	 * No scaling is supported.  The primary plane has to fill the
	 * whole CRTC; only the cursor plane may be positioned freely.
	 */
	ret = drm_atomic_helper_check_plane_state(state, crtc_state,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  is_cursor, true);
	return ret;
}

static void virtio_gpu_update_dumb_bo(struct virtio_gpu_device *vgdev,
				      struct drm_plane_state *state,
				      struct drm_rect *rect)
{
	struct virtio_gpu_object *bo =
		gem_to_virtio_gpu_obj(state->fb->obj[0]);
	struct virtio_gpu_object_array *objs;
	uint32_t w = rect->x2 - rect->x1;
	uint32_t h = rect->y2 - rect->y1;
	uint32_t x = rect->x1;
	uint32_t y = rect->y1;
	/* byte offset of the damage rectangle within the backing storage */
	uint32_t off = x * state->fb->format->cpp[0] +
		y * state->fb->pitches[0];

	objs = virtio_gpu_array_alloc(1);
	if (!objs)
		return;
	virtio_gpu_array_add_obj(objs, &bo->base.base);

	virtio_gpu_cmd_transfer_to_host_2d(vgdev, off, w, h, x, y,
					   objs, NULL);
}
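
/*
 * Atomic update handler for the primary plane.  The flow, as implemented
 * below:
 *
 *  1. Resolve the output from the new CRTC, or from the old one when the
 *     plane is being disabled.
 *  2. Without a framebuffer, or with an inactive CRTC, detach the
 *     scanout (resource id 0) and return.
 *  3. For dumb BOs, transfer the merged damage rectangle to the host.
 *  4. If the framebuffer or the source viewport changed, or a modeset is
 *     pending, re-issue the set_scanout command.
 *  5. Flush the damaged region so the host repaints it.
 */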
static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
					    struct drm_plane_state *old_state)
{
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_output *output = NULL;
	struct virtio_gpu_object *bo;
	struct drm_rect rect;

	if (plane->state->crtc)
		output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
	if (old_state->crtc)
		output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
	if (WARN_ON(!output))
		return;

	if (!plane->state->fb || !output->crtc.state->active) {
		/* no framebuffer or inactive CRTC: turn the scanout off */
		DRM_DEBUG("nofb\n");
		virtio_gpu_cmd_set_scanout(vgdev, output->index, 0,
					   plane->state->src_w >> 16,
					   plane->state->src_h >> 16,
					   0, 0);
		virtio_gpu_notify(vgdev);
		return;
	}

	/* bail out if the merged damage rectangle is empty */
	if (!drm_atomic_helper_damage_merged(old_state, plane->state, &rect))
		return;

	bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);
	if (bo->dumb)
		virtio_gpu_update_dumb_bo(vgdev, plane->state, &rect);

	if (plane->state->fb != old_state->fb ||
	    plane->state->src_w != old_state->src_w ||
	    plane->state->src_h != old_state->src_h ||
	    plane->state->src_x != old_state->src_x ||
	    plane->state->src_y != old_state->src_y ||
	    output->needs_modeset) {
		output->needs_modeset = false;
		DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d, src %dx%d+%d+%d\n",
			  bo->hw_res_handle,
			  plane->state->crtc_w, plane->state->crtc_h,
			  plane->state->crtc_x, plane->state->crtc_y,
			  plane->state->src_w >> 16,
			  plane->state->src_h >> 16,
			  plane->state->src_x >> 16,
			  plane->state->src_y >> 16);

		if (bo->host3d_blob || bo->guest_blob) {
			virtio_gpu_cmd_set_scanout_blob
					(vgdev, output->index, bo,
					 plane->state->fb,
					 plane->state->src_w >> 16,
					 plane->state->src_h >> 16,
					 plane->state->src_x >> 16,
					 plane->state->src_y >> 16);
		} else {
			virtio_gpu_cmd_set_scanout(vgdev, output->index,
						   bo->hw_res_handle,
						   plane->state->src_w >> 16,
						   plane->state->src_h >> 16,
						   plane->state->src_x >> 16,
						   plane->state->src_y >> 16);
		}
	}

	virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle,
				      rect.x1,
				      rect.y1,
				      rect.x2 - rect.x1,
				      rect.y2 - rect.y1);
	virtio_gpu_notify(vgdev);
}

static int virtio_gpu_cursor_prepare_fb(struct drm_plane *plane,
					struct drm_plane_state *new_state)
{
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_object *bo;

	if (!new_state->fb)
		return 0;

	vgfb = to_virtio_gpu_framebuffer(new_state->fb);
	bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
	if (bo && bo->dumb && (plane->state->fb != new_state->fb)) {
		vgfb->fence = virtio_gpu_fence_alloc(vgdev);
		if (!vgfb->fence)
			return -ENOMEM;
	}

	return 0;
}

static void virtio_gpu_cursor_cleanup_fb(struct drm_plane *plane,
					 struct drm_plane_state *old_state)
{
	struct virtio_gpu_framebuffer *vgfb;

	if (!plane->state->fb)
		return;

	vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
	if (vgfb->fence) {
		dma_fence_put(&vgfb->fence->f);
		vgfb->fence = NULL;
	}
}
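
/*
 * Cursor updates go through the dedicated cursor virtqueue.  A new cursor
 * image is first transferred to the host and waited on synchronously,
 * using the fence allocated in virtio_gpu_cursor_prepare_fb(), then an
 * UPDATE_CURSOR command publishes the new resource and hotspot.  A mere
 * position change only needs a MOVE_CURSOR command.
 */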
static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
					   struct drm_plane_state *old_state)
{
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_output *output = NULL;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_object *bo = NULL;
	uint32_t handle;

	if (plane->state->crtc)
		output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
	if (old_state->crtc)
		output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
	if (WARN_ON(!output))
		return;

	if (plane->state->fb) {
		vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
		bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
		handle = bo->hw_res_handle;
	} else {
		handle = 0;
	}

	if (bo && bo->dumb && (plane->state->fb != old_state->fb)) {
		/* new cursor -- update & wait */
		struct virtio_gpu_object_array *objs;

		objs = virtio_gpu_array_alloc(1);
		if (!objs)
			return;
		virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
		virtio_gpu_array_lock_resv(objs);
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, 0,
			 plane->state->crtc_w,
			 plane->state->crtc_h,
			 0, 0, objs, vgfb->fence);
		virtio_gpu_notify(vgdev);
		dma_fence_wait(&vgfb->fence->f, true);
		dma_fence_put(&vgfb->fence->f);
		vgfb->fence = NULL;
	}

	if (plane->state->fb != old_state->fb) {
		DRM_DEBUG("update, handle %d, pos +%d+%d, hot %d,%d\n", handle,
			  plane->state->crtc_x,
			  plane->state->crtc_y,
			  plane->state->fb ? plane->state->fb->hot_x : 0,
			  plane->state->fb ? plane->state->fb->hot_y : 0);
		output->cursor.hdr.type =
			cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
		output->cursor.resource_id = cpu_to_le32(handle);
		if (plane->state->fb) {
			output->cursor.hot_x =
				cpu_to_le32(plane->state->fb->hot_x);
			output->cursor.hot_y =
				cpu_to_le32(plane->state->fb->hot_y);
		} else {
			output->cursor.hot_x = cpu_to_le32(0);
			output->cursor.hot_y = cpu_to_le32(0);
		}
	} else {
		DRM_DEBUG("move +%d+%d\n",
			  plane->state->crtc_x,
			  plane->state->crtc_y);
		output->cursor.hdr.type =
			cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR);
	}
	output->cursor.pos.x = cpu_to_le32(plane->state->crtc_x);
	output->cursor.pos.y = cpu_to_le32(plane->state->crtc_y);
	virtio_gpu_cursor_ping(vgdev, output);
}

static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = {
	.atomic_check		= virtio_gpu_plane_atomic_check,
	.atomic_update		= virtio_gpu_primary_plane_update,
};

static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = {
	.prepare_fb		= virtio_gpu_cursor_prepare_fb,
	.cleanup_fb		= virtio_gpu_cursor_cleanup_fb,
	.atomic_check		= virtio_gpu_plane_atomic_check,
	.atomic_update		= virtio_gpu_cursor_plane_update,
};

struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
					enum drm_plane_type type,
					int index)
{
	struct drm_device *dev = vgdev->ddev;
	const struct drm_plane_helper_funcs *funcs;
	struct drm_plane *plane;
	const uint32_t *formats;
	int ret, nformats;

	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
	if (!plane)
		return ERR_PTR(-ENOMEM);

	if (type == DRM_PLANE_TYPE_CURSOR) {
		formats = virtio_gpu_cursor_formats;
		nformats = ARRAY_SIZE(virtio_gpu_cursor_formats);
		funcs = &virtio_gpu_cursor_helper_funcs;
	} else {
		formats = virtio_gpu_formats;
		nformats = ARRAY_SIZE(virtio_gpu_formats);
		funcs = &virtio_gpu_primary_helper_funcs;
	}
	/* each plane is usable on exactly one CRTC, selected by the index */
	ret = drm_universal_plane_init(dev, plane, 1 << index,
				       &virtio_gpu_plane_funcs,
				       formats, nformats,
				       NULL, type, NULL);
	if (ret)
		goto err_plane_init;

	drm_plane_helper_add(plane, funcs);
	return plane;

err_plane_init:
	kfree(plane);
	return ERR_PTR(ret);
}