/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>

#include "virtgpu_drv.h"

static const uint32_t virtio_gpu_formats[] = {
	DRM_FORMAT_HOST_XRGB8888,
};

static const uint32_t virtio_gpu_cursor_formats[] = {
	DRM_FORMAT_HOST_ARGB8888,
};

uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc)
{
	uint32_t format;

	switch (drm_fourcc) {
	case DRM_FORMAT_XRGB8888:
		format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
		break;
	case DRM_FORMAT_ARGB8888:
		format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
		break;
	case DRM_FORMAT_BGRX8888:
		format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM;
		break;
	case DRM_FORMAT_BGRA8888:
		format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
		break;
	default:
		/*
		 * This should not happen; we handle everything listed
		 * in virtio_gpu_formats[].
		 */
		format = 0;
		break;
	}
	WARN_ON(format == 0);
	return format;
}
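/*
 * Planes are allocated with kzalloc() in virtio_gpu_plane_init() below,
 * so destroying one must free the allocation after the core cleanup.
 */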
static void virtio_gpu_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
	kfree(plane);
}

static const struct drm_plane_funcs virtio_gpu_plane_funcs = {
	.update_plane		= drm_atomic_helper_update_plane,
	.disable_plane		= drm_atomic_helper_disable_plane,
	.destroy		= virtio_gpu_plane_destroy,
	.reset			= drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
};

static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
					 struct drm_plane_state *state)
{
	bool is_cursor = plane->type == DRM_PLANE_TYPE_CURSOR;
	struct drm_crtc_state *crtc_state;
	int ret;

	if (!state->fb || WARN_ON(!state->crtc))
		return 0;

	crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	ret = drm_atomic_helper_check_plane_state(state, crtc_state,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  is_cursor, true);
	return ret;
}

static void virtio_gpu_update_dumb_bo(struct virtio_gpu_device *vgdev,
				      struct drm_plane_state *state,
				      struct drm_rect *rect)
{
	struct virtio_gpu_object *bo =
		gem_to_virtio_gpu_obj(state->fb->obj[0]);
	struct virtio_gpu_object_array *objs;
	uint32_t w = rect->x2 - rect->x1;
	uint32_t h = rect->y2 - rect->y1;
	uint32_t x = rect->x1;
	uint32_t y = rect->y1;
	uint32_t off = x * state->fb->format->cpp[0] +
		y * state->fb->pitches[0];

	objs = virtio_gpu_array_alloc(1);
	if (!objs)
		return;
	virtio_gpu_array_add_obj(objs, &bo->base.base);

	virtio_gpu_cmd_transfer_to_host_2d(vgdev, off, w, h, x, y,
					   objs, NULL);
}

static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
					    struct drm_plane_state *old_state)
{
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_output *output = NULL;
	struct virtio_gpu_object *bo;
	struct drm_rect rect;

	if (plane->state->crtc)
		output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
	if (old_state->crtc)
		output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
	if (WARN_ON(!output))
		return;

	if (!plane->state->fb || !output->enabled) {
		DRM_DEBUG("nofb\n");
		virtio_gpu_cmd_set_scanout(vgdev, output->index, 0,
					   plane->state->src_w >> 16,
					   plane->state->src_h >> 16,
					   0, 0);
		virtio_gpu_notify(vgdev);
		return;
	}

	if (!drm_atomic_helper_damage_merged(old_state, plane->state, &rect))
		return;

	bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);
	if (bo->dumb)
		virtio_gpu_update_dumb_bo(vgdev, plane->state, &rect);

	if (plane->state->fb != old_state->fb ||
	    plane->state->src_w != old_state->src_w ||
	    plane->state->src_h != old_state->src_h ||
	    plane->state->src_x != old_state->src_x ||
	    plane->state->src_y != old_state->src_y) {
		DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d, src %dx%d+%d+%d\n",
			  bo->hw_res_handle,
			  plane->state->crtc_w, plane->state->crtc_h,
			  plane->state->crtc_x, plane->state->crtc_y,
			  plane->state->src_w >> 16,
			  plane->state->src_h >> 16,
			  plane->state->src_x >> 16,
			  plane->state->src_y >> 16);
		virtio_gpu_cmd_set_scanout(vgdev, output->index,
					   bo->hw_res_handle,
					   plane->state->src_w >> 16,
					   plane->state->src_h >> 16,
					   plane->state->src_x >> 16,
					   plane->state->src_y >> 16);
	}

	virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle,
				      rect.x1,
				      rect.y1,
				      rect.x2 - rect.x1,
				      rect.y2 - rect.y1);
	virtio_gpu_notify(vgdev);
}
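/*
 * Cursor updates are synchronous: for dumb cursor BOs, prepare_fb
 * allocates a fence so that atomic_update can wait for the transfer to
 * the host to finish before pointing the cursor at the new resource.
 */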
static int virtio_gpu_cursor_prepare_fb(struct drm_plane *plane,
					struct drm_plane_state *new_state)
{
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_object *bo;

	if (!new_state->fb)
		return 0;

	vgfb = to_virtio_gpu_framebuffer(new_state->fb);
	bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
	if (bo && bo->dumb && (plane->state->fb != new_state->fb)) {
		vgfb->fence = virtio_gpu_fence_alloc(vgdev);
		if (!vgfb->fence)
			return -ENOMEM;
	}

	return 0;
}

static void virtio_gpu_cursor_cleanup_fb(struct drm_plane *plane,
					 struct drm_plane_state *old_state)
{
	struct virtio_gpu_framebuffer *vgfb;

	/*
	 * Clean up the state the core handed us, not the plane's current
	 * state: on an aborted commit this is the new state, whose fence
	 * was never consumed by atomic_update.
	 */
	if (!old_state->fb)
		return;

	vgfb = to_virtio_gpu_framebuffer(old_state->fb);
	if (vgfb->fence) {
		dma_fence_put(&vgfb->fence->f);
		vgfb->fence = NULL;
	}
}

static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
					   struct drm_plane_state *old_state)
{
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_output *output = NULL;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_object *bo = NULL;
	uint32_t handle;

	if (plane->state->crtc)
		output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
	if (old_state->crtc)
		output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
	if (WARN_ON(!output))
		return;

	if (plane->state->fb) {
		vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
		bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
		handle = bo->hw_res_handle;
	} else {
		handle = 0;
	}

	if (bo && bo->dumb && (plane->state->fb != old_state->fb)) {
		/* new cursor -- update & wait */
		struct virtio_gpu_object_array *objs;

		objs = virtio_gpu_array_alloc(1);
		if (!objs)
			return;
		virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
		virtio_gpu_array_lock_resv(objs);
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, 0,
			 plane->state->crtc_w,
			 plane->state->crtc_h,
			 0, 0, objs, vgfb->fence);
		virtio_gpu_notify(vgdev);
		dma_fence_wait(&vgfb->fence->f, true);
		dma_fence_put(&vgfb->fence->f);
		vgfb->fence = NULL;
	}

	if (plane->state->fb != old_state->fb) {
		DRM_DEBUG("update, handle %d, pos +%d+%d, hot %d,%d\n", handle,
			  plane->state->crtc_x,
			  plane->state->crtc_y,
			  plane->state->fb ? plane->state->fb->hot_x : 0,
			  plane->state->fb ?
			  plane->state->fb->hot_y : 0);
		output->cursor.hdr.type =
			cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
		output->cursor.resource_id = cpu_to_le32(handle);
		if (plane->state->fb) {
			output->cursor.hot_x =
				cpu_to_le32(plane->state->fb->hot_x);
			output->cursor.hot_y =
				cpu_to_le32(plane->state->fb->hot_y);
		} else {
			output->cursor.hot_x = cpu_to_le32(0);
			output->cursor.hot_y = cpu_to_le32(0);
		}
	} else {
		DRM_DEBUG("move +%d+%d\n",
			  plane->state->crtc_x,
			  plane->state->crtc_y);
		output->cursor.hdr.type =
			cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR);
	}
	output->cursor.pos.x = cpu_to_le32(plane->state->crtc_x);
	output->cursor.pos.y = cpu_to_le32(plane->state->crtc_y);
	virtio_gpu_cursor_ping(vgdev, output);
}
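/*
 * The primary plane needs no prepare_fb/cleanup_fb: its updates go out
 * via virtio_gpu_cmd_resource_flush() without waiting on a fence.
 */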
static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = {
	.atomic_check		= virtio_gpu_plane_atomic_check,
	.atomic_update		= virtio_gpu_primary_plane_update,
};

static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = {
	.prepare_fb		= virtio_gpu_cursor_prepare_fb,
	.cleanup_fb		= virtio_gpu_cursor_cleanup_fb,
	.atomic_check		= virtio_gpu_plane_atomic_check,
	.atomic_update		= virtio_gpu_cursor_plane_update,
};

struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
					enum drm_plane_type type,
					int index)
{
	struct drm_device *dev = vgdev->ddev;
	const struct drm_plane_helper_funcs *funcs;
	struct drm_plane *plane;
	const uint32_t *formats;
	int ret, nformats;

	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
	if (!plane)
		return ERR_PTR(-ENOMEM);

	if (type == DRM_PLANE_TYPE_CURSOR) {
		formats = virtio_gpu_cursor_formats;
		nformats = ARRAY_SIZE(virtio_gpu_cursor_formats);
		funcs = &virtio_gpu_cursor_helper_funcs;
	} else {
		formats = virtio_gpu_formats;
		nformats = ARRAY_SIZE(virtio_gpu_formats);
		funcs = &virtio_gpu_primary_helper_funcs;
	}
	ret = drm_universal_plane_init(dev, plane, 1 << index,
				       &virtio_gpu_plane_funcs,
				       formats, nformats,
				       NULL, type, NULL);
	if (ret)
		goto err_plane_init;

	drm_plane_helper_add(plane, funcs);
	return plane;

err_plane_init:
	kfree(plane);
	return ERR_PTR(ret);
}