1 /* 2 * Copyright (C) 2015 Red Hat, Inc. 3 * All Rights Reserved. 4 * 5 * Permission is hereby granted, free of charge, to any person obtaining 6 * a copy of this software and associated documentation files (the 7 * "Software"), to deal in the Software without restriction, including 8 * without limitation the rights to use, copy, modify, merge, publish, 9 * distribute, sublicense, and/or sell copies of the Software, and to 10 * permit persons to whom the Software is furnished to do so, subject to 11 * the following conditions: 12 * 13 * The above copyright notice and this permission notice (including the 14 * next paragraph) shall be included in all copies or substantial 15 * portions of the Software. 16 * 17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE 21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION 23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
 */

#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>

#include "virtgpu_drv.h"

/* Pixel formats advertised for the primary plane. */
static const uint32_t virtio_gpu_formats[] = {
	DRM_FORMAT_HOST_XRGB8888,
};

/* Pixel formats advertised for the cursor plane (alpha channel variant). */
static const uint32_t virtio_gpu_cursor_formats[] = {
	DRM_FORMAT_HOST_ARGB8888,
};

/*
 * virtio_gpu_translate_format - map a DRM fourcc to a virtio-gpu format enum.
 * @drm_fourcc: DRM_FORMAT_* code as seen on a framebuffer.
 *
 * Returns the matching VIRTIO_GPU_FORMAT_* value, or 0 (with a WARN) for
 * any fourcc outside the four handled here.  Everything advertised in
 * virtio_gpu_formats[] / virtio_gpu_cursor_formats[] resolves to one of
 * these cases, so the 0 path indicates a driver bug, not bad userspace
 * input.
 */
uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc)
{
	uint32_t format;

	switch (drm_fourcc) {
	case DRM_FORMAT_XRGB8888:
		format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
		break;
	case DRM_FORMAT_ARGB8888:
		format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
		break;
	case DRM_FORMAT_BGRX8888:
		format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM;
		break;
	case DRM_FORMAT_BGRA8888:
		format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
		break;
	default:
		/*
		 * This should not happen, we handle everything listed
		 * in virtio_gpu_formats[].
		 */
		format = 0;
		break;
	}
	WARN_ON(format == 0);
	return format;
}

/*
 * Plane teardown: undo drm_plane_cleanup() registration and free the
 * structure kzalloc'ed in virtio_gpu_plane_init().
 */
static void virtio_gpu_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
	kfree(plane);
}

/* Stock atomic helpers for everything except destroy. */
static const struct drm_plane_funcs virtio_gpu_plane_funcs = {
	.update_plane		= drm_atomic_helper_update_plane,
	.disable_plane		= drm_atomic_helper_disable_plane,
	.destroy		= virtio_gpu_plane_destroy,
	.reset			= drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
};

/*
 * Atomic check for both plane types.  No scaling is supported (min and
 * max scale are both NO_SCALING); only the cursor plane may be positioned
 * partially outside the CRTC (can_position = is_cursor).  A state with no
 * framebuffer is trivially valid (plane is being disabled).
 */
static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
					 struct drm_plane_state *state)
{
	bool is_cursor = plane->type == DRM_PLANE_TYPE_CURSOR;
	struct drm_crtc_state *crtc_state;
	int ret;

	if (!state->fb || WARN_ON(!state->crtc))
		return 0;

	crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	ret = drm_atomic_helper_check_plane_state(state, crtc_state,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  is_cursor, true);
	return ret;
}

/*
 * Push the damaged rectangle of a dumb (guest-backed) BO to the host via
 * a TRANSFER_TO_HOST_2D command.  @rect is in framebuffer coordinates;
 * the byte offset is derived from the fb's pitch and cpp.
 *
 * NOTE(review): on virtio_gpu_array_alloc() failure this silently skips
 * the transfer — presumably acceptable as the damage is re-sent on the
 * next page flip, but confirm against the damage-tracking contract.
 */
static void virtio_gpu_update_dumb_bo(struct virtio_gpu_device *vgdev,
				      struct drm_plane_state *state,
				      struct drm_rect *rect)
{
	struct virtio_gpu_object *bo =
		gem_to_virtio_gpu_obj(state->fb->obj[0]);
	struct virtio_gpu_object_array *objs;
	uint32_t w = rect->x2 - rect->x1;
	uint32_t h = rect->y2 - rect->y1;
	uint32_t x = rect->x1;
	uint32_t y = rect->y1;
	uint32_t off = x * state->fb->format->cpp[0] +
		y * state->fb->pitches[0];

	objs = virtio_gpu_array_alloc(1);
	if (!objs)
		return;
	virtio_gpu_array_add_obj(objs, &bo->base.base);

	virtio_gpu_cmd_transfer_to_host_2d(vgdev, off, w, h, x, y,
					   objs, NULL);
}

/*
 * Primary plane atomic_update.
 *
 * The output is taken from the new CRTC if present, otherwise from the
 * old one (disable path, where only old_state->crtc is set).  With no fb
 * or a disabled output, SET_SCANOUT is sent with resource 0 to blank.
 * Otherwise: merge the damage rects, upload dumb BOs, re-send
 * SET_SCANOUT only when fb or src viewport changed, then flush the
 * damaged region.  The disable/enable_notify pair batches all commands
 * into a single host notification.
 */
static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
					    struct drm_plane_state *old_state)
{
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_output *output = NULL;
	struct virtio_gpu_object *bo;
	struct drm_rect rect;

	if (plane->state->crtc)
		output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
	if (old_state->crtc)
		output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
	if (WARN_ON(!output))
		return;

	if (!plane->state->fb || !output->enabled) {
		DRM_DEBUG("nofb\n");
		virtio_gpu_cmd_set_scanout(vgdev, output->index, 0,
					   plane->state->src_w >> 16,
					   plane->state->src_h >> 16,
					   0, 0);
		return;
	}

	/* No merged damage -> nothing changed, nothing to upload/flush. */
	if (!drm_atomic_helper_damage_merged(old_state, plane->state, &rect))
		return;

	virtio_gpu_disable_notify(vgdev);

	bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);
	if (bo->dumb)
		virtio_gpu_update_dumb_bo(vgdev, plane->state, &rect);

	if (plane->state->fb != old_state->fb ||
	    plane->state->src_w != old_state->src_w ||
	    plane->state->src_h != old_state->src_h ||
	    plane->state->src_x != old_state->src_x ||
	    plane->state->src_y != old_state->src_y) {
		DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d, src %dx%d+%d+%d\n",
			  bo->hw_res_handle,
			  plane->state->crtc_w, plane->state->crtc_h,
			  plane->state->crtc_x, plane->state->crtc_y,
			  plane->state->src_w >> 16,
			  plane->state->src_h >> 16,
			  plane->state->src_x >> 16,
			  plane->state->src_y >> 16);
		virtio_gpu_cmd_set_scanout(vgdev, output->index,
					   bo->hw_res_handle,
					   plane->state->src_w >> 16,
					   plane->state->src_h >> 16,
					   plane->state->src_x >> 16,
					   plane->state->src_y >> 16);
	}

	virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle,
				      rect.x1,
				      rect.y1,
				      rect.x2 - rect.x1,
				      rect.y2 - rect.y1);

	virtio_gpu_enable_notify(vgdev);
}

/*
 * Cursor prepare_fb: pre-allocate a fence when a new dumb cursor BO will
 * need a TRANSFER_TO_HOST_2D in atomic_update (same fb-changed condition
 * is re-checked there).  Returns -ENOMEM if the fence cannot be
 * allocated; 0 otherwise (including the no-fb case).
 */
static int virtio_gpu_cursor_prepare_fb(struct drm_plane *plane,
					struct drm_plane_state *new_state)
{
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_object *bo;

	if (!new_state->fb)
		return 0;

	vgfb = to_virtio_gpu_framebuffer(new_state->fb);
	bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
	/* plane->state here is still the *old* state; fence only for new fbs. */
	if (bo && bo->dumb && (plane->state->fb != new_state->fb)) {
		vgfb->fence = virtio_gpu_fence_alloc(vgdev);
		if (!vgfb->fence)
			return -ENOMEM;
	}

	return 0;
}

/*
 * Cursor cleanup_fb: drop a fence that atomic_update did not consume
 * (update puts the fence itself and NULLs the pointer after waiting).
 */
static void virtio_gpu_cursor_cleanup_fb(struct drm_plane *plane,
					 struct drm_plane_state *old_state)
{
	struct virtio_gpu_framebuffer *vgfb;

	if (!plane->state->fb)
		return;

	vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
	if (vgfb->fence) {
		dma_fence_put(&vgfb->fence->f);
		vgfb->fence = NULL;
	}
}

/*
 * Cursor plane atomic_update.
 *
 * For a new dumb-BO cursor image: upload the whole crtc_w x crtc_h image
 * to the host, fenced, and wait for completion so the host never samples
 * a half-written cursor.  Then fill in output->cursor (UPDATE_CURSOR for
 * a new image with resource id and hotspot, MOVE_CURSOR for a pure
 * position change) and ping the cursor virtqueue.
 *
 * NOTE(review): the fence used here is the one allocated in
 * virtio_gpu_cursor_prepare_fb() under the same bo->dumb / fb-changed
 * condition, so it is presumably non-NULL on this path — verify.
 */
static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
					   struct drm_plane_state *old_state)
{
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_output *output = NULL;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_object *bo = NULL;
	uint32_t handle;

	if (plane->state->crtc)
		output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
	if (old_state->crtc)
		output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
	if (WARN_ON(!output))
		return;

	if (plane->state->fb) {
		vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
		bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
		handle = bo->hw_res_handle;
	} else {
		handle = 0;
	}

	if (bo && bo->dumb && (plane->state->fb != old_state->fb)) {
		/* new cursor -- update & wait */
		struct virtio_gpu_object_array *objs;

		objs = virtio_gpu_array_alloc(1);
		if (!objs)
			return;
		virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
		virtio_gpu_array_lock_resv(objs);
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, 0,
			 plane->state->crtc_w,
			 plane->state->crtc_h,
			 0, 0, objs, vgfb->fence);
		dma_fence_wait(&vgfb->fence->f, true);
		dma_fence_put(&vgfb->fence->f);
		vgfb->fence = NULL;
	}

	if (plane->state->fb != old_state->fb) {
		DRM_DEBUG("update, handle %d, pos +%d+%d, hot %d,%d\n", handle,
			  plane->state->crtc_x,
			  plane->state->crtc_y,
			  plane->state->fb ? plane->state->fb->hot_x : 0,
			  plane->state->fb ? plane->state->fb->hot_y : 0);
		output->cursor.hdr.type =
			cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
		output->cursor.resource_id = cpu_to_le32(handle);
		if (plane->state->fb) {
			output->cursor.hot_x =
				cpu_to_le32(plane->state->fb->hot_x);
			output->cursor.hot_y =
				cpu_to_le32(plane->state->fb->hot_y);
		} else {
			output->cursor.hot_x = cpu_to_le32(0);
			output->cursor.hot_y = cpu_to_le32(0);
		}
	} else {
		DRM_DEBUG("move +%d+%d\n",
			  plane->state->crtc_x,
			  plane->state->crtc_y);
		output->cursor.hdr.type =
			cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR);
	}
	output->cursor.pos.x = cpu_to_le32(plane->state->crtc_x);
	output->cursor.pos.y = cpu_to_le32(plane->state->crtc_y);
	virtio_gpu_cursor_ping(vgdev, output);
}

static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = {
	.atomic_check		= virtio_gpu_plane_atomic_check,
	.atomic_update		= virtio_gpu_primary_plane_update,
};

static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = {
	.prepare_fb		= virtio_gpu_cursor_prepare_fb,
	.cleanup_fb		= virtio_gpu_cursor_cleanup_fb,
	.atomic_check		= virtio_gpu_plane_atomic_check,
	.atomic_update		= virtio_gpu_cursor_plane_update,
};

/*
 * virtio_gpu_plane_init - allocate and register one plane.
 * @vgdev: device.
 * @type: DRM_PLANE_TYPE_PRIMARY or DRM_PLANE_TYPE_CURSOR; selects the
 *        format list and helper vtable.
 * @index: output index; the plane is bound to exactly that CRTC via the
 *         1 << index possible_crtcs mask.
 *
 * Returns the plane or an ERR_PTR.  The plane is freed again via
 * virtio_gpu_plane_destroy() (.destroy callback) or on the error path.
 */
struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
					enum drm_plane_type type,
					int index)
{
	struct drm_device *dev = vgdev->ddev;
	const struct drm_plane_helper_funcs *funcs;
	struct drm_plane *plane;
	const uint32_t *formats;
	int ret, nformats;

	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
	if (!plane)
		return ERR_PTR(-ENOMEM);

	if (type == DRM_PLANE_TYPE_CURSOR) {
		formats = virtio_gpu_cursor_formats;
		nformats = ARRAY_SIZE(virtio_gpu_cursor_formats);
		funcs = &virtio_gpu_cursor_helper_funcs;
	} else {
		formats = virtio_gpu_formats;
		nformats = ARRAY_SIZE(virtio_gpu_formats);
		funcs = &virtio_gpu_primary_helper_funcs;
	}
	ret = drm_universal_plane_init(dev, plane, 1 << index,
				       &virtio_gpu_plane_funcs,
				       formats, nformats,
				       NULL, type, NULL);
	if (ret)
		goto err_plane_init;

	drm_plane_helper_add(plane, funcs);
	return plane;

err_plane_init:
	kfree(plane);
	return ERR_PTR(ret);
}