/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "virtgpu_drv.h"
#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic_helper.h>

static const uint32_t virtio_gpu_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_BGRX8888,
	DRM_FORMAT_BGRA8888,
	DRM_FORMAT_RGBX8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
};

static const uint32_t virtio_gpu_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

static void virtio_gpu_plane_destroy(struct drm_plane *plane)
{
	kfree(plane);
}

static const struct drm_plane_funcs virtio_gpu_plane_funcs = {
	.update_plane		= drm_atomic_helper_update_plane,
	.disable_plane		= drm_atomic_helper_disable_plane,
	.destroy		= virtio_gpu_plane_destroy,
	.reset			= drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
};

static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
					 struct drm_plane_state *state)
{
	return 0;
}

static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
					    struct drm_plane_state *old_state)
{
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_output *output = NULL;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_object *bo;
	uint32_t handle;

	if (plane->state->crtc)
		output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
	if (old_state->crtc)
		output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
	WARN_ON(!output);

	if (plane->state->fb) {
		vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
		bo = gem_to_virtio_gpu_obj(vgfb->obj);
		handle = bo->hw_res_handle;
		if (bo->dumb) {
			virtio_gpu_cmd_transfer_to_host_2d
				(vgdev, handle, 0,
				 cpu_to_le32(plane->state->src_w >> 16),
				 cpu_to_le32(plane->state->src_h >> 16),
				 plane->state->src_x >> 16,
				 plane->state->src_y >> 16, NULL);
		}
	} else {
		handle = 0;
	}

	DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d, src %dx%d+%d+%d\n", handle,
		  plane->state->crtc_w, plane->state->crtc_h,
		  plane->state->crtc_x, plane->state->crtc_y,
		  plane->state->src_w >> 16,
		  plane->state->src_h >> 16,
		  plane->state->src_x >> 16,
		  plane->state->src_y >> 16);
	virtio_gpu_cmd_set_scanout(vgdev, output->index, handle,
				   plane->state->src_w >> 16,
				   plane->state->src_h >> 16,
				   plane->state->src_x >> 16,
				   plane->state->src_y >> 16);
	virtio_gpu_cmd_resource_flush(vgdev, handle,
				      plane->state->src_x >> 16,
				      plane->state->src_y >> 16,
				      plane->state->src_w >> 16,
				      plane->state->src_h >> 16);
}

static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
					   struct drm_plane_state *old_state)
{
	struct drm_device *dev = plane->dev;
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_output *output = NULL;
	struct virtio_gpu_framebuffer *vgfb;
	struct virtio_gpu_fence *fence = NULL;
	struct virtio_gpu_object *bo = NULL;
	uint32_t handle;
	int ret = 0;

	if (plane->state->crtc)
		output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
	if (old_state->crtc)
		output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
	WARN_ON(!output);

	if (plane->state->fb) {
		vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
		bo = gem_to_virtio_gpu_obj(vgfb->obj);
		handle = bo->hw_res_handle;
	} else {
		handle = 0;
	}

	if (bo && bo->dumb && (plane->state->fb != old_state->fb)) {
		/* new cursor -- update & wait */
		virtio_gpu_cmd_transfer_to_host_2d
			(vgdev, handle, 0,
			 cpu_to_le32(plane->state->crtc_w),
			 cpu_to_le32(plane->state->crtc_h),
			 0, 0, &fence);
		ret = virtio_gpu_object_reserve(bo, false);
		if (!ret) {
			reservation_object_add_excl_fence(bo->tbo.resv,
							  &fence->f);
			fence_put(&fence->f);
			fence = NULL;
			virtio_gpu_object_unreserve(bo);
			virtio_gpu_object_wait(bo, false);
		}
	}

	if (plane->state->fb != old_state->fb) {
		DRM_DEBUG("update, handle %d, pos +%d+%d, hot %d,%d\n", handle,
			  plane->state->crtc_x,
			  plane->state->crtc_y,
			  plane->state->fb ? plane->state->fb->hot_x : 0,
			  plane->state->fb ? plane->state->fb->hot_y : 0);
		output->cursor.hdr.type =
			cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
		output->cursor.resource_id = cpu_to_le32(handle);
		if (plane->state->fb) {
			output->cursor.hot_x =
				cpu_to_le32(plane->state->fb->hot_x);
			output->cursor.hot_y =
				cpu_to_le32(plane->state->fb->hot_y);
		} else {
			output->cursor.hot_x = cpu_to_le32(0);
			output->cursor.hot_y = cpu_to_le32(0);
		}
	} else {
		DRM_DEBUG("move +%d+%d\n",
			  plane->state->crtc_x,
			  plane->state->crtc_y);
		output->cursor.hdr.type =
			cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR);
	}
	output->cursor.pos.x = cpu_to_le32(plane->state->crtc_x);
	output->cursor.pos.y = cpu_to_le32(plane->state->crtc_y);
	virtio_gpu_cursor_ping(vgdev, output);
}

static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = {
	.atomic_check		= virtio_gpu_plane_atomic_check,
	.atomic_update		= virtio_gpu_primary_plane_update,
};

static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = {
	.atomic_check		= virtio_gpu_plane_atomic_check,
	.atomic_update		= virtio_gpu_cursor_plane_update,
};

struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
					enum drm_plane_type type,
					int index)
{
	struct drm_device *dev = vgdev->ddev;
	const struct drm_plane_helper_funcs *funcs;
	struct drm_plane *plane;
	const uint32_t *formats;
	int ret, nformats;

	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
	if (!plane)
		return ERR_PTR(-ENOMEM);

	if (type == DRM_PLANE_TYPE_CURSOR) {
		formats = virtio_gpu_cursor_formats;
		nformats = ARRAY_SIZE(virtio_gpu_cursor_formats);
		funcs = &virtio_gpu_cursor_helper_funcs;
	} else {
		formats = virtio_gpu_formats;
		nformats = ARRAY_SIZE(virtio_gpu_formats);
		funcs = &virtio_gpu_primary_helper_funcs;
	}
	ret = drm_universal_plane_init(dev, plane, 1 << index,
				       &virtio_gpu_plane_funcs,
				       formats, nformats,
				       type, NULL);
	if (ret)
		goto err_plane_init;

	drm_plane_helper_add(plane, funcs);
	return plane;

err_plane_init:
	kfree(plane);
	return ERR_PTR(ret);
}
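
/*
 * Usage sketch (illustrative only, not part of this file): the display
 * init code creates one primary and one cursor plane per output and
 * hands both to drm_crtc_init_with_planes().  Roughly, assuming the
 * caller sits in virtgpu_display.c alongside virtio_gpu_crtc_funcs:
 *
 *	struct drm_plane *primary, *cursor;
 *
 *	primary = virtio_gpu_plane_init(vgdev, DRM_PLANE_TYPE_PRIMARY, index);
 *	if (IS_ERR(primary))
 *		return PTR_ERR(primary);
 *	cursor = virtio_gpu_plane_init(vgdev, DRM_PLANE_TYPE_CURSOR, index);
 *	if (IS_ERR(cursor))
 *		return PTR_ERR(cursor);
 *	drm_crtc_init_with_planes(dev, crtc, primary, cursor,
 *				  &virtio_gpu_crtc_funcs, NULL);
 */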