// SPDX-License-Identifier: GPL-2.0+

#include <linux/crc32.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_vblank.h>

#include "vkms_drv.h"

static u32 get_pixel_from_buffer(int x, int y, const u8 *buffer,
				 const struct vkms_composer *composer)
{
	u32 pixel;
	int src_offset = composer->offset + (y * composer->pitch)
					  + (x * composer->cpp);

	pixel = *(u32 *)&buffer[src_offset];

	return pixel;
}

/**
 * compute_crc - Compute CRC value on output frame
 *
 * @vaddr: address of the final framebuffer
 * @composer: framebuffer's metadata
 *
 * Returns: CRC value computed using crc32 on the visible portion of
 * the final framebuffer at vaddr
 */
static uint32_t compute_crc(const u8 *vaddr,
			    const struct vkms_composer *composer)
{
	int x, y;
	u32 crc = 0, pixel = 0;
	int x_src = composer->src.x1 >> 16;
	int y_src = composer->src.y1 >> 16;
	int h_src = drm_rect_height(&composer->src) >> 16;
	int w_src = drm_rect_width(&composer->src) >> 16;

	for (y = y_src; y < y_src + h_src; ++y) {
		for (x = x_src; x < x_src + w_src; ++x) {
			pixel = get_pixel_from_buffer(x, y, vaddr, composer);
			crc = crc32_le(crc, (void *)&pixel, sizeof(u32));
		}
	}

	return crc;
}

static u8 blend_channel(u8 src, u8 dst, u8 alpha)
{
	u32 pre_blend;
	u8 new_color;

	pre_blend = (src * 255 + dst * (255 - alpha));

	/* Faster div by 255 (approximates pre_blend / 255 with shifts) */
	new_color = ((pre_blend + ((pre_blend + 257) >> 8)) >> 8);

	return new_color;
}

/**
 * alpha_blend - alpha blending equation
 * @argb_src: src pixel on premultiplied alpha mode
 * @argb_dst: dst pixel completely opaque
 *
 * Blend pixels using the premultiplied blend formula. The current DRM
 * assumption is that pixel color values have already been pre-multiplied
 * with the alpha channel values. For more information, see
 * drm_plane_create_blend_mode_property(). Also, this formula assumes a
 * completely opaque background.
 */
static void alpha_blend(const u8 *argb_src, u8 *argb_dst)
{
	u8 alpha;

	alpha = argb_src[3];
	argb_dst[0] = blend_channel(argb_src[0], argb_dst[0], alpha);
	argb_dst[1] = blend_channel(argb_src[1], argb_dst[1], alpha);
	argb_dst[2] = blend_channel(argb_src[2], argb_dst[2], alpha);
}
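/*
 * Worked example (illustrative only, values chosen arbitrarily): a
 * premultiplied source channel of 0x40 with alpha 0x80 blended over an
 * opaque destination channel of 0xff gives
 * 0x40 + 0xff * (255 - 0x80) / 255 = 0xbf, which is exactly what
 * blend_channel(0x40, 0xff, 0x80) returns with the shift-based division.
 */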

/**
 * x_blend - blending equation that ignores the pixel alpha
 * @xrgb_src: source pixel
 * @xrgb_dst: destination pixel
 *
 * Overwrites the RGB color value of the dst pixel with the one from the
 * src pixel.
 */
static void x_blend(const u8 *xrgb_src, u8 *xrgb_dst)
{
	memcpy(xrgb_dst, xrgb_src, sizeof(u8) * 3);
}

/**
 * blend - blend value at vaddr_src with value at vaddr_dst
 * @vaddr_dst: destination address
 * @vaddr_src: source address
 * @dst_composer: destination framebuffer's metadata
 * @src_composer: source framebuffer's metadata
 * @pixel_blend: blending equation based on plane format
 *
 * Blend the vaddr_src value with the vaddr_dst value using a pixel blend
 * equation according to the supported plane formats DRM_FORMAT_(A/XRGB8888)
 * and clearing the alpha channel to a completely opaque background. This
 * function uses the buffers' metadata to locate the new composite values at
 * vaddr_dst.
 *
 * TODO: completely clear the primary plane (a = 0xff) before starting to blend
 * pixel color values
 */
static void blend(void *vaddr_dst, void *vaddr_src,
		  struct vkms_composer *dst_composer,
		  struct vkms_composer *src_composer,
		  void (*pixel_blend)(const u8 *, u8 *))
{
	int i, j, j_dst, i_dst;
	int offset_src, offset_dst;
	u8 *pixel_dst, *pixel_src;

	int x_src = src_composer->src.x1 >> 16;
	int y_src = src_composer->src.y1 >> 16;

	int x_dst = src_composer->dst.x1;
	int y_dst = src_composer->dst.y1;
	int h_dst = drm_rect_height(&src_composer->dst);
	int w_dst = drm_rect_width(&src_composer->dst);

	int y_limit = y_src + h_dst;
	int x_limit = x_src + w_dst;

	for (i = y_src, i_dst = y_dst; i < y_limit; ++i) {
		for (j = x_src, j_dst = x_dst; j < x_limit; ++j) {
			offset_dst = dst_composer->offset
				     + (i_dst * dst_composer->pitch)
				     + (j_dst++ * dst_composer->cpp);
			offset_src = src_composer->offset
				     + (i * src_composer->pitch)
				     + (j * src_composer->cpp);

			pixel_src = (u8 *)(vaddr_src + offset_src);
			pixel_dst = (u8 *)(vaddr_dst + offset_dst);
			pixel_blend(pixel_src, pixel_dst);
			/* clearing alpha channel (0xff) */
			pixel_dst[3] = 0xff;
		}
		i_dst++;
	}
}

static void compose_plane(struct vkms_composer *primary_composer,
			  struct vkms_composer *plane_composer,
			  void *vaddr_out)
{
	struct drm_framebuffer *fb = &plane_composer->fb;
	void *vaddr;
	void (*pixel_blend)(const u8 *p_src, u8 *p_dst);

	/* Check the map that is actually dereferenced below. */
	if (WARN_ON(iosys_map_is_null(&plane_composer->map[0])))
		return;

	vaddr = plane_composer->map[0].vaddr;

	if (fb->format->format == DRM_FORMAT_ARGB8888)
		pixel_blend = &alpha_blend;
	else
		pixel_blend = &x_blend;

	blend(vaddr_out, vaddr, primary_composer, plane_composer, pixel_blend);
}

static int compose_active_planes(void **vaddr_out,
				 struct vkms_composer *primary_composer,
				 struct vkms_crtc_state *crtc_state)
{
	struct drm_framebuffer *fb = &primary_composer->fb;
	struct drm_gem_object *gem_obj = drm_gem_fb_get_obj(fb, 0);
	const void *vaddr;
	int i;

	if (!*vaddr_out) {
		*vaddr_out = kzalloc(gem_obj->size, GFP_KERNEL);
		if (!*vaddr_out) {
			DRM_ERROR("Cannot allocate memory for output frame.\n");
			return -ENOMEM;
		}
	}

	if (WARN_ON(iosys_map_is_null(&primary_composer->map[0])))
		return -EINVAL;

	vaddr = primary_composer->map[0].vaddr;

	memcpy(*vaddr_out, vaddr, gem_obj->size);

	/*
	 * If there are other planes besides primary, we consider the active
	 * planes should be in z-order and compose them associatively:
	 * ((primary <- overlay) <- cursor)
	 */
	for (i = 1; i < crtc_state->num_active_planes; i++)
		compose_plane(primary_composer,
			      crtc_state->active_planes[i]->composer,
			      *vaddr_out);

	return 0;
}

/**
 * vkms_composer_worker - ordered work_struct to compute CRC
 *
 * @work: work_struct
 *
 * Work handler for composing and computing CRCs. The work_struct is
 * scheduled on an ordered workqueue that is periodically run by the vblank
 * handler and flushed at vkms_atomic_crtc_destroy_state().
 */
void vkms_composer_worker(struct work_struct *work)
{
	struct vkms_crtc_state *crtc_state = container_of(work,
						struct vkms_crtc_state,
						composer_work);
	struct drm_crtc *crtc = crtc_state->base.crtc;
	struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
	struct vkms_composer *primary_composer = NULL;
	struct vkms_plane_state *act_plane = NULL;
	bool crc_pending, wb_pending;
	void *vaddr_out = NULL;
	u32 crc32 = 0;
	u64 frame_start, frame_end;
	int ret;

	spin_lock_irq(&out->composer_lock);
	frame_start = crtc_state->frame_start;
	frame_end = crtc_state->frame_end;
	crc_pending = crtc_state->crc_pending;
	wb_pending = crtc_state->wb_pending;
	crtc_state->frame_start = 0;
	crtc_state->frame_end = 0;
	crtc_state->crc_pending = false;
	spin_unlock_irq(&out->composer_lock);

	/*
	 * We raced with the vblank hrtimer and previous work already computed
	 * the crc, nothing to do.
	 */
	if (!crc_pending)
		return;

	if (crtc_state->num_active_planes >= 1) {
		act_plane = crtc_state->active_planes[0];
		if (act_plane->base.base.plane->type == DRM_PLANE_TYPE_PRIMARY)
			primary_composer = act_plane->composer;
	}

	if (!primary_composer)
		return;

	if (wb_pending)
		vaddr_out = crtc_state->active_writeback->data[0].vaddr;

	ret = compose_active_planes(&vaddr_out, primary_composer,
				    crtc_state);
	if (ret) {
		if (ret == -EINVAL && !wb_pending)
			kfree(vaddr_out);
		return;
	}

	crc32 = compute_crc(vaddr_out, primary_composer);

	if (wb_pending) {
		drm_writeback_signal_completion(&out->wb_connector, 0);
		spin_lock_irq(&out->composer_lock);
		crtc_state->wb_pending = false;
		spin_unlock_irq(&out->composer_lock);
	} else {
		kfree(vaddr_out);
	}

	/*
	 * The worker can fall behind the vblank hrtimer, make sure we catch up.
	 */
	while (frame_start <= frame_end)
		drm_crtc_add_crc_entry(crtc, true, frame_start++, &crc32);
}
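/*
 * Usage sketch: the CRC hooks below are reached through DRM's debugfs CRC
 * interface. Userspace typically enables CRC capture by writing the source
 * name to the CRTC's "crc/control" file and then reads one CRC line per
 * frame from "crc/data", e.g. (paths vary with the DRM minor and CRTC index):
 *
 *	echo auto > /sys/kernel/debug/dri/0/crtc-0/crc/control
 *	cat /sys/kernel/debug/dri/0/crtc-0/crc/data
 */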

static const char * const pipe_crc_sources[] = {"auto"};

const char *const *vkms_get_crc_sources(struct drm_crtc *crtc,
					size_t *count)
{
	*count = ARRAY_SIZE(pipe_crc_sources);
	return pipe_crc_sources;
}

static int vkms_crc_parse_source(const char *src_name, bool *enabled)
{
	int ret = 0;

	if (!src_name) {
		*enabled = false;
	} else if (strcmp(src_name, "auto") == 0) {
		*enabled = true;
	} else {
		*enabled = false;
		ret = -EINVAL;
	}

	return ret;
}

int vkms_verify_crc_source(struct drm_crtc *crtc, const char *src_name,
			   size_t *values_cnt)
{
	bool enabled;

	if (vkms_crc_parse_source(src_name, &enabled) < 0) {
		DRM_DEBUG_DRIVER("unknown source %s\n", src_name);
		return -EINVAL;
	}

	*values_cnt = 1;

	return 0;
}

void vkms_set_composer(struct vkms_output *out, bool enabled)
{
	bool old_enabled;

	if (enabled)
		drm_crtc_vblank_get(&out->crtc);

	spin_lock_irq(&out->lock);
	old_enabled = out->composer_enabled;
	out->composer_enabled = enabled;
	spin_unlock_irq(&out->lock);

	if (old_enabled)
		drm_crtc_vblank_put(&out->crtc);
}

int vkms_set_crc_source(struct drm_crtc *crtc, const char *src_name)
{
	struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
	bool enabled = false;
	int ret = 0;

	ret = vkms_crc_parse_source(src_name, &enabled);

	vkms_set_composer(out, enabled);

	return ret;
}