// SPDX-License-Identifier: MIT
/*
 * Copyright 2020 Noralf Trønnes
 */

#include <linux/lz4.h>
#include <linux/usb.h>
#include <linux/workqueue.h>

#include <drm/drm_atomic.h>
#include <drm/drm_connector.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_rect.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/gud.h>

#include "gud_internal.h"

/*
 * Some userspace rendering loops run all displays in the same loop.
 * This means that a fast display will have to wait for a slow one.
 * Such users might want to enable this module parameter.
 */
static bool gud_async_flush;
module_param_named(async_flush, gud_async_flush, bool, 0644);
MODULE_PARM_DESC(async_flush, "Enable asynchronous flushing [default=0]");
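
/*
 * Usage sketch (assumes the driver is built as gud.ko): the 0644 permission
 * makes the parameter writable at runtime via sysfs as well as at load time:
 *
 *   modprobe gud async_flush=1
 *   echo 1 > /sys/module/gud/parameters/async_flush
 */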

/*
 * FIXME: The driver is probably broken on Big Endian machines.
 * See discussion:
 * https://lore.kernel.org/dri-devel/CAKb7UvihLX0hgBOP3VBG7O+atwZcUVCPVuBdfmDMpg0NjXe-cQ@mail.gmail.com/
 */

static bool gud_is_big_endian(void)
{
#if defined(__BIG_ENDIAN)
	return true;
#else
	return false;
#endif
}

static size_t gud_xrgb8888_to_r124(u8 *dst, const struct drm_format_info *format,
				   void *src, struct drm_framebuffer *fb,
				   struct drm_rect *rect)
{
	unsigned int block_width = drm_format_info_block_width(format, 0);
	unsigned int bits_per_pixel = 8 / block_width;
	unsigned int x, y, width, height;
	u8 pix, *pix8, *block = dst; /* Assign to silence compiler warning */
	struct iosys_map dst_map, vmap;
	size_t len;
	void *buf;

	WARN_ON_ONCE(format->char_per_block[0] != 1);

	/* Start on a byte boundary */
	rect->x1 = ALIGN_DOWN(rect->x1, block_width);
	width = drm_rect_width(rect);
	height = drm_rect_height(rect);
	len = drm_format_info_min_pitch(format, 0, width) * height;

	buf = kmalloc(width * height, GFP_KERNEL);
	if (!buf)
		return 0;

	iosys_map_set_vaddr(&dst_map, buf);
	iosys_map_set_vaddr(&vmap, src);
	drm_fb_xrgb8888_to_gray8(&dst_map, NULL, &vmap, fb, rect);
	pix8 = buf;

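	/*
	 * Packing is MSB-first: pixel x = 0 gets the highest pixshift. For
	 * example with R1 (block_width = 8, bits_per_pixel = 1) it lands in
	 * bit 7, so eight horizontally adjacent pixels fill one output byte
	 * left to right.
	 */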
	for (y = 0; y < height; y++) {
		for (x = 0; x < width; x++) {
			unsigned int pixpos = x % block_width; /* within byte from the left */
			unsigned int pixshift = (block_width - pixpos - 1) * bits_per_pixel;

			if (!pixpos) {
				block = dst++;
				*block = 0;
			}

			pix = (*pix8++) >> (8 - bits_per_pixel);
			*block |= pix << pixshift;
		}
	}

	kfree(buf);

	return len;
}

static size_t gud_xrgb8888_to_color(u8 *dst, const struct drm_format_info *format,
				    void *src, struct drm_framebuffer *fb,
				    struct drm_rect *rect)
{
	unsigned int block_width = drm_format_info_block_width(format, 0);
	unsigned int bits_per_pixel = 8 / block_width;
	u8 r, g, b, pix, *block = dst; /* Assign to silence compiler warning */
	unsigned int x, y, width;
	__le32 *sbuf32;
	u32 pix32;
	size_t len;

	/* Start on a byte boundary */
	rect->x1 = ALIGN_DOWN(rect->x1, block_width);
	width = drm_rect_width(rect);
	len = drm_format_info_min_pitch(format, 0, width) * drm_rect_height(rect);

	for (y = rect->y1; y < rect->y2; y++) {
		sbuf32 = src + (y * fb->pitches[0]);
		sbuf32 += rect->x1;

		for (x = 0; x < width; x++) {
			unsigned int pixpos = x % block_width; /* within byte from the left */
			unsigned int pixshift = (block_width - pixpos - 1) * bits_per_pixel;

			if (!pixpos) {
				block = dst++;
				*block = 0;
			}

			pix32 = le32_to_cpu(*sbuf32++);
			r = pix32 >> 16;
			g = pix32 >> 8;
			b = pix32;

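			/*
			 * Worked example: pix32 = 0x00ff8000 gives r = 0xff,
			 * g = 0x80, b = 0x00, which XRGB1111 below packs
			 * from the colour MSBs as 0b110.
			 */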
			switch (format->format) {
			case GUD_DRM_FORMAT_XRGB1111:
				pix = ((r >> 7) << 2) | ((g >> 7) << 1) | (b >> 7);
				break;
			default:
				WARN_ON_ONCE(1);
				return len;
			}

			*block |= pix << pixshift;
		}
	}

	return len;
}

static int gud_prep_flush(struct gud_device *gdrm, struct drm_framebuffer *fb,
			  const struct iosys_map *src, bool cached_reads,
			  const struct drm_format_info *format, struct drm_rect *rect,
			  struct gud_set_buffer_req *req)
{
	u8 compression = gdrm->compression;
	struct iosys_map dst;
	void *vaddr, *buf;
	size_t pitch, len;

	pitch = drm_format_info_min_pitch(format, 0, drm_rect_width(rect));
	len = pitch * drm_rect_height(rect);
	if (len > gdrm->bulk_len)
		return -E2BIG;

	vaddr = src[0].vaddr;
retry:
	if (compression)
		buf = gdrm->compress_buf;
	else
		buf = gdrm->bulk_buf;
	iosys_map_set_vaddr(&dst, buf);

	/*
	 * Imported buffers are assumed to be write-combined and thus uncached
	 * with slow reads (at least on ARM).
	 */
	if (format != fb->format) {
		if (format->format == GUD_DRM_FORMAT_R1) {
			len = gud_xrgb8888_to_r124(buf, format, vaddr, fb, rect);
			if (!len)
				return -ENOMEM;
		} else if (format->format == DRM_FORMAT_R8) {
			drm_fb_xrgb8888_to_gray8(&dst, NULL, src, fb, rect);
		} else if (format->format == DRM_FORMAT_RGB332) {
			drm_fb_xrgb8888_to_rgb332(&dst, NULL, src, fb, rect);
		} else if (format->format == DRM_FORMAT_RGB565) {
			drm_fb_xrgb8888_to_rgb565(&dst, NULL, src, fb, rect,
						  gud_is_big_endian());
		} else if (format->format == DRM_FORMAT_RGB888) {
			drm_fb_xrgb8888_to_rgb888(&dst, NULL, src, fb, rect);
		} else {
			len = gud_xrgb8888_to_color(buf, format, vaddr, fb, rect);
		}
	} else if (gud_is_big_endian() && format->cpp[0] > 1) {
		drm_fb_swab(&dst, NULL, src, fb, rect, cached_reads);
	} else if (compression && cached_reads && pitch == fb->pitches[0]) {
		/* can compress directly from the framebuffer */
		buf = vaddr + rect->y1 * pitch;
	} else {
		drm_fb_memcpy(&dst, NULL, src, fb, rect);
	}

	memset(req, 0, sizeof(*req));
	req->x = cpu_to_le32(rect->x1);
	req->y = cpu_to_le32(rect->y1);
	req->width = cpu_to_le32(drm_rect_width(rect));
	req->height = cpu_to_le32(drm_rect_height(rect));
	req->length = cpu_to_le32(len);

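	/*
	 * LZ4_compress_default() returns 0 if the output won't fit in the
	 * destination size given here (len), so an incompressible rect falls
	 * back to an uncompressed transfer through the retry label above.
	 */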
	if (compression & GUD_COMPRESSION_LZ4) {
		int complen;

		complen = LZ4_compress_default(buf, gdrm->bulk_buf, len, len, gdrm->lz4_comp_mem);
		if (complen <= 0) {
			compression = 0;
			goto retry;
		}

		req->compression = GUD_COMPRESSION_LZ4;
		req->compressed_length = cpu_to_le32(complen);
	}

	return 0;
}

struct gud_usb_bulk_context {
	struct timer_list timer;
	struct usb_sg_request sgr;
};

static void gud_usb_bulk_timeout(struct timer_list *t)
{
	struct gud_usb_bulk_context *ctx = from_timer(ctx, t, timer);

	usb_sg_cancel(&ctx->sgr);
}

static int gud_usb_bulk(struct gud_device *gdrm, size_t len)
{
	struct gud_usb_bulk_context ctx;
	int ret;

	ret = usb_sg_init(&ctx.sgr, gud_to_usb_device(gdrm), gdrm->bulk_pipe, 0,
			  gdrm->bulk_sgt.sgl, gdrm->bulk_sgt.nents, len, GFP_KERNEL);
	if (ret)
		return ret;

	timer_setup_on_stack(&ctx.timer, gud_usb_bulk_timeout, 0);
	mod_timer(&ctx.timer, jiffies + msecs_to_jiffies(3000));

	usb_sg_wait(&ctx.sgr);

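	/*
	 * del_timer_sync() returning zero means the 3 second timer already
	 * fired and gud_usb_bulk_timeout() cancelled the transfer, so report
	 * -ETIMEDOUT rather than the cancellation status.
	 */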
	if (!del_timer_sync(&ctx.timer))
		ret = -ETIMEDOUT;
	else if (ctx.sgr.status < 0)
		ret = ctx.sgr.status;
	else if (ctx.sgr.bytes != len)
		ret = -EIO;

	destroy_timer_on_stack(&ctx.timer);

	return ret;
}

static int gud_flush_rect(struct gud_device *gdrm, struct drm_framebuffer *fb,
			  const struct iosys_map *src, bool cached_reads,
			  const struct drm_format_info *format, struct drm_rect *rect)
{
	struct gud_set_buffer_req req;
	size_t len, trlen;
	int ret;

	drm_dbg(&gdrm->drm, "Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect));

	ret = gud_prep_flush(gdrm, fb, src, cached_reads, format, rect, &req);
	if (ret)
		return ret;

	len = le32_to_cpu(req.length);

	if (req.compression)
		trlen = le32_to_cpu(req.compressed_length);
	else
		trlen = len;

	gdrm->stats_length += len;
	/* Did it wrap around? */
	if (gdrm->stats_length <= len && gdrm->stats_actual_length) {
		gdrm->stats_length = len;
		gdrm->stats_actual_length = 0;
	}
	gdrm->stats_actual_length += trlen;

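	/*
	 * Full-update displays always receive the whole framebuffer, so the
	 * SET_BUFFER request is only needed to resync after a failed flush.
	 */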
	if (!(gdrm->flags & GUD_DISPLAY_FLAG_FULL_UPDATE) || gdrm->prev_flush_failed) {
		ret = gud_usb_set(gdrm, GUD_REQ_SET_BUFFER, 0, &req, sizeof(req));
		if (ret)
			return ret;
	}

	ret = gud_usb_bulk(gdrm, trlen);
	if (ret)
		gdrm->stats_num_errors++;

	return ret;
}

void gud_clear_damage(struct gud_device *gdrm)
{
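	/*
	 * An inverted rect denotes no damage; the min/max merging in
	 * gud_fb_queue_damage() shrink-wraps it onto the next damage rect.
	 */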
	gdrm->damage.x1 = INT_MAX;
	gdrm->damage.y1 = INT_MAX;
	gdrm->damage.x2 = 0;
	gdrm->damage.y2 = 0;
}

static void gud_flush_damage(struct gud_device *gdrm, struct drm_framebuffer *fb,
			     const struct iosys_map *src, bool cached_reads,
			     struct drm_rect *damage)
{
	const struct drm_format_info *format;
	unsigned int i, lines;
	size_t pitch;
	int ret;

	format = fb->format;
	if (format->format == DRM_FORMAT_XRGB8888 && gdrm->xrgb8888_emulation_format)
		format = gdrm->xrgb8888_emulation_format;

	/* Split update if it's too big */
	pitch = drm_format_info_min_pitch(format, 0, drm_rect_width(damage));
	lines = drm_rect_height(damage);

	if (gdrm->bulk_len < lines * pitch)
		lines = gdrm->bulk_len / pitch;

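	/*
	 * Example with illustrative numbers: a 64 KiB bulk buffer and a
	 * 1536-byte pitch give 42 lines per chunk, so a 100-line damage
	 * rect goes out as three transfers of 42, 42 and 16 lines.
	 */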
	for (i = 0; i < DIV_ROUND_UP(drm_rect_height(damage), lines); i++) {
		struct drm_rect rect = *damage;

		rect.y1 += i * lines;
		rect.y2 = min_t(u32, rect.y1 + lines, damage->y2);

		ret = gud_flush_rect(gdrm, fb, src, cached_reads, format, &rect);
		if (ret) {
			if (ret != -ENODEV && ret != -ECONNRESET &&
			    ret != -ESHUTDOWN && ret != -EPROTO)
				dev_err_ratelimited(fb->dev->dev,
						    "Failed to flush framebuffer: error=%d\n", ret);
			gdrm->prev_flush_failed = true;
			break;
		}
	}
}

void gud_flush_work(struct work_struct *work)
{
	struct gud_device *gdrm = container_of(work, struct gud_device, work);
	struct iosys_map shadow_map;
	struct drm_framebuffer *fb;
	struct drm_rect damage;
	int idx;

	if (!drm_dev_enter(&gdrm->drm, &idx))
		return;

	mutex_lock(&gdrm->damage_lock);
	fb = gdrm->fb;
	gdrm->fb = NULL;
	iosys_map_set_vaddr(&shadow_map, gdrm->shadow_buf);
	damage = gdrm->damage;
	gud_clear_damage(gdrm);
	mutex_unlock(&gdrm->damage_lock);

	if (!fb)
		goto out;

	gud_flush_damage(gdrm, fb, &shadow_map, true, &damage);

	drm_framebuffer_put(fb);
out:
	drm_dev_exit(idx);
}

static int gud_fb_queue_damage(struct gud_device *gdrm, struct drm_framebuffer *fb,
			       const struct iosys_map *src, struct drm_rect *damage)
{
	struct drm_framebuffer *old_fb = NULL;
	struct iosys_map shadow_map;

	mutex_lock(&gdrm->damage_lock);

	if (!gdrm->shadow_buf) {
		gdrm->shadow_buf = vzalloc(fb->pitches[0] * fb->height);
		if (!gdrm->shadow_buf) {
			mutex_unlock(&gdrm->damage_lock);
			return -ENOMEM;
		}
	}

	iosys_map_set_vaddr(&shadow_map, gdrm->shadow_buf);
	iosys_map_incr(&shadow_map, drm_fb_clip_offset(fb->pitches[0], fb->format, damage));
	drm_fb_memcpy(&shadow_map, fb->pitches, src, fb, damage);

	if (fb != gdrm->fb) {
		old_fb = gdrm->fb;
		drm_framebuffer_get(fb);
		gdrm->fb = fb;
	}

	gdrm->damage.x1 = min(gdrm->damage.x1, damage->x1);
	gdrm->damage.y1 = min(gdrm->damage.y1, damage->y1);
	gdrm->damage.x2 = max(gdrm->damage.x2, damage->x2);
	gdrm->damage.y2 = max(gdrm->damage.y2, damage->y2);

	mutex_unlock(&gdrm->damage_lock);

	queue_work(system_long_wq, &gdrm->work);

	if (old_fb)
		drm_framebuffer_put(old_fb);

	return 0;
}

static void gud_fb_handle_damage(struct gud_device *gdrm, struct drm_framebuffer *fb,
				 const struct iosys_map *src, struct drm_rect *damage)
{
	int ret;

	if (gdrm->flags & GUD_DISPLAY_FLAG_FULL_UPDATE)
		drm_rect_init(damage, 0, 0, fb->width, fb->height);

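	/*
	 * gud_fb_queue_damage() only fails with -ENOMEM (shadow buffer
	 * allocation), in which case we fall back to a synchronous flush;
	 * otherwise the damage has been queued and we are done.
	 */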
	if (gud_async_flush) {
		ret = gud_fb_queue_damage(gdrm, fb, src, damage);
		if (ret != -ENOMEM)
			return;
	}

	/* Imported buffers are assumed to be WriteCombined with uncached reads */
	gud_flush_damage(gdrm, fb, src, !fb->obj[0]->import_attach, damage);
}

int gud_pipe_check(struct drm_simple_display_pipe *pipe,
		   struct drm_plane_state *new_plane_state,
		   struct drm_crtc_state *new_crtc_state)
{
	struct gud_device *gdrm = to_gud_device(pipe->crtc.dev);
	struct drm_plane_state *old_plane_state = pipe->plane.state;
	const struct drm_display_mode *mode = &new_crtc_state->mode;
	struct drm_atomic_state *state = new_plane_state->state;
	struct drm_framebuffer *old_fb = old_plane_state->fb;
	struct drm_connector_state *connector_state = NULL;
	struct drm_framebuffer *fb = new_plane_state->fb;
	const struct drm_format_info *format = fb->format;
	struct drm_connector *connector;
	unsigned int i, num_properties;
	struct gud_state_req *req;
	int idx, ret;
	size_t len;

	if (WARN_ON_ONCE(!fb))
		return -EINVAL;

	if (old_plane_state->rotation != new_plane_state->rotation)
		new_crtc_state->mode_changed = true;

	if (old_fb && old_fb->format != format)
		new_crtc_state->mode_changed = true;

	if (!new_crtc_state->mode_changed && !new_crtc_state->connectors_changed)
		return 0;

	/* Only one connector is supported */
	if (hweight32(new_crtc_state->connector_mask) != 1)
		return -EINVAL;

	if (format->format == DRM_FORMAT_XRGB8888 && gdrm->xrgb8888_emulation_format)
		format = gdrm->xrgb8888_emulation_format;

	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc)
			break;
	}

	/*
	 * DRM_IOCTL_MODE_OBJ_SETPROPERTY on the rotation property will not have
	 * the connector included in the state.
	 */
	if (!connector_state) {
		struct drm_connector_list_iter conn_iter;

		drm_connector_list_iter_begin(pipe->crtc.dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			if (connector->state->crtc) {
				connector_state = connector->state;
				break;
			}
		}
		drm_connector_list_iter_end(&conn_iter);
	}

	if (WARN_ON_ONCE(!connector_state))
		return -ENOENT;

	len = struct_size(req, properties,
			  GUD_PROPERTIES_MAX_NUM + GUD_CONNECTOR_PROPERTIES_MAX_NUM);
	req = kzalloc(len, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	gud_from_display_mode(&req->mode, mode);

	req->format = gud_from_fourcc(format->format);
	if (WARN_ON_ONCE(!req->format)) {
		ret = -EINVAL;
		goto out;
	}

	req->connector = drm_connector_index(connector_state->connector);

	ret = gud_connector_fill_properties(connector_state, req->properties);
	if (ret < 0)
		goto out;

	num_properties = ret;
	for (i = 0; i < gdrm->num_properties; i++) {
		u16 prop = gdrm->properties[i];
		u64 val;

		switch (prop) {
		case GUD_PROPERTY_ROTATION:
			/* DRM UAPI matches the protocol so use value directly */
			val = new_plane_state->rotation;
			break;
		default:
			WARN_ON_ONCE(1);
			ret = -EINVAL;
			goto out;
		}

		req->properties[num_properties + i].prop = cpu_to_le16(prop);
		req->properties[num_properties + i].val = cpu_to_le64(val);
		num_properties++;
	}

	if (drm_dev_enter(fb->dev, &idx)) {
		len = struct_size(req, properties, num_properties);
		ret = gud_usb_set(gdrm, GUD_REQ_SET_STATE_CHECK, 0, req, len);
		drm_dev_exit(idx);
	} else {
		ret = -ENODEV;
	}
out:
	kfree(req);

	return ret;
}

void gud_pipe_update(struct drm_simple_display_pipe *pipe,
		     struct drm_plane_state *old_state)
{
	struct drm_device *drm = pipe->crtc.dev;
	struct gud_device *gdrm = to_gud_device(drm);
	struct drm_plane_state *state = pipe->plane.state;
	struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state);
	struct drm_framebuffer *fb = state->fb;
	struct drm_crtc *crtc = &pipe->crtc;
	struct drm_rect damage;
	int ret, idx;

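	/*
	 * On a mode change or disable, stop the async flush worker and drop
	 * its framebuffer reference and shadow buffer so no stale flush can
	 * reach the device afterwards.
	 */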
	if (crtc->state->mode_changed || !crtc->state->enable) {
		cancel_work_sync(&gdrm->work);
		mutex_lock(&gdrm->damage_lock);
		if (gdrm->fb) {
			drm_framebuffer_put(gdrm->fb);
			gdrm->fb = NULL;
		}
		gud_clear_damage(gdrm);
		vfree(gdrm->shadow_buf);
		gdrm->shadow_buf = NULL;
		mutex_unlock(&gdrm->damage_lock);
	}

	if (!drm_dev_enter(drm, &idx))
		return;

	if (!old_state->fb)
		gud_usb_set_u8(gdrm, GUD_REQ_SET_CONTROLLER_ENABLE, 1);

	if (fb && (crtc->state->mode_changed || crtc->state->connectors_changed))
		gud_usb_set(gdrm, GUD_REQ_SET_STATE_COMMIT, 0, NULL, 0);

	if (crtc->state->active_changed)
		gud_usb_set_u8(gdrm, GUD_REQ_SET_DISPLAY_ENABLE, crtc->state->active);

	if (!fb)
		goto ctrl_disable;

	ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
	if (ret)
		goto ctrl_disable;

	if (drm_atomic_helper_damage_merged(old_state, state, &damage))
		gud_fb_handle_damage(gdrm, fb, &shadow_plane_state->data[0], &damage);

	drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);

ctrl_disable:
	if (!crtc->state->enable)
		gud_usb_set_u8(gdrm, GUD_REQ_SET_CONTROLLER_ENABLE, 0);

	drm_dev_exit(idx);
}
613