// SPDX-License-Identifier: MIT
/*
 * Copyright 2020 Noralf Trønnes
 */

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/lz4.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/string_helpers.h>
#include <linux/usb.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/gud.h>

#include "gud_internal.h"

/* Only used internally */
static const struct drm_format_info gud_drm_format_r1 = {
	.format = GUD_DRM_FORMAT_R1,
	.num_planes = 1,
	.char_per_block = { 1, 0, 0 },
	.block_w = { 8, 0, 0 },
	.block_h = { 1, 0, 0 },
	.hsub = 1,
	.vsub = 1,
};

static const struct drm_format_info gud_drm_format_xrgb1111 = {
	.format = GUD_DRM_FORMAT_XRGB1111,
	.num_planes = 1,
	.char_per_block = { 1, 0, 0 },
	.block_w = { 2, 0, 0 },
	.block_h = { 1, 0, 0 },
	.hsub = 1,
	.vsub = 1,
};

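/*
 * Pitch example for the two packed formats above (numbers are only
 * illustrative): GUD_DRM_FORMAT_R1 packs eight 1-bit pixels per byte
 * (block_w = 8, char_per_block = 1) and GUD_DRM_FORMAT_XRGB1111 packs
 * two 4-bit pixels per byte (block_w = 2), so drm_format_info_min_pitch()
 * rounds the width up to whole blocks, e.g. for a 125 pixel wide line:
 *
 *   R1:       DIV_ROUND_UP(125, 8) * 1 = 16 bytes
 *   XRGB1111: DIV_ROUND_UP(125, 2) * 1 = 63 bytes
 */
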
static int gud_usb_control_msg(struct usb_interface *intf, bool in,
			       u8 request, u16 value, void *buf, size_t len)
{
	u8 requesttype = USB_TYPE_VENDOR | USB_RECIP_INTERFACE;
	u8 ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
	struct usb_device *usb = interface_to_usbdev(intf);
	unsigned int pipe;

	if (len && !buf)
		return -EINVAL;

	if (in) {
		pipe = usb_rcvctrlpipe(usb, 0);
		requesttype |= USB_DIR_IN;
	} else {
		pipe = usb_sndctrlpipe(usb, 0);
		requesttype |= USB_DIR_OUT;
	}

	return usb_control_msg(usb, pipe, request, requesttype, value,
			       ifnum, buf, len, USB_CTRL_GET_TIMEOUT);
}

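/*
 * The helper above issues a vendor control request on endpoint zero:
 * bmRequestType works out to 0xc1 for IN and 0x41 for OUT transfers,
 * bRequest carries the GUD request number, wValue the request argument,
 * wIndex the interface number, and the payload travels in the data stage.
 */
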
static int gud_get_display_descriptor(struct usb_interface *intf,
				      struct gud_display_descriptor_req *desc)
{
	void *buf;
	int ret;

	buf = kmalloc(sizeof(*desc), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = gud_usb_control_msg(intf, true, GUD_REQ_GET_DESCRIPTOR, 0, buf, sizeof(*desc));
	memcpy(desc, buf, sizeof(*desc));
	kfree(buf);
	if (ret < 0)
		return ret;
	if (ret != sizeof(*desc))
		return -EIO;

	if (le32_to_cpu(desc->magic) != GUD_DISPLAY_MAGIC)
		return -ENODATA;

	DRM_DEV_DEBUG_DRIVER(&intf->dev,
			     "version=%u flags=0x%x compression=0x%x max_buffer_size=%u\n",
			     desc->version, le32_to_cpu(desc->flags), desc->compression,
			     le32_to_cpu(desc->max_buffer_size));

	if (!desc->version || !desc->max_width || !desc->max_height ||
	    le32_to_cpu(desc->min_width) > le32_to_cpu(desc->max_width) ||
	    le32_to_cpu(desc->min_height) > le32_to_cpu(desc->max_height))
		return -EINVAL;

	return 0;
}

static int gud_status_to_errno(u8 status)
{
	switch (status) {
	case GUD_STATUS_OK:
		return 0;
	case GUD_STATUS_BUSY:
		return -EBUSY;
	case GUD_STATUS_REQUEST_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case GUD_STATUS_PROTOCOL_ERROR:
		return -EPROTO;
	case GUD_STATUS_INVALID_PARAMETER:
		return -EINVAL;
	case GUD_STATUS_ERROR:
		return -EREMOTEIO;
	default:
		return -EREMOTEIO;
	}
}

static int gud_usb_get_status(struct usb_interface *intf)
{
	int ret, status = -EIO;
	u8 *buf;

	buf = kmalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = gud_usb_control_msg(intf, true, GUD_REQ_GET_STATUS, 0, buf, sizeof(*buf));
	if (ret == sizeof(*buf))
		status = gud_status_to_errno(*buf);
	kfree(buf);

	if (ret < 0)
		return ret;

	return status;
}

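/*
 * All control requests funnel through gud_usb_transfer() below: ctrl_lock
 * serialises them, drm_dev_enter() guards against a disconnected device,
 * and on a stalled request (-EPIPE), or after each successful SET when the
 * device has set GUD_DISPLAY_FLAG_STATUS_ON_SET, a status byte is fetched
 * so the device can report a more specific error code.
 */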
static int gud_usb_transfer(struct gud_device *gdrm, bool in, u8 request, u16 index,
			    void *buf, size_t len)
{
	struct usb_interface *intf = to_usb_interface(gdrm->drm.dev);
	int idx, ret;

	drm_dbg(&gdrm->drm, "%s: request=0x%x index=%u len=%zu\n",
		in ? "get" : "set", request, index, len);

	if (!drm_dev_enter(&gdrm->drm, &idx))
		return -ENODEV;

	mutex_lock(&gdrm->ctrl_lock);

	ret = gud_usb_control_msg(intf, in, request, index, buf, len);
	if (ret == -EPIPE || ((gdrm->flags & GUD_DISPLAY_FLAG_STATUS_ON_SET) && !in && ret >= 0)) {
		int status;

		status = gud_usb_get_status(intf);
		if (status < 0) {
			ret = status;
		} else if (ret < 0) {
			dev_err_once(gdrm->drm.dev,
				     "Unexpected status OK for failed transfer\n");
			ret = -EPIPE;
		}
	}

	if (ret < 0) {
		drm_dbg(&gdrm->drm, "ret=%d\n", ret);
		gdrm->stats_num_errors++;
	}

	mutex_unlock(&gdrm->ctrl_lock);
	drm_dev_exit(idx);

	return ret;
}

/*
 * @buf cannot be allocated on the stack.
 * Returns number of bytes received or negative error code on failure.
 */
int gud_usb_get(struct gud_device *gdrm, u8 request, u16 index, void *buf, size_t max_len)
{
	return gud_usb_transfer(gdrm, true, request, index, buf, max_len);
}

/*
 * @buf can be allocated on the stack or NULL.
 * Returns zero on success or negative error code on failure.
 */
int gud_usb_set(struct gud_device *gdrm, u8 request, u16 index, void *buf, size_t len)
{
	void *trbuf = NULL;
	int ret;

	if (buf && len) {
		trbuf = kmemdup(buf, len, GFP_KERNEL);
		if (!trbuf)
			return -ENOMEM;
	}

	ret = gud_usb_transfer(gdrm, false, request, index, trbuf, len);
	kfree(trbuf);
	if (ret < 0)
		return ret;

	return ret != len ? -EIO : 0;
}

/*
 * @val can be allocated on the stack.
 * Returns zero on success or negative error code on failure.
 */
int gud_usb_get_u8(struct gud_device *gdrm, u8 request, u16 index, u8 *val)
{
	u8 *buf;
	int ret;

	buf = kmalloc(sizeof(*val), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = gud_usb_get(gdrm, request, index, buf, sizeof(*val));
	*val = *buf;
	kfree(buf);
	if (ret < 0)
		return ret;

	return ret != sizeof(*val) ? -EIO : 0;
}

/* Returns zero on success or negative error code on failure. */
int gud_usb_set_u8(struct gud_device *gdrm, u8 request, u8 val)
{
	return gud_usb_set(gdrm, request, 0, &val, sizeof(val));
}

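/*
 * Sketch of typical usage of the helpers above (GUD_REQ_EXAMPLE is a
 * made-up request number for illustration): small SET payloads may live
 * on the stack because gud_usb_set() copies them with kmemdup(), while
 * gud_usb_get() passes the buffer straight to usb_control_msg() and so
 * needs kmalloc'ed (DMA-able) memory:
 *
 *	u8 val;
 *	int ret;
 *
 *	ret = gud_usb_get_u8(gdrm, GUD_REQ_EXAMPLE, 0, &val);
 *	if (!ret)
 *		ret = gud_usb_set_u8(gdrm, GUD_REQ_EXAMPLE, val | BIT(0));
 */
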
static int gud_get_properties(struct gud_device *gdrm)
{
	struct gud_property_req *properties;
	unsigned int i, num_properties;
	int ret;

	properties = kcalloc(GUD_PROPERTIES_MAX_NUM, sizeof(*properties), GFP_KERNEL);
	if (!properties)
		return -ENOMEM;

	ret = gud_usb_get(gdrm, GUD_REQ_GET_PROPERTIES, 0,
			  properties, GUD_PROPERTIES_MAX_NUM * sizeof(*properties));
	if (ret <= 0)
		goto out;
	if (ret % sizeof(*properties)) {
		ret = -EIO;
		goto out;
	}

	num_properties = ret / sizeof(*properties);
	ret = 0;

	gdrm->properties = drmm_kcalloc(&gdrm->drm, num_properties, sizeof(*gdrm->properties),
					GFP_KERNEL);
	if (!gdrm->properties) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num_properties; i++) {
		u16 prop = le16_to_cpu(properties[i].prop);
		u64 val = le64_to_cpu(properties[i].val);

		switch (prop) {
		case GUD_PROPERTY_ROTATION:
			/*
			 * DRM UAPI matches the protocol so use the value directly,
			 * but mask out any additions on future devices.
			 */
			val &= GUD_ROTATION_MASK;
			ret = drm_plane_create_rotation_property(&gdrm->pipe.plane,
								 DRM_MODE_ROTATE_0, val);
			break;
		default:
			/* New ones might show up in future devices, skip those we don't know. */
			drm_dbg(&gdrm->drm, "Ignoring unknown property: %u\n", prop);
			continue;
		}

		if (ret)
			goto out;

		gdrm->properties[gdrm->num_properties++] = prop;
	}
out:
	kfree(properties);

	return ret;
}

/*
 * FIXME: Dma-buf sharing requires DMA support by the importing device.
 *        This function is a workaround to make USB devices work as well.
 *        See todo.rst for how to fix the issue in the dma-buf framework.
 */
static struct drm_gem_object *gud_gem_prime_import(struct drm_device *drm, struct dma_buf *dma_buf)
{
	struct gud_device *gdrm = to_gud_device(drm);

	if (!gdrm->dmadev)
		return ERR_PTR(-ENODEV);

	return drm_gem_prime_import_dev(drm, dma_buf, gdrm->dmadev);
}

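/*
 * The stats file below prints the compression ratio with one fractional
 * digit, e.g. (figures purely illustrative) stats_length = 1228800 and
 * stats_actual_length = 345600 gives ratio = 3, remainder = 192000 and
 * 192000 * 10 / 345600 = 5, printed as "Compression ratio: 3.5".
 */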
static int gud_stats_debugfs(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct gud_device *gdrm = to_gud_device(node->minor->dev);
	char buf[10];

	string_get_size(gdrm->bulk_len, 1, STRING_UNITS_2, buf, sizeof(buf));
	seq_printf(m, "Max buffer size: %s\n", buf);
	seq_printf(m, "Number of errors:  %u\n", gdrm->stats_num_errors);

	seq_puts(m, "Compression:      ");
	if (gdrm->compression & GUD_COMPRESSION_LZ4)
		seq_puts(m, " lz4");
	if (!gdrm->compression)
		seq_puts(m, " none");
	seq_puts(m, "\n");

	if (gdrm->compression) {
		u64 remainder;
		u64 ratio = div64_u64_rem(gdrm->stats_length, gdrm->stats_actual_length,
					  &remainder);
		u64 ratio_frac = div64_u64(remainder * 10, gdrm->stats_actual_length);

		seq_printf(m, "Compression ratio: %llu.%llu\n", ratio, ratio_frac);
	}

	return 0;
}

static const struct drm_info_list gud_debugfs_list[] = {
	{ "stats", gud_stats_debugfs, 0, NULL },
};

static void gud_debugfs_init(struct drm_minor *minor)
{
	drm_debugfs_create_files(gud_debugfs_list, ARRAY_SIZE(gud_debugfs_list),
				 minor->debugfs_root, minor);
}

static const struct drm_simple_display_pipe_funcs gud_pipe_funcs = {
	.check      = gud_pipe_check,
	.update	    = gud_pipe_update,
};

static const struct drm_mode_config_funcs gud_mode_config_funcs = {
	.fb_create = drm_gem_fb_create_with_dirty,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};

static const u64 gud_pipe_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID
};

DEFINE_DRM_GEM_FOPS(gud_fops);

static const struct drm_driver gud_drm_driver = {
	.driver_features	= DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
	.fops			= &gud_fops,
	DRM_GEM_SHMEM_DRIVER_OPS,
	.gem_prime_import	= gud_gem_prime_import,
	.debugfs_init		= gud_debugfs_init,

	.name			= "gud",
	.desc			= "Generic USB Display",
	.date			= "20200422",
	.major			= 1,
	.minor			= 0,
};

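/*
 * The bulk transfer buffer is vmalloc'ed (32-bit addressable pages, no
 * large physically contiguous allocation needed) and mirrored in a
 * scatter-gather table so the flush code can hand it to the USB core as
 * a scatter-gather bulk transfer.
 */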
static int gud_alloc_bulk_buffer(struct gud_device *gdrm)
{
	unsigned int i, num_pages;
	struct page **pages;
	void *ptr;
	int ret;

	gdrm->bulk_buf = vmalloc_32(gdrm->bulk_len);
	if (!gdrm->bulk_buf)
		return -ENOMEM;

	num_pages = DIV_ROUND_UP(gdrm->bulk_len, PAGE_SIZE);
	pages = kmalloc_array(num_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	for (i = 0, ptr = gdrm->bulk_buf; i < num_pages; i++, ptr += PAGE_SIZE)
		pages[i] = vmalloc_to_page(ptr);

	ret = sg_alloc_table_from_pages(&gdrm->bulk_sgt, pages, num_pages,
					0, gdrm->bulk_len, GFP_KERNEL);
	kfree(pages);

	return ret;
}

static void gud_free_buffers_and_mutex(void *data)
{
	struct gud_device *gdrm = data;

	vfree(gdrm->compress_buf);
	gdrm->compress_buf = NULL;
	sg_free_table(&gdrm->bulk_sgt);
	vfree(gdrm->bulk_buf);
	gdrm->bulk_buf = NULL;
	mutex_destroy(&gdrm->ctrl_lock);
}

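/*
 * Probe sequence: find the bulk-out endpoint, fetch and validate the
 * display descriptor, create the DRM device, query the pixel formats
 * (adding emulated XRGB8888 on top of a native format when needed),
 * size and allocate the bulk buffer, then fetch the plane properties
 * and connectors before registering the DRM device and generic fbdev.
 */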
static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
	const struct drm_format_info *xrgb8888_emulation_format = NULL;
	bool rgb565_supported = false, xrgb8888_supported = false;
	unsigned int num_formats_dev, num_formats = 0;
	struct usb_endpoint_descriptor *bulk_out;
	struct gud_display_descriptor_req desc;
	struct device *dev = &intf->dev;
	size_t max_buffer_size = 0;
	struct gud_device *gdrm;
	struct drm_device *drm;
	u8 *formats_dev;
	u32 *formats;
	int ret, i;

	ret = usb_find_bulk_out_endpoint(intf->cur_altsetting, &bulk_out);
	if (ret)
		return ret;

	ret = gud_get_display_descriptor(intf, &desc);
	if (ret) {
		DRM_DEV_DEBUG_DRIVER(dev, "Not a display interface: ret=%d\n", ret);
		return -ENODEV;
	}

	if (desc.version > 1) {
		dev_err(dev, "Protocol version %u is not supported\n", desc.version);
		return -ENODEV;
	}

	gdrm = devm_drm_dev_alloc(dev, &gud_drm_driver, struct gud_device, drm);
	if (IS_ERR(gdrm))
		return PTR_ERR(gdrm);

	drm = &gdrm->drm;
	drm->mode_config.funcs = &gud_mode_config_funcs;
	ret = drmm_mode_config_init(drm);
	if (ret)
		return ret;

	gdrm->flags = le32_to_cpu(desc.flags);
	gdrm->compression = desc.compression & GUD_COMPRESSION_LZ4;

	if (gdrm->flags & GUD_DISPLAY_FLAG_FULL_UPDATE && gdrm->compression)
		return -EINVAL;

	mutex_init(&gdrm->ctrl_lock);
	mutex_init(&gdrm->damage_lock);
	INIT_WORK(&gdrm->work, gud_flush_work);
	gud_clear_damage(gdrm);

	ret = devm_add_action(dev, gud_free_buffers_and_mutex, gdrm);
	if (ret)
		return ret;

	drm->mode_config.min_width = le32_to_cpu(desc.min_width);
	drm->mode_config.max_width = le32_to_cpu(desc.max_width);
	drm->mode_config.min_height = le32_to_cpu(desc.min_height);
	drm->mode_config.max_height = le32_to_cpu(desc.max_height);

	formats_dev = devm_kmalloc(dev, GUD_FORMATS_MAX_NUM, GFP_KERNEL);
	/* Add room for emulated XRGB8888 */
	formats = devm_kmalloc_array(dev, GUD_FORMATS_MAX_NUM + 1, sizeof(*formats), GFP_KERNEL);
	if (!formats_dev || !formats)
		return -ENOMEM;

	ret = gud_usb_get(gdrm, GUD_REQ_GET_FORMATS, 0, formats_dev, GUD_FORMATS_MAX_NUM);
	if (ret < 0)
		return ret;

	num_formats_dev = ret;
	for (i = 0; i < num_formats_dev; i++) {
		const struct drm_format_info *info;
		size_t fmt_buf_size;
		u32 format;

		format = gud_to_fourcc(formats_dev[i]);
		if (!format) {
			drm_dbg(drm, "Unsupported format: 0x%02x\n", formats_dev[i]);
			continue;
		}

		if (format == GUD_DRM_FORMAT_R1)
			info = &gud_drm_format_r1;
		else if (format == GUD_DRM_FORMAT_XRGB1111)
			info = &gud_drm_format_xrgb1111;
		else
			info = drm_format_info(format);

		switch (format) {
		case GUD_DRM_FORMAT_R1:
			fallthrough;
		case DRM_FORMAT_R8:
			fallthrough;
		case GUD_DRM_FORMAT_XRGB1111:
			fallthrough;
		case DRM_FORMAT_RGB332:
			fallthrough;
		case DRM_FORMAT_RGB888:
			if (!xrgb8888_emulation_format)
				xrgb8888_emulation_format = info;
			break;
		case DRM_FORMAT_RGB565:
			rgb565_supported = true;
			if (!xrgb8888_emulation_format)
				xrgb8888_emulation_format = info;
			break;
		case DRM_FORMAT_XRGB8888:
			xrgb8888_supported = true;
			break;
		}

		fmt_buf_size = drm_format_info_min_pitch(info, 0, drm->mode_config.max_width) *
			       drm->mode_config.max_height;
		max_buffer_size = max(max_buffer_size, fmt_buf_size);

		if (format == GUD_DRM_FORMAT_R1 || format == GUD_DRM_FORMAT_XRGB1111)
			continue; /* Internal not for userspace */

		formats[num_formats++] = format;
	}

	if (!num_formats && !xrgb8888_emulation_format) {
		dev_err(dev, "No supported pixel formats found\n");
		return -EINVAL;
	}

	/* Prefer speed over color depth */
	if (rgb565_supported)
		drm->mode_config.preferred_depth = 16;

	if (!xrgb8888_supported && xrgb8888_emulation_format) {
		gdrm->xrgb8888_emulation_format = xrgb8888_emulation_format;
		formats[num_formats++] = DRM_FORMAT_XRGB8888;
	}

	if (desc.max_buffer_size)
		max_buffer_size = le32_to_cpu(desc.max_buffer_size);
	/* Prevent a misbehaving device from allocating loads of RAM. 4096x4096@XRGB8888 = 64 MB */
	if (max_buffer_size > SZ_64M)
		max_buffer_size = SZ_64M;

	gdrm->bulk_pipe = usb_sndbulkpipe(interface_to_usbdev(intf), usb_endpoint_num(bulk_out));
	gdrm->bulk_len = max_buffer_size;

	ret = gud_alloc_bulk_buffer(gdrm);
	if (ret)
		return ret;

	if (gdrm->compression & GUD_COMPRESSION_LZ4) {
		gdrm->lz4_comp_mem = devm_kmalloc(dev, LZ4_MEM_COMPRESS, GFP_KERNEL);
		if (!gdrm->lz4_comp_mem)
			return -ENOMEM;

		gdrm->compress_buf = vmalloc(gdrm->bulk_len);
		if (!gdrm->compress_buf)
			return -ENOMEM;
	}

	ret = drm_simple_display_pipe_init(drm, &gdrm->pipe, &gud_pipe_funcs,
					   formats, num_formats,
					   gud_pipe_modifiers, NULL);
	if (ret)
		return ret;

	devm_kfree(dev, formats);
	devm_kfree(dev, formats_dev);

	ret = gud_get_properties(gdrm);
	if (ret) {
		dev_err(dev, "Failed to get properties (error=%d)\n", ret);
		return ret;
	}

	drm_plane_enable_fb_damage_clips(&gdrm->pipe.plane);

	ret = gud_get_connectors(gdrm);
	if (ret) {
		dev_err(dev, "Failed to get connectors (error=%d)\n", ret);
		return ret;
	}

	drm_mode_config_reset(drm);

	usb_set_intfdata(intf, gdrm);

	gdrm->dmadev = usb_intf_get_dma_device(intf);
	if (!gdrm->dmadev)
		dev_warn(dev, "buffer sharing not supported");

	ret = drm_dev_register(drm, 0);
	if (ret) {
		put_device(gdrm->dmadev);
		return ret;
	}

	drm_kms_helper_poll_init(drm);

	drm_fbdev_generic_setup(drm, 0);

	return 0;
}

static void gud_disconnect(struct usb_interface *interface)
{
	struct gud_device *gdrm = usb_get_intfdata(interface);
	struct drm_device *drm = &gdrm->drm;

	drm_dbg(drm, "%s:\n", __func__);

	drm_kms_helper_poll_fini(drm);
	drm_dev_unplug(drm);
	drm_atomic_helper_shutdown(drm);
	put_device(gdrm->dmadev);
	gdrm->dmadev = NULL;
}

static int gud_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct gud_device *gdrm = usb_get_intfdata(intf);

	return drm_mode_config_helper_suspend(&gdrm->drm);
}

static int gud_resume(struct usb_interface *intf)
{
	struct gud_device *gdrm = usb_get_intfdata(intf);

	drm_mode_config_helper_resume(&gdrm->drm);

	return 0;
}

static const struct usb_device_id gud_id_table[] = {
	{ USB_DEVICE_INTERFACE_CLASS(0x1d50, 0x614d, USB_CLASS_VENDOR_SPEC) },
	{ USB_DEVICE_INTERFACE_CLASS(0x16d0, 0x10a9, USB_CLASS_VENDOR_SPEC) },
	{ }
};

MODULE_DEVICE_TABLE(usb, gud_id_table);

static struct usb_driver gud_usb_driver = {
	.name		= "gud",
	.probe		= gud_probe,
	.disconnect	= gud_disconnect,
	.id_table	= gud_id_table,
	.suspend	= gud_suspend,
	.resume		= gud_resume,
	.reset_resume	= gud_resume,
};

module_usb_driver(gud_usb_driver);

MODULE_AUTHOR("Noralf Trønnes");
MODULE_LICENSE("Dual MIT/GPL");