/* xref: /openbmc/linux/drivers/gpu/drm/udl/udl_main.c (revision 629f59ad) */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Red Hat
 *
 * based in parts on udlfb.c:
 * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
 * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
 * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
 */

#include <drm/drm.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>

#include "udl_drv.h"

/* -BULK_SIZE as per usb-skeleton. Can we get full page and avoid overhead? */
#define BULK_SIZE 512

#define NR_USB_REQUEST_CHANNEL 0x12

#define MAX_TRANSFER (PAGE_SIZE*16 - BULK_SIZE)
#define WRITES_IN_FLIGHT (20)
#define MAX_VENDOR_DESCRIPTOR_SIZE 256

static struct urb *udl_get_urb_locked(struct udl_device *udl, long timeout);

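/*
 * Read the device's vendor-specific USB descriptor and pick out the SKU
 * pixel-area limit, if present. An unrecognized descriptor is only logged,
 * so the driver can still load.
 */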
static int udl_parse_vendor_descriptor(struct udl_device *udl)
{
	struct usb_device *udev = udl_to_usb_device(udl);
	char *desc;
	char *buf;
	char *desc_end;

	u8 total_len = 0;

	buf = kzalloc(MAX_VENDOR_DESCRIPTOR_SIZE, GFP_KERNEL);
	if (!buf)
		return false;
	desc = buf;

	total_len = usb_get_descriptor(udev, 0x5f, /* vendor specific */
				    0, desc, MAX_VENDOR_DESCRIPTOR_SIZE);
	if (total_len > 5) {
		DRM_INFO("vendor descriptor length:%x data:%11ph\n",
			total_len, desc);

		if ((desc[0] != total_len) || /* descriptor length */
		    (desc[1] != 0x5f) ||   /* vendor descriptor type */
		    (desc[2] != 0x01) ||   /* version (2 bytes) */
		    (desc[3] != 0x00) ||
		    (desc[4] != total_len - 2)) /* length after type */
			goto unrecognized;

		desc_end = desc + total_len;
		desc += 5; /* the fixed header we've already parsed */

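		/*
		 * The remaining bytes are key/length/value records: a
		 * little-endian u16 key, a u8 payload length, then that many
		 * bytes of value data.
		 */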
		while (desc < desc_end) {
			u8 length;
			u16 key;

			key = le16_to_cpu(*((u16 *) desc));
			desc += sizeof(u16);
			length = *desc;
			desc++;

			switch (key) {
			case 0x0200: { /* max_area */
				u32 max_area;
				max_area = le32_to_cpu(*((u32 *)desc));
				DRM_DEBUG("DL chip limited to %d pixel modes\n",
					max_area);
				udl->sku_pixel_limit = max_area;
				break;
			}
			default:
				break;
			}
			desc += length;
		}
	}

	goto success;

unrecognized:
	/* allow udlfb to load for now even if firmware unrecognized */
	DRM_ERROR("Unrecognized vendor firmware descriptor\n");

success:
	kfree(buf);
	return true;
}

/*
 * Need to ensure a channel is selected before submitting URBs
 */
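/*
 * The opaque 16-byte sequence below selects the standard channel; it is
 * sent to the device as a vendor control request (NR_USB_REQUEST_CHANNEL).
 */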
int udl_select_std_channel(struct udl_device *udl)
{
	static const u8 set_def_chn[] = {0x57, 0xCD, 0xDC, 0xA7,
					 0x1C, 0x88, 0x5E, 0x15,
					 0x60, 0xFE, 0xC6, 0x97,
					 0x16, 0x3D, 0x47, 0xF2};

	void *sendbuf;
	int ret;
	struct usb_device *udev = udl_to_usb_device(udl);

	sendbuf = kmemdup(set_def_chn, sizeof(set_def_chn), GFP_KERNEL);
	if (!sendbuf)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
			      NR_USB_REQUEST_CHANNEL,
			      (USB_DIR_OUT | USB_TYPE_VENDOR), 0, 0,
			      sendbuf, sizeof(set_def_chn),
			      USB_CTRL_SET_TIMEOUT);
	kfree(sendbuf);
	return ret < 0 ? ret : 0;
}

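/*
 * Completion handler for render URBs: put the URB back on the free list
 * and wake up anyone waiting in udl_get_urb() or udl_sync_pending_urbs().
 */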
void udl_urb_completion(struct urb *urb)
{
	struct urb_node *unode = urb->context;
	struct udl_device *udl = unode->dev;
	unsigned long flags;

	/* sync/async unlink faults aren't errors */
	if (urb->status) {
		if (!(urb->status == -ENOENT ||
		    urb->status == -ECONNRESET ||
		    urb->status == -EPROTO ||
		    urb->status == -ESHUTDOWN)) {
			DRM_ERROR("%s - nonzero write bulk status received: %d\n",
				__func__, urb->status);
		}
	}

	urb->transfer_buffer_length = udl->urbs.size; /* reset to actual */

	spin_lock_irqsave(&udl->urbs.lock, flags);
	list_add_tail(&unode->entry, &udl->urbs.list);
	udl->urbs.available++;
	spin_unlock_irqrestore(&udl->urbs.lock, flags);

	wake_up(&udl->urbs.sleep);
}

static void udl_free_urb_list(struct drm_device *dev)
{
	struct udl_device *udl = to_udl(dev);
	struct urb_node *unode;
	struct urb *urb;

	DRM_DEBUG("Waiting for completes and freeing all render urbs\n");

	/* keep waiting and freeing, until we've got 'em all */
	while (udl->urbs.count) {
		spin_lock_irq(&udl->urbs.lock);
		urb = udl_get_urb_locked(udl, MAX_SCHEDULE_TIMEOUT);
		udl->urbs.count--;
		spin_unlock_irq(&udl->urbs.lock);
		if (WARN_ON(!urb))
			break;
		unode = urb->context;
		/* Free each separately allocated piece */
		usb_free_coherent(urb->dev, udl->urbs.size,
				  urb->transfer_buffer, urb->transfer_dma);
		usb_free_urb(urb);
		kfree(unode);
	}

	wake_up_all(&udl->urbs.sleep);
}

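/*
 * Allocate the pool of render URBs, each with a coherent transfer buffer of
 * 'size' bytes. If a coherent allocation fails while the buffer is still
 * larger than a page, the size is halved and the whole pool is rebuilt.
 */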
static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
{
	struct udl_device *udl = to_udl(dev);
	struct urb *urb;
	struct urb_node *unode;
	char *buf;
	size_t wanted_size = count * size;
	struct usb_device *udev = udl_to_usb_device(udl);

	spin_lock_init(&udl->urbs.lock);
	INIT_LIST_HEAD(&udl->urbs.list);
	init_waitqueue_head(&udl->urbs.sleep);
	udl->urbs.count = 0;
	udl->urbs.available = 0;

retry:
	udl->urbs.size = size;

	while (udl->urbs.count * size < wanted_size) {
		unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL);
		if (!unode)
			break;
		unode->dev = udl;

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			kfree(unode);
			break;
		}
		unode->urb = urb;

		buf = usb_alloc_coherent(udev, size, GFP_KERNEL,
					 &urb->transfer_dma);
		if (!buf) {
			kfree(unode);
			usb_free_urb(urb);
			if (size > PAGE_SIZE) {
				size /= 2;
				udl_free_urb_list(dev);
				goto retry;
			}
			break;
		}

		/* urb->transfer_buffer_length set to actual before submit */
		usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, 1),
				  buf, size, udl_urb_completion, unode);
		urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

		list_add_tail(&unode->entry, &udl->urbs.list);

		udl->urbs.count++;
		udl->urbs.available++;
	}

	DRM_DEBUG("allocated %d %d byte urbs\n", udl->urbs.count, (int) size);

	return udl->urbs.count;
}

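/*
 * Take a free URB off the list, waiting up to 'timeout' jiffies for one to
 * become available. Must be called with urbs.lock held; the lock is dropped
 * and re-acquired while sleeping. Returns NULL on timeout or when the pool
 * is being torn down (urbs.count is zero).
 */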
static struct urb *udl_get_urb_locked(struct udl_device *udl, long timeout)
{
	struct urb_node *unode;

	assert_spin_locked(&udl->urbs.lock);

	/* Wait for an in-flight buffer to complete and get re-queued */
	if (!wait_event_lock_irq_timeout(udl->urbs.sleep,
					 !udl->urbs.count ||
					 !list_empty(&udl->urbs.list),
					 udl->urbs.lock, timeout)) {
		DRM_INFO("wait for urb interrupted: available: %d\n",
			 udl->urbs.available);
		return NULL;
	}

	if (!udl->urbs.count)
		return NULL;

	unode = list_first_entry(&udl->urbs.list, struct urb_node, entry);
	list_del_init(&unode->entry);
	udl->urbs.available--;

	return unode->urb;
}

#define GET_URB_TIMEOUT	HZ
struct urb *udl_get_urb(struct drm_device *dev)
{
	struct udl_device *udl = to_udl(dev);
	struct urb *urb;

	spin_lock_irq(&udl->urbs.lock);
	urb = udl_get_urb_locked(udl, GET_URB_TIMEOUT);
	spin_unlock_irq(&udl->urbs.lock);
	return urb;
}

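/*
 * Submit a render URB with 'len' bytes of payload. The usual pattern is
 * urb = udl_get_urb(dev), fill urb->transfer_buffer with at most
 * udl->urbs.size bytes, then udl_submit_urb(dev, urb, len). On failure the
 * URB is handed back to the free pool via udl_urb_completion().
 */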
int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
{
	struct udl_device *udl = to_udl(dev);
	int ret;

	if (WARN_ON(len > udl->urbs.size)) {
		ret = -EINVAL;
		goto error;
	}
	urb->transfer_buffer_length = len; /* set to actual payload len */
	ret = usb_submit_urb(urb, GFP_ATOMIC);
 error:
	if (ret) {
		udl_urb_completion(urb); /* because no one else will */
		DRM_ERROR("usb_submit_urb error %x\n", ret);
	}
	return ret;
}

/* wait until all pending URBs have been processed */
void udl_sync_pending_urbs(struct drm_device *dev)
{
	struct udl_device *udl = to_udl(dev);

	spin_lock_irq(&udl->urbs.lock);
	/* 2 seconds as a sane timeout */
	if (!wait_event_lock_irq_timeout(udl->urbs.sleep,
					 udl->urbs.available == udl->urbs.count,
					 udl->urbs.lock,
					 msecs_to_jiffies(2000)))
		drm_err(dev, "Timeout for syncing pending URBs\n");
	spin_unlock_irq(&udl->urbs.lock);
}

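/*
 * One-time device setup: parse the vendor descriptor, select the standard
 * channel, allocate the URB pool and set up modesetting. A missing DMA
 * device only disables buffer sharing and is not treated as an error.
 */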
int udl_init(struct udl_device *udl)
{
	struct drm_device *dev = &udl->drm;
	int ret = -ENOMEM;

	DRM_DEBUG("\n");

	udl->dmadev = usb_intf_get_dma_device(to_usb_interface(dev->dev));
	if (!udl->dmadev)
		drm_warn(dev, "buffer sharing not supported"); /* not an error */

	mutex_init(&udl->gem_lock);

	if (!udl_parse_vendor_descriptor(udl)) {
		ret = -ENODEV;
		DRM_ERROR("firmware not recognized. Assume incompatible device\n");
		goto err;
	}

	if (udl_select_std_channel(udl))
		DRM_ERROR("Selecting channel failed\n");

	if (!udl_alloc_urb_list(dev, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
		DRM_ERROR("udl_alloc_urb_list failed\n");
		goto err;
	}

	DRM_DEBUG("\n");
	ret = udl_modeset_init(dev);
	if (ret)
		goto err;

	drm_kms_helper_poll_init(dev);

	return 0;

err:
	if (udl->urbs.count)
		udl_free_urb_list(dev);
	put_device(udl->dmadev);
	DRM_ERROR("%d\n", ret);
	return ret;
}

int udl_drop_usb(struct drm_device *dev)
{
	struct udl_device *udl = to_udl(dev);

	udl_free_urb_list(dev);
	put_device(udl->dmadev);
	udl->dmadev = NULL;

	return 0;
}