xref: /openbmc/linux/drivers/gpu/drm/udl/udl_main.c (revision 647d41d3)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Red Hat
 *
 * based in parts on udlfb.c:
 * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
 * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
 * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
 */

#include <asm/unaligned.h>

#include <drm/drm.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>

#include "udl_drv.h"

/* BULK_SIZE as per usb-skeleton. Can we get a full page and avoid overhead? */
#define BULK_SIZE 512

#define NR_USB_REQUEST_CHANNEL 0x12

#define MAX_TRANSFER (PAGE_SIZE*16 - BULK_SIZE)
#define WRITES_IN_FLIGHT (4)
#define MAX_VENDOR_DESCRIPTOR_SIZE 256

#define GET_URB_TIMEOUT	HZ
#define FREE_URB_TIMEOUT (HZ*2)

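/*
 * Read the DisplayLink vendor-specific descriptor and pull out the
 * chip's pixel-area limit. The layout is inherited from udlfb; an
 * unrecognized descriptor is reported but does not prevent the driver
 * from loading.
 */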
static bool udl_parse_vendor_descriptor(struct udl_device *udl)
{
	struct usb_device *udev = udl_to_usb_device(udl);
	char *desc;
	char *buf;
	char *desc_end;

	int total_len;

	buf = kzalloc(MAX_VENDOR_DESCRIPTOR_SIZE, GFP_KERNEL);
	if (!buf)
		return false;
	desc = buf;

	total_len = usb_get_descriptor(udev, 0x5f, /* vendor specific */
				    0, desc, MAX_VENDOR_DESCRIPTOR_SIZE);
	if (total_len > 5) {
		DRM_INFO("vendor descriptor length:%x data:%11ph\n",
			total_len, desc);

		if ((desc[0] != total_len) || /* descriptor length */
		    (desc[1] != 0x5f) ||   /* vendor descriptor type */
		    (desc[2] != 0x01) ||   /* version (2 bytes) */
		    (desc[3] != 0x00) ||
		    (desc[4] != total_len - 2)) /* length after type */
			goto unrecognized;

		desc_end = desc + total_len;
		desc += 5; /* the fixed header we've already parsed */

		while (desc < desc_end) {
			u8 length;
			u16 key;

			key = get_unaligned_le16(desc);
			desc += sizeof(u16);
			length = *desc;
			desc++;

			switch (key) {
			case 0x0200: { /* max_area */
				u32 max_area;
				max_area = get_unaligned_le32(desc);
				DRM_DEBUG("DL chip limited to %d pixels\n",
					max_area);
				udl->sku_pixel_limit = max_area;
				break;
			}
			default:
				break;
			}
			desc += length;
		}
	}

	goto success;

unrecognized:
	/* allow the driver to load for now even if the firmware is unrecognized */
	DRM_ERROR("Unrecognized vendor firmware descriptor\n");

success:
	kfree(buf);
	return true;
}

/*
 * Need to ensure a channel is selected before submitting URBs
 */
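/*
 * The 16 bytes below are sent verbatim in a vendor control request
 * (NR_USB_REQUEST_CHANNEL). The value comes from udlfb and appears to
 * be an opaque key the firmware expects for the standard channel.
 */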
static int udl_select_std_channel(struct udl_device *udl)
{
	static const u8 set_def_chn[] = {0x57, 0xCD, 0xDC, 0xA7,
					 0x1C, 0x88, 0x5E, 0x15,
					 0x60, 0xFE, 0xC6, 0x97,
					 0x16, 0x3D, 0x47, 0xF2};

	void *sendbuf;
	int ret;
	struct usb_device *udev = udl_to_usb_device(udl);

	sendbuf = kmemdup(set_def_chn, sizeof(set_def_chn), GFP_KERNEL);
	if (!sendbuf)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
			      NR_USB_REQUEST_CHANNEL,
			      (USB_DIR_OUT | USB_TYPE_VENDOR), 0, 0,
			      sendbuf, sizeof(set_def_chn),
			      USB_CTRL_SET_TIMEOUT);
	kfree(sendbuf);
	return ret < 0 ? ret : 0;
}

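/*
 * Deferred release of an URB slot; only scheduled from the fb_defio
 * path in udl_urb_completion(), which is currently compiled out.
 */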
static void udl_release_urb_work(struct work_struct *work)
{
	struct urb_node *unode = container_of(work, struct urb_node,
					      release_urb_work.work);

	up(&unode->dev->urbs.limit_sem);
}

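/*
 * Completion handler for the bulk transfer URBs: log real errors,
 * return the URB to the free list and release one slot of the
 * in-flight limit.
 */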
void udl_urb_completion(struct urb *urb)
{
	struct urb_node *unode = urb->context;
	struct udl_device *udl = unode->dev;
	unsigned long flags;

	/* sync/async unlink faults aren't errors */
	if (urb->status) {
		if (!(urb->status == -ENOENT ||
		    urb->status == -ECONNRESET ||
		    urb->status == -ESHUTDOWN)) {
			DRM_ERROR("%s - nonzero write bulk status received: %d\n",
				__func__, urb->status);
		}
	}

	urb->transfer_buffer_length = udl->urbs.size; /* reset to actual */

	spin_lock_irqsave(&udl->urbs.lock, flags);
	list_add_tail(&unode->entry, &udl->urbs.list);
	udl->urbs.available++;
	spin_unlock_irqrestore(&udl->urbs.lock, flags);

#if 0
	/*
	 * When using fb_defio, we deadlock if up() is called
	 * while another is waiting. So queue to another process.
	 */
	if (fb_defio)
		schedule_delayed_work(&unode->release_urb_work, 0);
	else
#endif
		up(&udl->urbs.limit_sem);
}

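/*
 * Tear down the URB pool. Taking limit_sem once per URB ensures every
 * URB has completed (or was never submitted) before it is freed.
 */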
static void udl_free_urb_list(struct drm_device *dev)
{
	struct udl_device *udl = to_udl(dev);
	int count = udl->urbs.count;
	struct list_head *node;
	struct urb_node *unode;
	struct urb *urb;

	DRM_DEBUG("Waiting for completions and freeing all render urbs\n");

	/* keep waiting and freeing, until we've got 'em all */
	while (count--) {
		down(&udl->urbs.limit_sem);

		spin_lock_irq(&udl->urbs.lock);

		node = udl->urbs.list.next; /* have reserved one with sem */
		list_del_init(node);

		spin_unlock_irq(&udl->urbs.lock);

		unode = list_entry(node, struct urb_node, entry);
		urb = unode->urb;

		/* Free each separately allocated piece */
		usb_free_coherent(urb->dev, udl->urbs.size,
				  urb->transfer_buffer, urb->transfer_dma);
		usb_free_urb(urb);
		kfree(node);
	}
	udl->urbs.count = 0;
}

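/*
 * Pre-allocate a pool of bulk URBs with DMA-coherent transfer buffers.
 * If a buffer of the requested size cannot be allocated, the pool is
 * torn down and rebuilt with half the buffer size, down to PAGE_SIZE,
 * presumably to keep the driver usable under memory pressure.
 */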
static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
{
	struct udl_device *udl = to_udl(dev);
	struct urb *urb;
	struct urb_node *unode;
	char *buf;
	size_t wanted_size = count * size;
	struct usb_device *udev = udl_to_usb_device(udl);

	spin_lock_init(&udl->urbs.lock);

retry:
	udl->urbs.size = size;
	INIT_LIST_HEAD(&udl->urbs.list);

	sema_init(&udl->urbs.limit_sem, 0);
	udl->urbs.count = 0;
	udl->urbs.available = 0;

	while (udl->urbs.count * size < wanted_size) {
		unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL);
		if (!unode)
			break;
		unode->dev = udl;

		INIT_DELAYED_WORK(&unode->release_urb_work,
				  udl_release_urb_work);

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			kfree(unode);
			break;
		}
		unode->urb = urb;

		buf = usb_alloc_coherent(udev, size, GFP_KERNEL,
					 &urb->transfer_dma);
		if (!buf) {
			kfree(unode);
			usb_free_urb(urb);
			if (size > PAGE_SIZE) {
				size /= 2;
				udl_free_urb_list(dev);
				goto retry;
			}
			break;
		}

		/* urb->transfer_buffer_length set to actual before submit */
		usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, 1),
				  buf, size, udl_urb_completion, unode);
		urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

		list_add_tail(&unode->entry, &udl->urbs.list);

		up(&udl->urbs.limit_sem);
		udl->urbs.count++;
		udl->urbs.available++;
	}

	DRM_DEBUG("allocated %d %d byte urbs\n", udl->urbs.count, (int) size);

	return udl->urbs.count;
}

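/*
 * Claim a free URB from the pool, waiting up to GET_URB_TIMEOUT for an
 * in-flight one to complete if none is available. Returns NULL if no
 * URB becomes free in time.
 *
 * Sketch of the usual pairing with udl_submit_urb(); fill_block() is a
 * stand-in for whatever writes the payload into transfer_buffer:
 *
 *	urb = udl_get_urb(dev);
 *	if (!urb)
 *		return -ENOMEM;
 *	len = fill_block(urb->transfer_buffer);
 *	ret = udl_submit_urb(dev, urb, len);
 */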
struct urb *udl_get_urb(struct drm_device *dev)
{
	struct udl_device *udl = to_udl(dev);
	int ret = 0;
	struct list_head *entry;
	struct urb_node *unode;
	struct urb *urb = NULL;

	/* Wait for an in-flight buffer to complete and get re-queued */
	ret = down_timeout(&udl->urbs.limit_sem, GET_URB_TIMEOUT);
	if (ret) {
		DRM_INFO("wait for urb interrupted: %d available: %d\n",
		       ret, udl->urbs.available);
		goto error;
	}

	spin_lock_irq(&udl->urbs.lock);

	BUG_ON(list_empty(&udl->urbs.list)); /* reserved one with limit_sem */
	entry = udl->urbs.list.next;
	list_del_init(entry);
	udl->urbs.available--;

	spin_unlock_irq(&udl->urbs.lock);

	unode = list_entry(entry, struct urb_node, entry);
	urb = unode->urb;

error:
	return urb;
}

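/*
 * Submit a previously claimed URB with the given payload length. On
 * failure the URB is recycled through the completion handler so the
 * pool stays balanced.
 */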
int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
{
	struct udl_device *udl = to_udl(dev);
	int ret;

	BUG_ON(len > udl->urbs.size);

	urb->transfer_buffer_length = len; /* set to actual payload len */
	ret = usb_submit_urb(urb, GFP_ATOMIC);
	if (ret) {
		udl_urb_completion(urb); /* because no one else will */
		DRM_ERROR("usb_submit_urb error %d\n", ret);
	}
	return ret;
}

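/*
 * One-time device initialization: check the vendor descriptor, select
 * the standard channel, build the URB pool and set up modesetting.
 */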
int udl_init(struct udl_device *udl)
{
	struct drm_device *dev = &udl->drm;
	int ret = -ENOMEM;

	DRM_DEBUG("\n");

	udl->dmadev = usb_intf_get_dma_device(to_usb_interface(dev->dev));
	if (!udl->dmadev)
		drm_warn(dev, "buffer sharing not supported\n"); /* not an error */

	mutex_init(&udl->gem_lock);

	if (!udl_parse_vendor_descriptor(udl)) {
		ret = -ENODEV;
		DRM_ERROR("firmware not recognized, assuming incompatible device\n");
		goto err;
	}

	if (udl_select_std_channel(udl))
		DRM_ERROR("Selecting channel failed\n");

	if (!udl_alloc_urb_list(dev, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
		DRM_ERROR("udl_alloc_urb_list failed\n");
		goto err;
	}

	DRM_DEBUG("\n");
	ret = udl_modeset_init(dev);
	if (ret)
		goto err;

	drm_kms_helper_poll_init(dev);

	return 0;

err:
	if (udl->urbs.count)
		udl_free_urb_list(dev);
	put_device(udl->dmadev);
	DRM_ERROR("%d\n", ret);
	return ret;
}

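/*
 * Release the USB-side resources (URB pool and DMA device reference),
 * e.g. when the USB device disappears while the DRM device lives on.
 */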
int udl_drop_usb(struct drm_device *dev)
{
	struct udl_device *udl = to_udl(dev);

	udl_free_urb_list(dev);
	put_device(udl->dmadev);
	udl->dmadev = NULL;

	return 0;
}