/*
 * Copyright (C) 2012 Red Hat
 *
 * based in parts on udlfb.c:
 * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
 * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
 * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License v2. See the file COPYING in the main directory of this archive for
 * more details.
 */
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_probe_helper.h>
#include <asm/unaligned.h>
#include "udl_drv.h"

/* -BULK_SIZE as per usb-skeleton. Can we get full page and avoid overhead? */
#define BULK_SIZE 512

#define NR_USB_REQUEST_CHANNEL 0x12

#define MAX_TRANSFER (PAGE_SIZE*16 - BULK_SIZE)
#define WRITES_IN_FLIGHT (4)
#define MAX_VENDOR_DESCRIPTOR_SIZE 256

#define GET_URB_TIMEOUT	HZ
#define FREE_URB_TIMEOUT (HZ*2)

static bool udl_parse_vendor_descriptor(struct drm_device *dev,
					struct usb_device *usbdev)
{
	struct udl_device *udl = to_udl(dev);
	u8 *desc;
	u8 *buf;
	u8 *desc_end;

	int total_len;

	buf = kzalloc(MAX_VENDOR_DESCRIPTOR_SIZE, GFP_KERNEL);
	if (!buf)
		return false;
	desc = buf;

	/* negative (error) return values fail the length check below */
	total_len = usb_get_descriptor(usbdev, 0x5f, /* vendor specific */
				       0, desc, MAX_VENDOR_DESCRIPTOR_SIZE);
	if (total_len > 5) {
		DRM_INFO("vendor descriptor length:%x data:%11ph\n",
			 total_len, desc);

		if ((desc[0] != total_len) ||	/* descriptor length */
		    (desc[1] != 0x5f) ||	/* vendor descriptor type */
		    (desc[2] != 0x01) ||	/* version (2 bytes) */
		    (desc[3] != 0x00) ||
		    (desc[4] != total_len - 2)) /* length after type */
			goto unrecognized;

		desc_end = desc + total_len;
		desc += 5;	/* the fixed header we've already parsed */

		/* the rest is key (2 bytes LE) / length (1 byte) / value */
		while (desc_end - desc >= 3) {
			u8 length;
			u16 key;

			key = get_unaligned_le16(desc);
			desc += sizeof(u16);
			length = *desc;
			desc++;

			/* don't let a malformed entry run past the buffer */
			if (length > desc_end - desc)
				break;

			switch (key) {
			case 0x0200: { /* max_area */
				u32 max_area;

				if (length < sizeof(u32))
					break;
				max_area = get_unaligned_le32(desc);
				DRM_DEBUG("DL chip limited to %d pixel modes\n",
					  max_area);
				udl->sku_pixel_limit = max_area;
				break;
			}
			default:
				break;
			}
			desc += length;
		}
	}

	goto success;

unrecognized:
	/* allow udlfb to load for now even if firmware unrecognized */
	DRM_ERROR("Unrecognized vendor firmware descriptor\n");

success:
	kfree(buf);
	return true;
}

/*
 * Need to ensure a channel is selected before submitting URBs
 */
static int udl_select_std_channel(struct udl_device *udl)
{
	int ret;
	static const u8 set_def_chn[] = {0x57, 0xCD, 0xDC, 0xA7,
					 0x1C, 0x88, 0x5E, 0x15,
					 0x60, 0xFE, 0xC6, 0x97,
					 0x16, 0x3D, 0x47, 0xF2};
	void *sendbuf;

	sendbuf = kmemdup(set_def_chn, sizeof(set_def_chn), GFP_KERNEL);
	if (!sendbuf)
		return -ENOMEM;

	ret = usb_control_msg(udl->udev,
			      usb_sndctrlpipe(udl->udev, 0),
			      NR_USB_REQUEST_CHANNEL,
			      (USB_DIR_OUT | USB_TYPE_VENDOR), 0, 0,
			      sendbuf, sizeof(set_def_chn),
			      USB_CTRL_SET_TIMEOUT);
	kfree(sendbuf);
	return ret < 0 ? ret : 0;
}

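/*
 * URB pool
 *
 * Everything below implements a small pool of pre-allocated bulk-out
 * URBs (WRITES_IN_FLIGHT of them, MAX_TRANSFER bytes each) so the
 * render paths never allocate in the hot path.  urbs.limit_sem counts
 * the free URBs: udl_get_urb() downs it (with a timeout) to claim one,
 * udl_submit_urb() sends it, and udl_urb_completion() puts the URB
 * back on the free list and ups the semaphore again.
 */
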
static void udl_release_urb_work(struct work_struct *work)
{
	struct urb_node *unode = container_of(work, struct urb_node,
					      release_urb_work.work);

	up(&unode->dev->urbs.limit_sem);
}

void udl_urb_completion(struct urb *urb)
{
	struct urb_node *unode = urb->context;
	struct udl_device *udl = unode->dev;
	unsigned long flags;

	/* sync/async unlink faults aren't errors */
	if (urb->status) {
		if (!(urb->status == -ENOENT ||
		    urb->status == -ECONNRESET ||
		    urb->status == -ESHUTDOWN)) {
			DRM_ERROR("%s - nonzero write bulk status received: %d\n",
				__func__, urb->status);
			atomic_set(&udl->lost_pixels, 1);
		}
	}

	urb->transfer_buffer_length = udl->urbs.size; /* reset to actual */

	spin_lock_irqsave(&udl->urbs.lock, flags);
	list_add_tail(&unode->entry, &udl->urbs.list);
	udl->urbs.available++;
	spin_unlock_irqrestore(&udl->urbs.lock, flags);

#if 0
	/*
	 * When using fb_defio, we deadlock if up() is called
	 * while another is waiting. So queue to another process.
	 */
	if (fb_defio)
		schedule_delayed_work(&unode->release_urb_work, 0);
	else
#endif
	up(&udl->urbs.limit_sem);
}

static void udl_free_urb_list(struct drm_device *dev)
{
	struct udl_device *udl = to_udl(dev);
	int count = udl->urbs.count;
	struct list_head *node;
	struct urb_node *unode;
	struct urb *urb;

	DRM_DEBUG("Waiting for completes and freeing all render urbs\n");

	/* keep waiting and freeing, until we've got 'em all */
	while (count--) {
		down(&udl->urbs.limit_sem);

		spin_lock_irq(&udl->urbs.lock);

		node = udl->urbs.list.next; /* have reserved one with sem */
		list_del_init(node);

		spin_unlock_irq(&udl->urbs.lock);

		unode = list_entry(node, struct urb_node, entry);
		urb = unode->urb;

		/* Free each separately allocated piece */
		usb_free_coherent(urb->dev, udl->urbs.size,
				  urb->transfer_buffer, urb->transfer_dma);
		usb_free_urb(urb);
		kfree(node);
	}
	udl->urbs.count = 0;
}

static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
{
	struct udl_device *udl = to_udl(dev);
	struct urb *urb;
	struct urb_node *unode;
	char *buf;
	size_t wanted_size = count * size;

	spin_lock_init(&udl->urbs.lock);

retry:
	udl->urbs.size = size;
	INIT_LIST_HEAD(&udl->urbs.list);

	sema_init(&udl->urbs.limit_sem, 0);
	udl->urbs.count = 0;
	udl->urbs.available = 0;

	while (udl->urbs.count * size < wanted_size) {
		unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL);
		if (!unode)
			break;
		unode->dev = udl;

		INIT_DELAYED_WORK(&unode->release_urb_work,
				  udl_release_urb_work);

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			kfree(unode);
			break;
		}
		unode->urb = urb;

		buf = usb_alloc_coherent(udl->udev, size, GFP_KERNEL,
					 &urb->transfer_dma);
		if (!buf) {
			kfree(unode);
			usb_free_urb(urb);
			if (size > PAGE_SIZE) {
				size /= 2;
				udl_free_urb_list(dev);
				goto retry;
			}
			break;
		}

		/* urb->transfer_buffer_length set to actual before submit */
		usb_fill_bulk_urb(urb, udl->udev, usb_sndbulkpipe(udl->udev, 1),
				  buf, size, udl_urb_completion, unode);
		urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

		list_add_tail(&unode->entry, &udl->urbs.list);

		up(&udl->urbs.limit_sem);
		udl->urbs.count++;
		udl->urbs.available++;
	}

	DRM_DEBUG("allocated %d %d byte urbs\n", udl->urbs.count, (int) size);

	return udl->urbs.count;
}

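/*
 * udl_get_urb() and udl_submit_urb() below are the pool's public API.
 * Rough caller pattern (a sketch; the real users are the damage-handling
 * paths elsewhere in the driver, e.g. udl_fb.c):
 *
 *	urb = udl_get_urb(dev);
 *	if (!urb)
 *		return;			(timed out; pixels are dropped)
 *	... fill urb->transfer_buffer with up to udl->urbs.size bytes ...
 *	udl_submit_urb(dev, urb, len);	(requeues the urb itself on error)
 */
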
struct urb *udl_get_urb(struct drm_device *dev)
{
	struct udl_device *udl = to_udl(dev);
	int ret = 0;
	struct list_head *entry;
	struct urb_node *unode;
	struct urb *urb = NULL;

	/* Wait for an in-flight buffer to complete and get re-queued */
	ret = down_timeout(&udl->urbs.limit_sem, GET_URB_TIMEOUT);
	if (ret) {
		atomic_set(&udl->lost_pixels, 1);
		DRM_INFO("wait for urb interrupted: %x available: %d\n",
			 ret, udl->urbs.available);
		goto error;
	}

	spin_lock_irq(&udl->urbs.lock);

	BUG_ON(list_empty(&udl->urbs.list)); /* reserved one with limit_sem */
	entry = udl->urbs.list.next;
	list_del_init(entry);
	udl->urbs.available--;

	spin_unlock_irq(&udl->urbs.lock);

	unode = list_entry(entry, struct urb_node, entry);
	urb = unode->urb;

error:
	return urb;
}

int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
{
	struct udl_device *udl = to_udl(dev);
	int ret;

	BUG_ON(len > udl->urbs.size);

	urb->transfer_buffer_length = len; /* set to actual payload len */
	ret = usb_submit_urb(urb, GFP_ATOMIC);
	if (ret) {
		udl_urb_completion(urb); /* because no one else will */
		atomic_set(&udl->lost_pixels, 1);
		DRM_ERROR("usb_submit_urb error %x\n", ret);
	}
	return ret;
}

int udl_init(struct udl_device *udl)
{
	struct drm_device *dev = &udl->drm;
	int ret = -ENOMEM;

	DRM_DEBUG("\n");

	mutex_init(&udl->gem_lock);

	if (!udl_parse_vendor_descriptor(dev, udl->udev)) {
		ret = -ENODEV;
		DRM_ERROR("firmware not recognized. Assume incompatible device\n");
		goto err;
	}

	if (udl_select_std_channel(udl))
		DRM_ERROR("Selecting channel failed\n");

	if (!udl_alloc_urb_list(dev, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
		DRM_ERROR("udl_alloc_urb_list failed\n");
		goto err;
	}

	DRM_DEBUG("\n");
	ret = udl_modeset_init(dev);
	if (ret)
		goto err;

	ret = udl_fbdev_init(dev);
	if (ret)
		goto err;

	drm_kms_helper_poll_init(dev);

	return 0;

err:
	if (udl->urbs.count)
		udl_free_urb_list(dev);
	DRM_ERROR("%d\n", ret);
	return ret;
}

int udl_drop_usb(struct drm_device *dev)
{
	udl_free_urb_list(dev);
	return 0;
}

void udl_fini(struct drm_device *dev)
{
	struct udl_device *udl = to_udl(dev);

	drm_kms_helper_poll_fini(dev);

	if (udl->urbs.count)
		udl_free_urb_list(dev);

	udl_fbdev_cleanup(dev);
}
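
/*
 * Lifecycle summary: udl_init() runs once at bind time and sets up the
 * vendor-descriptor limits, the standard channel, the URB pool, modeset
 * and fbdev state.  udl_drop_usb() releases just the USB transfer state
 * (presumably once the underlying USB device is going away), while
 * udl_fini() tears down the rest; both tolerate an already-empty URB list.
 */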