/*
 * udlfb.c -- Framebuffer driver for DisplayLink USB controller
 *
 * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
 * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
 * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License v2. See the file COPYING in the main directory of this archive for
 * more details.
 *
 * Layout is based on skeletonfb by James Simmons and Geert Uytterhoeven,
 * usb-skeleton by GregKH.
 *
 * Device-specific portions based on information from Displaylink, with work
 * from Florian Echtler, Henrik Bjerregaard Pedersen, and others.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/usb.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <linux/fb.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/delay.h>
#include <video/udlfb.h>
#include "edid.h"

static const struct fb_fix_screeninfo dlfb_fix = {
	.id =		"udlfb",
	.type =		FB_TYPE_PACKED_PIXELS,
	.visual =	FB_VISUAL_TRUECOLOR,
	.xpanstep =	0,
	.ypanstep =	0,
	.ywrapstep =	0,
	.accel =	FB_ACCEL_NONE,
};

static const u32 udlfb_info_flags = FBINFO_DEFAULT | FBINFO_READS_FAST |
		FBINFO_VIRTFB |
		FBINFO_HWACCEL_IMAGEBLIT | FBINFO_HWACCEL_FILLRECT |
		FBINFO_HWACCEL_COPYAREA | FBINFO_MISC_ALWAYS_SETPAR;

/*
 * There are many DisplayLink-based graphics products, all with unique PIDs.
 * So we match on DisplayLink's VID + Vendor-Defined Interface Class (0xff)
 * We also require a match on SubClass (0x00) and Protocol (0x00),
 * which is compatible with all known USB 2.0 era graphics chips and firmware,
 * but allows DisplayLink to increment those for any future incompatible chips
 */
static const struct usb_device_id id_table[] = {
	{.idVendor = 0x17e9,
	 .bInterfaceClass = 0xff,
	 .bInterfaceSubClass = 0x00,
	 .bInterfaceProtocol = 0x00,
	 .match_flags = USB_DEVICE_ID_MATCH_VENDOR |
		USB_DEVICE_ID_MATCH_INT_CLASS |
		USB_DEVICE_ID_MATCH_INT_SUBCLASS |
		USB_DEVICE_ID_MATCH_INT_PROTOCOL,
	},
	{},
};
MODULE_DEVICE_TABLE(usb, id_table);

/* module options */
static bool console = 1;	/* Allow fbcon to open framebuffer */
static bool fb_defio = 1;	/* Detect mmap writes using page faults */
static bool shadow = 1;		/* Optionally disable shadow framebuffer */
static int pixel_limit;		/* Optionally force a pixel resolution limit */

/* dlfb keeps a list of urbs for efficient bulk transfers */
static void dlfb_urb_completion(struct urb *urb);
static struct urb *dlfb_get_urb(struct dlfb_data *dlfb);
static int dlfb_submit_urb(struct dlfb_data *dlfb, struct urb *urb, size_t len);
static int dlfb_alloc_urb_list(struct dlfb_data *dlfb, int count, size_t size);
static void dlfb_free_urb_list(struct dlfb_data *dlfb);

/*
 * All DisplayLink bulk operations start with 0xAF, followed by a specific
 * command code. All operations are written to buffers which are later sent
 * to the device.
 */
static char *dlfb_set_register(char *buf, u8 reg, u8 val)
{
	*buf++ = 0xAF;
	*buf++ = 0x20;
	*buf++ = reg;
	*buf++ = val;
	return buf;
}

static char *dlfb_vidreg_lock(char *buf)
{
	return dlfb_set_register(buf, 0xFF, 0x00);
}

static char *dlfb_vidreg_unlock(char *buf)
{
	return dlfb_set_register(buf, 0xFF, 0xFF);
}
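/*
 * Example of the resulting byte stream (derived from dlfb_set_register()
 * above, not from a protocol document): locking the video registers and
 * then writing 0 to the color depth register 0x00 emits
 *
 *	AF 20 FF 00	(vidreg lock)
 *	AF 20 00 00	(color depth register = 0)
 *
 * i.e. every register write is a fixed 4-byte command.
 */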
/*
 * Map FB_BLANK_* to DisplayLink register
 * DLReg FB_BLANK_*
 * ----- -----------------------------
 * 0x00  FB_BLANK_UNBLANK (0)
 * 0x01  FB_BLANK_NORMAL (1)
 * 0x03  FB_BLANK_VSYNC_SUSPEND (2)
 * 0x05  FB_BLANK_HSYNC_SUSPEND (3)
 * 0x07  FB_BLANK_POWERDOWN (4) Note: requires modeset to come back
 */
static char *dlfb_blanking(char *buf, int fb_blank)
{
	u8 reg;

	switch (fb_blank) {
	case FB_BLANK_POWERDOWN:
		reg = 0x07;
		break;
	case FB_BLANK_HSYNC_SUSPEND:
		reg = 0x05;
		break;
	case FB_BLANK_VSYNC_SUSPEND:
		reg = 0x03;
		break;
	case FB_BLANK_NORMAL:
		reg = 0x01;
		break;
	default:
		reg = 0x00;
	}

	buf = dlfb_set_register(buf, 0x1F, reg);

	return buf;
}

static char *dlfb_set_color_depth(char *buf, u8 selection)
{
	return dlfb_set_register(buf, 0x00, selection);
}

static char *dlfb_set_base16bpp(char *wrptr, u32 base)
{
	/* the base address is written as three byte-wide registers; 0x20 is the high byte */
	wrptr = dlfb_set_register(wrptr, 0x20, base >> 16);
	wrptr = dlfb_set_register(wrptr, 0x21, base >> 8);
	return dlfb_set_register(wrptr, 0x22, base);
}

/*
 * DisplayLink HW has separate 16bpp and 8bpp framebuffers.
 * In 24bpp modes, the low 3:2:3 RGB bits go in the 8bpp framebuffer
 */
static char *dlfb_set_base8bpp(char *wrptr, u32 base)
{
	wrptr = dlfb_set_register(wrptr, 0x26, base >> 16);
	wrptr = dlfb_set_register(wrptr, 0x27, base >> 8);
	return dlfb_set_register(wrptr, 0x28, base);
}

static char *dlfb_set_register_16(char *wrptr, u8 reg, u16 value)
{
	wrptr = dlfb_set_register(wrptr, reg, value >> 8);
	return dlfb_set_register(wrptr, reg+1, value);
}

/*
 * This is kind of weird because the controller takes some
 * register values in a different byte order than other registers.
 */
static char *dlfb_set_register_16be(char *wrptr, u8 reg, u16 value)
{
	wrptr = dlfb_set_register(wrptr, reg, value);
	return dlfb_set_register(wrptr, reg+1, value >> 8);
}

/*
 * LFSR is linear feedback shift register. The reason we have this is
 * because the display controller needs to minimize the clock depth of
 * various counters used in the display path. So this code reverses the
 * provided value into the lfsr16 value by counting backwards to get
 * the value that needs to be set in the hardware comparator to get the
 * same actual count. This makes sense once you read above a couple of
 * times and think about it from a hardware perspective.
 */
static u16 dlfb_lfsr16(u16 actual_count)
{
	u32 lv = 0xFFFF; /* This is the lfsr value that the hw starts with */

	while (actual_count--) {
		lv = ((lv << 1) |
		     (((lv >> 15) ^ (lv >> 4) ^ (lv >> 2) ^ (lv >> 1)) & 1))
		     & 0xFFFF;
	}

	return (u16) lv;
}

/*
 * This does LFSR conversion on the value that is to be written.
 * See LFSR explanation above for more detail.
 */
static char *dlfb_set_register_lfsr16(char *wrptr, u8 reg, u16 value)
{
	return dlfb_set_register_16(wrptr, reg, dlfb_lfsr16(value));
}
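/*
 * Illustration (derived from the code above, not from DisplayLink docs):
 * dlfb_lfsr16() simply steps the 16-bit LFSR 'actual_count' times from its
 * reset value of 0xFFFF, e.g.
 *
 *	dlfb_lfsr16(0) == 0xFFFF	(no steps taken)
 *	dlfb_lfsr16(1) == 0xFFFE	(one shift, feedback bit is 0)
 *
 * so dlfb_set_register_lfsr16(wrptr, reg, n) presumably programs the
 * hardware comparator with the LFSR state reached after n ticks.
 */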
/*
 * This takes a standard fbdev screeninfo struct and all of its monitor mode
 * details and converts them into the DisplayLink equivalent register commands.
 */
static char *dlfb_set_vid_cmds(char *wrptr, struct fb_var_screeninfo *var)
{
	u16 xds, yds;
	u16 xde, yde;
	u16 yec;

	/* x display start */
	xds = var->left_margin + var->hsync_len;
	wrptr = dlfb_set_register_lfsr16(wrptr, 0x01, xds);
	/* x display end */
	xde = xds + var->xres;
	wrptr = dlfb_set_register_lfsr16(wrptr, 0x03, xde);

	/* y display start */
	yds = var->upper_margin + var->vsync_len;
	wrptr = dlfb_set_register_lfsr16(wrptr, 0x05, yds);
	/* y display end */
	yde = yds + var->yres;
	wrptr = dlfb_set_register_lfsr16(wrptr, 0x07, yde);

	/* x end count is active + blanking - 1 */
	wrptr = dlfb_set_register_lfsr16(wrptr, 0x09,
			xde + var->right_margin - 1);

	/* libdlo hardcodes hsync start to 1 */
	wrptr = dlfb_set_register_lfsr16(wrptr, 0x0B, 1);

	/* hsync end is width of sync pulse + 1 */
	wrptr = dlfb_set_register_lfsr16(wrptr, 0x0D, var->hsync_len + 1);

	/* hpixels is active pixels */
	wrptr = dlfb_set_register_16(wrptr, 0x0F, var->xres);

	/* yendcount is vertical active + vertical blanking */
	yec = var->yres + var->upper_margin + var->lower_margin +
			var->vsync_len;
	wrptr = dlfb_set_register_lfsr16(wrptr, 0x11, yec);

	/* libdlo hardcodes vsync start to 0 */
	wrptr = dlfb_set_register_lfsr16(wrptr, 0x13, 0);

	/* vsync end is width of vsync pulse */
	wrptr = dlfb_set_register_lfsr16(wrptr, 0x15, var->vsync_len);

	/* vpixels is active pixels */
	wrptr = dlfb_set_register_16(wrptr, 0x17, var->yres);

	/* convert picoseconds to 5kHz multiple for pclk5k = x * 1E12/5k */
	wrptr = dlfb_set_register_16be(wrptr, 0x1B,
			200*1000*1000/var->pixclock);

	return wrptr;
}

/*
 * This takes a standard fbdev screeninfo struct that was fetched or prepared
 * and then generates the appropriate command sequence that then drives the
 * display controller.
 */
static int dlfb_set_video_mode(struct dlfb_data *dlfb,
				struct fb_var_screeninfo *var)
{
	char *buf;
	char *wrptr;
	int retval;
	int writesize;
	struct urb *urb;

	if (!atomic_read(&dlfb->usb_active))
		return -EPERM;

	urb = dlfb_get_urb(dlfb);
	if (!urb)
		return -ENOMEM;

	buf = (char *) urb->transfer_buffer;

	/*
	 * This first section has to do with setting the base address on the
	 * controller associated with the display. There are 2 base pointers;
	 * currently, we only use the 16 bpp segment.
	 */
	wrptr = dlfb_vidreg_lock(buf);
	wrptr = dlfb_set_color_depth(wrptr, 0x00);
	/* set base for 16bpp segment to 0 */
	wrptr = dlfb_set_base16bpp(wrptr, 0);
	/* set base for 8bpp segment to end of fb */
	wrptr = dlfb_set_base8bpp(wrptr, dlfb->info->fix.smem_len);

	wrptr = dlfb_set_vid_cmds(wrptr, var);
	wrptr = dlfb_blanking(wrptr, FB_BLANK_UNBLANK);
	wrptr = dlfb_vidreg_unlock(wrptr);

	writesize = wrptr - buf;

	retval = dlfb_submit_urb(dlfb, urb, writesize);

	dlfb->blank_mode = FB_BLANK_UNBLANK;

	return retval;
}

static int dlfb_ops_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	unsigned long start = vma->vm_start;
	unsigned long size = vma->vm_end - vma->vm_start;
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long page, pos;

	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
		return -EINVAL;
	if (size > info->fix.smem_len)
		return -EINVAL;
	if (offset > info->fix.smem_len - size)
		return -EINVAL;

	pos = (unsigned long)info->fix.smem_start + offset;

	dev_dbg(info->dev, "mmap() framebuffer addr:%lu size:%lu\n",
		pos, size);

	while (size > 0) {
		page = vmalloc_to_pfn((void *)pos);
		if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED))
			return -EAGAIN;

		start += PAGE_SIZE;
		pos += PAGE_SIZE;
		if (size > PAGE_SIZE)
			size -= PAGE_SIZE;
		else
			size = 0;
	}

	return 0;
}

/*
 * Trims identical data from front and back of line
 * Sets new front buffer address and width
 * And returns byte count of identical pixels
 * Assumes CPU natural alignment (unsigned long)
 * for back and front buffer ptrs and width
 */
static int dlfb_trim_hline(const u8 *bback, const u8 **bfront, int *width_bytes)
{
	int j, k;
	const unsigned long *back = (const unsigned long *) bback;
	const unsigned long *front = (const unsigned long *) *bfront;
	const int width = *width_bytes / sizeof(unsigned long);
	int identical = width;
	int start = width;
	int end = width;

	prefetch((void *) front);
	prefetch((void *) back);

	for (j = 0; j < width; j++) {
		if (back[j] != front[j]) {
			start = j;
			break;
		}
	}

	for (k = width - 1; k > j; k--) {
		if (back[k] != front[k]) {
			end = k+1;
			break;
		}
	}

	identical = start + (width - end);
	*bfront = (u8 *) &front[start];
	*width_bytes = (end - start) * sizeof(unsigned long);

	return identical * sizeof(unsigned long);
}
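/*
 * Worked example (illustrative only): with 8-byte longs and a 64-byte line
 * where only longs [2] and [3] differ from the backing buffer,
 * dlfb_trim_hline() finds start = 2 and end = 4, so it advances *bfront by
 * 16 bytes, shrinks *width_bytes to 16, and returns 48 identical bytes
 * (the 2 leading plus 4 trailing unchanged longs).
 */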
/*
 * Render a command stream for an encoded horizontal line segment of pixels.
 *
 * A command buffer holds several commands.
 * It always begins with a fresh command header
 * (the protocol doesn't require this, but we enforce it to allow
 * multiple buffers to be potentially encoded and sent in parallel).
 * A single command encodes one contiguous horizontal line of pixels
 *
 * The function relies on the client to do all allocation, so that
 * rendering can be done directly to output buffers (e.g. USB URBs).
 * The function fills the supplied command buffer, providing information
 * on where it left off, so the client may call in again with additional
 * buffers if the line will take several buffers to complete.
 *
 * A single command can transmit a maximum of 256 pixels,
 * regardless of the compression ratio (protocol design limit).
 * To the hardware, 0 for a size byte means 256
 *
 * Rather than 256 pixel commands which are either rl or raw encoded,
 * the rlx command simply assumes alternating raw and rl spans within one cmd.
 * This has a slightly larger header overhead, but produces more even results.
 * It also processes all data (read and write) in a single pass.
 * Performance benchmarks of common cases show it having just slightly better
 * compression than 256 pixel raw or rle commands, with similar CPU consumption.
 * But for very rl friendly data, it will not compress quite as well.
 */
static void dlfb_compress_hline(
	const uint16_t **pixel_start_ptr,
	const uint16_t *const pixel_end,
	uint32_t *device_address_ptr,
	uint8_t **command_buffer_ptr,
	const uint8_t *const cmd_buffer_end)
{
	const uint16_t *pixel = *pixel_start_ptr;
	uint32_t dev_addr = *device_address_ptr;
	uint8_t *cmd = *command_buffer_ptr;
	const int bpp = 2;

	while ((pixel_end > pixel) &&
	       (cmd_buffer_end - MIN_RLX_CMD_BYTES > cmd)) {
		uint8_t *raw_pixels_count_byte = NULL;
		uint8_t *cmd_pixels_count_byte = NULL;
		const uint16_t *raw_pixel_start = NULL;
		const uint16_t *cmd_pixel_start, *cmd_pixel_end = NULL;

		prefetchw((void *) cmd); /* pull in one cache line at least */

		*cmd++ = 0xAF;
		*cmd++ = 0x6B;
		*cmd++ = (uint8_t) ((dev_addr >> 16) & 0xFF);
		*cmd++ = (uint8_t) ((dev_addr >> 8) & 0xFF);
		*cmd++ = (uint8_t) ((dev_addr) & 0xFF);

		cmd_pixels_count_byte = cmd++; /* we'll know this later */
		cmd_pixel_start = pixel;

		raw_pixels_count_byte = cmd++; /* we'll know this later */
		raw_pixel_start = pixel;

		cmd_pixel_end = pixel + min(MAX_CMD_PIXELS + 1,
			min((int)(pixel_end - pixel),
			    (int)(cmd_buffer_end - cmd) / bpp));

		prefetch_range((void *) pixel, (cmd_pixel_end - pixel) * bpp);

		while (pixel < cmd_pixel_end) {
			const uint16_t * const repeating_pixel = pixel;

			*(uint16_t *)cmd = cpu_to_be16p(pixel);
			cmd += 2;
			pixel++;

			if (unlikely((pixel < cmd_pixel_end) &&
				     (*pixel == *repeating_pixel))) {
				/* go back and fill in raw pixel count */
				*raw_pixels_count_byte = ((repeating_pixel -
						raw_pixel_start) + 1) & 0xFF;

				while ((pixel < cmd_pixel_end)
				       && (*pixel == *repeating_pixel)) {
					pixel++;
				}

				/* immediately after raw data is repeat byte */
				*cmd++ = ((pixel - repeating_pixel) - 1) & 0xFF;

				/* Then start another raw pixel span */
				raw_pixel_start = pixel;
				raw_pixels_count_byte = cmd++;
			}
		}

		if (pixel > raw_pixel_start) {
			/* finalize last RAW span */
			*raw_pixels_count_byte = (pixel-raw_pixel_start) & 0xFF;
		}

		*cmd_pixels_count_byte = (pixel - cmd_pixel_start) & 0xFF;
		dev_addr += (pixel - cmd_pixel_start) * bpp;
	}

	if (cmd_buffer_end <= MIN_RLX_CMD_BYTES + cmd) {
		/* Fill leftover bytes with no-ops */
		if (cmd_buffer_end > cmd)
			memset(cmd, 0xAF, cmd_buffer_end - cmd);
		cmd = (uint8_t *) cmd_buffer_end;
	}

	*command_buffer_ptr = cmd;
	*pixel_start_ptr = pixel;
	*device_address_ptr = dev_addr;
}
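/*
 * Sketch of the rlx byte stream produced above (inferred from this code,
 * not from a DisplayLink protocol specification):
 *
 *	AF 6B  addr[23:16] addr[15:8] addr[7:0]  total_pixels
 *	raw_count  <raw_count big-endian 16bpp pixels>  repeat_count
 *	raw_count  <raw pixels>  repeat_count  ...
 *
 * total_pixels and the count bytes use 0 to mean 256. raw_count includes
 * the first copy of a repeating pixel; repeat_count is how many additional
 * copies of that last raw pixel the hardware replicates after it.
 */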
/*
 * There are 3 copies of every pixel: The front buffer that the fbdev
 * client renders to, the actual framebuffer across the USB bus in hardware
 * (that we can only write to, slowly, and can never read), and (optionally)
 * our shadow copy that tracks what's been sent to that hardware buffer.
 */
static int dlfb_render_hline(struct dlfb_data *dlfb, struct urb **urb_ptr,
			      const char *front, char **urb_buf_ptr,
			      u32 byte_offset, u32 byte_width,
			      int *ident_ptr, int *sent_ptr)
{
	const u8 *line_start, *line_end, *next_pixel;
	u32 dev_addr = dlfb->base16 + byte_offset;
	struct urb *urb = *urb_ptr;
	u8 *cmd = *urb_buf_ptr;
	u8 *cmd_end = (u8 *) urb->transfer_buffer + urb->transfer_buffer_length;

	line_start = (u8 *) (front + byte_offset);
	next_pixel = line_start;
	line_end = next_pixel + byte_width;

	if (dlfb->backing_buffer) {
		int offset;
		const u8 *back_start = (u8 *) (dlfb->backing_buffer
						+ byte_offset);

		*ident_ptr += dlfb_trim_hline(back_start, &next_pixel,
					      &byte_width);

		offset = next_pixel - line_start;
		line_end = next_pixel + byte_width;
		dev_addr += offset;
		back_start += offset;
		line_start += offset;

		memcpy((char *)back_start, (char *) line_start,
		       byte_width);
	}

	while (next_pixel < line_end) {

		dlfb_compress_hline((const uint16_t **) &next_pixel,
				    (const uint16_t *) line_end, &dev_addr,
				    (u8 **) &cmd, (u8 *) cmd_end);

		if (cmd >= cmd_end) {
			int len = cmd - (u8 *) urb->transfer_buffer;
			if (dlfb_submit_urb(dlfb, urb, len))
				return 1; /* lost_pixels is set */
			*sent_ptr += len;
			urb = dlfb_get_urb(dlfb);
			if (!urb)
				return 1; /* lost_pixels is set */
			*urb_ptr = urb;
			cmd = urb->transfer_buffer;
			cmd_end = &cmd[urb->transfer_buffer_length];
		}
	}

	*urb_buf_ptr = cmd;

	return 0;
}

static int dlfb_handle_damage(struct dlfb_data *dlfb, int x, int y,
			      int width, int height, char *data)
{
	int i, ret;
	char *cmd;
	cycles_t start_cycles, end_cycles;
	int bytes_sent = 0;
	int bytes_identical = 0;
	struct urb *urb;
	int aligned_x;

	start_cycles = get_cycles();

	aligned_x = DL_ALIGN_DOWN(x, sizeof(unsigned long));
	width = DL_ALIGN_UP(width + (x-aligned_x), sizeof(unsigned long));
	x = aligned_x;

	if ((width <= 0) ||
	    (x + width > dlfb->info->var.xres) ||
	    (y + height > dlfb->info->var.yres))
		return -EINVAL;

	if (!atomic_read(&dlfb->usb_active))
		return 0;

	urb = dlfb_get_urb(dlfb);
	if (!urb)
		return 0;
	cmd = urb->transfer_buffer;

	for (i = y; i < y + height; i++) {
		const int line_offset = dlfb->info->fix.line_length * i;
		const int byte_offset = line_offset + (x * BPP);

		if (dlfb_render_hline(dlfb, &urb,
				      (char *) dlfb->info->fix.smem_start,
				      &cmd, byte_offset, width * BPP,
				      &bytes_identical, &bytes_sent))
			goto error;
	}

	if (cmd > (char *) urb->transfer_buffer) {
		/* Send partial buffer remaining before exiting */
		int len = cmd - (char *) urb->transfer_buffer;
		ret = dlfb_submit_urb(dlfb, urb, len);
		bytes_sent += len;
	} else
		dlfb_urb_completion(urb);

error:
	atomic_add(bytes_sent, &dlfb->bytes_sent);
	atomic_add(bytes_identical, &dlfb->bytes_identical);
	atomic_add(width*height*2, &dlfb->bytes_rendered);
	end_cycles = get_cycles();
	atomic_add(((unsigned int) ((end_cycles - start_cycles)
		    >> 10)), /* Kcycles */
		   &dlfb->cpu_kcycles_used);

	return 0;
}
/*
 * Path triggered by usermode clients who write to filesystem
 * e.g. cat filename > /dev/fb1
 * Not used by X Windows or text-mode console. But useful for testing.
 * Slow because of extra copy and we must assume all pixels dirty.
 */
static ssize_t dlfb_ops_write(struct fb_info *info, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	ssize_t result;
	struct dlfb_data *dlfb = info->par;
	u32 offset = (u32) *ppos;

	result = fb_sys_write(info, buf, count, ppos);

	if (result > 0) {
		int start = max((int)(offset / info->fix.line_length), 0);
		int lines = min((u32)((result / info->fix.line_length) + 1),
				(u32)info->var.yres);

		dlfb_handle_damage(dlfb, 0, start, info->var.xres,
				   lines, info->screen_base);
	}

	return result;
}

/* hardware has native COPY command (see libdlo), but not worth it for fbcon */
static void dlfb_ops_copyarea(struct fb_info *info,
			      const struct fb_copyarea *area)
{

	struct dlfb_data *dlfb = info->par;

	sys_copyarea(info, area);

	dlfb_handle_damage(dlfb, area->dx, area->dy,
			   area->width, area->height, info->screen_base);
}

static void dlfb_ops_imageblit(struct fb_info *info,
			       const struct fb_image *image)
{
	struct dlfb_data *dlfb = info->par;

	sys_imageblit(info, image);

	dlfb_handle_damage(dlfb, image->dx, image->dy,
			   image->width, image->height, info->screen_base);
}

static void dlfb_ops_fillrect(struct fb_info *info,
			      const struct fb_fillrect *rect)
{
	struct dlfb_data *dlfb = info->par;

	sys_fillrect(info, rect);

	dlfb_handle_damage(dlfb, rect->dx, rect->dy, rect->width,
			   rect->height, info->screen_base);
}
/*
 * NOTE: fb_defio.c is holding info->fbdefio.mutex
 * Touching ANY framebuffer memory that triggers a page fault
 * in fb_defio will cause a deadlock, when it also tries to
 * grab the same mutex.
 */
static void dlfb_dpy_deferred_io(struct fb_info *info,
				 struct list_head *pagelist)
{
	struct page *cur;
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct dlfb_data *dlfb = info->par;
	struct urb *urb;
	char *cmd;
	cycles_t start_cycles, end_cycles;
	int bytes_sent = 0;
	int bytes_identical = 0;
	int bytes_rendered = 0;

	if (!fb_defio)
		return;

	if (!atomic_read(&dlfb->usb_active))
		return;

	start_cycles = get_cycles();

	urb = dlfb_get_urb(dlfb);
	if (!urb)
		return;

	cmd = urb->transfer_buffer;

	/* walk the written page list and render each to device */
	list_for_each_entry(cur, &fbdefio->pagelist, lru) {

		if (dlfb_render_hline(dlfb, &urb, (char *) info->fix.smem_start,
				      &cmd, cur->index << PAGE_SHIFT,
				      PAGE_SIZE, &bytes_identical, &bytes_sent))
			goto error;
		bytes_rendered += PAGE_SIZE;
	}

	if (cmd > (char *) urb->transfer_buffer) {
		/* Send partial buffer remaining before exiting */
		int len = cmd - (char *) urb->transfer_buffer;
		dlfb_submit_urb(dlfb, urb, len);
		bytes_sent += len;
	} else
		dlfb_urb_completion(urb);

error:
	atomic_add(bytes_sent, &dlfb->bytes_sent);
	atomic_add(bytes_identical, &dlfb->bytes_identical);
	atomic_add(bytes_rendered, &dlfb->bytes_rendered);
	end_cycles = get_cycles();
	atomic_add(((unsigned int) ((end_cycles - start_cycles)
		    >> 10)), /* Kcycles */
		   &dlfb->cpu_kcycles_used);
}

static int dlfb_get_edid(struct dlfb_data *dlfb, char *edid, int len)
{
	int i, ret;
	char *rbuf;

	rbuf = kmalloc(2, GFP_KERNEL);
	if (!rbuf)
		return 0;

	for (i = 0; i < len; i++) {
		ret = usb_control_msg(dlfb->udev,
				      usb_rcvctrlpipe(dlfb->udev, 0), 0x02,
				      (0x80 | (0x02 << 5)), i << 8, 0xA1,
				      rbuf, 2, USB_CTRL_GET_TIMEOUT);
		if (ret < 2) {
			dev_err(&dlfb->udev->dev,
				"Read EDID byte %d failed: %d\n", i, ret);
			i--;
			break;
		}
		edid[i] = rbuf[1];
	}

	kfree(rbuf);

	return i;
}

static int dlfb_ops_ioctl(struct fb_info *info, unsigned int cmd,
			  unsigned long arg)
{

	struct dlfb_data *dlfb = info->par;

	if (!atomic_read(&dlfb->usb_active))
		return 0;

	/* TODO: Update X server to get this from sysfs instead */
	if (cmd == DLFB_IOCTL_RETURN_EDID) {
		void __user *edid = (void __user *)arg;
		if (copy_to_user(edid, dlfb->edid, dlfb->edid_size))
			return -EFAULT;
		return 0;
	}

	/* TODO: Help propose a standard fb.h ioctl to report mmap damage */
	if (cmd == DLFB_IOCTL_REPORT_DAMAGE) {
		struct dloarea area;

		if (copy_from_user(&area, (void __user *)arg,
				   sizeof(struct dloarea)))
			return -EFAULT;

		/*
		 * If we have a damage-aware client, turn fb_defio "off"
		 * to avoid the perf impact of unnecessary page fault handling.
		 * Done by resetting the delay for this fb_info to a very
		 * long period. Pages will become writable and stay that way.
		 * Reset to normal value when all clients have closed this fb.
		 */
		if (info->fbdefio)
			info->fbdefio->delay = DL_DEFIO_WRITE_DISABLE;

		if (area.x < 0)
			area.x = 0;

		if (area.x > info->var.xres)
			area.x = info->var.xres;

		if (area.y < 0)
			area.y = 0;

		if (area.y > info->var.yres)
			area.y = info->var.yres;

		dlfb_handle_damage(dlfb, area.x, area.y, area.w, area.h,
				   info->screen_base);
	}

	return 0;
}

/* taken from vesafb */
static int
dlfb_ops_setcolreg(unsigned regno, unsigned red, unsigned green,
		   unsigned blue, unsigned transp, struct fb_info *info)
{
	int err = 0;

	if (regno >= info->cmap.len)
		return 1;

	if (regno < 16) {
		if (info->var.red.offset == 10) {
			/* 1:5:5:5 */
			((u32 *) (info->pseudo_palette))[regno] =
			    ((red & 0xf800) >> 1) |
			    ((green & 0xf800) >> 6) | ((blue & 0xf800) >> 11);
		} else {
			/* 0:5:6:5 */
			((u32 *) (info->pseudo_palette))[regno] =
			    ((red & 0xf800)) |
			    ((green & 0xfc00) >> 5) | ((blue & 0xf800) >> 11);
		}
	}

	return err;
}

/*
 * It's common for several clients to have framebuffer open simultaneously.
 * e.g. both fbcon and X. Makes things interesting.
 * Assumes caller is holding info->lock (for open and release at least)
 */
static int dlfb_ops_open(struct fb_info *info, int user)
{
	struct dlfb_data *dlfb = info->par;

	/*
	 * fbcon aggressively connects to first framebuffer it finds,
	 * preventing other clients (X) from working properly. Usually
	 * not what the user wants. Fail by default with option to enable.
	 */
	if ((user == 0) && (!console))
		return -EBUSY;

	/* If the USB device is gone, we don't accept new opens */
	if (dlfb->virtualized)
		return -ENODEV;

	dlfb->fb_count++;

	kref_get(&dlfb->kref);

	if (fb_defio && (info->fbdefio == NULL)) {
		/* enable defio at last moment if not disabled by client */

		struct fb_deferred_io *fbdefio;

		fbdefio = kzalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);

		if (fbdefio) {
			fbdefio->delay = DL_DEFIO_WRITE_DELAY;
			fbdefio->deferred_io = dlfb_dpy_deferred_io;
		}

		info->fbdefio = fbdefio;
		fb_deferred_io_init(info);
	}

	dev_dbg(info->dev, "open, user=%d fb_info=%p count=%d\n",
		user, info, dlfb->fb_count);

	return 0;
}
/*
 * Called when all client interfaces to start transactions have been disabled,
 * and all references to our device instance (dlfb_data) are released.
 * Every transaction must have a reference, so we know we are fully spun down
 */
static void dlfb_free(struct kref *kref)
{
	struct dlfb_data *dlfb = container_of(kref, struct dlfb_data, kref);

	vfree(dlfb->backing_buffer);
	kfree(dlfb->edid);
	kfree(dlfb);
}

static void dlfb_release_urb_work(struct work_struct *work)
{
	struct urb_node *unode = container_of(work, struct urb_node,
					      release_urb_work.work);

	up(&unode->dlfb->urbs.limit_sem);
}

static void dlfb_free_framebuffer(struct dlfb_data *dlfb)
{
	struct fb_info *info = dlfb->info;

	if (info) {
		unregister_framebuffer(info);

		if (info->cmap.len != 0)
			fb_dealloc_cmap(&info->cmap);
		if (info->monspecs.modedb)
			fb_destroy_modedb(info->monspecs.modedb);
		vfree(info->screen_base);

		fb_destroy_modelist(&info->modelist);

		dlfb->info = NULL;

		/* Assume info structure is freed after this point */
		framebuffer_release(info);
	}

	/* ref taken in probe() as part of registering framebuffer */
	kref_put(&dlfb->kref, dlfb_free);
}

static void dlfb_free_framebuffer_work(struct work_struct *work)
{
	struct dlfb_data *dlfb = container_of(work, struct dlfb_data,
					      free_framebuffer_work.work);
	dlfb_free_framebuffer(dlfb);
}

/*
 * Assumes caller is holding info->lock mutex (for open and release at least)
 */
static int dlfb_ops_release(struct fb_info *info, int user)
{
	struct dlfb_data *dlfb = info->par;

	dlfb->fb_count--;

	/* We can't free fb_info here - fbmem will touch it when we return */
	if (dlfb->virtualized && (dlfb->fb_count == 0))
		schedule_delayed_work(&dlfb->free_framebuffer_work, HZ);

	if ((dlfb->fb_count == 0) && (info->fbdefio)) {
		fb_deferred_io_cleanup(info);
		kfree(info->fbdefio);
		info->fbdefio = NULL;
		info->fbops->fb_mmap = dlfb_ops_mmap;
	}

	dev_dbg(info->dev, "release, user=%d count=%d\n", user, dlfb->fb_count);

	kref_put(&dlfb->kref, dlfb_free);

	return 0;
}

/*
 * Check whether a video mode is supported by the DisplayLink chip
 * We start from monitor's modes, so don't need to filter that here
 */
static int dlfb_is_valid_mode(struct fb_videomode *mode, struct dlfb_data *dlfb)
{
	if (mode->xres * mode->yres > dlfb->sku_pixel_limit)
		return 0;

	return 1;
}

static void dlfb_var_color_format(struct fb_var_screeninfo *var)
{
	const struct fb_bitfield red = { 11, 5, 0 };
	const struct fb_bitfield green = { 5, 6, 0 };
	const struct fb_bitfield blue = { 0, 5, 0 };

	var->bits_per_pixel = 16;
	var->red = red;
	var->green = green;
	var->blue = blue;
}

static int dlfb_ops_check_var(struct fb_var_screeninfo *var,
			      struct fb_info *info)
{
	struct fb_videomode mode;
	struct dlfb_data *dlfb = info->par;

	/* TODO: support dynamically changing framebuffer size */
	if ((var->xres * var->yres * 2) > info->fix.smem_len)
		return -EINVAL;

	/* set device-specific elements of var unrelated to mode */
	dlfb_var_color_format(var);

	fb_var_to_videomode(&mode, var);

	if (!dlfb_is_valid_mode(&mode, dlfb))
		return -EINVAL;

	return 0;
}

static int dlfb_ops_set_par(struct fb_info *info)
{
	struct dlfb_data *dlfb = info->par;
	int result;
	u16 *pix_framebuffer;
	int i;

	result = dlfb_set_video_mode(dlfb, &info->var);
	if ((result == 0) && (dlfb->fb_count == 0)) {

		/* paint greenscreen */

		pix_framebuffer = (u16 *) info->screen_base;
		for (i = 0; i < info->fix.smem_len / 2; i++)
			pix_framebuffer[i] = 0x37e6;

		dlfb_handle_damage(dlfb, 0, 0, info->var.xres, info->var.yres,
				   info->screen_base);
	}

	return result;
}

/* To fonzi the jukebox (e.g. make blanking changes take effect) */
static char *dlfb_dummy_render(char *buf)
{
	*buf++ = 0xAF;
	*buf++ = 0x6A; /* copy */
	*buf++ = 0x00; /* from address */
	*buf++ = 0x00;
	*buf++ = 0x00;
	*buf++ = 0x01; /* one pixel */
	*buf++ = 0x00; /* to address */
	*buf++ = 0x00;
	*buf++ = 0x00;
	return buf;
}

/*
 * In order to come back from full DPMS off, we need to set the mode again
 */
static int dlfb_ops_blank(int blank_mode, struct fb_info *info)
{
	struct dlfb_data *dlfb = info->par;
	char *bufptr;
	struct urb *urb;

	dev_dbg(info->dev, "blank, mode %d --> %d\n",
		dlfb->blank_mode, blank_mode);

	if ((dlfb->blank_mode == FB_BLANK_POWERDOWN) &&
	    (blank_mode != FB_BLANK_POWERDOWN)) {

		/* returning from powerdown requires a fresh modeset */
		dlfb_set_video_mode(dlfb, &info->var);
	}

	urb = dlfb_get_urb(dlfb);
	if (!urb)
		return 0;

	bufptr = (char *) urb->transfer_buffer;
	bufptr = dlfb_vidreg_lock(bufptr);
	bufptr = dlfb_blanking(bufptr, blank_mode);
	bufptr = dlfb_vidreg_unlock(bufptr);

	/* seems like a render op is needed to have blank change take effect */
	bufptr = dlfb_dummy_render(bufptr);

	dlfb_submit_urb(dlfb, urb, bufptr -
			(char *) urb->transfer_buffer);

	dlfb->blank_mode = blank_mode;

	return 0;
}

static struct fb_ops dlfb_ops = {
	.owner = THIS_MODULE,
	.fb_read = fb_sys_read,
	.fb_write = dlfb_ops_write,
	.fb_setcolreg = dlfb_ops_setcolreg,
	.fb_fillrect = dlfb_ops_fillrect,
	.fb_copyarea = dlfb_ops_copyarea,
	.fb_imageblit = dlfb_ops_imageblit,
	.fb_mmap = dlfb_ops_mmap,
	.fb_ioctl = dlfb_ops_ioctl,
	.fb_open = dlfb_ops_open,
	.fb_release = dlfb_ops_release,
	.fb_blank = dlfb_ops_blank,
	.fb_check_var = dlfb_ops_check_var,
	.fb_set_par = dlfb_ops_set_par,
};


/*
 * Assumes &info->lock held by caller
 * Assumes no active clients have framebuffer open
 */
static int dlfb_realloc_framebuffer(struct dlfb_data *dlfb, struct fb_info *info)
{
	int retval = -ENOMEM;
	int old_len = info->fix.smem_len;
	int new_len;
	unsigned char *old_fb = info->screen_base;
	unsigned char *new_fb;
	unsigned char *new_back = NULL;

	new_len = info->fix.line_length * info->var.yres;

	if (PAGE_ALIGN(new_len) > old_len) {
		/*
		 * Alloc system memory for virtual framebuffer
		 */
		new_fb = vmalloc(new_len);
		if (!new_fb) {
			dev_err(info->dev, "Virtual framebuffer alloc failed\n");
			goto error;
		}

		if (info->screen_base) {
			memcpy(new_fb, old_fb, old_len);
			vfree(info->screen_base);
		}

		info->screen_base = new_fb;
		info->fix.smem_len = PAGE_ALIGN(new_len);
		info->fix.smem_start = (unsigned long) new_fb;
		info->flags = udlfb_info_flags;

		/*
		 * Second framebuffer copy to mirror the framebuffer state
		 * on the physical USB device. We can function without this.
		 * But with imperfect damage info we may send pixels over USB
		 * that were, in fact, unchanged - wasting limited USB bandwidth
		 */
		if (shadow)
			new_back = vzalloc(new_len);
		if (!new_back)
			dev_info(info->dev,
				 "No shadow/backing buffer allocated\n");
		else {
			vfree(dlfb->backing_buffer);
			dlfb->backing_buffer = new_back;
		}
	}

	retval = 0;

error:
	return retval;
}

/*
 * 1) Get EDID from hw, or use sw default
 * 2) Parse into various fb_info structs
 * 3) Allocate virtual framebuffer memory to back highest res mode
 *
 * Parses EDID into three places used by various parts of fbdev:
 * fb_var_screeninfo contains the timing of the monitor's preferred mode
 * fb_info.monspecs is full parsed EDID info, including monspecs.modedb
 * fb_info.modelist is a linked list of all monitor & VESA modes which work
 *
 * If EDID is not readable/valid, then modelist is all VESA modes,
 * monspecs is NULL, and fb_var_screeninfo is set to safe VESA mode
 * Returns 0 if successful
 */
static int dlfb_setup_modes(struct dlfb_data *dlfb,
			    struct fb_info *info,
			    char *default_edid, size_t default_edid_size)
{
	char *edid;
	int i, result = 0, tries = 3;
	struct device *dev = info->device;
	struct fb_videomode *mode;
	const struct fb_videomode *default_vmode = NULL;

	if (info->dev) {
		/* only use mutex if info has been registered */
		mutex_lock(&info->lock);
		/* parent device is used otherwise */
		dev = info->dev;
	}

	edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
	if (!edid) {
		result = -ENOMEM;
		goto error;
	}

	fb_destroy_modelist(&info->modelist);
	memset(&info->monspecs, 0, sizeof(info->monspecs));

	/*
	 * Try to (re)read EDID from hardware first
	 * EDID data may return, but not parse as valid
	 * Try again a few times, in case of e.g. analog cable noise
	 */
	while (tries--) {

		i = dlfb_get_edid(dlfb, edid, EDID_LENGTH);

		if (i >= EDID_LENGTH)
			fb_edid_to_monspecs(edid, &info->monspecs);

		if (info->monspecs.modedb_len > 0) {
			dlfb->edid = edid;
			dlfb->edid_size = i;
			break;
		}
	}

	/* If that fails, use a previously returned EDID if available */
	if (info->monspecs.modedb_len == 0) {
		dev_err(dev, "Unable to get valid EDID from device/display\n");

		if (dlfb->edid) {
			fb_edid_to_monspecs(dlfb->edid, &info->monspecs);
			if (info->monspecs.modedb_len > 0)
				dev_err(dev, "Using previously queried EDID\n");
		}
	}

	/* If that fails, use the default EDID we were handed */
	if (info->monspecs.modedb_len == 0) {
		if (default_edid_size >= EDID_LENGTH) {
			fb_edid_to_monspecs(default_edid, &info->monspecs);
			if (info->monspecs.modedb_len > 0) {
				memcpy(edid, default_edid, default_edid_size);
				dlfb->edid = edid;
				dlfb->edid_size = default_edid_size;
				dev_err(dev, "Using default/backup EDID\n");
			}
		}
	}

	/* If we've got modes, let's pick a best default mode */
	if (info->monspecs.modedb_len > 0) {

		for (i = 0; i < info->monspecs.modedb_len; i++) {
			mode = &info->monspecs.modedb[i];
			if (dlfb_is_valid_mode(mode, dlfb)) {
				fb_add_videomode(mode, &info->modelist);
			} else {
				dev_dbg(dev, "Specified mode %dx%d too big\n",
					mode->xres, mode->yres);
				if (i == 0)
					/* if we've removed top/best mode */
					info->monspecs.misc
						&= ~FB_MISC_1ST_DETAIL;
			}
		}

		default_vmode = fb_find_best_display(&info->monspecs,
						     &info->modelist);
	}

	/* If everything else has failed, fall back to safe default mode */
	if (default_vmode == NULL) {

		struct fb_videomode fb_vmode = {0};

		/*
		 * Add the standard VESA modes to our modelist
		 * Since we don't have EDID, there may be modes that
		 * overspec monitor and/or are incorrect aspect ratio, etc.
		 * But at least the user has a chance to choose
		 */
		for (i = 0; i < VESA_MODEDB_SIZE; i++) {
			mode = (struct fb_videomode *)&vesa_modes[i];
			if (dlfb_is_valid_mode(mode, dlfb))
				fb_add_videomode(mode, &info->modelist);
			else
				dev_dbg(dev, "VESA mode %dx%d too big\n",
					mode->xres, mode->yres);
		}

		/*
		 * default to resolution safe for projectors
		 * (since they are most common case without EDID)
		 */
		fb_vmode.xres = 800;
		fb_vmode.yres = 600;
		fb_vmode.refresh = 60;
		default_vmode = fb_find_nearest_mode(&fb_vmode,
						     &info->modelist);
	}

	/* If we have a good mode and no active clients */
	if ((default_vmode != NULL) && (dlfb->fb_count == 0)) {

		fb_videomode_to_var(&info->var, default_vmode);
		dlfb_var_color_format(&info->var);

		/*
		 * with mode size info, we can now alloc our framebuffer.
		 */
		memcpy(&info->fix, &dlfb_fix, sizeof(dlfb_fix));
		info->fix.line_length = info->var.xres *
			(info->var.bits_per_pixel / 8);

		result = dlfb_realloc_framebuffer(dlfb, info);

	} else
		result = -EINVAL;

error:
	if (edid && (dlfb->edid != edid))
		kfree(edid);

	if (info->dev)
		mutex_unlock(&info->lock);

	return result;
}

static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
				   struct device_attribute *a, char *buf) {
	struct fb_info *fb_info = dev_get_drvdata(fbdev);
	struct dlfb_data *dlfb = fb_info->par;
	return snprintf(buf, PAGE_SIZE, "%u\n",
			atomic_read(&dlfb->bytes_rendered));
}

static ssize_t metrics_bytes_identical_show(struct device *fbdev,
				   struct device_attribute *a, char *buf) {
	struct fb_info *fb_info = dev_get_drvdata(fbdev);
	struct dlfb_data *dlfb = fb_info->par;
	return snprintf(buf, PAGE_SIZE, "%u\n",
			atomic_read(&dlfb->bytes_identical));
}

static ssize_t metrics_bytes_sent_show(struct device *fbdev,
				   struct device_attribute *a, char *buf) {
	struct fb_info *fb_info = dev_get_drvdata(fbdev);
	struct dlfb_data *dlfb = fb_info->par;
	return snprintf(buf, PAGE_SIZE, "%u\n",
			atomic_read(&dlfb->bytes_sent));
}

static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
				   struct device_attribute *a, char *buf) {
	struct fb_info *fb_info = dev_get_drvdata(fbdev);
	struct dlfb_data *dlfb = fb_info->par;
	return snprintf(buf, PAGE_SIZE, "%u\n",
			atomic_read(&dlfb->cpu_kcycles_used));
}

static ssize_t edid_show(
			struct file *filp,
			struct kobject *kobj, struct bin_attribute *a,
			char *buf, loff_t off, size_t count) {
	struct device *fbdev = container_of(kobj, struct device, kobj);
	struct fb_info *fb_info = dev_get_drvdata(fbdev);
	struct dlfb_data *dlfb = fb_info->par;

	if (dlfb->edid == NULL)
		return 0;

	if ((off >= dlfb->edid_size) || (count > dlfb->edid_size))
		return 0;

	if (off + count > dlfb->edid_size)
		count = dlfb->edid_size - off;

	memcpy(buf, dlfb->edid, count);

	return count;
}

static ssize_t edid_store(
			struct file *filp,
			struct kobject *kobj, struct bin_attribute *a,
			char *src, loff_t src_off, size_t src_size) {
	struct device *fbdev = container_of(kobj, struct device, kobj);
	struct fb_info *fb_info = dev_get_drvdata(fbdev);
	struct dlfb_data *dlfb = fb_info->par;
	int ret;

	/* We only support write of entire EDID at once, no offset */
	if ((src_size != EDID_LENGTH) || (src_off != 0))
		return -EINVAL;

	ret = dlfb_setup_modes(dlfb, fb_info, src, src_size);
	if (ret)
		return ret;

	if (!dlfb->edid || memcmp(src, dlfb->edid, src_size))
		return -EINVAL;

	dlfb_ops_set_par(fb_info);
	return src_size;
}

static ssize_t metrics_reset_store(struct device *fbdev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct fb_info *fb_info = dev_get_drvdata(fbdev);
	struct dlfb_data *dlfb = fb_info->par;

	atomic_set(&dlfb->bytes_rendered, 0);
	atomic_set(&dlfb->bytes_identical, 0);
	atomic_set(&dlfb->bytes_sent, 0);
	atomic_set(&dlfb->cpu_kcycles_used, 0);

	return count;
}
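/*
 * Usage note (illustrative only; the exact path depends on which fbN this
 * device becomes): the attributes above are created on the fb class device
 * in dlfb_init_framebuffer_work(), so they typically appear as e.g.
 *
 *	cat /sys/class/graphics/fb1/metrics_bytes_rendered
 *	echo 1 > /sys/class/graphics/fb1/metrics_reset
 *
 * and the monitor EDID can be read from (or a replacement written to)
 * /sys/class/graphics/fb1/edid.
 */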
static const struct bin_attribute edid_attr = {
	.attr.name = "edid",
	.attr.mode = 0666,
	.size = EDID_LENGTH,
	.read = edid_show,
	.write = edid_store
};

static const struct device_attribute fb_device_attrs[] = {
	__ATTR_RO(metrics_bytes_rendered),
	__ATTR_RO(metrics_bytes_identical),
	__ATTR_RO(metrics_bytes_sent),
	__ATTR_RO(metrics_cpu_kcycles_used),
	__ATTR(metrics_reset, S_IWUSR, NULL, metrics_reset_store),
};

/*
 * This is necessary before we can communicate with the display controller.
 */
static int dlfb_select_std_channel(struct dlfb_data *dlfb)
{
	int ret;
	void *buf;
	static const u8 set_def_chn[] = {
				0x57, 0xCD, 0xDC, 0xA7,
				0x1C, 0x88, 0x5E, 0x15,
				0x60, 0xFE, 0xC6, 0x97,
				0x16, 0x3D, 0x47, 0xF2 };

	buf = kmemdup(set_def_chn, sizeof(set_def_chn), GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(dlfb->udev, usb_sndctrlpipe(dlfb->udev, 0),
			      NR_USB_REQUEST_CHANNEL,
			      (USB_DIR_OUT | USB_TYPE_VENDOR), 0, 0,
			      buf, sizeof(set_def_chn), USB_CTRL_SET_TIMEOUT);

	kfree(buf);

	return ret;
}

static int dlfb_parse_vendor_descriptor(struct dlfb_data *dlfb,
					struct usb_interface *intf)
{
	char *desc;
	char *buf;
	char *desc_end;
	int total_len;

	buf = kzalloc(MAX_VENDOR_DESCRIPTOR_SIZE, GFP_KERNEL);
	if (!buf)
		return false;
	desc = buf;

	total_len = usb_get_descriptor(interface_to_usbdev(intf),
				       0x5f, /* vendor specific */
				       0, desc, MAX_VENDOR_DESCRIPTOR_SIZE);

	/* if not found, look in configuration descriptor */
	if (total_len < 0) {
		if (0 == usb_get_extra_descriptor(intf->cur_altsetting,
						  0x5f, &desc))
			total_len = (int) desc[0];
	}

	if (total_len > 5) {
		dev_info(&intf->dev,
			 "vendor descriptor length: %d data: %11ph\n",
			 total_len, desc);

		if ((desc[0] != total_len) ||	/* descriptor length */
		    (desc[1] != 0x5f) ||	/* vendor descriptor type */
		    (desc[2] != 0x01) ||	/* version (2 bytes) */
		    (desc[3] != 0x00) ||
		    (desc[4] != total_len - 2)) /* length after type */
			goto unrecognized;

		desc_end = desc + total_len;
		desc += 5; /* the fixed header we've already parsed */

		while (desc < desc_end) {
			u8 length;
			u16 key;

			key = le16_to_cpu(*((u16 *) desc));
			desc += sizeof(u16);
			length = *desc;
			desc++;

			switch (key) {
			case 0x0200: { /* max_area */
				u32 max_area;
				max_area = le32_to_cpu(*((u32 *)desc));
				dev_warn(&intf->dev,
					 "DL chip limited to %d pixel modes\n",
					 max_area);
				dlfb->sku_pixel_limit = max_area;
				break;
			}
			default:
				break;
			}
			desc += length;
		}
	} else {
		dev_info(&intf->dev, "vendor descriptor not available (%d)\n",
			 total_len);
	}

	goto success;

unrecognized:
	/* allow udlfb to load for now even if firmware unrecognized */
	dev_err(&intf->dev, "Unrecognized vendor firmware descriptor\n");

success:
	kfree(buf);
	return true;
}

static void dlfb_init_framebuffer_work(struct work_struct *work);

static int dlfb_usb_probe(struct usb_interface *intf,
			  const struct usb_device_id *id)
{
	struct dlfb_data *dlfb;
	int retval = -ENOMEM;
	struct usb_device *usbdev = interface_to_usbdev(intf);

	/* usb initialization */
	dlfb = kzalloc(sizeof(*dlfb), GFP_KERNEL);
	if (!dlfb) {
		dev_err(&intf->dev, "%s: failed to allocate dlfb\n", __func__);
		goto error;
	}

	kref_init(&dlfb->kref); /* matching kref_put in usb .disconnect fn */

	dlfb->udev = usbdev;
	usb_set_intfdata(intf, dlfb);

	dev_dbg(&intf->dev, "console enable=%d\n", console);
	dev_dbg(&intf->dev, "fb_defio enable=%d\n", fb_defio);
	dev_dbg(&intf->dev, "shadow enable=%d\n", shadow);

	dlfb->sku_pixel_limit = 2048 * 1152; /* default to maximum */

	if (!dlfb_parse_vendor_descriptor(dlfb, intf)) {
		dev_err(&intf->dev,
			"firmware not recognized, incompatible device?\n");
		goto error;
	}

	if (pixel_limit) {
		dev_warn(&intf->dev,
			 "DL chip limit of %d overridden to %d\n",
			 dlfb->sku_pixel_limit, pixel_limit);
		dlfb->sku_pixel_limit = pixel_limit;
	}


	if (!dlfb_alloc_urb_list(dlfb, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
		retval = -ENOMEM;
		dev_err(&intf->dev, "unable to allocate urb list\n");
		goto error;
	}

	kref_get(&dlfb->kref); /* matching kref_put in free_framebuffer_work */

	/* We don't register a new USB class. Our client interface is dlfbev */

	/* Workitem to keep things fast & simple during USB enumeration */
	INIT_DELAYED_WORK(&dlfb->init_framebuffer_work,
			  dlfb_init_framebuffer_work);
	schedule_delayed_work(&dlfb->init_framebuffer_work, 0);

	return 0;

error:
	if (dlfb) {

		kref_put(&dlfb->kref, dlfb_free); /* last ref from kref_init */

		/* dev has been deallocated. Do not dereference */
	}

	return retval;
}

static void dlfb_init_framebuffer_work(struct work_struct *work)
{
	int i, retval;
	struct fb_info *info;
	const struct device_attribute *attr;
	struct dlfb_data *dlfb = container_of(work, struct dlfb_data,
					      init_framebuffer_work.work);

	/* allocates framebuffer driver structure, not framebuffer memory */
	info = framebuffer_alloc(0, &dlfb->udev->dev);
	if (!info) {
		dev_err(&dlfb->udev->dev, "framebuffer_alloc failed\n");
		goto error;
	}

	dlfb->info = info;
	info->par = dlfb;
	info->pseudo_palette = dlfb->pseudo_palette;
	info->fbops = &dlfb_ops;

	retval = fb_alloc_cmap(&info->cmap, 256, 0);
	if (retval < 0) {
		dev_err(info->device, "cmap allocation failed: %d\n", retval);
		goto error;
	}

	INIT_DELAYED_WORK(&dlfb->free_framebuffer_work,
			  dlfb_free_framebuffer_work);

	INIT_LIST_HEAD(&info->modelist);

	retval = dlfb_setup_modes(dlfb, info, NULL, 0);
	if (retval != 0) {
		dev_err(info->device,
			"unable to find common mode for display and adapter\n");
		goto error;
	}

	/* ready to begin using device */

	atomic_set(&dlfb->usb_active, 1);
	dlfb_select_std_channel(dlfb);

	dlfb_ops_check_var(&info->var, info);
	dlfb_ops_set_par(info);

	retval = register_framebuffer(info);
	if (retval < 0) {
		dev_err(info->device, "unable to register framebuffer: %d\n",
			retval);
		goto error;
	}

	for (i = 0; i < ARRAY_SIZE(fb_device_attrs); i++) {
		attr = &fb_device_attrs[i];
		retval = device_create_file(info->dev, attr);
		if (retval)
			dev_warn(info->device,
				 "failed to create '%s' attribute: %d\n",
				 attr->attr.name, retval);
	}
	retval = device_create_bin_file(info->dev, &edid_attr);
	if (retval)
		dev_warn(info->device, "failed to create '%s' attribute: %d\n",
			 edid_attr.attr.name, retval);

	dev_info(info->device,
		 "%s is DisplayLink USB device (%dx%d, %dK framebuffer memory)\n",
		 dev_name(info->dev), info->var.xres, info->var.yres,
		 ((dlfb->backing_buffer) ?
		 info->fix.smem_len * 2 : info->fix.smem_len) >> 10);
	return;

error:
	dlfb_free_framebuffer(dlfb);
}

static void dlfb_usb_disconnect(struct usb_interface *intf)
{
	struct dlfb_data *dlfb;
	struct fb_info *info;
	int i;

	dlfb = usb_get_intfdata(intf);
	info = dlfb->info;

	dev_dbg(&intf->dev, "USB disconnect starting\n");

	/* we virtualize until all fb clients release. Then we free */
	dlfb->virtualized = true;

	/* When non-active we'll update virtual framebuffer, but no new urbs */
	atomic_set(&dlfb->usb_active, 0);

	/* this function will wait for all in-flight urbs to complete */
	dlfb_free_urb_list(dlfb);

	if (info) {
		/* remove udlfb's sysfs interfaces */
		for (i = 0; i < ARRAY_SIZE(fb_device_attrs); i++)
			device_remove_file(info->dev, &fb_device_attrs[i]);
		device_remove_bin_file(info->dev, &edid_attr);
		unlink_framebuffer(info);
	}

	usb_set_intfdata(intf, NULL);
	dlfb->udev = NULL;

	/* if clients still have us open, will be freed on last close */
	if (dlfb->fb_count == 0)
		schedule_delayed_work(&dlfb->free_framebuffer_work, 0);

	/* release reference taken by kref_init in probe() */
	kref_put(&dlfb->kref, dlfb_free);

	/* consider dlfb_data freed */
}

static struct usb_driver dlfb_driver = {
	.name = "udlfb",
	.probe = dlfb_usb_probe,
	.disconnect = dlfb_usb_disconnect,
	.id_table = id_table,
};

module_usb_driver(dlfb_driver);

static void dlfb_urb_completion(struct urb *urb)
{
	struct urb_node *unode = urb->context;
	struct dlfb_data *dlfb = unode->dlfb;
	unsigned long flags;

	switch (urb->status) {
	case 0:
		/* success */
		break;
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
		/* sync/async unlink faults aren't errors */
		break;
	default:
		dev_err(&dlfb->udev->dev,
			"%s - nonzero write bulk status received: %d\n",
			__func__, urb->status);
		atomic_set(&dlfb->lost_pixels, 1);
		break;
	}

	urb->transfer_buffer_length = dlfb->urbs.size; /* reset to actual */

	spin_lock_irqsave(&dlfb->urbs.lock, flags);
	list_add_tail(&unode->entry, &dlfb->urbs.list);
	dlfb->urbs.available++;
	spin_unlock_irqrestore(&dlfb->urbs.lock, flags);

	/*
	 * When using fb_defio, we deadlock if up() is called
	 * while another is waiting. So queue to another process.
	 */
	if (fb_defio)
		schedule_delayed_work(&unode->release_urb_work, 0);
	else
		up(&dlfb->urbs.limit_sem);
}

static void dlfb_free_urb_list(struct dlfb_data *dlfb)
{
	int count = dlfb->urbs.count;
	struct list_head *node;
	struct urb_node *unode;
	struct urb *urb;
	int ret;
	unsigned long flags;

	/* keep waiting and freeing, until we've got 'em all */
	while (count--) {

		/* Getting interrupted means a leak, but ok at disconnect */
		ret = down_interruptible(&dlfb->urbs.limit_sem);
		if (ret)
			break;

		spin_lock_irqsave(&dlfb->urbs.lock, flags);

		node = dlfb->urbs.list.next; /* have reserved one with sem */
		list_del_init(node);

		spin_unlock_irqrestore(&dlfb->urbs.lock, flags);

		unode = list_entry(node, struct urb_node, entry);
		urb = unode->urb;

		/* Free each separately allocated piece */
		usb_free_coherent(urb->dev, dlfb->urbs.size,
				  urb->transfer_buffer, urb->transfer_dma);
		usb_free_urb(urb);
		kfree(node);
	}

	dlfb->urbs.count = 0;
}

static int dlfb_alloc_urb_list(struct dlfb_data *dlfb, int count, size_t size)
{
	int i = 0;
	struct urb *urb;
	struct urb_node *unode;
	char *buf;

	spin_lock_init(&dlfb->urbs.lock);

	dlfb->urbs.size = size;
	INIT_LIST_HEAD(&dlfb->urbs.list);

	while (i < count) {
		unode = kzalloc(sizeof(*unode), GFP_KERNEL);
		if (!unode)
			break;
		unode->dlfb = dlfb;

		INIT_DELAYED_WORK(&unode->release_urb_work,
				  dlfb_release_urb_work);

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			kfree(unode);
			break;
		}
		unode->urb = urb;

		buf = usb_alloc_coherent(dlfb->udev, MAX_TRANSFER, GFP_KERNEL,
					 &urb->transfer_dma);
		if (!buf) {
			kfree(unode);
			usb_free_urb(urb);
			break;
		}

		/* urb->transfer_buffer_length set to actual before submit */
		usb_fill_bulk_urb(urb, dlfb->udev, usb_sndbulkpipe(dlfb->udev, 1),
				  buf, size, dlfb_urb_completion, unode);
		urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

		list_add_tail(&unode->entry, &dlfb->urbs.list);

		i++;
	}

	sema_init(&dlfb->urbs.limit_sem, i);
	dlfb->urbs.count = i;
	dlfb->urbs.available = i;

	return i;
}

static struct urb *dlfb_get_urb(struct dlfb_data *dlfb)
{
	int ret;
	struct list_head *entry;
	struct urb_node *unode;
	unsigned long flags;

	/* Wait for an in-flight buffer to complete and get re-queued */
	ret = down_timeout(&dlfb->urbs.limit_sem, GET_URB_TIMEOUT);
	if (ret) {
		atomic_set(&dlfb->lost_pixels, 1);
		dev_warn(&dlfb->udev->dev,
			 "wait for urb interrupted: %d available: %d\n",
			 ret, dlfb->urbs.available);
		return NULL;
	}

	spin_lock_irqsave(&dlfb->urbs.lock, flags);

	BUG_ON(list_empty(&dlfb->urbs.list)); /* reserved one with limit_sem */
	entry = dlfb->urbs.list.next;
	list_del_init(entry);
	dlfb->urbs.available--;

	spin_unlock_irqrestore(&dlfb->urbs.lock, flags);

	unode = list_entry(entry, struct urb_node, entry);
	return unode->urb;
}

static int dlfb_submit_urb(struct dlfb_data *dlfb, struct urb *urb, size_t len)
{
	int ret;

	BUG_ON(len > dlfb->urbs.size);

	urb->transfer_buffer_length = len; /* set to actual payload len */
	ret = usb_submit_urb(urb, GFP_KERNEL);
	if (ret) {
		dlfb_urb_completion(urb); /* because no one else will */
		atomic_set(&dlfb->lost_pixels, 1);
		dev_err(&dlfb->udev->dev, "submit urb error: %d\n", ret);
	}
	return ret;
}

module_param(console, bool, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
MODULE_PARM_DESC(console, "Allow fbcon to open framebuffer");

module_param(fb_defio, bool, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
MODULE_PARM_DESC(fb_defio, "Page fault detection of mmap writes");

module_param(shadow, bool, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
MODULE_PARM_DESC(shadow, "Shadow vid mem. Disable to save mem but lose perf");

module_param(pixel_limit, int, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
MODULE_PARM_DESC(pixel_limit, "Force limit on max mode (in x*y pixels)");

MODULE_AUTHOR("Roberto De Ioris <roberto@unbit.it>, "
	      "Jaya Kumar <jayakumar.lkml@gmail.com>, "
	      "Bernie Thompson <bernie@plugable.com>");
MODULE_DESCRIPTION("DisplayLink kernel framebuffer driver");
MODULE_LICENSE("GPL");
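/*
 * Example module load (illustrative; the values are up to the user):
 *
 *	modprobe udlfb fb_defio=0 shadow=0 pixel_limit=2073600
 *
 * Because the parameters above are registered with read/write permissions,
 * they also typically appear under /sys/module/udlfb/parameters/ at runtime.
 */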