1 /* 2 * udlfb.c -- Framebuffer driver for DisplayLink USB controller 3 * 4 * Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it> 5 * Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com> 6 * Copyright (C) 2009 Bernie Thompson <bernie@plugable.com> 7 * 8 * This file is subject to the terms and conditions of the GNU General Public 9 * License v2. See the file COPYING in the main directory of this archive for 10 * more details. 11 * 12 * Layout is based on skeletonfb by James Simmons and Geert Uytterhoeven, 13 * usb-skeleton by GregKH. 14 * 15 * Device-specific portions based on information from Displaylink, with work 16 * from Florian Echtler, Henrik Bjerregaard Pedersen, and others. 17 */ 18 19 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 20 21 #include <linux/module.h> 22 #include <linux/kernel.h> 23 #include <linux/init.h> 24 #include <linux/usb.h> 25 #include <linux/uaccess.h> 26 #include <linux/mm.h> 27 #include <linux/fb.h> 28 #include <linux/vmalloc.h> 29 #include <linux/slab.h> 30 #include <linux/prefetch.h> 31 #include <linux/delay.h> 32 #include <video/udlfb.h> 33 #include "edid.h" 34 35 static struct fb_fix_screeninfo dlfb_fix = { 36 .id = "udlfb", 37 .type = FB_TYPE_PACKED_PIXELS, 38 .visual = FB_VISUAL_TRUECOLOR, 39 .xpanstep = 0, 40 .ypanstep = 0, 41 .ywrapstep = 0, 42 .accel = FB_ACCEL_NONE, 43 }; 44 45 static const u32 udlfb_info_flags = FBINFO_DEFAULT | FBINFO_READS_FAST | 46 FBINFO_VIRTFB | 47 FBINFO_HWACCEL_IMAGEBLIT | FBINFO_HWACCEL_FILLRECT | 48 FBINFO_HWACCEL_COPYAREA | FBINFO_MISC_ALWAYS_SETPAR; 49 50 /* 51 * There are many DisplayLink-based graphics products, all with unique PIDs. 52 * So we match on DisplayLink's VID + Vendor-Defined Interface Class (0xff) 53 * We also require a match on SubClass (0x00) and Protocol (0x00), 54 * which is compatible with all known USB 2.0 era graphics chips and firmware, 55 * but allows DisplayLink to increment those for any future incompatible chips 56 */ 57 static struct usb_device_id id_table[] = { 58 {.idVendor = 0x17e9, 59 .bInterfaceClass = 0xff, 60 .bInterfaceSubClass = 0x00, 61 .bInterfaceProtocol = 0x00, 62 .match_flags = USB_DEVICE_ID_MATCH_VENDOR | 63 USB_DEVICE_ID_MATCH_INT_CLASS | 64 USB_DEVICE_ID_MATCH_INT_SUBCLASS | 65 USB_DEVICE_ID_MATCH_INT_PROTOCOL, 66 }, 67 {}, 68 }; 69 MODULE_DEVICE_TABLE(usb, id_table); 70 71 /* module options */ 72 static bool console = 1; /* Allow fbcon to open framebuffer */ 73 static bool fb_defio = 1; /* Detect mmap writes using page faults */ 74 static bool shadow = 1; /* Optionally disable shadow framebuffer */ 75 static int pixel_limit; /* Optionally force a pixel resolution limit */ 76 77 /* dlfb keeps a list of urbs for efficient bulk transfers */ 78 static void dlfb_urb_completion(struct urb *urb); 79 static struct urb *dlfb_get_urb(struct dlfb_data *dev); 80 static int dlfb_submit_urb(struct dlfb_data *dev, struct urb * urb, size_t len); 81 static int dlfb_alloc_urb_list(struct dlfb_data *dev, int count, size_t size); 82 static void dlfb_free_urb_list(struct dlfb_data *dev); 83 84 /* 85 * All DisplayLink bulk operations start with 0xAF, followed by specific code 86 * All operations are written to buffers which then later get sent to device 87 */ 88 static char *dlfb_set_register(char *buf, u8 reg, u8 val) 89 { 90 *buf++ = 0xAF; 91 *buf++ = 0x20; 92 *buf++ = reg; 93 *buf++ = val; 94 return buf; 95 } 96 97 static char *dlfb_vidreg_lock(char *buf) 98 { 99 return dlfb_set_register(buf, 0xFF, 0x00); 100 } 101 102 static char *dlfb_vidreg_unlock(char *buf) 103 { 104 return 
dlfb_set_register(buf, 0xFF, 0xFF);
}

/*
 * Map FB_BLANK_* to DisplayLink register
 * DLReg FB_BLANK_*
 * ----- -----------------------------
 * 0x00  FB_BLANK_UNBLANK (0)
 * 0x01  FB_BLANK_NORMAL (1)
 * 0x03  FB_BLANK_VSYNC_SUSPEND (2)
 * 0x05  FB_BLANK_HSYNC_SUSPEND (3)
 * 0x07  FB_BLANK_POWERDOWN (4) Note: requires modeset to come back
 */
static char *dlfb_blanking(char *buf, int fb_blank)
{
	u8 reg;

	switch (fb_blank) {
	case FB_BLANK_POWERDOWN:
		reg = 0x07;
		break;
	case FB_BLANK_HSYNC_SUSPEND:
		reg = 0x05;
		break;
	case FB_BLANK_VSYNC_SUSPEND:
		reg = 0x03;
		break;
	case FB_BLANK_NORMAL:
		reg = 0x01;
		break;
	default:
		reg = 0x00;
	}

	buf = dlfb_set_register(buf, 0x1F, reg);

	return buf;
}

static char *dlfb_set_color_depth(char *buf, u8 selection)
{
	return dlfb_set_register(buf, 0x00, selection);
}

static char *dlfb_set_base16bpp(char *wrptr, u32 base)
{
	/* base pointer for the 16bpp framebuffer segment; 0x20 is the hi byte */
	wrptr = dlfb_set_register(wrptr, 0x20, base >> 16);
	wrptr = dlfb_set_register(wrptr, 0x21, base >> 8);
	return dlfb_set_register(wrptr, 0x22, base);
}

/*
 * DisplayLink HW has separate 16bpp and 8bpp framebuffers.
 * In 24bpp modes, the low 3:2:3 RGB bits go in the 8bpp framebuffer
 */
static char *dlfb_set_base8bpp(char *wrptr, u32 base)
{
	wrptr = dlfb_set_register(wrptr, 0x26, base >> 16);
	wrptr = dlfb_set_register(wrptr, 0x27, base >> 8);
	return dlfb_set_register(wrptr, 0x28, base);
}

static char *dlfb_set_register_16(char *wrptr, u8 reg, u16 value)
{
	wrptr = dlfb_set_register(wrptr, reg, value >> 8);
	return dlfb_set_register(wrptr, reg+1, value);
}

/*
 * This is kind of weird because the controller takes some
 * register values in a different byte order than other registers.
 */
static char *dlfb_set_register_16be(char *wrptr, u8 reg, u16 value)
{
	wrptr = dlfb_set_register(wrptr, reg, value);
	return dlfb_set_register(wrptr, reg+1, value >> 8);
}

/*
 * LFSR is linear feedback shift register. The reason we have this is
 * because the display controller needs to minimize the clock depth of
 * various counters used in the display path. So this code reverses the
 * provided value into the lfsr16 value by counting backwards to get
 * the value that needs to be set in the hardware comparator to get the
 * same actual count. This makes sense once you read above a couple of
 * times and think about it from a hardware perspective.
 */
static u16 dlfb_lfsr16(u16 actual_count)
{
	u32 lv = 0xFFFF; /* This is the lfsr value that the hw starts with */

	while (actual_count--) {
		lv = ((lv << 1) |
		      (((lv >> 15) ^ (lv >> 4) ^ (lv >> 2) ^ (lv >> 1)) & 1))
		      & 0xFFFF;
	}

	return (u16) lv;
}

/*
 * This does LFSR conversion on the value that is to be written.
 * See LFSR explanation above for more detail.
 */
static char *dlfb_set_register_lfsr16(char *wrptr, u8 reg, u16 value)
{
	return dlfb_set_register_16(wrptr, reg, dlfb_lfsr16(value));
}

/*
 * This takes a standard fbdev screeninfo struct and all of its monitor mode
 * details and converts them into the DisplayLink equivalent register commands.
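 *
 * Worked example (illustrative only, not from DisplayLink documentation),
 * using standard VESA 800x600@60 timings: xres 800, hsync_len 128,
 * left_margin (h back porch) 88, right_margin (h front porch) 40,
 * yres 600, vsync_len 4, upper_margin 23, lower_margin 1,
 * pixclock 25000 ps (40 MHz). The counts below are what this function
 * computes, before the LFSR encoding applied to most of them:
 *   xds = 88 + 128              = 216  (reg 0x01)
 *   xde = 216 + 800             = 1016 (reg 0x03)
 *   x end count = 1016 + 40 - 1 = 1055 (reg 0x09)
 *   yds = 23 + 4                = 27   (reg 0x05)
 *   yde = 27 + 600              = 627  (reg 0x07)
 *   yec = 600 + 23 + 1 + 4      = 628  (reg 0x11)
 *   pclk5k = 200000000 / 25000  = 8000, i.e. 40 MHz in 5 kHz units (reg 0x1B)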
217 */ 218 static char *dlfb_set_vid_cmds(char *wrptr, struct fb_var_screeninfo *var) 219 { 220 u16 xds, yds; 221 u16 xde, yde; 222 u16 yec; 223 224 /* x display start */ 225 xds = var->left_margin + var->hsync_len; 226 wrptr = dlfb_set_register_lfsr16(wrptr, 0x01, xds); 227 /* x display end */ 228 xde = xds + var->xres; 229 wrptr = dlfb_set_register_lfsr16(wrptr, 0x03, xde); 230 231 /* y display start */ 232 yds = var->upper_margin + var->vsync_len; 233 wrptr = dlfb_set_register_lfsr16(wrptr, 0x05, yds); 234 /* y display end */ 235 yde = yds + var->yres; 236 wrptr = dlfb_set_register_lfsr16(wrptr, 0x07, yde); 237 238 /* x end count is active + blanking - 1 */ 239 wrptr = dlfb_set_register_lfsr16(wrptr, 0x09, 240 xde + var->right_margin - 1); 241 242 /* libdlo hardcodes hsync start to 1 */ 243 wrptr = dlfb_set_register_lfsr16(wrptr, 0x0B, 1); 244 245 /* hsync end is width of sync pulse + 1 */ 246 wrptr = dlfb_set_register_lfsr16(wrptr, 0x0D, var->hsync_len + 1); 247 248 /* hpixels is active pixels */ 249 wrptr = dlfb_set_register_16(wrptr, 0x0F, var->xres); 250 251 /* yendcount is vertical active + vertical blanking */ 252 yec = var->yres + var->upper_margin + var->lower_margin + 253 var->vsync_len; 254 wrptr = dlfb_set_register_lfsr16(wrptr, 0x11, yec); 255 256 /* libdlo hardcodes vsync start to 0 */ 257 wrptr = dlfb_set_register_lfsr16(wrptr, 0x13, 0); 258 259 /* vsync end is width of vsync pulse */ 260 wrptr = dlfb_set_register_lfsr16(wrptr, 0x15, var->vsync_len); 261 262 /* vpixels is active pixels */ 263 wrptr = dlfb_set_register_16(wrptr, 0x17, var->yres); 264 265 /* convert picoseconds to 5kHz multiple for pclk5k = x * 1E12/5k */ 266 wrptr = dlfb_set_register_16be(wrptr, 0x1B, 267 200*1000*1000/var->pixclock); 268 269 return wrptr; 270 } 271 272 /* 273 * This takes a standard fbdev screeninfo struct that was fetched or prepared 274 * and then generates the appropriate command sequence that then drives the 275 * display controller. 276 */ 277 static int dlfb_set_video_mode(struct dlfb_data *dev, 278 struct fb_var_screeninfo *var) 279 { 280 char *buf; 281 char *wrptr; 282 int retval = 0; 283 int writesize; 284 struct urb *urb; 285 286 if (!atomic_read(&dev->usb_active)) 287 return -EPERM; 288 289 urb = dlfb_get_urb(dev); 290 if (!urb) 291 return -ENOMEM; 292 293 buf = (char *) urb->transfer_buffer; 294 295 /* 296 * This first section has to do with setting the base address on the 297 * controller * associated with the display. There are 2 base 298 * pointers, currently, we only * use the 16 bpp segment. 
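 *
 * For reference, a sketch of the resulting wire format (derived from
 * dlfb_set_register() above, where each write is a 4-byte AF 20 <reg> <val>
 * packet): the lock, color depth and 16bpp base writes below come out as
 *   AF 20 FF 00   AF 20 00 00   AF 20 20 00   AF 20 21 00   AF 20 22 00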
299 */ 300 wrptr = dlfb_vidreg_lock(buf); 301 wrptr = dlfb_set_color_depth(wrptr, 0x00); 302 /* set base for 16bpp segment to 0 */ 303 wrptr = dlfb_set_base16bpp(wrptr, 0); 304 /* set base for 8bpp segment to end of fb */ 305 wrptr = dlfb_set_base8bpp(wrptr, dev->info->fix.smem_len); 306 307 wrptr = dlfb_set_vid_cmds(wrptr, var); 308 wrptr = dlfb_blanking(wrptr, FB_BLANK_UNBLANK); 309 wrptr = dlfb_vidreg_unlock(wrptr); 310 311 writesize = wrptr - buf; 312 313 retval = dlfb_submit_urb(dev, urb, writesize); 314 315 dev->blank_mode = FB_BLANK_UNBLANK; 316 317 return retval; 318 } 319 320 static int dlfb_ops_mmap(struct fb_info *info, struct vm_area_struct *vma) 321 { 322 unsigned long start = vma->vm_start; 323 unsigned long size = vma->vm_end - vma->vm_start; 324 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; 325 unsigned long page, pos; 326 327 if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) 328 return -EINVAL; 329 if (size > info->fix.smem_len) 330 return -EINVAL; 331 if (offset > info->fix.smem_len - size) 332 return -EINVAL; 333 334 pos = (unsigned long)info->fix.smem_start + offset; 335 336 pr_notice("mmap() framebuffer addr:%lu size:%lu\n", 337 pos, size); 338 339 while (size > 0) { 340 page = vmalloc_to_pfn((void *)pos); 341 if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED)) 342 return -EAGAIN; 343 344 start += PAGE_SIZE; 345 pos += PAGE_SIZE; 346 if (size > PAGE_SIZE) 347 size -= PAGE_SIZE; 348 else 349 size = 0; 350 } 351 352 return 0; 353 } 354 355 /* 356 * Trims identical data from front and back of line 357 * Sets new front buffer address and width 358 * And returns byte count of identical pixels 359 * Assumes CPU natural alignment (unsigned long) 360 * for back and front buffer ptrs and width 361 */ 362 static int dlfb_trim_hline(const u8 *bback, const u8 **bfront, int *width_bytes) 363 { 364 int j, k; 365 const unsigned long *back = (const unsigned long *) bback; 366 const unsigned long *front = (const unsigned long *) *bfront; 367 const int width = *width_bytes / sizeof(unsigned long); 368 int identical = width; 369 int start = width; 370 int end = width; 371 372 prefetch((void *) front); 373 prefetch((void *) back); 374 375 for (j = 0; j < width; j++) { 376 if (back[j] != front[j]) { 377 start = j; 378 break; 379 } 380 } 381 382 for (k = width - 1; k > j; k--) { 383 if (back[k] != front[k]) { 384 end = k+1; 385 break; 386 } 387 } 388 389 identical = start + (width - end); 390 *bfront = (u8 *) &front[start]; 391 *width_bytes = (end - start) * sizeof(unsigned long); 392 393 return identical * sizeof(unsigned long); 394 } 395 396 /* 397 * Render a command stream for an encoded horizontal line segment of pixels. 398 * 399 * A command buffer holds several commands. 400 * It always begins with a fresh command header 401 * (the protocol doesn't require this, but we enforce it to allow 402 * multiple buffers to be potentially encoded and sent in parallel). 403 * A single command encodes one contiguous horizontal line of pixels 404 * 405 * The function relies on the client to do all allocation, so that 406 * rendering can be done directly to output buffers (e.g. USB URBs). 407 * The function fills the supplied command buffer, providing information 408 * on where it left off, so the client may call in again with additional 409 * buffers if the line will take several buffers to complete. 410 * 411 * A single command can transmit a maximum of 256 pixels, 412 * regardless of the compression ratio (protocol design limit). 
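 *
 * Illustrative example (traced from dlfb_compress_hline() below, not from
 * hardware documentation): a fragment of 16bpp pixels A B C D D D D E
 * would be encoded roughly as
 *   AF 6B <addr23:16> <addr15:8> <addr7:0> 08 04 A B C D 03 01 E
 * i.e. header and 24-bit device address, 8 pixels covered by this command,
 * a raw span of 4 pixels (each written as 2 big-endian bytes), a repeat
 * byte for 3 further copies of D, then a final raw span of 1 pixel.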
413 * To the hardware, 0 for a size byte means 256 414 * 415 * Rather than 256 pixel commands which are either rl or raw encoded, 416 * the rlx command simply assumes alternating raw and rl spans within one cmd. 417 * This has a slightly larger header overhead, but produces more even results. 418 * It also processes all data (read and write) in a single pass. 419 * Performance benchmarks of common cases show it having just slightly better 420 * compression than 256 pixel raw or rle commands, with similar CPU consumpion. 421 * But for very rl friendly data, will compress not quite as well. 422 */ 423 static void dlfb_compress_hline( 424 const uint16_t **pixel_start_ptr, 425 const uint16_t *const pixel_end, 426 uint32_t *device_address_ptr, 427 uint8_t **command_buffer_ptr, 428 const uint8_t *const cmd_buffer_end) 429 { 430 const uint16_t *pixel = *pixel_start_ptr; 431 uint32_t dev_addr = *device_address_ptr; 432 uint8_t *cmd = *command_buffer_ptr; 433 const int bpp = 2; 434 435 while ((pixel_end > pixel) && 436 (cmd_buffer_end - MIN_RLX_CMD_BYTES > cmd)) { 437 uint8_t *raw_pixels_count_byte = NULL; 438 uint8_t *cmd_pixels_count_byte = NULL; 439 const uint16_t *raw_pixel_start = NULL; 440 const uint16_t *cmd_pixel_start, *cmd_pixel_end = NULL; 441 442 prefetchw((void *) cmd); /* pull in one cache line at least */ 443 444 *cmd++ = 0xAF; 445 *cmd++ = 0x6B; 446 *cmd++ = (uint8_t) ((dev_addr >> 16) & 0xFF); 447 *cmd++ = (uint8_t) ((dev_addr >> 8) & 0xFF); 448 *cmd++ = (uint8_t) ((dev_addr) & 0xFF); 449 450 cmd_pixels_count_byte = cmd++; /* we'll know this later */ 451 cmd_pixel_start = pixel; 452 453 raw_pixels_count_byte = cmd++; /* we'll know this later */ 454 raw_pixel_start = pixel; 455 456 cmd_pixel_end = pixel + min(MAX_CMD_PIXELS + 1, 457 min((int)(pixel_end - pixel), 458 (int)(cmd_buffer_end - cmd) / bpp)); 459 460 prefetch_range((void *) pixel, (cmd_pixel_end - pixel) * bpp); 461 462 while (pixel < cmd_pixel_end) { 463 const uint16_t * const repeating_pixel = pixel; 464 465 *(uint16_t *)cmd = cpu_to_be16p(pixel); 466 cmd += 2; 467 pixel++; 468 469 if (unlikely((pixel < cmd_pixel_end) && 470 (*pixel == *repeating_pixel))) { 471 /* go back and fill in raw pixel count */ 472 *raw_pixels_count_byte = ((repeating_pixel - 473 raw_pixel_start) + 1) & 0xFF; 474 475 while ((pixel < cmd_pixel_end) 476 && (*pixel == *repeating_pixel)) { 477 pixel++; 478 } 479 480 /* immediately after raw data is repeat byte */ 481 *cmd++ = ((pixel - repeating_pixel) - 1) & 0xFF; 482 483 /* Then start another raw pixel span */ 484 raw_pixel_start = pixel; 485 raw_pixels_count_byte = cmd++; 486 } 487 } 488 489 if (pixel > raw_pixel_start) { 490 /* finalize last RAW span */ 491 *raw_pixels_count_byte = (pixel-raw_pixel_start) & 0xFF; 492 } 493 494 *cmd_pixels_count_byte = (pixel - cmd_pixel_start) & 0xFF; 495 dev_addr += (pixel - cmd_pixel_start) * bpp; 496 } 497 498 if (cmd_buffer_end <= MIN_RLX_CMD_BYTES + cmd) { 499 /* Fill leftover bytes with no-ops */ 500 if (cmd_buffer_end > cmd) 501 memset(cmd, 0xAF, cmd_buffer_end - cmd); 502 cmd = (uint8_t *) cmd_buffer_end; 503 } 504 505 *command_buffer_ptr = cmd; 506 *pixel_start_ptr = pixel; 507 *device_address_ptr = dev_addr; 508 509 return; 510 } 511 512 /* 513 * There are 3 copies of every pixel: The front buffer that the fbdev 514 * client renders to, the actual framebuffer across the USB bus in hardware 515 * (that we can only write to, slowly, and can never read), and (optionally) 516 * our shadow copy that tracks what's been sent to that hardware buffer. 
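 *
 * In dlfb_render_hline() below these are, respectively, 'front' (an offset
 * into info->fix.smem_start), the device address dev->base16 + byte_offset,
 * and dev->backing_buffer (NULL when the shadow is disabled).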
517 */ 518 static int dlfb_render_hline(struct dlfb_data *dev, struct urb **urb_ptr, 519 const char *front, char **urb_buf_ptr, 520 u32 byte_offset, u32 byte_width, 521 int *ident_ptr, int *sent_ptr) 522 { 523 const u8 *line_start, *line_end, *next_pixel; 524 u32 dev_addr = dev->base16 + byte_offset; 525 struct urb *urb = *urb_ptr; 526 u8 *cmd = *urb_buf_ptr; 527 u8 *cmd_end = (u8 *) urb->transfer_buffer + urb->transfer_buffer_length; 528 529 line_start = (u8 *) (front + byte_offset); 530 next_pixel = line_start; 531 line_end = next_pixel + byte_width; 532 533 if (dev->backing_buffer) { 534 int offset; 535 const u8 *back_start = (u8 *) (dev->backing_buffer 536 + byte_offset); 537 538 *ident_ptr += dlfb_trim_hline(back_start, &next_pixel, 539 &byte_width); 540 541 offset = next_pixel - line_start; 542 line_end = next_pixel + byte_width; 543 dev_addr += offset; 544 back_start += offset; 545 line_start += offset; 546 547 memcpy((char *)back_start, (char *) line_start, 548 byte_width); 549 } 550 551 while (next_pixel < line_end) { 552 553 dlfb_compress_hline((const uint16_t **) &next_pixel, 554 (const uint16_t *) line_end, &dev_addr, 555 (u8 **) &cmd, (u8 *) cmd_end); 556 557 if (cmd >= cmd_end) { 558 int len = cmd - (u8 *) urb->transfer_buffer; 559 if (dlfb_submit_urb(dev, urb, len)) 560 return 1; /* lost pixels is set */ 561 *sent_ptr += len; 562 urb = dlfb_get_urb(dev); 563 if (!urb) 564 return 1; /* lost_pixels is set */ 565 *urb_ptr = urb; 566 cmd = urb->transfer_buffer; 567 cmd_end = &cmd[urb->transfer_buffer_length]; 568 } 569 } 570 571 *urb_buf_ptr = cmd; 572 573 return 0; 574 } 575 576 static int dlfb_handle_damage(struct dlfb_data *dev, int x, int y, 577 int width, int height, char *data) 578 { 579 int i, ret; 580 char *cmd; 581 cycles_t start_cycles, end_cycles; 582 int bytes_sent = 0; 583 int bytes_identical = 0; 584 struct urb *urb; 585 int aligned_x; 586 587 start_cycles = get_cycles(); 588 589 aligned_x = DL_ALIGN_DOWN(x, sizeof(unsigned long)); 590 width = DL_ALIGN_UP(width + (x-aligned_x), sizeof(unsigned long)); 591 x = aligned_x; 592 593 if ((width <= 0) || 594 (x + width > dev->info->var.xres) || 595 (y + height > dev->info->var.yres)) 596 return -EINVAL; 597 598 if (!atomic_read(&dev->usb_active)) 599 return 0; 600 601 urb = dlfb_get_urb(dev); 602 if (!urb) 603 return 0; 604 cmd = urb->transfer_buffer; 605 606 for (i = y; i < y + height ; i++) { 607 const int line_offset = dev->info->fix.line_length * i; 608 const int byte_offset = line_offset + (x * BPP); 609 610 if (dlfb_render_hline(dev, &urb, 611 (char *) dev->info->fix.smem_start, 612 &cmd, byte_offset, width * BPP, 613 &bytes_identical, &bytes_sent)) 614 goto error; 615 } 616 617 if (cmd > (char *) urb->transfer_buffer) { 618 /* Send partial buffer remaining before exiting */ 619 int len = cmd - (char *) urb->transfer_buffer; 620 ret = dlfb_submit_urb(dev, urb, len); 621 bytes_sent += len; 622 } else 623 dlfb_urb_completion(urb); 624 625 error: 626 atomic_add(bytes_sent, &dev->bytes_sent); 627 atomic_add(bytes_identical, &dev->bytes_identical); 628 atomic_add(width*height*2, &dev->bytes_rendered); 629 end_cycles = get_cycles(); 630 atomic_add(((unsigned int) ((end_cycles - start_cycles) 631 >> 10)), /* Kcycles */ 632 &dev->cpu_kcycles_used); 633 634 return 0; 635 } 636 637 /* 638 * Path triggered by usermode clients who write to filesystem 639 * e.g. cat filename > /dev/fb1 640 * Not used by X Windows or text-mode console. But useful for testing. 641 * Slow because of extra copy and we must assume all pixels dirty. 
642 */ 643 static ssize_t dlfb_ops_write(struct fb_info *info, const char __user *buf, 644 size_t count, loff_t *ppos) 645 { 646 ssize_t result; 647 struct dlfb_data *dev = info->par; 648 u32 offset = (u32) *ppos; 649 650 result = fb_sys_write(info, buf, count, ppos); 651 652 if (result > 0) { 653 int start = max((int)(offset / info->fix.line_length), 0); 654 int lines = min((u32)((result / info->fix.line_length) + 1), 655 (u32)info->var.yres); 656 657 dlfb_handle_damage(dev, 0, start, info->var.xres, 658 lines, info->screen_base); 659 } 660 661 return result; 662 } 663 664 /* hardware has native COPY command (see libdlo), but not worth it for fbcon */ 665 static void dlfb_ops_copyarea(struct fb_info *info, 666 const struct fb_copyarea *area) 667 { 668 669 struct dlfb_data *dev = info->par; 670 671 sys_copyarea(info, area); 672 673 dlfb_handle_damage(dev, area->dx, area->dy, 674 area->width, area->height, info->screen_base); 675 } 676 677 static void dlfb_ops_imageblit(struct fb_info *info, 678 const struct fb_image *image) 679 { 680 struct dlfb_data *dev = info->par; 681 682 sys_imageblit(info, image); 683 684 dlfb_handle_damage(dev, image->dx, image->dy, 685 image->width, image->height, info->screen_base); 686 } 687 688 static void dlfb_ops_fillrect(struct fb_info *info, 689 const struct fb_fillrect *rect) 690 { 691 struct dlfb_data *dev = info->par; 692 693 sys_fillrect(info, rect); 694 695 dlfb_handle_damage(dev, rect->dx, rect->dy, rect->width, 696 rect->height, info->screen_base); 697 } 698 699 /* 700 * NOTE: fb_defio.c is holding info->fbdefio.mutex 701 * Touching ANY framebuffer memory that triggers a page fault 702 * in fb_defio will cause a deadlock, when it also tries to 703 * grab the same mutex. 704 */ 705 static void dlfb_dpy_deferred_io(struct fb_info *info, 706 struct list_head *pagelist) 707 { 708 struct page *cur; 709 struct fb_deferred_io *fbdefio = info->fbdefio; 710 struct dlfb_data *dev = info->par; 711 struct urb *urb; 712 char *cmd; 713 cycles_t start_cycles, end_cycles; 714 int bytes_sent = 0; 715 int bytes_identical = 0; 716 int bytes_rendered = 0; 717 718 if (!fb_defio) 719 return; 720 721 if (!atomic_read(&dev->usb_active)) 722 return; 723 724 start_cycles = get_cycles(); 725 726 urb = dlfb_get_urb(dev); 727 if (!urb) 728 return; 729 730 cmd = urb->transfer_buffer; 731 732 /* walk the written page list and render each to device */ 733 list_for_each_entry(cur, &fbdefio->pagelist, lru) { 734 735 if (dlfb_render_hline(dev, &urb, (char *) info->fix.smem_start, 736 &cmd, cur->index << PAGE_SHIFT, 737 PAGE_SIZE, &bytes_identical, &bytes_sent)) 738 goto error; 739 bytes_rendered += PAGE_SIZE; 740 } 741 742 if (cmd > (char *) urb->transfer_buffer) { 743 /* Send partial buffer remaining before exiting */ 744 int len = cmd - (char *) urb->transfer_buffer; 745 dlfb_submit_urb(dev, urb, len); 746 bytes_sent += len; 747 } else 748 dlfb_urb_completion(urb); 749 750 error: 751 atomic_add(bytes_sent, &dev->bytes_sent); 752 atomic_add(bytes_identical, &dev->bytes_identical); 753 atomic_add(bytes_rendered, &dev->bytes_rendered); 754 end_cycles = get_cycles(); 755 atomic_add(((unsigned int) ((end_cycles - start_cycles) 756 >> 10)), /* Kcycles */ 757 &dev->cpu_kcycles_used); 758 } 759 760 static int dlfb_get_edid(struct dlfb_data *dev, char *edid, int len) 761 { 762 int i; 763 int ret; 764 char *rbuf; 765 766 rbuf = kmalloc(2, GFP_KERNEL); 767 if (!rbuf) 768 return 0; 769 770 for (i = 0; i < len; i++) { 771 ret = usb_control_msg(dev->udev, 772 usb_rcvctrlpipe(dev->udev, 0), (0x02), 
				       (0x80 | (0x02 << 5)), i << 8, 0xA1,
				       rbuf, 2, HZ);
		if (ret < 1) {
			pr_err("Read EDID byte %d failed err %x\n", i, ret);
			i--;
			break;
		}
		edid[i] = rbuf[1];
	}

	kfree(rbuf);

	return i;
}

static int dlfb_ops_ioctl(struct fb_info *info, unsigned int cmd,
				unsigned long arg)
{

	struct dlfb_data *dev = info->par;

	if (!atomic_read(&dev->usb_active))
		return 0;

	/* TODO: Update X server to get this from sysfs instead */
	if (cmd == DLFB_IOCTL_RETURN_EDID) {
		void __user *edid = (void __user *)arg;
		if (copy_to_user(edid, dev->edid, dev->edid_size))
			return -EFAULT;
		return 0;
	}

	/*
	 * TODO: Help propose a standard fb.h ioctl to report mmap damage
	 * (an illustrative usage sketch is in the comment at the end of
	 * this file)
	 */
	if (cmd == DLFB_IOCTL_REPORT_DAMAGE) {
		struct dloarea area;

		if (copy_from_user(&area, (void __user *)arg,
				   sizeof(struct dloarea)))
			return -EFAULT;

		/*
		 * If we have a damage-aware client, turn fb_defio "off"
		 * to avoid the perf impact of unnecessary page fault handling.
		 * Done by resetting the delay for this fb_info to a very
		 * long period. Pages will become writable and stay that way.
		 * Reset to normal value when all clients have closed this fb.
		 */
		if (info->fbdefio)
			info->fbdefio->delay = DL_DEFIO_WRITE_DISABLE;

		if (area.x < 0)
			area.x = 0;

		if (area.x > info->var.xres)
			area.x = info->var.xres;

		if (area.y < 0)
			area.y = 0;

		if (area.y > info->var.yres)
			area.y = info->var.yres;

		dlfb_handle_damage(dev, area.x, area.y, area.w, area.h,
				   info->screen_base);
	}

	return 0;
}

/* taken from vesafb */
static int
dlfb_ops_setcolreg(unsigned regno, unsigned red, unsigned green,
		   unsigned blue, unsigned transp, struct fb_info *info)
{
	int err = 0;

	if (regno >= info->cmap.len)
		return 1;

	if (regno < 16) {
		if (info->var.red.offset == 10) {
			/* 1:5:5:5 */
			((u32 *) (info->pseudo_palette))[regno] =
				((red & 0xf800) >> 1) |
				((green & 0xf800) >> 6) | ((blue & 0xf800) >> 11);
		} else {
			/* 0:5:6:5 */
			((u32 *) (info->pseudo_palette))[regno] =
				((red & 0xf800)) |
				((green & 0xfc00) >> 5) | ((blue & 0xf800) >> 11);
		}
	}

	return err;
}

/*
 * It's common for several clients to have the framebuffer open simultaneously.
 * e.g. both fbcon and X. Makes things interesting.
 * Assumes caller is holding info->lock (for open and release at least)
 */
static int dlfb_ops_open(struct fb_info *info, int user)
{
	struct dlfb_data *dev = info->par;

	/*
	 * fbcon aggressively connects to the first framebuffer it finds,
	 * preventing other clients (X) from working properly. Usually
	 * not what the user wants. Can be disabled with the 'console'
	 * module option.
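	 * (Here 'user' == 0 means an in-kernel client such as fbcon; opens
	 * of /dev/fbN from userspace reach this handler with user != 0.)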
	 */
	if ((user == 0) && (!console))
		return -EBUSY;

	/* If the USB device is gone, we don't accept new opens */
	if (dev->virtualized)
		return -ENODEV;

	dev->fb_count++;

	kref_get(&dev->kref);

	if (fb_defio && (info->fbdefio == NULL)) {
		/* enable defio at last moment if not disabled by client */

		struct fb_deferred_io *fbdefio;

		fbdefio = kzalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);

		if (fbdefio) {
			fbdefio->delay = DL_DEFIO_WRITE_DELAY;
			fbdefio->deferred_io = dlfb_dpy_deferred_io;
		}

		info->fbdefio = fbdefio;
		fb_deferred_io_init(info);
	}

	pr_notice("open /dev/fb%d user=%d fb_info=%p count=%d\n",
		  info->node, user, info, dev->fb_count);

	return 0;
}

/*
 * Called when all client interfaces to start transactions have been disabled,
 * and all references to our device instance (dlfb_data) are released.
 * Every transaction must have a reference, so we know we are fully spun down
 */
static void dlfb_free(struct kref *kref)
{
	struct dlfb_data *dev = container_of(kref, struct dlfb_data, kref);

	if (dev->backing_buffer)
		vfree(dev->backing_buffer);

	kfree(dev->edid);

	pr_warn("freeing dlfb_data %p\n", dev);

	kfree(dev);
}

static void dlfb_release_urb_work(struct work_struct *work)
{
	struct urb_node *unode = container_of(work, struct urb_node,
					      release_urb_work.work);

	up(&unode->dev->urbs.limit_sem);
}

static void dlfb_free_framebuffer(struct dlfb_data *dev)
{
	struct fb_info *info = dev->info;

	if (info) {
		int node = info->node;

		unregister_framebuffer(info);

		if (info->cmap.len != 0)
			fb_dealloc_cmap(&info->cmap);
		if (info->monspecs.modedb)
			fb_destroy_modedb(info->monspecs.modedb);
		if (info->screen_base)
			vfree(info->screen_base);

		fb_destroy_modelist(&info->modelist);

		dev->info = NULL;

		/* Assume info structure is freed after this point */
		framebuffer_release(info);

		pr_warn("fb_info for /dev/fb%d has been freed\n", node);
	}

	/* ref taken in probe() as part of registering framebuffer */
	kref_put(&dev->kref, dlfb_free);
}

static void dlfb_free_framebuffer_work(struct work_struct *work)
{
	struct dlfb_data *dev = container_of(work, struct dlfb_data,
					     free_framebuffer_work.work);
	dlfb_free_framebuffer(dev);
}

/*
 * Assumes caller is holding info->lock mutex (for open and release at least)
 */
static int dlfb_ops_release(struct fb_info *info, int user)
{
	struct dlfb_data *dev = info->par;

	dev->fb_count--;

	/* We can't free fb_info here - fbmem will touch it when we return */
	if (dev->virtualized && (dev->fb_count == 0))
		schedule_delayed_work(&dev->free_framebuffer_work, HZ);

	if ((dev->fb_count == 0) && (info->fbdefio)) {
		fb_deferred_io_cleanup(info);
		kfree(info->fbdefio);
		info->fbdefio = NULL;
		info->fbops->fb_mmap = dlfb_ops_mmap;
	}

	pr_warn("released /dev/fb%d user=%d count=%d\n",
		info->node, user, dev->fb_count);

	kref_put(&dev->kref, dlfb_free);

	return 0;
}

/*
 * Check whether a video mode is supported by the DisplayLink chip
 * We start from monitor's modes, so don't need to filter that here
 */
static int dlfb_is_valid_mode(struct fb_videomode *mode,
			      struct fb_info *info)
{
	struct dlfb_data *dev =
info->par; 1015 1016 if (mode->xres * mode->yres > dev->sku_pixel_limit) { 1017 pr_warn("%dx%d beyond chip capabilities\n", 1018 mode->xres, mode->yres); 1019 return 0; 1020 } 1021 1022 pr_info("%dx%d @ %d Hz valid mode\n", mode->xres, mode->yres, 1023 mode->refresh); 1024 1025 return 1; 1026 } 1027 1028 static void dlfb_var_color_format(struct fb_var_screeninfo *var) 1029 { 1030 const struct fb_bitfield red = { 11, 5, 0 }; 1031 const struct fb_bitfield green = { 5, 6, 0 }; 1032 const struct fb_bitfield blue = { 0, 5, 0 }; 1033 1034 var->bits_per_pixel = 16; 1035 var->red = red; 1036 var->green = green; 1037 var->blue = blue; 1038 } 1039 1040 static int dlfb_ops_check_var(struct fb_var_screeninfo *var, 1041 struct fb_info *info) 1042 { 1043 struct fb_videomode mode; 1044 1045 /* TODO: support dynamically changing framebuffer size */ 1046 if ((var->xres * var->yres * 2) > info->fix.smem_len) 1047 return -EINVAL; 1048 1049 /* set device-specific elements of var unrelated to mode */ 1050 dlfb_var_color_format(var); 1051 1052 fb_var_to_videomode(&mode, var); 1053 1054 if (!dlfb_is_valid_mode(&mode, info)) 1055 return -EINVAL; 1056 1057 return 0; 1058 } 1059 1060 static int dlfb_ops_set_par(struct fb_info *info) 1061 { 1062 struct dlfb_data *dev = info->par; 1063 int result; 1064 u16 *pix_framebuffer; 1065 int i; 1066 1067 pr_notice("set_par mode %dx%d\n", info->var.xres, info->var.yres); 1068 1069 result = dlfb_set_video_mode(dev, &info->var); 1070 1071 if ((result == 0) && (dev->fb_count == 0)) { 1072 1073 /* paint greenscreen */ 1074 1075 pix_framebuffer = (u16 *) info->screen_base; 1076 for (i = 0; i < info->fix.smem_len / 2; i++) 1077 pix_framebuffer[i] = 0x37e6; 1078 1079 dlfb_handle_damage(dev, 0, 0, info->var.xres, info->var.yres, 1080 info->screen_base); 1081 } 1082 1083 return result; 1084 } 1085 1086 /* To fonzi the jukebox (e.g. 
make blanking changes take effect) */ 1087 static char *dlfb_dummy_render(char *buf) 1088 { 1089 *buf++ = 0xAF; 1090 *buf++ = 0x6A; /* copy */ 1091 *buf++ = 0x00; /* from address*/ 1092 *buf++ = 0x00; 1093 *buf++ = 0x00; 1094 *buf++ = 0x01; /* one pixel */ 1095 *buf++ = 0x00; /* to address */ 1096 *buf++ = 0x00; 1097 *buf++ = 0x00; 1098 return buf; 1099 } 1100 1101 /* 1102 * In order to come back from full DPMS off, we need to set the mode again 1103 */ 1104 static int dlfb_ops_blank(int blank_mode, struct fb_info *info) 1105 { 1106 struct dlfb_data *dev = info->par; 1107 char *bufptr; 1108 struct urb *urb; 1109 1110 pr_info("/dev/fb%d FB_BLANK mode %d --> %d\n", 1111 info->node, dev->blank_mode, blank_mode); 1112 1113 if ((dev->blank_mode == FB_BLANK_POWERDOWN) && 1114 (blank_mode != FB_BLANK_POWERDOWN)) { 1115 1116 /* returning from powerdown requires a fresh modeset */ 1117 dlfb_set_video_mode(dev, &info->var); 1118 } 1119 1120 urb = dlfb_get_urb(dev); 1121 if (!urb) 1122 return 0; 1123 1124 bufptr = (char *) urb->transfer_buffer; 1125 bufptr = dlfb_vidreg_lock(bufptr); 1126 bufptr = dlfb_blanking(bufptr, blank_mode); 1127 bufptr = dlfb_vidreg_unlock(bufptr); 1128 1129 /* seems like a render op is needed to have blank change take effect */ 1130 bufptr = dlfb_dummy_render(bufptr); 1131 1132 dlfb_submit_urb(dev, urb, bufptr - 1133 (char *) urb->transfer_buffer); 1134 1135 dev->blank_mode = blank_mode; 1136 1137 return 0; 1138 } 1139 1140 static struct fb_ops dlfb_ops = { 1141 .owner = THIS_MODULE, 1142 .fb_read = fb_sys_read, 1143 .fb_write = dlfb_ops_write, 1144 .fb_setcolreg = dlfb_ops_setcolreg, 1145 .fb_fillrect = dlfb_ops_fillrect, 1146 .fb_copyarea = dlfb_ops_copyarea, 1147 .fb_imageblit = dlfb_ops_imageblit, 1148 .fb_mmap = dlfb_ops_mmap, 1149 .fb_ioctl = dlfb_ops_ioctl, 1150 .fb_open = dlfb_ops_open, 1151 .fb_release = dlfb_ops_release, 1152 .fb_blank = dlfb_ops_blank, 1153 .fb_check_var = dlfb_ops_check_var, 1154 .fb_set_par = dlfb_ops_set_par, 1155 }; 1156 1157 1158 /* 1159 * Assumes &info->lock held by caller 1160 * Assumes no active clients have framebuffer open 1161 */ 1162 static int dlfb_realloc_framebuffer(struct dlfb_data *dev, struct fb_info *info) 1163 { 1164 int retval = -ENOMEM; 1165 int old_len = info->fix.smem_len; 1166 int new_len; 1167 unsigned char *old_fb = info->screen_base; 1168 unsigned char *new_fb; 1169 unsigned char *new_back = NULL; 1170 1171 pr_warn("Reallocating framebuffer. Addresses will change!\n"); 1172 1173 new_len = info->fix.line_length * info->var.yres; 1174 1175 if (PAGE_ALIGN(new_len) > old_len) { 1176 /* 1177 * Alloc system memory for virtual framebuffer 1178 */ 1179 new_fb = vmalloc(new_len); 1180 if (!new_fb) { 1181 pr_err("Virtual framebuffer alloc failed\n"); 1182 goto error; 1183 } 1184 1185 if (info->screen_base) { 1186 memcpy(new_fb, old_fb, old_len); 1187 vfree(info->screen_base); 1188 } 1189 1190 info->screen_base = new_fb; 1191 info->fix.smem_len = PAGE_ALIGN(new_len); 1192 info->fix.smem_start = (unsigned long) new_fb; 1193 info->flags = udlfb_info_flags; 1194 1195 /* 1196 * Second framebuffer copy to mirror the framebuffer state 1197 * on the physical USB device. We can function without this. 
1198 * But with imperfect damage info we may send pixels over USB 1199 * that were, in fact, unchanged - wasting limited USB bandwidth 1200 */ 1201 if (shadow) 1202 new_back = vzalloc(new_len); 1203 if (!new_back) 1204 pr_info("No shadow/backing buffer allocated\n"); 1205 else { 1206 if (dev->backing_buffer) 1207 vfree(dev->backing_buffer); 1208 dev->backing_buffer = new_back; 1209 } 1210 } 1211 1212 retval = 0; 1213 1214 error: 1215 return retval; 1216 } 1217 1218 /* 1219 * 1) Get EDID from hw, or use sw default 1220 * 2) Parse into various fb_info structs 1221 * 3) Allocate virtual framebuffer memory to back highest res mode 1222 * 1223 * Parses EDID into three places used by various parts of fbdev: 1224 * fb_var_screeninfo contains the timing of the monitor's preferred mode 1225 * fb_info.monspecs is full parsed EDID info, including monspecs.modedb 1226 * fb_info.modelist is a linked list of all monitor & VESA modes which work 1227 * 1228 * If EDID is not readable/valid, then modelist is all VESA modes, 1229 * monspecs is NULL, and fb_var_screeninfo is set to safe VESA mode 1230 * Returns 0 if successful 1231 */ 1232 static int dlfb_setup_modes(struct dlfb_data *dev, 1233 struct fb_info *info, 1234 char *default_edid, size_t default_edid_size) 1235 { 1236 int i; 1237 const struct fb_videomode *default_vmode = NULL; 1238 int result = 0; 1239 char *edid; 1240 int tries = 3; 1241 1242 if (info->dev) /* only use mutex if info has been registered */ 1243 mutex_lock(&info->lock); 1244 1245 edid = kmalloc(EDID_LENGTH, GFP_KERNEL); 1246 if (!edid) { 1247 result = -ENOMEM; 1248 goto error; 1249 } 1250 1251 fb_destroy_modelist(&info->modelist); 1252 memset(&info->monspecs, 0, sizeof(info->monspecs)); 1253 1254 /* 1255 * Try to (re)read EDID from hardware first 1256 * EDID data may return, but not parse as valid 1257 * Try again a few times, in case of e.g. 
analog cable noise 1258 */ 1259 while (tries--) { 1260 1261 i = dlfb_get_edid(dev, edid, EDID_LENGTH); 1262 1263 if (i >= EDID_LENGTH) 1264 fb_edid_to_monspecs(edid, &info->monspecs); 1265 1266 if (info->monspecs.modedb_len > 0) { 1267 dev->edid = edid; 1268 dev->edid_size = i; 1269 break; 1270 } 1271 } 1272 1273 /* If that fails, use a previously returned EDID if available */ 1274 if (info->monspecs.modedb_len == 0) { 1275 1276 pr_err("Unable to get valid EDID from device/display\n"); 1277 1278 if (dev->edid) { 1279 fb_edid_to_monspecs(dev->edid, &info->monspecs); 1280 if (info->monspecs.modedb_len > 0) 1281 pr_err("Using previously queried EDID\n"); 1282 } 1283 } 1284 1285 /* If that fails, use the default EDID we were handed */ 1286 if (info->monspecs.modedb_len == 0) { 1287 if (default_edid_size >= EDID_LENGTH) { 1288 fb_edid_to_monspecs(default_edid, &info->monspecs); 1289 if (info->monspecs.modedb_len > 0) { 1290 memcpy(edid, default_edid, default_edid_size); 1291 dev->edid = edid; 1292 dev->edid_size = default_edid_size; 1293 pr_err("Using default/backup EDID\n"); 1294 } 1295 } 1296 } 1297 1298 /* If we've got modes, let's pick a best default mode */ 1299 if (info->monspecs.modedb_len > 0) { 1300 1301 for (i = 0; i < info->monspecs.modedb_len; i++) { 1302 if (dlfb_is_valid_mode(&info->monspecs.modedb[i], info)) 1303 fb_add_videomode(&info->monspecs.modedb[i], 1304 &info->modelist); 1305 else { 1306 if (i == 0) 1307 /* if we've removed top/best mode */ 1308 info->monspecs.misc 1309 &= ~FB_MISC_1ST_DETAIL; 1310 } 1311 } 1312 1313 default_vmode = fb_find_best_display(&info->monspecs, 1314 &info->modelist); 1315 } 1316 1317 /* If everything else has failed, fall back to safe default mode */ 1318 if (default_vmode == NULL) { 1319 1320 struct fb_videomode fb_vmode = {0}; 1321 1322 /* 1323 * Add the standard VESA modes to our modelist 1324 * Since we don't have EDID, there may be modes that 1325 * overspec monitor and/or are incorrect aspect ratio, etc. 1326 * But at least the user has a chance to choose 1327 */ 1328 for (i = 0; i < VESA_MODEDB_SIZE; i++) { 1329 if (dlfb_is_valid_mode((struct fb_videomode *) 1330 &vesa_modes[i], info)) 1331 fb_add_videomode(&vesa_modes[i], 1332 &info->modelist); 1333 } 1334 1335 /* 1336 * default to resolution safe for projectors 1337 * (since they are most common case without EDID) 1338 */ 1339 fb_vmode.xres = 800; 1340 fb_vmode.yres = 600; 1341 fb_vmode.refresh = 60; 1342 default_vmode = fb_find_nearest_mode(&fb_vmode, 1343 &info->modelist); 1344 } 1345 1346 /* If we have good mode and no active clients*/ 1347 if ((default_vmode != NULL) && (dev->fb_count == 0)) { 1348 1349 fb_videomode_to_var(&info->var, default_vmode); 1350 dlfb_var_color_format(&info->var); 1351 1352 /* 1353 * with mode size info, we can now alloc our framebuffer. 
1354 */ 1355 memcpy(&info->fix, &dlfb_fix, sizeof(dlfb_fix)); 1356 info->fix.line_length = info->var.xres * 1357 (info->var.bits_per_pixel / 8); 1358 1359 result = dlfb_realloc_framebuffer(dev, info); 1360 1361 } else 1362 result = -EINVAL; 1363 1364 error: 1365 if (edid && (dev->edid != edid)) 1366 kfree(edid); 1367 1368 if (info->dev) 1369 mutex_unlock(&info->lock); 1370 1371 return result; 1372 } 1373 1374 static ssize_t metrics_bytes_rendered_show(struct device *fbdev, 1375 struct device_attribute *a, char *buf) { 1376 struct fb_info *fb_info = dev_get_drvdata(fbdev); 1377 struct dlfb_data *dev = fb_info->par; 1378 return snprintf(buf, PAGE_SIZE, "%u\n", 1379 atomic_read(&dev->bytes_rendered)); 1380 } 1381 1382 static ssize_t metrics_bytes_identical_show(struct device *fbdev, 1383 struct device_attribute *a, char *buf) { 1384 struct fb_info *fb_info = dev_get_drvdata(fbdev); 1385 struct dlfb_data *dev = fb_info->par; 1386 return snprintf(buf, PAGE_SIZE, "%u\n", 1387 atomic_read(&dev->bytes_identical)); 1388 } 1389 1390 static ssize_t metrics_bytes_sent_show(struct device *fbdev, 1391 struct device_attribute *a, char *buf) { 1392 struct fb_info *fb_info = dev_get_drvdata(fbdev); 1393 struct dlfb_data *dev = fb_info->par; 1394 return snprintf(buf, PAGE_SIZE, "%u\n", 1395 atomic_read(&dev->bytes_sent)); 1396 } 1397 1398 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev, 1399 struct device_attribute *a, char *buf) { 1400 struct fb_info *fb_info = dev_get_drvdata(fbdev); 1401 struct dlfb_data *dev = fb_info->par; 1402 return snprintf(buf, PAGE_SIZE, "%u\n", 1403 atomic_read(&dev->cpu_kcycles_used)); 1404 } 1405 1406 static ssize_t edid_show( 1407 struct file *filp, 1408 struct kobject *kobj, struct bin_attribute *a, 1409 char *buf, loff_t off, size_t count) { 1410 struct device *fbdev = container_of(kobj, struct device, kobj); 1411 struct fb_info *fb_info = dev_get_drvdata(fbdev); 1412 struct dlfb_data *dev = fb_info->par; 1413 1414 if (dev->edid == NULL) 1415 return 0; 1416 1417 if ((off >= dev->edid_size) || (count > dev->edid_size)) 1418 return 0; 1419 1420 if (off + count > dev->edid_size) 1421 count = dev->edid_size - off; 1422 1423 pr_info("sysfs edid copy %p to %p, %d bytes\n", 1424 dev->edid, buf, (int) count); 1425 1426 memcpy(buf, dev->edid, count); 1427 1428 return count; 1429 } 1430 1431 static ssize_t edid_store( 1432 struct file *filp, 1433 struct kobject *kobj, struct bin_attribute *a, 1434 char *src, loff_t src_off, size_t src_size) { 1435 struct device *fbdev = container_of(kobj, struct device, kobj); 1436 struct fb_info *fb_info = dev_get_drvdata(fbdev); 1437 struct dlfb_data *dev = fb_info->par; 1438 int ret; 1439 1440 /* We only support write of entire EDID at once, no offset*/ 1441 if ((src_size != EDID_LENGTH) || (src_off != 0)) 1442 return -EINVAL; 1443 1444 ret = dlfb_setup_modes(dev, fb_info, src, src_size); 1445 if (ret) 1446 return ret; 1447 1448 if (!dev->edid || memcmp(src, dev->edid, src_size)) 1449 return -EINVAL; 1450 1451 pr_info("sysfs written EDID is new default\n"); 1452 dlfb_ops_set_par(fb_info); 1453 return src_size; 1454 } 1455 1456 static ssize_t metrics_reset_store(struct device *fbdev, 1457 struct device_attribute *attr, 1458 const char *buf, size_t count) 1459 { 1460 struct fb_info *fb_info = dev_get_drvdata(fbdev); 1461 struct dlfb_data *dev = fb_info->par; 1462 1463 atomic_set(&dev->bytes_rendered, 0); 1464 atomic_set(&dev->bytes_identical, 0); 1465 atomic_set(&dev->bytes_sent, 0); 1466 atomic_set(&dev->cpu_kcycles_used, 0); 1467 
1468 return count; 1469 } 1470 1471 static struct bin_attribute edid_attr = { 1472 .attr.name = "edid", 1473 .attr.mode = 0666, 1474 .size = EDID_LENGTH, 1475 .read = edid_show, 1476 .write = edid_store 1477 }; 1478 1479 static struct device_attribute fb_device_attrs[] = { 1480 __ATTR_RO(metrics_bytes_rendered), 1481 __ATTR_RO(metrics_bytes_identical), 1482 __ATTR_RO(metrics_bytes_sent), 1483 __ATTR_RO(metrics_cpu_kcycles_used), 1484 __ATTR(metrics_reset, S_IWUSR, NULL, metrics_reset_store), 1485 }; 1486 1487 /* 1488 * This is necessary before we can communicate with the display controller. 1489 */ 1490 static int dlfb_select_std_channel(struct dlfb_data *dev) 1491 { 1492 int ret; 1493 u8 set_def_chn[] = { 0x57, 0xCD, 0xDC, 0xA7, 1494 0x1C, 0x88, 0x5E, 0x15, 1495 0x60, 0xFE, 0xC6, 0x97, 1496 0x16, 0x3D, 0x47, 0xF2 }; 1497 1498 ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), 1499 NR_USB_REQUEST_CHANNEL, 1500 (USB_DIR_OUT | USB_TYPE_VENDOR), 0, 0, 1501 set_def_chn, sizeof(set_def_chn), USB_CTRL_SET_TIMEOUT); 1502 return ret; 1503 } 1504 1505 static int dlfb_parse_vendor_descriptor(struct dlfb_data *dev, 1506 struct usb_interface *interface) 1507 { 1508 char *desc; 1509 char *buf; 1510 char *desc_end; 1511 1512 int total_len = 0; 1513 1514 buf = kzalloc(MAX_VENDOR_DESCRIPTOR_SIZE, GFP_KERNEL); 1515 if (!buf) 1516 return false; 1517 desc = buf; 1518 1519 total_len = usb_get_descriptor(interface_to_usbdev(interface), 1520 0x5f, /* vendor specific */ 1521 0, desc, MAX_VENDOR_DESCRIPTOR_SIZE); 1522 1523 /* if not found, look in configuration descriptor */ 1524 if (total_len < 0) { 1525 if (0 == usb_get_extra_descriptor(interface->cur_altsetting, 1526 0x5f, &desc)) 1527 total_len = (int) desc[0]; 1528 } 1529 1530 if (total_len > 5) { 1531 pr_info("vendor descriptor length:%x data:%02x %02x %02x %02x" \ 1532 "%02x %02x %02x %02x %02x %02x %02x\n", 1533 total_len, desc[0], 1534 desc[1], desc[2], desc[3], desc[4], desc[5], desc[6], 1535 desc[7], desc[8], desc[9], desc[10]); 1536 1537 if ((desc[0] != total_len) || /* descriptor length */ 1538 (desc[1] != 0x5f) || /* vendor descriptor type */ 1539 (desc[2] != 0x01) || /* version (2 bytes) */ 1540 (desc[3] != 0x00) || 1541 (desc[4] != total_len - 2)) /* length after type */ 1542 goto unrecognized; 1543 1544 desc_end = desc + total_len; 1545 desc += 5; /* the fixed header we've already parsed */ 1546 1547 while (desc < desc_end) { 1548 u8 length; 1549 u16 key; 1550 1551 key = le16_to_cpu(*((u16 *) desc)); 1552 desc += sizeof(u16); 1553 length = *desc; 1554 desc++; 1555 1556 switch (key) { 1557 case 0x0200: { /* max_area */ 1558 u32 max_area; 1559 max_area = le32_to_cpu(*((u32 *)desc)); 1560 pr_warn("DL chip limited to %d pixel modes\n", 1561 max_area); 1562 dev->sku_pixel_limit = max_area; 1563 break; 1564 } 1565 default: 1566 break; 1567 } 1568 desc += length; 1569 } 1570 } else { 1571 pr_info("vendor descriptor not available (%d)\n", total_len); 1572 } 1573 1574 goto success; 1575 1576 unrecognized: 1577 /* allow udlfb to load for now even if firmware unrecognized */ 1578 pr_err("Unrecognized vendor firmware descriptor\n"); 1579 1580 success: 1581 kfree(buf); 1582 return true; 1583 } 1584 1585 static void dlfb_init_framebuffer_work(struct work_struct *work); 1586 1587 static int dlfb_usb_probe(struct usb_interface *interface, 1588 const struct usb_device_id *id) 1589 { 1590 struct usb_device *usbdev; 1591 struct dlfb_data *dev = NULL; 1592 int retval = -ENOMEM; 1593 1594 /* usb initialization */ 1595 1596 usbdev = 
interface_to_usbdev(interface); 1597 1598 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 1599 if (dev == NULL) { 1600 dev_err(&interface->dev, "dlfb_usb_probe: failed alloc of dev struct\n"); 1601 goto error; 1602 } 1603 1604 kref_init(&dev->kref); /* matching kref_put in usb .disconnect fn */ 1605 1606 dev->udev = usbdev; 1607 dev->gdev = &usbdev->dev; /* our generic struct device * */ 1608 usb_set_intfdata(interface, dev); 1609 1610 pr_info("%s %s - serial #%s\n", 1611 usbdev->manufacturer, usbdev->product, usbdev->serial); 1612 pr_info("vid_%04x&pid_%04x&rev_%04x driver's dlfb_data struct at %p\n", 1613 usbdev->descriptor.idVendor, usbdev->descriptor.idProduct, 1614 usbdev->descriptor.bcdDevice, dev); 1615 pr_info("console enable=%d\n", console); 1616 pr_info("fb_defio enable=%d\n", fb_defio); 1617 pr_info("shadow enable=%d\n", shadow); 1618 1619 dev->sku_pixel_limit = 2048 * 1152; /* default to maximum */ 1620 1621 if (!dlfb_parse_vendor_descriptor(dev, interface)) { 1622 pr_err("firmware not recognized. Assume incompatible device\n"); 1623 goto error; 1624 } 1625 1626 if (pixel_limit) { 1627 pr_warn("DL chip limit of %d overridden" 1628 " by module param to %d\n", 1629 dev->sku_pixel_limit, pixel_limit); 1630 dev->sku_pixel_limit = pixel_limit; 1631 } 1632 1633 1634 if (!dlfb_alloc_urb_list(dev, WRITES_IN_FLIGHT, MAX_TRANSFER)) { 1635 retval = -ENOMEM; 1636 pr_err("dlfb_alloc_urb_list failed\n"); 1637 goto error; 1638 } 1639 1640 kref_get(&dev->kref); /* matching kref_put in free_framebuffer_work */ 1641 1642 /* We don't register a new USB class. Our client interface is fbdev */ 1643 1644 /* Workitem keep things fast & simple during USB enumeration */ 1645 INIT_DELAYED_WORK(&dev->init_framebuffer_work, 1646 dlfb_init_framebuffer_work); 1647 schedule_delayed_work(&dev->init_framebuffer_work, 0); 1648 1649 return 0; 1650 1651 error: 1652 if (dev) { 1653 1654 kref_put(&dev->kref, dlfb_free); /* ref for framebuffer */ 1655 kref_put(&dev->kref, dlfb_free); /* last ref from kref_init */ 1656 1657 /* dev has been deallocated. 
Do not dereference */ 1658 } 1659 1660 return retval; 1661 } 1662 1663 static void dlfb_init_framebuffer_work(struct work_struct *work) 1664 { 1665 struct dlfb_data *dev = container_of(work, struct dlfb_data, 1666 init_framebuffer_work.work); 1667 struct fb_info *info; 1668 int retval; 1669 int i; 1670 1671 /* allocates framebuffer driver structure, not framebuffer memory */ 1672 info = framebuffer_alloc(0, dev->gdev); 1673 if (!info) { 1674 retval = -ENOMEM; 1675 pr_err("framebuffer_alloc failed\n"); 1676 goto error; 1677 } 1678 1679 dev->info = info; 1680 info->par = dev; 1681 info->pseudo_palette = dev->pseudo_palette; 1682 info->fbops = &dlfb_ops; 1683 1684 retval = fb_alloc_cmap(&info->cmap, 256, 0); 1685 if (retval < 0) { 1686 pr_err("fb_alloc_cmap failed %x\n", retval); 1687 goto error; 1688 } 1689 1690 INIT_DELAYED_WORK(&dev->free_framebuffer_work, 1691 dlfb_free_framebuffer_work); 1692 1693 INIT_LIST_HEAD(&info->modelist); 1694 1695 retval = dlfb_setup_modes(dev, info, NULL, 0); 1696 if (retval != 0) { 1697 pr_err("unable to find common mode for display and adapter\n"); 1698 goto error; 1699 } 1700 1701 /* ready to begin using device */ 1702 1703 atomic_set(&dev->usb_active, 1); 1704 dlfb_select_std_channel(dev); 1705 1706 dlfb_ops_check_var(&info->var, info); 1707 dlfb_ops_set_par(info); 1708 1709 retval = register_framebuffer(info); 1710 if (retval < 0) { 1711 pr_err("register_framebuffer failed %d\n", retval); 1712 goto error; 1713 } 1714 1715 for (i = 0; i < ARRAY_SIZE(fb_device_attrs); i++) { 1716 retval = device_create_file(info->dev, &fb_device_attrs[i]); 1717 if (retval) { 1718 pr_warn("device_create_file failed %d\n", retval); 1719 } 1720 } 1721 1722 retval = device_create_bin_file(info->dev, &edid_attr); 1723 if (retval) { 1724 pr_warn("device_create_bin_file failed %d\n", retval); 1725 } 1726 1727 pr_info("DisplayLink USB device /dev/fb%d attached. %dx%d resolution." 1728 " Using %dK framebuffer memory\n", info->node, 1729 info->var.xres, info->var.yres, 1730 ((dev->backing_buffer) ? 1731 info->fix.smem_len * 2 : info->fix.smem_len) >> 10); 1732 return; 1733 1734 error: 1735 dlfb_free_framebuffer(dev); 1736 } 1737 1738 static void dlfb_usb_disconnect(struct usb_interface *interface) 1739 { 1740 struct dlfb_data *dev; 1741 struct fb_info *info; 1742 int i; 1743 1744 dev = usb_get_intfdata(interface); 1745 info = dev->info; 1746 1747 pr_info("USB disconnect starting\n"); 1748 1749 /* we virtualize until all fb clients release. 
Then we free */ 1750 dev->virtualized = true; 1751 1752 /* When non-active we'll update virtual framebuffer, but no new urbs */ 1753 atomic_set(&dev->usb_active, 0); 1754 1755 /* this function will wait for all in-flight urbs to complete */ 1756 dlfb_free_urb_list(dev); 1757 1758 if (info) { 1759 /* remove udlfb's sysfs interfaces */ 1760 for (i = 0; i < ARRAY_SIZE(fb_device_attrs); i++) 1761 device_remove_file(info->dev, &fb_device_attrs[i]); 1762 device_remove_bin_file(info->dev, &edid_attr); 1763 unlink_framebuffer(info); 1764 } 1765 1766 usb_set_intfdata(interface, NULL); 1767 dev->udev = NULL; 1768 dev->gdev = NULL; 1769 1770 /* if clients still have us open, will be freed on last close */ 1771 if (dev->fb_count == 0) 1772 schedule_delayed_work(&dev->free_framebuffer_work, 0); 1773 1774 /* release reference taken by kref_init in probe() */ 1775 kref_put(&dev->kref, dlfb_free); 1776 1777 /* consider dlfb_data freed */ 1778 1779 return; 1780 } 1781 1782 static struct usb_driver dlfb_driver = { 1783 .name = "udlfb", 1784 .probe = dlfb_usb_probe, 1785 .disconnect = dlfb_usb_disconnect, 1786 .id_table = id_table, 1787 }; 1788 1789 module_usb_driver(dlfb_driver); 1790 1791 static void dlfb_urb_completion(struct urb *urb) 1792 { 1793 struct urb_node *unode = urb->context; 1794 struct dlfb_data *dev = unode->dev; 1795 unsigned long flags; 1796 1797 /* sync/async unlink faults aren't errors */ 1798 if (urb->status) { 1799 if (!(urb->status == -ENOENT || 1800 urb->status == -ECONNRESET || 1801 urb->status == -ESHUTDOWN)) { 1802 pr_err("%s - nonzero write bulk status received: %d\n", 1803 __func__, urb->status); 1804 atomic_set(&dev->lost_pixels, 1); 1805 } 1806 } 1807 1808 urb->transfer_buffer_length = dev->urbs.size; /* reset to actual */ 1809 1810 spin_lock_irqsave(&dev->urbs.lock, flags); 1811 list_add_tail(&unode->entry, &dev->urbs.list); 1812 dev->urbs.available++; 1813 spin_unlock_irqrestore(&dev->urbs.lock, flags); 1814 1815 /* 1816 * When using fb_defio, we deadlock if up() is called 1817 * while another is waiting. So queue to another process. 
1818 */ 1819 if (fb_defio) 1820 schedule_delayed_work(&unode->release_urb_work, 0); 1821 else 1822 up(&dev->urbs.limit_sem); 1823 } 1824 1825 static void dlfb_free_urb_list(struct dlfb_data *dev) 1826 { 1827 int count = dev->urbs.count; 1828 struct list_head *node; 1829 struct urb_node *unode; 1830 struct urb *urb; 1831 int ret; 1832 unsigned long flags; 1833 1834 pr_notice("Freeing all render urbs\n"); 1835 1836 /* keep waiting and freeing, until we've got 'em all */ 1837 while (count--) { 1838 1839 /* Getting interrupted means a leak, but ok at disconnect */ 1840 ret = down_interruptible(&dev->urbs.limit_sem); 1841 if (ret) 1842 break; 1843 1844 spin_lock_irqsave(&dev->urbs.lock, flags); 1845 1846 node = dev->urbs.list.next; /* have reserved one with sem */ 1847 list_del_init(node); 1848 1849 spin_unlock_irqrestore(&dev->urbs.lock, flags); 1850 1851 unode = list_entry(node, struct urb_node, entry); 1852 urb = unode->urb; 1853 1854 /* Free each separately allocated piece */ 1855 usb_free_coherent(urb->dev, dev->urbs.size, 1856 urb->transfer_buffer, urb->transfer_dma); 1857 usb_free_urb(urb); 1858 kfree(node); 1859 } 1860 1861 dev->urbs.count = 0; 1862 } 1863 1864 static int dlfb_alloc_urb_list(struct dlfb_data *dev, int count, size_t size) 1865 { 1866 int i = 0; 1867 struct urb *urb; 1868 struct urb_node *unode; 1869 char *buf; 1870 1871 spin_lock_init(&dev->urbs.lock); 1872 1873 dev->urbs.size = size; 1874 INIT_LIST_HEAD(&dev->urbs.list); 1875 1876 while (i < count) { 1877 unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL); 1878 if (!unode) 1879 break; 1880 unode->dev = dev; 1881 1882 INIT_DELAYED_WORK(&unode->release_urb_work, 1883 dlfb_release_urb_work); 1884 1885 urb = usb_alloc_urb(0, GFP_KERNEL); 1886 if (!urb) { 1887 kfree(unode); 1888 break; 1889 } 1890 unode->urb = urb; 1891 1892 buf = usb_alloc_coherent(dev->udev, MAX_TRANSFER, GFP_KERNEL, 1893 &urb->transfer_dma); 1894 if (!buf) { 1895 kfree(unode); 1896 usb_free_urb(urb); 1897 break; 1898 } 1899 1900 /* urb->transfer_buffer_length set to actual before submit */ 1901 usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, 1), 1902 buf, size, dlfb_urb_completion, unode); 1903 urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; 1904 1905 list_add_tail(&unode->entry, &dev->urbs.list); 1906 1907 i++; 1908 } 1909 1910 sema_init(&dev->urbs.limit_sem, i); 1911 dev->urbs.count = i; 1912 dev->urbs.available = i; 1913 1914 pr_notice("allocated %d %d byte urbs\n", i, (int) size); 1915 1916 return i; 1917 } 1918 1919 static struct urb *dlfb_get_urb(struct dlfb_data *dev) 1920 { 1921 int ret = 0; 1922 struct list_head *entry; 1923 struct urb_node *unode; 1924 struct urb *urb = NULL; 1925 unsigned long flags; 1926 1927 /* Wait for an in-flight buffer to complete and get re-queued */ 1928 ret = down_timeout(&dev->urbs.limit_sem, GET_URB_TIMEOUT); 1929 if (ret) { 1930 atomic_set(&dev->lost_pixels, 1); 1931 pr_warn("wait for urb interrupted: %x available: %d\n", 1932 ret, dev->urbs.available); 1933 goto error; 1934 } 1935 1936 spin_lock_irqsave(&dev->urbs.lock, flags); 1937 1938 BUG_ON(list_empty(&dev->urbs.list)); /* reserved one with limit_sem */ 1939 entry = dev->urbs.list.next; 1940 list_del_init(entry); 1941 dev->urbs.available--; 1942 1943 spin_unlock_irqrestore(&dev->urbs.lock, flags); 1944 1945 unode = list_entry(entry, struct urb_node, entry); 1946 urb = unode->urb; 1947 1948 error: 1949 return urb; 1950 } 1951 1952 static int dlfb_submit_urb(struct dlfb_data *dev, struct urb *urb, size_t len) 1953 { 1954 int ret; 1955 1956 
BUG_ON(len > dev->urbs.size); 1957 1958 urb->transfer_buffer_length = len; /* set to actual payload len */ 1959 ret = usb_submit_urb(urb, GFP_KERNEL); 1960 if (ret) { 1961 dlfb_urb_completion(urb); /* because no one else will */ 1962 atomic_set(&dev->lost_pixels, 1); 1963 pr_err("usb_submit_urb error %x\n", ret); 1964 } 1965 return ret; 1966 } 1967 1968 module_param(console, bool, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP); 1969 MODULE_PARM_DESC(console, "Allow fbcon to open framebuffer"); 1970 1971 module_param(fb_defio, bool, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP); 1972 MODULE_PARM_DESC(fb_defio, "Page fault detection of mmap writes"); 1973 1974 module_param(shadow, bool, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP); 1975 MODULE_PARM_DESC(shadow, "Shadow vid mem. Disable to save mem but lose perf"); 1976 1977 module_param(pixel_limit, int, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP); 1978 MODULE_PARM_DESC(pixel_limit, "Force limit on max mode (in x*y pixels)"); 1979 1980 MODULE_AUTHOR("Roberto De Ioris <roberto@unbit.it>, " 1981 "Jaya Kumar <jayakumar.lkml@gmail.com>, " 1982 "Bernie Thompson <bernie@plugable.com>"); 1983 MODULE_DESCRIPTION("DisplayLink kernel framebuffer driver"); 1984 MODULE_LICENSE("GPL"); 1985 1986
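
/*
 * Usage notes (illustrative sketches, not part of the driver proper; the
 * fb number and paths below are examples and depend on the system):
 *
 * Module parameters, e.g. to disable deferred I/O and the shadow buffer
 * and to cap modes at 1600x1200:
 *   modprobe udlfb fb_defio=0 shadow=0 pixel_limit=1920000
 *
 * The sysfs files created above, assuming the adapter registered as fb1:
 *   cat /sys/class/graphics/fb1/metrics_bytes_sent
 *   cat /sys/class/graphics/fb1/edid > monitor.edid
 *   echo 1 > /sys/class/graphics/fb1/metrics_reset
 *
 * A damage-aware client (see the DLFB_IOCTL_REPORT_DAMAGE TODO above)
 * could look roughly like this, assuming struct dloarea and the ioctl
 * number are available to userspace from the udlfb header:
 *
 *   int fd = open("/dev/fb1", O_RDWR);
 *   struct dloarea area = { .x = 0, .y = 0, .w = 800, .h = 600 };
 *   // draw into the mmap'd framebuffer, then report what changed
 *   ioctl(fd, DLFB_IOCTL_REPORT_DAMAGE, &area);
 */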