/*
 * Copyright (c) Intel Corp. 2007.
 * All Rights Reserved.
 *
 * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 * develop this driver.
 *
 * This file is part of the Vermilion Range fb driver.
 * The Vermilion Range fb driver is free software;
 * you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * The Vermilion Range fb driver is distributed
 * in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this driver; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Authors:
 *   Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 *   Michel Dänzer <michel-at-tungstengraphics-dot-com>
 *   Alan Hourihane <alanh-at-tungstengraphics-dot-com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fb.h>
#include <linux/pci.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>
#include <linux/mmzone.h>

/* #define VERMILION_DEBUG */

#include "vermilion.h"

#define MODULE_NAME "vmlfb"

#define VML_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)

static struct mutex vml_mutex;
static struct list_head global_no_mode;
static struct list_head global_has_mode;
static struct fb_ops vmlfb_ops;
static struct vml_sys *subsys = NULL;
static char *vml_default_mode = "1024x768@60";
static const struct fb_videomode defaultmode = {
	NULL, 60, 1024, 768, 12896, 144, 24, 29, 3, 136, 6,
	0, FB_VMODE_NONINTERLACED
};

static u32 vml_mem_requested = (10 * 1024 * 1024);
static u32 vml_mem_contig = (4 * 1024 * 1024);
static u32 vml_mem_min = (4 * 1024 * 1024);

static u32 vml_clocks[] = {
	6750,
	13500,
	27000,
	29700,
	37125,
	54000,
	59400,
	74250,
	120000,
	148500
};

static u32 vml_num_clocks = ARRAY_SIZE(vml_clocks);

/*
 * Allocate a contiguous vram area and make its linear kernel map
 * uncached.
 */

static int vmlfb_alloc_vram_area(struct vram_area *va, unsigned max_order,
				 unsigned min_order)
{
	gfp_t flags;
	unsigned long i;

	max_order++;
	do {
		/*
		 * Really try hard to get the needed memory.
		 * We need memory below the first 32MB, so we
		 * add the __GFP_DMA flag that guarantees that we are
		 * below the first 16MB.
		 */

		flags = __GFP_DMA | __GFP_HIGH | __GFP_KSWAPD_RECLAIM;
		va->logical = __get_free_pages(flags, --max_order);
	} while (va->logical == 0 && max_order > min_order);

	if (!va->logical)
		return -ENOMEM;

	va->phys = virt_to_phys((void *)va->logical);
	va->size = PAGE_SIZE << max_order;
	va->order = max_order;

	/*
	 * It seems like __get_free_pages only ups the usage count
	 * of the first page. This doesn't work with fault mapping, so
	 * up the usage count once more (XXX: should use split_page or
	 * compound page).
	 */

	memset((void *)va->logical, 0x00, va->size);
	for (i = va->logical; i < va->logical + va->size; i += PAGE_SIZE) {
		get_page(virt_to_page(i));
	}

	/*
	 * Change caching policy of the linear kernel map to avoid
	 * mapping type conflicts with user-space mappings.
	 */
	set_pages_uc(virt_to_page(va->logical), va->size >> PAGE_SHIFT);

	printk(KERN_DEBUG MODULE_NAME
	       ": Allocated %ld bytes vram area at 0x%08lx\n",
	       va->size, va->phys);

	return 0;
}

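/*
 * Illustrative note (not in the original driver): __get_free_pages() only
 * hands out power-of-two page runs. With 4KB pages and the default
 * vml_mem_requested of 10MB, the first call to vmlfb_alloc_vram_area()
 * starts at order 12 (a 16MB block, the smallest run covering the request)
 * and retries one order lower on each failure, down to min_order. Whatever
 * size it lands on, the rest of the request is covered by further areas in
 * vmlfb_alloc_vram() below.
 */
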
/*
 * Free a contiguous vram area and reset its linear kernel map
 * mapping type.
 */

static void vmlfb_free_vram_area(struct vram_area *va)
{
	unsigned long j;

	if (va->logical) {

		/*
		 * Reset the linear kernel map caching policy.
		 */

		set_pages_wb(virt_to_page(va->logical),
			     va->size >> PAGE_SHIFT);

		/*
		 * Decrease the usage count on the pages we've used
		 * to compensate for upping when allocating.
		 */

		for (j = va->logical; j < va->logical + va->size;
		     j += PAGE_SIZE) {
			(void)put_page_testzero(virt_to_page(j));
		}

		printk(KERN_DEBUG MODULE_NAME
		       ": Freeing %ld bytes vram area at 0x%08lx\n",
		       va->size, va->phys);
		free_pages(va->logical, va->order);

		va->logical = 0;
	}
}

/*
 * Free allocated vram.
 */

static void vmlfb_free_vram(struct vml_info *vinfo)
{
	int i;

	for (i = 0; i < vinfo->num_areas; ++i) {
		vmlfb_free_vram_area(&vinfo->vram[i]);
	}
	vinfo->num_areas = 0;
}

/*
 * Allocate vram. Currently we try to allocate contiguous areas from the
 * __GFP_DMA zone and puzzle them together. A better approach would be to
 * allocate one contiguous area for scanout and use one-page allocations for
 * offscreen areas. This requires user-space and GPU virtual mappings.
 */

static int vmlfb_alloc_vram(struct vml_info *vinfo,
			    size_t requested,
			    size_t min_total, size_t min_contig)
{
	int i, j;
	int order;
	int contiguous;
	int err;
	struct vram_area *va;
	struct vram_area *va2;

	vinfo->num_areas = 0;
	for (i = 0; i < VML_VRAM_AREAS; ++i) {
		va = &vinfo->vram[i];
		order = 0;

		while (requested > (PAGE_SIZE << order) && order < MAX_ORDER)
			order++;

		err = vmlfb_alloc_vram_area(va, order, 0);

		if (err)
			break;

		if (i == 0) {
			vinfo->vram_start = va->phys;
			vinfo->vram_logical = (void __iomem *) va->logical;
			vinfo->vram_contig_size = va->size;
			vinfo->num_areas = 1;
		} else {
			contiguous = 0;

			for (j = 0; j < i; ++j) {
				va2 = &vinfo->vram[j];
				if (va->phys + va->size == va2->phys ||
				    va2->phys + va2->size == va->phys) {
					contiguous = 1;
					break;
				}
			}

			if (contiguous) {
				vinfo->num_areas++;
				if (va->phys < vinfo->vram_start) {
					vinfo->vram_start = va->phys;
					vinfo->vram_logical =
						(void __iomem *)va->logical;
				}
				vinfo->vram_contig_size += va->size;
			} else {
				vmlfb_free_vram_area(va);
				break;
			}
		}

		if (requested < va->size)
			break;
		else
			requested -= va->size;
	}

	if (vinfo->vram_contig_size > min_total &&
	    vinfo->vram_contig_size > min_contig) {

		printk(KERN_DEBUG MODULE_NAME
		       ": Contiguous vram: %ld bytes at physical 0x%08lx.\n",
		       (unsigned long)vinfo->vram_contig_size,
		       (unsigned long)vinfo->vram_start);

		return 0;
	}

	printk(KERN_ERR MODULE_NAME
	       ": Could not allocate requested minimal amount of vram.\n");

	vmlfb_free_vram(vinfo);

	return -ENOMEM;
}

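/*
 * Illustrative note (not in the original driver): the contiguity test in
 * the loop above merges a new area with the block built so far when the
 * two abut physically in either direction. E.g. a 2MB area at 0x00800000
 * and a 2MB area at 0x00A00000 form one 4MB contiguous region starting at
 * 0x00800000, since 0x00800000 + 0x200000 == 0x00A00000.
 */
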
/*
 * Find the GPU to use with our display controller.
 */

static int vmlfb_get_gpu(struct vml_par *par)
{
	mutex_lock(&vml_mutex);

	par->gpu = pci_get_device(PCI_VENDOR_ID_INTEL, VML_DEVICE_GPU, NULL);

	if (!par->gpu) {
		mutex_unlock(&vml_mutex);
		return -ENODEV;
	}

	mutex_unlock(&vml_mutex);

	if (pci_enable_device(par->gpu) < 0)
		return -ENODEV;

	return 0;
}

/*
 * Find a contiguous vram area that contains a given offset from vram start.
 */
static int vmlfb_vram_offset(struct vml_info *vinfo, unsigned long offset)
{
	unsigned long aoffset;
	unsigned i;

	for (i = 0; i < vinfo->num_areas; ++i) {
		aoffset = offset - (vinfo->vram[i].phys - vinfo->vram_start);

		if (aoffset < vinfo->vram[i].size) {
			return 0;
		}
	}

	return -EINVAL;
}

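/*
 * Note on vmlfb_vram_offset() above (not in the original driver): the
 * containment test relies on unsigned wraparound. If the offset lies
 * before the start of an area, the subtraction wraps to a huge value and
 * fails the "aoffset < size" check, so a single comparison covers both
 * bounds of each area.
 */
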
/*
 * Remap the MMIO register spaces of the VDC and the GPU.
 */

static int vmlfb_enable_mmio(struct vml_par *par)
{
	int err;

	par->vdc_mem_base = pci_resource_start(par->vdc, 0);
	par->vdc_mem_size = pci_resource_len(par->vdc, 0);
	if (!request_mem_region(par->vdc_mem_base, par->vdc_mem_size, "vmlfb")) {
		printk(KERN_ERR MODULE_NAME
		       ": Could not claim display controller MMIO.\n");
		return -EBUSY;
	}
	par->vdc_mem = ioremap_nocache(par->vdc_mem_base, par->vdc_mem_size);
	if (par->vdc_mem == NULL) {
		printk(KERN_ERR MODULE_NAME
		       ": Could not map display controller MMIO.\n");
		err = -ENOMEM;
		goto out_err_0;
	}

	par->gpu_mem_base = pci_resource_start(par->gpu, 0);
	par->gpu_mem_size = pci_resource_len(par->gpu, 0);
	if (!request_mem_region(par->gpu_mem_base, par->gpu_mem_size, "vmlfb")) {
		printk(KERN_ERR MODULE_NAME ": Could not claim GPU MMIO.\n");
		err = -EBUSY;
		goto out_err_1;
	}
	par->gpu_mem = ioremap_nocache(par->gpu_mem_base, par->gpu_mem_size);
	if (par->gpu_mem == NULL) {
		printk(KERN_ERR MODULE_NAME ": Could not map GPU MMIO.\n");
		err = -ENOMEM;
		goto out_err_2;
	}

	return 0;

out_err_2:
	release_mem_region(par->gpu_mem_base, par->gpu_mem_size);
out_err_1:
	iounmap(par->vdc_mem);
out_err_0:
	release_mem_region(par->vdc_mem_base, par->vdc_mem_size);
	return err;
}

/*
 * Unmap the VDC and GPU register spaces.
 */

static void vmlfb_disable_mmio(struct vml_par *par)
{
	iounmap(par->gpu_mem);
	release_mem_region(par->gpu_mem_base, par->gpu_mem_size);
	iounmap(par->vdc_mem);
	release_mem_region(par->vdc_mem_base, par->vdc_mem_size);
}

/*
 * Release and uninit the VDC and GPU.
 */

static void vmlfb_release_devices(struct vml_par *par)
{
	if (atomic_dec_and_test(&par->refcount)) {
		pci_disable_device(par->gpu);
		pci_disable_device(par->vdc);
	}
}

/*
 * Free up allocated resources for a device.
 */

static void vml_pci_remove(struct pci_dev *dev)
{
	struct fb_info *info;
	struct vml_info *vinfo;
	struct vml_par *par;

	info = pci_get_drvdata(dev);
	if (info) {
		vinfo = container_of(info, struct vml_info, info);
		par = vinfo->par;
		mutex_lock(&vml_mutex);
		unregister_framebuffer(info);
		fb_dealloc_cmap(&info->cmap);
		vmlfb_free_vram(vinfo);
		vmlfb_disable_mmio(par);
		vmlfb_release_devices(par);
		kfree(vinfo);
		kfree(par);
		mutex_unlock(&vml_mutex);
	}
}

static void vmlfb_set_pref_pixel_format(struct fb_var_screeninfo *var)
{
	switch (var->bits_per_pixel) {
	case 16:
		var->blue.offset = 0;
		var->blue.length = 5;
		var->green.offset = 5;
		var->green.length = 5;
		var->red.offset = 10;
		var->red.length = 5;
		var->transp.offset = 15;
		var->transp.length = 1;
		break;
	case 32:
		var->blue.offset = 0;
		var->blue.length = 8;
		var->green.offset = 8;
		var->green.length = 8;
		var->red.offset = 16;
		var->red.length = 8;
		var->transp.offset = 24;
		var->transp.length = 0;
		break;
	default:
		break;
	}

	var->blue.msb_right = var->green.msb_right =
	    var->red.msb_right = var->transp.msb_right = 0;
}

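/*
 * Resulting pixel layouts (illustration, not in the original driver;
 * bit 15 / bit 31 leftmost):
 *
 *	16 bpp:	A RRRRR GGGGG BBBBB			(ARGB1555)
 *	32 bpp:	xxxxxxxx RRRRRRRR GGGGGGGG BBBBBBBB	(xRGB8888;
 *		transp.length == 0, so the top byte is ignored)
 */
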
/*
 * Device initialization.
 * We initialize one vml_par struct per device and one vml_info
 * struct per pipe. Currently we have only one pipe.
 */

static int vml_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct vml_info *vinfo;
	struct fb_info *info;
	struct vml_par *par;
	int err = 0;

	par = kzalloc(sizeof(*par), GFP_KERNEL);
	if (par == NULL)
		return -ENOMEM;

	vinfo = kzalloc(sizeof(*vinfo), GFP_KERNEL);
	if (vinfo == NULL) {
		err = -ENOMEM;
		goto out_err_0;
	}

	vinfo->par = par;
	par->vdc = dev;
	atomic_set(&par->refcount, 1);

	switch (id->device) {
	case VML_DEVICE_VDC:
		if ((err = vmlfb_get_gpu(par)))
			goto out_err_1;
		pci_set_drvdata(dev, &vinfo->info);
		break;
	default:
		err = -ENODEV;
		goto out_err_1;
	}

	info = &vinfo->info;
	info->flags = FBINFO_DEFAULT | FBINFO_PARTIAL_PAN_OK;

	err = vmlfb_enable_mmio(par);
	if (err)
		goto out_err_2;

	err = vmlfb_alloc_vram(vinfo, vml_mem_requested,
			       vml_mem_contig, vml_mem_min);
	if (err)
		goto out_err_3;

	strcpy(info->fix.id, "Vermilion Range");
	info->fix.mmio_start = 0;
	info->fix.mmio_len = 0;
	info->fix.smem_start = vinfo->vram_start;
	info->fix.smem_len = vinfo->vram_contig_size;
	info->fix.type = FB_TYPE_PACKED_PIXELS;
	info->fix.visual = FB_VISUAL_TRUECOLOR;
	info->fix.ypanstep = 1;
	info->fix.xpanstep = 1;
	info->fix.ywrapstep = 0;
	info->fix.accel = FB_ACCEL_NONE;
	info->screen_base = vinfo->vram_logical;
	info->pseudo_palette = vinfo->pseudo_palette;
	info->par = par;
	info->fbops = &vmlfb_ops;
	info->device = &dev->dev;

	INIT_LIST_HEAD(&vinfo->head);
	vinfo->pipe_disabled = 1;
	vinfo->cur_blank_mode = FB_BLANK_UNBLANK;

	info->var.grayscale = 0;
	info->var.bits_per_pixel = 16;
	vmlfb_set_pref_pixel_format(&info->var);

	if (!fb_find_mode
	    (&info->var, info, vml_default_mode, NULL, 0, &defaultmode, 16)) {
		printk(KERN_ERR MODULE_NAME ": Could not find initial mode\n");
	}

	if (fb_alloc_cmap(&info->cmap, 256, 1) < 0) {
		err = -ENOMEM;
		goto out_err_4;
	}

	err = register_framebuffer(info);
	if (err) {
		printk(KERN_ERR MODULE_NAME ": Register framebuffer error.\n");
		goto out_err_5;
	}

	printk(KERN_INFO MODULE_NAME ": Initialized vmlfb\n");

	return 0;

out_err_5:
	fb_dealloc_cmap(&info->cmap);
out_err_4:
	vmlfb_free_vram(vinfo);
out_err_3:
	vmlfb_disable_mmio(par);
out_err_2:
	vmlfb_release_devices(par);
out_err_1:
	kfree(vinfo);
out_err_0:
	kfree(par);
	return err;
}

static int vmlfb_open(struct fb_info *info, int user)
{
	/*
	 * Save registers here?
	 */
	return 0;
}

static int vmlfb_release(struct fb_info *info, int user)
{
	/*
	 * Restore registers here.
	 */

	return 0;
}

static int vml_nearest_clock(int clock)
{
	int i;
	int cur_index;
	int cur_diff;
	int diff;

	cur_index = 0;
	cur_diff = clock - vml_clocks[0];
	cur_diff = (cur_diff < 0) ? -cur_diff : cur_diff;
	for (i = 1; i < vml_num_clocks; ++i) {
		diff = clock - vml_clocks[i];
		diff = (diff < 0) ? -diff : diff;
		if (diff < cur_diff) {
			cur_index = i;
			cur_diff = diff;
		}
	}
	return vml_clocks[cur_index];
}

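/*
 * Example (not in the original driver): a requested clock of 65000 kHz
 * yields 59400 kHz from the vml_clocks[] table above (diff 5600 kHz,
 * against 9250 kHz for 74250). vmlfb_check_var_locked() below then
 * accepts it, since the difference is within 20% of the request
 * (65000 / 5 == 13000).
 */
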
static int vmlfb_check_var_locked(struct fb_var_screeninfo *var,
				  struct vml_info *vinfo)
{
	u32 pitch;
	u64 mem;
	int nearest_clock;
	int clock;
	int clock_diff;
	struct fb_var_screeninfo v;

	v = *var;
	clock = PICOS2KHZ(var->pixclock);

	if (subsys && subsys->nearest_clock) {
		nearest_clock = subsys->nearest_clock(subsys, clock);
	} else {
		nearest_clock = vml_nearest_clock(clock);
	}

	/*
	 * Accept a 20% diff.
	 */

	clock_diff = nearest_clock - clock;
	clock_diff = (clock_diff < 0) ? -clock_diff : clock_diff;
	if (clock_diff > clock / 5) {
#if 0
		printk(KERN_DEBUG MODULE_NAME ": Diff failure. %d %d\n", clock_diff, clock);
#endif
		return -EINVAL;
	}

	v.pixclock = KHZ2PICOS(nearest_clock);

	if (var->xres > VML_MAX_XRES || var->yres > VML_MAX_YRES) {
		printk(KERN_DEBUG MODULE_NAME ": Resolution failure.\n");
		return -EINVAL;
	}
	if (var->xres_virtual > VML_MAX_XRES_VIRTUAL) {
		printk(KERN_DEBUG MODULE_NAME
		       ": Virtual resolution failure.\n");
		return -EINVAL;
	}
	switch (v.bits_per_pixel) {
	case 0 ... 16:
		v.bits_per_pixel = 16;
		break;
	case 17 ... 32:
		v.bits_per_pixel = 32;
		break;
	default:
		printk(KERN_DEBUG MODULE_NAME ": Invalid bpp: %d.\n",
		       var->bits_per_pixel);
		return -EINVAL;
	}

	pitch = ALIGN((var->xres * var->bits_per_pixel) >> 3, 0x40);
	mem = (u64)pitch * var->yres_virtual;
	if (mem > vinfo->vram_contig_size) {
		return -ENOMEM;
	}

	switch (v.bits_per_pixel) {
	case 16:
		if (var->blue.offset != 0 ||
		    var->blue.length != 5 ||
		    var->green.offset != 5 ||
		    var->green.length != 5 ||
		    var->red.offset != 10 ||
		    var->red.length != 5 ||
		    var->transp.offset != 15 || var->transp.length != 1) {
			vmlfb_set_pref_pixel_format(&v);
		}
		break;
	case 32:
		if (var->blue.offset != 0 ||
		    var->blue.length != 8 ||
		    var->green.offset != 8 ||
		    var->green.length != 8 ||
		    var->red.offset != 16 ||
		    var->red.length != 8 ||
		    (var->transp.length != 0 && var->transp.length != 8) ||
		    (var->transp.length == 8 && var->transp.offset != 24)) {
			vmlfb_set_pref_pixel_format(&v);
		}
		break;
	default:
		return -EINVAL;
	}

	*var = v;

	return 0;
}

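/*
 * Worked example (not in the original driver): for a 1024x768 mode at
 * 16 bpp, pitch = ALIGN((1024 * 16) >> 3, 0x40) = 2048 bytes, and with
 * yres_virtual == 768 the frame needs 2048 * 768 = 1572864 bytes, well
 * within the 4MB default contiguous vram.
 */
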
static int vmlfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
	struct vml_info *vinfo = container_of(info, struct vml_info, info);
	int ret;

	mutex_lock(&vml_mutex);
	ret = vmlfb_check_var_locked(var, vinfo);
	mutex_unlock(&vml_mutex);

	return ret;
}

static void vml_wait_vblank(struct vml_info *vinfo)
{
	/* Wait for vblank. For now, just wait for a 50Hz cycle (20ms) */
	mdelay(20);
}

static void vmlfb_disable_pipe(struct vml_info *vinfo)
{
	struct vml_par *par = vinfo->par;

	/* Disable the MDVO pad */
	VML_WRITE32(par, VML_RCOMPSTAT, 0);
	while (!(VML_READ32(par, VML_RCOMPSTAT) & VML_MDVO_VDC_I_RCOMP)) ;

	/* Disable display planes */
	VML_WRITE32(par, VML_DSPCCNTR,
		    VML_READ32(par, VML_DSPCCNTR) & ~VML_GFX_ENABLE);
	(void)VML_READ32(par, VML_DSPCCNTR);
	/* Wait for vblank for the disable to take effect */
	vml_wait_vblank(vinfo);

	/* Next, disable display pipes */
	VML_WRITE32(par, VML_PIPEACONF, 0);
	(void)VML_READ32(par, VML_PIPEACONF);

	vinfo->pipe_disabled = 1;
}

#ifdef VERMILION_DEBUG
static void vml_dump_regs(struct vml_info *vinfo)
{
	struct vml_par *par = vinfo->par;

	printk(KERN_DEBUG MODULE_NAME ": Modesetting register dump:\n");
	printk(KERN_DEBUG MODULE_NAME ": \tHTOTAL_A : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_HTOTAL_A));
	printk(KERN_DEBUG MODULE_NAME ": \tHBLANK_A : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_HBLANK_A));
	printk(KERN_DEBUG MODULE_NAME ": \tHSYNC_A : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_HSYNC_A));
	printk(KERN_DEBUG MODULE_NAME ": \tVTOTAL_A : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_VTOTAL_A));
	printk(KERN_DEBUG MODULE_NAME ": \tVBLANK_A : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_VBLANK_A));
	printk(KERN_DEBUG MODULE_NAME ": \tVSYNC_A : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_VSYNC_A));
	printk(KERN_DEBUG MODULE_NAME ": \tDSPCSTRIDE : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_DSPCSTRIDE));
	printk(KERN_DEBUG MODULE_NAME ": \tDSPCSIZE : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_DSPCSIZE));
	printk(KERN_DEBUG MODULE_NAME ": \tDSPCPOS : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_DSPCPOS));
	printk(KERN_DEBUG MODULE_NAME ": \tDSPARB : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_DSPARB));
	printk(KERN_DEBUG MODULE_NAME ": \tDSPCADDR : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_DSPCADDR));
	printk(KERN_DEBUG MODULE_NAME ": \tBCLRPAT_A : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_BCLRPAT_A));
	printk(KERN_DEBUG MODULE_NAME ": \tCANVSCLR_A : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_CANVSCLR_A));
	printk(KERN_DEBUG MODULE_NAME ": \tPIPEASRC : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_PIPEASRC));
	printk(KERN_DEBUG MODULE_NAME ": \tPIPEACONF : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_PIPEACONF));
	printk(KERN_DEBUG MODULE_NAME ": \tDSPCCNTR : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_DSPCCNTR));
	printk(KERN_DEBUG MODULE_NAME ": \tRCOMPSTAT : 0x%08x\n",
	       (unsigned)VML_READ32(par, VML_RCOMPSTAT));
	printk(KERN_DEBUG MODULE_NAME ": End of modesetting register dump.\n");
}
#endif

static int vmlfb_set_par_locked(struct vml_info *vinfo)
{
	struct vml_par *par = vinfo->par;
	struct fb_info *info = &vinfo->info;
	struct fb_var_screeninfo *var = &info->var;
	u32 htotal, hactive, hblank_start, hblank_end, hsync_start, hsync_end;
	u32 vtotal, vactive, vblank_start, vblank_end, vsync_start, vsync_end;
	u32 dspcntr;
	int clock;

	vinfo->bytes_per_pixel = var->bits_per_pixel >> 3;
	vinfo->stride = ALIGN(var->xres_virtual * vinfo->bytes_per_pixel, 0x40);
	info->fix.line_length = vinfo->stride;

	if (!subsys)
		return 0;

	htotal =
	    var->xres + var->right_margin + var->hsync_len + var->left_margin;
	hactive = var->xres;
	hblank_start = var->xres;
	hblank_end = htotal;
	hsync_start = hactive + var->right_margin;
	hsync_end = hsync_start + var->hsync_len;

	vtotal =
	    var->yres + var->lower_margin + var->vsync_len + var->upper_margin;
	vactive = var->yres;
	vblank_start = var->yres;
	vblank_end = vtotal;
	vsync_start = vactive + var->lower_margin;
	vsync_end = vsync_start + var->vsync_len;

	dspcntr = VML_GFX_ENABLE | VML_GFX_GAMMABYPASS;
	clock = PICOS2KHZ(var->pixclock);

	if (subsys->nearest_clock) {
		clock = subsys->nearest_clock(subsys, clock);
	} else {
		clock = vml_nearest_clock(clock);
	}
	printk(KERN_DEBUG MODULE_NAME
	       ": Set mode Hfreq : %d kHz, Vfreq : %d Hz.\n", clock / htotal,
	       ((clock / htotal) * 1000) / vtotal);

	switch (var->bits_per_pixel) {
	case 16:
		dspcntr |= VML_GFX_ARGB1555;
		break;
	case 32:
		if (var->transp.length == 8)
			dspcntr |= VML_GFX_ARGB8888 | VML_GFX_ALPHAMULT;
		else
			dspcntr |= VML_GFX_RGB0888;
		break;
	default:
		return -EINVAL;
	}

	vmlfb_disable_pipe(vinfo);
	mb();

	if (subsys->set_clock)
		subsys->set_clock(subsys, clock);
	else
		return -EINVAL;

	VML_WRITE32(par, VML_HTOTAL_A, ((htotal - 1) << 16) | (hactive - 1));
	VML_WRITE32(par, VML_HBLANK_A,
		    ((hblank_end - 1) << 16) | (hblank_start - 1));
	VML_WRITE32(par, VML_HSYNC_A,
		    ((hsync_end - 1) << 16) | (hsync_start - 1));
	VML_WRITE32(par, VML_VTOTAL_A, ((vtotal - 1) << 16) | (vactive - 1));
	VML_WRITE32(par, VML_VBLANK_A,
		    ((vblank_end - 1) << 16) | (vblank_start - 1));
	VML_WRITE32(par, VML_VSYNC_A,
		    ((vsync_end - 1) << 16) | (vsync_start - 1));
	VML_WRITE32(par, VML_DSPCSTRIDE, vinfo->stride);
	VML_WRITE32(par, VML_DSPCSIZE,
		    ((var->yres - 1) << 16) | (var->xres - 1));
	VML_WRITE32(par, VML_DSPCPOS, 0x00000000);
	VML_WRITE32(par, VML_DSPARB, VML_FIFO_DEFAULT);
	VML_WRITE32(par, VML_BCLRPAT_A, 0x00000000);
	VML_WRITE32(par, VML_CANVSCLR_A, 0x00000000);
	VML_WRITE32(par, VML_PIPEASRC,
		    ((var->xres - 1) << 16) | (var->yres - 1));

	wmb();
	VML_WRITE32(par, VML_PIPEACONF, VML_PIPE_ENABLE);
	wmb();
	VML_WRITE32(par, VML_DSPCCNTR, dspcntr);
	wmb();
	VML_WRITE32(par, VML_DSPCADDR, (u32) vinfo->vram_start +
		    var->yoffset * vinfo->stride +
		    var->xoffset * vinfo->bytes_per_pixel);

	VML_WRITE32(par, VML_RCOMPSTAT, VML_MDVO_PAD_ENABLE);

	while (!(VML_READ32(par, VML_RCOMPSTAT) &
		 (VML_MDVO_VDC_I_RCOMP | VML_MDVO_PAD_ENABLE))) ;

	vinfo->pipe_disabled = 0;
#ifdef VERMILION_DEBUG
	vml_dump_regs(vinfo);
#endif

	return 0;
}

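/*
 * Illustrative note (not in the original driver): the timing registers
 * above pack two zero-based values per word, "total/end - 1" in the high
 * half and "active/start - 1" in the low half. With the defaultmode
 * timings (xres 1024, right_margin 24, hsync_len 136, left_margin 144),
 * htotal = 1328, so VML_HTOTAL_A is written as
 * ((1328 - 1) << 16) | (1024 - 1) == 0x052F03FF.
 */
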
static int vmlfb_set_par(struct fb_info *info)
{
	struct vml_info *vinfo = container_of(info, struct vml_info, info);
	int ret;

	mutex_lock(&vml_mutex);
	list_move(&vinfo->head, (subsys) ? &global_has_mode : &global_no_mode);
	ret = vmlfb_set_par_locked(vinfo);

	mutex_unlock(&vml_mutex);
	return ret;
}

static int vmlfb_blank_locked(struct vml_info *vinfo)
{
	struct vml_par *par = vinfo->par;
	u32 cur = VML_READ32(par, VML_PIPEACONF);

	switch (vinfo->cur_blank_mode) {
	case FB_BLANK_UNBLANK:
		if (vinfo->pipe_disabled) {
			vmlfb_set_par_locked(vinfo);
		}
		VML_WRITE32(par, VML_PIPEACONF, cur & ~VML_PIPE_FORCE_BORDER);
		(void)VML_READ32(par, VML_PIPEACONF);
		break;
	case FB_BLANK_NORMAL:
		if (vinfo->pipe_disabled) {
			vmlfb_set_par_locked(vinfo);
		}
		VML_WRITE32(par, VML_PIPEACONF, cur | VML_PIPE_FORCE_BORDER);
		(void)VML_READ32(par, VML_PIPEACONF);
		break;
	case FB_BLANK_VSYNC_SUSPEND:
	case FB_BLANK_HSYNC_SUSPEND:
		if (!vinfo->pipe_disabled) {
			vmlfb_disable_pipe(vinfo);
		}
		break;
	case FB_BLANK_POWERDOWN:
		if (!vinfo->pipe_disabled) {
			vmlfb_disable_pipe(vinfo);
		}
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int vmlfb_blank(int blank_mode, struct fb_info *info)
{
	struct vml_info *vinfo = container_of(info, struct vml_info, info);
	int ret;

	mutex_lock(&vml_mutex);
	vinfo->cur_blank_mode = blank_mode;
	ret = vmlfb_blank_locked(vinfo);
	mutex_unlock(&vml_mutex);
	return ret;
}

static int vmlfb_pan_display(struct fb_var_screeninfo *var,
			     struct fb_info *info)
{
	struct vml_info *vinfo = container_of(info, struct vml_info, info);
	struct vml_par *par = vinfo->par;

	mutex_lock(&vml_mutex);
	VML_WRITE32(par, VML_DSPCADDR, (u32) vinfo->vram_start +
		    var->yoffset * vinfo->stride +
		    var->xoffset * vinfo->bytes_per_pixel);
	(void)VML_READ32(par, VML_DSPCADDR);
	mutex_unlock(&vml_mutex);

	return 0;
}

static int vmlfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
			   u_int transp, struct fb_info *info)
{
	u32 v;

	if (regno >= 16)
		return -EINVAL;

	if (info->var.grayscale) {
		red = green = blue = (red * 77 + green * 151 + blue * 28) >> 8;
	}

	if (info->fix.visual != FB_VISUAL_TRUECOLOR)
		return -EINVAL;

	red = VML_TOHW(red, info->var.red.length);
	blue = VML_TOHW(blue, info->var.blue.length);
	green = VML_TOHW(green, info->var.green.length);
	transp = VML_TOHW(transp, info->var.transp.length);

	v = (red << info->var.red.offset) |
	    (green << info->var.green.offset) |
	    (blue << info->var.blue.offset) |
	    (transp << info->var.transp.offset);

	switch (info->var.bits_per_pixel) {
	case 16:
		((u32 *) info->pseudo_palette)[regno] = v;
		break;
	case 24:
	case 32:
		((u32 *) info->pseudo_palette)[regno] = v;
		break;
	}
	return 0;
}

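/*
 * Worked example (not in the original driver): VML_TOHW(), used above,
 * rounds a 16-bit color component down to _width bits, computing roughly
 * (_val * (2^_width - 1) + 0x7FFF) >> 16. For instance,
 * VML_TOHW(0xFFFF, 5) == 31, VML_TOHW(0x8000, 5) == 15 and
 * VML_TOHW(0, 5) == 0, so full-scale input maps to full-scale output.
 */
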
static int vmlfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	struct vml_info *vinfo = container_of(info, struct vml_info, info);
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	int ret;
	unsigned long prot;

	ret = vmlfb_vram_offset(vinfo, offset);
	if (ret)
		return -EINVAL;

	prot = pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK;
	pgprot_val(vma->vm_page_prot) =
		prot | cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS);

	return vm_iomap_memory(vma, vinfo->vram_start,
			vinfo->vram_contig_size);
}

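/*
 * Note on vmlfb_mmap() above (not in the original driver): the UC_MINUS
 * page attribute is chosen to agree with the uncached linear kernel map
 * set up by set_pages_uc() in vmlfb_alloc_vram_area(), so the same
 * physical vram is never mapped with conflicting cache attributes.
 */
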
static int vmlfb_sync(struct fb_info *info)
{
	return 0;
}

static int vmlfb_cursor(struct fb_info *info, struct fb_cursor *cursor)
{
	return -EINVAL;	/* just to force soft_cursor() call */
}

static struct fb_ops vmlfb_ops = {
	.owner = THIS_MODULE,
	.fb_open = vmlfb_open,
	.fb_release = vmlfb_release,
	.fb_check_var = vmlfb_check_var,
	.fb_set_par = vmlfb_set_par,
	.fb_blank = vmlfb_blank,
	.fb_pan_display = vmlfb_pan_display,
	.fb_fillrect = cfb_fillrect,
	.fb_copyarea = cfb_copyarea,
	.fb_imageblit = cfb_imageblit,
	.fb_cursor = vmlfb_cursor,
	.fb_sync = vmlfb_sync,
	.fb_mmap = vmlfb_mmap,
	.fb_setcolreg = vmlfb_setcolreg
};

static const struct pci_device_id vml_ids[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, VML_DEVICE_VDC)},
	{0}
};

static struct pci_driver vmlfb_pci_driver = {
	.name = "vmlfb",
	.id_table = vml_ids,
	.probe = vml_pci_probe,
	.remove = vml_pci_remove,
};

static void __exit vmlfb_cleanup(void)
{
	pci_unregister_driver(&vmlfb_pci_driver);
}

static int __init vmlfb_init(void)
{
#ifndef MODULE
	char *option = NULL;

	if (fb_get_options(MODULE_NAME, &option))
		return -ENODEV;
#endif

	printk(KERN_DEBUG MODULE_NAME ": initializing\n");
	mutex_init(&vml_mutex);
	INIT_LIST_HEAD(&global_no_mode);
	INIT_LIST_HEAD(&global_has_mode);

	return pci_register_driver(&vmlfb_pci_driver);
}

int vmlfb_register_subsys(struct vml_sys *sys)
{
	struct vml_info *entry;
	struct list_head *list;
	u32 save_activate;

	mutex_lock(&vml_mutex);
	if (subsys != NULL) {
		subsys->restore(subsys);
	}
	subsys = sys;
	subsys->save(subsys);

	/*
	 * We need to restart list traversal for each item, since we
	 * release the list mutex in the loop.
	 */

	list = global_no_mode.next;
	while (list != &global_no_mode) {
		list_del_init(list);
		entry = list_entry(list, struct vml_info, head);

		/*
		 * First, try the current mode which might not be
		 * completely validated with respect to the pixel clock.
		 */

		if (!vmlfb_check_var_locked(&entry->info.var, entry)) {
			vmlfb_set_par_locked(entry);
			list_add_tail(list, &global_has_mode);
		} else {

			/*
			 * Didn't work. Try to find another mode
			 * that matches this subsys.
			 */

			mutex_unlock(&vml_mutex);
			save_activate = entry->info.var.activate;
			entry->info.var.bits_per_pixel = 16;
			vmlfb_set_pref_pixel_format(&entry->info.var);
			if (fb_find_mode(&entry->info.var,
					 &entry->info,
					 vml_default_mode, NULL, 0, NULL, 16)) {
				entry->info.var.activate |=
				    FB_ACTIVATE_FORCE | FB_ACTIVATE_NOW;
				fb_set_var(&entry->info, &entry->info.var);
			} else {
				printk(KERN_ERR MODULE_NAME
				       ": Sorry, no mode found for this subsys.\n");
			}
			entry->info.var.activate = save_activate;
			mutex_lock(&vml_mutex);
		}
		vmlfb_blank_locked(entry);
		list = global_no_mode.next;
	}
	mutex_unlock(&vml_mutex);

	printk(KERN_DEBUG MODULE_NAME ": Registered %s subsystem.\n",
	       subsys->name ? subsys->name : "unknown");
	return 0;
}

EXPORT_SYMBOL_GPL(vmlfb_register_subsys);

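/*
 * Illustrative sketch (not part of the driver): how a clock subsystem
 * driver is expected to hook in. Field names follow the call sites above;
 * the exact types live in vermilion.h, so treat this as an assumption-laden
 * example rather than a reference implementation.
 *
 *	static struct vml_sys my_sys = {
 *		.name = "my_pll",
 *		.save = my_save,		// snapshot clock state
 *		.restore = my_restore,		// undo on unregister
 *		.set_clock = my_set_clock,	// program pixel clock (kHz)
 *		.nearest_clock = my_nearest,	// closest achievable clock
 *	};
 *
 *	vmlfb_register_subsys(&my_sys);		// adopts pending fb_infos
 *	...
 *	vmlfb_unregister_subsys(&my_sys);	// disables pipes again
 */
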
void vmlfb_unregister_subsys(struct vml_sys *sys)
{
	struct vml_info *entry, *next;

	mutex_lock(&vml_mutex);
	if (subsys != sys) {
		mutex_unlock(&vml_mutex);
		return;
	}
	subsys->restore(subsys);
	subsys = NULL;
	list_for_each_entry_safe(entry, next, &global_has_mode, head) {
		printk(KERN_DEBUG MODULE_NAME ": subsys disable pipe\n");
		vmlfb_disable_pipe(entry);
		list_move_tail(&entry->head, &global_no_mode);
	}
	mutex_unlock(&vml_mutex);
}

EXPORT_SYMBOL_GPL(vmlfb_unregister_subsys);

module_init(vmlfb_init);
module_exit(vmlfb_cleanup);

MODULE_AUTHOR("Tungsten Graphics");
MODULE_DESCRIPTION("Initialization of the Vermilion display devices");
MODULE_VERSION("1.0.0");
MODULE_LICENSE("GPL");