// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright © 2006-2011 Intel Corporation
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 *	Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
 */

#include <linux/delay.h>
#include <linux/highmem.h>

#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_vblank.h>

#include "framebuffer.h"
#include "gem.h"
#include "gma_display.h"
#include "psb_irq.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"

/*
 * Returns whether any output on the specified pipe is of the specified type
 */
bool gma_pipe_has_type(struct drm_crtc *crtc, int type)
{
	struct drm_device *dev = crtc->dev;
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->encoder && connector->encoder->crtc == crtc) {
			struct gma_encoder *gma_encoder =
				gma_attached_encoder(connector);
			if (gma_encoder->type == type) {
				drm_connector_list_iter_end(&conn_iter);
				return true;
			}
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	return false;
}

void gma_wait_for_vblank(struct drm_device *dev)
{
	/* Wait for 20ms, i.e. one cycle at 50hz. */
	mdelay(20);
}

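/*
 * Point the primary plane at the CRTC's framebuffer, panned to (x, y):
 * the buffer is pinned into the GTT, the stride, pixel format and
 * base/offset registers are programmed, and any previously displayed
 * framebuffer is unpinned.
 */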
int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
		      struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct psb_gem_object *pobj;
	int pipe = gma_crtc->pipe;
	const struct psb_offset *map = &dev_priv->regmap[pipe];
	unsigned long start, offset;
	u32 dspcntr;
	int ret = 0;

	if (!gma_power_begin(dev, true))
		return 0;

	/* no fb bound */
	if (!fb) {
		dev_err(dev->dev, "No FB bound\n");
		goto gma_pipe_cleaner;
	}

	pobj = to_psb_gem_object(fb->obj[0]);

	/* We are displaying this buffer, make sure it is actually loaded
	   into the GTT */
	ret = psb_gem_pin(pobj);
	if (ret < 0)
		goto gma_pipe_set_base_exit;
	start = pobj->offset;
	offset = y * fb->pitches[0] + x * fb->format->cpp[0];

	REG_WRITE(map->stride, fb->pitches[0]);

	dspcntr = REG_READ(map->cntr);
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;

	switch (fb->format->cpp[0] * 8) {
	case 8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case 16:
		if (fb->format->depth == 15)
			dspcntr |= DISPPLANE_15_16BPP;
		else
			dspcntr |= DISPPLANE_16BPP;
		break;
	case 24:
	case 32:
		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
		break;
	default:
		dev_err(dev->dev, "Unknown color depth\n");
		ret = -EINVAL;
		goto gma_pipe_set_base_exit;
	}
	REG_WRITE(map->cntr, dspcntr);

	dev_dbg(dev->dev,
		"Writing base %08lX %08lX %d %d\n", start, offset, x, y);

	/* FIXME: Investigate whether this really is the base for psb and why
		  the linear offset is named base for the other chips. map->surf
		  should be the base and map->linoff the offset for all chips */
	if (IS_PSB(dev)) {
		REG_WRITE(map->base, offset + start);
		REG_READ(map->base);
	} else {
		REG_WRITE(map->base, offset);
		REG_READ(map->base);
		REG_WRITE(map->surf, start);
		REG_READ(map->surf);
	}

gma_pipe_cleaner:
	/* If there was a previous display we can now unpin it */
	if (old_fb)
		psb_gem_unpin(to_psb_gem_object(old_fb->obj[0]));

gma_pipe_set_base_exit:
	gma_power_end(dev);
	return ret;
}

/* Loads the palette/gamma unit for the CRTC with the prepared values */
void gma_crtc_load_lut(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
	int palreg = map->palette;
	u16 *r, *g, *b;
	int i;

	/* The clocks have to be on to load the palette. */
	if (!crtc->enabled)
		return;

	r = crtc->gamma_store;
	g = r + crtc->gamma_size;
	b = g + crtc->gamma_size;

	if (gma_power_begin(dev, false)) {
		for (i = 0; i < 256; i++) {
			REG_WRITE(palreg + 4 * i,
				  (((*r++ >> 8) + gma_crtc->lut_adj[i]) << 16) |
				  (((*g++ >> 8) + gma_crtc->lut_adj[i]) << 8) |
				  ((*b++ >> 8) + gma_crtc->lut_adj[i]));
		}
		gma_power_end(dev);
	} else {
		for (i = 0; i < 256; i++) {
			/* FIXME: Why pipe[0] and not pipe[..._crtc->pipe]? */
			dev_priv->regs.pipe[0].palette[i] =
				(((*r++ >> 8) + gma_crtc->lut_adj[i]) << 16) |
				(((*g++ >> 8) + gma_crtc->lut_adj[i]) << 8) |
				((*b++ >> 8) + gma_crtc->lut_adj[i]);
		}

	}
}

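/*
 * Note: the DRM core is expected to have copied the requested ramp into
 * crtc->gamma_store before calling this hook, so reloading the LUT from
 * there is sufficient; the red/green/blue arguments are not used directly.
 */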
static int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
			      u16 *blue, u32 size,
			      struct drm_modeset_acquire_ctx *ctx)
{
	gma_crtc_load_lut(crtc);

	return 0;
}

/*
 * Sets the power management mode of the pipe and plane.
 *
 * This code should probably grow support for turning the cursor off and back
 * on appropriately at the same time as we're turning the pipe off/on.
 */
void gma_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	int pipe = gma_crtc->pipe;
	const struct psb_offset *map = &dev_priv->regmap[pipe];
	u32 temp;

	/* XXX: When our outputs are all unaware of DPMS modes other than off
	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
	 */

	if (IS_CDV(dev))
		dev_priv->ops->disable_sr(dev);

	switch (mode) {
	case DRM_MODE_DPMS_ON:
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
		if (gma_crtc->active)
			break;

		gma_crtc->active = true;

		/* Enable the DPLL */
		temp = REG_READ(map->dpll);
		if ((temp & DPLL_VCO_ENABLE) == 0) {
			REG_WRITE(map->dpll, temp);
			REG_READ(map->dpll);
			/* Wait for the clocks to stabilize. */
			udelay(150);
			REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
			REG_READ(map->dpll);
			/* Wait for the clocks to stabilize. */
			udelay(150);
			REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
			REG_READ(map->dpll);
			/* Wait for the clocks to stabilize. */
			udelay(150);
		}

		/* Enable the plane */
		temp = REG_READ(map->cntr);
		if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
			REG_WRITE(map->cntr,
				  temp | DISPLAY_PLANE_ENABLE);
			/* Flush the plane changes */
			REG_WRITE(map->base, REG_READ(map->base));
		}

		udelay(150);

		/* Enable the pipe */
		temp = REG_READ(map->conf);
		if ((temp & PIPEACONF_ENABLE) == 0)
			REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);

		temp = REG_READ(map->status);
		temp &= ~(0xFFFF);
		temp |= PIPE_FIFO_UNDERRUN;
		REG_WRITE(map->status, temp);
		REG_READ(map->status);

		gma_crtc_load_lut(crtc);

		/* Give the overlay scaler a chance to enable
		 * if it's on this pipe */
		/* psb_intel_crtc_dpms_video(crtc, true); TODO */

		drm_crtc_vblank_on(crtc);
		break;
	case DRM_MODE_DPMS_OFF:
		if (!gma_crtc->active)
			break;

		gma_crtc->active = false;

		/* Give the overlay scaler a chance to disable
		 * if it's on this pipe */
		/* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */

		/* Disable the VGA plane that we never use */
		REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);

		/* Turn off vblank interrupts */
		drm_crtc_vblank_off(crtc);

		/* Wait for vblank for the disable to take effect */
		gma_wait_for_vblank(dev);

		/* Disable plane */
		temp = REG_READ(map->cntr);
		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
			REG_WRITE(map->cntr,
				  temp & ~DISPLAY_PLANE_ENABLE);
			/* Flush the plane changes */
			REG_WRITE(map->base, REG_READ(map->base));
			REG_READ(map->base);
		}

		/* Disable pipe */
		temp = REG_READ(map->conf);
		if ((temp & PIPEACONF_ENABLE) != 0) {
			REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
			REG_READ(map->conf);
		}

		/* Wait for vblank for the disable to take effect. */
		gma_wait_for_vblank(dev);

		udelay(150);

		/* Disable DPLL */
		temp = REG_READ(map->dpll);
		if ((temp & DPLL_VCO_ENABLE) != 0) {
			REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
			REG_READ(map->dpll);
		}

		/* Wait for the clocks to turn off. */
		udelay(150);
		break;
	}

	if (IS_CDV(dev))
		dev_priv->ops->update_wm(dev, crtc);

	/* Set FIFO watermarks */
	REG_WRITE(DSPARB, 0x3F3E);
}

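/*
 * Install a 64x64 ARGB hardware cursor from a GEM handle, or disable the
 * cursor when the handle is 0. The buffer is pinned into the GTT; chips
 * that need a physical cursor address get the image copied into the
 * preallocated cursor memory first.
 */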
static int gma_crtc_cursor_set(struct drm_crtc *crtc,
			       struct drm_file *file_priv, uint32_t handle,
			       uint32_t width, uint32_t height)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	int pipe = gma_crtc->pipe;
	uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
	uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
	uint32_t temp;
	size_t addr = 0;
	struct psb_gem_object *pobj;
	struct psb_gem_object *cursor_pobj = gma_crtc->cursor_pobj;
	struct drm_gem_object *obj;
	void *tmp_dst;
	int ret = 0, i, cursor_pages;

	/* If we didn't get a handle then turn the cursor off */
	if (!handle) {
		temp = CURSOR_MODE_DISABLE;
		if (gma_power_begin(dev, false)) {
			REG_WRITE(control, temp);
			REG_WRITE(base, 0);
			gma_power_end(dev);
		}

		/* Unpin the old GEM object */
		if (gma_crtc->cursor_obj) {
			pobj = to_psb_gem_object(gma_crtc->cursor_obj);
			psb_gem_unpin(pobj);
			drm_gem_object_put(gma_crtc->cursor_obj);
			gma_crtc->cursor_obj = NULL;
		}
		return 0;
	}

	/* Currently we only support 64x64 cursors */
	if (width != 64 || height != 64) {
		dev_dbg(dev->dev, "We currently only support 64x64 cursors\n");
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->size < width * height * 4) {
		dev_dbg(dev->dev, "Buffer is too small\n");
		ret = -ENOMEM;
		goto unref_cursor;
	}

	pobj = to_psb_gem_object(obj);

	/* Pin the memory into the GTT */
	ret = psb_gem_pin(pobj);
	if (ret) {
		dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
		goto unref_cursor;
	}

	if (dev_priv->ops->cursor_needs_phys) {
		if (!cursor_pobj) {
			dev_err(dev->dev, "No hardware cursor mem available");
			ret = -ENOMEM;
			goto unref_cursor;
		}

		cursor_pages = obj->size / PAGE_SIZE;
		if (cursor_pages > 4)
			cursor_pages = 4; /* Prevent overflow */

		/* Copy the cursor to cursor mem */
		tmp_dst = dev_priv->vram_addr + cursor_pobj->offset;
		for (i = 0; i < cursor_pages; i++) {
			memcpy_from_page(tmp_dst, pobj->pages[i], 0, PAGE_SIZE);
			tmp_dst += PAGE_SIZE;
		}

		addr = gma_crtc->cursor_addr;
	} else {
		addr = pobj->offset;
		gma_crtc->cursor_addr = addr;
	}

	temp = 0;
	/* set the pipe for the cursor */
	temp |= (pipe << 28);
	temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;

	if (gma_power_begin(dev, false)) {
		REG_WRITE(control, temp);
		REG_WRITE(base, addr);
		gma_power_end(dev);
	}

	/* unpin the old bo */
	if (gma_crtc->cursor_obj) {
		pobj = to_psb_gem_object(gma_crtc->cursor_obj);
		psb_gem_unpin(pobj);
		drm_gem_object_put(gma_crtc->cursor_obj);
	}

	gma_crtc->cursor_obj = obj;
unlock:
	return ret;

unref_cursor:
	drm_gem_object_put(obj);
	return ret;
}

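/*
 * Move the hardware cursor. The position register takes magnitudes with a
 * per-axis sign bit, hence the special handling of negative coordinates.
 */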
static int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	int pipe = gma_crtc->pipe;
	uint32_t temp = 0;
	uint32_t addr;

	if (x < 0) {
		temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
		x = -x;
	}
	if (y < 0) {
		temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
		y = -y;
	}

	temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
	temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);

	addr = gma_crtc->cursor_addr;

	if (gma_power_begin(dev, false)) {
		REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
		REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, addr);
		gma_power_end(dev);
	}
	return 0;
}

void gma_crtc_prepare(struct drm_crtc *crtc)
{
	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
}

void gma_crtc_commit(struct drm_crtc *crtc)
{
	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
}

void gma_crtc_disable(struct drm_crtc *crtc)
{
	struct psb_gem_object *pobj;
	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;

	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);

	if (crtc->primary->fb) {
		pobj = to_psb_gem_object(crtc->primary->fb->obj[0]);
		psb_gem_unpin(pobj);
	}
}

void gma_crtc_destroy(struct drm_crtc *crtc)
{
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);

	if (gma_crtc->cursor_pobj)
		drm_gem_object_put(&gma_crtc->cursor_pobj->base);

	kfree(gma_crtc->crtc_state);
	drm_crtc_cleanup(crtc);
	kfree(gma_crtc);
}

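/*
 * Legacy (non-atomic) page flip: switch the primary plane to the new
 * framebuffer via mode_set_base. When an event is requested it is armed
 * under dev->event_lock so the vblank interrupt handler can deliver it
 * once the flip has taken effect.
 */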
int gma_crtc_page_flip(struct drm_crtc *crtc,
		       struct drm_framebuffer *fb,
		       struct drm_pending_vblank_event *event,
		       uint32_t page_flip_flags,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	struct drm_framebuffer *current_fb = crtc->primary->fb;
	struct drm_framebuffer *old_fb = crtc->primary->old_fb;
	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
	struct drm_device *dev = crtc->dev;
	unsigned long flags;
	int ret;

	if (!crtc_funcs->mode_set_base)
		return -EINVAL;

	/* Using mode_set_base requires the new fb to be set already. */
	crtc->primary->fb = fb;

	if (event) {
		spin_lock_irqsave(&dev->event_lock, flags);

		WARN_ON(drm_crtc_vblank_get(crtc) != 0);

		gma_crtc->page_flip_event = event;

		/* Call this locked if we want an event at vblank interrupt. */
		ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb);
		if (ret) {
			gma_crtc->page_flip_event = NULL;
			drm_crtc_vblank_put(crtc);
		}

		spin_unlock_irqrestore(&dev->event_lock, flags);
	} else {
		ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb);
	}

	/* Restore previous fb in case of failure. */
	if (ret)
		crtc->primary->fb = current_fb;

	return ret;
}

int gma_crtc_set_config(struct drm_mode_set *set,
			struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = set->crtc->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	int ret;

	if (!dev_priv->rpm_enabled)
		return drm_crtc_helper_set_config(set, ctx);

	pm_runtime_forbid(dev->dev);
	ret = drm_crtc_helper_set_config(set, ctx);
	pm_runtime_allow(dev->dev);

	return ret;
}

const struct drm_crtc_funcs gma_crtc_funcs = {
	.cursor_set = gma_crtc_cursor_set,
	.cursor_move = gma_crtc_cursor_move,
	.gamma_set = gma_crtc_gamma_set,
	.set_config = gma_crtc_set_config,
	.destroy = gma_crtc_destroy,
	.page_flip = gma_crtc_page_flip,
	.enable_vblank = gma_crtc_enable_vblank,
	.disable_vblank = gma_crtc_disable_vblank,
	.get_vblank_counter = gma_crtc_get_vblank_counter,
};

/*
 * Save HW states of given crtc
 */
void gma_crtc_save(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	struct psb_intel_crtc_state *crtc_state = gma_crtc->crtc_state;
	const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
	uint32_t palette_reg;
	int i;

	if (!crtc_state) {
		dev_err(dev->dev, "No CRTC state found\n");
		return;
	}

	crtc_state->saveDSPCNTR = REG_READ(map->cntr);
	crtc_state->savePIPECONF = REG_READ(map->conf);
	crtc_state->savePIPESRC = REG_READ(map->src);
	crtc_state->saveFP0 = REG_READ(map->fp0);
	crtc_state->saveFP1 = REG_READ(map->fp1);
	crtc_state->saveDPLL = REG_READ(map->dpll);
	crtc_state->saveHTOTAL = REG_READ(map->htotal);
	crtc_state->saveHBLANK = REG_READ(map->hblank);
	crtc_state->saveHSYNC = REG_READ(map->hsync);
	crtc_state->saveVTOTAL = REG_READ(map->vtotal);
	crtc_state->saveVBLANK = REG_READ(map->vblank);
	crtc_state->saveVSYNC = REG_READ(map->vsync);
	crtc_state->saveDSPSTRIDE = REG_READ(map->stride);

	/* NOTE: DSPSIZE DSPPOS only for psb */
	crtc_state->saveDSPSIZE = REG_READ(map->size);
	crtc_state->saveDSPPOS = REG_READ(map->pos);

	crtc_state->saveDSPBASE = REG_READ(map->base);

	palette_reg = map->palette;
	for (i = 0; i < 256; ++i)
		crtc_state->savePalette[i] = REG_READ(palette_reg + (i << 2));
}

/*
 * Restore HW states of given crtc
 */
void gma_crtc_restore(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	struct psb_intel_crtc_state *crtc_state = gma_crtc->crtc_state;
	const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
	uint32_t palette_reg;
	int i;

	if (!crtc_state) {
		dev_err(dev->dev, "No crtc state\n");
		return;
	}

	if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
		REG_WRITE(map->dpll,
			  crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
		REG_READ(map->dpll);
		udelay(150);
	}

	REG_WRITE(map->fp0, crtc_state->saveFP0);
	REG_READ(map->fp0);

	REG_WRITE(map->fp1, crtc_state->saveFP1);
	REG_READ(map->fp1);

	REG_WRITE(map->dpll, crtc_state->saveDPLL);
	REG_READ(map->dpll);
	udelay(150);

	REG_WRITE(map->htotal, crtc_state->saveHTOTAL);
	REG_WRITE(map->hblank, crtc_state->saveHBLANK);
	REG_WRITE(map->hsync, crtc_state->saveHSYNC);
	REG_WRITE(map->vtotal, crtc_state->saveVTOTAL);
	REG_WRITE(map->vblank, crtc_state->saveVBLANK);
	REG_WRITE(map->vsync, crtc_state->saveVSYNC);
	REG_WRITE(map->stride, crtc_state->saveDSPSTRIDE);

	REG_WRITE(map->size, crtc_state->saveDSPSIZE);
	REG_WRITE(map->pos, crtc_state->saveDSPPOS);

	REG_WRITE(map->src, crtc_state->savePIPESRC);
	REG_WRITE(map->base, crtc_state->saveDSPBASE);
	REG_WRITE(map->conf, crtc_state->savePIPECONF);

	gma_wait_for_vblank(dev);

	REG_WRITE(map->cntr, crtc_state->saveDSPCNTR);
	REG_WRITE(map->base, crtc_state->saveDSPBASE);

	gma_wait_for_vblank(dev);

	palette_reg = map->palette;
	for (i = 0; i < 256; ++i)
		REG_WRITE(palette_reg + (i << 2), crtc_state->savePalette[i]);
}

void gma_encoder_prepare(struct drm_encoder *encoder)
{
	const struct drm_encoder_helper_funcs *encoder_funcs =
		encoder->helper_private;
	/* lvds has its own version of prepare see psb_intel_lvds_prepare */
	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
}

void gma_encoder_commit(struct drm_encoder *encoder)
{
	const struct drm_encoder_helper_funcs *encoder_funcs =
		encoder->helper_private;
	/* lvds has its own version of commit see psb_intel_lvds_commit */
	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
}

void gma_encoder_destroy(struct drm_encoder *encoder)
{
	struct gma_encoder *intel_encoder = to_gma_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}

/* Currently there is only a 1:1 mapping of encoders and connectors */
struct drm_encoder *gma_best_encoder(struct drm_connector *connector)
{
	struct gma_encoder *gma_encoder = gma_attached_encoder(connector);

	return &gma_encoder->base;
}

void gma_connector_attach_encoder(struct gma_connector *connector,
				  struct gma_encoder *encoder)
{
	connector->encoder = encoder;
	drm_connector_attach_encoder(&connector->base,
				     &encoder->base);
}

#define GMA_PLL_INVALID(s) { /* DRM_ERROR(s); */ return false; }

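/*
 * Check a candidate PLL configuration against the chip-specific divider,
 * VCO and dot-clock limits. GMA_PLL_INVALID() bails out on the first
 * violated constraint.
 */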
bool gma_pll_is_valid(struct drm_crtc *crtc,
		      const struct gma_limit_t *limit,
		      struct gma_clock_t *clock)
{
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		GMA_PLL_INVALID("p1 out of range");
	if (clock->p < limit->p.min || limit->p.max < clock->p)
		GMA_PLL_INVALID("p out of range");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		GMA_PLL_INVALID("m2 out of range");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		GMA_PLL_INVALID("m1 out of range");
	/* On CDV m1 is always 0 */
	if (clock->m1 <= clock->m2 && clock->m1 != 0)
		GMA_PLL_INVALID("m1 <= m2 && m1 != 0");
	if (clock->m < limit->m.min || limit->m.max < clock->m)
		GMA_PLL_INVALID("m out of range");
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		GMA_PLL_INVALID("n out of range");
	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		GMA_PLL_INVALID("vco out of range");
	/* XXX: We may need to be checking "Dot clock"
	 * depending on the multiplier, connector, etc.,
	 * rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		GMA_PLL_INVALID("dot out of range");

	return true;
}

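/*
 * Brute-force search of the divider space for the setting whose resulting
 * dot clock is closest to the target frequency. Returns true if at least
 * one valid configuration was found and stored in best_clock.
 */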
bool gma_find_best_pll(const struct gma_limit_t *limit,
		       struct drm_crtc *crtc, int target, int refclk,
		       struct gma_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	const struct gma_clock_funcs *clock_funcs =
		to_gma_crtc(crtc)->clock_funcs;
	struct gma_clock_t clock;
	int err = target;

	if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
	    (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
		/*
		 * For LVDS, if the panel is on, just rely on its current
		 * settings for dual-channel. We haven't figured out how to
		 * reliably set up different single/dual channel state, if we
		 * even can.
		 */
		if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	/* m1 is always 0 on CDV so the outmost loop will run just once */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     (clock.m2 < clock.m1 || clock.m1 == 0) &&
		      clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max;
				     clock.p1++) {
					int this_err;

					clock_funcs->clock(refclk, &clock);

					if (!clock_funcs->pll_is_valid(crtc,
								limit, &clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return err != target;
}