// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright © 2006-2011 Intel Corporation
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 *	Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
 */

#include <linux/delay.h>
#include <linux/highmem.h>

#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_vblank.h>

#include "framebuffer.h"
#include "gem.h"
#include "gma_display.h"
#include "psb_irq.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"

/*
 * Returns whether any output on the specified pipe is of the specified type
 */
bool gma_pipe_has_type(struct drm_crtc *crtc, int type)
{
	struct drm_device *dev = crtc->dev;
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->encoder && connector->encoder->crtc == crtc) {
			struct gma_encoder *gma_encoder =
				gma_attached_encoder(connector);
			if (gma_encoder->type == type) {
				drm_connector_list_iter_end(&conn_iter);
				return true;
			}
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	return false;
}

void gma_wait_for_vblank(struct drm_device *dev)
{
	/* Wait for 20ms, i.e. one cycle at 50hz. */
	mdelay(20);
}

int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
		      struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct psb_gem_object *pobj;
	int pipe = gma_crtc->pipe;
	const struct psb_offset *map = &dev_priv->regmap[pipe];
	unsigned long start, offset;
	u32 dspcntr;
	int ret = 0;

	if (!gma_power_begin(dev, true))
		return 0;

	/* no fb bound */
	if (!fb) {
		dev_err(dev->dev, "No FB bound\n");
		goto gma_pipe_cleaner;
	}

	pobj = to_psb_gem_object(fb->obj[0]);

	/* We are displaying this buffer, make sure it is actually loaded
	   into the GTT */
	ret = psb_gem_pin(pobj);
	if (ret < 0)
		goto gma_pipe_set_base_exit;
	start = pobj->offset;
	offset = y * fb->pitches[0] + x * fb->format->cpp[0];

	REG_WRITE(map->stride, fb->pitches[0]);

	dspcntr = REG_READ(map->cntr);
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;

	switch (fb->format->cpp[0] * 8) {
	case 8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case 16:
		if (fb->format->depth == 15)
			dspcntr |= DISPPLANE_15_16BPP;
		else
			dspcntr |= DISPPLANE_16BPP;
		break;
	case 24:
	case 32:
		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
		break;
	default:
		dev_err(dev->dev, "Unknown color depth\n");
		ret = -EINVAL;
		goto gma_pipe_set_base_exit;
	}
	REG_WRITE(map->cntr, dspcntr);

	dev_dbg(dev->dev,
		"Writing base %08lX %08lX %d %d\n", start, offset, x, y);

	/* FIXME: Investigate whether this really is the base for psb and why
		  the linear offset is named base for the other chips. map->surf
		  should be the base and map->linoff the offset for all chips */
	if (IS_PSB(dev)) {
		REG_WRITE(map->base, offset + start);
		REG_READ(map->base);
	} else {
		REG_WRITE(map->base, offset);
		REG_READ(map->base);
		REG_WRITE(map->surf, start);
		REG_READ(map->surf);
	}

gma_pipe_cleaner:
	/* If there was a previous display we can now unpin it */
	if (old_fb)
		psb_gem_unpin(to_psb_gem_object(old_fb->obj[0]));

gma_pipe_set_base_exit:
	gma_power_end(dev);
	return ret;
}
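
/*
 * Worked example for the panning offset above (illustrative only, values
 * assumed rather than taken from hardware): the plane is pointed at a byte
 * offset of y * pitch + x * bytes-per-pixel within the pinned framebuffer.
 * For an assumed XRGB8888 framebuffer with a 4096-byte pitch, panning to
 * x = 8, y = 2 gives offset = 2 * 4096 + 8 * 4 = 8224 bytes, which is then
 * written to the linear-offset register (or added to the GTT start address
 * on Poulsbo, per the IS_PSB() branch).
 */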

/* Loads the palette/gamma unit for the CRTC with the prepared values */
void gma_crtc_load_lut(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
	int palreg = map->palette;
	u16 *r, *g, *b;
	int i;

	/* The clocks have to be on to load the palette. */
	if (!crtc->enabled)
		return;

	r = crtc->gamma_store;
	g = r + crtc->gamma_size;
	b = g + crtc->gamma_size;

	if (gma_power_begin(dev, false)) {
		for (i = 0; i < 256; i++) {
			REG_WRITE(palreg + 4 * i,
				  (((*r++ >> 8) + gma_crtc->lut_adj[i]) << 16) |
				  (((*g++ >> 8) + gma_crtc->lut_adj[i]) << 8) |
				  ((*b++ >> 8) + gma_crtc->lut_adj[i]));
		}
		gma_power_end(dev);
	} else {
		for (i = 0; i < 256; i++) {
			/* FIXME: Why pipe[0] and not pipe[..._crtc->pipe]? */
			dev_priv->regs.pipe[0].palette[i] =
				(((*r++ >> 8) + gma_crtc->lut_adj[i]) << 16) |
				(((*g++ >> 8) + gma_crtc->lut_adj[i]) << 8) |
				((*b++ >> 8) + gma_crtc->lut_adj[i]);
		}

	}
}
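
/*
 * Worked example for the palette packing above (illustrative only, values
 * assumed): each of the 256 entries is a 32-bit register at palreg + 4 * i,
 * with red in bits 23:16, green in bits 15:8 and blue in bits 7:0, so the
 * 16-bit gamma_store values are truncated to their high byte. With
 * *r = *g = *b = 0x8000 and lut_adj[i] = 0, the value written is
 * (0x80 << 16) | (0x80 << 8) | 0x80 = 0x00808080.
 */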

static int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
			      u16 *blue, u32 size,
			      struct drm_modeset_acquire_ctx *ctx)
{
	gma_crtc_load_lut(crtc);

	return 0;
}

/*
 * Sets the power management mode of the pipe and plane.
 *
 * This code should probably grow support for turning the cursor off and back
 * on appropriately at the same time as we're turning the pipe off/on.
 */
void gma_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	int pipe = gma_crtc->pipe;
	const struct psb_offset *map = &dev_priv->regmap[pipe];
	u32 temp;

	/* XXX: When our outputs are all unaware of DPMS modes other than off
	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
	 */

	if (IS_CDV(dev))
		dev_priv->ops->disable_sr(dev);

	switch (mode) {
	case DRM_MODE_DPMS_ON:
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
		if (gma_crtc->active)
			break;

		gma_crtc->active = true;

		/* Enable the DPLL */
		temp = REG_READ(map->dpll);
		if ((temp & DPLL_VCO_ENABLE) == 0) {
			REG_WRITE(map->dpll, temp);
			REG_READ(map->dpll);
			/* Wait for the clocks to stabilize. */
			udelay(150);
			REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
			REG_READ(map->dpll);
			/* Wait for the clocks to stabilize. */
			udelay(150);
			REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
			REG_READ(map->dpll);
			/* Wait for the clocks to stabilize. */
			udelay(150);
		}

		/* Enable the plane */
		temp = REG_READ(map->cntr);
		if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
			REG_WRITE(map->cntr,
				  temp | DISPLAY_PLANE_ENABLE);
			/* Flush the plane changes */
			REG_WRITE(map->base, REG_READ(map->base));
		}

		udelay(150);

		/* Enable the pipe */
		temp = REG_READ(map->conf);
		if ((temp & PIPEACONF_ENABLE) == 0)
			REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);

		temp = REG_READ(map->status);
		temp &= ~(0xFFFF);
		temp |= PIPE_FIFO_UNDERRUN;
		REG_WRITE(map->status, temp);
		REG_READ(map->status);

		gma_crtc_load_lut(crtc);

		/* Give the overlay scaler a chance to enable
		 * if it's on this pipe */
		/* psb_intel_crtc_dpms_video(crtc, true); TODO */

		drm_crtc_vblank_on(crtc);
		break;
	case DRM_MODE_DPMS_OFF:
		if (!gma_crtc->active)
			break;

		gma_crtc->active = false;

		/* Give the overlay scaler a chance to disable
		 * if it's on this pipe */
		/* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */

		/* Disable the VGA plane that we never use */
		REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);

		/* Turn off vblank interrupts */
		drm_crtc_vblank_off(crtc);

		/* Wait for vblank for the disable to take effect */
		gma_wait_for_vblank(dev);

		/* Disable plane */
		temp = REG_READ(map->cntr);
		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
			REG_WRITE(map->cntr,
				  temp & ~DISPLAY_PLANE_ENABLE);
			/* Flush the plane changes */
			REG_WRITE(map->base, REG_READ(map->base));
			REG_READ(map->base);
		}

		/* Disable pipe */
		temp = REG_READ(map->conf);
		if ((temp & PIPEACONF_ENABLE) != 0) {
			REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
			REG_READ(map->conf);
		}

		/* Wait for vblank for the disable to take effect. */
		gma_wait_for_vblank(dev);

		udelay(150);

		/* Disable DPLL */
		temp = REG_READ(map->dpll);
		if ((temp & DPLL_VCO_ENABLE) != 0) {
			REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
			REG_READ(map->dpll);
		}

		/* Wait for the clocks to turn off. */
		udelay(150);
		break;
	}

	if (IS_CDV(dev))
		dev_priv->ops->update_wm(dev, crtc);

	/* Set FIFO watermarks */
	REG_WRITE(DSPARB, 0x3F3E);
}

static int gma_crtc_cursor_set(struct drm_crtc *crtc,
			       struct drm_file *file_priv, uint32_t handle,
			       uint32_t width, uint32_t height)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	int pipe = gma_crtc->pipe;
	uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
	uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
	uint32_t temp;
	size_t addr = 0;
	struct psb_gem_object *pobj;
	struct psb_gem_object *cursor_pobj = gma_crtc->cursor_pobj;
	struct drm_gem_object *obj;
	void *tmp_dst;
	int ret = 0, i, cursor_pages;

	/* If we didn't get a handle then turn the cursor off */
	if (!handle) {
		temp = CURSOR_MODE_DISABLE;
		if (gma_power_begin(dev, false)) {
			REG_WRITE(control, temp);
			REG_WRITE(base, 0);
			gma_power_end(dev);
		}

		/* Unpin the old GEM object */
		if (gma_crtc->cursor_obj) {
			pobj = to_psb_gem_object(gma_crtc->cursor_obj);
			psb_gem_unpin(pobj);
			drm_gem_object_put(gma_crtc->cursor_obj);
			gma_crtc->cursor_obj = NULL;
		}
		return 0;
	}

	/* Currently we only support 64x64 cursors */
	if (width != 64 || height != 64) {
		dev_dbg(dev->dev, "We currently only support 64x64 cursors\n");
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock;
	}

	if (obj->size < width * height * 4) {
		dev_dbg(dev->dev, "Buffer is too small\n");
		ret = -ENOMEM;
		goto unref_cursor;
	}

	pobj = to_psb_gem_object(obj);

	/* Pin the memory into the GTT */
	ret = psb_gem_pin(pobj);
	if (ret) {
		dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
		goto unref_cursor;
	}

	if (dev_priv->ops->cursor_needs_phys) {
		if (!cursor_pobj) {
			dev_err(dev->dev, "No hardware cursor mem available");
			ret = -ENOMEM;
			goto unref_cursor;
		}

		cursor_pages = obj->size / PAGE_SIZE;
		if (cursor_pages > 4)
			cursor_pages = 4; /* Prevent overflow */

		/* Copy the cursor to cursor mem */
		tmp_dst = dev_priv->vram_addr + cursor_pobj->offset;
		for (i = 0; i < cursor_pages; i++) {
			memcpy_from_page(tmp_dst, pobj->pages[i], 0, PAGE_SIZE);
			tmp_dst += PAGE_SIZE;
		}

		addr = gma_crtc->cursor_addr;
	} else {
		addr = pobj->offset;
		gma_crtc->cursor_addr = addr;
	}

	temp = 0;
	/* set the pipe for the cursor */
	temp |= (pipe << 28);
	temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;

	if (gma_power_begin(dev, false)) {
		REG_WRITE(control, temp);
		REG_WRITE(base, addr);
		gma_power_end(dev);
	}

	/* unpin the old bo */
	if (gma_crtc->cursor_obj) {
		pobj = to_psb_gem_object(gma_crtc->cursor_obj);
		psb_gem_unpin(pobj);
		drm_gem_object_put(gma_crtc->cursor_obj);
	}

	gma_crtc->cursor_obj = obj;
unlock:
	return ret;

unref_cursor:
	drm_gem_object_put(obj);
	return ret;
}

static int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	int pipe = gma_crtc->pipe;
	uint32_t temp = 0;
	uint32_t addr;

	if (x < 0) {
		temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
		x = -x;
	}
	if (y < 0) {
		temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
		y = -y;
	}

	temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
	temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);

	addr = gma_crtc->cursor_addr;

	if (gma_power_begin(dev, false)) {
		REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
		REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, addr);
		gma_power_end(dev);
	}
	return 0;
}
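
/*
 * Worked example for the cursor position encoding above (illustrative only,
 * values assumed): the position register takes sign-magnitude X/Y fields,
 * so moving the cursor to x = -16, y = 32 is encoded as magnitude 16 with
 * CURSOR_POS_SIGN set in the X field, and magnitude 32 with no sign bit in
 * the Y field.
 */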

void gma_crtc_prepare(struct drm_crtc *crtc)
{
	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
}

void gma_crtc_commit(struct drm_crtc *crtc)
{
	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
}

void gma_crtc_disable(struct drm_crtc *crtc)
{
	struct psb_gem_object *pobj;
	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;

	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);

	if (crtc->primary->fb) {
		pobj = to_psb_gem_object(crtc->primary->fb->obj[0]);
		psb_gem_unpin(pobj);
	}
}

void gma_crtc_destroy(struct drm_crtc *crtc)
{
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);

	if (gma_crtc->cursor_pobj)
		drm_gem_object_put(&gma_crtc->cursor_pobj->base);

	kfree(gma_crtc->crtc_state);
	drm_crtc_cleanup(crtc);
	kfree(gma_crtc);
}

int gma_crtc_page_flip(struct drm_crtc *crtc,
		       struct drm_framebuffer *fb,
		       struct drm_pending_vblank_event *event,
		       uint32_t page_flip_flags,
		       struct drm_modeset_acquire_ctx *ctx)
{
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	struct drm_framebuffer *current_fb = crtc->primary->fb;
	struct drm_framebuffer *old_fb = crtc->primary->old_fb;
	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
	struct drm_device *dev = crtc->dev;
	unsigned long flags;
	int ret;

	if (!crtc_funcs->mode_set_base)
		return -EINVAL;

	/* Using mode_set_base requires the new fb to be set already. */
	crtc->primary->fb = fb;

	if (event) {
		spin_lock_irqsave(&dev->event_lock, flags);

		WARN_ON(drm_crtc_vblank_get(crtc) != 0);

		gma_crtc->page_flip_event = event;
		spin_unlock_irqrestore(&dev->event_lock, flags);

		/* Call this locked if we want an event at vblank interrupt. */
		ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb);
		if (ret) {
			spin_lock_irqsave(&dev->event_lock, flags);
			if (gma_crtc->page_flip_event) {
				gma_crtc->page_flip_event = NULL;
				drm_crtc_vblank_put(crtc);
			}
			spin_unlock_irqrestore(&dev->event_lock, flags);
		}
	} else {
		ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb);
	}

	/* Restore previous fb in case of failure. */
	if (ret)
		crtc->primary->fb = current_fb;

	return ret;
}
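
/*
 * Note on the page-flip path above (descriptive, not part of the original
 * source): when userspace asks for a completion event, a vblank reference
 * is taken and gma_crtc->page_flip_event is armed before mode_set_base()
 * is called, the intent being that the code running at the vblank
 * interrupt completes and sends the event. If mode_set_base() fails, the
 * event pointer is cleared again under event_lock and the vblank reference
 * is dropped, so no stale event can be delivered.
 */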

int gma_crtc_set_config(struct drm_mode_set *set,
			struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = set->crtc->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	int ret;

	if (!dev_priv->rpm_enabled)
		return drm_crtc_helper_set_config(set, ctx);

	pm_runtime_forbid(dev->dev);
	ret = drm_crtc_helper_set_config(set, ctx);
	pm_runtime_allow(dev->dev);

	return ret;
}

const struct drm_crtc_funcs gma_crtc_funcs = {
	.cursor_set = gma_crtc_cursor_set,
	.cursor_move = gma_crtc_cursor_move,
	.gamma_set = gma_crtc_gamma_set,
	.set_config = gma_crtc_set_config,
	.destroy = gma_crtc_destroy,
	.page_flip = gma_crtc_page_flip,
	.enable_vblank = gma_crtc_enable_vblank,
	.disable_vblank = gma_crtc_disable_vblank,
	.get_vblank_counter = gma_crtc_get_vblank_counter,
};

/*
 * Save HW states of given crtc
 */
void gma_crtc_save(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	struct psb_intel_crtc_state *crtc_state = gma_crtc->crtc_state;
	const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
	uint32_t palette_reg;
	int i;

	if (!crtc_state) {
		dev_err(dev->dev, "No CRTC state found\n");
		return;
	}

	crtc_state->saveDSPCNTR = REG_READ(map->cntr);
	crtc_state->savePIPECONF = REG_READ(map->conf);
	crtc_state->savePIPESRC = REG_READ(map->src);
	crtc_state->saveFP0 = REG_READ(map->fp0);
	crtc_state->saveFP1 = REG_READ(map->fp1);
	crtc_state->saveDPLL = REG_READ(map->dpll);
	crtc_state->saveHTOTAL = REG_READ(map->htotal);
	crtc_state->saveHBLANK = REG_READ(map->hblank);
	crtc_state->saveHSYNC = REG_READ(map->hsync);
	crtc_state->saveVTOTAL = REG_READ(map->vtotal);
	crtc_state->saveVBLANK = REG_READ(map->vblank);
	crtc_state->saveVSYNC = REG_READ(map->vsync);
	crtc_state->saveDSPSTRIDE = REG_READ(map->stride);

	/* NOTE: DSPSIZE DSPPOS only for psb */
	crtc_state->saveDSPSIZE = REG_READ(map->size);
	crtc_state->saveDSPPOS = REG_READ(map->pos);

	crtc_state->saveDSPBASE = REG_READ(map->base);

	palette_reg = map->palette;
	for (i = 0; i < 256; ++i)
		crtc_state->savePalette[i] = REG_READ(palette_reg + (i << 2));
}

/*
 * Restore HW states of given crtc
 */
void gma_crtc_restore(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	struct psb_intel_crtc_state *crtc_state = gma_crtc->crtc_state;
	const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
	uint32_t palette_reg;
	int i;

	if (!crtc_state) {
		dev_err(dev->dev, "No crtc state\n");
		return;
	}

	if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
		REG_WRITE(map->dpll,
			  crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
		REG_READ(map->dpll);
		udelay(150);
	}

	REG_WRITE(map->fp0, crtc_state->saveFP0);
	REG_READ(map->fp0);

	REG_WRITE(map->fp1, crtc_state->saveFP1);
	REG_READ(map->fp1);

	REG_WRITE(map->dpll, crtc_state->saveDPLL);
	REG_READ(map->dpll);
	udelay(150);

	REG_WRITE(map->htotal, crtc_state->saveHTOTAL);
	REG_WRITE(map->hblank, crtc_state->saveHBLANK);
	REG_WRITE(map->hsync, crtc_state->saveHSYNC);
	REG_WRITE(map->vtotal, crtc_state->saveVTOTAL);
	REG_WRITE(map->vblank, crtc_state->saveVBLANK);
	REG_WRITE(map->vsync, crtc_state->saveVSYNC);
	REG_WRITE(map->stride, crtc_state->saveDSPSTRIDE);

	REG_WRITE(map->size, crtc_state->saveDSPSIZE);
	REG_WRITE(map->pos, crtc_state->saveDSPPOS);

	REG_WRITE(map->src, crtc_state->savePIPESRC);
	REG_WRITE(map->base, crtc_state->saveDSPBASE);
	REG_WRITE(map->conf, crtc_state->savePIPECONF);

	gma_wait_for_vblank(dev);

	REG_WRITE(map->cntr, crtc_state->saveDSPCNTR);
	REG_WRITE(map->base, crtc_state->saveDSPBASE);

	gma_wait_for_vblank(dev);

	palette_reg = map->palette;
	for (i = 0; i < 256; ++i)
		REG_WRITE(palette_reg + (i << 2), crtc_state->savePalette[i]);
}

void gma_encoder_prepare(struct drm_encoder *encoder)
{
	const struct drm_encoder_helper_funcs *encoder_funcs =
		encoder->helper_private;
	/* lvds has its own version of prepare see psb_intel_lvds_prepare */
	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
}

void gma_encoder_commit(struct drm_encoder *encoder)
{
	const struct drm_encoder_helper_funcs *encoder_funcs =
		encoder->helper_private;
	/* lvds has its own version of commit see psb_intel_lvds_commit */
	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
}

void gma_encoder_destroy(struct drm_encoder *encoder)
{
	struct gma_encoder *intel_encoder = to_gma_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}

/* Currently there is only a 1:1 mapping of encoders and connectors */
struct drm_encoder *gma_best_encoder(struct drm_connector *connector)
{
	struct gma_encoder *gma_encoder = gma_attached_encoder(connector);

	return &gma_encoder->base;
}

void gma_connector_attach_encoder(struct gma_connector *connector,
				  struct gma_encoder *encoder)
{
	connector->encoder = encoder;
	drm_connector_attach_encoder(&connector->base,
				     &encoder->base);
}

#define GMA_PLL_INVALID(s) { /* DRM_ERROR(s); */ return false; }

bool gma_pll_is_valid(struct drm_crtc *crtc,
		      const struct gma_limit_t *limit,
		      struct gma_clock_t *clock)
{
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		GMA_PLL_INVALID("p1 out of range");
	if (clock->p < limit->p.min || limit->p.max < clock->p)
		GMA_PLL_INVALID("p out of range");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		GMA_PLL_INVALID("m2 out of range");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		GMA_PLL_INVALID("m1 out of range");
	/* On CDV m1 is always 0 */
	if (clock->m1 <= clock->m2 && clock->m1 != 0)
		GMA_PLL_INVALID("m1 <= m2 && m1 != 0");
	if (clock->m < limit->m.min || limit->m.max < clock->m)
		GMA_PLL_INVALID("m out of range");
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		GMA_PLL_INVALID("n out of range");
	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		GMA_PLL_INVALID("vco out of range");
	/* XXX: We may need to be checking "Dot clock"
	 * depending on the multiplier, connector, etc.,
	 * rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		GMA_PLL_INVALID("dot out of range");

	return true;
}

bool gma_find_best_pll(const struct gma_limit_t *limit,
		       struct drm_crtc *crtc, int target, int refclk,
		       struct gma_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	const struct gma_clock_funcs *clock_funcs =
		to_gma_crtc(crtc)->clock_funcs;
	struct gma_clock_t clock;
	int err = target;

	if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
	    (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
		/*
		 * For LVDS, if the panel is on, just rely on its current
		 * settings for dual-channel. We haven't figured out how to
		 * reliably set up different single/dual channel state, if we
		 * even can.
		 */
		if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	/* m1 is always 0 on CDV so the outmost loop will run just once */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     (clock.m2 < clock.m1 || clock.m1 == 0) &&
		      clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max;
				     clock.p1++) {
					int this_err;

					clock_funcs->clock(refclk, &clock);

					if (!clock_funcs->pll_is_valid(crtc,
								       limit, &clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return err != target;
}
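
/*
 * Usage sketch for gma_find_best_pll() (illustrative only; the caller
 * context, the clock_funcs->limit callback and adjusted_mode are assumed,
 * not taken from this file). A per-chip mode-set path would typically do
 * roughly:
 *
 *	const struct gma_limit_t *limit =
 *		gma_crtc->clock_funcs->limit(crtc, refclk);
 *	struct gma_clock_t clock;
 *
 *	if (!gma_find_best_pll(limit, crtc, adjusted_mode->clock, refclk,
 *			       &clock))
 *		return -EINVAL;	// no divider combination was close enough
 *
 * The search brute-forces m1, m2, n and p1 within the supplied limits,
 * derives each candidate's dot clock via clock_funcs->clock(), rejects
 * candidates that fail clock_funcs->pll_is_valid(), and keeps the one
 * whose dot clock is closest to the requested target, returning true only
 * if at least one valid candidate was found.
 */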