// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright © 2006-2011 Intel Corporation
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 *	Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
 */

#include <drm/drmP.h>
#include "gma_display.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
#include "psb_drv.h"
#include "framebuffer.h"

/**
 * gma_pipe_has_type - check what kind of output a CRTC is driving
 * @crtc: CRTC to inspect
 * @type: INTEL_OUTPUT_* encoder type to look for
 *
 * Returns whether any output on the specified pipe is of the specified type,
 * i.e. whether any connector whose encoder is currently routed to @crtc has
 * a gma encoder of type @type.
 */
bool gma_pipe_has_type(struct drm_crtc *crtc, int type)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *l_entry;

	/* Walk every connector; only those feeding this CRTC are relevant. */
	list_for_each_entry(l_entry, &mode_config->connector_list, head) {
		if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
			struct gma_encoder *gma_encoder =
				gma_attached_encoder(l_entry);
			if (gma_encoder->type == type)
				return true;
		}
	}

	return false;
}

/* Busy-delay long enough for at least one vertical blank to occur. */
void gma_wait_for_vblank(struct drm_device *dev)
{
	/* Wait for 20ms, i.e. one cycle at 50hz.
	 */
	mdelay(20);
}

/**
 * gma_pipe_set_base - program the primary plane's scanout parameters
 * @crtc: CRTC whose plane is being (re)pointed at a framebuffer
 * @x: horizontal panning offset into the framebuffer, in pixels
 * @y: vertical panning offset into the framebuffer, in lines
 * @old_fb: previously scanned-out framebuffer; unpinned on the way out
 *
 * Pins the new framebuffer into the GTT and writes stride, pixel format
 * and scanout base/offset registers for the pipe's display plane.
 * Returns 0 on success or a negative errno.
 */
int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
		      struct drm_framebuffer *old_fb)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct gtt_range *gtt;
	int pipe = gma_crtc->pipe;
	const struct psb_offset *map = &dev_priv->regmap[pipe];
	unsigned long start, offset;
	u32 dspcntr;
	int ret = 0;

	if (!gma_power_begin(dev, true))
		return 0;

	/* no fb bound: still unpin the old framebuffer, then bail */
	if (!fb) {
		dev_err(dev->dev, "No FB bound\n");
		goto gma_pipe_cleaner;
	}

	gtt = to_gtt_range(fb->obj[0]);

	/* We are displaying this buffer, make sure it is actually loaded
	   into the GTT */
	ret = psb_gtt_pin(gtt);
	if (ret < 0)
		goto gma_pipe_set_base_exit;
	start = gtt->offset;
	offset = y * fb->pitches[0] + x * fb->format->cpp[0];

	REG_WRITE(map->stride, fb->pitches[0]);

	dspcntr = REG_READ(map->cntr);
	dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;

	/* Select the plane pixel format from the fb's bits per pixel. */
	switch (fb->format->cpp[0] * 8) {
	case 8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case 16:
		if (fb->format->depth == 15)
			dspcntr |= DISPPLANE_15_16BPP;
		else
			dspcntr |= DISPPLANE_16BPP;
		break;
	case 24:
	case 32:
		dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
		break;
	default:
		dev_err(dev->dev, "Unknown color depth\n");
		ret = -EINVAL;
		goto gma_pipe_set_base_exit;
	}
	REG_WRITE(map->cntr, dspcntr);

	dev_dbg(dev->dev,
		"Writing base %08lX %08lX %d %d\n", start, offset, x, y);

	/* FIXME: Investigate whether this really is the base for psb and why
		  the linear offset is named base for the other chips.
	   map->surf should be the base and map->linoff the offset for all
	   chips */
	if (IS_PSB(dev)) {
		REG_WRITE(map->base, offset + start);
		REG_READ(map->base);
	} else {
		REG_WRITE(map->base, offset);
		REG_READ(map->base);
		REG_WRITE(map->surf, start);
		REG_READ(map->surf);
	}

gma_pipe_cleaner:
	/* If there was a previous display we can now unpin it */
	if (old_fb)
		psb_gtt_unpin(to_gtt_range(old_fb->obj[0]));

gma_pipe_set_base_exit:
	gma_power_end(dev);
	return ret;
}

/*
 * Loads the palette/gamma unit for the CRTC with the prepared values
 * held in crtc->gamma_store (plus the per-CRTC lut_adj fixups).  If the
 * hardware is powered down, the values are stashed in the software
 * register state instead.
 */
void gma_crtc_load_lut(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
	int palreg = map->palette;
	u16 *r, *g, *b;
	int i;

	/* The clocks have to be on to load the palette. */
	if (!crtc->enabled)
		return;

	/* gamma_store holds 3 consecutive arrays of gamma_size entries */
	r = crtc->gamma_store;
	g = r + crtc->gamma_size;
	b = g + crtc->gamma_size;

	if (gma_power_begin(dev, false)) {
		for (i = 0; i < 256; i++) {
			REG_WRITE(palreg + 4 * i,
				  (((*r++ >> 8) + gma_crtc->lut_adj[i]) << 16) |
				  (((*g++ >> 8) + gma_crtc->lut_adj[i]) << 8) |
				  ((*b++ >> 8) + gma_crtc->lut_adj[i]));
		}
		gma_power_end(dev);
	} else {
		for (i = 0; i < 256; i++) {
			/* FIXME: Why pipe[0] and not pipe[..._crtc->pipe]? */
			dev_priv->regs.pipe[0].palette[i] =
				(((*r++ >> 8) + gma_crtc->lut_adj[i]) << 16) |
				(((*g++ >> 8) + gma_crtc->lut_adj[i]) << 8) |
				((*b++ >> 8) + gma_crtc->lut_adj[i]);
		}

	}
}

/*
 * .gamma_set hook.  The red/green/blue arguments are ignored here;
 * NOTE(review): this relies on the DRM core having already copied the
 * new ramp into crtc->gamma_store before calling us — confirm against
 * drm_mode_gamma_set_ioctl.
 */
int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue,
		       u32 size,
		       struct drm_modeset_acquire_ctx *ctx)
{
	gma_crtc_load_lut(crtc);

	return 0;
}

/**
 * Sets the power management mode of the pipe and plane.
 * @crtc: CRTC to power up or down
 * @mode: DRM_MODE_DPMS_* power state
 *
 * This code should probably grow support for turning the cursor off and back
 * on appropriately at the same time as we're turning the pipe off/on.
 */
void gma_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	int pipe = gma_crtc->pipe;
	const struct psb_offset *map = &dev_priv->regmap[pipe];
	u32 temp;

	/* XXX: When our outputs are all unaware of DPMS modes other than off
	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
	 */

	if (IS_CDV(dev))
		dev_priv->ops->disable_sr(dev);

	switch (mode) {
	case DRM_MODE_DPMS_ON:
	case DRM_MODE_DPMS_STANDBY:
	case DRM_MODE_DPMS_SUSPEND:
		if (gma_crtc->active)
			break;

		gma_crtc->active = true;

		/* Enable the DPLL.  The write/read-back/delay sequence below
		 * is deliberate: each read posts the preceding write, and the
		 * enable is written twice with settle delays in between. */
		temp = REG_READ(map->dpll);
		if ((temp & DPLL_VCO_ENABLE) == 0) {
			REG_WRITE(map->dpll, temp);
			REG_READ(map->dpll);
			/* Wait for the clocks to stabilize. */
			udelay(150);
			REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
			REG_READ(map->dpll);
			/* Wait for the clocks to stabilize. */
			udelay(150);
			REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
			REG_READ(map->dpll);
			/* Wait for the clocks to stabilize.
			 */
			udelay(150);
		}

		/* Enable the plane */
		temp = REG_READ(map->cntr);
		if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
			REG_WRITE(map->cntr,
				  temp | DISPLAY_PLANE_ENABLE);
			/* Flush the plane changes */
			REG_WRITE(map->base, REG_READ(map->base));
		}

		udelay(150);

		/* Enable the pipe */
		temp = REG_READ(map->conf);
		if ((temp & PIPEACONF_ENABLE) == 0)
			REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);

		/* Clear the low status bits and set the FIFO underrun bit.
		 * NOTE(review): presumably write-one-to-clear semantics to
		 * ack a pending underrun — verify against hardware docs. */
		temp = REG_READ(map->status);
		temp &= ~(0xFFFF);
		temp |= PIPE_FIFO_UNDERRUN;
		REG_WRITE(map->status, temp);
		REG_READ(map->status);

		gma_crtc_load_lut(crtc);

		/* Give the overlay scaler a chance to enable
		 * if it's on this pipe */
		/* psb_intel_crtc_dpms_video(crtc, true); TODO */
		break;
	case DRM_MODE_DPMS_OFF:
		if (!gma_crtc->active)
			break;

		gma_crtc->active = false;

		/* Give the overlay scaler a chance to disable
		 * if it's on this pipe */
		/* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */

		/* Disable the VGA plane that we never use */
		REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);

		/* Turn off vblank interrupts */
		drm_crtc_vblank_off(crtc);

		/* Wait for vblank for the disable to take effect */
		gma_wait_for_vblank(dev);

		/* Disable plane */
		temp = REG_READ(map->cntr);
		if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
			REG_WRITE(map->cntr,
				  temp & ~DISPLAY_PLANE_ENABLE);
			/* Flush the plane changes */
			REG_WRITE(map->base, REG_READ(map->base));
			REG_READ(map->base);
		}

		/* Disable pipe */
		temp = REG_READ(map->conf);
		if ((temp & PIPEACONF_ENABLE) != 0) {
			REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
			REG_READ(map->conf);
		}

		/* Wait for vblank for the disable to take effect.
		 */
		gma_wait_for_vblank(dev);

		udelay(150);

		/* Disable DPLL */
		temp = REG_READ(map->dpll);
		if ((temp & DPLL_VCO_ENABLE) != 0) {
			REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
			REG_READ(map->dpll);
		}

		/* Wait for the clocks to turn off. */
		udelay(150);
		break;
	}

	if (IS_CDV(dev))
		dev_priv->ops->update_wm(dev, crtc);

	/* Set FIFO watermarks */
	REG_WRITE(DSPARB, 0x3F3E);
}

/**
 * gma_crtc_cursor_set - set (or clear) the hardware cursor image
 * @crtc: CRTC owning the cursor
 * @file_priv: DRM file the GEM handle is looked up against
 * @handle: GEM handle of the cursor buffer, 0 to disable the cursor
 * @width: cursor width in pixels (only 64 is supported)
 * @height: cursor height in pixels (only 64 is supported)
 *
 * Pins the new cursor buffer, points the cursor registers at it and
 * releases the previous cursor object.  Returns 0 on success or a
 * negative errno.
 */
int gma_crtc_cursor_set(struct drm_crtc *crtc,
			struct drm_file *file_priv,
			uint32_t handle,
			uint32_t width, uint32_t height)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	int pipe = gma_crtc->pipe;
	uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
	uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
	uint32_t temp;
	size_t addr = 0;
	struct gtt_range *gt;
	struct gtt_range *cursor_gt = gma_crtc->cursor_gt;
	struct drm_gem_object *obj;
	void *tmp_dst, *tmp_src;
	int ret = 0, i, cursor_pages;

	/* If we didn't get a handle then turn the cursor off */
	if (!handle) {
		temp = CURSOR_MODE_DISABLE;
		if (gma_power_begin(dev, false)) {
			REG_WRITE(control, temp);
			REG_WRITE(base, 0);
			gma_power_end(dev);
		}

		/* Unpin the old GEM object */
		if (gma_crtc->cursor_obj) {
			gt = container_of(gma_crtc->cursor_obj,
					  struct gtt_range, gem);
			psb_gtt_unpin(gt);
			drm_gem_object_put_unlocked(gma_crtc->cursor_obj);
			gma_crtc->cursor_obj = NULL;
		}
		return 0;
	}

	/* Currently we only support 64x64 cursors */
	if (width != 64 || height != 64) {
		dev_dbg(dev->dev, "We currently only support 64x64 cursors\n");
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Buffer must hold a full 64x64 ARGB cursor */
	if (obj->size < width * height * 4) {
dev_dbg(dev->dev, "Buffer is too small\n"); 366 ret = -ENOMEM; 367 goto unref_cursor; 368 } 369 370 gt = container_of(obj, struct gtt_range, gem); 371 372 /* Pin the memory into the GTT */ 373 ret = psb_gtt_pin(gt); 374 if (ret) { 375 dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle); 376 goto unref_cursor; 377 } 378 379 if (dev_priv->ops->cursor_needs_phys) { 380 if (cursor_gt == NULL) { 381 dev_err(dev->dev, "No hardware cursor mem available"); 382 ret = -ENOMEM; 383 goto unref_cursor; 384 } 385 386 /* Prevent overflow */ 387 if (gt->npage > 4) 388 cursor_pages = 4; 389 else 390 cursor_pages = gt->npage; 391 392 /* Copy the cursor to cursor mem */ 393 tmp_dst = dev_priv->vram_addr + cursor_gt->offset; 394 for (i = 0; i < cursor_pages; i++) { 395 tmp_src = kmap(gt->pages[i]); 396 memcpy(tmp_dst, tmp_src, PAGE_SIZE); 397 kunmap(gt->pages[i]); 398 tmp_dst += PAGE_SIZE; 399 } 400 401 addr = gma_crtc->cursor_addr; 402 } else { 403 addr = gt->offset; 404 gma_crtc->cursor_addr = addr; 405 } 406 407 temp = 0; 408 /* set the pipe for the cursor */ 409 temp |= (pipe << 28); 410 temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; 411 412 if (gma_power_begin(dev, false)) { 413 REG_WRITE(control, temp); 414 REG_WRITE(base, addr); 415 gma_power_end(dev); 416 } 417 418 /* unpin the old bo */ 419 if (gma_crtc->cursor_obj) { 420 gt = container_of(gma_crtc->cursor_obj, struct gtt_range, gem); 421 psb_gtt_unpin(gt); 422 drm_gem_object_put_unlocked(gma_crtc->cursor_obj); 423 } 424 425 gma_crtc->cursor_obj = obj; 426 unlock: 427 return ret; 428 429 unref_cursor: 430 drm_gem_object_put_unlocked(obj); 431 return ret; 432 } 433 434 int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) 435 { 436 struct drm_device *dev = crtc->dev; 437 struct gma_crtc *gma_crtc = to_gma_crtc(crtc); 438 int pipe = gma_crtc->pipe; 439 uint32_t temp = 0; 440 uint32_t addr; 441 442 if (x < 0) { 443 temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT); 444 x = -x; 445 } 446 if (y < 0) { 447 temp |= 
			(CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
		y = -y;
	}

	temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
	temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);

	addr = gma_crtc->cursor_addr;

	if (gma_power_begin(dev, false)) {
		REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
		REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, addr);
		gma_power_end(dev);
	}
	return 0;
}

/* CRTC helper: power the pipe down before a mode set */
void gma_crtc_prepare(struct drm_crtc *crtc)
{
	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
}

/* CRTC helper: power the pipe back up after a mode set */
void gma_crtc_commit(struct drm_crtc *crtc)
{
	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
}

/* Power the pipe down and unpin the framebuffer it was scanning out */
void gma_crtc_disable(struct drm_crtc *crtc)
{
	struct gtt_range *gt;
	const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;

	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);

	if (crtc->primary->fb) {
		gt = to_gtt_range(crtc->primary->fb->obj[0]);
		psb_gtt_unpin(gt);
	}
}

/* Free the gma CRTC wrapper and its saved register state */
void gma_crtc_destroy(struct drm_crtc *crtc)
{
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);

	kfree(gma_crtc->crtc_state);
	drm_crtc_cleanup(crtc);
	kfree(gma_crtc);
}

/*
 * Mode-set entry point.  When runtime PM is enabled, forbid suspend
 * around the helper so the device cannot power down mid-reprogram.
 */
int gma_crtc_set_config(struct drm_mode_set *set,
			struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = set->crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	int ret;

	if (!dev_priv->rpm_enabled)
		return drm_crtc_helper_set_config(set, ctx);

	pm_runtime_forbid(&dev->pdev->dev);
	ret = drm_crtc_helper_set_config(set, ctx);
	pm_runtime_allow(&dev->pdev->dev);

	return ret;
}

/**
 * Save HW states of given crtc
 */
void gma_crtc_save(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gma_crtc
		       *gma_crtc = to_gma_crtc(crtc);
	struct psb_intel_crtc_state *crtc_state = gma_crtc->crtc_state;
	const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
	uint32_t palette_reg;
	int i;

	if (!crtc_state) {
		dev_err(dev->dev, "No CRTC state found\n");
		return;
	}

	/* Plane control, pipe config/source and PLL dividers */
	crtc_state->saveDSPCNTR = REG_READ(map->cntr);
	crtc_state->savePIPECONF = REG_READ(map->conf);
	crtc_state->savePIPESRC = REG_READ(map->src);
	crtc_state->saveFP0 = REG_READ(map->fp0);
	crtc_state->saveFP1 = REG_READ(map->fp1);
	crtc_state->saveDPLL = REG_READ(map->dpll);
	/* Pipe timings */
	crtc_state->saveHTOTAL = REG_READ(map->htotal);
	crtc_state->saveHBLANK = REG_READ(map->hblank);
	crtc_state->saveHSYNC = REG_READ(map->hsync);
	crtc_state->saveVTOTAL = REG_READ(map->vtotal);
	crtc_state->saveVBLANK = REG_READ(map->vblank);
	crtc_state->saveVSYNC = REG_READ(map->vsync);
	crtc_state->saveDSPSTRIDE = REG_READ(map->stride);

	/* NOTE: DSPSIZE DSPPOS only for psb */
	crtc_state->saveDSPSIZE = REG_READ(map->size);
	crtc_state->saveDSPPOS = REG_READ(map->pos);

	crtc_state->saveDSPBASE = REG_READ(map->base);

	/* Palette entries are consecutive 32-bit registers */
	palette_reg = map->palette;
	for (i = 0; i < 256; ++i)
		crtc_state->savePalette[i] = REG_READ(palette_reg + (i << 2));
}

/**
 * Restore HW states of given crtc
 */
void gma_crtc_restore(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
	struct psb_intel_crtc_state *crtc_state = gma_crtc->crtc_state;
	const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
	uint32_t palette_reg;
	int i;

	if (!crtc_state) {
		dev_err(dev->dev, "No crtc state\n");
		return;
	}

	/* If the DPLL was running, drop VCO enable first and let it settle */
	if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
		REG_WRITE(map->dpll,
			  crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
		REG_READ(map->dpll);
		udelay(150);
	}

	REG_WRITE(map->fp0, crtc_state->saveFP0);
	REG_READ(map->fp0);

	REG_WRITE(map->fp1, crtc_state->saveFP1);
	REG_READ(map->fp1);

	REG_WRITE(map->dpll, crtc_state->saveDPLL);
	REG_READ(map->dpll);
	/* Let the PLL settle before restoring timings */
	udelay(150);

	REG_WRITE(map->htotal, crtc_state->saveHTOTAL);
	REG_WRITE(map->hblank, crtc_state->saveHBLANK);
	REG_WRITE(map->hsync, crtc_state->saveHSYNC);
	REG_WRITE(map->vtotal, crtc_state->saveVTOTAL);
	REG_WRITE(map->vblank, crtc_state->saveVBLANK);
	REG_WRITE(map->vsync, crtc_state->saveVSYNC);
	REG_WRITE(map->stride, crtc_state->saveDSPSTRIDE);

	REG_WRITE(map->size, crtc_state->saveDSPSIZE);
	REG_WRITE(map->pos, crtc_state->saveDSPPOS);

	REG_WRITE(map->src, crtc_state->savePIPESRC);
	REG_WRITE(map->base, crtc_state->saveDSPBASE);
	REG_WRITE(map->conf, crtc_state->savePIPECONF);

	gma_wait_for_vblank(dev);

	REG_WRITE(map->cntr, crtc_state->saveDSPCNTR);
	REG_WRITE(map->base, crtc_state->saveDSPBASE);

	gma_wait_for_vblank(dev);

	palette_reg = map->palette;
	for (i = 0; i < 256; ++i)
		REG_WRITE(palette_reg + (i << 2), crtc_state->savePalette[i]);
}

/* Encoder helper: power the encoder down before a mode set */
void gma_encoder_prepare(struct drm_encoder *encoder)
{
	const struct drm_encoder_helper_funcs *encoder_funcs =
		encoder->helper_private;
	/* lvds has its own version of prepare see psb_intel_lvds_prepare */
	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
}

/* Encoder helper: power the encoder back up after a mode set */
void gma_encoder_commit(struct drm_encoder *encoder)
{
	const struct drm_encoder_helper_funcs *encoder_funcs =
		encoder->helper_private;
	/* lvds has its own version of commit see psb_intel_lvds_commit */
	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
}

/* Unregister the encoder and free the gma wrapper around it */
void gma_encoder_destroy(struct drm_encoder *encoder)
{
	struct gma_encoder *intel_encoder = to_gma_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}

/* Currently there is only a 1:1
   mapping of encoders and connectors */
struct drm_encoder *gma_best_encoder(struct drm_connector *connector)
{
	struct gma_encoder *gma_encoder = gma_attached_encoder(connector);

	return &gma_encoder->base;
}

/* Record the encoder on the gma connector and attach it in the DRM core */
void gma_connector_attach_encoder(struct gma_connector *connector,
				  struct gma_encoder *encoder)
{
	connector->encoder = encoder;
	drm_connector_attach_encoder(&connector->base,
				     &encoder->base);
}

/* NOTE: expands to a "return false" from the enclosing function */
#define GMA_PLL_INVALID(s) { /* DRM_ERROR(s); */ return false; }

/*
 * Validate a candidate PLL configuration against the per-chip limits.
 * Returns false (via GMA_PLL_INVALID) as soon as any divider or derived
 * clock falls outside its allowed range.
 */
bool gma_pll_is_valid(struct drm_crtc *crtc,
		      const struct gma_limit_t *limit,
		      struct gma_clock_t *clock)
{
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		GMA_PLL_INVALID("p1 out of range");
	if (clock->p < limit->p.min || limit->p.max < clock->p)
		GMA_PLL_INVALID("p out of range");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		GMA_PLL_INVALID("m2 out of range");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		GMA_PLL_INVALID("m1 out of range");
	/* On CDV m1 is always 0 */
	if (clock->m1 <= clock->m2 && clock->m1 != 0)
		GMA_PLL_INVALID("m1 <= m2 && m1 != 0");
	if (clock->m < limit->m.min || limit->m.max < clock->m)
		GMA_PLL_INVALID("m out of range");
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		GMA_PLL_INVALID("n out of range");
	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		GMA_PLL_INVALID("vco out of range");
	/* XXX: We may need to be checking "Dot clock"
	 * depending on the multiplier, connector, etc.,
	 * rather than just a single range.
686 */ 687 if (clock->dot < limit->dot.min || limit->dot.max < clock->dot) 688 GMA_PLL_INVALID("dot out of range"); 689 690 return true; 691 } 692 693 bool gma_find_best_pll(const struct gma_limit_t *limit, 694 struct drm_crtc *crtc, int target, int refclk, 695 struct gma_clock_t *best_clock) 696 { 697 struct drm_device *dev = crtc->dev; 698 const struct gma_clock_funcs *clock_funcs = 699 to_gma_crtc(crtc)->clock_funcs; 700 struct gma_clock_t clock; 701 int err = target; 702 703 if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && 704 (REG_READ(LVDS) & LVDS_PORT_EN) != 0) { 705 /* 706 * For LVDS, if the panel is on, just rely on its current 707 * settings for dual-channel. We haven't figured out how to 708 * reliably set up different single/dual channel state, if we 709 * even can. 710 */ 711 if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) == 712 LVDS_CLKB_POWER_UP) 713 clock.p2 = limit->p2.p2_fast; 714 else 715 clock.p2 = limit->p2.p2_slow; 716 } else { 717 if (target < limit->p2.dot_limit) 718 clock.p2 = limit->p2.p2_slow; 719 else 720 clock.p2 = limit->p2.p2_fast; 721 } 722 723 memset(best_clock, 0, sizeof(*best_clock)); 724 725 /* m1 is always 0 on CDV so the outmost loop will run just once */ 726 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) { 727 for (clock.m2 = limit->m2.min; 728 (clock.m2 < clock.m1 || clock.m1 == 0) && 729 clock.m2 <= limit->m2.max; clock.m2++) { 730 for (clock.n = limit->n.min; 731 clock.n <= limit->n.max; clock.n++) { 732 for (clock.p1 = limit->p1.min; 733 clock.p1 <= limit->p1.max; 734 clock.p1++) { 735 int this_err; 736 737 clock_funcs->clock(refclk, &clock); 738 739 if (!clock_funcs->pll_is_valid(crtc, 740 limit, &clock)) 741 continue; 742 743 this_err = abs(clock.dot - target); 744 if (this_err < err) { 745 *best_clock = clock; 746 err = this_err; 747 } 748 } 749 } 750 } 751 } 752 753 return err != target; 754 } 755