/*
 * Copyright (C) 2008 Maarten Maathuis.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <acpi/video.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "nouveau_crtc.h"
#include "nouveau_gem.h"
#include "nouveau_connector.h"
#include "nv50_display.h"

#include <nvif/class.h>
#include <nvif/if0011.h>
#include <nvif/if0013.h>
#include <dispnv50/crc.h>

int
nouveau_display_vblank_enable(struct drm_crtc *crtc)
{
	struct nouveau_crtc *nv_crtc;

	nv_crtc = nouveau_crtc(crtc);
	nvif_event_allow(&nv_crtc->vblank);

	return 0;
}

void
nouveau_display_vblank_disable(struct drm_crtc *crtc)
{
	struct nouveau_crtc *nv_crtc;

	nv_crtc = nouveau_crtc(crtc);
	nvif_event_block(&nv_crtc->vblank);
}

static inline int
calc(int blanks, int blanke, int total, int line)
{
	if (blanke >= blanks) {
		if (line >= blanks)
			line -= total;
	} else {
		if (line >= blanks)
			line -= total;
		line -= blanke + 1;
	}
	return line;
}
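
/*
 * Note (added for clarity): calc() rebases the raw scanline so that the
 * value reported through *vpos below goes negative while inside the
 * vertical blanking region, which matches the convention the DRM vblank
 * timestamping helpers expect (negative in vblank, 0 at the first active
 * line).
 *
 * Illustrative numbers only, not taken from real hardware: with
 * vtotal = 1125, vblanks = 1084 and vblanke = 1124, a raw vline of 1090
 * yields 1090 - 1125 = -35, i.e. 35 lines before active scanout resumes,
 * while a vline of 100 is left untouched (inside the active region).
 */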
static bool
nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos,
				ktime_t *stime, ktime_t *etime)
{
	struct drm_vblank_crtc *vblank = &crtc->dev->vblank[drm_crtc_index(crtc)];
	struct nvif_head *head = &nouveau_crtc(crtc)->head;
	struct nvif_head_scanoutpos_v0 args;
	int retry = 20;
	bool ret = false;

	args.version = 0;

	do {
		ret = nvif_mthd(&head->object, NVIF_HEAD_V0_SCANOUTPOS, &args, sizeof(args));
		if (ret != 0)
			return false;

		if (args.vline) {
			ret = true;
			break;
		}

		if (retry) ndelay(vblank->linedur_ns);
	} while (retry--);

	*hpos = args.hline;
	*vpos = calc(args.vblanks, args.vblanke, args.vtotal, args.vline);
	if (stime) *stime = ns_to_ktime(args.time[0]);
	if (etime) *etime = ns_to_ktime(args.time[1]);

	return ret;
}

bool
nouveau_display_scanoutpos(struct drm_crtc *crtc,
			   bool in_vblank_irq, int *vpos, int *hpos,
			   ktime_t *stime, ktime_t *etime,
			   const struct drm_display_mode *mode)
{
	return nouveau_display_scanoutpos_head(crtc, vpos, hpos,
					       stime, etime);
}

static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
	.destroy = drm_gem_fb_destroy,
	.create_handle = drm_gem_fb_create_handle,
};

static void
nouveau_decode_mod(struct nouveau_drm *drm,
		   uint64_t modifier,
		   uint32_t *tile_mode,
		   uint8_t *kind)
{
	struct nouveau_display *disp = nouveau_display(drm->dev);
	BUG_ON(!tile_mode || !kind);

	if (modifier == DRM_FORMAT_MOD_LINEAR) {
		/* tile_mode will not be used in this case */
		*tile_mode = 0;
		*kind = 0;
	} else {
		/*
		 * Extract the block height and kind from the corresponding
		 * modifier fields.  See drm_fourcc.h for details.
		 */

		if ((modifier & (0xffull << 12)) == 0ull) {
			/* Legacy modifier.  Translate to this dev's 'kind.' */
			modifier |= disp->format_modifiers[0] & (0xffull << 12);
		}

		*tile_mode = (uint32_t)(modifier & 0xF);
		*kind = (uint8_t)((modifier >> 12) & 0xFF);

		if (drm->client.device.info.chipset >= 0xc0)
			*tile_mode <<= 4;
	}
}

void
nouveau_framebuffer_get_layout(struct drm_framebuffer *fb,
			       uint32_t *tile_mode,
			       uint8_t *kind)
{
	if (fb->flags & DRM_MODE_FB_MODIFIERS) {
		struct nouveau_drm *drm = nouveau_drm(fb->dev);

		nouveau_decode_mod(drm, fb->modifier, tile_mode, kind);
	} else {
		const struct nouveau_bo *nvbo = nouveau_gem_object(fb->obj[0]);

		*tile_mode = nvbo->mode;
		*kind = nvbo->kind;
	}
}

static const u64 legacy_modifiers[] = {
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5),
	DRM_FORMAT_MOD_INVALID
};

static int
nouveau_validate_decode_mod(struct nouveau_drm *drm,
			    uint64_t modifier,
			    uint32_t *tile_mode,
			    uint8_t *kind)
{
	struct nouveau_display *disp = nouveau_display(drm->dev);
	int mod;

	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		return -EINVAL;
	}

	BUG_ON(!disp->format_modifiers);

	for (mod = 0;
	     (disp->format_modifiers[mod] != DRM_FORMAT_MOD_INVALID) &&
	     (disp->format_modifiers[mod] != modifier);
	     mod++);

	if (disp->format_modifiers[mod] == DRM_FORMAT_MOD_INVALID) {
		for (mod = 0;
		     (legacy_modifiers[mod] != DRM_FORMAT_MOD_INVALID) &&
		     (legacy_modifiers[mod] != modifier);
		     mod++);
		if (legacy_modifiers[mod] == DRM_FORMAT_MOD_INVALID)
			return -EINVAL;
	}

	nouveau_decode_mod(drm, modifier, tile_mode, kind);

	return 0;
}

static inline uint32_t
nouveau_get_width_in_blocks(uint32_t stride)
{
	/* GOBs per block in the x direction is always one, and GOBs are
	 * 64 bytes wide
	 */
	static const uint32_t log_block_width = 6;

	return (stride + (1 << log_block_width) - 1) >> log_block_width;
}

static inline uint32_t
nouveau_get_height_in_blocks(struct nouveau_drm *drm,
			     uint32_t height,
			     uint32_t log_block_height_in_gobs)
{
	uint32_t log_gob_height;
	uint32_t log_block_height;

	BUG_ON(drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA);

	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI)
		log_gob_height = 2;
	else
		log_gob_height = 3;

	log_block_height = log_block_height_in_gobs + log_gob_height;

	return (height + (1 << log_block_height) - 1) >> log_block_height;
}
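
/*
 * Worked example for the block-linear size check below (illustrative
 * numbers, not from the original code): on Fermi and newer a GOB is
 * 64 bytes wide by 8 rows tall (gob_size = 512).  For stride = 15360
 * bytes and h = 2160 with tile_mode = 4 (log2 of the block height in
 * GOBs):
 *
 *   bw      = DIV_ROUND_UP(15360, 64)        = 240 blocks
 *   bh      = DIV_ROUND_UP(2160, 1 << (4+3)) = 17 blocks
 *   bl_size = 240 * 17 * (1 << 4) * 512      = 33423360 bytes
 *
 * which, together with the plane offset, must fit inside the backing
 * buffer object.
 */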
static int
nouveau_check_bl_size(struct nouveau_drm *drm, struct nouveau_bo *nvbo,
		      uint32_t offset, uint32_t stride, uint32_t h,
		      uint32_t tile_mode)
{
	uint32_t gob_size, bw, bh;
	uint64_t bl_size;

	BUG_ON(drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA);

	if (drm->client.device.info.chipset >= 0xc0) {
		if (tile_mode & 0xF)
			return -EINVAL;
		tile_mode >>= 4;
	}

	if (tile_mode & 0xFFFFFFF0)
		return -EINVAL;

	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI)
		gob_size = 256;
	else
		gob_size = 512;

	bw = nouveau_get_width_in_blocks(stride);
	bh = nouveau_get_height_in_blocks(drm, h, tile_mode);

	bl_size = bw * bh * (1 << tile_mode) * gob_size;

	DRM_DEBUG_KMS("offset=%u stride=%u h=%u tile_mode=0x%02x bw=%u bh=%u gob_size=%u bl_size=%llu size=%zu\n",
		      offset, stride, h, tile_mode, bw, bh, gob_size, bl_size,
		      nvbo->bo.base.size);

	if (bl_size + offset > nvbo->bo.base.size)
		return -ERANGE;

	return 0;
}

int
nouveau_framebuffer_new(struct drm_device *dev,
			const struct drm_mode_fb_cmd2 *mode_cmd,
			struct drm_gem_object *gem,
			struct drm_framebuffer **pfb)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct drm_framebuffer *fb;
	const struct drm_format_info *info;
	unsigned int height, i;
	uint32_t tile_mode;
	uint8_t kind;
	int ret;

	/* YUV overlays have special requirements pre-NV50 */
	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA &&
	    (mode_cmd->pixel_format == DRM_FORMAT_YUYV ||
	     mode_cmd->pixel_format == DRM_FORMAT_UYVY ||
	     mode_cmd->pixel_format == DRM_FORMAT_NV12 ||
	     mode_cmd->pixel_format == DRM_FORMAT_NV21) &&
	    (mode_cmd->pitches[0] & 0x3f || /* align 64 */
	     mode_cmd->pitches[0] >= 0x10000 || /* at most 64k pitch */
	     (mode_cmd->pitches[1] && /* pitches for planes must match */
	      mode_cmd->pitches[0] != mode_cmd->pitches[1]))) {
		DRM_DEBUG_KMS("Unsuitable framebuffer: format: %p4cc; pitches: 0x%x 0x%x\n",
			      &mode_cmd->pixel_format,
			      mode_cmd->pitches[0], mode_cmd->pitches[1]);
		return -EINVAL;
	}

	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
		if (nouveau_validate_decode_mod(drm, mode_cmd->modifier[0],
						&tile_mode, &kind)) {
			DRM_DEBUG_KMS("Unsupported modifier: 0x%llx\n",
				      mode_cmd->modifier[0]);
			return -EINVAL;
		}
	} else {
		tile_mode = nvbo->mode;
		kind = nvbo->kind;
	}

	info = drm_get_format_info(dev, mode_cmd);

	for (i = 0; i < info->num_planes; i++) {
		height = drm_format_info_plane_height(info,
						      mode_cmd->height,
						      i);

		if (kind) {
			ret = nouveau_check_bl_size(drm, nvbo,
						    mode_cmd->offsets[i],
						    mode_cmd->pitches[i],
						    height, tile_mode);
			if (ret)
				return ret;
		} else {
			uint32_t size = mode_cmd->pitches[i] * height;

			if (size + mode_cmd->offsets[i] > nvbo->bo.base.size)
				return -ERANGE;
		}
	}

	if (!(fb = *pfb = kzalloc(sizeof(*fb), GFP_KERNEL)))
		return -ENOMEM;

	drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
	fb->obj[0] = gem;

	ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs);
	if (ret)
		kfree(fb);
	return ret;
}
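
/*
 * Note (added for clarity): the lookup below takes a reference on the GEM
 * object.  On success that reference is owned by the new framebuffer via
 * fb->obj[0] and released by drm_gem_fb_destroy(); on failure it is
 * dropped here before returning the error.
 */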
struct drm_framebuffer *
nouveau_user_framebuffer_create(struct drm_device *dev,
				struct drm_file *file_priv,
				const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_framebuffer *fb;
	struct drm_gem_object *gem;
	int ret;

	gem = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
	if (!gem)
		return ERR_PTR(-ENOENT);

	ret = nouveau_framebuffer_new(dev, mode_cmd, gem, &fb);
	if (ret == 0)
		return fb;

	drm_gem_object_put(gem);
	return ERR_PTR(ret);
}

static const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
	.fb_create = nouveau_user_framebuffer_create,
	.output_poll_changed = drm_fb_helper_output_poll_changed,
};

struct nouveau_drm_prop_enum_list {
	u8 gen_mask;
	int type;
	char *name;
};

static struct nouveau_drm_prop_enum_list underscan[] = {
	{ 6, UNDERSCAN_AUTO, "auto" },
	{ 6, UNDERSCAN_OFF, "off" },
	{ 6, UNDERSCAN_ON, "on" },
	{}
};

static struct nouveau_drm_prop_enum_list dither_mode[] = {
	{ 7, DITHERING_MODE_AUTO, "auto" },
	{ 7, DITHERING_MODE_OFF, "off" },
	{ 1, DITHERING_MODE_ON, "on" },
	{ 6, DITHERING_MODE_STATIC2X2, "static 2x2" },
	{ 6, DITHERING_MODE_DYNAMIC2X2, "dynamic 2x2" },
	{ 4, DITHERING_MODE_TEMPORAL, "temporal" },
	{}
};

static struct nouveau_drm_prop_enum_list dither_depth[] = {
	{ 6, DITHERING_DEPTH_AUTO, "auto" },
	{ 6, DITHERING_DEPTH_6BPC, "6 bpc" },
	{ 6, DITHERING_DEPTH_8BPC, "8 bpc" },
	{}
};

#define PROP_ENUM(p,gen,n,list) do { \
	struct nouveau_drm_prop_enum_list *l = (list); \
	int c = 0; \
	while (l->gen_mask) { \
		if (l->gen_mask & (1 << (gen))) \
			c++; \
		l++; \
	} \
	if (c) { \
		p = drm_property_create(dev, DRM_MODE_PROP_ENUM, n, c); \
		l = (list); \
		while (p && l->gen_mask) { \
			if (l->gen_mask & (1 << (gen))) { \
				drm_property_add_enum(p, l->type, l->name); \
			} \
			l++; \
		} \
	} \
} while(0)
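
/*
 * The gen_mask values in the tables above are bitmasks of display
 * generations: bit 0 = pre-NV50, bit 1 = NV50..GF110, bit 2 = GF110+
 * (matching the 'gen' chosen in nouveau_display_create_properties()).
 * So a mask of 7 exposes an entry on every generation, 6 only on NV50
 * and later, 4 only on GF110 and later, and 1 only on pre-NV50 hardware.
 */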
void
nouveau_display_hpd_resume(struct drm_device *dev)
{
	struct nouveau_drm *drm = nouveau_drm(dev);

	spin_lock_irq(&drm->hpd_lock);
	drm->hpd_pending = ~0;
	spin_unlock_irq(&drm->hpd_lock);

	schedule_work(&drm->hpd_work);
}

static void
nouveau_display_hpd_work(struct work_struct *work)
{
	struct nouveau_drm *drm = container_of(work, typeof(*drm), hpd_work);
	struct drm_device *dev = drm->dev;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	u32 pending;
	bool changed = false;

	pm_runtime_get_sync(dev->dev);

	spin_lock_irq(&drm->hpd_lock);
	pending = drm->hpd_pending;
	drm->hpd_pending = 0;
	spin_unlock_irq(&drm->hpd_lock);

	/* Nothing to do, exit early without updating the last busy counter */
	if (!pending)
		goto noop;

	mutex_lock(&dev->mode_config.mutex);
	drm_connector_list_iter_begin(dev, &conn_iter);

	nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
		struct nouveau_connector *nv_connector = nouveau_connector(connector);
		enum drm_connector_status old_status = connector->status;
		u64 bits, old_epoch_counter = connector->epoch_counter;

		if (!(pending & drm_connector_mask(connector)))
			continue;

		spin_lock_irq(&drm->hpd_lock);
		bits = nv_connector->hpd_pending;
		nv_connector->hpd_pending = 0;
		spin_unlock_irq(&drm->hpd_lock);

		drm_dbg_kms(dev, "[CONNECTOR:%d:%s] plug:%d unplug:%d irq:%d\n",
			    connector->base.id, connector->name,
			    !!(bits & NVIF_CONN_EVENT_V0_PLUG),
			    !!(bits & NVIF_CONN_EVENT_V0_UNPLUG),
			    !!(bits & NVIF_CONN_EVENT_V0_IRQ));

		if (bits & NVIF_CONN_EVENT_V0_IRQ) {
			if (nouveau_dp_link_check(nv_connector))
				continue;
		}

		connector->status = drm_helper_probe_detect(connector, NULL, false);
		if (old_epoch_counter == connector->epoch_counter)
			continue;

		changed = true;
		drm_dbg_kms(dev, "[CONNECTOR:%d:%s] status updated from %s to %s (epoch counter %llu->%llu)\n",
			    connector->base.id, connector->name,
			    drm_get_connector_status_name(old_status),
			    drm_get_connector_status_name(connector->status),
			    old_epoch_counter, connector->epoch_counter);
	}

	drm_connector_list_iter_end(&conn_iter);
	mutex_unlock(&dev->mode_config.mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);

	pm_runtime_mark_last_busy(drm->dev->dev);
noop:
	pm_runtime_put_autosuspend(dev->dev);
}

#ifdef CONFIG_ACPI

static int
nouveau_display_acpi_ntfy(struct notifier_block *nb, unsigned long val,
			  void *data)
{
	struct nouveau_drm *drm = container_of(nb, typeof(*drm), acpi_nb);
	struct acpi_bus_event *info = data;
	int ret;

	if (!strcmp(info->device_class, ACPI_VIDEO_CLASS)) {
		if (info->type == ACPI_VIDEO_NOTIFY_PROBE) {
			ret = pm_runtime_get(drm->dev->dev);
			if (ret == 1 || ret == -EACCES) {
				/* If the GPU is already awake, or in a state
				 * where we can't wake it up, it can handle
				 * its own hotplug events.
				 */
				pm_runtime_put_autosuspend(drm->dev->dev);
			} else if (ret == 0 || ret == -EINPROGRESS) {
				/* We've started resuming the GPU already, so
				 * it will handle scheduling a full reprobe
				 * itself.
				 */
				NV_DEBUG(drm, "ACPI requested connector reprobe\n");
				pm_runtime_put_noidle(drm->dev->dev);
			} else {
				NV_WARN(drm, "Dropped ACPI reprobe event due to RPM error: %d\n",
					ret);
			}

			/* acpi-video should not generate keypresses for this */
			return NOTIFY_BAD;
		}
	}

	return NOTIFY_DONE;
}
#endif

int
nouveau_display_init(struct drm_device *dev, bool resume, bool runtime)
{
	struct nouveau_display *disp = nouveau_display(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	int ret;

	/*
	 * Enable hotplug interrupts (done as early as possible, since we need
	 * them for MST)
	 */
	drm_connector_list_iter_begin(dev, &conn_iter);
	nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
		struct nouveau_connector *conn = nouveau_connector(connector);
		nvif_event_allow(&conn->hpd);
		nvif_event_allow(&conn->irq);
	}
	drm_connector_list_iter_end(&conn_iter);

	ret = disp->init(dev, resume, runtime);
	if (ret)
		return ret;

	/* enable connector detection and polling for connectors without HPD
	 * support
	 */
	drm_kms_helper_poll_enable(dev);

	return ret;
}
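
/*
 * Teardown mirrors nouveau_display_init() in reverse: displays are shut
 * down (unless we are only suspending), hotplug events are blocked,
 * pending hotplug work is flushed on a full shutdown but left alone on
 * runtime suspend (the worker itself takes a runtime-PM reference),
 * polling is stopped, and only then is the backend's fini() called.
 */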
void
nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime)
{
	struct nouveau_display *disp = nouveau_display(dev);
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	if (!suspend) {
		if (drm_drv_uses_atomic_modeset(dev))
			drm_atomic_helper_shutdown(dev);
		else
			drm_helper_force_disable_all(dev);
	}

	/* disable hotplug interrupts */
	drm_connector_list_iter_begin(dev, &conn_iter);
	nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
		struct nouveau_connector *conn = nouveau_connector(connector);
		nvif_event_block(&conn->irq);
		nvif_event_block(&conn->hpd);
	}
	drm_connector_list_iter_end(&conn_iter);

	if (!runtime)
		cancel_work_sync(&drm->hpd_work);

	drm_kms_helper_poll_disable(dev);
	disp->fini(dev, runtime, suspend);
}

static void
nouveau_display_create_properties(struct drm_device *dev)
{
	struct nouveau_display *disp = nouveau_display(dev);
	int gen;

	if (disp->disp.object.oclass < NV50_DISP)
		gen = 0;
	else
	if (disp->disp.object.oclass < GF110_DISP)
		gen = 1;
	else
		gen = 2;

	PROP_ENUM(disp->dithering_mode, gen, "dithering mode", dither_mode);
	PROP_ENUM(disp->dithering_depth, gen, "dithering depth", dither_depth);
	PROP_ENUM(disp->underscan_property, gen, "underscan", underscan);

	disp->underscan_hborder_property =
		drm_property_create_range(dev, 0, "underscan hborder", 0, 128);

	disp->underscan_vborder_property =
		drm_property_create_range(dev, 0, "underscan vborder", 0, 128);

	if (gen < 1)
		return;

	/* -90..+90 */
	disp->vibrant_hue_property =
		drm_property_create_range(dev, 0, "vibrant hue", 0, 180);

	/* -100..+100 */
	disp->color_vibrance_property =
		drm_property_create_range(dev, 0, "color vibrance", 0, 200);
}
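
/*
 * Mode limits below scale with the GPU family: 2048x2048 before Celsius,
 * 4096x4096 up to (but not including) Tesla, 8192x8192 up to Fermi, and
 * 16384x16384 on Fermi and newer.  Asynchronous page flips are only
 * advertised on chipsets >= 0x11.
 */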
int
nouveau_display_create(struct drm_device *dev)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_display *disp;
	int ret;

	disp = drm->display = kzalloc(sizeof(*disp), GFP_KERNEL);
	if (!disp)
		return -ENOMEM;

	drm_mode_config_init(dev);
	drm_mode_create_scaling_mode_property(dev);
	drm_mode_create_dvi_i_properties(dev);

	dev->mode_config.funcs = &nouveau_mode_config_funcs;

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_CELSIUS) {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	} else
	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		dev->mode_config.max_width = 4096;
		dev->mode_config.max_height = 4096;
	} else
	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI) {
		dev->mode_config.max_width = 8192;
		dev->mode_config.max_height = 8192;
	} else {
		dev->mode_config.max_width = 16384;
		dev->mode_config.max_height = 16384;
	}

	dev->mode_config.preferred_depth = 24;
	dev->mode_config.prefer_shadow = 1;

	if (drm->client.device.info.chipset < 0x11)
		dev->mode_config.async_page_flip = false;
	else
		dev->mode_config.async_page_flip = true;

	drm_kms_helper_poll_init(dev);
	drm_kms_helper_poll_disable(dev);

	if (nouveau_modeset != 2 && drm->vbios.dcb.entries) {
		ret = nvif_disp_ctor(&drm->client.device, "kmsDisp", 0,
				     &disp->disp);
		if (ret == 0) {
			nouveau_display_create_properties(dev);
			if (disp->disp.object.oclass < NV50_DISP) {
				dev->mode_config.fb_modifiers_not_supported = true;
				ret = nv04_display_create(dev);
			} else {
				ret = nv50_display_create(dev);
			}
		}
	} else {
		ret = 0;
	}

	if (ret)
		goto disp_create_err;

	drm_mode_config_reset(dev);

	if (dev->mode_config.num_crtc) {
		ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
		if (ret)
			goto vblank_err;

		if (disp->disp.object.oclass >= NV50_DISP)
			nv50_crc_init(dev);
	}

	INIT_WORK(&drm->hpd_work, nouveau_display_hpd_work);
	spin_lock_init(&drm->hpd_lock);
#ifdef CONFIG_ACPI
	drm->acpi_nb.notifier_call = nouveau_display_acpi_ntfy;
	register_acpi_notifier(&drm->acpi_nb);
#endif

	return 0;

vblank_err:
	disp->dtor(dev);
disp_create_err:
	drm_kms_helper_poll_fini(dev);
	drm_mode_config_cleanup(dev);
	return ret;
}

void
nouveau_display_destroy(struct drm_device *dev)
{
	struct nouveau_display *disp = nouveau_display(dev);
	struct nouveau_drm *drm = nouveau_drm(dev);

#ifdef CONFIG_ACPI
	unregister_acpi_notifier(&drm->acpi_nb);
#endif

	drm_kms_helper_poll_fini(dev);
	drm_mode_config_cleanup(dev);

	if (disp->dtor)
		disp->dtor(dev);

	nvif_disp_dtor(&disp->disp);

	drm->display = NULL;
	kfree(disp);
}

int
nouveau_display_suspend(struct drm_device *dev, bool runtime)
{
	struct nouveau_display *disp = nouveau_display(dev);

	/* Disable console. */
	drm_fb_helper_set_suspend_unlocked(dev->fb_helper, true);

	if (drm_drv_uses_atomic_modeset(dev)) {
		if (!runtime) {
			disp->suspend = drm_atomic_helper_suspend(dev);
			if (IS_ERR(disp->suspend)) {
				int ret = PTR_ERR(disp->suspend);
				disp->suspend = NULL;
				return ret;
			}
		}
	}

	nouveau_display_fini(dev, true, runtime);
	return 0;
}

void
nouveau_display_resume(struct drm_device *dev, bool runtime)
{
	struct nouveau_display *disp = nouveau_display(dev);

	nouveau_display_init(dev, true, runtime);

	if (drm_drv_uses_atomic_modeset(dev)) {
		if (disp->suspend) {
			drm_atomic_helper_resume(dev, disp->suspend);
			disp->suspend = NULL;
		}
	}

	/* Enable console. */
	drm_fb_helper_set_suspend_unlocked(dev->fb_helper, false);
}

int
nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *bo;
	uint32_t domain;
	int ret;

	args->pitch = roundup(args->width * (args->bpp / 8), 256);
	args->size = args->pitch * args->height;
	args->size = roundup(args->size, PAGE_SIZE);

	/* Use VRAM if there is any; otherwise fall back to system memory */
	if (nouveau_drm(dev)->client.device.info.ram_size != 0)
		domain = NOUVEAU_GEM_DOMAIN_VRAM;
	else
		domain = NOUVEAU_GEM_DOMAIN_GART;

	ret = nouveau_gem_new(cli, args->size, 0, domain, 0, 0, &bo);
	if (ret)
		return ret;

	ret = drm_gem_handle_create(file_priv, &bo->bo.base, &args->handle);
	drm_gem_object_put(&bo->bo.base);
	return ret;
}