1 /* 2 * Copyright (c) 2006-2009 Red Hat Inc. 3 * Copyright (c) 2006-2008 Intel Corporation 4 * Copyright (c) 2007 Dave Airlie <airlied@linux.ie> 5 * 6 * DRM framebuffer helper functions 7 * 8 * Permission to use, copy, modify, distribute, and sell this software and its 9 * documentation for any purpose is hereby granted without fee, provided that 10 * the above copyright notice appear in all copies and that both that copyright 11 * notice and this permission notice appear in supporting documentation, and 12 * that the name of the copyright holders not be used in advertising or 13 * publicity pertaining to distribution of the software without specific, 14 * written prior permission. The copyright holders make no representations 15 * about the suitability of this software for any purpose. It is provided "as 16 * is" without express or implied warranty. 17 * 18 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, 19 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO 20 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR 21 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, 22 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 23 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE 24 * OF THIS SOFTWARE. 25 * 26 * Authors: 27 * Dave Airlie <airlied@linux.ie> 28 * Jesse Barnes <jesse.barnes@intel.com> 29 */ 30 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 31 32 #include <linux/console.h> 33 #include <linux/dma-buf.h> 34 #include <linux/kernel.h> 35 #include <linux/module.h> 36 #include <linux/slab.h> 37 #include <linux/sysrq.h> 38 #include <linux/vmalloc.h> 39 40 #include <drm/drm_atomic.h> 41 #include <drm/drm_crtc.h> 42 #include <drm/drm_crtc_helper.h> 43 #include <drm/drm_drv.h> 44 #include <drm/drm_fb_helper.h> 45 #include <drm/drm_fourcc.h> 46 #include <drm/drm_framebuffer.h> 47 #include <drm/drm_print.h> 48 #include <drm/drm_vblank.h> 49 50 #include "drm_crtc_helper_internal.h" 51 #include "drm_internal.h" 52 53 static bool drm_fbdev_emulation = true; 54 module_param_named(fbdev_emulation, drm_fbdev_emulation, bool, 0600); 55 MODULE_PARM_DESC(fbdev_emulation, 56 "Enable legacy fbdev emulation [default=true]"); 57 58 static int drm_fbdev_overalloc = CONFIG_DRM_FBDEV_OVERALLOC; 59 module_param(drm_fbdev_overalloc, int, 0444); 60 MODULE_PARM_DESC(drm_fbdev_overalloc, 61 "Overallocation of the fbdev buffer (%) [default=" 62 __MODULE_STRING(CONFIG_DRM_FBDEV_OVERALLOC) "]"); 63 64 /* 65 * In order to keep user-space compatibility, we want in certain use-cases 66 * to keep leaking the fbdev physical address to the user-space program 67 * handling the fbdev buffer. 68 * This is a bad habit essentially kept into closed source opengl driver 69 * that should really be moved into open-source upstream projects instead 70 * of using legacy physical addresses in user space to communicate with 71 * other out-of-tree kernel modules. 72 * 73 * This module_param *should* be removed as soon as possible and be 74 * considered as a broken and legacy behaviour from a modern fbdev device. 
75 */ 76 #if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM) 77 static bool drm_leak_fbdev_smem = false; 78 module_param_unsafe(drm_leak_fbdev_smem, bool, 0600); 79 MODULE_PARM_DESC(drm_leak_fbdev_smem, 80 "Allow unsafe leaking fbdev physical smem address [default=false]"); 81 #endif 82 83 static LIST_HEAD(kernel_fb_helper_list); 84 static DEFINE_MUTEX(kernel_fb_helper_lock); 85 86 /** 87 * DOC: fbdev helpers 88 * 89 * The fb helper functions are useful to provide an fbdev on top of a drm kernel 90 * mode setting driver. They can be used mostly independently from the crtc 91 * helper functions used by many drivers to implement the kernel mode setting 92 * interfaces. 93 * 94 * Drivers that support a dumb buffer with a virtual address and mmap support 95 * should try out the generic fbdev emulation using drm_fbdev_generic_setup(). 96 * It will automatically set up deferred I/O if the driver requires a shadow 97 * buffer. 98 * 99 * At runtime drivers should restore the fbdev console by using 100 * drm_fb_helper_lastclose() as their &drm_driver.lastclose callback. 101 * They should also notify the fb helper code of updates to the output 102 * configuration by using drm_fb_helper_output_poll_changed() as their 103 * &drm_mode_config_funcs.output_poll_changed callback. 104 * 105 * For suspend/resume consider using drm_mode_config_helper_suspend() and 106 * drm_mode_config_helper_resume() which take care of fbdev as well. 107 * 108 * All other functions exported by the fb helper library can be used to 109 * implement the fbdev driver interface by the driver. 110 * 111 * It is possible, though perhaps somewhat tricky, to implement race-free 112 * hotplug detection using the fbdev helpers. The drm_fb_helper_prepare() 113 * helper must be called first to initialize the minimum required to make 114 * hotplug detection work. Drivers also need to make sure to properly set up 115 * the &drm_mode_config.funcs member. After calling drm_kms_helper_poll_init() 116 * it is safe to enable interrupts and start processing hotplug events. At the 117 * same time, drivers should initialize all modeset objects such as CRTCs, 118 * encoders and connectors. To finish up the fbdev helper initialization, the 119 * drm_fb_helper_init() function is called. To probe for all attached displays 120 * and set up an initial configuration using the detected hardware, drivers 121 * should call drm_fb_helper_initial_config(). 122 * 123 * If &drm_framebuffer_funcs.dirty is set, the 124 * drm_fb_helper_{cfb,sys}_{write,fillrect,copyarea,imageblit} functions will 125 * accumulate changes and schedule &drm_fb_helper.damage_work to run right 126 * away. This worker then calls the dirty() function ensuring that it will 127 * always run in process context since the fb_*() function could be running in 128 * atomic context. If drm_fb_helper_deferred_io() is used as the deferred_io 129 * callback it will also schedule damage_work with the damage collected from the 130 * mmap page writes. 131 * 132 * Deferred I/O is not compatible with SHMEM. Such drivers should request an 133 * fbdev shadow buffer and call drm_fbdev_generic_setup() instead.
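 *
 * A minimal sketch of the init sequence described above; the driver-private
 * "foo" structure and the foo_fb_helper_funcs instance are illustrative and
 * not part of this API::
 *
 *     drm_fb_helper_prepare(dev, &foo->fb_helper, &foo_fb_helper_funcs);
 *
 *     ... set up &drm_mode_config.funcs, CRTCs, encoders and connectors ...
 *
 *     drm_kms_helper_poll_init(dev);
 *
 *     ret = drm_fb_helper_init(dev, &foo->fb_helper);
 *     if (ret)
 *         return ret;
 *
 *     ret = drm_fb_helper_initial_config(&foo->fb_helper, 32);
 *     if (ret)
 *         return ret;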
134 */ 135 136 static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc) 137 { 138 uint16_t *r_base, *g_base, *b_base; 139 140 if (crtc->funcs->gamma_set == NULL) 141 return; 142 143 r_base = crtc->gamma_store; 144 g_base = r_base + crtc->gamma_size; 145 b_base = g_base + crtc->gamma_size; 146 147 crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 148 crtc->gamma_size, NULL); 149 } 150 151 /** 152 * drm_fb_helper_debug_enter - implementation for &fb_ops.fb_debug_enter 153 * @info: fbdev registered by the helper 154 */ 155 int drm_fb_helper_debug_enter(struct fb_info *info) 156 { 157 struct drm_fb_helper *helper = info->par; 158 const struct drm_crtc_helper_funcs *funcs; 159 struct drm_mode_set *mode_set; 160 161 list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) { 162 mutex_lock(&helper->client.modeset_mutex); 163 drm_client_for_each_modeset(mode_set, &helper->client) { 164 if (!mode_set->crtc->enabled) 165 continue; 166 167 funcs = mode_set->crtc->helper_private; 168 if (funcs->mode_set_base_atomic == NULL) 169 continue; 170 171 if (drm_drv_uses_atomic_modeset(mode_set->crtc->dev)) 172 continue; 173 174 funcs->mode_set_base_atomic(mode_set->crtc, 175 mode_set->fb, 176 mode_set->x, 177 mode_set->y, 178 ENTER_ATOMIC_MODE_SET); 179 } 180 mutex_unlock(&helper->client.modeset_mutex); 181 } 182 183 return 0; 184 } 185 EXPORT_SYMBOL(drm_fb_helper_debug_enter); 186 187 /** 188 * drm_fb_helper_debug_leave - implementation for &fb_ops.fb_debug_leave 189 * @info: fbdev registered by the helper 190 */ 191 int drm_fb_helper_debug_leave(struct fb_info *info) 192 { 193 struct drm_fb_helper *helper = info->par; 194 struct drm_client_dev *client = &helper->client; 195 struct drm_device *dev = helper->dev; 196 struct drm_crtc *crtc; 197 const struct drm_crtc_helper_funcs *funcs; 198 struct drm_mode_set *mode_set; 199 struct drm_framebuffer *fb; 200 201 mutex_lock(&client->modeset_mutex); 202 drm_client_for_each_modeset(mode_set, client) { 203 crtc = mode_set->crtc; 204 if (drm_drv_uses_atomic_modeset(crtc->dev)) 205 continue; 206 207 funcs = crtc->helper_private; 208 fb = crtc->primary->fb; 209 210 if (!crtc->enabled) 211 continue; 212 213 if (!fb) { 214 drm_err(dev, "no fb to restore?\n"); 215 continue; 216 } 217 218 if (funcs->mode_set_base_atomic == NULL) 219 continue; 220 221 drm_fb_helper_restore_lut_atomic(mode_set->crtc); 222 funcs->mode_set_base_atomic(mode_set->crtc, fb, crtc->x, 223 crtc->y, LEAVE_ATOMIC_MODE_SET); 224 } 225 mutex_unlock(&client->modeset_mutex); 226 227 return 0; 228 } 229 EXPORT_SYMBOL(drm_fb_helper_debug_leave); 230 231 static int 232 __drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper, 233 bool force) 234 { 235 bool do_delayed; 236 int ret; 237 238 if (!drm_fbdev_emulation || !fb_helper) 239 return -ENODEV; 240 241 if (READ_ONCE(fb_helper->deferred_setup)) 242 return 0; 243 244 mutex_lock(&fb_helper->lock); 245 if (force) { 246 /* 247 * Yes this is the _locked version which expects the master lock 248 * to be held. But for forced restores we're intentionally 249 * racing here, see drm_fb_helper_set_par(). 
250 */ 251 ret = drm_client_modeset_commit_locked(&fb_helper->client); 252 } else { 253 ret = drm_client_modeset_commit(&fb_helper->client); 254 } 255 256 do_delayed = fb_helper->delayed_hotplug; 257 if (do_delayed) 258 fb_helper->delayed_hotplug = false; 259 mutex_unlock(&fb_helper->lock); 260 261 if (do_delayed) 262 drm_fb_helper_hotplug_event(fb_helper); 263 264 return ret; 265 } 266 267 /** 268 * drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration 269 * @fb_helper: driver-allocated fbdev helper, can be NULL 270 * 271 * This should be called from driver's drm &drm_driver.lastclose callback 272 * when implementing an fbcon on top of kms using this helper. This ensures that 273 * the user isn't greeted with a black screen when e.g. X dies. 274 * 275 * RETURNS: 276 * Zero if everything went ok, negative error code otherwise. 277 */ 278 int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper) 279 { 280 return __drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper, false); 281 } 282 EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked); 283 284 #ifdef CONFIG_MAGIC_SYSRQ 285 /* emergency restore, don't bother with error reporting */ 286 static void drm_fb_helper_restore_work_fn(struct work_struct *ignored) 287 { 288 struct drm_fb_helper *helper; 289 290 mutex_lock(&kernel_fb_helper_lock); 291 list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) { 292 struct drm_device *dev = helper->dev; 293 294 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 295 continue; 296 297 mutex_lock(&helper->lock); 298 drm_client_modeset_commit_locked(&helper->client); 299 mutex_unlock(&helper->lock); 300 } 301 mutex_unlock(&kernel_fb_helper_lock); 302 } 303 304 static DECLARE_WORK(drm_fb_helper_restore_work, drm_fb_helper_restore_work_fn); 305 306 static void drm_fb_helper_sysrq(int dummy1) 307 { 308 schedule_work(&drm_fb_helper_restore_work); 309 } 310 311 static const struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { 312 .handler = drm_fb_helper_sysrq, 313 .help_msg = "force-fb(v)", 314 .action_msg = "Restore framebuffer console", 315 }; 316 #else 317 static const struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { }; 318 #endif 319 320 static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode) 321 { 322 struct drm_fb_helper *fb_helper = info->par; 323 324 mutex_lock(&fb_helper->lock); 325 drm_client_modeset_dpms(&fb_helper->client, dpms_mode); 326 mutex_unlock(&fb_helper->lock); 327 } 328 329 /** 330 * drm_fb_helper_blank - implementation for &fb_ops.fb_blank 331 * @blank: desired blanking state 332 * @info: fbdev registered by the helper 333 */ 334 int drm_fb_helper_blank(int blank, struct fb_info *info) 335 { 336 if (oops_in_progress) 337 return -EBUSY; 338 339 switch (blank) { 340 /* Display: On; HSync: On, VSync: On */ 341 case FB_BLANK_UNBLANK: 342 drm_fb_helper_dpms(info, DRM_MODE_DPMS_ON); 343 break; 344 /* Display: Off; HSync: On, VSync: On */ 345 case FB_BLANK_NORMAL: 346 drm_fb_helper_dpms(info, DRM_MODE_DPMS_STANDBY); 347 break; 348 /* Display: Off; HSync: Off, VSync: On */ 349 case FB_BLANK_HSYNC_SUSPEND: 350 drm_fb_helper_dpms(info, DRM_MODE_DPMS_STANDBY); 351 break; 352 /* Display: Off; HSync: On, VSync: Off */ 353 case FB_BLANK_VSYNC_SUSPEND: 354 drm_fb_helper_dpms(info, DRM_MODE_DPMS_SUSPEND); 355 break; 356 /* Display: Off; HSync: Off, VSync: Off */ 357 case FB_BLANK_POWERDOWN: 358 drm_fb_helper_dpms(info, DRM_MODE_DPMS_OFF); 359 break; 360 } 361 return 0; 362 } 363 EXPORT_SYMBOL(drm_fb_helper_blank); 364 365 static 
void drm_fb_helper_resume_worker(struct work_struct *work) 366 { 367 struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper, 368 resume_work); 369 370 console_lock(); 371 fb_set_suspend(helper->fbdev, 0); 372 console_unlock(); 373 } 374 375 static void drm_fb_helper_damage_blit_real(struct drm_fb_helper *fb_helper, 376 struct drm_clip_rect *clip, 377 struct iosys_map *dst) 378 { 379 struct drm_framebuffer *fb = fb_helper->fb; 380 size_t offset = clip->y1 * fb->pitches[0]; 381 size_t len = clip->x2 - clip->x1; 382 unsigned int y; 383 void *src; 384 385 switch (drm_format_info_bpp(fb->format, 0)) { 386 case 1: 387 offset += clip->x1 / 8; 388 len = DIV_ROUND_UP(len + clip->x1 % 8, 8); 389 break; 390 case 2: 391 offset += clip->x1 / 4; 392 len = DIV_ROUND_UP(len + clip->x1 % 4, 4); 393 break; 394 case 4: 395 offset += clip->x1 / 2; 396 len = DIV_ROUND_UP(len + clip->x1 % 2, 2); 397 break; 398 default: 399 offset += clip->x1 * fb->format->cpp[0]; 400 len *= fb->format->cpp[0]; 401 break; 402 } 403 404 src = fb_helper->fbdev->screen_buffer + offset; 405 iosys_map_incr(dst, offset); /* go to first pixel within clip rect */ 406 407 for (y = clip->y1; y < clip->y2; y++) { 408 iosys_map_memcpy_to(dst, 0, src, len); 409 iosys_map_incr(dst, fb->pitches[0]); 410 src += fb->pitches[0]; 411 } 412 } 413 414 static int drm_fb_helper_damage_blit(struct drm_fb_helper *fb_helper, 415 struct drm_clip_rect *clip) 416 { 417 struct drm_client_buffer *buffer = fb_helper->buffer; 418 struct iosys_map map, dst; 419 int ret; 420 421 /* 422 * We have to pin the client buffer to its current location while 423 * flushing the shadow buffer. In the general case, concurrent 424 * modesetting operations could try to move the buffer and would 425 * fail. The modeset has to be serialized by acquiring the reservation 426 * object of the underlying BO here. 427 * 428 * For fbdev emulation, we only have to protect against fbdev modeset 429 * operations. Nothing else will involve the client buffer's BO. So it 430 * is sufficient to acquire struct drm_fb_helper.lock here. 431 */ 432 mutex_lock(&fb_helper->lock); 433 434 ret = drm_client_buffer_vmap(buffer, &map); 435 if (ret) 436 goto out; 437 438 dst = map; 439 drm_fb_helper_damage_blit_real(fb_helper, clip, &dst); 440 441 drm_client_buffer_vunmap(buffer); 442 443 out: 444 mutex_unlock(&fb_helper->lock); 445 446 return ret; 447 } 448 449 static void drm_fb_helper_damage_work(struct work_struct *work) 450 { 451 struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper, 452 damage_work); 453 struct drm_device *dev = helper->dev; 454 struct drm_clip_rect *clip = &helper->damage_clip; 455 struct drm_clip_rect clip_copy; 456 unsigned long flags; 457 int ret; 458 459 spin_lock_irqsave(&helper->damage_lock, flags); 460 clip_copy = *clip; 461 clip->x1 = clip->y1 = ~0; 462 clip->x2 = clip->y2 = 0; 463 spin_unlock_irqrestore(&helper->damage_lock, flags); 464 465 /* Call damage handlers only if necessary */ 466 if (!(clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2)) 467 return; 468 469 if (helper->buffer) { 470 ret = drm_fb_helper_damage_blit(helper, &clip_copy); 471 if (drm_WARN_ONCE(dev, ret, "Damage blitter failed: ret=%d\n", ret)) 472 goto err; 473 } 474 475 if (helper->fb->funcs->dirty) { 476 ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1); 477 if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret)) 478 goto err; 479 } 480 481 return; 482 483 err: 484 /* 485 * Restore damage clip rectangle on errors. 
The next run 486 * of the damage worker will perform the update. 487 */ 488 spin_lock_irqsave(&helper->damage_lock, flags); 489 clip->x1 = min_t(u32, clip->x1, clip_copy.x1); 490 clip->y1 = min_t(u32, clip->y1, clip_copy.y1); 491 clip->x2 = max_t(u32, clip->x2, clip_copy.x2); 492 clip->y2 = max_t(u32, clip->y2, clip_copy.y2); 493 spin_unlock_irqrestore(&helper->damage_lock, flags); 494 } 495 496 /** 497 * drm_fb_helper_prepare - setup a drm_fb_helper structure 498 * @dev: DRM device 499 * @helper: driver-allocated fbdev helper structure to set up 500 * @funcs: pointer to structure of functions associated with this helper 501 * 502 * Sets up the bare minimum to make the framebuffer helper usable. This is 503 * useful to implement race-free initialization of the polling helpers. 504 */ 505 void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper, 506 const struct drm_fb_helper_funcs *funcs) 507 { 508 INIT_LIST_HEAD(&helper->kernel_fb_list); 509 spin_lock_init(&helper->damage_lock); 510 INIT_WORK(&helper->resume_work, drm_fb_helper_resume_worker); 511 INIT_WORK(&helper->damage_work, drm_fb_helper_damage_work); 512 helper->damage_clip.x1 = helper->damage_clip.y1 = ~0; 513 mutex_init(&helper->lock); 514 helper->funcs = funcs; 515 helper->dev = dev; 516 } 517 EXPORT_SYMBOL(drm_fb_helper_prepare); 518 519 /** 520 * drm_fb_helper_init - initialize a &struct drm_fb_helper 521 * @dev: drm device 522 * @fb_helper: driver-allocated fbdev helper structure to initialize 523 * 524 * This allocates the structures for the fbdev helper. 525 * Note that this won't yet touch the hardware (through the driver interfaces) 526 * nor register the fbdev. This is only done in drm_fb_helper_initial_config() 527 * to allow driver writers more control over the exact init sequence. 528 * 529 * Drivers must call drm_fb_helper_prepare() before calling this function. 530 * 531 * RETURNS: 532 * Zero if everything went ok, nonzero otherwise. 533 */ 534 int drm_fb_helper_init(struct drm_device *dev, 535 struct drm_fb_helper *fb_helper) 536 { 537 int ret; 538 539 if (!drm_fbdev_emulation) { 540 dev->fb_helper = fb_helper; 541 return 0; 542 } 543 544 /* 545 * If this is not the generic fbdev client, initialize a drm_client 546 * without callbacks so we can use the modesets. 547 */ 548 if (!fb_helper->client.funcs) { 549 ret = drm_client_init(dev, &fb_helper->client, "drm_fb_helper", NULL); 550 if (ret) 551 return ret; 552 } 553 554 dev->fb_helper = fb_helper; 555 556 return 0; 557 } 558 EXPORT_SYMBOL(drm_fb_helper_init); 559 560 /** 561 * drm_fb_helper_alloc_fbi - allocate fb_info and some of its members 562 * @fb_helper: driver-allocated fbdev helper 563 * 564 * A helper to alloc fb_info and the members cmap and apertures. Called 565 * by the driver within the fb_probe fb_helper callback function. Drivers do not 566 * need to release the allocated fb_info structure themselves; this is 567 * automatically done when calling drm_fb_helper_fini().
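 *
 * A minimal &drm_fb_helper_funcs.fb_probe sketch built around this helper and
 * drm_fb_helper_fill_info(); foo_fb_probe() and foo_fb_ops are illustrative,
 * and allocation of the backing storage is driver specific::
 *
 *     static int foo_fb_probe(struct drm_fb_helper *fb_helper,
 *                             struct drm_fb_helper_surface_size *sizes)
 *     {
 *         struct fb_info *info;
 *
 *         ... allocate backing storage and set fb_helper->fb ...
 *
 *         info = drm_fb_helper_alloc_fbi(fb_helper);
 *         if (IS_ERR(info))
 *             return PTR_ERR(info);
 *
 *         info->fbops = &foo_fb_ops;
 *         drm_fb_helper_fill_info(info, fb_helper, sizes);
 *
 *         ... point info->screen_buffer at the backing storage ...
 *
 *         return 0;
 *     }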
568 * 569 * RETURNS: 570 * fb_info pointer if things went okay, pointer containing error code 571 * otherwise 572 */ 573 struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper) 574 { 575 struct device *dev = fb_helper->dev->dev; 576 struct fb_info *info; 577 int ret; 578 579 info = framebuffer_alloc(0, dev); 580 if (!info) 581 return ERR_PTR(-ENOMEM); 582 583 ret = fb_alloc_cmap(&info->cmap, 256, 0); 584 if (ret) 585 goto err_release; 586 587 /* 588 * TODO: We really should be smarter here and alloc an aperture 589 * for each IORESOURCE_MEM resource helper->dev->dev has and also 590 * init the ranges of the apertures based on the resources. 591 * Note some drivers currently count on there being only 1 empty 592 * aperture and fill this themselves; these will need to be dealt 593 * with somehow when fixing this. 594 */ 595 info->apertures = alloc_apertures(1); 596 if (!info->apertures) { 597 ret = -ENOMEM; 598 goto err_free_cmap; 599 } 600 601 fb_helper->fbdev = info; 602 info->skip_vt_switch = true; 603 604 return info; 605 606 err_free_cmap: 607 fb_dealloc_cmap(&info->cmap); 608 err_release: 609 framebuffer_release(info); 610 return ERR_PTR(ret); 611 } 612 EXPORT_SYMBOL(drm_fb_helper_alloc_fbi); 613 614 /** 615 * drm_fb_helper_unregister_fbi - unregister fb_info framebuffer device 616 * @fb_helper: driver-allocated fbdev helper, can be NULL 617 * 618 * A wrapper around unregister_framebuffer, to release the fb_info 619 * framebuffer device. This must be called before releasing all resources for 620 * @fb_helper by calling drm_fb_helper_fini(). 621 */ 622 void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper) 623 { 624 if (fb_helper && fb_helper->fbdev) 625 unregister_framebuffer(fb_helper->fbdev); 626 } 627 EXPORT_SYMBOL(drm_fb_helper_unregister_fbi); 628 629 /** 630 * drm_fb_helper_fini - finalize a &struct drm_fb_helper 631 * @fb_helper: driver-allocated fbdev helper, can be NULL 632 * 633 * This cleans up all remaining resources associated with @fb_helper.
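 *
 * A minimal teardown sketch following the ordering described in
 * drm_fb_helper_unregister_fbi(); the &foo->fb_helper pointer is
 * illustrative::
 *
 *     drm_fb_helper_unregister_fbi(&foo->fb_helper);
 *     drm_fb_helper_fini(&foo->fb_helper);
 *     ... then release the driver's framebuffer and backing storage ...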
634 */ 635 void drm_fb_helper_fini(struct drm_fb_helper *fb_helper) 636 { 637 struct fb_info *info; 638 639 if (!fb_helper) 640 return; 641 642 fb_helper->dev->fb_helper = NULL; 643 644 if (!drm_fbdev_emulation) 645 return; 646 647 cancel_work_sync(&fb_helper->resume_work); 648 cancel_work_sync(&fb_helper->damage_work); 649 650 info = fb_helper->fbdev; 651 if (info) { 652 if (info->cmap.len) 653 fb_dealloc_cmap(&info->cmap); 654 framebuffer_release(info); 655 } 656 fb_helper->fbdev = NULL; 657 658 mutex_lock(&kernel_fb_helper_lock); 659 if (!list_empty(&fb_helper->kernel_fb_list)) { 660 list_del(&fb_helper->kernel_fb_list); 661 if (list_empty(&kernel_fb_helper_list)) 662 unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); 663 } 664 mutex_unlock(&kernel_fb_helper_lock); 665 666 mutex_destroy(&fb_helper->lock); 667 668 if (!fb_helper->client.funcs) 669 drm_client_release(&fb_helper->client); 670 } 671 EXPORT_SYMBOL(drm_fb_helper_fini); 672 673 static bool drm_fbdev_use_shadow_fb(struct drm_fb_helper *fb_helper) 674 { 675 struct drm_device *dev = fb_helper->dev; 676 struct drm_framebuffer *fb = fb_helper->fb; 677 678 return dev->mode_config.prefer_shadow_fbdev || 679 dev->mode_config.prefer_shadow || 680 fb->funcs->dirty; 681 } 682 683 static void drm_fb_helper_damage(struct fb_info *info, u32 x, u32 y, 684 u32 width, u32 height) 685 { 686 struct drm_fb_helper *helper = info->par; 687 struct drm_clip_rect *clip = &helper->damage_clip; 688 unsigned long flags; 689 690 if (!drm_fbdev_use_shadow_fb(helper)) 691 return; 692 693 spin_lock_irqsave(&helper->damage_lock, flags); 694 clip->x1 = min_t(u32, clip->x1, x); 695 clip->y1 = min_t(u32, clip->y1, y); 696 clip->x2 = max_t(u32, clip->x2, x + width); 697 clip->y2 = max_t(u32, clip->y2, y + height); 698 spin_unlock_irqrestore(&helper->damage_lock, flags); 699 700 schedule_work(&helper->damage_work); 701 } 702 703 /* 704 * Convert memory region into area of scanlines and pixels per 705 * scanline. The parameters off and len must not reach beyond 706 * the end of the framebuffer. 707 */ 708 static void drm_fb_helper_memory_range_to_clip(struct fb_info *info, off_t off, size_t len, 709 struct drm_rect *clip) 710 { 711 off_t end = off + len; 712 u32 x1 = 0; 713 u32 y1 = off / info->fix.line_length; 714 u32 x2 = info->var.xres; 715 u32 y2 = DIV_ROUND_UP(end, info->fix.line_length); 716 717 if ((y2 - y1) == 1) { 718 /* 719 * We've only written to a single scanline. Try to reduce 720 * the number of horizontal pixels that need an update. 721 */ 722 off_t bit_off = (off % info->fix.line_length) * 8; 723 off_t bit_end = (end % info->fix.line_length) * 8; 724 725 x1 = bit_off / info->var.bits_per_pixel; 726 x2 = DIV_ROUND_UP(bit_end, info->var.bits_per_pixel); 727 } 728 729 drm_rect_init(clip, x1, y1, x2 - x1, y2 - y1); 730 } 731 732 /** 733 * drm_fb_helper_deferred_io() - fbdev deferred_io callback function 734 * @info: fb_info struct pointer 735 * @pagereflist: list of mmap framebuffer pages that have to be flushed 736 * 737 * This function is used as the &fb_deferred_io.deferred_io 738 * callback function for flushing the fbdev mmap writes. 
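 *
 * A minimal sketch of hooking this up for a driver that wants deferred I/O;
 * the foo_defio instance and its delay value are illustrative::
 *
 *     static struct fb_deferred_io foo_defio = {
 *         .delay       = HZ / 20,
 *         .deferred_io = drm_fb_helper_deferred_io,
 *     };
 *
 *     info->fbdefio = &foo_defio;
 *     fb_deferred_io_init(info);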
739 */ 740 void drm_fb_helper_deferred_io(struct fb_info *info, struct list_head *pagereflist) 741 { 742 unsigned long start, end, min_off, max_off; 743 struct fb_deferred_io_pageref *pageref; 744 struct drm_rect damage_area; 745 746 min_off = ULONG_MAX; 747 max_off = 0; 748 list_for_each_entry(pageref, pagereflist, list) { 749 start = pageref->offset; 750 end = start + PAGE_SIZE; 751 min_off = min(min_off, start); 752 max_off = max(max_off, end); 753 } 754 if (min_off >= max_off) 755 return; 756 757 /* 758 * As we can only track pages, we might reach beyond the end 759 * of the screen and account for non-existing scanlines. Hence, 760 * keep the covered memory area within the screen buffer. 761 */ 762 max_off = min(max_off, info->screen_size); 763 764 drm_fb_helper_memory_range_to_clip(info, min_off, max_off - min_off, &damage_area); 765 drm_fb_helper_damage(info, damage_area.x1, damage_area.y1, 766 drm_rect_width(&damage_area), 767 drm_rect_height(&damage_area)); 768 } 769 EXPORT_SYMBOL(drm_fb_helper_deferred_io); 770 771 /** 772 * drm_fb_helper_sys_read - wrapper around fb_sys_read 773 * @info: fb_info struct pointer 774 * @buf: userspace buffer to read from framebuffer memory 775 * @count: number of bytes to read from framebuffer memory 776 * @ppos: read offset within framebuffer memory 777 * 778 * A wrapper around fb_sys_read implemented by fbdev core 779 */ 780 ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf, 781 size_t count, loff_t *ppos) 782 { 783 return fb_sys_read(info, buf, count, ppos); 784 } 785 EXPORT_SYMBOL(drm_fb_helper_sys_read); 786 787 /** 788 * drm_fb_helper_sys_write - wrapper around fb_sys_write 789 * @info: fb_info struct pointer 790 * @buf: userspace buffer to write to framebuffer memory 791 * @count: number of bytes to write to framebuffer memory 792 * @ppos: write offset within framebuffer memory 793 * 794 * A wrapper around fb_sys_write implemented by fbdev core 795 */ 796 ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf, 797 size_t count, loff_t *ppos) 798 { 799 loff_t pos = *ppos; 800 ssize_t ret; 801 struct drm_rect damage_area; 802 803 ret = fb_sys_write(info, buf, count, ppos); 804 if (ret <= 0) 805 return ret; 806 807 drm_fb_helper_memory_range_to_clip(info, pos, ret, &damage_area); 808 drm_fb_helper_damage(info, damage_area.x1, damage_area.y1, 809 drm_rect_width(&damage_area), 810 drm_rect_height(&damage_area)); 811 812 return ret; 813 } 814 EXPORT_SYMBOL(drm_fb_helper_sys_write); 815 816 /** 817 * drm_fb_helper_sys_fillrect - wrapper around sys_fillrect 818 * @info: fbdev registered by the helper 819 * @rect: info about rectangle to fill 820 * 821 * A wrapper around sys_fillrect implemented by fbdev core 822 */ 823 void drm_fb_helper_sys_fillrect(struct fb_info *info, 824 const struct fb_fillrect *rect) 825 { 826 sys_fillrect(info, rect); 827 drm_fb_helper_damage(info, rect->dx, rect->dy, rect->width, rect->height); 828 } 829 EXPORT_SYMBOL(drm_fb_helper_sys_fillrect); 830 831 /** 832 * drm_fb_helper_sys_copyarea - wrapper around sys_copyarea 833 * @info: fbdev registered by the helper 834 * @area: info about area to copy 835 * 836 * A wrapper around sys_copyarea implemented by fbdev core 837 */ 838 void drm_fb_helper_sys_copyarea(struct fb_info *info, 839 const struct fb_copyarea *area) 840 { 841 sys_copyarea(info, area); 842 drm_fb_helper_damage(info, area->dx, area->dy, area->width, area->height); 843 } 844 EXPORT_SYMBOL(drm_fb_helper_sys_copyarea); 845 846 /** 847 * drm_fb_helper_sys_imageblit - 
wrapper around sys_imageblit 848 * @info: fbdev registered by the helper 849 * @image: info about image to blit 850 * 851 * A wrapper around sys_imageblit implemented by fbdev core 852 */ 853 void drm_fb_helper_sys_imageblit(struct fb_info *info, 854 const struct fb_image *image) 855 { 856 sys_imageblit(info, image); 857 drm_fb_helper_damage(info, image->dx, image->dy, image->width, image->height); 858 } 859 EXPORT_SYMBOL(drm_fb_helper_sys_imageblit); 860 861 /** 862 * drm_fb_helper_cfb_fillrect - wrapper around cfb_fillrect 863 * @info: fbdev registered by the helper 864 * @rect: info about rectangle to fill 865 * 866 * A wrapper around cfb_fillrect implemented by fbdev core 867 */ 868 void drm_fb_helper_cfb_fillrect(struct fb_info *info, 869 const struct fb_fillrect *rect) 870 { 871 cfb_fillrect(info, rect); 872 drm_fb_helper_damage(info, rect->dx, rect->dy, rect->width, rect->height); 873 } 874 EXPORT_SYMBOL(drm_fb_helper_cfb_fillrect); 875 876 /** 877 * drm_fb_helper_cfb_copyarea - wrapper around cfb_copyarea 878 * @info: fbdev registered by the helper 879 * @area: info about area to copy 880 * 881 * A wrapper around cfb_copyarea implemented by fbdev core 882 */ 883 void drm_fb_helper_cfb_copyarea(struct fb_info *info, 884 const struct fb_copyarea *area) 885 { 886 cfb_copyarea(info, area); 887 drm_fb_helper_damage(info, area->dx, area->dy, area->width, area->height); 888 } 889 EXPORT_SYMBOL(drm_fb_helper_cfb_copyarea); 890 891 /** 892 * drm_fb_helper_cfb_imageblit - wrapper around cfb_imageblit 893 * @info: fbdev registered by the helper 894 * @image: info about image to blit 895 * 896 * A wrapper around cfb_imageblit implemented by fbdev core 897 */ 898 void drm_fb_helper_cfb_imageblit(struct fb_info *info, 899 const struct fb_image *image) 900 { 901 cfb_imageblit(info, image); 902 drm_fb_helper_damage(info, image->dx, image->dy, image->width, image->height); 903 } 904 EXPORT_SYMBOL(drm_fb_helper_cfb_imageblit); 905 906 /** 907 * drm_fb_helper_set_suspend - wrapper around fb_set_suspend 908 * @fb_helper: driver-allocated fbdev helper, can be NULL 909 * @suspend: whether to suspend or resume 910 * 911 * A wrapper around fb_set_suspend implemented by fbdev core. 912 * Use drm_fb_helper_set_suspend_unlocked() if you don't need to take 913 * the lock yourself. 914 */ 915 void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, bool suspend) 916 { 917 if (fb_helper && fb_helper->fbdev) 918 fb_set_suspend(fb_helper->fbdev, suspend); 919 } 920 EXPORT_SYMBOL(drm_fb_helper_set_suspend); 921 922 /** 923 * drm_fb_helper_set_suspend_unlocked - wrapper around fb_set_suspend that also 924 * takes the console lock 925 * @fb_helper: driver-allocated fbdev helper, can be NULL 926 * @suspend: whether to suspend or resume 927 * 928 * A wrapper around fb_set_suspend() that takes the console lock. If the lock 929 * isn't available on resume, a worker is tasked with waiting for the lock 930 * to become available. The console lock can be pretty contended on resume 931 * due to all the printk activity. 932 * 933 * This function can be called multiple times with the same state since 934 * &fb_info.state is checked to see if fbdev is running or not before locking. 935 * 936 * Use drm_fb_helper_set_suspend() if you need to take the lock yourself.
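 *
 * A minimal sketch of typical use from a driver's suspend/resume paths; the
 * foo pointer is illustrative, and most drivers can instead rely on
 * drm_mode_config_helper_suspend() and drm_mode_config_helper_resume(), which
 * take care of fbdev as well::
 *
 *     ... in the driver's suspend path ...
 *     drm_fb_helper_set_suspend_unlocked(&foo->fb_helper, true);
 *
 *     ... in the driver's resume path ...
 *     drm_fb_helper_set_suspend_unlocked(&foo->fb_helper, false);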
937 */ 938 void drm_fb_helper_set_suspend_unlocked(struct drm_fb_helper *fb_helper, 939 bool suspend) 940 { 941 if (!fb_helper || !fb_helper->fbdev) 942 return; 943 944 /* make sure there's no pending/ongoing resume */ 945 flush_work(&fb_helper->resume_work); 946 947 if (suspend) { 948 if (fb_helper->fbdev->state != FBINFO_STATE_RUNNING) 949 return; 950 951 console_lock(); 952 953 } else { 954 if (fb_helper->fbdev->state == FBINFO_STATE_RUNNING) 955 return; 956 957 if (!console_trylock()) { 958 schedule_work(&fb_helper->resume_work); 959 return; 960 } 961 } 962 963 fb_set_suspend(fb_helper->fbdev, suspend); 964 console_unlock(); 965 } 966 EXPORT_SYMBOL(drm_fb_helper_set_suspend_unlocked); 967 968 static int setcmap_pseudo_palette(struct fb_cmap *cmap, struct fb_info *info) 969 { 970 u32 *palette = (u32 *)info->pseudo_palette; 971 int i; 972 973 if (cmap->start + cmap->len > 16) 974 return -EINVAL; 975 976 for (i = 0; i < cmap->len; ++i) { 977 u16 red = cmap->red[i]; 978 u16 green = cmap->green[i]; 979 u16 blue = cmap->blue[i]; 980 u32 value; 981 982 red >>= 16 - info->var.red.length; 983 green >>= 16 - info->var.green.length; 984 blue >>= 16 - info->var.blue.length; 985 value = (red << info->var.red.offset) | 986 (green << info->var.green.offset) | 987 (blue << info->var.blue.offset); 988 if (info->var.transp.length > 0) { 989 u32 mask = (1 << info->var.transp.length) - 1; 990 991 mask <<= info->var.transp.offset; 992 value |= mask; 993 } 994 palette[cmap->start + i] = value; 995 } 996 997 return 0; 998 } 999 1000 static int setcmap_legacy(struct fb_cmap *cmap, struct fb_info *info) 1001 { 1002 struct drm_fb_helper *fb_helper = info->par; 1003 struct drm_mode_set *modeset; 1004 struct drm_crtc *crtc; 1005 u16 *r, *g, *b; 1006 int ret = 0; 1007 1008 drm_modeset_lock_all(fb_helper->dev); 1009 drm_client_for_each_modeset(modeset, &fb_helper->client) { 1010 crtc = modeset->crtc; 1011 if (!crtc->funcs->gamma_set || !crtc->gamma_size) { 1012 ret = -EINVAL; 1013 goto out; 1014 } 1015 1016 if (cmap->start + cmap->len > crtc->gamma_size) { 1017 ret = -EINVAL; 1018 goto out; 1019 } 1020 1021 r = crtc->gamma_store; 1022 g = r + crtc->gamma_size; 1023 b = g + crtc->gamma_size; 1024 1025 memcpy(r + cmap->start, cmap->red, cmap->len * sizeof(*r)); 1026 memcpy(g + cmap->start, cmap->green, cmap->len * sizeof(*g)); 1027 memcpy(b + cmap->start, cmap->blue, cmap->len * sizeof(*b)); 1028 1029 ret = crtc->funcs->gamma_set(crtc, r, g, b, 1030 crtc->gamma_size, NULL); 1031 if (ret) 1032 goto out; 1033 } 1034 out: 1035 drm_modeset_unlock_all(fb_helper->dev); 1036 1037 return ret; 1038 } 1039 1040 static struct drm_property_blob *setcmap_new_gamma_lut(struct drm_crtc *crtc, 1041 struct fb_cmap *cmap) 1042 { 1043 struct drm_device *dev = crtc->dev; 1044 struct drm_property_blob *gamma_lut; 1045 struct drm_color_lut *lut; 1046 int size = crtc->gamma_size; 1047 int i; 1048 1049 if (!size || cmap->start + cmap->len > size) 1050 return ERR_PTR(-EINVAL); 1051 1052 gamma_lut = drm_property_create_blob(dev, sizeof(*lut) * size, NULL); 1053 if (IS_ERR(gamma_lut)) 1054 return gamma_lut; 1055 1056 lut = gamma_lut->data; 1057 if (cmap->start || cmap->len != size) { 1058 u16 *r = crtc->gamma_store; 1059 u16 *g = r + crtc->gamma_size; 1060 u16 *b = g + crtc->gamma_size; 1061 1062 for (i = 0; i < cmap->start; i++) { 1063 lut[i].red = r[i]; 1064 lut[i].green = g[i]; 1065 lut[i].blue = b[i]; 1066 } 1067 for (i = cmap->start + cmap->len; i < size; i++) { 1068 lut[i].red = r[i]; 1069 lut[i].green = g[i]; 1070 lut[i].blue = b[i]; 
1071 } 1072 } 1073 1074 for (i = 0; i < cmap->len; i++) { 1075 lut[cmap->start + i].red = cmap->red[i]; 1076 lut[cmap->start + i].green = cmap->green[i]; 1077 lut[cmap->start + i].blue = cmap->blue[i]; 1078 } 1079 1080 return gamma_lut; 1081 } 1082 1083 static int setcmap_atomic(struct fb_cmap *cmap, struct fb_info *info) 1084 { 1085 struct drm_fb_helper *fb_helper = info->par; 1086 struct drm_device *dev = fb_helper->dev; 1087 struct drm_property_blob *gamma_lut = NULL; 1088 struct drm_modeset_acquire_ctx ctx; 1089 struct drm_crtc_state *crtc_state; 1090 struct drm_atomic_state *state; 1091 struct drm_mode_set *modeset; 1092 struct drm_crtc *crtc; 1093 u16 *r, *g, *b; 1094 bool replaced; 1095 int ret = 0; 1096 1097 drm_modeset_acquire_init(&ctx, 0); 1098 1099 state = drm_atomic_state_alloc(dev); 1100 if (!state) { 1101 ret = -ENOMEM; 1102 goto out_ctx; 1103 } 1104 1105 state->acquire_ctx = &ctx; 1106 retry: 1107 drm_client_for_each_modeset(modeset, &fb_helper->client) { 1108 crtc = modeset->crtc; 1109 1110 if (!gamma_lut) 1111 gamma_lut = setcmap_new_gamma_lut(crtc, cmap); 1112 if (IS_ERR(gamma_lut)) { 1113 ret = PTR_ERR(gamma_lut); 1114 gamma_lut = NULL; 1115 goto out_state; 1116 } 1117 1118 crtc_state = drm_atomic_get_crtc_state(state, crtc); 1119 if (IS_ERR(crtc_state)) { 1120 ret = PTR_ERR(crtc_state); 1121 goto out_state; 1122 } 1123 1124 /* 1125 * FIXME: This always uses gamma_lut. Some HW have only 1126 * degamma_lut, in which case we should reset gamma_lut and set 1127 * degamma_lut. See drm_crtc_legacy_gamma_set(). 1128 */ 1129 replaced = drm_property_replace_blob(&crtc_state->degamma_lut, 1130 NULL); 1131 replaced |= drm_property_replace_blob(&crtc_state->ctm, NULL); 1132 replaced |= drm_property_replace_blob(&crtc_state->gamma_lut, 1133 gamma_lut); 1134 crtc_state->color_mgmt_changed |= replaced; 1135 } 1136 1137 ret = drm_atomic_commit(state); 1138 if (ret) 1139 goto out_state; 1140 1141 drm_client_for_each_modeset(modeset, &fb_helper->client) { 1142 crtc = modeset->crtc; 1143 1144 r = crtc->gamma_store; 1145 g = r + crtc->gamma_size; 1146 b = g + crtc->gamma_size; 1147 1148 memcpy(r + cmap->start, cmap->red, cmap->len * sizeof(*r)); 1149 memcpy(g + cmap->start, cmap->green, cmap->len * sizeof(*g)); 1150 memcpy(b + cmap->start, cmap->blue, cmap->len * sizeof(*b)); 1151 } 1152 1153 out_state: 1154 if (ret == -EDEADLK) 1155 goto backoff; 1156 1157 drm_property_blob_put(gamma_lut); 1158 drm_atomic_state_put(state); 1159 out_ctx: 1160 drm_modeset_drop_locks(&ctx); 1161 drm_modeset_acquire_fini(&ctx); 1162 1163 return ret; 1164 1165 backoff: 1166 drm_atomic_state_clear(state); 1167 drm_modeset_backoff(&ctx); 1168 goto retry; 1169 } 1170 1171 /** 1172 * drm_fb_helper_setcmap - implementation for &fb_ops.fb_setcmap 1173 * @cmap: cmap to set 1174 * @info: fbdev registered by the helper 1175 */ 1176 int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info) 1177 { 1178 struct drm_fb_helper *fb_helper = info->par; 1179 struct drm_device *dev = fb_helper->dev; 1180 int ret; 1181 1182 if (oops_in_progress) 1183 return -EBUSY; 1184 1185 mutex_lock(&fb_helper->lock); 1186 1187 if (!drm_master_internal_acquire(dev)) { 1188 ret = -EBUSY; 1189 goto unlock; 1190 } 1191 1192 mutex_lock(&fb_helper->client.modeset_mutex); 1193 if (info->fix.visual == FB_VISUAL_TRUECOLOR) 1194 ret = setcmap_pseudo_palette(cmap, info); 1195 else if (drm_drv_uses_atomic_modeset(fb_helper->dev)) 1196 ret = setcmap_atomic(cmap, info); 1197 else 1198 ret = setcmap_legacy(cmap, info); 1199 
mutex_unlock(&fb_helper->client.modeset_mutex); 1200 1201 drm_master_internal_release(dev); 1202 unlock: 1203 mutex_unlock(&fb_helper->lock); 1204 1205 return ret; 1206 } 1207 EXPORT_SYMBOL(drm_fb_helper_setcmap); 1208 1209 /** 1210 * drm_fb_helper_ioctl - legacy ioctl implementation 1211 * @info: fbdev registered by the helper 1212 * @cmd: ioctl command 1213 * @arg: ioctl argument 1214 * 1215 * A helper to implement the standard fbdev ioctl. Only 1216 * FBIO_WAITFORVSYNC is implemented for now. 1217 */ 1218 int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd, 1219 unsigned long arg) 1220 { 1221 struct drm_fb_helper *fb_helper = info->par; 1222 struct drm_device *dev = fb_helper->dev; 1223 struct drm_crtc *crtc; 1224 int ret = 0; 1225 1226 mutex_lock(&fb_helper->lock); 1227 if (!drm_master_internal_acquire(dev)) { 1228 ret = -EBUSY; 1229 goto unlock; 1230 } 1231 1232 switch (cmd) { 1233 case FBIO_WAITFORVSYNC: 1234 /* 1235 * Only consider the first CRTC. 1236 * 1237 * This ioctl is supposed to take the CRTC number as 1238 * an argument, but in fbdev times, what that number 1239 * was supposed to be was quite unclear, different 1240 * drivers were passing that argument differently 1241 * (some by reference, some by value), and most of the 1242 * userspace applications were just hardcoding 0 as an 1243 * argument. 1244 * 1245 * The first CRTC should be the integrated panel on 1246 * most drivers, so this is the best choice we can 1247 * make. If we're not smart enough here, one should 1248 * just consider switching the userspace to KMS. 1249 */ 1250 crtc = fb_helper->client.modesets[0].crtc; 1251 1252 /* 1253 * Only wait for a vblank event if the CRTC is 1254 * enabled, otherwise just don't do anything, 1255 * not even report an error. 1256 */ 1257 ret = drm_crtc_vblank_get(crtc); 1258 if (!ret) { 1259 drm_crtc_wait_one_vblank(crtc); 1260 drm_crtc_vblank_put(crtc); 1261 } 1262 1263 ret = 0; 1264 break; 1265 default: 1266 ret = -ENOTTY; 1267 } 1268 1269 drm_master_internal_release(dev); 1270 unlock: 1271 mutex_unlock(&fb_helper->lock); 1272 return ret; 1273 } 1274 EXPORT_SYMBOL(drm_fb_helper_ioctl); 1275 1276 static bool drm_fb_pixel_format_equal(const struct fb_var_screeninfo *var_1, 1277 const struct fb_var_screeninfo *var_2) 1278 { 1279 return var_1->bits_per_pixel == var_2->bits_per_pixel && 1280 var_1->grayscale == var_2->grayscale && 1281 var_1->red.offset == var_2->red.offset && 1282 var_1->red.length == var_2->red.length && 1283 var_1->red.msb_right == var_2->red.msb_right && 1284 var_1->green.offset == var_2->green.offset && 1285 var_1->green.length == var_2->green.length && 1286 var_1->green.msb_right == var_2->green.msb_right && 1287 var_1->blue.offset == var_2->blue.offset && 1288 var_1->blue.length == var_2->blue.length && 1289 var_1->blue.msb_right == var_2->blue.msb_right && 1290 var_1->transp.offset == var_2->transp.offset && 1291 var_1->transp.length == var_2->transp.length && 1292 var_1->transp.msb_right == var_2->transp.msb_right; 1293 } 1294 1295 static void drm_fb_helper_fill_pixel_fmt(struct fb_var_screeninfo *var, 1296 const struct drm_format_info *format) 1297 { 1298 u8 depth = format->depth; 1299 1300 if (format->is_color_indexed) { 1301 var->red.offset = 0; 1302 var->green.offset = 0; 1303 var->blue.offset = 0; 1304 var->red.length = depth; 1305 var->green.length = depth; 1306 var->blue.length = depth; 1307 var->transp.offset = 0; 1308 var->transp.length = 0; 1309 return; 1310 } 1311 1312 switch (depth) { 1313 case 15: 1314 var->red.offset = 10; 1315
var->green.offset = 5; 1316 var->blue.offset = 0; 1317 var->red.length = 5; 1318 var->green.length = 5; 1319 var->blue.length = 5; 1320 var->transp.offset = 15; 1321 var->transp.length = 1; 1322 break; 1323 case 16: 1324 var->red.offset = 11; 1325 var->green.offset = 5; 1326 var->blue.offset = 0; 1327 var->red.length = 5; 1328 var->green.length = 6; 1329 var->blue.length = 5; 1330 var->transp.offset = 0; 1331 break; 1332 case 24: 1333 var->red.offset = 16; 1334 var->green.offset = 8; 1335 var->blue.offset = 0; 1336 var->red.length = 8; 1337 var->green.length = 8; 1338 var->blue.length = 8; 1339 var->transp.offset = 0; 1340 var->transp.length = 0; 1341 break; 1342 case 32: 1343 var->red.offset = 16; 1344 var->green.offset = 8; 1345 var->blue.offset = 0; 1346 var->red.length = 8; 1347 var->green.length = 8; 1348 var->blue.length = 8; 1349 var->transp.offset = 24; 1350 var->transp.length = 8; 1351 break; 1352 default: 1353 break; 1354 } 1355 } 1356 1357 /** 1358 * drm_fb_helper_check_var - implementation for &fb_ops.fb_check_var 1359 * @var: screeninfo to check 1360 * @info: fbdev registered by the helper 1361 */ 1362 int drm_fb_helper_check_var(struct fb_var_screeninfo *var, 1363 struct fb_info *info) 1364 { 1365 struct drm_fb_helper *fb_helper = info->par; 1366 struct drm_framebuffer *fb = fb_helper->fb; 1367 const struct drm_format_info *format = fb->format; 1368 struct drm_device *dev = fb_helper->dev; 1369 unsigned int bpp; 1370 1371 if (in_dbg_master()) 1372 return -EINVAL; 1373 1374 if (var->pixclock != 0) { 1375 drm_dbg_kms(dev, "fbdev emulation doesn't support changing the pixel clock, value of pixclock is ignored\n"); 1376 var->pixclock = 0; 1377 } 1378 1379 switch (format->format) { 1380 case DRM_FORMAT_C1: 1381 case DRM_FORMAT_C2: 1382 case DRM_FORMAT_C4: 1383 /* supported format with sub-byte pixels */ 1384 break; 1385 1386 default: 1387 if ((drm_format_info_block_width(format, 0) > 1) || 1388 (drm_format_info_block_height(format, 0) > 1)) 1389 return -EINVAL; 1390 break; 1391 } 1392 1393 /* 1394 * Changes to struct fb_var_screeninfo are currently not pushed back 1395 * to KMS, hence fail if different settings are requested. 1396 */ 1397 bpp = drm_format_info_bpp(format, 0); 1398 if (var->bits_per_pixel > bpp || 1399 var->xres > fb->width || var->yres > fb->height || 1400 var->xres_virtual > fb->width || var->yres_virtual > fb->height) { 1401 drm_dbg_kms(dev, "fb requested width/height/bpp can't fit in current fb " 1402 "request %dx%d-%d (virtual %dx%d) > %dx%d-%d\n", 1403 var->xres, var->yres, var->bits_per_pixel, 1404 var->xres_virtual, var->yres_virtual, 1405 fb->width, fb->height, bpp); 1406 return -EINVAL; 1407 } 1408 1409 /* 1410 * Workaround for SDL 1.2, which is known to be setting all pixel format 1411 * field values to zero in some cases. We treat this situation as a 1412 * kind of "use some reasonable autodetected values". 1413 */ 1414 if (!var->red.offset && !var->green.offset && 1415 !var->blue.offset && !var->transp.offset && 1416 !var->red.length && !var->green.length && 1417 !var->blue.length && !var->transp.length && 1418 !var->red.msb_right && !var->green.msb_right && 1419 !var->blue.msb_right && !var->transp.msb_right) { 1420 drm_fb_helper_fill_pixel_fmt(var, format); 1421 } 1422 1423 /* 1424 * Likewise, bits_per_pixel should be rounded up to a supported value. 1425 */ 1426 var->bits_per_pixel = bpp; 1427 1428 /* 1429 * drm fbdev emulation doesn't support changing the pixel format at all, 1430 * so reject all pixel format changing requests.
1431 */ 1432 if (!drm_fb_pixel_format_equal(var, &info->var)) { 1433 drm_dbg_kms(dev, "fbdev emulation doesn't support changing the pixel format\n"); 1434 return -EINVAL; 1435 } 1436 1437 return 0; 1438 } 1439 EXPORT_SYMBOL(drm_fb_helper_check_var); 1440 1441 /** 1442 * drm_fb_helper_set_par - implementation for &fb_ops.fb_set_par 1443 * @info: fbdev registered by the helper 1444 * 1445 * This will let fbcon do the mode init and is called at initialization time by 1446 * the fbdev core when registering the driver, and later on through the hotplug 1447 * callback. 1448 */ 1449 int drm_fb_helper_set_par(struct fb_info *info) 1450 { 1451 struct drm_fb_helper *fb_helper = info->par; 1452 struct fb_var_screeninfo *var = &info->var; 1453 bool force; 1454 1455 if (oops_in_progress) 1456 return -EBUSY; 1457 1458 if (var->pixclock != 0) { 1459 drm_err(fb_helper->dev, "PIXEL CLOCK SET\n"); 1460 return -EINVAL; 1461 } 1462 1463 /* 1464 * Normally we want to make sure that a kms master takes precedence over 1465 * fbdev, to avoid fbdev flickering and occasionally stealing the 1466 * display status. But Xorg first sets the vt back to text mode using 1467 * the KDSET IOCTL with KD_TEXT, and only after that drops the master 1468 * status when exiting. 1469 * 1470 * In the past this was caught by drm_fb_helper_lastclose(), but on 1471 * modern systems where logind always keeps a drm fd open to orchestrate 1472 * the vt switching, this doesn't work. 1473 * 1474 * To not break the userspace ABI we have this special case here, which 1475 * is only used for the above case. Everything else uses the normal 1476 * commit function, which ensures that we never steal the display from 1477 * an active drm master. 1478 */ 1479 force = var->activate & FB_ACTIVATE_KD_TEXT; 1480 1481 __drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper, force); 1482 1483 return 0; 1484 } 1485 EXPORT_SYMBOL(drm_fb_helper_set_par); 1486 1487 static void pan_set(struct drm_fb_helper *fb_helper, int x, int y) 1488 { 1489 struct drm_mode_set *mode_set; 1490 1491 mutex_lock(&fb_helper->client.modeset_mutex); 1492 drm_client_for_each_modeset(mode_set, &fb_helper->client) { 1493 mode_set->x = x; 1494 mode_set->y = y; 1495 } 1496 mutex_unlock(&fb_helper->client.modeset_mutex); 1497 } 1498 1499 static int pan_display_atomic(struct fb_var_screeninfo *var, 1500 struct fb_info *info) 1501 { 1502 struct drm_fb_helper *fb_helper = info->par; 1503 int ret; 1504 1505 pan_set(fb_helper, var->xoffset, var->yoffset); 1506 1507 ret = drm_client_modeset_commit_locked(&fb_helper->client); 1508 if (!ret) { 1509 info->var.xoffset = var->xoffset; 1510 info->var.yoffset = var->yoffset; 1511 } else 1512 pan_set(fb_helper, info->var.xoffset, info->var.yoffset); 1513 1514 return ret; 1515 } 1516 1517 static int pan_display_legacy(struct fb_var_screeninfo *var, 1518 struct fb_info *info) 1519 { 1520 struct drm_fb_helper *fb_helper = info->par; 1521 struct drm_client_dev *client = &fb_helper->client; 1522 struct drm_mode_set *modeset; 1523 int ret = 0; 1524 1525 mutex_lock(&client->modeset_mutex); 1526 drm_modeset_lock_all(fb_helper->dev); 1527 drm_client_for_each_modeset(modeset, client) { 1528 modeset->x = var->xoffset; 1529 modeset->y = var->yoffset; 1530 1531 if (modeset->num_connectors) { 1532 ret = drm_mode_set_config_internal(modeset); 1533 if (!ret) { 1534 info->var.xoffset = var->xoffset; 1535 info->var.yoffset = var->yoffset; 1536 } 1537 } 1538 } 1539 drm_modeset_unlock_all(fb_helper->dev); 1540 mutex_unlock(&client->modeset_mutex); 1541 1542 return ret; 
1543 } 1544 1545 /** 1546 * drm_fb_helper_pan_display - implementation for &fb_ops.fb_pan_display 1547 * @var: updated screen information 1548 * @info: fbdev registered by the helper 1549 */ 1550 int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, 1551 struct fb_info *info) 1552 { 1553 struct drm_fb_helper *fb_helper = info->par; 1554 struct drm_device *dev = fb_helper->dev; 1555 int ret; 1556 1557 if (oops_in_progress) 1558 return -EBUSY; 1559 1560 mutex_lock(&fb_helper->lock); 1561 if (!drm_master_internal_acquire(dev)) { 1562 ret = -EBUSY; 1563 goto unlock; 1564 } 1565 1566 if (drm_drv_uses_atomic_modeset(dev)) 1567 ret = pan_display_atomic(var, info); 1568 else 1569 ret = pan_display_legacy(var, info); 1570 1571 drm_master_internal_release(dev); 1572 unlock: 1573 mutex_unlock(&fb_helper->lock); 1574 1575 return ret; 1576 } 1577 EXPORT_SYMBOL(drm_fb_helper_pan_display); 1578 1579 /* 1580 * Allocates the backing storage and sets up the fbdev info structure through 1581 * the ->fb_probe callback. 1582 */ 1583 static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper, 1584 int preferred_bpp) 1585 { 1586 struct drm_client_dev *client = &fb_helper->client; 1587 struct drm_device *dev = fb_helper->dev; 1588 struct drm_mode_config *config = &dev->mode_config; 1589 int ret = 0; 1590 int crtc_count = 0; 1591 struct drm_connector_list_iter conn_iter; 1592 struct drm_fb_helper_surface_size sizes; 1593 struct drm_connector *connector; 1594 struct drm_mode_set *mode_set; 1595 int best_depth = 0; 1596 1597 memset(&sizes, 0, sizeof(struct drm_fb_helper_surface_size)); 1598 sizes.surface_depth = 24; 1599 sizes.surface_bpp = 32; 1600 sizes.fb_width = (u32)-1; 1601 sizes.fb_height = (u32)-1; 1602 1603 /* 1604 * If driver picks 8 or 16 by default use that for both depth/bpp 1605 * to begin with 1606 */ 1607 if (preferred_bpp != sizes.surface_bpp) 1608 sizes.surface_depth = sizes.surface_bpp = preferred_bpp; 1609 1610 drm_connector_list_iter_begin(fb_helper->dev, &conn_iter); 1611 drm_client_for_each_connector_iter(connector, &conn_iter) { 1612 struct drm_cmdline_mode *cmdline_mode; 1613 1614 cmdline_mode = &connector->cmdline_mode; 1615 1616 if (cmdline_mode->bpp_specified) { 1617 switch (cmdline_mode->bpp) { 1618 case 8: 1619 sizes.surface_depth = sizes.surface_bpp = 8; 1620 break; 1621 case 15: 1622 sizes.surface_depth = 15; 1623 sizes.surface_bpp = 16; 1624 break; 1625 case 16: 1626 sizes.surface_depth = sizes.surface_bpp = 16; 1627 break; 1628 case 24: 1629 sizes.surface_depth = sizes.surface_bpp = 24; 1630 break; 1631 case 32: 1632 sizes.surface_depth = 24; 1633 sizes.surface_bpp = 32; 1634 break; 1635 } 1636 break; 1637 } 1638 } 1639 drm_connector_list_iter_end(&conn_iter); 1640 1641 /* 1642 * If we run into a situation where, for example, the primary plane 1643 * supports RGBA5551 (16 bpp, depth 15) but not RGB565 (16 bpp, depth 1644 * 16) we need to scale down the depth of the sizes we request. 1645 */ 1646 mutex_lock(&client->modeset_mutex); 1647 drm_client_for_each_modeset(mode_set, client) { 1648 struct drm_crtc *crtc = mode_set->crtc; 1649 struct drm_plane *plane = crtc->primary; 1650 int j; 1651 1652 drm_dbg_kms(dev, "test CRTC %u primary plane\n", drm_crtc_index(crtc)); 1653 1654 for (j = 0; j < plane->format_count; j++) { 1655 const struct drm_format_info *fmt; 1656 1657 fmt = drm_format_info(plane->format_types[j]); 1658 1659 /* 1660 * Do not consider YUV or other complicated formats 1661 * for framebuffers. 
This means only legacy formats 1662 * are supported (fmt->depth is a legacy field) but 1663 * the framebuffer emulation can only deal with such 1664 * formats, specifically RGB/BGR formats. 1665 */ 1666 if (fmt->depth == 0) 1667 continue; 1668 1669 /* We found a perfect fit, great */ 1670 if (fmt->depth == sizes.surface_depth) { 1671 best_depth = fmt->depth; 1672 break; 1673 } 1674 1675 /* Skip depths above what we're looking for */ 1676 if (fmt->depth > sizes.surface_depth) 1677 continue; 1678 1679 /* Best depth found so far */ 1680 if (fmt->depth > best_depth) 1681 best_depth = fmt->depth; 1682 } 1683 } 1684 if (sizes.surface_depth != best_depth && best_depth) { 1685 drm_info(dev, "requested bpp %d, scaled depth down to %d", 1686 sizes.surface_bpp, best_depth); 1687 sizes.surface_depth = best_depth; 1688 } 1689 1690 /* first up get a count of crtcs now in use and new min/maxes width/heights */ 1691 crtc_count = 0; 1692 drm_client_for_each_modeset(mode_set, client) { 1693 struct drm_display_mode *desired_mode; 1694 int x, y, j; 1695 /* in case of tile group, are we the last tile vert or horiz? 1696 * If no tile group you are always the last one both vertically 1697 * and horizontally 1698 */ 1699 bool lastv = true, lasth = true; 1700 1701 desired_mode = mode_set->mode; 1702 1703 if (!desired_mode) 1704 continue; 1705 1706 crtc_count++; 1707 1708 x = mode_set->x; 1709 y = mode_set->y; 1710 1711 sizes.surface_width = max_t(u32, desired_mode->hdisplay + x, sizes.surface_width); 1712 sizes.surface_height = max_t(u32, desired_mode->vdisplay + y, sizes.surface_height); 1713 1714 for (j = 0; j < mode_set->num_connectors; j++) { 1715 struct drm_connector *connector = mode_set->connectors[j]; 1716 1717 if (connector->has_tile && 1718 desired_mode->hdisplay == connector->tile_h_size && 1719 desired_mode->vdisplay == connector->tile_v_size) { 1720 lasth = (connector->tile_h_loc == (connector->num_h_tile - 1)); 1721 lastv = (connector->tile_v_loc == (connector->num_v_tile - 1)); 1722 /* cloning to multiple tiles is just crazy-talk, so: */ 1723 break; 1724 } 1725 } 1726 1727 if (lasth) 1728 sizes.fb_width = min_t(u32, desired_mode->hdisplay + x, sizes.fb_width); 1729 if (lastv) 1730 sizes.fb_height = min_t(u32, desired_mode->vdisplay + y, sizes.fb_height); 1731 } 1732 mutex_unlock(&client->modeset_mutex); 1733 1734 if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) { 1735 drm_info(dev, "Cannot find any crtc or sizes\n"); 1736 1737 /* First time: disable all crtc's.. */ 1738 if (!fb_helper->deferred_setup) 1739 drm_client_modeset_commit(client); 1740 return -EAGAIN; 1741 } 1742 1743 /* Handle our overallocation */ 1744 sizes.surface_height *= drm_fbdev_overalloc; 1745 sizes.surface_height /= 100; 1746 if (sizes.surface_height > config->max_height) { 1747 drm_dbg_kms(dev, "Fbdev over-allocation too large; clamping height to %d\n", 1748 config->max_height); 1749 sizes.surface_height = config->max_height; 1750 } 1751 1752 /* push down into drivers */ 1753 ret = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes); 1754 if (ret < 0) 1755 return ret; 1756 1757 strcpy(fb_helper->fb->comm, "[fbcon]"); 1758 return 0; 1759 } 1760 1761 static void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, 1762 bool is_color_indexed) 1763 { 1764 info->fix.type = FB_TYPE_PACKED_PIXELS; 1765 info->fix.visual = is_color_indexed ?
FB_VISUAL_PSEUDOCOLOR 1766 : FB_VISUAL_TRUECOLOR; 1767 info->fix.mmio_start = 0; 1768 info->fix.mmio_len = 0; 1769 info->fix.type_aux = 0; 1770 info->fix.xpanstep = 1; /* doing it in hw */ 1771 info->fix.ypanstep = 1; /* doing it in hw */ 1772 info->fix.ywrapstep = 0; 1773 info->fix.accel = FB_ACCEL_NONE; 1774 1775 info->fix.line_length = pitch; 1776 } 1777 1778 static void drm_fb_helper_fill_var(struct fb_info *info, 1779 struct drm_fb_helper *fb_helper, 1780 uint32_t fb_width, uint32_t fb_height) 1781 { 1782 struct drm_framebuffer *fb = fb_helper->fb; 1783 const struct drm_format_info *format = fb->format; 1784 1785 switch (format->format) { 1786 case DRM_FORMAT_C1: 1787 case DRM_FORMAT_C2: 1788 case DRM_FORMAT_C4: 1789 /* supported format with sub-byte pixels */ 1790 break; 1791 1792 default: 1793 WARN_ON((drm_format_info_block_width(format, 0) > 1) || 1794 (drm_format_info_block_height(format, 0) > 1)); 1795 break; 1796 } 1797 1798 info->pseudo_palette = fb_helper->pseudo_palette; 1799 info->var.xres_virtual = fb->width; 1800 info->var.yres_virtual = fb->height; 1801 info->var.bits_per_pixel = drm_format_info_bpp(format, 0); 1802 info->var.accel_flags = FB_ACCELF_TEXT; 1803 info->var.xoffset = 0; 1804 info->var.yoffset = 0; 1805 info->var.activate = FB_ACTIVATE_NOW; 1806 1807 drm_fb_helper_fill_pixel_fmt(&info->var, format); 1808 1809 info->var.xres = fb_width; 1810 info->var.yres = fb_height; 1811 } 1812 1813 /** 1814 * drm_fb_helper_fill_info - initializes fbdev information 1815 * @info: fbdev instance to set up 1816 * @fb_helper: fb helper instance to use as template 1817 * @sizes: describes fbdev size and scanout surface size 1818 * 1819 * Sets up the variable and fixed fbdev metainformation from the given fb helper 1820 * instance and the drm framebuffer allocated in &drm_fb_helper.fb. 1821 * 1822 * Drivers should call this (or their equivalent setup code) from their 1823 * &drm_fb_helper_funcs.fb_probe callback after having allocated the fbdev 1824 * backing storage framebuffer. 1825 */ 1826 void drm_fb_helper_fill_info(struct fb_info *info, 1827 struct drm_fb_helper *fb_helper, 1828 struct drm_fb_helper_surface_size *sizes) 1829 { 1830 struct drm_framebuffer *fb = fb_helper->fb; 1831 1832 drm_fb_helper_fill_fix(info, fb->pitches[0], 1833 fb->format->is_color_indexed); 1834 drm_fb_helper_fill_var(info, fb_helper, 1835 sizes->fb_width, sizes->fb_height); 1836 1837 info->par = fb_helper; 1838 /* 1839 * The DRM driver's fbdev emulation device name can be confusing if the 1840 * driver name also has a "drm" suffix on it, leading to names such as 1841 * "simpledrmdrmfb" in /proc/fb. Unfortunately, it's an uAPI and can't 1842 * be changed due to user-space tools (e.g: pm-utils) matching against it. 1843 */ 1844 snprintf(info->fix.id, sizeof(info->fix.id), "%sdrmfb", 1845 fb_helper->dev->driver->name); 1846 1847 } 1848 EXPORT_SYMBOL(drm_fb_helper_fill_info); 1849 1850 /* 1851 * This is a continuation of drm_setup_crtcs() that sets up anything related 1852 * to the framebuffer. During initialization, drm_setup_crtcs() is called before 1853 * the framebuffer has been allocated (fb_helper->fb and fb_helper->fbdev). 1854 * So, any setup that touches those fields needs to be done here instead of in 1855 * drm_setup_crtcs().
1856 */ 1857 static void drm_setup_crtcs_fb(struct drm_fb_helper *fb_helper) 1858 { 1859 struct drm_client_dev *client = &fb_helper->client; 1860 struct drm_connector_list_iter conn_iter; 1861 struct fb_info *info = fb_helper->fbdev; 1862 unsigned int rotation, sw_rotations = 0; 1863 struct drm_connector *connector; 1864 struct drm_mode_set *modeset; 1865 1866 mutex_lock(&client->modeset_mutex); 1867 drm_client_for_each_modeset(modeset, client) { 1868 if (!modeset->num_connectors) 1869 continue; 1870 1871 modeset->fb = fb_helper->fb; 1872 1873 if (drm_client_rotation(modeset, &rotation)) 1874 /* Rotating in hardware, fbcon should not rotate */ 1875 sw_rotations |= DRM_MODE_ROTATE_0; 1876 else 1877 sw_rotations |= rotation; 1878 } 1879 mutex_unlock(&client->modeset_mutex); 1880 1881 drm_connector_list_iter_begin(fb_helper->dev, &conn_iter); 1882 drm_client_for_each_connector_iter(connector, &conn_iter) { 1883 1884 /* use first connected connector for the physical dimensions */ 1885 if (connector->status == connector_status_connected) { 1886 info->var.width = connector->display_info.width_mm; 1887 info->var.height = connector->display_info.height_mm; 1888 break; 1889 } 1890 } 1891 drm_connector_list_iter_end(&conn_iter); 1892 1893 switch (sw_rotations) { 1894 case DRM_MODE_ROTATE_0: 1895 info->fbcon_rotate_hint = FB_ROTATE_UR; 1896 break; 1897 case DRM_MODE_ROTATE_90: 1898 info->fbcon_rotate_hint = FB_ROTATE_CCW; 1899 break; 1900 case DRM_MODE_ROTATE_180: 1901 info->fbcon_rotate_hint = FB_ROTATE_UD; 1902 break; 1903 case DRM_MODE_ROTATE_270: 1904 info->fbcon_rotate_hint = FB_ROTATE_CW; 1905 break; 1906 default: 1907 /* 1908 * Multiple bits are set / multiple rotations requested 1909 * fbcon cannot handle separate rotation settings per 1910 * output, so fallback to unrotated. 1911 */ 1912 info->fbcon_rotate_hint = FB_ROTATE_UR; 1913 } 1914 } 1915 1916 /* Note: Drops fb_helper->lock before returning. */ 1917 static int 1918 __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper, 1919 int bpp_sel) 1920 { 1921 struct drm_device *dev = fb_helper->dev; 1922 struct fb_info *info; 1923 unsigned int width, height; 1924 int ret; 1925 1926 width = dev->mode_config.max_width; 1927 height = dev->mode_config.max_height; 1928 1929 drm_client_modeset_probe(&fb_helper->client, width, height); 1930 ret = drm_fb_helper_single_fb_probe(fb_helper, bpp_sel); 1931 if (ret < 0) { 1932 if (ret == -EAGAIN) { 1933 fb_helper->preferred_bpp = bpp_sel; 1934 fb_helper->deferred_setup = true; 1935 ret = 0; 1936 } 1937 mutex_unlock(&fb_helper->lock); 1938 1939 return ret; 1940 } 1941 drm_setup_crtcs_fb(fb_helper); 1942 1943 fb_helper->deferred_setup = false; 1944 1945 info = fb_helper->fbdev; 1946 info->var.pixclock = 0; 1947 /* Shamelessly allow physical address leaking to userspace */ 1948 #if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM) 1949 if (!drm_leak_fbdev_smem) 1950 #endif 1951 /* don't leak any physical addresses to userspace */ 1952 info->flags |= FBINFO_HIDE_SMEM_START; 1953 1954 /* Need to drop locks to avoid recursive deadlock in 1955 * register_framebuffer. This is ok because the only thing left to do is 1956 * register the fbdev emulation instance in kernel_fb_helper_list. 
*/ 1957 mutex_unlock(&fb_helper->lock); 1958 1959 ret = register_framebuffer(info); 1960 if (ret < 0) 1961 return ret; 1962 1963 drm_info(dev, "fb%d: %s frame buffer device\n", 1964 info->node, info->fix.id); 1965 1966 mutex_lock(&kernel_fb_helper_lock); 1967 if (list_empty(&kernel_fb_helper_list)) 1968 register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); 1969 1970 list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list); 1971 mutex_unlock(&kernel_fb_helper_lock); 1972 1973 return 0; 1974 } 1975 1976 /** 1977 * drm_fb_helper_initial_config - setup a sane initial connector configuration 1978 * @fb_helper: fb_helper device struct 1979 * @bpp_sel: bpp value to use for the framebuffer configuration 1980 * 1981 * Scans the CRTCs and connectors and tries to put together an initial setup. 1982 * At the moment, this is a cloned configuration across all heads with 1983 * a new framebuffer object as the backing store. 1984 * 1985 * Note that this also registers the fbdev and so allows userspace to call into 1986 * the driver through the fbdev interfaces. 1987 * 1988 * This function will call down into the &drm_fb_helper_funcs.fb_probe callback 1989 * to let the driver allocate and initialize the fbdev info structure and the 1990 * drm framebuffer used to back the fbdev. drm_fb_helper_fill_info() is provided 1991 * as a helper to setup simple default values for the fbdev info structure. 1992 * 1993 * HANG DEBUGGING: 1994 * 1995 * When you have fbcon support built-in or already loaded, this function will do 1996 * a full modeset to setup the fbdev console. Due to locking misdesign in the 1997 * VT/fbdev subsystem that entire modeset sequence has to be done while holding 1998 * console_lock. Until console_unlock is called no dmesg lines will be sent out 1999 * to consoles, not even serial console. This means when your driver crashes, 2000 * you will see absolutely nothing else but a system stuck in this function, 2001 * with no further output. Any kind of printk() you place within your own driver 2002 * or in the drm core modeset code will also never show up. 2003 * 2004 * Standard debug practice is to run the fbcon setup without taking the 2005 * console_lock as a hack, to be able to see backtraces and crashes on the 2006 * serial line. This can be done by setting the fb.lockless_register_fb=1 kernel 2007 * cmdline option. 2008 * 2009 * The other option is to just disable fbdev emulation since very likely the 2010 * first modeset from userspace will crash in the same way, and is even easier 2011 * to debug. This can be done by setting the drm_kms_helper.fbdev_emulation=0 2012 * kernel cmdline option. 2013 * 2014 * RETURNS: 2015 * Zero if everything went ok, nonzero otherwise. 2016 */ 2017 int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel) 2018 { 2019 int ret; 2020 2021 if (!drm_fbdev_emulation) 2022 return 0; 2023 2024 mutex_lock(&fb_helper->lock); 2025 ret = __drm_fb_helper_initial_config_and_unlock(fb_helper, bpp_sel); 2026 2027 return ret; 2028 } 2029 EXPORT_SYMBOL(drm_fb_helper_initial_config); 2030 2031 /** 2032 * drm_fb_helper_hotplug_event - respond to a hotplug notification by 2033 * probing all the outputs attached to the fb 2034 * @fb_helper: driver-allocated fbdev helper, can be NULL 2035 * 2036 * Scan the connectors attached to the fb_helper and try to put together a 2037 * setup after notification of a change in output configuration. 
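 *
 * A minimal sketch of a direct caller (hypothetical "foo" driver code, not
 * part of this helper), for instance from a work item scheduled by the
 * driver's hotplug interrupt handler:
 *
 *	static void foo_hotplug_work_fn(struct work_struct *work)
 *	{
 *		struct foo_device *foo =
 *			container_of(work, struct foo_device, hotplug_work);
 *
 *		drm_fb_helper_hotplug_event(foo->drm.fb_helper);
 *	}
 *
 * Passing dev->fb_helper here is fine even while it is still NULL; a NULL
 * @fb_helper is tolerated and the function simply returns 0.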
2038 * 2039 * Called at runtime, takes the mode config locks to be able to check/change the 2040 * modeset configuration. Must be run from process context (which usually means 2041 * either the output polling work or a work item launched from the driver's 2042 * hotplug interrupt). 2043 * 2044 * Note that drivers may call this even before calling 2045 * drm_fb_helper_initial_config but only after drm_fb_helper_init. This allows 2046 * for a race-free fbcon setup and will make sure that the fbdev emulation will 2047 * not miss any hotplug events. 2048 * 2049 * RETURNS: 2050 * 0 on success and a non-zero error code otherwise. 2051 */ 2052 int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) 2053 { 2054 int err = 0; 2055 2056 if (!drm_fbdev_emulation || !fb_helper) 2057 return 0; 2058 2059 mutex_lock(&fb_helper->lock); 2060 if (fb_helper->deferred_setup) { 2061 err = __drm_fb_helper_initial_config_and_unlock(fb_helper, 2062 fb_helper->preferred_bpp); 2063 return err; 2064 } 2065 2066 if (!fb_helper->fb || !drm_master_internal_acquire(fb_helper->dev)) { 2067 fb_helper->delayed_hotplug = true; 2068 mutex_unlock(&fb_helper->lock); 2069 return err; 2070 } 2071 2072 drm_master_internal_release(fb_helper->dev); 2073 2074 drm_dbg_kms(fb_helper->dev, "\n"); 2075 2076 drm_client_modeset_probe(&fb_helper->client, fb_helper->fb->width, fb_helper->fb->height); 2077 drm_setup_crtcs_fb(fb_helper); 2078 mutex_unlock(&fb_helper->lock); 2079 2080 drm_fb_helper_set_par(fb_helper->fbdev); 2081 2082 return 0; 2083 } 2084 EXPORT_SYMBOL(drm_fb_helper_hotplug_event); 2085 2086 /** 2087 * drm_fb_helper_lastclose - DRM driver lastclose helper for fbdev emulation 2088 * @dev: DRM device 2089 * 2090 * This function can be used as the &drm_driver->lastclose callback for drivers 2091 * that only need to call drm_fb_helper_restore_fbdev_mode_unlocked(). 2092 */ 2093 void drm_fb_helper_lastclose(struct drm_device *dev) 2094 { 2095 drm_fb_helper_restore_fbdev_mode_unlocked(dev->fb_helper); 2096 } 2097 EXPORT_SYMBOL(drm_fb_helper_lastclose); 2098 2099 /** 2100 * drm_fb_helper_output_poll_changed - DRM mode config \.output_poll_changed 2101 * helper for fbdev emulation 2102 * @dev: DRM device 2103 * 2104 * This function can be used as the 2105 * &drm_mode_config_funcs.output_poll_changed callback for drivers that only 2106 * need to call drm_fb_helper_hotplug_event(). 
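 *
 * For example (the "foo" struct and its other members are illustrative
 * only):
 *
 *	static const struct drm_mode_config_funcs foo_mode_config_funcs = {
 *		...
 *		.output_poll_changed = drm_fb_helper_output_poll_changed,
 *	};
 *
 * drm_fb_helper_lastclose() above can be wired up in the same way as the
 * driver's &drm_driver->lastclose callback.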
2107 */ 2108 void drm_fb_helper_output_poll_changed(struct drm_device *dev) 2109 { 2110 drm_fb_helper_hotplug_event(dev->fb_helper); 2111 } 2112 EXPORT_SYMBOL(drm_fb_helper_output_poll_changed); 2113 2114 /* @user: 1=userspace, 0=fbcon */ 2115 static int drm_fbdev_fb_open(struct fb_info *info, int user) 2116 { 2117 struct drm_fb_helper *fb_helper = info->par; 2118 2119 /* No need to take a ref for fbcon because it unbinds on unregister */ 2120 if (user && !try_module_get(fb_helper->dev->driver->fops->owner)) 2121 return -ENODEV; 2122 2123 return 0; 2124 } 2125 2126 static int drm_fbdev_fb_release(struct fb_info *info, int user) 2127 { 2128 struct drm_fb_helper *fb_helper = info->par; 2129 2130 if (user) 2131 module_put(fb_helper->dev->driver->fops->owner); 2132 2133 return 0; 2134 } 2135 2136 static void drm_fbdev_cleanup(struct drm_fb_helper *fb_helper) 2137 { 2138 struct fb_info *fbi = fb_helper->fbdev; 2139 void *shadow = NULL; 2140 2141 if (!fb_helper->dev) 2142 return; 2143 2144 if (fbi) { 2145 if (fbi->fbdefio) 2146 fb_deferred_io_cleanup(fbi); 2147 if (drm_fbdev_use_shadow_fb(fb_helper)) 2148 shadow = fbi->screen_buffer; 2149 } 2150 2151 drm_fb_helper_fini(fb_helper); 2152 2153 if (shadow) 2154 vfree(shadow); 2155 else if (fb_helper->buffer) 2156 drm_client_buffer_vunmap(fb_helper->buffer); 2157 2158 drm_client_framebuffer_delete(fb_helper->buffer); 2159 } 2160 2161 static void drm_fbdev_release(struct drm_fb_helper *fb_helper) 2162 { 2163 drm_fbdev_cleanup(fb_helper); 2164 drm_client_release(&fb_helper->client); 2165 kfree(fb_helper); 2166 } 2167 2168 /* 2169 * fb_ops.fb_destroy is called by the last put_fb_info() call at the end of 2170 * unregister_framebuffer() or fb_release(). 2171 */ 2172 static void drm_fbdev_fb_destroy(struct fb_info *info) 2173 { 2174 drm_fbdev_release(info->par); 2175 } 2176 2177 static int drm_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma) 2178 { 2179 struct drm_fb_helper *fb_helper = info->par; 2180 2181 if (drm_fbdev_use_shadow_fb(fb_helper)) 2182 return fb_deferred_io_mmap(info, vma); 2183 else if (fb_helper->dev->driver->gem_prime_mmap) 2184 return fb_helper->dev->driver->gem_prime_mmap(fb_helper->buffer->gem, vma); 2185 else 2186 return -ENODEV; 2187 } 2188 2189 static bool drm_fbdev_use_iomem(struct fb_info *info) 2190 { 2191 struct drm_fb_helper *fb_helper = info->par; 2192 struct drm_client_buffer *buffer = fb_helper->buffer; 2193 2194 return !drm_fbdev_use_shadow_fb(fb_helper) && buffer->map.is_iomem; 2195 } 2196 2197 static ssize_t fb_read_screen_base(struct fb_info *info, char __user *buf, size_t count, 2198 loff_t pos) 2199 { 2200 const char __iomem *src = info->screen_base + pos; 2201 size_t alloc_size = min_t(size_t, count, PAGE_SIZE); 2202 ssize_t ret = 0; 2203 int err = 0; 2204 char *tmp; 2205 2206 tmp = kmalloc(alloc_size, GFP_KERNEL); 2207 if (!tmp) 2208 return -ENOMEM; 2209 2210 while (count) { 2211 size_t c = min_t(size_t, count, alloc_size); 2212 2213 memcpy_fromio(tmp, src, c); 2214 if (copy_to_user(buf, tmp, c)) { 2215 err = -EFAULT; 2216 break; 2217 } 2218 2219 src += c; 2220 buf += c; 2221 ret += c; 2222 count -= c; 2223 } 2224 2225 kfree(tmp); 2226 2227 return ret ? 
ret : err; 2228 } 2229 2230 static ssize_t fb_read_screen_buffer(struct fb_info *info, char __user *buf, size_t count, 2231 loff_t pos) 2232 { 2233 const char *src = info->screen_buffer + pos; 2234 2235 if (copy_to_user(buf, src, count)) 2236 return -EFAULT; 2237 2238 return count; 2239 } 2240 2241 static ssize_t drm_fbdev_fb_read(struct fb_info *info, char __user *buf, 2242 size_t count, loff_t *ppos) 2243 { 2244 loff_t pos = *ppos; 2245 size_t total_size; 2246 ssize_t ret; 2247 2248 if (info->screen_size) 2249 total_size = info->screen_size; 2250 else 2251 total_size = info->fix.smem_len; 2252 2253 if (pos >= total_size) 2254 return 0; 2255 if (count >= total_size) 2256 count = total_size; 2257 if (total_size - count < pos) 2258 count = total_size - pos; 2259 2260 if (drm_fbdev_use_iomem(info)) 2261 ret = fb_read_screen_base(info, buf, count, pos); 2262 else 2263 ret = fb_read_screen_buffer(info, buf, count, pos); 2264 2265 if (ret > 0) 2266 *ppos += ret; 2267 2268 return ret; 2269 } 2270 2271 static ssize_t fb_write_screen_base(struct fb_info *info, const char __user *buf, size_t count, 2272 loff_t pos) 2273 { 2274 char __iomem *dst = info->screen_base + pos; 2275 size_t alloc_size = min_t(size_t, count, PAGE_SIZE); 2276 ssize_t ret = 0; 2277 int err = 0; 2278 u8 *tmp; 2279 2280 tmp = kmalloc(alloc_size, GFP_KERNEL); 2281 if (!tmp) 2282 return -ENOMEM; 2283 2284 while (count) { 2285 size_t c = min_t(size_t, count, alloc_size); 2286 2287 if (copy_from_user(tmp, buf, c)) { 2288 err = -EFAULT; 2289 break; 2290 } 2291 memcpy_toio(dst, tmp, c); 2292 2293 dst += c; 2294 buf += c; 2295 ret += c; 2296 count -= c; 2297 } 2298 2299 kfree(tmp); 2300 2301 return ret ? ret : err; 2302 } 2303 2304 static ssize_t fb_write_screen_buffer(struct fb_info *info, const char __user *buf, size_t count, 2305 loff_t pos) 2306 { 2307 char *dst = info->screen_buffer + pos; 2308 2309 if (copy_from_user(dst, buf, count)) 2310 return -EFAULT; 2311 2312 return count; 2313 } 2314 2315 static ssize_t drm_fbdev_fb_write(struct fb_info *info, const char __user *buf, 2316 size_t count, loff_t *ppos) 2317 { 2318 loff_t pos = *ppos; 2319 size_t total_size; 2320 ssize_t ret; 2321 struct drm_rect damage_area; 2322 int err = 0; 2323 2324 if (info->screen_size) 2325 total_size = info->screen_size; 2326 else 2327 total_size = info->fix.smem_len; 2328 2329 if (pos > total_size) 2330 return -EFBIG; 2331 if (count > total_size) { 2332 err = -EFBIG; 2333 count = total_size; 2334 } 2335 if (total_size - count < pos) { 2336 if (!err) 2337 err = -ENOSPC; 2338 count = total_size - pos; 2339 } 2340 2341 /* 2342 * Copy to framebuffer even if we already logged an error. Emulates 2343 * the behavior of the original fbdev implementation. 
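 *
 * For instance (illustrative numbers): with total_size == 4096, a write of
 * count == 16 at pos == 4092 is clamped to the remaining 4 bytes; err is
 * set to -ENOSPC, but the copy below still runs and the short length 4 is
 * returned. err itself is only returned when nothing could be copied at
 * all, e.g. for pos == total_size.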
2344 */ 2345 if (drm_fbdev_use_iomem(info)) 2346 ret = fb_write_screen_base(info, buf, count, pos); 2347 else 2348 ret = fb_write_screen_buffer(info, buf, count, pos); 2349 2350 if (ret < 0) 2351 return ret; /* return last error, if any */ 2352 else if (!ret) 2353 return err; /* return previous error, if any */ 2354 2355 *ppos += ret; 2356 2357 drm_fb_helper_memory_range_to_clip(info, pos, ret, &damage_area); 2358 drm_fb_helper_damage(info, damage_area.x1, damage_area.y1, 2359 drm_rect_width(&damage_area), 2360 drm_rect_height(&damage_area)); 2361 2362 return ret; 2363 } 2364 2365 static void drm_fbdev_fb_fillrect(struct fb_info *info, 2366 const struct fb_fillrect *rect) 2367 { 2368 if (drm_fbdev_use_iomem(info)) 2369 drm_fb_helper_cfb_fillrect(info, rect); 2370 else 2371 drm_fb_helper_sys_fillrect(info, rect); 2372 } 2373 2374 static void drm_fbdev_fb_copyarea(struct fb_info *info, 2375 const struct fb_copyarea *area) 2376 { 2377 if (drm_fbdev_use_iomem(info)) 2378 drm_fb_helper_cfb_copyarea(info, area); 2379 else 2380 drm_fb_helper_sys_copyarea(info, area); 2381 } 2382 2383 static void drm_fbdev_fb_imageblit(struct fb_info *info, 2384 const struct fb_image *image) 2385 { 2386 if (drm_fbdev_use_iomem(info)) 2387 drm_fb_helper_cfb_imageblit(info, image); 2388 else 2389 drm_fb_helper_sys_imageblit(info, image); 2390 } 2391 2392 static const struct fb_ops drm_fbdev_fb_ops = { 2393 .owner = THIS_MODULE, 2394 DRM_FB_HELPER_DEFAULT_OPS, 2395 .fb_open = drm_fbdev_fb_open, 2396 .fb_release = drm_fbdev_fb_release, 2397 .fb_destroy = drm_fbdev_fb_destroy, 2398 .fb_mmap = drm_fbdev_fb_mmap, 2399 .fb_read = drm_fbdev_fb_read, 2400 .fb_write = drm_fbdev_fb_write, 2401 .fb_fillrect = drm_fbdev_fb_fillrect, 2402 .fb_copyarea = drm_fbdev_fb_copyarea, 2403 .fb_imageblit = drm_fbdev_fb_imageblit, 2404 }; 2405 2406 static struct fb_deferred_io drm_fbdev_defio = { 2407 .delay = HZ / 20, 2408 .deferred_io = drm_fb_helper_deferred_io, 2409 }; 2410 2411 /* 2412 * This function uses the client API to create a framebuffer backed by a dumb buffer. 2413 * 2414 * The _sys_ versions are used for &fb_ops.fb_read, fb_write, fb_fillrect, 2415 * fb_copyarea, fb_imageblit. 
2416 */ 2417 static int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper, 2418 struct drm_fb_helper_surface_size *sizes) 2419 { 2420 struct drm_client_dev *client = &fb_helper->client; 2421 struct drm_device *dev = fb_helper->dev; 2422 struct drm_client_buffer *buffer; 2423 struct drm_framebuffer *fb; 2424 struct fb_info *fbi; 2425 u32 format; 2426 struct iosys_map map; 2427 int ret; 2428 2429 drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n", 2430 sizes->surface_width, sizes->surface_height, 2431 sizes->surface_bpp); 2432 2433 format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth); 2434 buffer = drm_client_framebuffer_create(client, sizes->surface_width, 2435 sizes->surface_height, format); 2436 if (IS_ERR(buffer)) 2437 return PTR_ERR(buffer); 2438 2439 fb_helper->buffer = buffer; 2440 fb_helper->fb = buffer->fb; 2441 fb = buffer->fb; 2442 2443 fbi = drm_fb_helper_alloc_fbi(fb_helper); 2444 if (IS_ERR(fbi)) 2445 return PTR_ERR(fbi); 2446 2447 fbi->fbops = &drm_fbdev_fb_ops; 2448 fbi->screen_size = sizes->surface_height * fb->pitches[0]; 2449 fbi->fix.smem_len = fbi->screen_size; 2450 fbi->flags = FBINFO_DEFAULT; 2451 2452 drm_fb_helper_fill_info(fbi, fb_helper, sizes); 2453 2454 if (drm_fbdev_use_shadow_fb(fb_helper)) { 2455 fbi->screen_buffer = vzalloc(fbi->screen_size); 2456 if (!fbi->screen_buffer) 2457 return -ENOMEM; 2458 fbi->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST; 2459 2460 fbi->fbdefio = &drm_fbdev_defio; 2461 fb_deferred_io_init(fbi); 2462 } else { 2463 /* buffer is mapped for HW framebuffer */ 2464 ret = drm_client_buffer_vmap(fb_helper->buffer, &map); 2465 if (ret) 2466 return ret; 2467 if (map.is_iomem) { 2468 fbi->screen_base = map.vaddr_iomem; 2469 } else { 2470 fbi->screen_buffer = map.vaddr; 2471 fbi->flags |= FBINFO_VIRTFB; 2472 } 2473 2474 /* 2475 * Shamelessly leak the physical address to user-space. As 2476 * page_to_phys() is undefined for I/O memory, warn in this 2477 * case. 
2478 */ 2479 #if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM) 2480 if (drm_leak_fbdev_smem && fbi->fix.smem_start == 0 && 2481 !drm_WARN_ON_ONCE(dev, map.is_iomem)) 2482 fbi->fix.smem_start = 2483 page_to_phys(virt_to_page(fbi->screen_buffer)); 2484 #endif 2485 } 2486 2487 return 0; 2488 } 2489 2490 static const struct drm_fb_helper_funcs drm_fb_helper_generic_funcs = { 2491 .fb_probe = drm_fb_helper_generic_probe, 2492 }; 2493 2494 static void drm_fbdev_client_unregister(struct drm_client_dev *client) 2495 { 2496 struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); 2497 2498 if (fb_helper->fbdev) 2499 /* drm_fbdev_fb_destroy() takes care of cleanup */ 2500 drm_fb_helper_unregister_fbi(fb_helper); 2501 else 2502 drm_fbdev_release(fb_helper); 2503 } 2504 2505 static int drm_fbdev_client_restore(struct drm_client_dev *client) 2506 { 2507 drm_fb_helper_lastclose(client->dev); 2508 2509 return 0; 2510 } 2511 2512 static int drm_fbdev_client_hotplug(struct drm_client_dev *client) 2513 { 2514 struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); 2515 struct drm_device *dev = client->dev; 2516 int ret; 2517 2518 /* Setup is not retried if it has failed */ 2519 if (!fb_helper->dev && fb_helper->funcs) 2520 return 0; 2521 2522 if (dev->fb_helper) 2523 return drm_fb_helper_hotplug_event(dev->fb_helper); 2524 2525 if (!dev->mode_config.num_connector) { 2526 drm_dbg_kms(dev, "No connectors found, will not create framebuffer!\n"); 2527 return 0; 2528 } 2529 2530 drm_fb_helper_prepare(dev, fb_helper, &drm_fb_helper_generic_funcs); 2531 2532 ret = drm_fb_helper_init(dev, fb_helper); 2533 if (ret) 2534 goto err; 2535 2536 if (!drm_drv_uses_atomic_modeset(dev)) 2537 drm_helper_disable_unused_functions(dev); 2538 2539 ret = drm_fb_helper_initial_config(fb_helper, fb_helper->preferred_bpp); 2540 if (ret) 2541 goto err_cleanup; 2542 2543 return 0; 2544 2545 err_cleanup: 2546 drm_fbdev_cleanup(fb_helper); 2547 err: 2548 fb_helper->dev = NULL; 2549 fb_helper->fbdev = NULL; 2550 2551 drm_err(dev, "fbdev: Failed to setup generic emulation (ret=%d)\n", ret); 2552 2553 return ret; 2554 } 2555 2556 static const struct drm_client_funcs drm_fbdev_client_funcs = { 2557 .owner = THIS_MODULE, 2558 .unregister = drm_fbdev_client_unregister, 2559 .restore = drm_fbdev_client_restore, 2560 .hotplug = drm_fbdev_client_hotplug, 2561 }; 2562 2563 /** 2564 * drm_fbdev_generic_setup() - Setup generic fbdev emulation 2565 * @dev: DRM device 2566 * @preferred_bpp: Preferred bits per pixel for the device. 2567 * @dev->mode_config.preferred_depth is used if this is zero. 2568 * 2569 * This function sets up generic fbdev emulation for drivers that supports 2570 * dumb buffers with a virtual address and that can be mmap'ed. 2571 * drm_fbdev_generic_setup() shall be called after the DRM driver registered 2572 * the new DRM device with drm_dev_register(). 2573 * 2574 * Restore, hotplug events and teardown are all taken care of. Drivers that do 2575 * suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves. 2576 * Simple drivers might use drm_mode_config_helper_suspend(). 2577 * 2578 * Drivers that set the dirty callback on their framebuffer will get a shadow 2579 * fbdev buffer that is blitted onto the real buffer. This is done in order to 2580 * make deferred I/O work with all kinds of buffers. A shadow buffer can be 2581 * requested explicitly by setting struct drm_mode_config.prefer_shadow or 2582 * struct drm_mode_config.prefer_shadow_fbdev to true beforehand. 
This is 2583 * required to use generic fbdev emulation with SHMEM helpers. 2584 * 2585 * This function is safe to call even when there are no connectors present. 2586 * Setup will be retried on the next hotplug event. 2587 * 2588 * The fbdev is destroyed by drm_dev_unregister(). 2589 */ 2590 void drm_fbdev_generic_setup(struct drm_device *dev, 2591 unsigned int preferred_bpp) 2592 { 2593 struct drm_fb_helper *fb_helper; 2594 int ret; 2595 2596 drm_WARN(dev, !dev->registered, "Device has not been registered.\n"); 2597 drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n"); 2598 2599 if (!drm_fbdev_emulation) 2600 return; 2601 2602 fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL); 2603 if (!fb_helper) { 2604 drm_err(dev, "Failed to allocate fb_helper\n"); 2605 return; 2606 } 2607 2608 ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs); 2609 if (ret) { 2610 kfree(fb_helper); 2611 drm_err(dev, "Failed to register client: %d\n", ret); 2612 return; 2613 } 2614 2615 /* 2616 * FIXME: This mixes up depth with bpp, which results in a glorious 2617 * mess: some drivers pick wrong fbdev defaults and others pick wrong 2618 * preferred_depth defaults. 2619 */ 2620 if (!preferred_bpp) 2621 preferred_bpp = dev->mode_config.preferred_depth; 2622 if (!preferred_bpp) 2623 preferred_bpp = 32; 2624 fb_helper->preferred_bpp = preferred_bpp; 2625 2626 ret = drm_fbdev_client_hotplug(&fb_helper->client); 2627 if (ret) 2628 drm_dbg_kms(dev, "client hotplug ret=%d\n", ret); 2629 2630 drm_client_register(&fb_helper->client); 2631 } 2632 EXPORT_SYMBOL(drm_fbdev_generic_setup); 2633
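/*
 * Usage sketch (illustrative only, not part of this helper): a driver using
 * dumb buffers would enable the generic fbdev emulation right after
 * registering its DRM device; the "foo" names are hypothetical.
 *
 *	ret = drm_dev_register(&foo->drm, 0);
 *	if (ret)
 *		return ret;
 *
 *	drm_fbdev_generic_setup(&foo->drm, 32);
 *
 * Passing 0 as the preferred bpp falls back to
 * dev->mode_config.preferred_depth and finally to 32, as implemented above.
 */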