1 /* 2 * Copyright (c) 2006-2009 Red Hat Inc. 3 * Copyright (c) 2006-2008 Intel Corporation 4 * Copyright (c) 2007 Dave Airlie <airlied@linux.ie> 5 * 6 * DRM framebuffer helper functions 7 * 8 * Permission to use, copy, modify, distribute, and sell this software and its 9 * documentation for any purpose is hereby granted without fee, provided that 10 * the above copyright notice appear in all copies and that both that copyright 11 * notice and this permission notice appear in supporting documentation, and 12 * that the name of the copyright holders not be used in advertising or 13 * publicity pertaining to distribution of the software without specific, 14 * written prior permission. The copyright holders make no representations 15 * about the suitability of this software for any purpose. It is provided "as 16 * is" without express or implied warranty. 17 * 18 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, 19 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO 20 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR 21 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, 22 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER 23 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE 24 * OF THIS SOFTWARE. 25 * 26 * Authors: 27 * Dave Airlie <airlied@linux.ie> 28 * Jesse Barnes <jesse.barnes@intel.com> 29 */ 30 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 31 32 #include <linux/console.h> 33 #include <linux/dma-buf.h> 34 #include <linux/kernel.h> 35 #include <linux/module.h> 36 #include <linux/slab.h> 37 #include <linux/sysrq.h> 38 #include <linux/vmalloc.h> 39 40 #include <drm/drm_atomic.h> 41 #include <drm/drm_crtc.h> 42 #include <drm/drm_crtc_helper.h> 43 #include <drm/drm_drv.h> 44 #include <drm/drm_fb_helper.h> 45 #include <drm/drm_fourcc.h> 46 #include <drm/drm_framebuffer.h> 47 #include <drm/drm_print.h> 48 #include <drm/drm_vblank.h> 49 50 #include "drm_crtc_helper_internal.h" 51 #include "drm_internal.h" 52 53 static bool drm_fbdev_emulation = true; 54 module_param_named(fbdev_emulation, drm_fbdev_emulation, bool, 0600); 55 MODULE_PARM_DESC(fbdev_emulation, 56 "Enable legacy fbdev emulation [default=true]"); 57 58 static int drm_fbdev_overalloc = CONFIG_DRM_FBDEV_OVERALLOC; 59 module_param(drm_fbdev_overalloc, int, 0444); 60 MODULE_PARM_DESC(drm_fbdev_overalloc, 61 "Overallocation of the fbdev buffer (%) [default=" 62 __MODULE_STRING(CONFIG_DRM_FBDEV_OVERALLOC) "]"); 63 64 /* 65 * In order to keep user-space compatibility, we want in certain use-cases 66 * to keep leaking the fbdev physical address to the user-space program 67 * handling the fbdev buffer. 68 * This is a bad habit essentially kept into closed source opengl driver 69 * that should really be moved into open-source upstream projects instead 70 * of using legacy physical addresses in user space to communicate with 71 * other out-of-tree kernel modules. 72 * 73 * This module_param *should* be removed as soon as possible and be 74 * considered as a broken and legacy behaviour from a modern fbdev device. 
75 */ 76 #if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM) 77 static bool drm_leak_fbdev_smem = false; 78 module_param_unsafe(drm_leak_fbdev_smem, bool, 0600); 79 MODULE_PARM_DESC(drm_leak_fbdev_smem, 80 "Allow unsafe leaking fbdev physical smem address [default=false]"); 81 #endif 82 83 static LIST_HEAD(kernel_fb_helper_list); 84 static DEFINE_MUTEX(kernel_fb_helper_lock); 85 86 /** 87 * DOC: fbdev helpers 88 * 89 * The fb helper functions are useful to provide an fbdev on top of a drm kernel 90 * mode setting driver. They can be used mostly independently from the crtc 91 * helper functions used by many drivers to implement the kernel mode setting 92 * interfaces. 93 * 94 * Drivers that support a dumb buffer with a virtual address and mmap support, 95 * should try out the generic fbdev emulation using drm_fbdev_generic_setup(). 96 * It will automatically set up deferred I/O if the driver requires a shadow 97 * buffer. 98 * 99 * At runtime drivers should restore the fbdev console by using 100 * drm_fb_helper_lastclose() as their &drm_driver.lastclose callback. 101 * They should also notify the fb helper code from updates to the output 102 * configuration by using drm_fb_helper_output_poll_changed() as their 103 * &drm_mode_config_funcs.output_poll_changed callback. 104 * 105 * For suspend/resume consider using drm_mode_config_helper_suspend() and 106 * drm_mode_config_helper_resume() which takes care of fbdev as well. 107 * 108 * All other functions exported by the fb helper library can be used to 109 * implement the fbdev driver interface by the driver. 110 * 111 * It is possible, though perhaps somewhat tricky, to implement race-free 112 * hotplug detection using the fbdev helpers. The drm_fb_helper_prepare() 113 * helper must be called first to initialize the minimum required to make 114 * hotplug detection work. Drivers also need to make sure to properly set up 115 * the &drm_mode_config.funcs member. After calling drm_kms_helper_poll_init() 116 * it is safe to enable interrupts and start processing hotplug events. At the 117 * same time, drivers should initialize all modeset objects such as CRTCs, 118 * encoders and connectors. To finish up the fbdev helper initialization, the 119 * drm_fb_helper_init() function is called. To probe for all attached displays 120 * and set up an initial configuration using the detected hardware, drivers 121 * should call drm_fb_helper_initial_config(). 122 * 123 * If &drm_framebuffer_funcs.dirty is set, the 124 * drm_fb_helper_{cfb,sys}_{write,fillrect,copyarea,imageblit} functions will 125 * accumulate changes and schedule &drm_fb_helper.dirty_work to run right 126 * away. This worker then calls the dirty() function ensuring that it will 127 * always run in process context since the fb_*() function could be running in 128 * atomic context. If drm_fb_helper_deferred_io() is used as the deferred_io 129 * callback it will also schedule dirty_work with the damage collected from the 130 * mmap page writes. 131 * 132 * Deferred I/O is not compatible with SHMEM. Such drivers should request an 133 * fbdev shadow buffer and call drm_fbdev_generic_setup() instead. 
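 *
 * A minimal sketch of the init order described above. The names my and
 * my_fb_helper_funcs are hypothetical: my points to a driver-private structure
 * that embeds the helper as @fb_helper, and my_fb_helper_funcs is the driver's
 * own &struct drm_fb_helper_funcs::
 *
 *	drm_fb_helper_prepare(dev, &my->fb_helper, &my_fb_helper_funcs);
 *	drm_kms_helper_poll_init(dev);
 *
 * then, once interrupts are enabled and all CRTCs, encoders and connectors
 * have been created::
 *
 *	ret = drm_fb_helper_init(dev, &my->fb_helper);
 *	if (ret)
 *		return ret;
 *	ret = drm_fb_helper_initial_config(&my->fb_helper, 32);
 *	if (ret)
 *		return ret;
 *
 * The 32 passed to drm_fb_helper_initial_config() is only an example bpp
 * value; drivers pick whatever their preferred framebuffer depth is.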
134 */ 135 136 static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc) 137 { 138 uint16_t *r_base, *g_base, *b_base; 139 140 if (crtc->funcs->gamma_set == NULL) 141 return; 142 143 r_base = crtc->gamma_store; 144 g_base = r_base + crtc->gamma_size; 145 b_base = g_base + crtc->gamma_size; 146 147 crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 148 crtc->gamma_size, NULL); 149 } 150 151 /** 152 * drm_fb_helper_debug_enter - implementation for &fb_ops.fb_debug_enter 153 * @info: fbdev registered by the helper 154 */ 155 int drm_fb_helper_debug_enter(struct fb_info *info) 156 { 157 struct drm_fb_helper *helper = info->par; 158 const struct drm_crtc_helper_funcs *funcs; 159 struct drm_mode_set *mode_set; 160 161 list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) { 162 mutex_lock(&helper->client.modeset_mutex); 163 drm_client_for_each_modeset(mode_set, &helper->client) { 164 if (!mode_set->crtc->enabled) 165 continue; 166 167 funcs = mode_set->crtc->helper_private; 168 if (funcs->mode_set_base_atomic == NULL) 169 continue; 170 171 if (drm_drv_uses_atomic_modeset(mode_set->crtc->dev)) 172 continue; 173 174 funcs->mode_set_base_atomic(mode_set->crtc, 175 mode_set->fb, 176 mode_set->x, 177 mode_set->y, 178 ENTER_ATOMIC_MODE_SET); 179 } 180 mutex_unlock(&helper->client.modeset_mutex); 181 } 182 183 return 0; 184 } 185 EXPORT_SYMBOL(drm_fb_helper_debug_enter); 186 187 /** 188 * drm_fb_helper_debug_leave - implementation for &fb_ops.fb_debug_leave 189 * @info: fbdev registered by the helper 190 */ 191 int drm_fb_helper_debug_leave(struct fb_info *info) 192 { 193 struct drm_fb_helper *helper = info->par; 194 struct drm_client_dev *client = &helper->client; 195 struct drm_device *dev = helper->dev; 196 struct drm_crtc *crtc; 197 const struct drm_crtc_helper_funcs *funcs; 198 struct drm_mode_set *mode_set; 199 struct drm_framebuffer *fb; 200 201 mutex_lock(&client->modeset_mutex); 202 drm_client_for_each_modeset(mode_set, client) { 203 crtc = mode_set->crtc; 204 if (drm_drv_uses_atomic_modeset(crtc->dev)) 205 continue; 206 207 funcs = crtc->helper_private; 208 fb = crtc->primary->fb; 209 210 if (!crtc->enabled) 211 continue; 212 213 if (!fb) { 214 drm_err(dev, "no fb to restore?\n"); 215 continue; 216 } 217 218 if (funcs->mode_set_base_atomic == NULL) 219 continue; 220 221 drm_fb_helper_restore_lut_atomic(mode_set->crtc); 222 funcs->mode_set_base_atomic(mode_set->crtc, fb, crtc->x, 223 crtc->y, LEAVE_ATOMIC_MODE_SET); 224 } 225 mutex_unlock(&client->modeset_mutex); 226 227 return 0; 228 } 229 EXPORT_SYMBOL(drm_fb_helper_debug_leave); 230 231 static int 232 __drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper, 233 bool force) 234 { 235 bool do_delayed; 236 int ret; 237 238 if (!drm_fbdev_emulation || !fb_helper) 239 return -ENODEV; 240 241 if (READ_ONCE(fb_helper->deferred_setup)) 242 return 0; 243 244 mutex_lock(&fb_helper->lock); 245 if (force) { 246 /* 247 * Yes this is the _locked version which expects the master lock 248 * to be held. But for forced restores we're intentionally 249 * racing here, see drm_fb_helper_set_par(). 
250 */ 251 ret = drm_client_modeset_commit_locked(&fb_helper->client); 252 } else { 253 ret = drm_client_modeset_commit(&fb_helper->client); 254 } 255 256 do_delayed = fb_helper->delayed_hotplug; 257 if (do_delayed) 258 fb_helper->delayed_hotplug = false; 259 mutex_unlock(&fb_helper->lock); 260 261 if (do_delayed) 262 drm_fb_helper_hotplug_event(fb_helper); 263 264 return ret; 265 } 266 267 /** 268 * drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration 269 * @fb_helper: driver-allocated fbdev helper, can be NULL 270 * 271 * This should be called from driver's drm &drm_driver.lastclose callback 272 * when implementing an fbcon on top of kms using this helper. This ensures that 273 * the user isn't greeted with a black screen when e.g. X dies. 274 * 275 * RETURNS: 276 * Zero if everything went ok, negative error code otherwise. 277 */ 278 int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper) 279 { 280 return __drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper, false); 281 } 282 EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked); 283 284 #ifdef CONFIG_MAGIC_SYSRQ 285 /* emergency restore, don't bother with error reporting */ 286 static void drm_fb_helper_restore_work_fn(struct work_struct *ignored) 287 { 288 struct drm_fb_helper *helper; 289 290 mutex_lock(&kernel_fb_helper_lock); 291 list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) { 292 struct drm_device *dev = helper->dev; 293 294 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 295 continue; 296 297 mutex_lock(&helper->lock); 298 drm_client_modeset_commit_locked(&helper->client); 299 mutex_unlock(&helper->lock); 300 } 301 mutex_unlock(&kernel_fb_helper_lock); 302 } 303 304 static DECLARE_WORK(drm_fb_helper_restore_work, drm_fb_helper_restore_work_fn); 305 306 static void drm_fb_helper_sysrq(int dummy1) 307 { 308 schedule_work(&drm_fb_helper_restore_work); 309 } 310 311 static const struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { 312 .handler = drm_fb_helper_sysrq, 313 .help_msg = "force-fb(v)", 314 .action_msg = "Restore framebuffer console", 315 }; 316 #else 317 static const struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { }; 318 #endif 319 320 static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode) 321 { 322 struct drm_fb_helper *fb_helper = info->par; 323 324 mutex_lock(&fb_helper->lock); 325 drm_client_modeset_dpms(&fb_helper->client, dpms_mode); 326 mutex_unlock(&fb_helper->lock); 327 } 328 329 /** 330 * drm_fb_helper_blank - implementation for &fb_ops.fb_blank 331 * @blank: desired blanking state 332 * @info: fbdev registered by the helper 333 */ 334 int drm_fb_helper_blank(int blank, struct fb_info *info) 335 { 336 if (oops_in_progress) 337 return -EBUSY; 338 339 switch (blank) { 340 /* Display: On; HSync: On, VSync: On */ 341 case FB_BLANK_UNBLANK: 342 drm_fb_helper_dpms(info, DRM_MODE_DPMS_ON); 343 break; 344 /* Display: Off; HSync: On, VSync: On */ 345 case FB_BLANK_NORMAL: 346 drm_fb_helper_dpms(info, DRM_MODE_DPMS_STANDBY); 347 break; 348 /* Display: Off; HSync: Off, VSync: On */ 349 case FB_BLANK_HSYNC_SUSPEND: 350 drm_fb_helper_dpms(info, DRM_MODE_DPMS_STANDBY); 351 break; 352 /* Display: Off; HSync: On, VSync: Off */ 353 case FB_BLANK_VSYNC_SUSPEND: 354 drm_fb_helper_dpms(info, DRM_MODE_DPMS_SUSPEND); 355 break; 356 /* Display: Off; HSync: Off, VSync: Off */ 357 case FB_BLANK_POWERDOWN: 358 drm_fb_helper_dpms(info, DRM_MODE_DPMS_OFF); 359 break; 360 } 361 return 0; 362 } 363 EXPORT_SYMBOL(drm_fb_helper_blank); 364 365 static 
void drm_fb_helper_resume_worker(struct work_struct *work) 366 { 367 struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper, 368 resume_work); 369 370 console_lock(); 371 fb_set_suspend(helper->fbdev, 0); 372 console_unlock(); 373 } 374 375 static void drm_fb_helper_damage_blit_real(struct drm_fb_helper *fb_helper, 376 struct drm_clip_rect *clip, 377 struct iosys_map *dst) 378 { 379 struct drm_framebuffer *fb = fb_helper->fb; 380 unsigned int cpp = fb->format->cpp[0]; 381 size_t offset = clip->y1 * fb->pitches[0] + clip->x1 * cpp; 382 void *src = fb_helper->fbdev->screen_buffer + offset; 383 size_t len = (clip->x2 - clip->x1) * cpp; 384 unsigned int y; 385 386 iosys_map_incr(dst, offset); /* go to first pixel within clip rect */ 387 388 for (y = clip->y1; y < clip->y2; y++) { 389 iosys_map_memcpy_to(dst, 0, src, len); 390 iosys_map_incr(dst, fb->pitches[0]); 391 src += fb->pitches[0]; 392 } 393 } 394 395 static int drm_fb_helper_damage_blit(struct drm_fb_helper *fb_helper, 396 struct drm_clip_rect *clip) 397 { 398 struct drm_client_buffer *buffer = fb_helper->buffer; 399 struct iosys_map map, dst; 400 int ret; 401 402 /* 403 * We have to pin the client buffer to its current location while 404 * flushing the shadow buffer. In the general case, concurrent 405 * modesetting operations could try to move the buffer and would 406 * fail. The modeset has to be serialized by acquiring the reservation 407 * object of the underlying BO here. 408 * 409 * For fbdev emulation, we only have to protect against fbdev modeset 410 * operations. Nothing else will involve the client buffer's BO. So it 411 * is sufficient to acquire struct drm_fb_helper.lock here. 412 */ 413 mutex_lock(&fb_helper->lock); 414 415 ret = drm_client_buffer_vmap(buffer, &map); 416 if (ret) 417 goto out; 418 419 dst = map; 420 drm_fb_helper_damage_blit_real(fb_helper, clip, &dst); 421 422 drm_client_buffer_vunmap(buffer); 423 424 out: 425 mutex_unlock(&fb_helper->lock); 426 427 return ret; 428 } 429 430 static void drm_fb_helper_damage_work(struct work_struct *work) 431 { 432 struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper, 433 damage_work); 434 struct drm_device *dev = helper->dev; 435 struct drm_clip_rect *clip = &helper->damage_clip; 436 struct drm_clip_rect clip_copy; 437 unsigned long flags; 438 int ret; 439 440 spin_lock_irqsave(&helper->damage_lock, flags); 441 clip_copy = *clip; 442 clip->x1 = clip->y1 = ~0; 443 clip->x2 = clip->y2 = 0; 444 spin_unlock_irqrestore(&helper->damage_lock, flags); 445 446 /* Call damage handlers only if necessary */ 447 if (!(clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2)) 448 return; 449 450 if (helper->buffer) { 451 ret = drm_fb_helper_damage_blit(helper, &clip_copy); 452 if (drm_WARN_ONCE(dev, ret, "Damage blitter failed: ret=%d\n", ret)) 453 goto err; 454 } 455 456 if (helper->fb->funcs->dirty) { 457 ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1); 458 if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret)) 459 goto err; 460 } 461 462 return; 463 464 err: 465 /* 466 * Restore damage clip rectangle on errors. The next run 467 * of the damage worker will perform the update. 
	 */
	spin_lock_irqsave(&helper->damage_lock, flags);
	clip->x1 = min_t(u32, clip->x1, clip_copy.x1);
	clip->y1 = min_t(u32, clip->y1, clip_copy.y1);
	clip->x2 = max_t(u32, clip->x2, clip_copy.x2);
	clip->y2 = max_t(u32, clip->y2, clip_copy.y2);
	spin_unlock_irqrestore(&helper->damage_lock, flags);
}

/**
 * drm_fb_helper_prepare - setup a drm_fb_helper structure
 * @dev: DRM device
 * @helper: driver-allocated fbdev helper structure to set up
 * @funcs: pointer to structure of functions associated with this helper
 *
 * Sets up the bare minimum to make the framebuffer helper usable. This is
 * useful to implement race-free initialization of the polling helpers.
 */
void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
			   const struct drm_fb_helper_funcs *funcs)
{
	INIT_LIST_HEAD(&helper->kernel_fb_list);
	spin_lock_init(&helper->damage_lock);
	INIT_WORK(&helper->resume_work, drm_fb_helper_resume_worker);
	INIT_WORK(&helper->damage_work, drm_fb_helper_damage_work);
	helper->damage_clip.x1 = helper->damage_clip.y1 = ~0;
	mutex_init(&helper->lock);
	helper->funcs = funcs;
	helper->dev = dev;
}
EXPORT_SYMBOL(drm_fb_helper_prepare);

/**
 * drm_fb_helper_init - initialize a &struct drm_fb_helper
 * @dev: drm device
 * @fb_helper: driver-allocated fbdev helper structure to initialize
 *
 * This allocates the structures for the fbdev helper with the given limits.
 * Note that this won't yet touch the hardware (through the driver interfaces)
 * nor register the fbdev. This is only done in drm_fb_helper_initial_config()
 * to allow driver writers more control over the exact init sequence.
 *
 * Drivers must call drm_fb_helper_prepare() before calling this function.
 *
 * RETURNS:
 * Zero if everything went ok, nonzero otherwise.
 */
int drm_fb_helper_init(struct drm_device *dev,
		       struct drm_fb_helper *fb_helper)
{
	int ret;

	if (!drm_fbdev_emulation) {
		dev->fb_helper = fb_helper;
		return 0;
	}

	/*
	 * If this is not the generic fbdev client, initialize a drm_client
	 * without callbacks so we can use the modesets.
	 */
	if (!fb_helper->client.funcs) {
		ret = drm_client_init(dev, &fb_helper->client, "drm_fb_helper", NULL);
		if (ret)
			return ret;
	}

	dev->fb_helper = fb_helper;

	return 0;
}
EXPORT_SYMBOL(drm_fb_helper_init);

/**
 * drm_fb_helper_alloc_fbi - allocate fb_info and some of its members
 * @fb_helper: driver-allocated fbdev helper
 *
 * A helper to allocate an fb_info and its members cmap and apertures. Called
 * by the driver from within the &drm_fb_helper_funcs.fb_probe callback. Drivers
 * do not need to release the allocated fb_info structure themselves; this is
 * done automatically when calling drm_fb_helper_fini().
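 *
 * A rough sketch of typical use from a driver's &drm_fb_helper_funcs.fb_probe
 * implementation; error unwinding is omitted, and my_fb_ops and my_vaddr are
 * hypothetical driver-provided names::
 *
 *	info = drm_fb_helper_alloc_fbi(fb_helper);
 *	if (IS_ERR(info))
 *		return PTR_ERR(info);
 *
 *	info->fbops = &my_fb_ops;
 *	info->screen_base = my_vaddr;
 *	drm_fb_helper_fill_info(info, fb_helper, sizes);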
 *
 * RETURNS:
 * fb_info pointer if things went okay, pointer containing error code
 * otherwise
 */
struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper)
{
	struct device *dev = fb_helper->dev->dev;
	struct fb_info *info;
	int ret;

	info = framebuffer_alloc(0, dev);
	if (!info)
		return ERR_PTR(-ENOMEM);

	ret = fb_alloc_cmap(&info->cmap, 256, 0);
	if (ret)
		goto err_release;

	/*
	 * TODO: We really should be smarter here and alloc an aperture
	 * for each IORESOURCE_MEM resource helper->dev->dev has and also
	 * init the ranges of the apertures based on the resources.
	 * Note some drivers currently count on there being only 1 empty
	 * aperture and fill this themselves; these will need to be dealt
	 * with somehow when fixing this.
	 */
	info->apertures = alloc_apertures(1);
	if (!info->apertures) {
		ret = -ENOMEM;
		goto err_free_cmap;
	}

	fb_helper->fbdev = info;
	info->skip_vt_switch = true;

	return info;

err_free_cmap:
	fb_dealloc_cmap(&info->cmap);
err_release:
	framebuffer_release(info);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(drm_fb_helper_alloc_fbi);

/**
 * drm_fb_helper_unregister_fbi - unregister fb_info framebuffer device
 * @fb_helper: driver-allocated fbdev helper, can be NULL
 *
 * A wrapper around unregister_framebuffer, to release the fb_info
 * framebuffer device. This must be called before releasing all resources for
 * @fb_helper by calling drm_fb_helper_fini().
 */
void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper)
{
	if (fb_helper && fb_helper->fbdev)
		unregister_framebuffer(fb_helper->fbdev);
}
EXPORT_SYMBOL(drm_fb_helper_unregister_fbi);

/**
 * drm_fb_helper_fini - finalize a &struct drm_fb_helper
 * @fb_helper: driver-allocated fbdev helper, can be NULL
 *
 * This cleans up all remaining resources associated with @fb_helper.
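 *
 * A short sketch of the teardown order implied by the helpers above: the fbdev
 * device is unregistered first, then the helper is finalized::
 *
 *	drm_fb_helper_unregister_fbi(fb_helper);
 *	drm_fb_helper_fini(fb_helper);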
615 */ 616 void drm_fb_helper_fini(struct drm_fb_helper *fb_helper) 617 { 618 struct fb_info *info; 619 620 if (!fb_helper) 621 return; 622 623 fb_helper->dev->fb_helper = NULL; 624 625 if (!drm_fbdev_emulation) 626 return; 627 628 cancel_work_sync(&fb_helper->resume_work); 629 cancel_work_sync(&fb_helper->damage_work); 630 631 info = fb_helper->fbdev; 632 if (info) { 633 if (info->cmap.len) 634 fb_dealloc_cmap(&info->cmap); 635 framebuffer_release(info); 636 } 637 fb_helper->fbdev = NULL; 638 639 mutex_lock(&kernel_fb_helper_lock); 640 if (!list_empty(&fb_helper->kernel_fb_list)) { 641 list_del(&fb_helper->kernel_fb_list); 642 if (list_empty(&kernel_fb_helper_list)) 643 unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); 644 } 645 mutex_unlock(&kernel_fb_helper_lock); 646 647 mutex_destroy(&fb_helper->lock); 648 649 if (!fb_helper->client.funcs) 650 drm_client_release(&fb_helper->client); 651 } 652 EXPORT_SYMBOL(drm_fb_helper_fini); 653 654 static bool drm_fbdev_use_shadow_fb(struct drm_fb_helper *fb_helper) 655 { 656 struct drm_device *dev = fb_helper->dev; 657 struct drm_framebuffer *fb = fb_helper->fb; 658 659 return dev->mode_config.prefer_shadow_fbdev || 660 dev->mode_config.prefer_shadow || 661 fb->funcs->dirty; 662 } 663 664 static void drm_fb_helper_damage(struct fb_info *info, u32 x, u32 y, 665 u32 width, u32 height) 666 { 667 struct drm_fb_helper *helper = info->par; 668 struct drm_clip_rect *clip = &helper->damage_clip; 669 unsigned long flags; 670 671 if (!drm_fbdev_use_shadow_fb(helper)) 672 return; 673 674 spin_lock_irqsave(&helper->damage_lock, flags); 675 clip->x1 = min_t(u32, clip->x1, x); 676 clip->y1 = min_t(u32, clip->y1, y); 677 clip->x2 = max_t(u32, clip->x2, x + width); 678 clip->y2 = max_t(u32, clip->y2, y + height); 679 spin_unlock_irqrestore(&helper->damage_lock, flags); 680 681 schedule_work(&helper->damage_work); 682 } 683 684 /* 685 * Convert memory region into area of scanlines and pixels per 686 * scanline. The parameters off and len must not reach beyond 687 * the end of the framebuffer. 688 */ 689 static void drm_fb_helper_memory_range_to_clip(struct fb_info *info, off_t off, size_t len, 690 struct drm_rect *clip) 691 { 692 off_t end = off + len; 693 u32 x1 = 0; 694 u32 y1 = off / info->fix.line_length; 695 u32 x2 = info->var.xres; 696 u32 y2 = DIV_ROUND_UP(end, info->fix.line_length); 697 698 if ((y2 - y1) == 1) { 699 /* 700 * We've only written to a single scanline. Try to reduce 701 * the number of horizontal pixels that need an update. 702 */ 703 off_t bit_off = (off % info->fix.line_length) * 8; 704 off_t bit_end = (end % info->fix.line_length) * 8; 705 706 x1 = bit_off / info->var.bits_per_pixel; 707 x2 = DIV_ROUND_UP(bit_end, info->var.bits_per_pixel); 708 } 709 710 drm_rect_init(clip, x1, y1, x2 - x1, y2 - y1); 711 } 712 713 /** 714 * drm_fb_helper_deferred_io() - fbdev deferred_io callback function 715 * @info: fb_info struct pointer 716 * @pagereflist: list of mmap framebuffer pages that have to be flushed 717 * 718 * This function is used as the &fb_deferred_io.deferred_io 719 * callback function for flushing the fbdev mmap writes. 
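 *
 * A minimal sketch of how a driver might wire this up; my_fbdefio is a
 * hypothetical driver-owned instance and HZ / 20 just an example delay::
 *
 *	static struct fb_deferred_io my_fbdefio = {
 *		.delay		= HZ / 20,
 *		.deferred_io	= drm_fb_helper_deferred_io,
 *	};
 *
 *	info->fbdefio = &my_fbdefio;
 *	fb_deferred_io_init(info);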
720 */ 721 void drm_fb_helper_deferred_io(struct fb_info *info, struct list_head *pagereflist) 722 { 723 unsigned long start, end, min_off, max_off; 724 struct fb_deferred_io_pageref *pageref; 725 struct drm_rect damage_area; 726 727 min_off = ULONG_MAX; 728 max_off = 0; 729 list_for_each_entry(pageref, pagereflist, list) { 730 start = pageref->offset; 731 end = start + PAGE_SIZE; 732 min_off = min(min_off, start); 733 max_off = max(max_off, end); 734 } 735 if (min_off >= max_off) 736 return; 737 738 /* 739 * As we can only track pages, we might reach beyond the end 740 * of the screen and account for non-existing scanlines. Hence, 741 * keep the covered memory area within the screen buffer. 742 */ 743 max_off = min(max_off, info->screen_size); 744 745 drm_fb_helper_memory_range_to_clip(info, min_off, max_off - min_off, &damage_area); 746 drm_fb_helper_damage(info, damage_area.x1, damage_area.y1, 747 drm_rect_width(&damage_area), 748 drm_rect_height(&damage_area)); 749 } 750 EXPORT_SYMBOL(drm_fb_helper_deferred_io); 751 752 /** 753 * drm_fb_helper_sys_read - wrapper around fb_sys_read 754 * @info: fb_info struct pointer 755 * @buf: userspace buffer to read from framebuffer memory 756 * @count: number of bytes to read from framebuffer memory 757 * @ppos: read offset within framebuffer memory 758 * 759 * A wrapper around fb_sys_read implemented by fbdev core 760 */ 761 ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf, 762 size_t count, loff_t *ppos) 763 { 764 return fb_sys_read(info, buf, count, ppos); 765 } 766 EXPORT_SYMBOL(drm_fb_helper_sys_read); 767 768 /** 769 * drm_fb_helper_sys_write - wrapper around fb_sys_write 770 * @info: fb_info struct pointer 771 * @buf: userspace buffer to write to framebuffer memory 772 * @count: number of bytes to write to framebuffer memory 773 * @ppos: write offset within framebuffer memory 774 * 775 * A wrapper around fb_sys_write implemented by fbdev core 776 */ 777 ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf, 778 size_t count, loff_t *ppos) 779 { 780 loff_t pos = *ppos; 781 ssize_t ret; 782 struct drm_rect damage_area; 783 784 ret = fb_sys_write(info, buf, count, ppos); 785 if (ret <= 0) 786 return ret; 787 788 drm_fb_helper_memory_range_to_clip(info, pos, ret, &damage_area); 789 drm_fb_helper_damage(info, damage_area.x1, damage_area.y1, 790 drm_rect_width(&damage_area), 791 drm_rect_height(&damage_area)); 792 793 return ret; 794 } 795 EXPORT_SYMBOL(drm_fb_helper_sys_write); 796 797 /** 798 * drm_fb_helper_sys_fillrect - wrapper around sys_fillrect 799 * @info: fbdev registered by the helper 800 * @rect: info about rectangle to fill 801 * 802 * A wrapper around sys_fillrect implemented by fbdev core 803 */ 804 void drm_fb_helper_sys_fillrect(struct fb_info *info, 805 const struct fb_fillrect *rect) 806 { 807 sys_fillrect(info, rect); 808 drm_fb_helper_damage(info, rect->dx, rect->dy, rect->width, rect->height); 809 } 810 EXPORT_SYMBOL(drm_fb_helper_sys_fillrect); 811 812 /** 813 * drm_fb_helper_sys_copyarea - wrapper around sys_copyarea 814 * @info: fbdev registered by the helper 815 * @area: info about area to copy 816 * 817 * A wrapper around sys_copyarea implemented by fbdev core 818 */ 819 void drm_fb_helper_sys_copyarea(struct fb_info *info, 820 const struct fb_copyarea *area) 821 { 822 sys_copyarea(info, area); 823 drm_fb_helper_damage(info, area->dx, area->dy, area->width, area->height); 824 } 825 EXPORT_SYMBOL(drm_fb_helper_sys_copyarea); 826 827 /** 828 * drm_fb_helper_sys_imageblit - 
wrapper around sys_imageblit 829 * @info: fbdev registered by the helper 830 * @image: info about image to blit 831 * 832 * A wrapper around sys_imageblit implemented by fbdev core 833 */ 834 void drm_fb_helper_sys_imageblit(struct fb_info *info, 835 const struct fb_image *image) 836 { 837 sys_imageblit(info, image); 838 drm_fb_helper_damage(info, image->dx, image->dy, image->width, image->height); 839 } 840 EXPORT_SYMBOL(drm_fb_helper_sys_imageblit); 841 842 /** 843 * drm_fb_helper_cfb_fillrect - wrapper around cfb_fillrect 844 * @info: fbdev registered by the helper 845 * @rect: info about rectangle to fill 846 * 847 * A wrapper around cfb_fillrect implemented by fbdev core 848 */ 849 void drm_fb_helper_cfb_fillrect(struct fb_info *info, 850 const struct fb_fillrect *rect) 851 { 852 cfb_fillrect(info, rect); 853 drm_fb_helper_damage(info, rect->dx, rect->dy, rect->width, rect->height); 854 } 855 EXPORT_SYMBOL(drm_fb_helper_cfb_fillrect); 856 857 /** 858 * drm_fb_helper_cfb_copyarea - wrapper around cfb_copyarea 859 * @info: fbdev registered by the helper 860 * @area: info about area to copy 861 * 862 * A wrapper around cfb_copyarea implemented by fbdev core 863 */ 864 void drm_fb_helper_cfb_copyarea(struct fb_info *info, 865 const struct fb_copyarea *area) 866 { 867 cfb_copyarea(info, area); 868 drm_fb_helper_damage(info, area->dx, area->dy, area->width, area->height); 869 } 870 EXPORT_SYMBOL(drm_fb_helper_cfb_copyarea); 871 872 /** 873 * drm_fb_helper_cfb_imageblit - wrapper around cfb_imageblit 874 * @info: fbdev registered by the helper 875 * @image: info about image to blit 876 * 877 * A wrapper around cfb_imageblit implemented by fbdev core 878 */ 879 void drm_fb_helper_cfb_imageblit(struct fb_info *info, 880 const struct fb_image *image) 881 { 882 cfb_imageblit(info, image); 883 drm_fb_helper_damage(info, image->dx, image->dy, image->width, image->height); 884 } 885 EXPORT_SYMBOL(drm_fb_helper_cfb_imageblit); 886 887 /** 888 * drm_fb_helper_set_suspend - wrapper around fb_set_suspend 889 * @fb_helper: driver-allocated fbdev helper, can be NULL 890 * @suspend: whether to suspend or resume 891 * 892 * A wrapper around fb_set_suspend implemented by fbdev core. 893 * Use drm_fb_helper_set_suspend_unlocked() if you don't need to take 894 * the lock yourself 895 */ 896 void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, bool suspend) 897 { 898 if (fb_helper && fb_helper->fbdev) 899 fb_set_suspend(fb_helper->fbdev, suspend); 900 } 901 EXPORT_SYMBOL(drm_fb_helper_set_suspend); 902 903 /** 904 * drm_fb_helper_set_suspend_unlocked - wrapper around fb_set_suspend that also 905 * takes the console lock 906 * @fb_helper: driver-allocated fbdev helper, can be NULL 907 * @suspend: whether to suspend or resume 908 * 909 * A wrapper around fb_set_suspend() that takes the console lock. If the lock 910 * isn't available on resume, a worker is tasked with waiting for the lock 911 * to become available. The console lock can be pretty contented on resume 912 * due to all the printk activity. 913 * 914 * This function can be called multiple times with the same state since 915 * &fb_info.state is checked to see if fbdev is running or not before locking. 916 * 917 * Use drm_fb_helper_set_suspend() if you need to take the lock yourself. 
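 *
 * A hedged sketch of use from a driver's system suspend and resume paths, with
 * the surrounding PM plumbing omitted and drm assumed to point to the
 * &struct drm_device::
 *
 *	drm_fb_helper_set_suspend_unlocked(drm->fb_helper, true);
 *
 * and on resume::
 *
 *	drm_fb_helper_set_suspend_unlocked(drm->fb_helper, false);
 *
 * Most drivers can instead rely on drm_mode_config_helper_suspend() and
 * drm_mode_config_helper_resume(), which handle the fbdev state as part of the
 * overall suspend/resume flow.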
918 */ 919 void drm_fb_helper_set_suspend_unlocked(struct drm_fb_helper *fb_helper, 920 bool suspend) 921 { 922 if (!fb_helper || !fb_helper->fbdev) 923 return; 924 925 /* make sure there's no pending/ongoing resume */ 926 flush_work(&fb_helper->resume_work); 927 928 if (suspend) { 929 if (fb_helper->fbdev->state != FBINFO_STATE_RUNNING) 930 return; 931 932 console_lock(); 933 934 } else { 935 if (fb_helper->fbdev->state == FBINFO_STATE_RUNNING) 936 return; 937 938 if (!console_trylock()) { 939 schedule_work(&fb_helper->resume_work); 940 return; 941 } 942 } 943 944 fb_set_suspend(fb_helper->fbdev, suspend); 945 console_unlock(); 946 } 947 EXPORT_SYMBOL(drm_fb_helper_set_suspend_unlocked); 948 949 static int setcmap_pseudo_palette(struct fb_cmap *cmap, struct fb_info *info) 950 { 951 u32 *palette = (u32 *)info->pseudo_palette; 952 int i; 953 954 if (cmap->start + cmap->len > 16) 955 return -EINVAL; 956 957 for (i = 0; i < cmap->len; ++i) { 958 u16 red = cmap->red[i]; 959 u16 green = cmap->green[i]; 960 u16 blue = cmap->blue[i]; 961 u32 value; 962 963 red >>= 16 - info->var.red.length; 964 green >>= 16 - info->var.green.length; 965 blue >>= 16 - info->var.blue.length; 966 value = (red << info->var.red.offset) | 967 (green << info->var.green.offset) | 968 (blue << info->var.blue.offset); 969 if (info->var.transp.length > 0) { 970 u32 mask = (1 << info->var.transp.length) - 1; 971 972 mask <<= info->var.transp.offset; 973 value |= mask; 974 } 975 palette[cmap->start + i] = value; 976 } 977 978 return 0; 979 } 980 981 static int setcmap_legacy(struct fb_cmap *cmap, struct fb_info *info) 982 { 983 struct drm_fb_helper *fb_helper = info->par; 984 struct drm_mode_set *modeset; 985 struct drm_crtc *crtc; 986 u16 *r, *g, *b; 987 int ret = 0; 988 989 drm_modeset_lock_all(fb_helper->dev); 990 drm_client_for_each_modeset(modeset, &fb_helper->client) { 991 crtc = modeset->crtc; 992 if (!crtc->funcs->gamma_set || !crtc->gamma_size) { 993 ret = -EINVAL; 994 goto out; 995 } 996 997 if (cmap->start + cmap->len > crtc->gamma_size) { 998 ret = -EINVAL; 999 goto out; 1000 } 1001 1002 r = crtc->gamma_store; 1003 g = r + crtc->gamma_size; 1004 b = g + crtc->gamma_size; 1005 1006 memcpy(r + cmap->start, cmap->red, cmap->len * sizeof(*r)); 1007 memcpy(g + cmap->start, cmap->green, cmap->len * sizeof(*g)); 1008 memcpy(b + cmap->start, cmap->blue, cmap->len * sizeof(*b)); 1009 1010 ret = crtc->funcs->gamma_set(crtc, r, g, b, 1011 crtc->gamma_size, NULL); 1012 if (ret) 1013 goto out; 1014 } 1015 out: 1016 drm_modeset_unlock_all(fb_helper->dev); 1017 1018 return ret; 1019 } 1020 1021 static struct drm_property_blob *setcmap_new_gamma_lut(struct drm_crtc *crtc, 1022 struct fb_cmap *cmap) 1023 { 1024 struct drm_device *dev = crtc->dev; 1025 struct drm_property_blob *gamma_lut; 1026 struct drm_color_lut *lut; 1027 int size = crtc->gamma_size; 1028 int i; 1029 1030 if (!size || cmap->start + cmap->len > size) 1031 return ERR_PTR(-EINVAL); 1032 1033 gamma_lut = drm_property_create_blob(dev, sizeof(*lut) * size, NULL); 1034 if (IS_ERR(gamma_lut)) 1035 return gamma_lut; 1036 1037 lut = gamma_lut->data; 1038 if (cmap->start || cmap->len != size) { 1039 u16 *r = crtc->gamma_store; 1040 u16 *g = r + crtc->gamma_size; 1041 u16 *b = g + crtc->gamma_size; 1042 1043 for (i = 0; i < cmap->start; i++) { 1044 lut[i].red = r[i]; 1045 lut[i].green = g[i]; 1046 lut[i].blue = b[i]; 1047 } 1048 for (i = cmap->start + cmap->len; i < size; i++) { 1049 lut[i].red = r[i]; 1050 lut[i].green = g[i]; 1051 lut[i].blue = b[i]; 1052 } 1053 } 1054 
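	/* Overwrite the requested range with the values from the new cmap. */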
1055 for (i = 0; i < cmap->len; i++) { 1056 lut[cmap->start + i].red = cmap->red[i]; 1057 lut[cmap->start + i].green = cmap->green[i]; 1058 lut[cmap->start + i].blue = cmap->blue[i]; 1059 } 1060 1061 return gamma_lut; 1062 } 1063 1064 static int setcmap_atomic(struct fb_cmap *cmap, struct fb_info *info) 1065 { 1066 struct drm_fb_helper *fb_helper = info->par; 1067 struct drm_device *dev = fb_helper->dev; 1068 struct drm_property_blob *gamma_lut = NULL; 1069 struct drm_modeset_acquire_ctx ctx; 1070 struct drm_crtc_state *crtc_state; 1071 struct drm_atomic_state *state; 1072 struct drm_mode_set *modeset; 1073 struct drm_crtc *crtc; 1074 u16 *r, *g, *b; 1075 bool replaced; 1076 int ret = 0; 1077 1078 drm_modeset_acquire_init(&ctx, 0); 1079 1080 state = drm_atomic_state_alloc(dev); 1081 if (!state) { 1082 ret = -ENOMEM; 1083 goto out_ctx; 1084 } 1085 1086 state->acquire_ctx = &ctx; 1087 retry: 1088 drm_client_for_each_modeset(modeset, &fb_helper->client) { 1089 crtc = modeset->crtc; 1090 1091 if (!gamma_lut) 1092 gamma_lut = setcmap_new_gamma_lut(crtc, cmap); 1093 if (IS_ERR(gamma_lut)) { 1094 ret = PTR_ERR(gamma_lut); 1095 gamma_lut = NULL; 1096 goto out_state; 1097 } 1098 1099 crtc_state = drm_atomic_get_crtc_state(state, crtc); 1100 if (IS_ERR(crtc_state)) { 1101 ret = PTR_ERR(crtc_state); 1102 goto out_state; 1103 } 1104 1105 /* 1106 * FIXME: This always uses gamma_lut. Some HW have only 1107 * degamma_lut, in which case we should reset gamma_lut and set 1108 * degamma_lut. See drm_crtc_legacy_gamma_set(). 1109 */ 1110 replaced = drm_property_replace_blob(&crtc_state->degamma_lut, 1111 NULL); 1112 replaced |= drm_property_replace_blob(&crtc_state->ctm, NULL); 1113 replaced |= drm_property_replace_blob(&crtc_state->gamma_lut, 1114 gamma_lut); 1115 crtc_state->color_mgmt_changed |= replaced; 1116 } 1117 1118 ret = drm_atomic_commit(state); 1119 if (ret) 1120 goto out_state; 1121 1122 drm_client_for_each_modeset(modeset, &fb_helper->client) { 1123 crtc = modeset->crtc; 1124 1125 r = crtc->gamma_store; 1126 g = r + crtc->gamma_size; 1127 b = g + crtc->gamma_size; 1128 1129 memcpy(r + cmap->start, cmap->red, cmap->len * sizeof(*r)); 1130 memcpy(g + cmap->start, cmap->green, cmap->len * sizeof(*g)); 1131 memcpy(b + cmap->start, cmap->blue, cmap->len * sizeof(*b)); 1132 } 1133 1134 out_state: 1135 if (ret == -EDEADLK) 1136 goto backoff; 1137 1138 drm_property_blob_put(gamma_lut); 1139 drm_atomic_state_put(state); 1140 out_ctx: 1141 drm_modeset_drop_locks(&ctx); 1142 drm_modeset_acquire_fini(&ctx); 1143 1144 return ret; 1145 1146 backoff: 1147 drm_atomic_state_clear(state); 1148 drm_modeset_backoff(&ctx); 1149 goto retry; 1150 } 1151 1152 /** 1153 * drm_fb_helper_setcmap - implementation for &fb_ops.fb_setcmap 1154 * @cmap: cmap to set 1155 * @info: fbdev registered by the helper 1156 */ 1157 int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info) 1158 { 1159 struct drm_fb_helper *fb_helper = info->par; 1160 struct drm_device *dev = fb_helper->dev; 1161 int ret; 1162 1163 if (oops_in_progress) 1164 return -EBUSY; 1165 1166 mutex_lock(&fb_helper->lock); 1167 1168 if (!drm_master_internal_acquire(dev)) { 1169 ret = -EBUSY; 1170 goto unlock; 1171 } 1172 1173 mutex_lock(&fb_helper->client.modeset_mutex); 1174 if (info->fix.visual == FB_VISUAL_TRUECOLOR) 1175 ret = setcmap_pseudo_palette(cmap, info); 1176 else if (drm_drv_uses_atomic_modeset(fb_helper->dev)) 1177 ret = setcmap_atomic(cmap, info); 1178 else 1179 ret = setcmap_legacy(cmap, info); 1180 
mutex_unlock(&fb_helper->client.modeset_mutex); 1181 1182 drm_master_internal_release(dev); 1183 unlock: 1184 mutex_unlock(&fb_helper->lock); 1185 1186 return ret; 1187 } 1188 EXPORT_SYMBOL(drm_fb_helper_setcmap); 1189 1190 /** 1191 * drm_fb_helper_ioctl - legacy ioctl implementation 1192 * @info: fbdev registered by the helper 1193 * @cmd: ioctl command 1194 * @arg: ioctl argument 1195 * 1196 * A helper to implement the standard fbdev ioctl. Only 1197 * FBIO_WAITFORVSYNC is implemented for now. 1198 */ 1199 int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd, 1200 unsigned long arg) 1201 { 1202 struct drm_fb_helper *fb_helper = info->par; 1203 struct drm_device *dev = fb_helper->dev; 1204 struct drm_crtc *crtc; 1205 int ret = 0; 1206 1207 mutex_lock(&fb_helper->lock); 1208 if (!drm_master_internal_acquire(dev)) { 1209 ret = -EBUSY; 1210 goto unlock; 1211 } 1212 1213 switch (cmd) { 1214 case FBIO_WAITFORVSYNC: 1215 /* 1216 * Only consider the first CRTC. 1217 * 1218 * This ioctl is supposed to take the CRTC number as 1219 * an argument, but in fbdev times, what that number 1220 * was supposed to be was quite unclear, different 1221 * drivers were passing that argument differently 1222 * (some by reference, some by value), and most of the 1223 * userspace applications were just hardcoding 0 as an 1224 * argument. 1225 * 1226 * The first CRTC should be the integrated panel on 1227 * most drivers, so this is the best choice we can 1228 * make. If we're not smart enough here, one should 1229 * just consider switch the userspace to KMS. 1230 */ 1231 crtc = fb_helper->client.modesets[0].crtc; 1232 1233 /* 1234 * Only wait for a vblank event if the CRTC is 1235 * enabled, otherwise just don't do anythintg, 1236 * not even report an error. 1237 */ 1238 ret = drm_crtc_vblank_get(crtc); 1239 if (!ret) { 1240 drm_crtc_wait_one_vblank(crtc); 1241 drm_crtc_vblank_put(crtc); 1242 } 1243 1244 ret = 0; 1245 break; 1246 default: 1247 ret = -ENOTTY; 1248 } 1249 1250 drm_master_internal_release(dev); 1251 unlock: 1252 mutex_unlock(&fb_helper->lock); 1253 return ret; 1254 } 1255 EXPORT_SYMBOL(drm_fb_helper_ioctl); 1256 1257 static bool drm_fb_pixel_format_equal(const struct fb_var_screeninfo *var_1, 1258 const struct fb_var_screeninfo *var_2) 1259 { 1260 return var_1->bits_per_pixel == var_2->bits_per_pixel && 1261 var_1->grayscale == var_2->grayscale && 1262 var_1->red.offset == var_2->red.offset && 1263 var_1->red.length == var_2->red.length && 1264 var_1->red.msb_right == var_2->red.msb_right && 1265 var_1->green.offset == var_2->green.offset && 1266 var_1->green.length == var_2->green.length && 1267 var_1->green.msb_right == var_2->green.msb_right && 1268 var_1->blue.offset == var_2->blue.offset && 1269 var_1->blue.length == var_2->blue.length && 1270 var_1->blue.msb_right == var_2->blue.msb_right && 1271 var_1->transp.offset == var_2->transp.offset && 1272 var_1->transp.length == var_2->transp.length && 1273 var_1->transp.msb_right == var_2->transp.msb_right; 1274 } 1275 1276 static void drm_fb_helper_fill_pixel_fmt(struct fb_var_screeninfo *var, 1277 u8 depth) 1278 { 1279 switch (depth) { 1280 case 8: 1281 var->red.offset = 0; 1282 var->green.offset = 0; 1283 var->blue.offset = 0; 1284 var->red.length = 8; /* 8bit DAC */ 1285 var->green.length = 8; 1286 var->blue.length = 8; 1287 var->transp.offset = 0; 1288 var->transp.length = 0; 1289 break; 1290 case 15: 1291 var->red.offset = 10; 1292 var->green.offset = 5; 1293 var->blue.offset = 0; 1294 var->red.length = 5; 1295 var->green.length = 5; 
1296 var->blue.length = 5; 1297 var->transp.offset = 15; 1298 var->transp.length = 1; 1299 break; 1300 case 16: 1301 var->red.offset = 11; 1302 var->green.offset = 5; 1303 var->blue.offset = 0; 1304 var->red.length = 5; 1305 var->green.length = 6; 1306 var->blue.length = 5; 1307 var->transp.offset = 0; 1308 break; 1309 case 24: 1310 var->red.offset = 16; 1311 var->green.offset = 8; 1312 var->blue.offset = 0; 1313 var->red.length = 8; 1314 var->green.length = 8; 1315 var->blue.length = 8; 1316 var->transp.offset = 0; 1317 var->transp.length = 0; 1318 break; 1319 case 32: 1320 var->red.offset = 16; 1321 var->green.offset = 8; 1322 var->blue.offset = 0; 1323 var->red.length = 8; 1324 var->green.length = 8; 1325 var->blue.length = 8; 1326 var->transp.offset = 24; 1327 var->transp.length = 8; 1328 break; 1329 default: 1330 break; 1331 } 1332 } 1333 1334 /** 1335 * drm_fb_helper_check_var - implementation for &fb_ops.fb_check_var 1336 * @var: screeninfo to check 1337 * @info: fbdev registered by the helper 1338 */ 1339 int drm_fb_helper_check_var(struct fb_var_screeninfo *var, 1340 struct fb_info *info) 1341 { 1342 struct drm_fb_helper *fb_helper = info->par; 1343 struct drm_framebuffer *fb = fb_helper->fb; 1344 struct drm_device *dev = fb_helper->dev; 1345 1346 if (in_dbg_master()) 1347 return -EINVAL; 1348 1349 if (var->pixclock != 0) { 1350 drm_dbg_kms(dev, "fbdev emulation doesn't support changing the pixel clock, value of pixclock is ignored\n"); 1351 var->pixclock = 0; 1352 } 1353 1354 if ((drm_format_info_block_width(fb->format, 0) > 1) || 1355 (drm_format_info_block_height(fb->format, 0) > 1)) 1356 return -EINVAL; 1357 1358 /* 1359 * Changes struct fb_var_screeninfo are currently not pushed back 1360 * to KMS, hence fail if different settings are requested. 1361 */ 1362 if (var->bits_per_pixel > fb->format->cpp[0] * 8 || 1363 var->xres > fb->width || var->yres > fb->height || 1364 var->xres_virtual > fb->width || var->yres_virtual > fb->height) { 1365 drm_dbg_kms(dev, "fb requested width/height/bpp can't fit in current fb " 1366 "request %dx%d-%d (virtual %dx%d) > %dx%d-%d\n", 1367 var->xres, var->yres, var->bits_per_pixel, 1368 var->xres_virtual, var->yres_virtual, 1369 fb->width, fb->height, fb->format->cpp[0] * 8); 1370 return -EINVAL; 1371 } 1372 1373 /* 1374 * Workaround for SDL 1.2, which is known to be setting all pixel format 1375 * fields values to zero in some cases. We treat this situation as a 1376 * kind of "use some reasonable autodetected values". 1377 */ 1378 if (!var->red.offset && !var->green.offset && 1379 !var->blue.offset && !var->transp.offset && 1380 !var->red.length && !var->green.length && 1381 !var->blue.length && !var->transp.length && 1382 !var->red.msb_right && !var->green.msb_right && 1383 !var->blue.msb_right && !var->transp.msb_right) { 1384 drm_fb_helper_fill_pixel_fmt(var, fb->format->depth); 1385 } 1386 1387 /* 1388 * Likewise, bits_per_pixel should be rounded up to a supported value. 1389 */ 1390 var->bits_per_pixel = fb->format->cpp[0] * 8; 1391 1392 /* 1393 * drm fbdev emulation doesn't support changing the pixel format at all, 1394 * so reject all pixel format changing requests. 
1395 */ 1396 if (!drm_fb_pixel_format_equal(var, &info->var)) { 1397 drm_dbg_kms(dev, "fbdev emulation doesn't support changing the pixel format\n"); 1398 return -EINVAL; 1399 } 1400 1401 return 0; 1402 } 1403 EXPORT_SYMBOL(drm_fb_helper_check_var); 1404 1405 /** 1406 * drm_fb_helper_set_par - implementation for &fb_ops.fb_set_par 1407 * @info: fbdev registered by the helper 1408 * 1409 * This will let fbcon do the mode init and is called at initialization time by 1410 * the fbdev core when registering the driver, and later on through the hotplug 1411 * callback. 1412 */ 1413 int drm_fb_helper_set_par(struct fb_info *info) 1414 { 1415 struct drm_fb_helper *fb_helper = info->par; 1416 struct fb_var_screeninfo *var = &info->var; 1417 bool force; 1418 1419 if (oops_in_progress) 1420 return -EBUSY; 1421 1422 if (var->pixclock != 0) { 1423 drm_err(fb_helper->dev, "PIXEL CLOCK SET\n"); 1424 return -EINVAL; 1425 } 1426 1427 /* 1428 * Normally we want to make sure that a kms master takes precedence over 1429 * fbdev, to avoid fbdev flickering and occasionally stealing the 1430 * display status. But Xorg first sets the vt back to text mode using 1431 * the KDSET IOCTL with KD_TEXT, and only after that drops the master 1432 * status when exiting. 1433 * 1434 * In the past this was caught by drm_fb_helper_lastclose(), but on 1435 * modern systems where logind always keeps a drm fd open to orchestrate 1436 * the vt switching, this doesn't work. 1437 * 1438 * To not break the userspace ABI we have this special case here, which 1439 * is only used for the above case. Everything else uses the normal 1440 * commit function, which ensures that we never steal the display from 1441 * an active drm master. 1442 */ 1443 force = var->activate & FB_ACTIVATE_KD_TEXT; 1444 1445 __drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper, force); 1446 1447 return 0; 1448 } 1449 EXPORT_SYMBOL(drm_fb_helper_set_par); 1450 1451 static void pan_set(struct drm_fb_helper *fb_helper, int x, int y) 1452 { 1453 struct drm_mode_set *mode_set; 1454 1455 mutex_lock(&fb_helper->client.modeset_mutex); 1456 drm_client_for_each_modeset(mode_set, &fb_helper->client) { 1457 mode_set->x = x; 1458 mode_set->y = y; 1459 } 1460 mutex_unlock(&fb_helper->client.modeset_mutex); 1461 } 1462 1463 static int pan_display_atomic(struct fb_var_screeninfo *var, 1464 struct fb_info *info) 1465 { 1466 struct drm_fb_helper *fb_helper = info->par; 1467 int ret; 1468 1469 pan_set(fb_helper, var->xoffset, var->yoffset); 1470 1471 ret = drm_client_modeset_commit_locked(&fb_helper->client); 1472 if (!ret) { 1473 info->var.xoffset = var->xoffset; 1474 info->var.yoffset = var->yoffset; 1475 } else 1476 pan_set(fb_helper, info->var.xoffset, info->var.yoffset); 1477 1478 return ret; 1479 } 1480 1481 static int pan_display_legacy(struct fb_var_screeninfo *var, 1482 struct fb_info *info) 1483 { 1484 struct drm_fb_helper *fb_helper = info->par; 1485 struct drm_client_dev *client = &fb_helper->client; 1486 struct drm_mode_set *modeset; 1487 int ret = 0; 1488 1489 mutex_lock(&client->modeset_mutex); 1490 drm_modeset_lock_all(fb_helper->dev); 1491 drm_client_for_each_modeset(modeset, client) { 1492 modeset->x = var->xoffset; 1493 modeset->y = var->yoffset; 1494 1495 if (modeset->num_connectors) { 1496 ret = drm_mode_set_config_internal(modeset); 1497 if (!ret) { 1498 info->var.xoffset = var->xoffset; 1499 info->var.yoffset = var->yoffset; 1500 } 1501 } 1502 } 1503 drm_modeset_unlock_all(fb_helper->dev); 1504 mutex_unlock(&client->modeset_mutex); 1505 1506 return ret; 
1507 } 1508 1509 /** 1510 * drm_fb_helper_pan_display - implementation for &fb_ops.fb_pan_display 1511 * @var: updated screen information 1512 * @info: fbdev registered by the helper 1513 */ 1514 int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, 1515 struct fb_info *info) 1516 { 1517 struct drm_fb_helper *fb_helper = info->par; 1518 struct drm_device *dev = fb_helper->dev; 1519 int ret; 1520 1521 if (oops_in_progress) 1522 return -EBUSY; 1523 1524 mutex_lock(&fb_helper->lock); 1525 if (!drm_master_internal_acquire(dev)) { 1526 ret = -EBUSY; 1527 goto unlock; 1528 } 1529 1530 if (drm_drv_uses_atomic_modeset(dev)) 1531 ret = pan_display_atomic(var, info); 1532 else 1533 ret = pan_display_legacy(var, info); 1534 1535 drm_master_internal_release(dev); 1536 unlock: 1537 mutex_unlock(&fb_helper->lock); 1538 1539 return ret; 1540 } 1541 EXPORT_SYMBOL(drm_fb_helper_pan_display); 1542 1543 /* 1544 * Allocates the backing storage and sets up the fbdev info structure through 1545 * the ->fb_probe callback. 1546 */ 1547 static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper, 1548 int preferred_bpp) 1549 { 1550 struct drm_client_dev *client = &fb_helper->client; 1551 struct drm_device *dev = fb_helper->dev; 1552 struct drm_mode_config *config = &dev->mode_config; 1553 int ret = 0; 1554 int crtc_count = 0; 1555 struct drm_connector_list_iter conn_iter; 1556 struct drm_fb_helper_surface_size sizes; 1557 struct drm_connector *connector; 1558 struct drm_mode_set *mode_set; 1559 int best_depth = 0; 1560 1561 memset(&sizes, 0, sizeof(struct drm_fb_helper_surface_size)); 1562 sizes.surface_depth = 24; 1563 sizes.surface_bpp = 32; 1564 sizes.fb_width = (u32)-1; 1565 sizes.fb_height = (u32)-1; 1566 1567 /* 1568 * If driver picks 8 or 16 by default use that for both depth/bpp 1569 * to begin with 1570 */ 1571 if (preferred_bpp != sizes.surface_bpp) 1572 sizes.surface_depth = sizes.surface_bpp = preferred_bpp; 1573 1574 drm_connector_list_iter_begin(fb_helper->dev, &conn_iter); 1575 drm_client_for_each_connector_iter(connector, &conn_iter) { 1576 struct drm_cmdline_mode *cmdline_mode; 1577 1578 cmdline_mode = &connector->cmdline_mode; 1579 1580 if (cmdline_mode->bpp_specified) { 1581 switch (cmdline_mode->bpp) { 1582 case 8: 1583 sizes.surface_depth = sizes.surface_bpp = 8; 1584 break; 1585 case 15: 1586 sizes.surface_depth = 15; 1587 sizes.surface_bpp = 16; 1588 break; 1589 case 16: 1590 sizes.surface_depth = sizes.surface_bpp = 16; 1591 break; 1592 case 24: 1593 sizes.surface_depth = sizes.surface_bpp = 24; 1594 break; 1595 case 32: 1596 sizes.surface_depth = 24; 1597 sizes.surface_bpp = 32; 1598 break; 1599 } 1600 break; 1601 } 1602 } 1603 drm_connector_list_iter_end(&conn_iter); 1604 1605 /* 1606 * If we run into a situation where, for example, the primary plane 1607 * supports RGBA5551 (16 bpp, depth 15) but not RGB565 (16 bpp, depth 1608 * 16) we need to scale down the depth of the sizes we request. 1609 */ 1610 mutex_lock(&client->modeset_mutex); 1611 drm_client_for_each_modeset(mode_set, client) { 1612 struct drm_crtc *crtc = mode_set->crtc; 1613 struct drm_plane *plane = crtc->primary; 1614 int j; 1615 1616 drm_dbg_kms(dev, "test CRTC %u primary plane\n", drm_crtc_index(crtc)); 1617 1618 for (j = 0; j < plane->format_count; j++) { 1619 const struct drm_format_info *fmt; 1620 1621 fmt = drm_format_info(plane->format_types[j]); 1622 1623 /* 1624 * Do not consider YUV or other complicated formats 1625 * for framebuffers. 
This means only legacy formats 1626 * are supported (fmt->depth is a legacy field) but 1627 * the framebuffer emulation can only deal with such 1628 * formats, specifically RGB/BGA formats. 1629 */ 1630 if (fmt->depth == 0) 1631 continue; 1632 1633 /* We found a perfect fit, great */ 1634 if (fmt->depth == sizes.surface_depth) { 1635 best_depth = fmt->depth; 1636 break; 1637 } 1638 1639 /* Skip depths above what we're looking for */ 1640 if (fmt->depth > sizes.surface_depth) 1641 continue; 1642 1643 /* Best depth found so far */ 1644 if (fmt->depth > best_depth) 1645 best_depth = fmt->depth; 1646 } 1647 } 1648 if (sizes.surface_depth != best_depth && best_depth) { 1649 drm_info(dev, "requested bpp %d, scaled depth down to %d", 1650 sizes.surface_bpp, best_depth); 1651 sizes.surface_depth = best_depth; 1652 } 1653 1654 /* first up get a count of crtcs now in use and new min/maxes width/heights */ 1655 crtc_count = 0; 1656 drm_client_for_each_modeset(mode_set, client) { 1657 struct drm_display_mode *desired_mode; 1658 int x, y, j; 1659 /* in case of tile group, are we the last tile vert or horiz? 1660 * If no tile group you are always the last one both vertically 1661 * and horizontally 1662 */ 1663 bool lastv = true, lasth = true; 1664 1665 desired_mode = mode_set->mode; 1666 1667 if (!desired_mode) 1668 continue; 1669 1670 crtc_count++; 1671 1672 x = mode_set->x; 1673 y = mode_set->y; 1674 1675 sizes.surface_width = max_t(u32, desired_mode->hdisplay + x, sizes.surface_width); 1676 sizes.surface_height = max_t(u32, desired_mode->vdisplay + y, sizes.surface_height); 1677 1678 for (j = 0; j < mode_set->num_connectors; j++) { 1679 struct drm_connector *connector = mode_set->connectors[j]; 1680 1681 if (connector->has_tile && 1682 desired_mode->hdisplay == connector->tile_h_size && 1683 desired_mode->vdisplay == connector->tile_v_size) { 1684 lasth = (connector->tile_h_loc == (connector->num_h_tile - 1)); 1685 lastv = (connector->tile_v_loc == (connector->num_v_tile - 1)); 1686 /* cloning to multiple tiles is just crazy-talk, so: */ 1687 break; 1688 } 1689 } 1690 1691 if (lasth) 1692 sizes.fb_width = min_t(u32, desired_mode->hdisplay + x, sizes.fb_width); 1693 if (lastv) 1694 sizes.fb_height = min_t(u32, desired_mode->vdisplay + y, sizes.fb_height); 1695 } 1696 mutex_unlock(&client->modeset_mutex); 1697 1698 if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) { 1699 drm_info(dev, "Cannot find any crtc or sizes\n"); 1700 1701 /* First time: disable all crtc's.. */ 1702 if (!fb_helper->deferred_setup) 1703 drm_client_modeset_commit(client); 1704 return -EAGAIN; 1705 } 1706 1707 /* Handle our overallocation */ 1708 sizes.surface_height *= drm_fbdev_overalloc; 1709 sizes.surface_height /= 100; 1710 if (sizes.surface_height > config->max_height) { 1711 drm_dbg_kms(dev, "Fbdev over-allocation too large; clamping height to %d\n", 1712 config->max_height); 1713 sizes.surface_height = config->max_height; 1714 } 1715 1716 /* push down into drivers */ 1717 ret = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes); 1718 if (ret < 0) 1719 return ret; 1720 1721 strcpy(fb_helper->fb->comm, "[fbcon]"); 1722 return 0; 1723 } 1724 1725 static void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, 1726 uint32_t depth) 1727 { 1728 info->fix.type = FB_TYPE_PACKED_PIXELS; 1729 info->fix.visual = depth == 8 ? 
FB_VISUAL_PSEUDOCOLOR : 1730 FB_VISUAL_TRUECOLOR; 1731 info->fix.mmio_start = 0; 1732 info->fix.mmio_len = 0; 1733 info->fix.type_aux = 0; 1734 info->fix.xpanstep = 1; /* doing it in hw */ 1735 info->fix.ypanstep = 1; /* doing it in hw */ 1736 info->fix.ywrapstep = 0; 1737 info->fix.accel = FB_ACCEL_NONE; 1738 1739 info->fix.line_length = pitch; 1740 } 1741 1742 static void drm_fb_helper_fill_var(struct fb_info *info, 1743 struct drm_fb_helper *fb_helper, 1744 uint32_t fb_width, uint32_t fb_height) 1745 { 1746 struct drm_framebuffer *fb = fb_helper->fb; 1747 1748 WARN_ON((drm_format_info_block_width(fb->format, 0) > 1) || 1749 (drm_format_info_block_height(fb->format, 0) > 1)); 1750 info->pseudo_palette = fb_helper->pseudo_palette; 1751 info->var.xres_virtual = fb->width; 1752 info->var.yres_virtual = fb->height; 1753 info->var.bits_per_pixel = fb->format->cpp[0] * 8; 1754 info->var.accel_flags = FB_ACCELF_TEXT; 1755 info->var.xoffset = 0; 1756 info->var.yoffset = 0; 1757 info->var.activate = FB_ACTIVATE_NOW; 1758 1759 drm_fb_helper_fill_pixel_fmt(&info->var, fb->format->depth); 1760 1761 info->var.xres = fb_width; 1762 info->var.yres = fb_height; 1763 } 1764 1765 /** 1766 * drm_fb_helper_fill_info - initializes fbdev information 1767 * @info: fbdev instance to set up 1768 * @fb_helper: fb helper instance to use as template 1769 * @sizes: describes fbdev size and scanout surface size 1770 * 1771 * Sets up the variable and fixed fbdev metainformation from the given fb helper 1772 * instance and the drm framebuffer allocated in &drm_fb_helper.fb. 1773 * 1774 * Drivers should call this (or their equivalent setup code) from their 1775 * &drm_fb_helper_funcs.fb_probe callback after having allocated the fbdev 1776 * backing storage framebuffer. 1777 */ 1778 void drm_fb_helper_fill_info(struct fb_info *info, 1779 struct drm_fb_helper *fb_helper, 1780 struct drm_fb_helper_surface_size *sizes) 1781 { 1782 struct drm_framebuffer *fb = fb_helper->fb; 1783 1784 drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth); 1785 drm_fb_helper_fill_var(info, fb_helper, 1786 sizes->fb_width, sizes->fb_height); 1787 1788 info->par = fb_helper; 1789 /* 1790 * The DRM drivers fbdev emulation device name can be confusing if the 1791 * driver name also has a "drm" suffix on it. Leading to names such as 1792 * "simpledrmdrmfb" in /proc/fb. Unfortunately, it's an uAPI and can't 1793 * be changed due user-space tools (e.g: pm-utils) matching against it. 1794 */ 1795 snprintf(info->fix.id, sizeof(info->fix.id), "%sdrmfb", 1796 fb_helper->dev->driver->name); 1797 1798 } 1799 EXPORT_SYMBOL(drm_fb_helper_fill_info); 1800 1801 /* 1802 * This is a continuation of drm_setup_crtcs() that sets up anything related 1803 * to the framebuffer. During initialization, drm_setup_crtcs() is called before 1804 * the framebuffer has been allocated (fb_helper->fb and fb_helper->fbdev). 1805 * So, any setup that touches those fields needs to be done here instead of in 1806 * drm_setup_crtcs(). 
1807 */ 1808 static void drm_setup_crtcs_fb(struct drm_fb_helper *fb_helper) 1809 { 1810 struct drm_client_dev *client = &fb_helper->client; 1811 struct drm_connector_list_iter conn_iter; 1812 struct fb_info *info = fb_helper->fbdev; 1813 unsigned int rotation, sw_rotations = 0; 1814 struct drm_connector *connector; 1815 struct drm_mode_set *modeset; 1816 1817 mutex_lock(&client->modeset_mutex); 1818 drm_client_for_each_modeset(modeset, client) { 1819 if (!modeset->num_connectors) 1820 continue; 1821 1822 modeset->fb = fb_helper->fb; 1823 1824 if (drm_client_rotation(modeset, &rotation)) 1825 /* Rotating in hardware, fbcon should not rotate */ 1826 sw_rotations |= DRM_MODE_ROTATE_0; 1827 else 1828 sw_rotations |= rotation; 1829 } 1830 mutex_unlock(&client->modeset_mutex); 1831 1832 drm_connector_list_iter_begin(fb_helper->dev, &conn_iter); 1833 drm_client_for_each_connector_iter(connector, &conn_iter) { 1834 1835 /* use first connected connector for the physical dimensions */ 1836 if (connector->status == connector_status_connected) { 1837 info->var.width = connector->display_info.width_mm; 1838 info->var.height = connector->display_info.height_mm; 1839 break; 1840 } 1841 } 1842 drm_connector_list_iter_end(&conn_iter); 1843 1844 switch (sw_rotations) { 1845 case DRM_MODE_ROTATE_0: 1846 info->fbcon_rotate_hint = FB_ROTATE_UR; 1847 break; 1848 case DRM_MODE_ROTATE_90: 1849 info->fbcon_rotate_hint = FB_ROTATE_CCW; 1850 break; 1851 case DRM_MODE_ROTATE_180: 1852 info->fbcon_rotate_hint = FB_ROTATE_UD; 1853 break; 1854 case DRM_MODE_ROTATE_270: 1855 info->fbcon_rotate_hint = FB_ROTATE_CW; 1856 break; 1857 default: 1858 /* 1859 * Multiple bits are set / multiple rotations requested 1860 * fbcon cannot handle separate rotation settings per 1861 * output, so fallback to unrotated. 1862 */ 1863 info->fbcon_rotate_hint = FB_ROTATE_UR; 1864 } 1865 } 1866 1867 /* Note: Drops fb_helper->lock before returning. */ 1868 static int 1869 __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper, 1870 int bpp_sel) 1871 { 1872 struct drm_device *dev = fb_helper->dev; 1873 struct fb_info *info; 1874 unsigned int width, height; 1875 int ret; 1876 1877 width = dev->mode_config.max_width; 1878 height = dev->mode_config.max_height; 1879 1880 drm_client_modeset_probe(&fb_helper->client, width, height); 1881 ret = drm_fb_helper_single_fb_probe(fb_helper, bpp_sel); 1882 if (ret < 0) { 1883 if (ret == -EAGAIN) { 1884 fb_helper->preferred_bpp = bpp_sel; 1885 fb_helper->deferred_setup = true; 1886 ret = 0; 1887 } 1888 mutex_unlock(&fb_helper->lock); 1889 1890 return ret; 1891 } 1892 drm_setup_crtcs_fb(fb_helper); 1893 1894 fb_helper->deferred_setup = false; 1895 1896 info = fb_helper->fbdev; 1897 info->var.pixclock = 0; 1898 /* Shamelessly allow physical address leaking to userspace */ 1899 #if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM) 1900 if (!drm_leak_fbdev_smem) 1901 #endif 1902 /* don't leak any physical addresses to userspace */ 1903 info->flags |= FBINFO_HIDE_SMEM_START; 1904 1905 /* Need to drop locks to avoid recursive deadlock in 1906 * register_framebuffer. This is ok because the only thing left to do is 1907 * register the fbdev emulation instance in kernel_fb_helper_list. 
*/ 1908 mutex_unlock(&fb_helper->lock); 1909 1910 ret = register_framebuffer(info); 1911 if (ret < 0) 1912 return ret; 1913 1914 drm_info(dev, "fb%d: %s frame buffer device\n", 1915 info->node, info->fix.id); 1916 1917 mutex_lock(&kernel_fb_helper_lock); 1918 if (list_empty(&kernel_fb_helper_list)) 1919 register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); 1920 1921 list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list); 1922 mutex_unlock(&kernel_fb_helper_lock); 1923 1924 return 0; 1925 } 1926 1927 /** 1928 * drm_fb_helper_initial_config - set up a sane initial connector configuration 1929 * @fb_helper: fb_helper device struct 1930 * @bpp_sel: bpp value to use for the framebuffer configuration 1931 * 1932 * Scans the CRTCs and connectors and tries to put together an initial setup. 1933 * At the moment, this is a cloned configuration across all heads with 1934 * a new framebuffer object as the backing store. 1935 * 1936 * Note that this also registers the fbdev and so allows userspace to call into 1937 * the driver through the fbdev interfaces. 1938 * 1939 * This function will call down into the &drm_fb_helper_funcs.fb_probe callback 1940 * to let the driver allocate and initialize the fbdev info structure and the 1941 * drm framebuffer used to back the fbdev. drm_fb_helper_fill_info() is provided 1942 * as a helper to set up simple default values for the fbdev info structure. 1943 * 1944 * HANG DEBUGGING: 1945 * 1946 * When you have fbcon support built-in or already loaded, this function will do 1947 * a full modeset to set up the fbdev console. Due to locking misdesign in the 1948 * VT/fbdev subsystem, that entire modeset sequence has to be done while holding 1949 * console_lock. Until console_unlock is called, no dmesg lines will be sent out 1950 * to consoles, not even the serial console. This means when your driver crashes, 1951 * you will see absolutely nothing else but a system stuck in this function, 1952 * with no further output. Any kind of printk() you place within your own driver 1953 * or in the drm core modeset code will also never show up. 1954 * 1955 * Standard debug practice is to run the fbcon setup without taking the 1956 * console_lock as a hack, to be able to see backtraces and crashes on the 1957 * serial line. This can be done by setting the fb.lockless_register_fb=1 kernel 1958 * cmdline option. 1959 * 1960 * The other option is to just disable fbdev emulation since very likely the 1961 * first modeset from userspace will crash in the same way, and is even easier 1962 * to debug. This can be done by setting the drm_kms_helper.fbdev_emulation=0 1963 * kernel cmdline option. 1964 * 1965 * RETURNS: 1966 * Zero if everything went ok, nonzero otherwise. 1967 */ 1968 int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel) 1969 { 1970 int ret; 1971 1972 if (!drm_fbdev_emulation) 1973 return 0; 1974 1975 mutex_lock(&fb_helper->lock); 1976 ret = __drm_fb_helper_initial_config_and_unlock(fb_helper, bpp_sel); 1977 1978 return ret; 1979 } 1980 EXPORT_SYMBOL(drm_fb_helper_initial_config); 1981 1982 /** 1983 * drm_fb_helper_hotplug_event - respond to a hotplug notification by 1984 * probing all the outputs attached to the fb 1985 * @fb_helper: driver-allocated fbdev helper, can be NULL 1986 * 1987 * Scan the connectors attached to the fb_helper and try to put together a 1988 * setup after notification of a change in output configuration.
1989 * 1990 * Called at runtime, takes the mode config locks to be able to check/change the 1991 * modeset configuration. Must be run from process context (which usually means 1992 * either the output polling work or a work item launched from the driver's 1993 * hotplug interrupt). 1994 * 1995 * Note that drivers may call this even before calling 1996 * drm_fb_helper_initial_config but only after drm_fb_helper_init. This allows 1997 * for a race-free fbcon setup and will make sure that the fbdev emulation will 1998 * not miss any hotplug events. 1999 * 2000 * RETURNS: 2001 * 0 on success and a non-zero error code otherwise. 2002 */ 2003 int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper) 2004 { 2005 int err = 0; 2006 2007 if (!drm_fbdev_emulation || !fb_helper) 2008 return 0; 2009 2010 mutex_lock(&fb_helper->lock); 2011 if (fb_helper->deferred_setup) { 2012 err = __drm_fb_helper_initial_config_and_unlock(fb_helper, 2013 fb_helper->preferred_bpp); 2014 return err; 2015 } 2016 2017 if (!fb_helper->fb || !drm_master_internal_acquire(fb_helper->dev)) { 2018 fb_helper->delayed_hotplug = true; 2019 mutex_unlock(&fb_helper->lock); 2020 return err; 2021 } 2022 2023 drm_master_internal_release(fb_helper->dev); 2024 2025 drm_dbg_kms(fb_helper->dev, "\n"); 2026 2027 drm_client_modeset_probe(&fb_helper->client, fb_helper->fb->width, fb_helper->fb->height); 2028 drm_setup_crtcs_fb(fb_helper); 2029 mutex_unlock(&fb_helper->lock); 2030 2031 drm_fb_helper_set_par(fb_helper->fbdev); 2032 2033 return 0; 2034 } 2035 EXPORT_SYMBOL(drm_fb_helper_hotplug_event); 2036 2037 /** 2038 * drm_fb_helper_lastclose - DRM driver lastclose helper for fbdev emulation 2039 * @dev: DRM device 2040 * 2041 * This function can be used as the &drm_driver->lastclose callback for drivers 2042 * that only need to call drm_fb_helper_restore_fbdev_mode_unlocked(). 2043 */ 2044 void drm_fb_helper_lastclose(struct drm_device *dev) 2045 { 2046 drm_fb_helper_restore_fbdev_mode_unlocked(dev->fb_helper); 2047 } 2048 EXPORT_SYMBOL(drm_fb_helper_lastclose); 2049 2050 /** 2051 * drm_fb_helper_output_poll_changed - DRM mode config \.output_poll_changed 2052 * helper for fbdev emulation 2053 * @dev: DRM device 2054 * 2055 * This function can be used as the 2056 * &drm_mode_config_funcs.output_poll_changed callback for drivers that only 2057 * need to call drm_fb_helper_hotplug_event(). 
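 *
 * A minimal sketch of how a driver might wire this up together with
 * drm_fb_helper_lastclose(); the "foo" driver and its use of
 * drm_gem_fb_create() are illustrative assumptions, not requirements:
 *
 *     static const struct drm_mode_config_funcs foo_mode_config_funcs = {
 *             .fb_create = drm_gem_fb_create,
 *             .output_poll_changed = drm_fb_helper_output_poll_changed,
 *     };
 *
 *     static const struct drm_driver foo_driver = {
 *             .lastclose = drm_fb_helper_lastclose,
 *             ...
 *     };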
2058 */ 2059 void drm_fb_helper_output_poll_changed(struct drm_device *dev) 2060 { 2061 drm_fb_helper_hotplug_event(dev->fb_helper); 2062 } 2063 EXPORT_SYMBOL(drm_fb_helper_output_poll_changed); 2064 2065 /* @user: 1=userspace, 0=fbcon */ 2066 static int drm_fbdev_fb_open(struct fb_info *info, int user) 2067 { 2068 struct drm_fb_helper *fb_helper = info->par; 2069 2070 /* No need to take a ref for fbcon because it unbinds on unregister */ 2071 if (user && !try_module_get(fb_helper->dev->driver->fops->owner)) 2072 return -ENODEV; 2073 2074 return 0; 2075 } 2076 2077 static int drm_fbdev_fb_release(struct fb_info *info, int user) 2078 { 2079 struct drm_fb_helper *fb_helper = info->par; 2080 2081 if (user) 2082 module_put(fb_helper->dev->driver->fops->owner); 2083 2084 return 0; 2085 } 2086 2087 static void drm_fbdev_cleanup(struct drm_fb_helper *fb_helper) 2088 { 2089 struct fb_info *fbi = fb_helper->fbdev; 2090 void *shadow = NULL; 2091 2092 if (!fb_helper->dev) 2093 return; 2094 2095 if (fbi) { 2096 if (fbi->fbdefio) 2097 fb_deferred_io_cleanup(fbi); 2098 if (drm_fbdev_use_shadow_fb(fb_helper)) 2099 shadow = fbi->screen_buffer; 2100 } 2101 2102 drm_fb_helper_fini(fb_helper); 2103 2104 if (shadow) 2105 vfree(shadow); 2106 else if (fb_helper->buffer) 2107 drm_client_buffer_vunmap(fb_helper->buffer); 2108 2109 drm_client_framebuffer_delete(fb_helper->buffer); 2110 } 2111 2112 static void drm_fbdev_release(struct drm_fb_helper *fb_helper) 2113 { 2114 drm_fbdev_cleanup(fb_helper); 2115 drm_client_release(&fb_helper->client); 2116 kfree(fb_helper); 2117 } 2118 2119 /* 2120 * fb_ops.fb_destroy is called by the last put_fb_info() call at the end of 2121 * unregister_framebuffer() or fb_release(). 2122 */ 2123 static void drm_fbdev_fb_destroy(struct fb_info *info) 2124 { 2125 drm_fbdev_release(info->par); 2126 } 2127 2128 static int drm_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma) 2129 { 2130 struct drm_fb_helper *fb_helper = info->par; 2131 2132 if (drm_fbdev_use_shadow_fb(fb_helper)) 2133 return fb_deferred_io_mmap(info, vma); 2134 else if (fb_helper->dev->driver->gem_prime_mmap) 2135 return fb_helper->dev->driver->gem_prime_mmap(fb_helper->buffer->gem, vma); 2136 else 2137 return -ENODEV; 2138 } 2139 2140 static bool drm_fbdev_use_iomem(struct fb_info *info) 2141 { 2142 struct drm_fb_helper *fb_helper = info->par; 2143 struct drm_client_buffer *buffer = fb_helper->buffer; 2144 2145 return !drm_fbdev_use_shadow_fb(fb_helper) && buffer->map.is_iomem; 2146 } 2147 2148 static ssize_t fb_read_screen_base(struct fb_info *info, char __user *buf, size_t count, 2149 loff_t pos) 2150 { 2151 const char __iomem *src = info->screen_base + pos; 2152 size_t alloc_size = min_t(size_t, count, PAGE_SIZE); 2153 ssize_t ret = 0; 2154 int err = 0; 2155 char *tmp; 2156 2157 tmp = kmalloc(alloc_size, GFP_KERNEL); 2158 if (!tmp) 2159 return -ENOMEM; 2160 2161 while (count) { 2162 size_t c = min_t(size_t, count, alloc_size); 2163 2164 memcpy_fromio(tmp, src, c); 2165 if (copy_to_user(buf, tmp, c)) { 2166 err = -EFAULT; 2167 break; 2168 } 2169 2170 src += c; 2171 buf += c; 2172 ret += c; 2173 count -= c; 2174 } 2175 2176 kfree(tmp); 2177 2178 return ret ? 
ret : err; 2179 } 2180 2181 static ssize_t fb_read_screen_buffer(struct fb_info *info, char __user *buf, size_t count, 2182 loff_t pos) 2183 { 2184 const char *src = info->screen_buffer + pos; 2185 2186 if (copy_to_user(buf, src, count)) 2187 return -EFAULT; 2188 2189 return count; 2190 } 2191 2192 static ssize_t drm_fbdev_fb_read(struct fb_info *info, char __user *buf, 2193 size_t count, loff_t *ppos) 2194 { 2195 loff_t pos = *ppos; 2196 size_t total_size; 2197 ssize_t ret; 2198 2199 if (info->screen_size) 2200 total_size = info->screen_size; 2201 else 2202 total_size = info->fix.smem_len; 2203 2204 if (pos >= total_size) 2205 return 0; 2206 if (count >= total_size) 2207 count = total_size; 2208 if (total_size - count < pos) 2209 count = total_size - pos; 2210 2211 if (drm_fbdev_use_iomem(info)) 2212 ret = fb_read_screen_base(info, buf, count, pos); 2213 else 2214 ret = fb_read_screen_buffer(info, buf, count, pos); 2215 2216 if (ret > 0) 2217 *ppos += ret; 2218 2219 return ret; 2220 } 2221 2222 static ssize_t fb_write_screen_base(struct fb_info *info, const char __user *buf, size_t count, 2223 loff_t pos) 2224 { 2225 char __iomem *dst = info->screen_base + pos; 2226 size_t alloc_size = min_t(size_t, count, PAGE_SIZE); 2227 ssize_t ret = 0; 2228 int err = 0; 2229 u8 *tmp; 2230 2231 tmp = kmalloc(alloc_size, GFP_KERNEL); 2232 if (!tmp) 2233 return -ENOMEM; 2234 2235 while (count) { 2236 size_t c = min_t(size_t, count, alloc_size); 2237 2238 if (copy_from_user(tmp, buf, c)) { 2239 err = -EFAULT; 2240 break; 2241 } 2242 memcpy_toio(dst, tmp, c); 2243 2244 dst += c; 2245 buf += c; 2246 ret += c; 2247 count -= c; 2248 } 2249 2250 kfree(tmp); 2251 2252 return ret ? ret : err; 2253 } 2254 2255 static ssize_t fb_write_screen_buffer(struct fb_info *info, const char __user *buf, size_t count, 2256 loff_t pos) 2257 { 2258 char *dst = info->screen_buffer + pos; 2259 2260 if (copy_from_user(dst, buf, count)) 2261 return -EFAULT; 2262 2263 return count; 2264 } 2265 2266 static ssize_t drm_fbdev_fb_write(struct fb_info *info, const char __user *buf, 2267 size_t count, loff_t *ppos) 2268 { 2269 loff_t pos = *ppos; 2270 size_t total_size; 2271 ssize_t ret; 2272 struct drm_rect damage_area; 2273 int err = 0; 2274 2275 if (info->screen_size) 2276 total_size = info->screen_size; 2277 else 2278 total_size = info->fix.smem_len; 2279 2280 if (pos > total_size) 2281 return -EFBIG; 2282 if (count > total_size) { 2283 err = -EFBIG; 2284 count = total_size; 2285 } 2286 if (total_size - count < pos) { 2287 if (!err) 2288 err = -ENOSPC; 2289 count = total_size - pos; 2290 } 2291 2292 /* 2293 * Copy to framebuffer even if we already logged an error. Emulates 2294 * the behavior of the original fbdev implementation. 
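 * A truncated write therefore still returns the number of bytes copied;
 * -EFBIG or -ENOSPC is only returned when no byte could be written at all.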
2295 */ 2296 if (drm_fbdev_use_iomem(info)) 2297 ret = fb_write_screen_base(info, buf, count, pos); 2298 else 2299 ret = fb_write_screen_buffer(info, buf, count, pos); 2300 2301 if (ret < 0) 2302 return ret; /* return last error, if any */ 2303 else if (!ret) 2304 return err; /* return previous error, if any */ 2305 2306 *ppos += ret; 2307 2308 drm_fb_helper_memory_range_to_clip(info, pos, ret, &damage_area); 2309 drm_fb_helper_damage(info, damage_area.x1, damage_area.y1, 2310 drm_rect_width(&damage_area), 2311 drm_rect_height(&damage_area)); 2312 2313 return ret; 2314 } 2315 2316 static void drm_fbdev_fb_fillrect(struct fb_info *info, 2317 const struct fb_fillrect *rect) 2318 { 2319 if (drm_fbdev_use_iomem(info)) 2320 drm_fb_helper_cfb_fillrect(info, rect); 2321 else 2322 drm_fb_helper_sys_fillrect(info, rect); 2323 } 2324 2325 static void drm_fbdev_fb_copyarea(struct fb_info *info, 2326 const struct fb_copyarea *area) 2327 { 2328 if (drm_fbdev_use_iomem(info)) 2329 drm_fb_helper_cfb_copyarea(info, area); 2330 else 2331 drm_fb_helper_sys_copyarea(info, area); 2332 } 2333 2334 static void drm_fbdev_fb_imageblit(struct fb_info *info, 2335 const struct fb_image *image) 2336 { 2337 if (drm_fbdev_use_iomem(info)) 2338 drm_fb_helper_cfb_imageblit(info, image); 2339 else 2340 drm_fb_helper_sys_imageblit(info, image); 2341 } 2342 2343 static const struct fb_ops drm_fbdev_fb_ops = { 2344 .owner = THIS_MODULE, 2345 DRM_FB_HELPER_DEFAULT_OPS, 2346 .fb_open = drm_fbdev_fb_open, 2347 .fb_release = drm_fbdev_fb_release, 2348 .fb_destroy = drm_fbdev_fb_destroy, 2349 .fb_mmap = drm_fbdev_fb_mmap, 2350 .fb_read = drm_fbdev_fb_read, 2351 .fb_write = drm_fbdev_fb_write, 2352 .fb_fillrect = drm_fbdev_fb_fillrect, 2353 .fb_copyarea = drm_fbdev_fb_copyarea, 2354 .fb_imageblit = drm_fbdev_fb_imageblit, 2355 }; 2356 2357 static struct fb_deferred_io drm_fbdev_defio = { 2358 .delay = HZ / 20, 2359 .deferred_io = drm_fb_helper_deferred_io, 2360 }; 2361 2362 /* 2363 * This function uses the client API to create a framebuffer backed by a dumb buffer. 2364 * 2365 * The _sys_ versions are used for &fb_ops.fb_read, fb_write, fb_fillrect, 2366 * fb_copyarea, fb_imageblit. 
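 *
 * When the framebuffer is in I/O memory, the corresponding _cfb_ helpers and
 * the screen_base read/write paths are used instead; drm_fbdev_use_iomem()
 * makes that choice at runtime. The backing storage is either a vzalloc'ed
 * shadow buffer flushed out via deferred I/O, or the vmap'ed client buffer
 * itself.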
2367 */ 2368 static int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper, 2369 struct drm_fb_helper_surface_size *sizes) 2370 { 2371 struct drm_client_dev *client = &fb_helper->client; 2372 struct drm_device *dev = fb_helper->dev; 2373 struct drm_client_buffer *buffer; 2374 struct drm_framebuffer *fb; 2375 struct fb_info *fbi; 2376 u32 format; 2377 struct iosys_map map; 2378 int ret; 2379 2380 drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n", 2381 sizes->surface_width, sizes->surface_height, 2382 sizes->surface_bpp); 2383 2384 format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth); 2385 buffer = drm_client_framebuffer_create(client, sizes->surface_width, 2386 sizes->surface_height, format); 2387 if (IS_ERR(buffer)) 2388 return PTR_ERR(buffer); 2389 2390 fb_helper->buffer = buffer; 2391 fb_helper->fb = buffer->fb; 2392 fb = buffer->fb; 2393 2394 fbi = drm_fb_helper_alloc_fbi(fb_helper); 2395 if (IS_ERR(fbi)) 2396 return PTR_ERR(fbi); 2397 2398 fbi->fbops = &drm_fbdev_fb_ops; 2399 fbi->screen_size = sizes->surface_height * fb->pitches[0]; 2400 fbi->fix.smem_len = fbi->screen_size; 2401 fbi->flags = FBINFO_DEFAULT; 2402 2403 drm_fb_helper_fill_info(fbi, fb_helper, sizes); 2404 2405 if (drm_fbdev_use_shadow_fb(fb_helper)) { 2406 fbi->screen_buffer = vzalloc(fbi->screen_size); 2407 if (!fbi->screen_buffer) 2408 return -ENOMEM; 2409 fbi->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST; 2410 2411 fbi->fbdefio = &drm_fbdev_defio; 2412 fb_deferred_io_init(fbi); 2413 } else { 2414 /* buffer is mapped for HW framebuffer */ 2415 ret = drm_client_buffer_vmap(fb_helper->buffer, &map); 2416 if (ret) 2417 return ret; 2418 if (map.is_iomem) { 2419 fbi->screen_base = map.vaddr_iomem; 2420 } else { 2421 fbi->screen_buffer = map.vaddr; 2422 fbi->flags |= FBINFO_VIRTFB; 2423 } 2424 2425 /* 2426 * Shamelessly leak the physical address to user-space. As 2427 * page_to_phys() is undefined for I/O memory, warn in this 2428 * case. 
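 * The leak only happens with CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM enabled, the
 * drm_leak_fbdev_smem module parameter set and a buffer that lives in
 * system memory.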
2429 */ 2430 #if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM) 2431 if (drm_leak_fbdev_smem && fbi->fix.smem_start == 0 && 2432 !drm_WARN_ON_ONCE(dev, map.is_iomem)) 2433 fbi->fix.smem_start = 2434 page_to_phys(virt_to_page(fbi->screen_buffer)); 2435 #endif 2436 } 2437 2438 return 0; 2439 } 2440 2441 static const struct drm_fb_helper_funcs drm_fb_helper_generic_funcs = { 2442 .fb_probe = drm_fb_helper_generic_probe, 2443 }; 2444 2445 static void drm_fbdev_client_unregister(struct drm_client_dev *client) 2446 { 2447 struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); 2448 2449 if (fb_helper->fbdev) 2450 /* drm_fbdev_fb_destroy() takes care of cleanup */ 2451 drm_fb_helper_unregister_fbi(fb_helper); 2452 else 2453 drm_fbdev_release(fb_helper); 2454 } 2455 2456 static int drm_fbdev_client_restore(struct drm_client_dev *client) 2457 { 2458 drm_fb_helper_lastclose(client->dev); 2459 2460 return 0; 2461 } 2462 2463 static int drm_fbdev_client_hotplug(struct drm_client_dev *client) 2464 { 2465 struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client); 2466 struct drm_device *dev = client->dev; 2467 int ret; 2468 2469 /* Setup is not retried if it has failed */ 2470 if (!fb_helper->dev && fb_helper->funcs) 2471 return 0; 2472 2473 if (dev->fb_helper) 2474 return drm_fb_helper_hotplug_event(dev->fb_helper); 2475 2476 if (!dev->mode_config.num_connector) { 2477 drm_dbg_kms(dev, "No connectors found, will not create framebuffer!\n"); 2478 return 0; 2479 } 2480 2481 drm_fb_helper_prepare(dev, fb_helper, &drm_fb_helper_generic_funcs); 2482 2483 ret = drm_fb_helper_init(dev, fb_helper); 2484 if (ret) 2485 goto err; 2486 2487 if (!drm_drv_uses_atomic_modeset(dev)) 2488 drm_helper_disable_unused_functions(dev); 2489 2490 ret = drm_fb_helper_initial_config(fb_helper, fb_helper->preferred_bpp); 2491 if (ret) 2492 goto err_cleanup; 2493 2494 return 0; 2495 2496 err_cleanup: 2497 drm_fbdev_cleanup(fb_helper); 2498 err: 2499 fb_helper->dev = NULL; 2500 fb_helper->fbdev = NULL; 2501 2502 drm_err(dev, "fbdev: Failed to setup generic emulation (ret=%d)\n", ret); 2503 2504 return ret; 2505 } 2506 2507 static const struct drm_client_funcs drm_fbdev_client_funcs = { 2508 .owner = THIS_MODULE, 2509 .unregister = drm_fbdev_client_unregister, 2510 .restore = drm_fbdev_client_restore, 2511 .hotplug = drm_fbdev_client_hotplug, 2512 }; 2513 2514 /** 2515 * drm_fbdev_generic_setup() - Set up generic fbdev emulation 2516 * @dev: DRM device 2517 * @preferred_bpp: Preferred bits per pixel for the device. 2518 * @dev->mode_config.preferred_depth is used if this is zero. 2519 * 2520 * This function sets up generic fbdev emulation for drivers that support 2521 * dumb buffers with a virtual address and that can be mmap'ed. 2522 * drm_fbdev_generic_setup() shall be called after the DRM driver has registered 2523 * the new DRM device with drm_dev_register(). 2524 * 2525 * Restore, hotplug events and teardown are all taken care of. Drivers that do 2526 * suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves. 2527 * Simple drivers might use drm_mode_config_helper_suspend(). 2528 * 2529 * Drivers that set the dirty callback on their framebuffer will get a shadow 2530 * fbdev buffer that is blitted onto the real buffer. This is done in order to 2531 * make deferred I/O work with all kinds of buffers. A shadow buffer can be 2532 * requested explicitly by setting struct drm_mode_config.prefer_shadow or 2533 * struct drm_mode_config.prefer_shadow_fbdev to true beforehand.
This is 2534 * required to use generic fbdev emulation with SHMEM helpers. 2535 * 2536 * This function is safe to call even when there are no connectors present. 2537 * Setup will be retried on the next hotplug event. 2538 * 2539 * The fbdev is destroyed by drm_dev_unregister(). 2540 */ 2541 void drm_fbdev_generic_setup(struct drm_device *dev, 2542 unsigned int preferred_bpp) 2543 { 2544 struct drm_fb_helper *fb_helper; 2545 int ret; 2546 2547 drm_WARN(dev, !dev->registered, "Device has not been registered.\n"); 2548 drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n"); 2549 2550 if (!drm_fbdev_emulation) 2551 return; 2552 2553 fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL); 2554 if (!fb_helper) { 2555 drm_err(dev, "Failed to allocate fb_helper\n"); 2556 return; 2557 } 2558 2559 ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs); 2560 if (ret) { 2561 kfree(fb_helper); 2562 drm_err(dev, "Failed to register client: %d\n", ret); 2563 return; 2564 } 2565 2566 /* 2567 * FIXME: This mixes up depth with bpp, which results in a glorious 2568 * mess, with some drivers picking wrong fbdev defaults and 2569 * others wrong preferred_depth defaults. 2570 */ 2571 if (!preferred_bpp) 2572 preferred_bpp = dev->mode_config.preferred_depth; 2573 if (!preferred_bpp) 2574 preferred_bpp = 32; 2575 fb_helper->preferred_bpp = preferred_bpp; 2576 2577 ret = drm_fbdev_client_hotplug(&fb_helper->client); 2578 if (ret) 2579 drm_dbg_kms(dev, "client hotplug ret=%d\n", ret); 2580 2581 drm_client_register(&fb_helper->client); 2582 } 2583 EXPORT_SYMBOL(drm_fbdev_generic_setup); 2584
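/*
 * Example usage of drm_fbdev_generic_setup(), as a sketch only: the "foo"
 * device pointer and the choice of 32 bpp are illustrative assumptions. The
 * call goes right after drm_dev_register() in the driver's probe path:
 *
 *     ret = drm_dev_register(&foo->drm, 0);
 *     if (ret)
 *             return ret;
 *
 *     drm_fbdev_generic_setup(&foo->drm, 32);
 */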