// SPDX-License-Identifier: GPL-2.0-only
/*
 * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
 * Author: Liviu Dudau <Liviu.Dudau@arm.com>
 *
 * ARM Mali DP500/DP550/DP650 KMS/DRM driver
 */

#include <linux/module.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/of_reserved_mem.h>
#include <linux/pm_runtime.h>
#include <linux/debugfs.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "malidp_drv.h"
#include "malidp_mw.h"
#include "malidp_regs.h"
#include "malidp_hw.h"

#define MALIDP_CONF_VALID_TIMEOUT	250
#define AFBC_HEADER_SIZE		16
#define AFBC_SUPERBLK_ALIGNMENT		128

static void malidp_write_gamma_table(struct malidp_hw_device *hwdev,
				     u32 data[MALIDP_COEFFTAB_NUM_COEFFS])
{
	int i;
	/* Update all channels with a single gamma curve. */
	const u32 gamma_write_mask = GENMASK(18, 16);
	/*
	 * Always write an entire table, so the address field in
	 * DE_COEFFTAB_ADDR is 0 and we can use the gamma_write_mask bitmask
	 * directly.
	 */
	malidp_hw_write(hwdev, gamma_write_mask,
			hwdev->hw->map.coeffs_base + MALIDP_COEF_TABLE_ADDR);
	for (i = 0; i < MALIDP_COEFFTAB_NUM_COEFFS; ++i)
		malidp_hw_write(hwdev, data[i],
				hwdev->hw->map.coeffs_base +
				MALIDP_COEF_TABLE_DATA);
}

static void malidp_atomic_commit_update_gamma(struct drm_crtc *crtc,
					      struct drm_crtc_state *old_state)
{
	struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
	struct malidp_hw_device *hwdev = malidp->dev;

	if (!crtc->state->color_mgmt_changed)
		return;

	if (!crtc->state->gamma_lut) {
		malidp_hw_clearbits(hwdev,
				    MALIDP_DISP_FUNC_GAMMA,
				    MALIDP_DE_DISPLAY_FUNC);
	} else {
		struct malidp_crtc_state *mc =
			to_malidp_crtc_state(crtc->state);

		if (!old_state->gamma_lut || (crtc->state->gamma_lut->base.id !=
					      old_state->gamma_lut->base.id))
			malidp_write_gamma_table(hwdev, mc->gamma_coeffs);

		malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_GAMMA,
				  MALIDP_DE_DISPLAY_FUNC);
	}
}

static
void malidp_atomic_commit_update_coloradj(struct drm_crtc *crtc,
					  struct drm_crtc_state *old_state)
{
	struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
	struct malidp_hw_device *hwdev = malidp->dev;
	int i;

	if (!crtc->state->color_mgmt_changed)
		return;

	if (!crtc->state->ctm) {
		malidp_hw_clearbits(hwdev, MALIDP_DISP_FUNC_CADJ,
				    MALIDP_DE_DISPLAY_FUNC);
	} else {
		struct malidp_crtc_state *mc =
			to_malidp_crtc_state(crtc->state);

		if (!old_state->ctm || (crtc->state->ctm->base.id !=
					old_state->ctm->base.id))
			for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; ++i)
				malidp_hw_write(hwdev,
						mc->coloradj_coeffs[i],
						hwdev->hw->map.coeffs_base +
						MALIDP_COLOR_ADJ_COEF + 4 * i);

		malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_CADJ,
				  MALIDP_DE_DISPLAY_FUNC);
	}
}

static void malidp_atomic_commit_se_config(struct drm_crtc *crtc,
					   struct drm_crtc_state *old_state)
{
	struct malidp_crtc_state *cs =
		to_malidp_crtc_state(crtc->state);
	struct malidp_crtc_state *old_cs = to_malidp_crtc_state(old_state);
	struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
	struct malidp_hw_device *hwdev = malidp->dev;
	struct malidp_se_config *s = &cs->scaler_config;
	struct malidp_se_config *old_s = &old_cs->scaler_config;
	u32 se_control = hwdev->hw->map.se_base +
			 ((hwdev->hw->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ?
			 0x10 : 0xC);
	u32 layer_control = se_control + MALIDP_SE_LAYER_CONTROL;
	u32 scr = se_control + MALIDP_SE_SCALING_CONTROL;
	u32 val;

	/* Set SE_CONTROL */
	if (!s->scale_enable) {
		val = malidp_hw_read(hwdev, se_control);
		val &= ~MALIDP_SE_SCALING_EN;
		malidp_hw_write(hwdev, val, se_control);
		return;
	}

	hwdev->hw->se_set_scaling_coeffs(hwdev, s, old_s);
	val = malidp_hw_read(hwdev, se_control);
	val |= MALIDP_SE_SCALING_EN | MALIDP_SE_ALPHA_EN;

	val &= ~MALIDP_SE_ENH(MALIDP_SE_ENH_MASK);
	val |= s->enhancer_enable ? MALIDP_SE_ENH(3) : 0;

	val |= MALIDP_SE_RGBO_IF_EN;
	malidp_hw_write(hwdev, val, se_control);

	/* Set IN_SIZE & OUT_SIZE. */
	val = MALIDP_SE_SET_V_SIZE(s->input_h) |
	      MALIDP_SE_SET_H_SIZE(s->input_w);
	malidp_hw_write(hwdev, val, layer_control + MALIDP_SE_L0_IN_SIZE);
	val = MALIDP_SE_SET_V_SIZE(s->output_h) |
	      MALIDP_SE_SET_H_SIZE(s->output_w);
	malidp_hw_write(hwdev, val, layer_control + MALIDP_SE_L0_OUT_SIZE);

	/* Set phase regs. */
	malidp_hw_write(hwdev, s->h_init_phase, scr + MALIDP_SE_H_INIT_PH);
	malidp_hw_write(hwdev, s->h_delta_phase, scr + MALIDP_SE_H_DELTA_PH);
	malidp_hw_write(hwdev, s->v_init_phase, scr + MALIDP_SE_V_INIT_PH);
	malidp_hw_write(hwdev, s->v_delta_phase, scr + MALIDP_SE_V_DELTA_PH);
}

/*
 * set the "config valid" bit and wait until the hardware acts on it
 */
static int malidp_set_and_wait_config_valid(struct drm_device *drm)
{
	struct malidp_drm *malidp = drm->dev_private;
	struct malidp_hw_device *hwdev = malidp->dev;
	int ret;

	hwdev->hw->set_config_valid(hwdev, 1);
	/* don't wait for config_valid flag if we are in config mode */
	if (hwdev->hw->in_config_mode(hwdev)) {
		atomic_set(&malidp->config_valid, MALIDP_CONFIG_VALID_DONE);
		return 0;
	}

	ret = wait_event_interruptible_timeout(malidp->wq,
			atomic_read(&malidp->config_valid) == MALIDP_CONFIG_VALID_DONE,
			msecs_to_jiffies(MALIDP_CONF_VALID_TIMEOUT));

	return (ret > 0) ? 0 : -ETIMEDOUT;
}

static void malidp_atomic_commit_hw_done(struct drm_atomic_state *state)
{
	struct drm_device *drm = state->dev;
	struct malidp_drm *malidp = drm->dev_private;
	int loop = 5;

	malidp->event = malidp->crtc.state->event;
	malidp->crtc.state->event = NULL;

	if (malidp->crtc.state->active) {
		/*
		 * if we have an event to deliver to userspace, make sure
		 * the vblank is enabled as we are sending it from the IRQ
		 * handler.
		 */
		if (malidp->event)
			drm_crtc_vblank_get(&malidp->crtc);

		/* only set config_valid if the CRTC is enabled */
		if (malidp_set_and_wait_config_valid(drm) < 0) {
			/*
			 * make a loop around the second CVAL setting and
			 * try 5 times before giving up.
			 */
			while (loop--) {
				if (!malidp_set_and_wait_config_valid(drm))
					break;
			}
			DRM_DEBUG_DRIVER("timed out waiting for updated configuration\n");
		}

	} else if (malidp->event) {
		/* CRTC inactive means vblank IRQ is disabled, send event directly */
		spin_lock_irq(&drm->event_lock);
		drm_crtc_send_vblank_event(&malidp->crtc, malidp->event);
		malidp->event = NULL;
		spin_unlock_irq(&drm->event_lock);
	}
	drm_atomic_helper_commit_hw_done(state);
}

static void malidp_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *drm = state->dev;
	struct malidp_drm *malidp = drm->dev_private;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	int i;

	pm_runtime_get_sync(drm->dev);

	/*
	 * set config_valid to a special value to let IRQ handlers
	 * know that we are updating registers
	 */
	atomic_set(&malidp->config_valid, MALIDP_CONFIG_START);
	malidp->dev->hw->set_config_valid(malidp->dev, 0);

	drm_atomic_helper_commit_modeset_disables(drm, state);

	for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
		malidp_atomic_commit_update_gamma(crtc, old_crtc_state);
		malidp_atomic_commit_update_coloradj(crtc, old_crtc_state);
		malidp_atomic_commit_se_config(crtc, old_crtc_state);
	}

	drm_atomic_helper_commit_planes(drm, state, DRM_PLANE_COMMIT_ACTIVE_ONLY);

	malidp_mw_atomic_commit(drm, state);

	drm_atomic_helper_commit_modeset_enables(drm, state);

	malidp_atomic_commit_hw_done(state);

	pm_runtime_put(drm->dev);

	drm_atomic_helper_cleanup_planes(drm, state);
}

static const struct drm_mode_config_helper_funcs malidp_mode_config_helpers = {
	.atomic_commit_tail = malidp_atomic_commit_tail,
};

static bool
malidp_verify_afbc_framebuffer_caps(struct drm_device *dev,
				    const struct drm_mode_fb_cmd2 *mode_cmd)
{
	if (malidp_format_mod_supported(dev, mode_cmd->pixel_format,
					mode_cmd->modifier[0]) == false)
		return false;

	if (mode_cmd->offsets[0] != 0) {
		DRM_DEBUG_KMS("AFBC buffers' plane offset should be 0\n");
		return false;
	}

	switch (mode_cmd->modifier[0] & AFBC_SIZE_MASK) {
	case AFBC_SIZE_16X16:
		if ((mode_cmd->width % 16) || (mode_cmd->height % 16)) {
			DRM_DEBUG_KMS("AFBC buffers must be aligned to 16 pixels\n");
			return false;
		}
		break;
	default:
		DRM_DEBUG_KMS("Unsupported AFBC block size\n");
		return false;
	}

	return true;
}

static bool
malidp_verify_afbc_framebuffer_size(struct drm_device *dev,
				    struct drm_file *file,
				    const struct drm_mode_fb_cmd2 *mode_cmd)
{
	int n_superblocks = 0;
	const struct drm_format_info *info;
	struct drm_gem_object *objs = NULL;
	u32 afbc_superblock_size = 0, afbc_superblock_height = 0;
	u32 afbc_superblock_width = 0, afbc_size = 0;
	int bpp = 0;

	switch (mode_cmd->modifier[0] & AFBC_SIZE_MASK) {
	case AFBC_SIZE_16X16:
		afbc_superblock_height = 16;
		afbc_superblock_width = 16;
		break;
	default:
		DRM_DEBUG_KMS("AFBC superblock size is not supported\n");
		return false;
	}

	info = drm_get_format_info(dev, mode_cmd);

	n_superblocks = (mode_cmd->width / afbc_superblock_width) *
		(mode_cmd->height / afbc_superblock_height);

	bpp = malidp_format_get_bpp(info->format);

	afbc_superblock_size = (bpp * afbc_superblock_width * afbc_superblock_height) /
		BITS_PER_BYTE;

	afbc_size = ALIGN(n_superblocks * AFBC_HEADER_SIZE, AFBC_SUPERBLK_ALIGNMENT);
	afbc_size += n_superblocks * ALIGN(afbc_superblock_size, AFBC_SUPERBLK_ALIGNMENT);

	if ((mode_cmd->width * bpp) != (mode_cmd->pitches[0] * BITS_PER_BYTE)) {
		DRM_DEBUG_KMS("Invalid value of (pitch * BITS_PER_BYTE) (=%u) "
			      "should be same as width (=%u) * bpp (=%u)\n",
			      (mode_cmd->pitches[0] * BITS_PER_BYTE),
			      mode_cmd->width, bpp);
		return false;
	}

	objs = drm_gem_object_lookup(file, mode_cmd->handles[0]);
	if (!objs) {
		DRM_DEBUG_KMS("Failed to lookup GEM object\n");
		return false;
	}

	if (objs->size < afbc_size) {
		DRM_DEBUG_KMS("buffer size (%zu) too small for AFBC buffer size = %u\n",
			      objs->size, afbc_size);
		drm_gem_object_put_unlocked(objs);
		return false;
	}

	drm_gem_object_put_unlocked(objs);

	return true;
}

static bool
malidp_verify_afbc_framebuffer(struct drm_device *dev, struct drm_file *file,
			       const struct drm_mode_fb_cmd2 *mode_cmd)
{
	if (malidp_verify_afbc_framebuffer_caps(dev, mode_cmd))
		return malidp_verify_afbc_framebuffer_size(dev, file, mode_cmd);

	return false;
}

static struct drm_framebuffer *
malidp_fb_create(struct drm_device *dev, struct drm_file *file,
		 const struct drm_mode_fb_cmd2 *mode_cmd)
{
	if (mode_cmd->modifier[0]) {
		if (!malidp_verify_afbc_framebuffer(dev, file, mode_cmd))
			return ERR_PTR(-EINVAL);
	}

	return drm_gem_fb_create(dev, file, mode_cmd);
}

static const struct drm_mode_config_funcs malidp_mode_config_funcs = {
	.fb_create = malidp_fb_create,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};

static int malidp_init(struct drm_device *drm)
{
	int ret;
	struct malidp_drm *malidp = drm->dev_private;
	struct malidp_hw_device *hwdev = malidp->dev;

	drm_mode_config_init(drm);

	drm->mode_config.min_width = hwdev->min_line_size;
	drm->mode_config.min_height = hwdev->min_line_size;
	drm->mode_config.max_width = hwdev->max_line_size;
	drm->mode_config.max_height = hwdev->max_line_size;
	drm->mode_config.funcs = &malidp_mode_config_funcs;
	drm->mode_config.helper_private = &malidp_mode_config_helpers;
	drm->mode_config.allow_fb_modifiers = true;

	ret = malidp_crtc_init(drm);
	if (ret)
		goto crtc_fail;

	ret = malidp_mw_connector_init(drm);
	if (ret)
		goto crtc_fail;

	return 0;

crtc_fail:
	drm_mode_config_cleanup(drm);
	return ret;
}

static void malidp_fini(struct drm_device *drm)
{
	drm_mode_config_cleanup(drm);
}

static int malidp_irq_init(struct platform_device *pdev)
{
	int irq_de, irq_se, ret = 0;
	struct drm_device *drm = dev_get_drvdata(&pdev->dev);
	struct malidp_drm *malidp = drm->dev_private;
	struct malidp_hw_device *hwdev = malidp->dev;

	/* fetch the interrupts from DT */
	irq_de = platform_get_irq_byname(pdev, "DE");
	if (irq_de < 0) {
		DRM_ERROR("no 'DE' IRQ specified!\n");
		return irq_de;
	}
	irq_se = platform_get_irq_byname(pdev, "SE");
	if (irq_se < 0) {
		DRM_ERROR("no 'SE' IRQ specified!\n");
		return irq_se;
	}

	ret = malidp_de_irq_init(drm, irq_de);
	if (ret)
		return ret;

	ret = malidp_se_irq_init(drm, irq_se);
	if (ret) {
		malidp_de_irq_fini(hwdev);
		return ret;
	}

	return 0;
}

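/* file_operations built from the DRM CMA GEM helpers (open, mmap, ioctl, ...) */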
DEFINE_DRM_GEM_CMA_FOPS(fops);

static int malidp_dumb_create(struct drm_file *file_priv,
			      struct drm_device *drm,
			      struct drm_mode_create_dumb *args)
{
	struct malidp_drm *malidp = drm->dev_private;
	/* allocate for the worst case scenario, i.e. rotated buffers */
	u8 alignment = malidp_hw_get_pitch_align(malidp->dev, 1);

	args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), alignment);

	return drm_gem_cma_dumb_create_internal(file_priv, drm, args);
}

#ifdef CONFIG_DEBUG_FS

static void malidp_error_stats_init(struct malidp_error_stats *error_stats)
{
	error_stats->num_errors = 0;
	error_stats->last_error_status = 0;
	error_stats->last_error_vblank = -1;
}

void malidp_error(struct malidp_drm *malidp,
		  struct malidp_error_stats *error_stats, u32 status,
		  u64 vblank)
{
	unsigned long irqflags;

	spin_lock_irqsave(&malidp->errors_lock, irqflags);
	error_stats->last_error_status = status;
	error_stats->last_error_vblank = vblank;
	error_stats->num_errors++;
	spin_unlock_irqrestore(&malidp->errors_lock, irqflags);
}

static void malidp_error_stats_dump(const char *prefix,
				    struct malidp_error_stats error_stats,
				    struct seq_file *m)
{
	seq_printf(m, "[%s] num_errors : %d\n", prefix,
		   error_stats.num_errors);
	seq_printf(m, "[%s] last_error_status : 0x%08x\n", prefix,
		   error_stats.last_error_status);
	seq_printf(m, "[%s] last_error_vblank : %lld\n", prefix,
		   error_stats.last_error_vblank);
}

static int malidp_show_stats(struct seq_file *m, void *arg)
{
	struct drm_device *drm = m->private;
	struct malidp_drm *malidp = drm->dev_private;
	unsigned long irqflags;
	struct malidp_error_stats de_errors, se_errors;

	spin_lock_irqsave(&malidp->errors_lock, irqflags);
	de_errors = malidp->de_errors;
	se_errors = malidp->se_errors;
	spin_unlock_irqrestore(&malidp->errors_lock, irqflags);
	malidp_error_stats_dump("DE", de_errors, m);
	malidp_error_stats_dump("SE", se_errors, m);
	return 0;
}

static int malidp_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, malidp_show_stats, inode->i_private);
}

static ssize_t malidp_debugfs_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_device *drm = m->private;
	struct malidp_drm *malidp = drm->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&malidp->errors_lock, irqflags);
	malidp_error_stats_init(&malidp->de_errors);
	malidp_error_stats_init(&malidp->se_errors);
	spin_unlock_irqrestore(&malidp->errors_lock, irqflags);
	return len;
}

static const struct file_operations malidp_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = malidp_debugfs_open,
	.read = seq_read,
	.write = malidp_debugfs_write,
	.llseek = seq_lseek,
	.release = single_release,
};

static void malidp_debugfs_init(struct drm_minor *minor)
{
	struct malidp_drm *malidp = minor->dev->dev_private;

	malidp_error_stats_init(&malidp->de_errors);
	malidp_error_stats_init(&malidp->se_errors);
	spin_lock_init(&malidp->errors_lock);
	debugfs_create_file("debug", S_IRUGO | S_IWUSR, minor->debugfs_root,
			    minor->dev, &malidp_debugfs_fops);
}

#endif //CONFIG_DEBUG_FS

static struct drm_driver malidp_driver = {
	.driver_features =
			   DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	.gem_free_object_unlocked = drm_gem_cma_free_object,
	.gem_vm_ops = &drm_gem_cma_vm_ops,
	.dumb_create = malidp_dumb_create,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
	.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
	.gem_prime_vmap = drm_gem_cma_prime_vmap,
	.gem_prime_vunmap = drm_gem_cma_prime_vunmap,
	.gem_prime_mmap = drm_gem_cma_prime_mmap,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init = malidp_debugfs_init,
#endif
	.fops = &fops,
	.name = "mali-dp",
	.desc = "ARM Mali Display Processor driver",
	.date = "20160106",
	.major = 1,
	.minor = 0,
};

static const struct of_device_id malidp_drm_of_match[] = {
	{
		.compatible = "arm,mali-dp500",
		.data = &malidp_device[MALIDP_500]
	},
	{
		.compatible = "arm,mali-dp550",
		.data = &malidp_device[MALIDP_550]
	},
	{
		.compatible = "arm,mali-dp650",
		.data = &malidp_device[MALIDP_650]
	},
	{},
};
MODULE_DEVICE_TABLE(of, malidp_drm_of_match);

static bool malidp_is_compatible_hw_id(struct malidp_hw_device *hwdev,
				       const struct of_device_id *dev_id)
{
	u32 core_id;
	const char *compatstr_dp500 = "arm,mali-dp500";
	bool is_dp500;
	bool dt_is_dp500;

	/*
	 * The DP500 CORE_ID register is in a different location, so check it
	 * first. If the product id field matches, then this is DP500, otherwise
	 * check the DP550/650 CORE_ID register.
	 */
	core_id = malidp_hw_read(hwdev, MALIDP500_DC_BASE + MALIDP_DE_CORE_ID);
	/* Offset 0x18 will never read 0x500 on products other than DP500. */
	is_dp500 = (MALIDP_PRODUCT_ID(core_id) == 0x500);
	dt_is_dp500 = strnstr(dev_id->compatible, compatstr_dp500,
			      sizeof(dev_id->compatible)) != NULL;
	if (is_dp500 != dt_is_dp500) {
		DRM_ERROR("Device-tree expects %s, but hardware %s DP500.\n",
			  dev_id->compatible, is_dp500 ? "is" : "is not");
		return false;
	} else if (!dt_is_dp500) {
		u16 product_id;
		char buf[32];

		core_id = malidp_hw_read(hwdev,
					 MALIDP550_DC_BASE + MALIDP_DE_CORE_ID);
		product_id = MALIDP_PRODUCT_ID(core_id);
		snprintf(buf, sizeof(buf), "arm,mali-dp%X", product_id);
		if (!strnstr(dev_id->compatible, buf,
			     sizeof(dev_id->compatible))) {
			DRM_ERROR("Device-tree expects %s, but hardware is DP%03X.\n",
				  dev_id->compatible, product_id);
			return false;
		}
	}
	return true;
}

static bool malidp_has_sufficient_address_space(const struct resource *res,
						const struct of_device_id *dev_id)
{
	resource_size_t res_size = resource_size(res);
	const char *compatstr_dp500 = "arm,mali-dp500";

	if (!strnstr(dev_id->compatible, compatstr_dp500,
		     sizeof(dev_id->compatible)))
		return res_size >= MALIDP550_ADDR_SPACE_SIZE;
	else if (res_size < MALIDP500_ADDR_SPACE_SIZE)
		return false;
	return true;
}

static ssize_t core_id_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct malidp_drm *malidp = drm->dev_private;

	return snprintf(buf, PAGE_SIZE, "%08x\n", malidp->core_id);
}

static DEVICE_ATTR_RO(core_id);

static int malidp_init_sysfs(struct device *dev)
{
	int ret = device_create_file(dev, &dev_attr_core_id);

	if (ret)
		DRM_ERROR("failed to create device file for core_id\n");

	return ret;
}

static void malidp_fini_sysfs(struct device *dev)
{
	device_remove_file(dev, &dev_attr_core_id);
}

#define MAX_OUTPUT_CHANNELS	3

static int malidp_runtime_pm_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct malidp_drm *malidp = drm->dev_private;
	struct malidp_hw_device *hwdev = malidp->dev;

	/* we can only suspend if the hardware is in config mode */
	WARN_ON(!hwdev->hw->in_config_mode(hwdev));

	malidp_se_irq_fini(hwdev);
	malidp_de_irq_fini(hwdev);
	hwdev->pm_suspended = true;
	clk_disable_unprepare(hwdev->mclk);
	clk_disable_unprepare(hwdev->aclk);
	clk_disable_unprepare(hwdev->pclk);

	return 0;
}

static int malidp_runtime_pm_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct malidp_drm *malidp = drm->dev_private;
	struct malidp_hw_device *hwdev = malidp->dev;

	clk_prepare_enable(hwdev->pclk);
	clk_prepare_enable(hwdev->aclk);
	clk_prepare_enable(hwdev->mclk);
	hwdev->pm_suspended = false;
	malidp_de_irq_hw_init(hwdev);
	malidp_se_irq_hw_init(hwdev);

	return 0;
}

static int malidp_bind(struct device *dev)
{
	struct resource *res;
	struct drm_device *drm;
	struct malidp_drm *malidp;
	struct malidp_hw_device *hwdev;
	struct platform_device *pdev = to_platform_device(dev);
	struct of_device_id const *dev_id;
	struct drm_encoder *encoder;
	/* number of lines for the R, G and B output */
	u8 output_width[MAX_OUTPUT_CHANNELS];
	int ret = 0, i;
	u32 version, out_depth = 0;

	malidp = devm_kzalloc(dev, sizeof(*malidp), GFP_KERNEL);
	if (!malidp)
		return -ENOMEM;

	hwdev = devm_kzalloc(dev, sizeof(*hwdev), GFP_KERNEL);
	if (!hwdev)
		return -ENOMEM;

	hwdev->hw = (struct malidp_hw *)of_device_get_match_data(dev);
	malidp->dev = hwdev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hwdev->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(hwdev->regs))
		return PTR_ERR(hwdev->regs);

	hwdev->pclk = devm_clk_get(dev, "pclk");
	if (IS_ERR(hwdev->pclk))
		return PTR_ERR(hwdev->pclk);

	hwdev->aclk = devm_clk_get(dev, "aclk");
	if (IS_ERR(hwdev->aclk))
		return PTR_ERR(hwdev->aclk);

	hwdev->mclk = devm_clk_get(dev, "mclk");
	if (IS_ERR(hwdev->mclk))
		return PTR_ERR(hwdev->mclk);

	hwdev->pxlclk = devm_clk_get(dev, "pxlclk");
	if (IS_ERR(hwdev->pxlclk))
		return PTR_ERR(hwdev->pxlclk);

	/* Get the optional framebuffer memory resource */
	ret = of_reserved_mem_device_init(dev);
	if (ret && ret != -ENODEV)
		return ret;

	drm = drm_dev_alloc(&malidp_driver, dev);
	if (IS_ERR(drm)) {
		ret = PTR_ERR(drm);
		goto alloc_fail;
	}

	drm->dev_private = malidp;
	dev_set_drvdata(dev, drm);

	/* Enable power management */
	pm_runtime_enable(dev);

	/* Resume device to enable the clocks */
	if (pm_runtime_enabled(dev))
		pm_runtime_get_sync(dev);
	else
		malidp_runtime_pm_resume(dev);

	dev_id = of_match_device(malidp_drm_of_match, dev);
	if (!dev_id) {
		ret = -EINVAL;
		goto query_hw_fail;
	}

	if (!malidp_has_sufficient_address_space(res, dev_id)) {
		DRM_ERROR("Insufficient address space in device-tree.\n");
		ret = -EINVAL;
		goto query_hw_fail;
	}

	if (!malidp_is_compatible_hw_id(hwdev, dev_id)) {
		ret = -EINVAL;
		goto query_hw_fail;
	}

	ret = hwdev->hw->query_hw(hwdev);
	if (ret) {
		DRM_ERROR("Invalid HW configuration\n");
		goto query_hw_fail;
	}

	version = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_DE_CORE_ID);
	DRM_INFO("found ARM Mali-DP%3x version r%dp%d\n", version >> 16,
		 (version >> 12) & 0xf, (version >> 8) & 0xf);

	malidp->core_id = version;

	ret = of_property_read_u32(dev->of_node,
				   "arm,malidp-arqos-value",
				   &hwdev->arqos_value);
	if (ret)
		hwdev->arqos_value = 0x0;

	/* set the number of lines used for output of RGB data */
	ret = of_property_read_u8_array(dev->of_node,
					"arm,malidp-output-port-lines",
					output_width, MAX_OUTPUT_CHANNELS);
	if (ret)
		goto query_hw_fail;

	for (i = 0; i < MAX_OUTPUT_CHANNELS; i++)
		out_depth = (out_depth << 8) | (output_width[i] & 0xf);
	malidp_hw_write(hwdev, out_depth, hwdev->hw->map.out_depth_base);
	hwdev->output_color_depth = out_depth;

	atomic_set(&malidp->config_valid, MALIDP_CONFIG_VALID_INIT);
	init_waitqueue_head(&malidp->wq);

	ret = malidp_init(drm);
	if (ret < 0)
		goto query_hw_fail;

	ret = malidp_init_sysfs(dev);
	if (ret)
		goto init_fail;

	/* Set the CRTC's port so that the encoder component can find it */
	malidp->crtc.port = of_graph_get_port_by_id(dev->of_node, 0);

	ret = component_bind_all(dev, drm);
	if (ret) {
		DRM_ERROR("Failed to bind all components\n");
		goto bind_fail;
	}

	/* We expect to have a maximum of two encoders one for the actual
	 * display and a virtual one for the writeback connector
	 */
	WARN_ON(drm->mode_config.num_encoder > 2);
	list_for_each_entry(encoder, &drm->mode_config.encoder_list, head) {
		encoder->possible_clones =
				(1 << drm->mode_config.num_encoder) - 1;
	}

	ret = malidp_irq_init(pdev);
	if (ret < 0)
		goto irq_init_fail;

	drm->irq_enabled = true;

	ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
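	/* reset vblank state so it matches the hardware, which is off after probe */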
	drm_crtc_vblank_reset(&malidp->crtc);
	if (ret < 0) {
		DRM_ERROR("failed to initialise vblank\n");
		goto vblank_fail;
	}
	pm_runtime_put(dev);

	drm_mode_config_reset(drm);

	drm_kms_helper_poll_init(drm);

	ret = drm_dev_register(drm, 0);
	if (ret)
		goto register_fail;

	drm_fbdev_generic_setup(drm, 32);

	return 0;

register_fail:
	drm_kms_helper_poll_fini(drm);
	pm_runtime_get_sync(dev);
vblank_fail:
	malidp_se_irq_fini(hwdev);
	malidp_de_irq_fini(hwdev);
	drm->irq_enabled = false;
irq_init_fail:
	drm_atomic_helper_shutdown(drm);
	component_unbind_all(dev, drm);
bind_fail:
	of_node_put(malidp->crtc.port);
	malidp->crtc.port = NULL;
init_fail:
	malidp_fini_sysfs(dev);
	malidp_fini(drm);
query_hw_fail:
	pm_runtime_put(dev);
	if (pm_runtime_enabled(dev))
		pm_runtime_disable(dev);
	else
		malidp_runtime_pm_suspend(dev);
	drm->dev_private = NULL;
	dev_set_drvdata(dev, NULL);
	drm_dev_put(drm);
alloc_fail:
	of_reserved_mem_device_release(dev);

	return ret;
}

static void malidp_unbind(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct malidp_drm *malidp = drm->dev_private;
	struct malidp_hw_device *hwdev = malidp->dev;

	drm_dev_unregister(drm);
	drm_kms_helper_poll_fini(drm);
	pm_runtime_get_sync(dev);
	drm_crtc_vblank_off(&malidp->crtc);
	malidp_se_irq_fini(hwdev);
	malidp_de_irq_fini(hwdev);
	drm->irq_enabled = false;
	drm_atomic_helper_shutdown(drm);
	component_unbind_all(dev, drm);
	of_node_put(malidp->crtc.port);
	malidp->crtc.port = NULL;
	malidp_fini_sysfs(dev);
	malidp_fini(drm);
	pm_runtime_put(dev);
	if (pm_runtime_enabled(dev))
		pm_runtime_disable(dev);
	else
		malidp_runtime_pm_suspend(dev);
	drm->dev_private = NULL;
	dev_set_drvdata(dev, NULL);
	drm_dev_put(drm);
	of_reserved_mem_device_release(dev);
}

static const struct component_master_ops malidp_master_ops = {
	.bind = malidp_bind,
	.unbind = malidp_unbind,
};

static int malidp_compare_dev(struct device *dev, void *data)
{
	struct device_node *np = data;

	return dev->of_node == np;
}

static int malidp_platform_probe(struct platform_device *pdev)
{
	struct device_node *port;
	struct component_match *match = NULL;

	if (!pdev->dev.of_node)
		return -ENODEV;

	/* there is only one output port inside each device, find it */
	port = of_graph_get_remote_node(pdev->dev.of_node, 0, 0);
	if (!port)
		return -ENODEV;

	drm_of_component_match_add(&pdev->dev, &match, malidp_compare_dev,
				   port);
	of_node_put(port);
	return component_master_add_with_match(&pdev->dev, &malidp_master_ops,
					       match);
}

static int malidp_platform_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &malidp_master_ops);
	return 0;
}

static int __maybe_unused malidp_pm_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	return drm_mode_config_helper_suspend(drm);
}

static int __maybe_unused malidp_pm_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	drm_mode_config_helper_resume(drm);

	return 0;
}

static int __maybe_unused malidp_pm_suspend_late(struct device *dev)
{
	if (!pm_runtime_status_suspended(dev)) {
		malidp_runtime_pm_suspend(dev);
		pm_runtime_set_suspended(dev);
	}
	return 0;
}

static int __maybe_unused malidp_pm_resume_early(struct device *dev)
{
	malidp_runtime_pm_resume(dev);
	pm_runtime_set_active(dev);
	return 0;
}

static const struct dev_pm_ops malidp_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(malidp_pm_suspend, malidp_pm_resume) \
	SET_LATE_SYSTEM_SLEEP_PM_OPS(malidp_pm_suspend_late, malidp_pm_resume_early) \
	SET_RUNTIME_PM_OPS(malidp_runtime_pm_suspend, malidp_runtime_pm_resume, NULL)
};

static struct platform_driver malidp_platform_driver = {
	.probe		= malidp_platform_probe,
	.remove		= malidp_platform_remove,
	.driver	= {
		.name = "mali-dp",
		.pm = &malidp_pm_ops,
		.of_match_table	= malidp_drm_of_match,
	},
};

module_platform_driver(malidp_platform_driver);

MODULE_AUTHOR("Liviu Dudau <Liviu.Dudau@arm.com>");
MODULE_DESCRIPTION("ARM Mali DP DRM driver");
MODULE_LICENSE("GPL v2");