/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/ascii85.h>
#include <linux/kernel.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include "adreno_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"

int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

	switch (param) {
	case MSM_PARAM_GPU_ID:
		*value = adreno_gpu->info->revn;
		return 0;
	case MSM_PARAM_GMEM_SIZE:
		*value = adreno_gpu->gmem;
		return 0;
	case MSM_PARAM_GMEM_BASE:
		*value = 0x100000;
		return 0;
	case MSM_PARAM_CHIP_ID:
		*value = adreno_gpu->rev.patchid |
				(adreno_gpu->rev.minor << 8) |
				(adreno_gpu->rev.major << 16) |
				(adreno_gpu->rev.core << 24);
		return 0;
	case MSM_PARAM_MAX_FREQ:
		*value = adreno_gpu->base.fast_rate;
		return 0;
	case MSM_PARAM_TIMESTAMP:
		if (adreno_gpu->funcs->get_timestamp) {
			int ret;

			pm_runtime_get_sync(&gpu->pdev->dev);
			ret = adreno_gpu->funcs->get_timestamp(gpu, value);
			pm_runtime_put_autosuspend(&gpu->pdev->dev);

			return ret;
		}
		return -EINVAL;
	case MSM_PARAM_NR_RINGS:
		*value = gpu->nr_rings;
		return 0;
	default:
		DBG("%s: invalid param: %u", gpu->name, param);
		return -EINVAL;
	}
}

const struct firmware *
adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
{
	struct drm_device *drm = adreno_gpu->base.dev;
	const struct firmware *fw = NULL;
	char *newname;
	int ret;

	newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname);
	if (!newname)
		return ERR_PTR(-ENOMEM);

	/*
	 * Try first to load from qcom/$fwfile using a direct load (to avoid
	 * a potential timeout waiting for usermode helper)
	 */
	if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) ||
	    (adreno_gpu->fwloc == FW_LOCATION_NEW)) {

		ret = request_firmware_direct(&fw, newname, drm->dev);
		if (!ret) {
			dev_info(drm->dev, "loaded %s from new location\n",
				newname);
			adreno_gpu->fwloc = FW_LOCATION_NEW;
			goto out;
		} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
			dev_err(drm->dev, "failed to load %s: %d\n",
				newname, ret);
			fw = ERR_PTR(ret);
			goto out;
		}
	}

	/*
	 * Then try the legacy location without qcom/ prefix
	 */
	if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) ||
	    (adreno_gpu->fwloc == FW_LOCATION_LEGACY)) {

		ret = request_firmware_direct(&fw, fwname, drm->dev);
		if (!ret) {
			dev_info(drm->dev, "loaded %s from legacy location\n",
				newname);
			adreno_gpu->fwloc = FW_LOCATION_LEGACY;
			goto out;
		} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
			dev_err(drm->dev, "failed to load %s: %d\n",
				fwname, ret);
			fw = ERR_PTR(ret);
			goto out;
		}
	}

	/*
	 * Finally fall back to request_firmware() for cases where the
	 * usermode helper is needed (I think mainly android)
	 */
	if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) ||
	    (adreno_gpu->fwloc == FW_LOCATION_HELPER)) {

		ret = request_firmware(&fw, newname, drm->dev);
		if (!ret) {
			dev_info(drm->dev, "loaded %s with helper\n",
				newname);
			adreno_gpu->fwloc = FW_LOCATION_HELPER;
			goto out;
		} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
			dev_err(drm->dev, "failed to load %s: %d\n",
				newname, ret);
			fw = ERR_PTR(ret);
			goto out;
		}
	}

	dev_err(drm->dev, "failed to load %s\n", fwname);
	fw = ERR_PTR(-ENOENT);
out:
	kfree(newname);
	return fw;
}
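
/*
 * Load every firmware file named in the adreno_info table for this GPU.
 * Entries that are already loaded are skipped, so calling this repeatedly
 * (adreno_hw_init() does so on every (re)init) is cheap and safe.  Returns
 * 0 on success or a negative errno if any request fails.
 */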
int adreno_load_fw(struct adreno_gpu *adreno_gpu)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++) {
		const struct firmware *fw;

		if (!adreno_gpu->info->fw[i])
			continue;

		/* Skip if the firmware has already been loaded */
		if (adreno_gpu->fw[i])
			continue;

		fw = adreno_request_fw(adreno_gpu, adreno_gpu->info->fw[i]);
		if (IS_ERR(fw))
			return PTR_ERR(fw);

		adreno_gpu->fw[i] = fw;
	}

	return 0;
}

struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu,
		const struct firmware *fw, u64 *iova)
{
	struct drm_gem_object *bo;
	void *ptr;

	ptr = msm_gem_kernel_new_locked(gpu->dev, fw->size - 4,
		MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova);

	if (IS_ERR(ptr))
		return ERR_CAST(ptr);

	memcpy(ptr, &fw->data[4], fw->size - 4);

	msm_gem_put_vaddr(bo);

	return bo;
}
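
/*
 * Common (re)initialization for all adreno generations: load any missing
 * firmware, map each ringbuffer into the GPU address space, reset the ring
 * software state and completed-fence seqno, and program the CP ringbuffer
 * registers.  Ring 0 is used for the initial RB_BASE/RB_RPTR_ADDR setup.
 */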
int adreno_hw_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int ret, i;

	DBG("%s", gpu->name);

	ret = adreno_load_fw(adreno_gpu);
	if (ret)
		return ret;

	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		if (!ring)
			continue;

		ret = msm_gem_get_iova(ring->bo, gpu->aspace, &ring->iova);
		if (ret) {
			ring->iova = 0;
			dev_err(gpu->dev->dev,
				"could not map ringbuffer %d: %d\n", i, ret);
			return ret;
		}

		ring->cur = ring->start;
		ring->next = ring->start;

		/* reset completed fence seqno: */
		ring->memptrs->fence = ring->seqno;
		ring->memptrs->rptr = 0;
	}

	/*
	 * Setup REG_CP_RB_CNTL.  The same value is used across targets (with
	 * the exception of A430 that disables the RPTR shadow) - the calculation
	 * for the ringbuffer size and block size is moved to msm_gpu.h for the
	 * pre-processor to deal with and the A430 variant is ORed in here
	 */
	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL,
		MSM_GPU_RB_CNTL_DEFAULT |
		(adreno_is_a430(adreno_gpu) ? AXXX_CP_RB_CNTL_NO_UPDATE : 0));

	/* Setup ringbuffer address - use ringbuffer[0] for GPU init */
	adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_BASE,
		REG_ADRENO_CP_RB_BASE_HI, gpu->rb[0]->iova);

	if (!adreno_is_a430(adreno_gpu)) {
		adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
			REG_ADRENO_CP_RB_RPTR_ADDR_HI,
			rbmemptr(gpu->rb[0], rptr));
	}

	return 0;
}

/* Use this helper to read rptr, since a430 doesn't update rptr in memory */
static uint32_t get_rptr(struct adreno_gpu *adreno_gpu,
		struct msm_ringbuffer *ring)
{
	if (adreno_is_a430(adreno_gpu))
		return ring->memptrs->rptr = adreno_gpu_read(
			adreno_gpu, REG_ADRENO_CP_RB_RPTR);
	else
		return ring->memptrs->rptr;
}

struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu)
{
	return gpu->rb[0];
}

void adreno_recover(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;
	int ret;

	// XXX pm-runtime??  we *need* the device to be off after this
	// so maybe continuing to call ->pm_suspend/resume() is better?

	gpu->funcs->pm_suspend(gpu);
	gpu->funcs->pm_resume(gpu);

	ret = msm_gpu_hw_init(gpu);
	if (ret) {
		dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
		/* hmm, oh well? */
	}
}
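
/*
 * Write a submit into its ringbuffer.  Roughly (a sketch, not an exhaustive
 * packet list), the stream emitted per submit is:
 *
 *   CP_INDIRECT_BUFFER_PFD/PFE <iova> <size>      - one per cmd buffer
 *   write submit->seqno to CP_SCRATCH_REG2        - handy when matching dumps
 *   CP_EVENT_WRITE(HLSQ_FLUSH) + CP_WAIT_FOR_IDLE - a3xx/a4xx only
 *   CP_EVENT_WRITE(CACHE_FLUSH_TS | BIT(31)) <fence addr> <seqno>
 *
 * The final event writes the seqno to the ring's fence memptr and raises
 * the CACHE_FLUSH_TS IRQ so completed fences can be retired.
 */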
void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
		struct msm_file_private *ctx)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct msm_drm_private *priv = gpu->dev->dev_private;
	struct msm_ringbuffer *ring = submit->ring;
	unsigned i;

	for (i = 0; i < submit->nr_cmds; i++) {
		switch (submit->cmd[i].type) {
		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
			/* ignore IB-targets */
			break;
		case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
			/* ignore if there has not been a ctx switch: */
			if (priv->lastctx == ctx)
				break;
			/* fall-thru */
		case MSM_SUBMIT_CMD_BUF:
			OUT_PKT3(ring, adreno_is_a430(adreno_gpu) ?
				CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
			OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
			OUT_RING(ring, submit->cmd[i].size);
			OUT_PKT2(ring);
			break;
		}
	}

	OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
	OUT_RING(ring, submit->seqno);

	if (adreno_is_a3xx(adreno_gpu) || adreno_is_a4xx(adreno_gpu)) {
		/* Flush HLSQ lazy updates to make sure there is nothing
		 * pending for indirect loads after the timestamp has
		 * passed:
		 */
		OUT_PKT3(ring, CP_EVENT_WRITE, 1);
		OUT_RING(ring, HLSQ_FLUSH);

		OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
		OUT_RING(ring, 0x00000000);
	}

	/* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
	OUT_PKT3(ring, CP_EVENT_WRITE, 3);
	OUT_RING(ring, CACHE_FLUSH_TS | BIT(31));
	OUT_RING(ring, rbmemptr(ring, fence));
	OUT_RING(ring, submit->seqno);

#if 0
	if (adreno_is_a3xx(adreno_gpu)) {
		/* Dummy set-constant to trigger context rollover */
		OUT_PKT3(ring, CP_SET_CONSTANT, 2);
		OUT_RING(ring, CP_REG(REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG));
		OUT_RING(ring, 0x00000000);
	}
#endif

	gpu->funcs->flush(gpu, ring);
}

void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	uint32_t wptr;

	/* Copy the shadow to the actual register */
	ring->cur = ring->next;

	/*
	 * Mask wptr value that we calculate to fit in the HW range.  This is
	 * to account for the possibility that the last command fit exactly into
	 * the ringbuffer and rb->next hasn't wrapped to zero yet
	 */
	wptr = get_wptr(ring);

	/* ensure writes to ringbuffer have hit system memory: */
	mb();

	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_WPTR, wptr);
}

bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	uint32_t wptr = get_wptr(ring);

	/* wait for CP to drain ringbuffer: */
	if (!spin_until(get_rptr(adreno_gpu, ring) == wptr))
		return true;

	/* TODO maybe we need to reset GPU here to recover from hang? */
	DRM_ERROR("%s: timeout waiting to drain ringbuffer %d rptr/wptr = %X/%X\n",
		gpu->name, ring->id, get_rptr(adreno_gpu, ring), wptr);

	return false;
}
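
/*
 * Capture a snapshot of the GPU state for a later crash dump: per-ring
 * fence/seqno/rptr/wptr, the ring contents (at least 'wptr' dwords,
 * extended to the last non-zero dword), and an offset/value pair for every
 * register in the target's register list.  The snapshot is refcounted;
 * drop the reference with adreno_gpu_state_put().
 */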
int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int i, count = 0;

	kref_init(&state->ref);

	ktime_get_real_ts64(&state->time);

	for (i = 0; i < gpu->nr_rings; i++) {
		int size = 0, j;

		state->ring[i].fence = gpu->rb[i]->memptrs->fence;
		state->ring[i].iova = gpu->rb[i]->iova;
		state->ring[i].seqno = gpu->rb[i]->seqno;
		state->ring[i].rptr = get_rptr(adreno_gpu, gpu->rb[i]);
		state->ring[i].wptr = get_wptr(gpu->rb[i]);

		/* Copy at least 'wptr' dwords of the data */
		size = state->ring[i].wptr;

		/* After wptr find the last non zero dword to save space */
		for (j = state->ring[i].wptr; j < MSM_GPU_RINGBUFFER_SZ >> 2; j++)
			if (gpu->rb[i]->start[j])
				size = j + 1;

		if (size) {
			state->ring[i].data = kmalloc(size << 2, GFP_KERNEL);
			if (state->ring[i].data) {
				memcpy(state->ring[i].data, gpu->rb[i]->start, size << 2);
				state->ring[i].data_size = size << 2;
			}
		}
	}

	/* Count the number of registers */
	for (i = 0; adreno_gpu->registers[i] != ~0; i += 2)
		count += adreno_gpu->registers[i + 1] -
			adreno_gpu->registers[i] + 1;

	state->registers = kcalloc(count * 2, sizeof(u32), GFP_KERNEL);
	if (state->registers) {
		int pos = 0;

		for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
			u32 start = adreno_gpu->registers[i];
			u32 end = adreno_gpu->registers[i + 1];
			u32 addr;

			for (addr = start; addr <= end; addr++) {
				state->registers[pos++] = addr;
				state->registers[pos++] = gpu_read(gpu, addr);
			}
		}

		state->nr_registers = count;
	}

	return 0;
}

void adreno_gpu_state_destroy(struct msm_gpu_state *state)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(state->ring); i++)
		kfree(state->ring[i].data);

	for (i = 0; state->bos && i < state->nr_bos; i++)
		kvfree(state->bos[i].data);

	kfree(state->bos);
	kfree(state->comm);
	kfree(state->cmd);
	kfree(state->registers);
}

static void adreno_gpu_state_kref_destroy(struct kref *kref)
{
	struct msm_gpu_state *state = container_of(kref,
		struct msm_gpu_state, ref);

	adreno_gpu_state_destroy(state);
	kfree(state);
}

int adreno_gpu_state_put(struct msm_gpu_state *state)
{
	if (IS_ERR_OR_NULL(state))
		return 1;

	return kref_put(&state->ref, adreno_gpu_state_kref_destroy);
}

#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
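
/*
 * Dump a buffer as an ascii85-encoded "data:" block, trimmed to its last
 * non-zero dword.  The indentation matches the YAML-ish layout emitted by
 * adreno_show(), roughly (a sketch, not output from a real dump):
 *
 *     data: !!ascii85 |
 *      <ascii85-encoded payload>
 */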
static void adreno_show_object(struct drm_printer *p, u32 *ptr, int len)
{
	char out[ASCII85_BUFSZ];
	long l, datalen, i;

	if (!ptr || !len)
		return;

	/*
	 * Only dump the non-zero part of the buffer - rarely will any data
	 * completely fill the entire allocated size of the buffer
	 */
	for (datalen = 0, i = 0; i < len >> 2; i++) {
		if (ptr[i])
			datalen = (i << 2) + 1;
	}

	/* Skip printing the object if it is empty */
	if (datalen == 0)
		return;

	l = ascii85_encode_len(datalen);

	drm_puts(p, "    data: !!ascii85 |\n");
	drm_puts(p, "     ");

	for (i = 0; i < l; i++)
		drm_puts(p, ascii85_encode(ptr[i], out));

	drm_puts(p, "\n");
}

void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
		struct drm_printer *p)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int i;

	if (IS_ERR_OR_NULL(state))
		return;

	drm_printf(p, "revision: %d (%d.%d.%d.%d)\n",
			adreno_gpu->info->revn, adreno_gpu->rev.core,
			adreno_gpu->rev.major, adreno_gpu->rev.minor,
			adreno_gpu->rev.patchid);

	drm_printf(p, "rbbm-status: 0x%08x\n", state->rbbm_status);

	drm_puts(p, "ringbuffer:\n");

	for (i = 0; i < gpu->nr_rings; i++) {
		drm_printf(p, "  - id: %d\n", i);
		drm_printf(p, "    iova: 0x%016llx\n", state->ring[i].iova);
		drm_printf(p, "    last-fence: %d\n", state->ring[i].seqno);
		drm_printf(p, "    retired-fence: %d\n", state->ring[i].fence);
		drm_printf(p, "    rptr: %d\n", state->ring[i].rptr);
		drm_printf(p, "    wptr: %d\n", state->ring[i].wptr);
		drm_printf(p, "    size: %d\n", MSM_GPU_RINGBUFFER_SZ);

		adreno_show_object(p, state->ring[i].data,
			state->ring[i].data_size);
	}

	if (state->bos) {
		drm_puts(p, "bos:\n");

		for (i = 0; i < state->nr_bos; i++) {
			drm_printf(p, "  - iova: 0x%016llx\n",
				state->bos[i].iova);
			drm_printf(p, "    size: %zd\n", state->bos[i].size);

			adreno_show_object(p, state->bos[i].data,
				state->bos[i].size);
		}
	}

	drm_puts(p, "registers:\n");

	for (i = 0; i < state->nr_registers; i++) {
		drm_printf(p, "  - { offset: 0x%04x, value: 0x%08x }\n",
			state->registers[i * 2] << 2,
			state->registers[(i * 2) + 1]);
	}
}
#endif

/* Dump common gpu status and scratch registers on any hang, to make
 * the hangcheck logs more useful.  The scratch registers seem always
 * safe to read when GPU has hung (unlike some other regs, depending
 * on how the GPU hung), and they are useful to match up to cmdstream
 * dumps when debugging hangs:
 */
void adreno_dump_info(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int i;

	printk("revision: %d (%d.%d.%d.%d)\n",
			adreno_gpu->info->revn, adreno_gpu->rev.core,
			adreno_gpu->rev.major, adreno_gpu->rev.minor,
			adreno_gpu->rev.patchid);

	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];

		printk("rb %d: fence: %d/%d\n", i,
			ring->memptrs->fence,
			ring->seqno);

		printk("rptr: %d\n", get_rptr(adreno_gpu, ring));
		printk("rb wptr: %d\n", get_wptr(ring));
	}
}

/* would be nice to not have to duplicate the _show() stuff with printk(): */
void adreno_dump(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int i;

	/* dump these out in a form that can be parsed by demsm: */
	printk("IO:region %s 00000000 00020000\n", gpu->name);
	for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
		uint32_t start = adreno_gpu->registers[i];
		uint32_t end = adreno_gpu->registers[i+1];
		uint32_t addr;

		for (addr = start; addr <= end; addr++) {
			uint32_t val = gpu_read(gpu, addr);
			printk("IO:R %08x %08x\n", addr<<2, val);
		}
	}
}
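
/*
 * (rptr + (size - 1) - wptr) % size leaves one dword permanently unused so
 * that a completely full ring (which would otherwise have wptr == rptr) can
 * never be confused with an empty one; e.g. when rptr == wptr the ring
 * reports size - 1 free dwords.
 */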
static uint32_t ring_freewords(struct msm_ringbuffer *ring)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(ring->gpu);
	uint32_t size = MSM_GPU_RINGBUFFER_SZ >> 2;
	/* Use ring->next to calculate free size */
	uint32_t wptr = ring->next - ring->start;
	uint32_t rptr = get_rptr(adreno_gpu, ring);
	return (rptr + (size - 1) - wptr) % size;
}

void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords)
{
	if (spin_until(ring_freewords(ring) >= ndwords))
		DRM_DEV_ERROR(ring->gpu->dev->dev,
			"timeout waiting for space in ringbuffer %d\n",
			ring->id);
}

/* Get legacy powerlevels from qcom,gpu-pwrlevels and populate the opp table */
static int adreno_get_legacy_pwrlevels(struct device *dev)
{
	struct device_node *child, *node;
	int ret;

	node = of_find_compatible_node(dev->of_node, NULL,
		"qcom,gpu-pwrlevels");
	if (!node) {
		dev_err(dev, "Could not find the GPU powerlevels\n");
		return -ENXIO;
	}

	for_each_child_of_node(node, child) {
		unsigned int val;

		ret = of_property_read_u32(child, "qcom,gpu-freq", &val);
		if (ret)
			continue;

		/*
		 * Skip the intentionally bogus clock value found at the bottom
		 * of most legacy frequency tables
		 */
		if (val != 27000000)
			dev_pm_opp_add(dev, val, 0);
	}

	/* drop the reference taken by of_find_compatible_node() */
	of_node_put(node);

	return 0;
}

static int adreno_get_pwrlevels(struct device *dev,
		struct msm_gpu *gpu)
{
	unsigned long freq = ULONG_MAX;
	struct dev_pm_opp *opp;
	int ret;

	gpu->fast_rate = 0;

	/* You down with OPP? */
	if (!of_find_property(dev->of_node, "operating-points-v2", NULL))
		ret = adreno_get_legacy_pwrlevels(dev);
	else {
		ret = dev_pm_opp_of_add_table(dev);
		if (ret)
			dev_err(dev, "Unable to set the OPP table\n");
	}

	if (!ret) {
		/* Find the fastest defined rate */
		opp = dev_pm_opp_find_freq_floor(dev, &freq);
		if (!IS_ERR(opp)) {
			gpu->fast_rate = freq;
			dev_pm_opp_put(opp);
		}
	}

	if (!gpu->fast_rate) {
		dev_warn(dev,
			"Could not find a clock rate. Using a reasonable default\n");
		/* Pick a suitably safe clock speed for any target */
		gpu->fast_rate = 200000000;
	}

	DBG("fast_rate=%u, slow_rate=27000000", gpu->fast_rate);

	return 0;
}

int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct adreno_gpu *adreno_gpu,
		const struct adreno_gpu_funcs *funcs, int nr_rings)
{
	struct adreno_platform_config *config = pdev->dev.platform_data;
	struct msm_gpu_config adreno_gpu_config = { 0 };
	struct msm_gpu *gpu = &adreno_gpu->base;

	adreno_gpu->funcs = funcs;
	adreno_gpu->info = adreno_info(config->rev);
	adreno_gpu->gmem = adreno_gpu->info->gmem;
	adreno_gpu->revn = adreno_gpu->info->revn;
	adreno_gpu->rev = config->rev;

	adreno_gpu_config.ioname = "kgsl_3d0_reg_memory";
	adreno_gpu_config.irqname = "kgsl_3d0_irq";

	adreno_gpu_config.va_start = SZ_16M;
	adreno_gpu_config.va_end = 0xffffffff;

	adreno_gpu_config.nr_rings = nr_rings;

	adreno_get_pwrlevels(&pdev->dev, gpu);

	pm_runtime_set_autosuspend_delay(&pdev->dev,
		adreno_gpu->info->inactive_period);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	return msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
			adreno_gpu->info->name, &adreno_gpu_config);
}

void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++)
		release_firmware(adreno_gpu->fw[i]);

	msm_gpu_cleanup(&adreno_gpu->base);
}