/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "msm_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "msm_fence.h"


/*
 * Power Management:
 */

#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
#include <mach/board.h>
static void bs_init(struct msm_gpu *gpu)
{
	if (gpu->bus_scale_table) {
		gpu->bsc = msm_bus_scale_register_client(gpu->bus_scale_table);
		DBG("bus scale client: %08x", gpu->bsc);
	}
}

static void bs_fini(struct msm_gpu *gpu)
{
	if (gpu->bsc) {
		msm_bus_scale_unregister_client(gpu->bsc);
		gpu->bsc = 0;
	}
}

static void bs_set(struct msm_gpu *gpu, int idx)
{
	if (gpu->bsc) {
		DBG("set bus scaling: %d", idx);
		msm_bus_scale_client_update_request(gpu->bsc, idx);
	}
}
#else
static void bs_init(struct msm_gpu *gpu) {}
static void bs_fini(struct msm_gpu *gpu) {}
static void bs_set(struct msm_gpu *gpu, int idx) {}
#endif

static int enable_pwrrail(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;
	int ret = 0;

	if (gpu->gpu_reg) {
		ret = regulator_enable(gpu->gpu_reg);
		if (ret) {
			dev_err(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
			return ret;
		}
	}

	if (gpu->gpu_cx) {
		ret = regulator_enable(gpu->gpu_cx);
		if (ret) {
			dev_err(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
			return ret;
		}
	}

	return 0;
}

static int disable_pwrrail(struct msm_gpu *gpu)
{
	if (gpu->gpu_cx)
		regulator_disable(gpu->gpu_cx);
	if (gpu->gpu_reg)
		regulator_disable(gpu->gpu_reg);
	return 0;
}

static int enable_clk(struct msm_gpu *gpu)
{
	int i;

	if (gpu->core_clk && gpu->fast_rate)
		clk_set_rate(gpu->core_clk, gpu->fast_rate);

	/* Set the RBBM timer rate to 19.2Mhz */
	if (gpu->rbbmtimer_clk)
		clk_set_rate(gpu->rbbmtimer_clk, 19200000);

	for (i = gpu->nr_clocks - 1; i >= 0; i--)
		if (gpu->grp_clks[i])
			clk_prepare(gpu->grp_clks[i]);

	for (i = gpu->nr_clocks - 1; i >= 0; i--)
		if (gpu->grp_clks[i])
			clk_enable(gpu->grp_clks[i]);

	return 0;
}

static int disable_clk(struct msm_gpu *gpu)
{
	int i;

	for (i = gpu->nr_clocks - 1; i >= 0; i--)
		if (gpu->grp_clks[i])
			clk_disable(gpu->grp_clks[i]);

	for (i = gpu->nr_clocks - 1; i >= 0; i--)
		if (gpu->grp_clks[i])
			clk_unprepare(gpu->grp_clks[i]);

	/*
	 * Set the clock to a deliberately low rate. On older targets the clock
	 * speed had to be non zero to avoid problems. On newer targets this
	 * will be rounded down to zero anyway so it all works out.
	 */
	if (gpu->core_clk)
		clk_set_rate(gpu->core_clk, 27000000);

	if (gpu->rbbmtimer_clk)
		clk_set_rate(gpu->rbbmtimer_clk, 0);

	return 0;
}

static int enable_axi(struct msm_gpu *gpu)
{
	if (gpu->ebi1_clk)
		clk_prepare_enable(gpu->ebi1_clk);
	if (gpu->bus_freq)
		bs_set(gpu, gpu->bus_freq);
	return 0;
}

static int disable_axi(struct msm_gpu *gpu)
{
	if (gpu->ebi1_clk)
		clk_disable_unprepare(gpu->ebi1_clk);
	if (gpu->bus_freq)
		bs_set(gpu, 0);
	return 0;
}

int msm_gpu_pm_resume(struct msm_gpu *gpu)
{
	int ret;

	DBG("%s", gpu->name);

	ret = enable_pwrrail(gpu);
	if (ret)
		return ret;

	ret = enable_clk(gpu);
	if (ret)
		return ret;

	ret = enable_axi(gpu);
	if (ret)
		return ret;

	gpu->needs_hw_init = true;

	return 0;
}

int msm_gpu_pm_suspend(struct msm_gpu *gpu)
{
	int ret;

	DBG("%s", gpu->name);

	ret = disable_axi(gpu);
	if (ret)
		return ret;

	ret = disable_clk(gpu);
	if (ret)
		return ret;

	ret = disable_pwrrail(gpu);
	if (ret)
		return ret;

	return 0;
}

int msm_gpu_hw_init(struct msm_gpu *gpu)
{
	int ret;

	WARN_ON(!mutex_is_locked(&gpu->dev->struct_mutex));

	if (!gpu->needs_hw_init)
		return 0;

	disable_irq(gpu->irq);
	ret = gpu->funcs->hw_init(gpu);
	if (!ret)
		gpu->needs_hw_init = false;
	enable_irq(gpu->irq);

	return ret;
}

/*
 * Hangcheck detection for locked gpu:
 */

static void retire_submits(struct msm_gpu *gpu);

static void recover_worker(struct work_struct *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
	struct drm_device *dev = gpu->dev;
	struct msm_gem_submit *submit;
	uint32_t fence = gpu->funcs->last_fence(gpu);

	msm_update_fence(gpu->fctx, fence + 1);

	mutex_lock(&dev->struct_mutex);

	dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name);
	list_for_each_entry(submit, &gpu->submit_list, node) {
		if (submit->fence->seqno == (fence + 1)) {
			struct task_struct *task;

			rcu_read_lock();
			task = pid_task(submit->pid, PIDTYPE_PID);
			if (task) {
				dev_err(dev->dev, "%s: offending task: %s\n",
						gpu->name, task->comm);
			}
			rcu_read_unlock();
			break;
		}
	}

	if (msm_gpu_active(gpu)) {
		/* retire completed submits, plus the one that hung: */
		retire_submits(gpu);

		pm_runtime_get_sync(&gpu->pdev->dev);
		gpu->funcs->recover(gpu);
		pm_runtime_put_sync(&gpu->pdev->dev);

		/* replay the remaining submits after the one that hung: */
		list_for_each_entry(submit, &gpu->submit_list, node) {
			gpu->funcs->submit(gpu, submit, NULL);
		}
	}

	mutex_unlock(&dev->struct_mutex);

	msm_gpu_retire(gpu);
}

static void hangcheck_timer_reset(struct msm_gpu *gpu)
{
	DBG("%s", gpu->name);
	mod_timer(&gpu->hangcheck_timer,
			round_jiffies_up(jiffies + DRM_MSM_HANGCHECK_JIFFIES));
}

static void hangcheck_handler(unsigned long data)
{
	struct msm_gpu *gpu = (struct msm_gpu *)data;
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	uint32_t fence = gpu->funcs->last_fence(gpu);

	if (fence != gpu->hangcheck_fence) {
		/* some progress has been made.. ya! */
		gpu->hangcheck_fence = fence;
	} else if (fence < gpu->fctx->last_fence) {
		/* no progress and not done.. hung! */
		gpu->hangcheck_fence = fence;
		dev_err(dev->dev, "%s: hangcheck detected gpu lockup!\n",
				gpu->name);
		dev_err(dev->dev, "%s: completed fence: %u\n",
				gpu->name, fence);
		dev_err(dev->dev, "%s: submitted fence: %u\n",
				gpu->name, gpu->fctx->last_fence);
		queue_work(priv->wq, &gpu->recover_work);
	}

	/* if still more pending work, reset the hangcheck timer: */
	if (gpu->fctx->last_fence > gpu->hangcheck_fence)
		hangcheck_timer_reset(gpu);

	/* workaround for missing irq: */
	queue_work(priv->wq, &gpu->retire_work);
}

/*
 * Performance Counters:
 */

/* called under perf_lock */
static int update_hw_cntrs(struct msm_gpu *gpu, uint32_t ncntrs, uint32_t *cntrs)
{
	uint32_t current_cntrs[ARRAY_SIZE(gpu->last_cntrs)];
	int i, n = min(ncntrs, gpu->num_perfcntrs);

	/* read current values: */
	for (i = 0; i < gpu->num_perfcntrs; i++)
		current_cntrs[i] = gpu_read(gpu, gpu->perfcntrs[i].sample_reg);

	/* update cntrs: */
	for (i = 0; i < n; i++)
		cntrs[i] = current_cntrs[i] - gpu->last_cntrs[i];

	/* save current values: */
	for (i = 0; i < gpu->num_perfcntrs; i++)
		gpu->last_cntrs[i] = current_cntrs[i];

	return n;
}

static void update_sw_cntrs(struct msm_gpu *gpu)
{
	ktime_t time;
	uint32_t elapsed;
	unsigned long flags;

	spin_lock_irqsave(&gpu->perf_lock, flags);
	if (!gpu->perfcntr_active)
		goto out;

	time = ktime_get();
	elapsed = ktime_to_us(ktime_sub(time, gpu->last_sample.time));

	gpu->totaltime += elapsed;
	if (gpu->last_sample.active)
		gpu->activetime += elapsed;

	gpu->last_sample.active = msm_gpu_active(gpu);
	gpu->last_sample.time = time;

out:
	spin_unlock_irqrestore(&gpu->perf_lock, flags);
}

void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
{
	unsigned long flags;

	pm_runtime_get_sync(&gpu->pdev->dev);

	spin_lock_irqsave(&gpu->perf_lock, flags);
	/* we could dynamically enable/disable perfcntr registers too.. */
	gpu->last_sample.active = msm_gpu_active(gpu);
	gpu->last_sample.time = ktime_get();
	gpu->activetime = gpu->totaltime = 0;
	gpu->perfcntr_active = true;
	update_hw_cntrs(gpu, 0, NULL);
	spin_unlock_irqrestore(&gpu->perf_lock, flags);
}

void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
{
	gpu->perfcntr_active = false;
	pm_runtime_put_sync(&gpu->pdev->dev);
}

/* returns -errno or # of cntrs sampled */
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
		uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&gpu->perf_lock, flags);

	if (!gpu->perfcntr_active) {
		ret = -EINVAL;
		goto out;
	}

	*activetime = gpu->activetime;
	*totaltime = gpu->totaltime;

	gpu->activetime = gpu->totaltime = 0;

	ret = update_hw_cntrs(gpu, ncntrs, cntrs);

out:
	spin_unlock_irqrestore(&gpu->perf_lock, flags);

	return ret;
}

/*
 * Cmdstream submission/retirement:
 */

static void retire_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
	int i;

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		/* move to inactive: */
		msm_gem_move_to_inactive(&msm_obj->base);
		msm_gem_put_iova(&msm_obj->base, gpu->aspace);
		drm_gem_object_unreference(&msm_obj->base);
	}

	pm_runtime_mark_last_busy(&gpu->pdev->dev);
	pm_runtime_put_autosuspend(&gpu->pdev->dev);
	msm_gem_submit_free(submit);
}

static void retire_submits(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	while (!list_empty(&gpu->submit_list)) {
		struct msm_gem_submit *submit;

		submit = list_first_entry(&gpu->submit_list,
				struct msm_gem_submit, node);

		if (dma_fence_is_signaled(submit->fence)) {
			retire_submit(gpu, submit);
		} else {
			break;
		}
	}
}

static void retire_worker(struct work_struct *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
	struct drm_device *dev = gpu->dev;
	uint32_t fence = gpu->funcs->last_fence(gpu);

	msm_update_fence(gpu->fctx, fence);

	mutex_lock(&dev->struct_mutex);
	retire_submits(gpu);
	mutex_unlock(&dev->struct_mutex);
}

/* call from irq handler to schedule work to retire bo's */
void msm_gpu_retire(struct msm_gpu *gpu)
{
	struct msm_drm_private *priv = gpu->dev->dev_private;
	queue_work(priv->wq, &gpu->retire_work);
	update_sw_cntrs(gpu);
}

/* add bo's to gpu's ring, and kick gpu: */
void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
		struct msm_file_private *ctx)
{
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	int i;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	pm_runtime_get_sync(&gpu->pdev->dev);

	msm_gpu_hw_init(gpu);

	list_add_tail(&submit->node, &gpu->submit_list);

	msm_rd_dump_submit(submit);

	update_sw_cntrs(gpu);

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;
		uint64_t iova;

		/* can't happen yet.. but when we add 2d support we'll have
		 * to deal w/ cross-ring synchronization:
		 */
		WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu));

		/* submit takes a reference to the bo and iova until retired: */
		drm_gem_object_reference(&msm_obj->base);
		msm_gem_get_iova(&msm_obj->base,
				submit->gpu->aspace, &iova);

		if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
			msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
		else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
			msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence);
	}

	gpu->funcs->submit(gpu, submit, ctx);
	priv->lastctx = ctx;

	hangcheck_timer_reset(gpu);
}

/*
 * Init/Cleanup:
 */

static irqreturn_t irq_handler(int irq, void *data)
{
	struct msm_gpu *gpu = data;
	return gpu->funcs->irq(gpu);
}

static struct clk *get_clock(struct device *dev, const char *name)
{
	struct clk *clk = devm_clk_get(dev, name);

	return IS_ERR(clk) ? NULL : clk;
}

static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
{
	struct device *dev = &pdev->dev;
	struct property *prop;
	const char *name;
	int i = 0;

	gpu->nr_clocks = of_property_count_strings(dev->of_node, "clock-names");
	if (gpu->nr_clocks < 1) {
		gpu->nr_clocks = 0;
		return 0;
	}

	gpu->grp_clks = devm_kcalloc(dev, sizeof(struct clk *), gpu->nr_clocks,
		GFP_KERNEL);
	if (!gpu->grp_clks)
		return -ENOMEM;

	of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
		gpu->grp_clks[i] = get_clock(dev, name);

		/* Remember the key clocks that we need to control later */
		if (!strcmp(name, "core") || !strcmp(name, "core_clk"))
			gpu->core_clk = gpu->grp_clks[i];
		else if (!strcmp(name, "rbbmtimer") || !strcmp(name, "rbbmtimer_clk"))
			gpu->rbbmtimer_clk = gpu->grp_clks[i];

		++i;
	}

	return 0;
}

static struct msm_gem_address_space *
msm_gpu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev,
		uint64_t va_start, uint64_t va_end)
{
	struct iommu_domain *iommu;
	struct msm_gem_address_space *aspace;
	int ret;

	/*
	 * Setup IOMMU.. eventually we will (I think) do this once per context
	 * and have separate page tables per context. For now, to keep things
	 * simple and to get something working, just use a single address space:
	 */
	iommu = iommu_domain_alloc(&platform_bus_type);
	if (!iommu)
		return NULL;

	iommu->geometry.aperture_start = va_start;
	iommu->geometry.aperture_end = va_end;

	dev_info(gpu->dev->dev, "%s: using IOMMU\n", gpu->name);

	aspace = msm_gem_address_space_create(&pdev->dev, iommu, "gpu");
	if (IS_ERR(aspace)) {
		dev_err(gpu->dev->dev, "failed to init iommu: %ld\n",
			PTR_ERR(aspace));
		iommu_domain_free(iommu);
		return ERR_CAST(aspace);
	}

	ret = aspace->mmu->funcs->attach(aspace->mmu, NULL, 0);
	if (ret) {
		msm_gem_address_space_put(aspace);
		return ERR_PTR(ret);
	}

	return aspace;
}

int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
		const char *name, struct msm_gpu_config *config)
{
	int ret;

	if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
		gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);

	gpu->dev = drm;
	gpu->funcs = funcs;
	gpu->name = name;
	gpu->fctx = msm_fence_context_alloc(drm, name);
	if (IS_ERR(gpu->fctx)) {
		ret = PTR_ERR(gpu->fctx);
		gpu->fctx = NULL;
		goto fail;
	}

	INIT_LIST_HEAD(&gpu->active_list);
	INIT_WORK(&gpu->retire_work, retire_worker);
	INIT_WORK(&gpu->recover_work, recover_worker);

	INIT_LIST_HEAD(&gpu->submit_list);

	setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
			(unsigned long)gpu);

	spin_lock_init(&gpu->perf_lock);


	/* Map registers: */
	gpu->mmio = msm_ioremap(pdev, config->ioname, name);
	if (IS_ERR(gpu->mmio)) {
		ret = PTR_ERR(gpu->mmio);
		goto fail;
	}

	/* Get Interrupt: */
	gpu->irq = platform_get_irq_byname(pdev, config->irqname);
	if (gpu->irq < 0) {
		ret = gpu->irq;
		dev_err(drm->dev, "failed to get irq: %d\n", ret);
		goto fail;
	}

	ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
			IRQF_TRIGGER_HIGH, gpu->name, gpu);
	if (ret) {
		dev_err(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
		goto fail;
	}

	ret = get_clocks(pdev, gpu);
	if (ret)
		goto fail;

	gpu->ebi1_clk = msm_clk_get(pdev, "bus");
	DBG("ebi1_clk: %p", gpu->ebi1_clk);
	if (IS_ERR(gpu->ebi1_clk))
		gpu->ebi1_clk = NULL;

	/* Acquire regulators: */
	gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
	DBG("gpu_reg: %p", gpu->gpu_reg);
	if (IS_ERR(gpu->gpu_reg))
		gpu->gpu_reg = NULL;

	gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
	DBG("gpu_cx: %p", gpu->gpu_cx);
	if (IS_ERR(gpu->gpu_cx))
		gpu->gpu_cx = NULL;

	gpu->pdev = pdev;
	platform_set_drvdata(pdev, gpu);

	bs_init(gpu);

	gpu->aspace = msm_gpu_create_address_space(gpu, pdev,
		config->va_start, config->va_end);

	if (gpu->aspace == NULL)
		dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
	else if (IS_ERR(gpu->aspace)) {
		ret = PTR_ERR(gpu->aspace);
		goto fail;
	}

	/* Create ringbuffer: */
	gpu->rb = msm_ringbuffer_new(gpu, config->ringsz);
	if (IS_ERR(gpu->rb)) {
		ret = PTR_ERR(gpu->rb);
		gpu->rb = NULL;
		dev_err(drm->dev, "could not create ringbuffer: %d\n", ret);
		goto fail;
	}

	return 0;

fail:
	platform_set_drvdata(pdev, NULL);
	return ret;
}

void msm_gpu_cleanup(struct msm_gpu *gpu)
{
	DBG("%s", gpu->name);

	WARN_ON(!list_empty(&gpu->active_list));

	bs_fini(gpu);

	if (gpu->rb) {
		if (gpu->rb_iova)
			msm_gem_put_iova(gpu->rb->bo, gpu->aspace);
		msm_ringbuffer_destroy(gpu->rb);
	}
	if (gpu->aspace) {
		gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
			NULL, 0);
		msm_gem_address_space_put(gpu->aspace);
	}
}