1 // SPDX-License-Identifier: GPL-2.0 2 // 3 // Copyright (c) 2019 MediaTek Inc. 4 5 #include <asm/barrier.h> 6 #include <linux/clk.h> 7 #include <linux/dma-mapping.h> 8 #include <linux/err.h> 9 #include <linux/interrupt.h> 10 #include <linux/kernel.h> 11 #include <linux/module.h> 12 #include <linux/of_address.h> 13 #include <linux/of_platform.h> 14 #include <linux/of_reserved_mem.h> 15 #include <linux/platform_device.h> 16 #include <linux/remoteproc.h> 17 #include <linux/remoteproc/mtk_scp.h> 18 #include <linux/rpmsg/mtk_rpmsg.h> 19 20 #include "mtk_common.h" 21 #include "remoteproc_internal.h" 22 23 #define MAX_CODE_SIZE 0x500000 24 #define SCP_FW_END 0x7C000 25 26 /** 27 * scp_get() - get a reference to SCP. 28 * 29 * @pdev: the platform device of the module requesting SCP platform 30 * device for using SCP API. 31 * 32 * Return: Return NULL if failed. otherwise reference to SCP. 33 **/ 34 struct mtk_scp *scp_get(struct platform_device *pdev) 35 { 36 struct device *dev = &pdev->dev; 37 struct device_node *scp_node; 38 struct platform_device *scp_pdev; 39 40 scp_node = of_parse_phandle(dev->of_node, "mediatek,scp", 0); 41 if (!scp_node) { 42 dev_err(dev, "can't get SCP node\n"); 43 return NULL; 44 } 45 46 scp_pdev = of_find_device_by_node(scp_node); 47 of_node_put(scp_node); 48 49 if (WARN_ON(!scp_pdev)) { 50 dev_err(dev, "SCP pdev failed\n"); 51 return NULL; 52 } 53 54 return platform_get_drvdata(scp_pdev); 55 } 56 EXPORT_SYMBOL_GPL(scp_get); 57 58 /** 59 * scp_put() - "free" the SCP 60 * 61 * @scp: mtk_scp structure from scp_get(). 62 **/ 63 void scp_put(struct mtk_scp *scp) 64 { 65 put_device(scp->dev); 66 } 67 EXPORT_SYMBOL_GPL(scp_put); 68 69 static void scp_wdt_handler(struct mtk_scp *scp, u32 scp_to_host) 70 { 71 dev_err(scp->dev, "SCP watchdog timeout! 0x%x", scp_to_host); 72 rproc_report_crash(scp->rproc, RPROC_WATCHDOG); 73 } 74 75 static void scp_init_ipi_handler(void *data, unsigned int len, void *priv) 76 { 77 struct mtk_scp *scp = (struct mtk_scp *)priv; 78 struct scp_run *run = (struct scp_run *)data; 79 80 scp->run.signaled = run->signaled; 81 strscpy(scp->run.fw_ver, run->fw_ver, SCP_FW_VER_LEN); 82 scp->run.dec_capability = run->dec_capability; 83 scp->run.enc_capability = run->enc_capability; 84 wake_up_interruptible(&scp->run.wq); 85 } 86 87 static void scp_ipi_handler(struct mtk_scp *scp) 88 { 89 struct mtk_share_obj __iomem *rcv_obj = scp->recv_buf; 90 struct scp_ipi_desc *ipi_desc = scp->ipi_desc; 91 u8 tmp_data[SCP_SHARE_BUFFER_SIZE]; 92 scp_ipi_handler_t handler; 93 u32 id = readl(&rcv_obj->id); 94 u32 len = readl(&rcv_obj->len); 95 96 if (len > SCP_SHARE_BUFFER_SIZE) { 97 dev_err(scp->dev, "ipi message too long (len %d, max %d)", len, 98 SCP_SHARE_BUFFER_SIZE); 99 return; 100 } 101 if (id >= SCP_IPI_MAX) { 102 dev_err(scp->dev, "No such ipi id = %d\n", id); 103 return; 104 } 105 106 scp_ipi_lock(scp, id); 107 handler = ipi_desc[id].handler; 108 if (!handler) { 109 dev_err(scp->dev, "No such ipi id = %d\n", id); 110 scp_ipi_unlock(scp, id); 111 return; 112 } 113 114 memcpy_fromio(tmp_data, &rcv_obj->share_buf, len); 115 handler(tmp_data, len, ipi_desc[id].priv); 116 scp_ipi_unlock(scp, id); 117 118 scp->ipi_id_ack[id] = true; 119 wake_up(&scp->ack_wq); 120 } 121 122 static int scp_ipi_init(struct mtk_scp *scp) 123 { 124 size_t send_offset = SCP_FW_END - sizeof(struct mtk_share_obj); 125 size_t recv_offset = send_offset - sizeof(struct mtk_share_obj); 126 127 /* Disable SCP to host interrupt */ 128 writel(MT8183_SCP_IPC_INT_BIT, scp->reg_base + MT8183_SCP_TO_HOST); 129 130 /* shared buffer initialization */ 131 scp->recv_buf = 132 (struct mtk_share_obj __iomem *)(scp->sram_base + recv_offset); 133 scp->send_buf = 134 (struct mtk_share_obj __iomem *)(scp->sram_base + send_offset); 135 memset_io(scp->recv_buf, 0, sizeof(scp->recv_buf)); 136 memset_io(scp->send_buf, 0, sizeof(scp->send_buf)); 137 138 return 0; 139 } 140 141 static void scp_reset_assert(const struct mtk_scp *scp) 142 { 143 u32 val; 144 145 val = readl(scp->reg_base + MT8183_SW_RSTN); 146 val &= ~MT8183_SW_RSTN_BIT; 147 writel(val, scp->reg_base + MT8183_SW_RSTN); 148 } 149 150 static void scp_reset_deassert(const struct mtk_scp *scp) 151 { 152 u32 val; 153 154 val = readl(scp->reg_base + MT8183_SW_RSTN); 155 val |= MT8183_SW_RSTN_BIT; 156 writel(val, scp->reg_base + MT8183_SW_RSTN); 157 } 158 159 static irqreturn_t scp_irq_handler(int irq, void *priv) 160 { 161 struct mtk_scp *scp = priv; 162 u32 scp_to_host; 163 int ret; 164 165 ret = clk_prepare_enable(scp->clk); 166 if (ret) { 167 dev_err(scp->dev, "failed to enable clocks\n"); 168 return IRQ_NONE; 169 } 170 171 scp_to_host = readl(scp->reg_base + MT8183_SCP_TO_HOST); 172 if (scp_to_host & MT8183_SCP_IPC_INT_BIT) 173 scp_ipi_handler(scp); 174 else 175 scp_wdt_handler(scp, scp_to_host); 176 177 /* SCP won't send another interrupt until we set SCP_TO_HOST to 0. */ 178 writel(MT8183_SCP_IPC_INT_BIT | MT8183_SCP_WDT_INT_BIT, 179 scp->reg_base + MT8183_SCP_TO_HOST); 180 clk_disable_unprepare(scp->clk); 181 182 return IRQ_HANDLED; 183 } 184 185 static int scp_elf_load_segments(struct rproc *rproc, const struct firmware *fw) 186 { 187 struct device *dev = &rproc->dev; 188 struct elf32_hdr *ehdr; 189 struct elf32_phdr *phdr; 190 int i, ret = 0; 191 const u8 *elf_data = fw->data; 192 193 ehdr = (struct elf32_hdr *)elf_data; 194 phdr = (struct elf32_phdr *)(elf_data + ehdr->e_phoff); 195 196 /* go through the available ELF segments */ 197 for (i = 0; i < ehdr->e_phnum; i++, phdr++) { 198 u32 da = phdr->p_paddr; 199 u32 memsz = phdr->p_memsz; 200 u32 filesz = phdr->p_filesz; 201 u32 offset = phdr->p_offset; 202 void __iomem *ptr; 203 204 if (phdr->p_type != PT_LOAD) 205 continue; 206 207 dev_dbg(dev, "phdr: type %d da 0x%x memsz 0x%x filesz 0x%x\n", 208 phdr->p_type, da, memsz, filesz); 209 210 if (filesz > memsz) { 211 dev_err(dev, "bad phdr filesz 0x%x memsz 0x%x\n", 212 filesz, memsz); 213 ret = -EINVAL; 214 break; 215 } 216 217 if (offset + filesz > fw->size) { 218 dev_err(dev, "truncated fw: need 0x%x avail 0x%zx\n", 219 offset + filesz, fw->size); 220 ret = -EINVAL; 221 break; 222 } 223 224 /* grab the kernel address for this device address */ 225 ptr = (void __iomem *)rproc_da_to_va(rproc, da, memsz); 226 if (!ptr) { 227 dev_err(dev, "bad phdr da 0x%x mem 0x%x\n", da, memsz); 228 ret = -EINVAL; 229 break; 230 } 231 232 /* put the segment where the remote processor expects it */ 233 if (phdr->p_filesz) 234 scp_memcpy_aligned(ptr, elf_data + phdr->p_offset, 235 filesz); 236 } 237 238 return ret; 239 } 240 241 static int scp_load(struct rproc *rproc, const struct firmware *fw) 242 { 243 const struct mtk_scp *scp = rproc->priv; 244 struct device *dev = scp->dev; 245 int ret; 246 247 ret = clk_prepare_enable(scp->clk); 248 if (ret) { 249 dev_err(dev, "failed to enable clocks\n"); 250 return ret; 251 } 252 253 /* Hold SCP in reset while loading FW. */ 254 scp_reset_assert(scp); 255 256 /* Reset clocks before loading FW */ 257 writel(0x0, scp->reg_base + MT8183_SCP_CLK_SW_SEL); 258 writel(0x0, scp->reg_base + MT8183_SCP_CLK_DIV_SEL); 259 260 /* Initialize TCM before loading FW. */ 261 writel(0x0, scp->reg_base + MT8183_SCP_L1_SRAM_PD); 262 writel(0x0, scp->reg_base + MT8183_SCP_TCM_TAIL_SRAM_PD); 263 264 /* Turn on the power of SCP's SRAM before using it. */ 265 writel(0x0, scp->reg_base + MT8183_SCP_SRAM_PDN); 266 267 /* 268 * Set I-cache and D-cache size before loading SCP FW. 269 * SCP SRAM logical address may change when cache size setting differs. 270 */ 271 writel(MT8183_SCP_CACHE_CON_WAYEN | MT8183_SCP_CACHESIZE_8KB, 272 scp->reg_base + MT8183_SCP_CACHE_CON); 273 writel(MT8183_SCP_CACHESIZE_8KB, scp->reg_base + MT8183_SCP_DCACHE_CON); 274 275 ret = scp_elf_load_segments(rproc, fw); 276 clk_disable_unprepare(scp->clk); 277 278 return ret; 279 } 280 281 static int scp_start(struct rproc *rproc) 282 { 283 struct mtk_scp *scp = (struct mtk_scp *)rproc->priv; 284 struct device *dev = scp->dev; 285 struct scp_run *run = &scp->run; 286 int ret; 287 288 ret = clk_prepare_enable(scp->clk); 289 if (ret) { 290 dev_err(dev, "failed to enable clocks\n"); 291 return ret; 292 } 293 294 run->signaled = false; 295 296 scp_reset_deassert(scp); 297 298 ret = wait_event_interruptible_timeout( 299 run->wq, 300 run->signaled, 301 msecs_to_jiffies(2000)); 302 303 if (ret == 0) { 304 dev_err(dev, "wait SCP initialization timeout!\n"); 305 ret = -ETIME; 306 goto stop; 307 } 308 if (ret == -ERESTARTSYS) { 309 dev_err(dev, "wait SCP interrupted by a signal!\n"); 310 goto stop; 311 } 312 clk_disable_unprepare(scp->clk); 313 dev_info(dev, "SCP is ready. FW version %s\n", run->fw_ver); 314 315 return 0; 316 317 stop: 318 scp_reset_assert(scp); 319 clk_disable_unprepare(scp->clk); 320 return ret; 321 } 322 323 static void *scp_da_to_va(struct rproc *rproc, u64 da, int len) 324 { 325 struct mtk_scp *scp = (struct mtk_scp *)rproc->priv; 326 int offset; 327 328 if (da < scp->sram_size) { 329 offset = da; 330 if (offset >= 0 && (offset + len) < scp->sram_size) 331 return (void __force *)scp->sram_base + offset; 332 } else { 333 offset = da - scp->phys_addr; 334 if (offset >= 0 && (offset + len) < scp->dram_size) 335 return (void __force *)scp->cpu_addr + offset; 336 } 337 338 return NULL; 339 } 340 341 static int scp_stop(struct rproc *rproc) 342 { 343 struct mtk_scp *scp = (struct mtk_scp *)rproc->priv; 344 int ret; 345 346 ret = clk_prepare_enable(scp->clk); 347 if (ret) { 348 dev_err(scp->dev, "failed to enable clocks\n"); 349 return ret; 350 } 351 352 scp_reset_assert(scp); 353 /* Disable SCP watchdog */ 354 writel(0, scp->reg_base + MT8183_WDT_CFG); 355 clk_disable_unprepare(scp->clk); 356 357 return 0; 358 } 359 360 static const struct rproc_ops scp_ops = { 361 .start = scp_start, 362 .stop = scp_stop, 363 .load = scp_load, 364 .da_to_va = scp_da_to_va, 365 }; 366 367 /** 368 * scp_get_device() - get device struct of SCP 369 * 370 * @scp: mtk_scp structure 371 **/ 372 struct device *scp_get_device(struct mtk_scp *scp) 373 { 374 return scp->dev; 375 } 376 EXPORT_SYMBOL_GPL(scp_get_device); 377 378 /** 379 * scp_get_rproc() - get rproc struct of SCP 380 * 381 * @scp: mtk_scp structure 382 **/ 383 struct rproc *scp_get_rproc(struct mtk_scp *scp) 384 { 385 return scp->rproc; 386 } 387 EXPORT_SYMBOL_GPL(scp_get_rproc); 388 389 /** 390 * scp_get_vdec_hw_capa() - get video decoder hardware capability 391 * 392 * @scp: mtk_scp structure 393 * 394 * Return: video decoder hardware capability 395 **/ 396 unsigned int scp_get_vdec_hw_capa(struct mtk_scp *scp) 397 { 398 return scp->run.dec_capability; 399 } 400 EXPORT_SYMBOL_GPL(scp_get_vdec_hw_capa); 401 402 /** 403 * scp_get_venc_hw_capa() - get video encoder hardware capability 404 * 405 * @scp: mtk_scp structure 406 * 407 * Return: video encoder hardware capability 408 **/ 409 unsigned int scp_get_venc_hw_capa(struct mtk_scp *scp) 410 { 411 return scp->run.enc_capability; 412 } 413 EXPORT_SYMBOL_GPL(scp_get_venc_hw_capa); 414 415 /** 416 * scp_mapping_dm_addr() - Mapping SRAM/DRAM to kernel virtual address 417 * 418 * @scp: mtk_scp structure 419 * @mem_addr: SCP views memory address 420 * 421 * Mapping the SCP's SRAM address / 422 * DMEM (Data Extended Memory) memory address / 423 * Working buffer memory address to 424 * kernel virtual address. 425 * 426 * Return: Return ERR_PTR(-EINVAL) if mapping failed, 427 * otherwise the mapped kernel virtual address 428 **/ 429 void *scp_mapping_dm_addr(struct mtk_scp *scp, u32 mem_addr) 430 { 431 void *ptr; 432 433 ptr = scp_da_to_va(scp->rproc, mem_addr, 0); 434 if (!ptr) 435 return ERR_PTR(-EINVAL); 436 437 return ptr; 438 } 439 EXPORT_SYMBOL_GPL(scp_mapping_dm_addr); 440 441 static int scp_map_memory_region(struct mtk_scp *scp) 442 { 443 int ret; 444 445 ret = of_reserved_mem_device_init(scp->dev); 446 if (ret) { 447 dev_err(scp->dev, "failed to assign memory-region: %d\n", ret); 448 return -ENOMEM; 449 } 450 451 /* Reserved SCP code size */ 452 scp->dram_size = MAX_CODE_SIZE; 453 scp->cpu_addr = dma_alloc_coherent(scp->dev, scp->dram_size, 454 &scp->phys_addr, GFP_KERNEL); 455 if (!scp->cpu_addr) 456 return -ENOMEM; 457 458 return 0; 459 } 460 461 static void scp_unmap_memory_region(struct mtk_scp *scp) 462 { 463 dma_free_coherent(scp->dev, scp->dram_size, scp->cpu_addr, 464 scp->phys_addr); 465 of_reserved_mem_device_release(scp->dev); 466 } 467 468 static int scp_register_ipi(struct platform_device *pdev, u32 id, 469 ipi_handler_t handler, void *priv) 470 { 471 struct mtk_scp *scp = platform_get_drvdata(pdev); 472 473 return scp_ipi_register(scp, id, handler, priv); 474 } 475 476 static void scp_unregister_ipi(struct platform_device *pdev, u32 id) 477 { 478 struct mtk_scp *scp = platform_get_drvdata(pdev); 479 480 scp_ipi_unregister(scp, id); 481 } 482 483 static int scp_send_ipi(struct platform_device *pdev, u32 id, void *buf, 484 unsigned int len, unsigned int wait) 485 { 486 struct mtk_scp *scp = platform_get_drvdata(pdev); 487 488 return scp_ipi_send(scp, id, buf, len, wait); 489 } 490 491 static struct mtk_rpmsg_info mtk_scp_rpmsg_info = { 492 .send_ipi = scp_send_ipi, 493 .register_ipi = scp_register_ipi, 494 .unregister_ipi = scp_unregister_ipi, 495 .ns_ipi_id = SCP_IPI_NS_SERVICE, 496 }; 497 498 static void scp_add_rpmsg_subdev(struct mtk_scp *scp) 499 { 500 scp->rpmsg_subdev = 501 mtk_rpmsg_create_rproc_subdev(to_platform_device(scp->dev), 502 &mtk_scp_rpmsg_info); 503 if (scp->rpmsg_subdev) 504 rproc_add_subdev(scp->rproc, scp->rpmsg_subdev); 505 } 506 507 static void scp_remove_rpmsg_subdev(struct mtk_scp *scp) 508 { 509 if (scp->rpmsg_subdev) { 510 rproc_remove_subdev(scp->rproc, scp->rpmsg_subdev); 511 mtk_rpmsg_destroy_rproc_subdev(scp->rpmsg_subdev); 512 scp->rpmsg_subdev = NULL; 513 } 514 } 515 516 static int scp_probe(struct platform_device *pdev) 517 { 518 struct device *dev = &pdev->dev; 519 struct device_node *np = dev->of_node; 520 struct mtk_scp *scp; 521 struct rproc *rproc; 522 struct resource *res; 523 char *fw_name = "scp.img"; 524 int ret, i; 525 526 rproc = rproc_alloc(dev, 527 np->name, 528 &scp_ops, 529 fw_name, 530 sizeof(*scp)); 531 if (!rproc) { 532 dev_err(dev, "unable to allocate remoteproc\n"); 533 return -ENOMEM; 534 } 535 536 scp = (struct mtk_scp *)rproc->priv; 537 scp->rproc = rproc; 538 scp->dev = dev; 539 platform_set_drvdata(pdev, scp); 540 541 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram"); 542 scp->sram_base = devm_ioremap_resource(dev, res); 543 if (IS_ERR((__force void *)scp->sram_base)) { 544 dev_err(dev, "Failed to parse and map sram memory\n"); 545 ret = PTR_ERR((__force void *)scp->sram_base); 546 goto free_rproc; 547 } 548 scp->sram_size = resource_size(res); 549 550 mutex_init(&scp->send_lock); 551 for (i = 0; i < SCP_IPI_MAX; i++) 552 mutex_init(&scp->ipi_desc[i].lock); 553 554 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg"); 555 scp->reg_base = devm_ioremap_resource(dev, res); 556 if (IS_ERR((__force void *)scp->reg_base)) { 557 dev_err(dev, "Failed to parse and map cfg memory\n"); 558 ret = PTR_ERR((__force void *)scp->reg_base); 559 goto destroy_mutex; 560 } 561 562 ret = scp_map_memory_region(scp); 563 if (ret) 564 goto destroy_mutex; 565 566 scp->clk = devm_clk_get(dev, "main"); 567 if (IS_ERR(scp->clk)) { 568 dev_err(dev, "Failed to get clock\n"); 569 ret = PTR_ERR(scp->clk); 570 goto release_dev_mem; 571 } 572 573 ret = clk_prepare_enable(scp->clk); 574 if (ret) { 575 dev_err(dev, "failed to enable clocks\n"); 576 goto release_dev_mem; 577 } 578 579 ret = scp_ipi_init(scp); 580 clk_disable_unprepare(scp->clk); 581 if (ret) { 582 dev_err(dev, "Failed to init ipi\n"); 583 goto release_dev_mem; 584 } 585 586 /* register SCP initialization IPI */ 587 ret = scp_ipi_register(scp, SCP_IPI_INIT, scp_init_ipi_handler, scp); 588 if (ret) { 589 dev_err(dev, "Failed to register IPI_SCP_INIT\n"); 590 goto release_dev_mem; 591 } 592 593 init_waitqueue_head(&scp->run.wq); 594 init_waitqueue_head(&scp->ack_wq); 595 596 scp_add_rpmsg_subdev(scp); 597 598 ret = devm_request_threaded_irq(dev, platform_get_irq(pdev, 0), NULL, 599 scp_irq_handler, IRQF_ONESHOT, 600 pdev->name, scp); 601 602 if (ret) { 603 dev_err(dev, "failed to request irq\n"); 604 goto remove_subdev; 605 } 606 607 ret = rproc_add(rproc); 608 if (ret) 609 goto remove_subdev; 610 611 return 0; 612 613 remove_subdev: 614 scp_remove_rpmsg_subdev(scp); 615 scp_ipi_unregister(scp, SCP_IPI_INIT); 616 release_dev_mem: 617 scp_unmap_memory_region(scp); 618 destroy_mutex: 619 for (i = 0; i < SCP_IPI_MAX; i++) 620 mutex_destroy(&scp->ipi_desc[i].lock); 621 mutex_destroy(&scp->send_lock); 622 free_rproc: 623 rproc_free(rproc); 624 625 return ret; 626 } 627 628 static int scp_remove(struct platform_device *pdev) 629 { 630 struct mtk_scp *scp = platform_get_drvdata(pdev); 631 int i; 632 633 rproc_del(scp->rproc); 634 scp_remove_rpmsg_subdev(scp); 635 scp_ipi_unregister(scp, SCP_IPI_INIT); 636 scp_unmap_memory_region(scp); 637 for (i = 0; i < SCP_IPI_MAX; i++) 638 mutex_destroy(&scp->ipi_desc[i].lock); 639 mutex_destroy(&scp->send_lock); 640 rproc_free(scp->rproc); 641 642 return 0; 643 } 644 645 static const struct of_device_id mtk_scp_of_match[] = { 646 { .compatible = "mediatek,mt8183-scp"}, 647 {}, 648 }; 649 MODULE_DEVICE_TABLE(of, mtk_scp_of_match); 650 651 static struct platform_driver mtk_scp_driver = { 652 .probe = scp_probe, 653 .remove = scp_remove, 654 .driver = { 655 .name = "mtk-scp", 656 .of_match_table = of_match_ptr(mtk_scp_of_match), 657 }, 658 }; 659 660 module_platform_driver(mtk_scp_driver); 661 662 MODULE_LICENSE("GPL v2"); 663 MODULE_DESCRIPTION("MediaTek SCP control driver"); 664