/*
 * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
 * Copyright (C) 2017 Linaro Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/qcom_scm.h>
#include <linux/slab.h>

#include "core.h"
#include "hfi_cmds.h"
#include "hfi_msgs.h"
#include "hfi_venus.h"
#include "hfi_venus_io.h"

#define HFI_MASK_QHDR_TX_TYPE		0xff000000
#define HFI_MASK_QHDR_RX_TYPE		0x00ff0000
#define HFI_MASK_QHDR_PRI_TYPE		0x0000ff00
#define HFI_MASK_QHDR_ID_TYPE		0x000000ff

#define HFI_HOST_TO_CTRL_CMD_Q		0
#define HFI_CTRL_TO_HOST_MSG_Q		1
#define HFI_CTRL_TO_HOST_DBG_Q		2
#define HFI_MASK_QHDR_STATUS		0x000000ff

#define IFACEQ_NUM			3
#define IFACEQ_CMD_IDX			0
#define IFACEQ_MSG_IDX			1
#define IFACEQ_DBG_IDX			2
#define IFACEQ_MAX_BUF_COUNT		50
#define IFACEQ_MAX_PARALLEL_CLNTS	16
#define IFACEQ_DFLT_QHDR		0x01010000

#define POLL_INTERVAL_US		50

#define IFACEQ_MAX_PKT_SIZE		1024
#define IFACEQ_MED_PKT_SIZE		768
#define IFACEQ_MIN_PKT_SIZE		8
#define IFACEQ_VAR_SMALL_PKT_SIZE	100
#define IFACEQ_VAR_LARGE_PKT_SIZE	512
#define IFACEQ_VAR_HUGE_PKT_SIZE	(1024 * 12)

enum tzbsp_video_state {
	TZBSP_VIDEO_STATE_SUSPEND = 0,
	TZBSP_VIDEO_STATE_RESUME
};

struct hfi_queue_table_header {
	u32 version;
	u32 size;
	u32 qhdr0_offset;
	u32 qhdr_size;
	u32 num_q;
	u32 num_active_q;
};

struct hfi_queue_header {
	u32 status;
	u32 start_addr;
	u32 type;
	u32 q_size;
	u32 pkt_size;
	u32 pkt_drop_cnt;
	u32 rx_wm;
	u32 tx_wm;
	u32 rx_req;
	u32 tx_req;
	u32 rx_irq_status;
	u32 tx_irq_status;
	u32 read_idx;
	u32 write_idx;
};

#define IFACEQ_TABLE_SIZE	\
	(sizeof(struct hfi_queue_table_header) +	\
	 sizeof(struct hfi_queue_header) * IFACEQ_NUM)

#define IFACEQ_QUEUE_SIZE	(IFACEQ_MAX_PKT_SIZE *	\
	IFACEQ_MAX_BUF_COUNT * IFACEQ_MAX_PARALLEL_CLNTS)

#define IFACEQ_GET_QHDR_START_ADDR(ptr, i)	\
	(void *)(((ptr) + sizeof(struct hfi_queue_table_header)) +	\
		 ((i) * sizeof(struct hfi_queue_header)))

#define QDSS_SIZE	SZ_4K
#define SFR_SIZE	SZ_4K
#define QUEUE_SIZE	\
	(IFACEQ_TABLE_SIZE + (IFACEQ_QUEUE_SIZE * IFACEQ_NUM))

#define ALIGNED_QDSS_SIZE	ALIGN(QDSS_SIZE, SZ_4K)
#define ALIGNED_SFR_SIZE	ALIGN(SFR_SIZE, SZ_4K)
#define ALIGNED_QUEUE_SIZE	ALIGN(QUEUE_SIZE, SZ_4K)
#define SHARED_QSIZE		ALIGN(ALIGNED_SFR_SIZE + ALIGNED_QUEUE_SIZE + \
				      ALIGNED_QDSS_SIZE, SZ_1M)

struct mem_desc {
	dma_addr_t da;	/* device address */
	void *kva;	/* kernel virtual address */
	u32 size;
	unsigned long attrs;
};

struct iface_queue {
	struct hfi_queue_header *qhdr;
	struct mem_desc qmem;
};

enum venus_state {
	VENUS_STATE_DEINIT = 1,
	VENUS_STATE_INIT,
};

struct venus_hfi_device {
	struct venus_core *core;
	u32 irq_status;
	u32 last_packet_type;
	bool power_enabled;
	bool suspended;
	enum venus_state state;
	/* serialize read / write to the shared memory */
	struct mutex lock;
	struct completion pwr_collapse_prep;
	struct completion release_resource;
	struct mem_desc ifaceq_table;
	struct mem_desc sfr;
	struct iface_queue queues[IFACEQ_NUM];
	u8 pkt_buf[IFACEQ_VAR_HUGE_PKT_SIZE];
	u8 dbg_buf[IFACEQ_VAR_HUGE_PKT_SIZE];
};

static bool venus_pkt_debug;
static int venus_fw_debug = HFI_DEBUG_MSG_ERROR | HFI_DEBUG_MSG_FATAL;
static bool venus_sys_idle_indicator;
static bool venus_fw_low_power_mode = true;
static int venus_hw_rsp_timeout = 1000;
static bool venus_fw_coverage;

static void venus_set_state(struct venus_hfi_device *hdev,
			    enum venus_state state)
{
	mutex_lock(&hdev->lock);
	hdev->state = state;
	mutex_unlock(&hdev->lock);
}

static bool venus_is_valid_state(struct venus_hfi_device *hdev)
{
	return hdev->state != VENUS_STATE_DEINIT;
}

static void venus_dump_packet(struct venus_hfi_device *hdev, const void *packet)
{
	size_t pkt_size = *(u32 *)packet;

	if (!venus_pkt_debug)
		return;

	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 1, packet,
		       pkt_size, true);
}

static int venus_write_queue(struct venus_hfi_device *hdev,
			     struct iface_queue *queue,
			     void *packet, u32 *rx_req)
{
	struct hfi_queue_header *qhdr;
	u32 dwords, new_wr_idx;
	u32 empty_space, rd_idx, wr_idx, qsize;
	u32 *wr_ptr;

	if (!queue->qmem.kva)
		return -EINVAL;

	qhdr = queue->qhdr;
	if (!qhdr)
		return -EINVAL;

	venus_dump_packet(hdev, packet);

	dwords = (*(u32 *)packet) >> 2;
	if (!dwords)
		return -EINVAL;

	rd_idx = qhdr->read_idx;
	wr_idx = qhdr->write_idx;
	qsize = qhdr->q_size;
	/* ensure rd/wr indices are read from memory */
	rmb();

	if (wr_idx >= rd_idx)
		empty_space = qsize - (wr_idx - rd_idx);
	else
		empty_space = rd_idx - wr_idx;

	if (empty_space <= dwords) {
		qhdr->tx_req = 1;
		/* ensure tx_req is updated in memory */
		wmb();
		return -ENOSPC;
	}

	qhdr->tx_req = 0;
	/* ensure tx_req is updated in memory */
	wmb();

	new_wr_idx = wr_idx + dwords;
	wr_ptr = (u32 *)(queue->qmem.kva + (wr_idx << 2));
	if (new_wr_idx < qsize) {
		memcpy(wr_ptr, packet, dwords << 2);
	} else {
		size_t len;

		new_wr_idx -= qsize;
		len = (dwords - new_wr_idx) << 2;
		memcpy(wr_ptr, packet, len);
		memcpy(queue->qmem.kva, packet + len, new_wr_idx << 2);
	}

	/* make sure packet is written before updating the write index */
	wmb();

	qhdr->write_idx = new_wr_idx;
	*rx_req = qhdr->rx_req ? 1 : 0;

	/* make sure write index is updated before an interrupt is raised */
	mb();

	return 0;
}

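/*
 * Read one packet from a shared interface queue into @pkt. Returns -ENODATA
 * when the queue is empty and -EBADMSG when the packet at the current read
 * index looks corrupted, in which case it is dropped by advancing the read
 * index to the write index.
 */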
static int venus_read_queue(struct venus_hfi_device *hdev,
			    struct iface_queue *queue, void *pkt, u32 *tx_req)
{
	struct hfi_queue_header *qhdr;
	u32 dwords, new_rd_idx;
	u32 rd_idx, wr_idx, type, qsize;
	u32 *rd_ptr;
	u32 recv_request = 0;
	int ret = 0;

	if (!queue->qmem.kva)
		return -EINVAL;

	qhdr = queue->qhdr;
	if (!qhdr)
		return -EINVAL;

	type = qhdr->type;
	rd_idx = qhdr->read_idx;
	wr_idx = qhdr->write_idx;
	qsize = qhdr->q_size;

	/* make sure data is valid before using it */
	rmb();

	/*
	 * Do not set the receive request for the debug queue; if set, Venus
	 * generates an interrupt for debug messages even when there is no
	 * response message available. In general the debug queue will not
	 * become full, as it is emptied on every interrupt from Venus.
	 * Venus will generate an interrupt anyway if it does become full.
	 */
	if (type & HFI_CTRL_TO_HOST_MSG_Q)
		recv_request = 1;

	if (rd_idx == wr_idx) {
		qhdr->rx_req = recv_request;
		*tx_req = 0;
		/* update rx_req field in memory */
		wmb();
		return -ENODATA;
	}

	rd_ptr = (u32 *)(queue->qmem.kva + (rd_idx << 2));
	dwords = *rd_ptr >> 2;
	if (!dwords)
		return -EINVAL;

	new_rd_idx = rd_idx + dwords;
	if (((dwords << 2) <= IFACEQ_VAR_HUGE_PKT_SIZE) && rd_idx <= qsize) {
		if (new_rd_idx < qsize) {
			memcpy(pkt, rd_ptr, dwords << 2);
		} else {
			size_t len;

			new_rd_idx -= qsize;
			len = (dwords - new_rd_idx) << 2;
			memcpy(pkt, rd_ptr, len);
			memcpy(pkt + len, queue->qmem.kva, new_rd_idx << 2);
		}
	} else {
		/* bad packet received, dropping */
		new_rd_idx = qhdr->write_idx;
		ret = -EBADMSG;
	}

	/* ensure the packet is read before updating read index */
	rmb();

	qhdr->read_idx = new_rd_idx;
	/* ensure updating read index */
	wmb();

	rd_idx = qhdr->read_idx;
	wr_idx = qhdr->write_idx;
	/* ensure rd/wr indices are read from memory */
	rmb();

	if (rd_idx != wr_idx)
		qhdr->rx_req = 0;
	else
		qhdr->rx_req = recv_request;

	*tx_req = qhdr->tx_req ? 1 : 0;

	/* ensure rx_req is stored to memory and tx_req is loaded from memory */
	mb();

	venus_dump_packet(hdev, pkt);

	return ret;
}

static int venus_alloc(struct venus_hfi_device *hdev, struct mem_desc *desc,
		       u32 size)
{
	struct device *dev = hdev->core->dev;

	desc->attrs = DMA_ATTR_WRITE_COMBINE;
	desc->size = ALIGN(size, SZ_4K);

	desc->kva = dma_alloc_attrs(dev, desc->size, &desc->da, GFP_KERNEL,
				    desc->attrs);
	if (!desc->kva)
		return -ENOMEM;

	return 0;
}

static void venus_free(struct venus_hfi_device *hdev, struct mem_desc *mem)
{
	struct device *dev = hdev->core->dev;

	dma_free_attrs(dev, mem->size, mem->kva, mem->da, mem->attrs);
}

static void venus_writel(struct venus_hfi_device *hdev, u32 reg, u32 value)
{
	writel(value, hdev->core->base + reg);
}

static u32 venus_readl(struct venus_hfi_device *hdev, u32 reg)
{
	return readl(hdev->core->base + reg);
}

static void venus_set_registers(struct venus_hfi_device *hdev)
{
	const struct venus_resources *res = hdev->core->res;
	const struct reg_val *tbl = res->reg_tbl;
	unsigned int count = res->reg_tbl_size;
	unsigned int i;

	for (i = 0; i < count; i++)
		venus_writel(hdev, tbl[i].reg, tbl[i].value);
}

static void venus_soft_int(struct venus_hfi_device *hdev)
{
	venus_writel(hdev, CPU_IC_SOFTINT, BIT(CPU_IC_SOFTINT_H2A_SHIFT));
}

static int venus_iface_cmdq_write_nolock(struct venus_hfi_device *hdev,
					 void *pkt)
{
	struct device *dev = hdev->core->dev;
	struct hfi_pkt_hdr *cmd_packet;
	struct iface_queue *queue;
	u32 rx_req;
	int ret;

	if (!venus_is_valid_state(hdev))
		return -EINVAL;

	cmd_packet = (struct hfi_pkt_hdr *)pkt;
	hdev->last_packet_type = cmd_packet->pkt_type;

	queue = &hdev->queues[IFACEQ_CMD_IDX];

	ret = venus_write_queue(hdev, queue, pkt, &rx_req);
	if (ret) {
		dev_err(dev, "write to iface cmd queue failed (%d)\n", ret);
		return ret;
	}

	if (rx_req)
		venus_soft_int(hdev);

	return 0;
}

static int venus_iface_cmdq_write(struct venus_hfi_device *hdev, void *pkt)
{
	int ret;

	mutex_lock(&hdev->lock);
	ret = venus_iface_cmdq_write_nolock(hdev, pkt);
	mutex_unlock(&hdev->lock);

	return ret;
}

static int venus_hfi_core_set_resource(struct venus_core *core, u32 id,
				       u32 size, u32 addr, void *cookie)
{
	struct venus_hfi_device *hdev = to_hfi_priv(core);
	struct hfi_sys_set_resource_pkt *pkt;
	u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
	int ret;

	if (id == VIDC_RESOURCE_NONE)
		return 0;

	pkt = (struct hfi_sys_set_resource_pkt *)packet;

	ret = pkt_sys_set_resource(pkt, id, size, addr, cookie);
	if (ret)
		return ret;

	ret = venus_iface_cmdq_write(hdev, pkt);
	if (ret)
		return ret;

	return 0;
}

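/*
 * Bring the firmware controller out of reset and poll CPU_CS_SCIACMDARG0
 * until it reports a non-zero status, an invalid UC_REGION setting, or the
 * retry budget is exhausted.
 */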
static int venus_boot_core(struct venus_hfi_device *hdev)
{
	struct device *dev = hdev->core->dev;
	static const unsigned int max_tries = 100;
	u32 ctrl_status = 0;
	unsigned int count = 0;
	int ret = 0;

	venus_writel(hdev, VIDC_CTRL_INIT, BIT(VIDC_CTRL_INIT_CTRL_SHIFT));
	venus_writel(hdev, WRAPPER_INTR_MASK, WRAPPER_INTR_MASK_A2HVCODEC_MASK);
	venus_writel(hdev, CPU_CS_SCIACMDARG3, 1);

	while (!ctrl_status && count < max_tries) {
		ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0);
		if ((ctrl_status & CPU_CS_SCIACMDARG0_ERROR_STATUS_MASK) == 4) {
			dev_err(dev, "invalid setting for UC_REGION\n");
			ret = -EINVAL;
			break;
		}

		usleep_range(500, 1000);
		count++;
	}

	if (count >= max_tries)
		ret = -ETIMEDOUT;

	return ret;
}

static u32 venus_hwversion(struct venus_hfi_device *hdev)
{
	struct device *dev = hdev->core->dev;
	u32 ver = venus_readl(hdev, WRAPPER_HW_VERSION);
	u32 major, minor, step;

	major = ver & WRAPPER_HW_VERSION_MAJOR_VERSION_MASK;
	major = major >> WRAPPER_HW_VERSION_MAJOR_VERSION_SHIFT;
	minor = ver & WRAPPER_HW_VERSION_MINOR_VERSION_MASK;
	minor = minor >> WRAPPER_HW_VERSION_MINOR_VERSION_SHIFT;
	step = ver & WRAPPER_HW_VERSION_STEP_VERSION_MASK;

	dev_dbg(dev, "venus hw version %x.%x.%x\n", major, minor, step);

	return major;
}

static int venus_run(struct venus_hfi_device *hdev)
{
	struct device *dev = hdev->core->dev;
	int ret;

	/*
	 * Re-program all of the registers that get reset as a result of
	 * regulator_disable() and _enable()
	 */
	venus_set_registers(hdev);

	venus_writel(hdev, UC_REGION_ADDR, hdev->ifaceq_table.da);
	venus_writel(hdev, UC_REGION_SIZE, SHARED_QSIZE);
	venus_writel(hdev, CPU_CS_SCIACMDARG2, hdev->ifaceq_table.da);
	venus_writel(hdev, CPU_CS_SCIACMDARG1, 0x01);
	if (hdev->sfr.da)
		venus_writel(hdev, SFR_ADDR, hdev->sfr.da);

	ret = venus_boot_core(hdev);
	if (ret) {
		dev_err(dev, "failed to reset venus core\n");
		return ret;
	}

	venus_hwversion(hdev);

	return 0;
}

static int venus_halt_axi(struct venus_hfi_device *hdev)
{
	void __iomem *base = hdev->core->base;
	struct device *dev = hdev->core->dev;
	u32 val;
	int ret;

	/* Halt AXI and AXI IMEM VBIF Access */
	val = venus_readl(hdev, VBIF_AXI_HALT_CTRL0);
	val |= VBIF_AXI_HALT_CTRL0_HALT_REQ;
	venus_writel(hdev, VBIF_AXI_HALT_CTRL0, val);

	/* Request for AXI bus port halt */
	ret = readl_poll_timeout(base + VBIF_AXI_HALT_CTRL1, val,
				 val & VBIF_AXI_HALT_CTRL1_HALT_ACK,
				 POLL_INTERVAL_US,
				 VBIF_AXI_HALT_ACK_TIMEOUT_US);
	if (ret) {
		dev_err(dev, "AXI bus port halt timeout\n");
		return ret;
	}

	return 0;
}

static int venus_power_off(struct venus_hfi_device *hdev)
{
	int ret;

	if (!hdev->power_enabled)
		return 0;

	ret = qcom_scm_set_remote_state(TZBSP_VIDEO_STATE_SUSPEND, 0);
	if (ret)
		return ret;

	ret = venus_halt_axi(hdev);
	if (ret)
		return ret;

	hdev->power_enabled = false;

	return 0;
}

static int venus_power_on(struct venus_hfi_device *hdev)
{
	int ret;

	if (hdev->power_enabled)
		return 0;

	ret = qcom_scm_set_remote_state(TZBSP_VIDEO_STATE_RESUME, 0);
	if (ret)
		goto err;

	ret = venus_run(hdev);
	if (ret)
		goto err_suspend;

	hdev->power_enabled = true;

	return 0;

err_suspend:
	qcom_scm_set_remote_state(TZBSP_VIDEO_STATE_SUSPEND, 0);
err:
	hdev->power_enabled = false;
	return ret;
}

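/*
 * The _nolock queue read helpers below rely on the caller holding
 * hdev->lock; the locked wrappers that follow each of them take it.
 */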
static int venus_iface_msgq_read_nolock(struct venus_hfi_device *hdev,
					void *pkt)
{
	struct iface_queue *queue;
	u32 tx_req;
	int ret;

	if (!venus_is_valid_state(hdev))
		return -EINVAL;

	queue = &hdev->queues[IFACEQ_MSG_IDX];

	ret = venus_read_queue(hdev, queue, pkt, &tx_req);
	if (ret)
		return ret;

	if (tx_req)
		venus_soft_int(hdev);

	return 0;
}

static int venus_iface_msgq_read(struct venus_hfi_device *hdev, void *pkt)
{
	int ret;

	mutex_lock(&hdev->lock);
	ret = venus_iface_msgq_read_nolock(hdev, pkt);
	mutex_unlock(&hdev->lock);

	return ret;
}

static int venus_iface_dbgq_read_nolock(struct venus_hfi_device *hdev,
					void *pkt)
{
	struct iface_queue *queue;
	u32 tx_req;
	int ret;

	ret = venus_is_valid_state(hdev);
	if (!ret)
		return -EINVAL;

	queue = &hdev->queues[IFACEQ_DBG_IDX];

	ret = venus_read_queue(hdev, queue, pkt, &tx_req);
	if (ret)
		return ret;

	if (tx_req)
		venus_soft_int(hdev);

	return 0;
}

static int venus_iface_dbgq_read(struct venus_hfi_device *hdev, void *pkt)
{
	int ret;

	if (!pkt)
		return -EINVAL;

	mutex_lock(&hdev->lock);
	ret = venus_iface_dbgq_read_nolock(hdev, pkt);
	mutex_unlock(&hdev->lock);

	return ret;
}

static void venus_set_qhdr_defaults(struct hfi_queue_header *qhdr)
{
	qhdr->status = 1;
	qhdr->type = IFACEQ_DFLT_QHDR;
	qhdr->q_size = IFACEQ_QUEUE_SIZE / 4;
	qhdr->pkt_size = 0;
	qhdr->rx_wm = 1;
	qhdr->tx_wm = 1;
	qhdr->rx_req = 1;
	qhdr->tx_req = 0;
	qhdr->rx_irq_status = 0;
	qhdr->tx_irq_status = 0;
	qhdr->read_idx = 0;
	qhdr->write_idx = 0;
}

static void venus_interface_queues_release(struct venus_hfi_device *hdev)
{
	mutex_lock(&hdev->lock);

	venus_free(hdev, &hdev->ifaceq_table);
	venus_free(hdev, &hdev->sfr);

	memset(hdev->queues, 0, sizeof(hdev->queues));
	memset(&hdev->ifaceq_table, 0, sizeof(hdev->ifaceq_table));
	memset(&hdev->sfr, 0, sizeof(hdev->sfr));

	mutex_unlock(&hdev->lock);
}

static int venus_interface_queues_init(struct venus_hfi_device *hdev)
{
	struct hfi_queue_table_header *tbl_hdr;
	struct iface_queue *queue;
	struct hfi_sfr *sfr;
	struct mem_desc desc = {0};
	unsigned int offset;
	unsigned int i;
	int ret;

	ret = venus_alloc(hdev, &desc, ALIGNED_QUEUE_SIZE);
	if (ret)
		return ret;

	hdev->ifaceq_table = desc;
	offset = IFACEQ_TABLE_SIZE;

	for (i = 0; i < IFACEQ_NUM; i++) {
		queue = &hdev->queues[i];
		queue->qmem.da = desc.da + offset;
		queue->qmem.kva = desc.kva + offset;
		queue->qmem.size = IFACEQ_QUEUE_SIZE;
		offset += queue->qmem.size;
		queue->qhdr =
			IFACEQ_GET_QHDR_START_ADDR(hdev->ifaceq_table.kva, i);

		venus_set_qhdr_defaults(queue->qhdr);

		queue->qhdr->start_addr = queue->qmem.da;

		if (i == IFACEQ_CMD_IDX)
			queue->qhdr->type |= HFI_HOST_TO_CTRL_CMD_Q;
		else if (i == IFACEQ_MSG_IDX)
			queue->qhdr->type |= HFI_CTRL_TO_HOST_MSG_Q;
		else if (i == IFACEQ_DBG_IDX)
			queue->qhdr->type |= HFI_CTRL_TO_HOST_DBG_Q;
	}

	tbl_hdr = hdev->ifaceq_table.kva;
	tbl_hdr->version = 0;
	tbl_hdr->size = IFACEQ_TABLE_SIZE;
	tbl_hdr->qhdr0_offset = sizeof(struct hfi_queue_table_header);
	tbl_hdr->qhdr_size = sizeof(struct hfi_queue_header);
	tbl_hdr->num_q = IFACEQ_NUM;
	tbl_hdr->num_active_q = IFACEQ_NUM;

	/*
	 * Set the receive request to zero on the debug queue as there is
	 * no need for an interrupt from the video hardware for debug
	 * messages
	 */
	queue = &hdev->queues[IFACEQ_DBG_IDX];
	queue->qhdr->rx_req = 0;

	ret = venus_alloc(hdev, &desc, ALIGNED_SFR_SIZE);
	if (ret) {
		hdev->sfr.da = 0;
	} else {
		hdev->sfr = desc;
		sfr = hdev->sfr.kva;
		sfr->buf_size = ALIGNED_SFR_SIZE;
	}

	/* ensure table and queue header structs are settled in memory */
	wmb();

	return 0;
}

static int venus_sys_set_debug(struct venus_hfi_device *hdev, u32 debug)
{
	struct hfi_sys_set_property_pkt *pkt;
	u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
	int ret;

	pkt = (struct hfi_sys_set_property_pkt *)packet;

	pkt_sys_debug_config(pkt, HFI_DEBUG_MODE_QUEUE, debug);

	ret = venus_iface_cmdq_write(hdev, pkt);
	if (ret)
		return ret;

	return 0;
}

static int venus_sys_set_coverage(struct venus_hfi_device *hdev, u32 mode)
{
	struct hfi_sys_set_property_pkt *pkt;
	u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
	int ret;

	pkt = (struct hfi_sys_set_property_pkt *)packet;

	pkt_sys_coverage_config(pkt, mode);

	ret = venus_iface_cmdq_write(hdev, pkt);
	if (ret)
		return ret;

	return 0;
}

static int venus_sys_set_idle_message(struct venus_hfi_device *hdev,
				      bool enable)
{
	struct hfi_sys_set_property_pkt *pkt;
	u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
	int ret;

	if (!enable)
		return 0;

	pkt = (struct hfi_sys_set_property_pkt *)packet;

	pkt_sys_idle_indicator(pkt, enable);

	ret = venus_iface_cmdq_write(hdev, pkt);
	if (ret)
		return ret;

	return 0;
}

static int venus_sys_set_power_control(struct venus_hfi_device *hdev,
				       bool enable)
{
	struct hfi_sys_set_property_pkt *pkt;
	u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
	int ret;

	pkt = (struct hfi_sys_set_property_pkt *)packet;

	pkt_sys_power_control(pkt, enable);

	ret = venus_iface_cmdq_write(hdev, pkt);
	if (ret)
		return ret;

	return 0;
}

static int venus_get_queue_size(struct venus_hfi_device *hdev,
				unsigned int index)
{
	struct hfi_queue_header *qhdr;

	if (index >= IFACEQ_NUM)
		return -EINVAL;

	qhdr = hdev->queues[index].qhdr;
	if (!qhdr)
		return -EINVAL;

	return abs(qhdr->read_idx - qhdr->write_idx);
}

static int venus_sys_set_default_properties(struct venus_hfi_device *hdev)
{
	struct device *dev = hdev->core->dev;
	int ret;

	ret = venus_sys_set_debug(hdev, venus_fw_debug);
	if (ret)
		dev_warn(dev, "setting fw debug msg ON failed (%d)\n", ret);

	ret = venus_sys_set_idle_message(hdev, venus_sys_idle_indicator);
	if (ret)
		dev_warn(dev, "setting idle response ON failed (%d)\n", ret);

	ret = venus_sys_set_power_control(hdev, venus_fw_low_power_mode);
	if (ret)
		dev_warn(dev, "setting hw power collapse ON failed (%d)\n",
			 ret);

	return ret;
}

static int venus_session_cmd(struct venus_inst *inst, u32 pkt_type)
{
	struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
	struct hfi_session_pkt pkt;

	pkt_session_cmd(&pkt, pkt_type, inst);

	return venus_iface_cmdq_write(hdev, &pkt);
}

static void venus_flush_debug_queue(struct venus_hfi_device *hdev)
{
	struct device *dev = hdev->core->dev;
	void *packet = hdev->dbg_buf;

	while (!venus_iface_dbgq_read(hdev, packet)) {
		struct hfi_msg_sys_coverage_pkt *pkt = packet;

		if (pkt->hdr.pkt_type != HFI_MSG_SYS_COV) {
			struct hfi_msg_sys_debug_pkt *pkt = packet;

			dev_dbg(dev, "%s", pkt->msg_data);
		}
	}
}

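/*
 * Ask the firmware to prepare for power collapse and, if @wait is set,
 * block until the SYS_PC_PREP response arrives or the response timeout
 * expires.
 */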
static int venus_prepare_power_collapse(struct venus_hfi_device *hdev,
					bool wait)
{
	unsigned long timeout = msecs_to_jiffies(venus_hw_rsp_timeout);
	struct hfi_sys_pc_prep_pkt pkt;
	int ret;

	init_completion(&hdev->pwr_collapse_prep);

	pkt_sys_pc_prep(&pkt);

	ret = venus_iface_cmdq_write(hdev, &pkt);
	if (ret)
		return ret;

	if (!wait)
		return 0;

	ret = wait_for_completion_timeout(&hdev->pwr_collapse_prep, timeout);
	if (!ret) {
		venus_flush_debug_queue(hdev);
		return -ETIMEDOUT;
	}

	return 0;
}

static int venus_are_queues_empty(struct venus_hfi_device *hdev)
{
	int ret1, ret2;

	ret1 = venus_get_queue_size(hdev, IFACEQ_MSG_IDX);
	if (ret1 < 0)
		return ret1;

	ret2 = venus_get_queue_size(hdev, IFACEQ_CMD_IDX);
	if (ret2 < 0)
		return ret2;

	if (!ret1 && !ret2)
		return 1;

	return 0;
}

static void venus_sfr_print(struct venus_hfi_device *hdev)
{
	struct device *dev = hdev->core->dev;
	struct hfi_sfr *sfr = hdev->sfr.kva;
	void *p;

	if (!sfr)
		return;

	p = memchr(sfr->data, '\0', sfr->buf_size);
	/*
	 * SFR isn't guaranteed to be NULL terminated since SYS_ERROR indicates
	 * that Venus is in the process of crashing.
	 */
	if (!p)
		sfr->data[sfr->buf_size - 1] = '\0';

	dev_err_ratelimited(dev, "SFR message from FW: %s\n", sfr->data);
}

static void venus_process_msg_sys_error(struct venus_hfi_device *hdev,
					void *packet)
{
	struct hfi_msg_event_notify_pkt *event_pkt = packet;

	if (event_pkt->event_id != HFI_EVENT_SYS_ERROR)
		return;

	venus_set_state(hdev, VENUS_STATE_DEINIT);

	/*
	 * Once SYS_ERROR is received from the hardware, it is safe to halt
	 * the AXI: the Venus firmware may have crashed while the hardware is
	 * still active and generating unnecessary transactions, so stop all
	 * AXI transactions from the Venus subsystem.
	 */
	venus_halt_axi(hdev);
	venus_sfr_print(hdev);
}

static irqreturn_t venus_isr_thread(struct venus_core *core)
{
	struct venus_hfi_device *hdev = to_hfi_priv(core);
	const struct venus_resources *res;
	void *pkt;
	u32 msg_ret;

	if (!hdev)
		return IRQ_NONE;

	res = hdev->core->res;
	pkt = hdev->pkt_buf;

	if (hdev->irq_status & WRAPPER_INTR_STATUS_A2HWD_MASK) {
		venus_sfr_print(hdev);
		hfi_process_watchdog_timeout(core);
	}

	while (!venus_iface_msgq_read(hdev, pkt)) {
		msg_ret = hfi_process_msg_packet(core, pkt);
		switch (msg_ret) {
		case HFI_MSG_EVENT_NOTIFY:
			venus_process_msg_sys_error(hdev, pkt);
			break;
		case HFI_MSG_SYS_INIT:
			venus_hfi_core_set_resource(core, res->vmem_id,
						    res->vmem_size,
						    res->vmem_addr,
						    hdev);
			break;
		case HFI_MSG_SYS_RELEASE_RESOURCE:
			complete(&hdev->release_resource);
			break;
		case HFI_MSG_SYS_PC_PREP:
			complete(&hdev->pwr_collapse_prep);
			break;
		default:
			break;
		}
	}

	venus_flush_debug_queue(hdev);

	return IRQ_HANDLED;
}

static irqreturn_t venus_isr(struct venus_core *core)
{
	struct venus_hfi_device *hdev = to_hfi_priv(core);
	u32 status;

	if (!hdev)
		return IRQ_NONE;

	status = venus_readl(hdev, WRAPPER_INTR_STATUS);

	if (status & WRAPPER_INTR_STATUS_A2H_MASK ||
	    status & WRAPPER_INTR_STATUS_A2HWD_MASK ||
	    status & CPU_CS_SCIACMDARG0_INIT_IDLE_MSG_MASK)
		hdev->irq_status = status;

	venus_writel(hdev, CPU_CS_A2HSOFTINTCLR, 1);
	venus_writel(hdev, WRAPPER_INTR_CLEAR, status);

	return IRQ_WAKE_THREAD;
}

static int venus_core_init(struct venus_core *core)
{
	struct venus_hfi_device *hdev = to_hfi_priv(core);
	struct device *dev = core->dev;
	struct hfi_sys_get_property_pkt version_pkt;
	struct hfi_sys_init_pkt pkt;
	int ret;

	pkt_sys_init(&pkt, HFI_VIDEO_ARCH_OX);

	venus_set_state(hdev, VENUS_STATE_INIT);

	ret = venus_iface_cmdq_write(hdev, &pkt);
	if (ret)
		return ret;

	pkt_sys_image_version(&version_pkt);

	ret = venus_iface_cmdq_write(hdev, &version_pkt);
	if (ret)
		dev_warn(dev, "failed to send image version pkt to fw\n");

	return 0;
}

static int venus_core_deinit(struct venus_core *core)
{
	struct venus_hfi_device *hdev = to_hfi_priv(core);

	venus_set_state(hdev, VENUS_STATE_DEINIT);
	hdev->suspended = true;
	hdev->power_enabled = false;

	return 0;
}

static int venus_core_ping(struct venus_core *core, u32 cookie)
{
	struct venus_hfi_device *hdev = to_hfi_priv(core);
	struct hfi_sys_ping_pkt pkt;

	pkt_sys_ping(&pkt, cookie);

	return venus_iface_cmdq_write(hdev, &pkt);
}

static int venus_core_trigger_ssr(struct venus_core *core, u32 trigger_type)
{
	struct venus_hfi_device *hdev = to_hfi_priv(core);
	struct hfi_sys_test_ssr_pkt pkt;
	int ret;

	ret = pkt_sys_ssr_cmd(&pkt, trigger_type);
	if (ret)
		return ret;

	return venus_iface_cmdq_write(hdev, &pkt);
}

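/*
 * Session init also (re)sends the default system properties (debug level,
 * idle indicator, power control) before the session init packet itself.
 */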
static int venus_session_init(struct venus_inst *inst, u32 session_type,
			      u32 codec)
{
	struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
	struct hfi_session_init_pkt pkt;
	int ret;

	ret = venus_sys_set_default_properties(hdev);
	if (ret)
		return ret;

	ret = pkt_session_init(&pkt, inst, session_type, codec);
	if (ret)
		goto err;

	ret = venus_iface_cmdq_write(hdev, &pkt);
	if (ret)
		goto err;

	return 0;

err:
	venus_flush_debug_queue(hdev);
	return ret;
}

static int venus_session_end(struct venus_inst *inst)
{
	struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
	struct device *dev = hdev->core->dev;

	if (venus_fw_coverage) {
		if (venus_sys_set_coverage(hdev, venus_fw_coverage))
			dev_warn(dev, "fw coverage msg ON failed\n");
	}

	return venus_session_cmd(inst, HFI_CMD_SYS_SESSION_END);
}

static int venus_session_abort(struct venus_inst *inst)
{
	struct venus_hfi_device *hdev = to_hfi_priv(inst->core);

	venus_flush_debug_queue(hdev);

	return venus_session_cmd(inst, HFI_CMD_SYS_SESSION_ABORT);
}

static int venus_session_flush(struct venus_inst *inst, u32 flush_mode)
{
	struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
	struct hfi_session_flush_pkt pkt;
	int ret;

	ret = pkt_session_flush(&pkt, inst, flush_mode);
	if (ret)
		return ret;

	return venus_iface_cmdq_write(hdev, &pkt);
}

static int venus_session_start(struct venus_inst *inst)
{
	return venus_session_cmd(inst, HFI_CMD_SESSION_START);
}

static int venus_session_stop(struct venus_inst *inst)
{
	return venus_session_cmd(inst, HFI_CMD_SESSION_STOP);
}

static int venus_session_continue(struct venus_inst *inst)
{
	return venus_session_cmd(inst, HFI_CMD_SESSION_CONTINUE);
}

static int venus_session_etb(struct venus_inst *inst,
			     struct hfi_frame_data *in_frame)
{
	struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
	u32 session_type = inst->session_type;
	int ret;

	if (session_type == VIDC_SESSION_TYPE_DEC) {
		struct hfi_session_empty_buffer_compressed_pkt pkt;

		ret = pkt_session_etb_decoder(&pkt, inst, in_frame);
		if (ret)
			return ret;

		ret = venus_iface_cmdq_write(hdev, &pkt);
	} else if (session_type == VIDC_SESSION_TYPE_ENC) {
		struct hfi_session_empty_buffer_uncompressed_plane0_pkt pkt;

		ret = pkt_session_etb_encoder(&pkt, inst, in_frame);
		if (ret)
			return ret;

		ret = venus_iface_cmdq_write(hdev, &pkt);
	} else {
		ret = -EINVAL;
	}

	return ret;
}

static int venus_session_ftb(struct venus_inst *inst,
			     struct hfi_frame_data *out_frame)
{
	struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
	struct hfi_session_fill_buffer_pkt pkt;
	int ret;

	ret = pkt_session_ftb(&pkt, inst, out_frame);
	if (ret)
		return ret;

	return venus_iface_cmdq_write(hdev, &pkt);
}

static int venus_session_set_buffers(struct venus_inst *inst,
				     struct hfi_buffer_desc *bd)
{
	struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
	struct hfi_session_set_buffers_pkt *pkt;
	u8 packet[IFACEQ_VAR_LARGE_PKT_SIZE];
	int ret;

	if (bd->buffer_type == HFI_BUFFER_INPUT)
		return 0;

	pkt = (struct hfi_session_set_buffers_pkt *)packet;

	ret = pkt_session_set_buffers(pkt, inst, bd);
	if (ret)
		return ret;

	return venus_iface_cmdq_write(hdev, pkt);
}

static int venus_session_unset_buffers(struct venus_inst *inst,
				       struct hfi_buffer_desc *bd)
{
	struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
	struct hfi_session_release_buffer_pkt *pkt;
	u8 packet[IFACEQ_VAR_LARGE_PKT_SIZE];
	int ret;

	if (bd->buffer_type == HFI_BUFFER_INPUT)
		return 0;

	pkt = (struct hfi_session_release_buffer_pkt *)packet;

	ret = pkt_session_unset_buffers(pkt, inst, bd);
	if (ret)
		return ret;

	return venus_iface_cmdq_write(hdev, pkt);
}

static int venus_session_load_res(struct venus_inst *inst)
{
	return venus_session_cmd(inst, HFI_CMD_SESSION_LOAD_RESOURCES);
}

static int venus_session_release_res(struct venus_inst *inst)
{
	return venus_session_cmd(inst, HFI_CMD_SESSION_RELEASE_RESOURCES);
}

static int venus_session_parse_seq_hdr(struct venus_inst *inst, u32 seq_hdr,
				       u32 seq_hdr_len)
{
	struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
	struct hfi_session_parse_sequence_header_pkt *pkt;
	u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
	int ret;

	pkt = (struct hfi_session_parse_sequence_header_pkt *)packet;

	ret = pkt_session_parse_seq_header(pkt, inst, seq_hdr, seq_hdr_len);
	if (ret)
		return ret;

	ret = venus_iface_cmdq_write(hdev, pkt);
	if (ret)
		return ret;

	return 0;
}

static int venus_session_get_seq_hdr(struct venus_inst *inst, u32 seq_hdr,
				     u32 seq_hdr_len)
{
	struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
	struct hfi_session_get_sequence_header_pkt *pkt;
	u8 packet[IFACEQ_VAR_SMALL_PKT_SIZE];
	int ret;

	pkt = (struct hfi_session_get_sequence_header_pkt *)packet;

	ret = pkt_session_get_seq_hdr(pkt, inst, seq_hdr, seq_hdr_len);
	if (ret)
		return ret;

	return venus_iface_cmdq_write(hdev, pkt);
}

static int venus_session_set_property(struct venus_inst *inst, u32 ptype,
				      void *pdata)
{
	struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
	struct hfi_session_set_property_pkt *pkt;
	u8 packet[IFACEQ_VAR_LARGE_PKT_SIZE];
	int ret;

	pkt = (struct hfi_session_set_property_pkt *)packet;

	ret = pkt_session_set_property(pkt, inst, ptype, pdata);
	if (ret)
		return ret;

	return venus_iface_cmdq_write(hdev, pkt);
}

static int venus_session_get_property(struct venus_inst *inst, u32 ptype)
{
	struct venus_hfi_device *hdev = to_hfi_priv(inst->core);
	struct hfi_session_get_property_pkt pkt;
	int ret;

	ret = pkt_session_get_property(&pkt, inst, ptype);
	if (ret)
		return ret;

	return venus_iface_cmdq_write(hdev, &pkt);
}

static int venus_resume(struct venus_core *core)
{
	struct venus_hfi_device *hdev = to_hfi_priv(core);
	int ret = 0;

	mutex_lock(&hdev->lock);

	if (!hdev->suspended)
		goto unlock;

	ret = venus_power_on(hdev);

unlock:
	if (!ret)
		hdev->suspended = false;

	mutex_unlock(&hdev->lock);

	return ret;
}

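/*
 * Power-collapse sequence for Venus 1xx: send a PC_PREP command and wait
 * for its acknowledgement, then verify that the shared queues are empty
 * and that the firmware reports PC_READY before switching the power off.
 */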
static int venus_suspend_1xx(struct venus_core *core)
{
	struct venus_hfi_device *hdev = to_hfi_priv(core);
	struct device *dev = core->dev;
	u32 ctrl_status;
	int ret;

	if (!hdev->power_enabled || hdev->suspended)
		return 0;

	mutex_lock(&hdev->lock);
	ret = venus_is_valid_state(hdev);
	mutex_unlock(&hdev->lock);

	if (!ret) {
		dev_err(dev, "bad state, cannot suspend\n");
		return -EINVAL;
	}

	ret = venus_prepare_power_collapse(hdev, true);
	if (ret) {
		dev_err(dev, "prepare for power collapse fail (%d)\n", ret);
		return ret;
	}

	mutex_lock(&hdev->lock);

	if (hdev->last_packet_type != HFI_CMD_SYS_PC_PREP) {
		mutex_unlock(&hdev->lock);
		return -EINVAL;
	}

	ret = venus_are_queues_empty(hdev);
	if (ret < 0 || !ret) {
		mutex_unlock(&hdev->lock);
		return -EINVAL;
	}

	ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0);
	if (!(ctrl_status & CPU_CS_SCIACMDARG0_PC_READY)) {
		mutex_unlock(&hdev->lock);
		return -EINVAL;
	}

	ret = venus_power_off(hdev);
	if (ret) {
		mutex_unlock(&hdev->lock);
		return ret;
	}

	hdev->suspended = true;

	mutex_unlock(&hdev->lock);

	return 0;
}

static int venus_suspend_3xx(struct venus_core *core)
{
	struct venus_hfi_device *hdev = to_hfi_priv(core);
	struct device *dev = core->dev;
	u32 ctrl_status, wfi_status;
	int ret;
	int cnt = 100;

	if (!hdev->power_enabled || hdev->suspended)
		return 0;

	mutex_lock(&hdev->lock);
	ret = venus_is_valid_state(hdev);
	mutex_unlock(&hdev->lock);

	if (!ret) {
		dev_err(dev, "bad state, cannot suspend\n");
		return -EINVAL;
	}

	ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0);
	if (!(ctrl_status & CPU_CS_SCIACMDARG0_PC_READY)) {
		wfi_status = venus_readl(hdev, WRAPPER_CPU_STATUS);
		ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0);

		ret = venus_prepare_power_collapse(hdev, false);
		if (ret) {
			dev_err(dev, "prepare for power collapse fail (%d)\n",
				ret);
			return ret;
		}

		cnt = 100;
		while (cnt--) {
			wfi_status = venus_readl(hdev, WRAPPER_CPU_STATUS);
			ctrl_status = venus_readl(hdev, CPU_CS_SCIACMDARG0);
			if (ctrl_status & CPU_CS_SCIACMDARG0_PC_READY &&
			    wfi_status & BIT(0))
				break;
			usleep_range(1000, 1500);
		}
	}

	mutex_lock(&hdev->lock);

	ret = venus_power_off(hdev);
	if (ret) {
		dev_err(dev, "venus_power_off (%d)\n", ret);
		mutex_unlock(&hdev->lock);
		return ret;
	}

	hdev->suspended = true;

	mutex_unlock(&hdev->lock);

	return 0;
}

static int venus_suspend(struct venus_core *core)
{
	if (core->res->hfi_version == HFI_VERSION_3XX)
		return venus_suspend_3xx(core);

	return venus_suspend_1xx(core);
}

static const struct hfi_ops venus_hfi_ops = {
	.core_init = venus_core_init,
	.core_deinit = venus_core_deinit,
	.core_ping = venus_core_ping,
	.core_trigger_ssr = venus_core_trigger_ssr,

	.session_init = venus_session_init,
	.session_end = venus_session_end,
	.session_abort = venus_session_abort,
	.session_flush = venus_session_flush,
	.session_start = venus_session_start,
	.session_stop = venus_session_stop,
	.session_continue = venus_session_continue,
	.session_etb = venus_session_etb,
	.session_ftb = venus_session_ftb,
	.session_set_buffers = venus_session_set_buffers,
	.session_unset_buffers = venus_session_unset_buffers,
	.session_load_res = venus_session_load_res,
	.session_release_res = venus_session_release_res,
	.session_parse_seq_hdr = venus_session_parse_seq_hdr,
	.session_get_seq_hdr = venus_session_get_seq_hdr,
	.session_set_property = venus_session_set_property,
	.session_get_property = venus_session_get_property,

	.resume = venus_resume,
	.suspend = venus_suspend,

	.isr = venus_isr,
	.isr_thread = venus_isr_thread,
};

void venus_hfi_destroy(struct venus_core *core)
{
	struct venus_hfi_device *hdev = to_hfi_priv(core);

	venus_interface_queues_release(hdev);
	mutex_destroy(&hdev->lock);
	kfree(hdev);
	core->priv = NULL;
	core->ops = NULL;
}

int venus_hfi_create(struct venus_core *core)
{
	struct venus_hfi_device *hdev;
	int ret;

	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return -ENOMEM;

	mutex_init(&hdev->lock);

	hdev->core = core;
	hdev->suspended = true;
	core->priv = hdev;
	core->ops = &venus_hfi_ops;
	core->core_caps = ENC_ROTATION_CAPABILITY | ENC_SCALING_CAPABILITY |
			  ENC_DEINTERLACE_CAPABILITY |
			  DEC_MULTI_STREAM_CAPABILITY;

	ret = venus_interface_queues_init(hdev);
	if (ret)
		goto err_kfree;

	return 0;

err_kfree:
	kfree(hdev);
	core->priv = NULL;
	core->ops = NULL;
	return ret;
}