// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2022 HabanaLabs, Ltd.
 */

/*
 * Gaudi security scheme:
 *
 *	- Range registers
 *	- MMU
 *
 *	- Range registers (protect the first 512MB)
 *
 *	- Range registers
 *	- Protection bits
 *
 *	- DMA is not secured.
 *	- PQ and CQ are secured.
 *	- CP is secured: The driver needs to parse CB but WREG should be allowed
 *
 *	- Clear SRAM on context switch (happens on context switch when device is
 *	- MMU page tables area clear (happens on init)
 *
 * QMAN DMA 2-7, TPC, MME, NIC:
 */
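/*
 * Illustrative summary (not driver code) of the QMAN protection split
 * described in the comment above, written out as a table; the struct and
 * array names here are hypothetical.
 */
struct qman_prot_summary {
	const char *qmans;
	bool dma_secured, pq_secured, cq_secured, cp_secured;
};

static const struct qman_prot_summary gaudi_qman_prot[] = {
	/*
	 * Per the scheme above: DMA itself is not secured, PQ and CQ are
	 * secured, and CP is secured except that WREG stays allowed while
	 * the driver parses the CB.
	 */
	{ "QMAN DMA (PCI)", false, true, true, true },
	/* QMAN DMA 2-7, TPC, MME, NIC: details elided in this excerpt */
};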
#define GAUDI_BOOT_FIT_FILE	"habanalabs/gaudi/gaudi-boot-fit.itb"
#define GAUDI_LINUX_FW_FILE	"habanalabs/gaudi/gaudi-fit.itb"

#define GAUDI_RESET_TIMEOUT_MSEC	2000		/* 2000ms */
#define GAUDI_RESET_WAIT_MSEC		1		/* 1ms */
#define GAUDI_CPU_RESET_WAIT_MSEC	200		/* 200ms */
#define GAUDI_TEST_QUEUE_WAIT_USEC	100000		/* 100ms */
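/*
 * A minimal sketch (not from this file) of how such timeout constants are
 * typically consumed, assuming the habanalabs driver's common
 * hl_poll_timeout() helper: poll a register until a condition hits or the
 * timeout expires, sleeping GAUDI_RESET_WAIT_MSEC between reads. The
 * function name and the polled condition are illustrative.
 */
static int gaudi_wait_for_reset_done_example(struct hl_device *hdev, u32 reg)
{
	u32 status;

	return hl_poll_timeout(
		hdev,
		reg,
		status,
		(status == 0),				/* illustrative condition */
		GAUDI_RESET_WAIT_MSEC * 1000,		/* sleep between polls, us */
		GAUDI_RESET_TIMEOUT_MSEC * 1000);	/* total budget, us */
}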
	[SP_NEXT_TPC] = mmTPC1_QM_GLBL_CFG0 - mmTPC0_QM_GLBL_CFG0,
	[SP_NEXT_MME] = mmMME2_QM_GLBL_CFG0 - mmMME0_QM_GLBL_CFG0,
	[SP_DMA_QUEUES_OFFSET] = mmDMA1_QM_GLBL_CFG0 - mmDMA0_QM_GLBL_CFG0,
			mmDMA0_QM_CP_FENCE0_CNT_0 - mmDMA0_QM_GLBL_CFG0,
			mmDMA0_QM_CP_FENCE0_RDATA_0 - mmDMA0_QM_GLBL_CFG0,
	[SP_CP_STS_OFFSET] = mmDMA0_QM_CP_STS_0 - mmDMA0_QM_GLBL_CFG0,
/* set_default_power_values() */
	struct asic_fixed_properties *prop = &hdev->asic_prop;

	if (hdev->card_type == cpucp_card_type_pmc) {
		prop->max_power_default = MAX_POWER_DEFAULT_PMC;

		if (prop->fw_security_enabled)
			prop->dc_power_default = DC_POWER_DEFAULT_PMC_SEC;
		else
			prop->dc_power_default = DC_POWER_DEFAULT_PMC;
	} else {
		prop->max_power_default = MAX_POWER_DEFAULT_PCI;
		prop->dc_power_default = DC_POWER_DEFAULT_PCI;
	}
/* gaudi_set_fixed_properties() */
	struct asic_fixed_properties *prop = &hdev->asic_prop;

	prop->max_queues = GAUDI_QUEUE_ID_SIZE;
	prop->hw_queues_props = kcalloc(prop->max_queues,

	if (!prop->hw_queues_props)
		return -ENOMEM;

	for (i = 0 ; i < prop->max_queues ; i++) {

			prop->hw_queues_props[i].type = QUEUE_TYPE_EXT;
			prop->hw_queues_props[i].driver_only = 0;
			prop->hw_queues_props[i].supports_sync_stream = 1;
			prop->hw_queues_props[i].cb_alloc_flags =

			prop->hw_queues_props[i].type = QUEUE_TYPE_CPU;
			prop->hw_queues_props[i].driver_only = 1;
			prop->hw_queues_props[i].supports_sync_stream = 0;
			prop->hw_queues_props[i].cb_alloc_flags =

			prop->hw_queues_props[i].type = QUEUE_TYPE_INT;
			prop->hw_queues_props[i].driver_only = 0;
			prop->hw_queues_props[i].supports_sync_stream = 0;
			prop->hw_queues_props[i].cb_alloc_flags =

		prop->hw_queues_props[i].collective_mode =

	prop->cache_line_size = DEVICE_CACHE_LINE_SIZE;
	prop->cfg_base_address = CFG_BASE;
	prop->device_dma_offset_for_host_access = HOST_PHYS_BASE;
	prop->host_base_address = HOST_PHYS_BASE;
	prop->host_end_address = prop->host_base_address + HOST_PHYS_SIZE;
	prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES;
	prop->completion_mode = HL_COMPLETION_MODE_JOB;
	prop->collective_first_sob = 0;
	prop->collective_first_mon = 0;

	prop->sync_stream_first_sob =

	prop->sync_stream_first_mon =

	prop->dram_base_address = DRAM_PHYS_BASE;
	prop->dram_size = GAUDI_HBM_SIZE_32GB;
	prop->dram_end_address = prop->dram_base_address + prop->dram_size;
	prop->dram_user_base_address = DRAM_BASE_ADDR_USER;

	prop->sram_base_address = SRAM_BASE_ADDR;
	prop->sram_size = SRAM_SIZE;
	prop->sram_end_address = prop->sram_base_address + prop->sram_size;
	prop->sram_user_base_address =
			prop->sram_base_address + SRAM_USER_BASE_OFFSET;

	prop->mmu_cache_mng_addr = MMU_CACHE_MNG_ADDR;
	prop->mmu_cache_mng_size = MMU_CACHE_MNG_SIZE;

	prop->mmu_pgt_addr = MMU_PAGE_TABLES_ADDR;
	if (hdev->pldm)
		prop->mmu_pgt_size = 0x800000; /* 8MB */
	else
		prop->mmu_pgt_size = MMU_PAGE_TABLES_SIZE;
	prop->mmu_pte_size = HL_PTE_SIZE;
	prop->mmu_hop_table_size = HOP_TABLE_SIZE_512_PTE;
	prop->mmu_hop0_tables_total_size = HOP0_512_PTE_TABLES_TOTAL_SIZE;
	prop->dram_page_size = PAGE_SIZE_2MB;
	prop->device_mem_alloc_default_page_size = prop->dram_page_size;
	prop->dram_supports_virtual_memory = false;

	prop->pmmu.hop_shifts[MMU_HOP0] = MMU_V1_1_HOP0_SHIFT;
	prop->pmmu.hop_shifts[MMU_HOP1] = MMU_V1_1_HOP1_SHIFT;
	prop->pmmu.hop_shifts[MMU_HOP2] = MMU_V1_1_HOP2_SHIFT;
	prop->pmmu.hop_shifts[MMU_HOP3] = MMU_V1_1_HOP3_SHIFT;
	prop->pmmu.hop_shifts[MMU_HOP4] = MMU_V1_1_HOP4_SHIFT;
	prop->pmmu.hop_masks[MMU_HOP0] = MMU_V1_1_HOP0_MASK;
	prop->pmmu.hop_masks[MMU_HOP1] = MMU_V1_1_HOP1_MASK;
	prop->pmmu.hop_masks[MMU_HOP2] = MMU_V1_1_HOP2_MASK;
	prop->pmmu.hop_masks[MMU_HOP3] = MMU_V1_1_HOP3_MASK;
	prop->pmmu.hop_masks[MMU_HOP4] = MMU_V1_1_HOP4_MASK;
	prop->pmmu.start_addr = VA_HOST_SPACE_START;
	prop->pmmu.end_addr =
			(VA_HOST_SPACE_START + VA_HOST_SPACE_SIZE / 2) - 1;
	prop->pmmu.page_size = PAGE_SIZE_4KB;
	prop->pmmu.num_hops = MMU_ARCH_5_HOPS;
	prop->pmmu.last_mask = LAST_MASK;
	/* TODO: will be duplicated until implementing per-MMU props */
	prop->pmmu.hop_table_size = prop->mmu_hop_table_size;
	prop->pmmu.hop0_tables_total_size = prop->mmu_hop0_tables_total_size;

	memcpy(&prop->pmmu_huge, &prop->pmmu, sizeof(prop->pmmu));
	prop->pmmu_huge.page_size = PAGE_SIZE_2MB;

	memcpy(&prop->dmmu, &prop->pmmu, sizeof(prop->pmmu));
	prop->dmmu.start_addr = (VA_HOST_SPACE_START + VA_HOST_SPACE_SIZE / 2);
	prop->dmmu.end_addr = VA_HOST_SPACE_END;
	prop->dmmu.page_size = PAGE_SIZE_2MB;

	prop->cfg_size = CFG_SIZE;
	prop->max_asid = MAX_ASID;
	prop->num_of_events = GAUDI_EVENT_SIZE;
	prop->max_num_of_engines = GAUDI_ENGINE_ID_SIZE;
	prop->tpc_enabled_mask = TPC_ENABLED_MASK;

	prop->cb_pool_cb_cnt = GAUDI_CB_POOL_CB_CNT;
	prop->cb_pool_cb_size = GAUDI_CB_POOL_CB_SIZE;

	prop->pcie_dbi_base_address = mmPCIE_DBI_BASE;
	prop->pcie_aux_dbi_reg_addr = CFG_BASE + mmPCIE_AUX_DBI;

	strncpy(prop->cpucp_info.card_name, GAUDI_DEFAULT_CARD_NAME,

	prop->max_pending_cs = GAUDI_MAX_PENDING_CS;

	prop->first_available_user_sob[HL_GAUDI_WS_DCORE] =
			prop->sync_stream_first_sob +
	prop->first_available_user_mon[HL_GAUDI_WS_DCORE] =
			prop->sync_stream_first_mon +

	prop->first_available_user_interrupt = USHRT_MAX;
	prop->tpc_interrupt_id = USHRT_MAX;

	prop->eq_interrupt_id = 0;

		prop->first_available_cq[i] = USHRT_MAX;

	prop->fw_cpu_boot_dev_sts0_valid = false;
	prop->fw_cpu_boot_dev_sts1_valid = false;
	prop->hard_reset_done_by_fw = false;
	prop->gic_interrupts_enable = true;

	prop->server_type = HL_SERVER_TYPE_UNKNOWN;

	prop->clk_pll_index = HL_GAUDI_MME_PLL;
	prop->max_freq_value = GAUDI_MAX_CLK_FREQ;

	prop->use_get_power_for_reset_history = true;

	prop->configurable_stop_on_err = true;

	prop->set_max_power_on_device_init = true;

	prop->dma_mask = 48;

	prop->hbw_flush_reg = mmPCIE_WRAP_RR_ELBI_RD_SEC_REG_CTRL;
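/*
 * Worked layout (illustrative, derived from the assignments above): the host
 * VA space is split evenly between the PMMU and the DMMU, so with
 * VA_HOST_SPACE_START/VA_HOST_SPACE_SIZE the ranges work out to:
 *
 *	pmmu: [VA_HOST_SPACE_START, VA_HOST_SPACE_START + SIZE/2 - 1], 4KB pages
 *	dmmu: [VA_HOST_SPACE_START + SIZE/2, VA_HOST_SPACE_END], 2MB pages
 *
 * pmmu_huge covers the same range as pmmu but with 2MB pages instead of 4KB.
 */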
/* gaudi_pci_bars_map() */
	hdev->rmmio = hdev->pcie_bar[CFG_BAR_ID] +
			(CFG_BASE - SPI_FLASH_BASE_ADDR);

/* gaudi_set_hbm_bar_base() */
	struct gaudi_device *gaudi = hdev->asic_specific;

	if ((gaudi) && (gaudi->hbm_bar_cur_addr == addr))

	if (hdev->asic_prop.iatu_done_by_fw)

	/* Inbound Region 2 - Bar 4 - Point to HBM */

	old_addr = gaudi->hbm_bar_cur_addr;
	gaudi->hbm_bar_cur_addr = addr;

/* gaudi_init_iatu() */
	if (hdev->asic_prop.iatu_done_by_fw)

	/* Inbound Region 0 - Bar 0 - Point to SRAM + CFG */

	/* Inbound Region 1 - Bar 2 - Point to SPI FLASH */

	/* Inbound Region 2 - Bar 4 - Point to HBM */

	/* Outbound Region 0 - Point to Host */
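/*
 * A sketch of what one of the region comments above corresponds to, assuming
 * the driver's common hl_pci_set_inbound_region() helper and its
 * hl_inbound_pci_region parameter struct (helper and field names per the
 * shared habanalabs PCI code; treat them as assumptions here).
 */
static int gaudi_init_iatu_sketch(struct hl_device *hdev)
{
	struct hl_inbound_pci_region inbound_region;

	/* Inbound Region 0 - Bar 0 - Point to SRAM + CFG */
	inbound_region.mode = PCI_BAR_MATCH_MODE;	/* match by BAR, not address */
	inbound_region.bar = SRAM_BAR_ID;
	inbound_region.addr = SRAM_BASE_ADDR;

	return hl_pci_set_inbound_region(hdev, 0, &inbound_region);
}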
/* gaudi_early_init() */
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct pci_dev *pdev = hdev->pdev;

		dev_err(hdev->dev, "Failed setting fixed properties\n");

		dev_err(hdev->dev, "Not " HL_NAME "? BAR %d size %pa, expecting %llu\n",
		rc = -ENODEV;

		dev_err(hdev->dev, "Not " HL_NAME "? BAR %d size %pa, expecting %llu\n",
		rc = -ENODEV;

	prop->dram_pci_bar_size = pci_resource_len(pdev, HBM_BAR_ID);
	hdev->dram_pci_bar_start = pci_resource_start(pdev, HBM_BAR_ID);

	if (hdev->asic_prop.fw_security_enabled) {
		hdev->asic_prop.iatu_done_by_fw = true;

		/*
		 * GIC-security-bit can ONLY be set by CPUCP, so in this stage
		 */
		hdev->asic_prop.gic_interrupts_enable = false;

		hdev->asic_prop.iatu_done_by_fw = true;

	/*
	 * version to determine whether we run with a security-enabled firmware
	 */
		if (hdev->reset_on_preboot_fail)
			hdev->asic_funcs->hw_fini(hdev, true, false);

		dev_dbg(hdev->dev, "H/W state is dirty, must reset before initializing\n");
		rc = hdev->asic_funcs->hw_fini(hdev, true, false);

			dev_err(hdev->dev, "failed to reset HW in dirty state (%d)\n", rc);

	kfree(hdev->asic_prop.hw_queues_props);

/* gaudi_early_fini() */
	kfree(hdev->asic_prop.hw_queues_props);
/*
 * gaudi_fetch_psoc_frequency - Fetch PSOC frequency values
 */
	struct asic_fixed_properties *prop = &hdev->asic_prop;

	if ((hdev->fw_components & FW_TYPE_LINUX) &&
			(prop->fw_app_cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_PLL_INFO_EN)) {
		struct gaudi_device *gaudi = hdev->asic_specific;

		if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q))

		dev_warn(hdev->dev, "Received invalid div select value: %#x", div_sel);

	prop->psoc_timestamp_frequency = freq;
	prop->psoc_pci_pll_nr = nr;
	prop->psoc_pci_pll_nf = nf;
	prop->psoc_pci_pll_od = od;
	prop->psoc_pci_pll_div_factor = div_fctr;
/* _gaudi_init_tpc_mem() */
	struct asic_fixed_properties *prop = &hdev->asic_prop;

		return -EFAULT;

	init_tpc_mem_pkt = cb->kernel_address;

	init_tpc_mem_pkt->tsize = cpu_to_le32(tpc_kernel_size);

	init_tpc_mem_pkt->ctl = cpu_to_le32(ctl);

	init_tpc_mem_pkt->src_addr = cpu_to_le64(tpc_kernel_src_addr);

			round_up(prop->sram_user_base_address, SZ_8K));
	init_tpc_mem_pkt->dst_addr |= cpu_to_le64(dst_addr);

		dev_err(hdev->dev, "Failed to allocate a new job\n");
		rc = -ENOMEM;

	job->id = 0;
	job->user_cb = cb;
	atomic_inc(&job->user_cb->cs_cnt);
	job->user_cb_size = cb_size;
	job->hw_queue_id = GAUDI_QUEUE_ID_DMA_0_0;
	job->patched_cb = job->user_cb;
	job->job_cb_size = job->user_cb_size + sizeof(struct packet_msg_prot);

	hl_userptr_delete_list(hdev, &job->userptr_list);

	atomic_dec(&cb->cs_cnt);

	hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle);
/*
 * gaudi_init_tpc_mem() - Initialize TPC memories.
 */
	rc = request_firmware(&fw, GAUDI_TPC_FW_FILE, hdev->dev);
	if (rc == -EINTR && count-- > 0) {

		dev_err(hdev->dev, "Failed to load firmware file %s\n",

	fw_size = fw->size;

		dev_err(hdev->dev,

		rc = -ENOMEM;

	memcpy(cpu_addr, fw->data, fw_size);

	hl_asic_dma_free_coherent(hdev, fw->size, cpu_addr, dma_handle);
/* gaudi_collective_map_sobs() */
	struct gaudi_device *gaudi = hdev->asic_specific;
	struct gaudi_collective_properties *prop = &gaudi->collective_props;

			stream * HL_RSVD_SOBS + prop->curr_sob_group_idx[stream];
	sob_id = prop->hw_sob_group[sob_group_id].base_sob_id;

		q = &hdev->kernel_queues[queue_id + (4 * i)];
		q->sync_stream_prop.collective_sob_id = sob_id + i;

	q = &hdev->kernel_queues[queue_id];
	q->sync_stream_prop.collective_sob_id =

	q = &hdev->kernel_queues[queue_id];
	q->sync_stream_prop.collective_sob_id =

/* gaudi_sob_group_hw_reset() */
	struct hl_device *hdev = hw_sob_group->hdev;

			(hw_sob_group->base_sob_id * 4) + (i * 4)), 0);

	kref_init(&hw_sob_group->kref);

/* gaudi_sob_group_reset_error() */
	struct hl_device *hdev = hw_sob_group->hdev;

	dev_crit(hdev->dev,
		hw_sob_group->base_sob_id);

/* gaudi_collective_mstr_sob_mask_set() */
	prop = &gaudi->collective_props;

	memset(prop->mstr_sob_mask, 0, sizeof(prop->mstr_sob_mask));

		if (gaudi->hw_cap_initialized & BIT(HW_CAP_NIC_SHIFT + i))
			prop->mstr_sob_mask[i / HL_MAX_SOBS_PER_MONITOR] |=

	prop->mstr_sob_mask[i / HL_MAX_SOBS_PER_MONITOR] |=

/* gaudi_collective_init() */
	gaudi = hdev->asic_specific;
	prop = &gaudi->collective_props;
	sob_id = hdev->asic_prop.collective_first_sob;

		prop->hw_sob_group[i].hdev = hdev;
		prop->hw_sob_group[i].base_sob_id = sob_id;

		gaudi_sob_group_hw_reset(&prop->hw_sob_group[i].kref);

		prop->next_sob_group_val[i] = 1;
		prop->curr_sob_group_idx[i] = 0;
/* gaudi_reset_sob_group() */
	struct gaudi_device *gaudi = hdev->asic_specific;
	struct gaudi_collective_properties *cprop = &gaudi->collective_props;

	kref_put(&cprop->hw_sob_group[sob_group].kref,

/* gaudi_collective_master_init_job() */
	gaudi = hdev->asic_specific;
	cprop = &gaudi->collective_props;
	queue_id = job->hw_queue_id;
	prop = &hdev->kernel_queues[queue_id].sync_stream_prop;

			cprop->hw_sob_group[sob_group_offset].base_sob_id;
	master_monitor = prop->collective_mstr_mon_id[0];

	cprop->hw_sob_group[sob_group_offset].queue_id = queue_id;

	dev_dbg(hdev->dev,
		master_sob_base, cprop->mstr_sob_mask[0],
		cprop->next_sob_group_val[stream],

	wait_prop.data = (void *) job->patched_cb;
	wait_prop.sob_mask = cprop->mstr_sob_mask[0];
	wait_prop.sob_val = cprop->next_sob_group_val[stream];

	master_monitor = prop->collective_mstr_mon_id[1];

	dev_dbg(hdev->dev,
		master_sob_base, cprop->mstr_sob_mask[1],
		cprop->next_sob_group_val[stream],

	wait_prop.sob_mask = cprop->mstr_sob_mask[1];

/* gaudi_collective_slave_init_job() */
	queue_id = job->hw_queue_id;
	prop = &hdev->kernel_queues[queue_id].sync_stream_prop;

	if (job->cs->encaps_signals) {
		hl_hw_queue_encaps_sig_set_sob_info(hdev, job->cs, job,

		dev_dbg(hdev->dev, "collective wait: Sequence %llu found, sob_id: %u, wait for sob_val: %u\n",
			job->cs->sequence,
			cs_cmpl->hw_sob->sob_id,
			cs_cmpl->sob_val);

	wait_prop.data = (void *) job->user_cb;
	wait_prop.sob_base = cs_cmpl->hw_sob->sob_id;
	wait_prop.sob_val = cs_cmpl->sob_val;
	wait_prop.mon_id = prop->collective_slave_mon_id;

	dev_dbg(hdev->dev,
		cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val,
		prop->collective_slave_mon_id, queue_id);

	dev_dbg(hdev->dev,
		prop->collective_sob_id, queue_id);

	cb_size += gaudi_gen_signal_cb(hdev, job->user_cb,
			prop->collective_sob_id, cb_size, false);
/* gaudi_collective_wait_init_cs() */
		container_of(cs->signal_fence, struct hl_cs_compl, base_fence);
		container_of(cs->fence, struct hl_cs_compl, base_fence);
	struct hl_cs_encaps_sig_handle *handle = cs->encaps_sig_hdl;

	ctx = cs->ctx;
	hdev = ctx->hdev;
	gaudi = hdev->asic_specific;
	cprop = &gaudi->collective_props;

	if (cs->encaps_signals) {
		cs_cmpl->hw_sob = handle->hw_sob;

		cs_cmpl->sob_val = 0;

		cs_cmpl->hw_sob = signal_cs_cmpl->hw_sob;
		cs_cmpl->sob_val = signal_cs_cmpl->sob_val;

	spin_lock(&signal_cs_cmpl->lock);

	if (completion_done(&cs->signal_fence->completion)) {
		spin_unlock(&signal_cs_cmpl->lock);
		return -EINVAL;
	}

	kref_get(&cs_cmpl->hw_sob->kref);

	spin_unlock(&signal_cs_cmpl->lock);

	job = list_first_entry(&cs->job_list, struct hl_cs_job, cs_node);
	stream = job->hw_queue_id % 4;
			stream * HL_RSVD_SOBS + cprop->curr_sob_group_idx[stream];

	list_for_each_entry(job, &cs->job_list, cs_node) {
		queue_id = job->hw_queue_id;

		if (hdev->kernel_queues[queue_id].collective_mode ==

	cs_cmpl->sob_group = sob_group_offset;

	kref_get(&cprop->hw_sob_group[sob_group_offset].kref);
	cprop->next_sob_group_val[stream]++;

	if (cprop->next_sob_group_val[stream] == HL_MAX_SOB_VAL) {

		kref_put(&cprop->hw_sob_group[sob_group_offset].kref,
		cprop->next_sob_group_val[stream] = 1;

		cprop->curr_sob_group_idx[stream] =
				(cprop->curr_sob_group_idx[stream] + 1) &
				(HL_RSVD_SOBS - 1);

		dev_dbg(hdev->dev, "switched to SOB group %d, stream: %d\n",
			cprop->curr_sob_group_idx[stream], stream);

	hl_fence_put(cs->signal_fence);
	cs->signal_fence = NULL;
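/*
 * Worked example (illustrative): with HL_RSVD_SOBS a power of two, the
 * "& (HL_RSVD_SOBS - 1)" above is a cheap modulo, so the per-stream group
 * index cycles 0, 1, ..., HL_RSVD_SOBS - 1, 0, ... while
 * next_sob_group_val restarts from 1 once it reaches HL_MAX_SOB_VAL.
 */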
/* gaudi_get_patched_cb_extra_size() */
	return cacheline_end - user_cb_size + additional_commands;
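/*
 * Worked arithmetic (assuming cacheline_end is user_cb_size rounded up to
 * the device cache line): with a 128-byte cache line, user_cb_size = 100 and
 * additional_commands = 32, the patched CB needs 128 - 100 + 32 = 60 extra
 * bytes, i.e. pad to the cache-line boundary and then append the extra
 * commands, so the appended commands start past the padded user payload.
 */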
/* gaudi_collective_wait_create_job() */
	cntr = &hdev->aggregated_cs_counters;

	hw_queue_prop = &hdev->asic_prop.hw_queues_props[queue_id];
	job = hl_cs_allocate_job(hdev, hw_queue_prop->type, true);

		atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
		atomic64_inc(&cntr->out_of_mem_drop_cnt);
		dev_err(hdev->dev, "Failed to allocate a new job\n");
		return -ENOMEM;

		atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
		atomic64_inc(&cntr->out_of_mem_drop_cnt);

		return -EFAULT;

	job->id = 0;
	job->cs = cs;
	job->user_cb = cb;
	atomic_inc(&job->user_cb->cs_cnt);
	job->user_cb_size = cb_size;
	job->hw_queue_id = queue_id;

	if (cs->encaps_signals)
		job->encaps_sig_wait_offset = encaps_signal_offset;

	/*
	 * We call hl_cb_destroy() out of two reasons - we don't need
	 */
		job->patched_cb = job->user_cb;

		job->patched_cb = NULL;

	job->job_cb_size = job->user_cb_size;
	hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle);

	if (hw_queue_prop->type == QUEUE_TYPE_EXT)

	cs->jobs_in_queue_cnt[job->hw_queue_id]++;

	list_add_tail(&job->cs_node, &cs->job_list);
/* gaudi_collective_wait_create_jobs() */
	struct gaudi_device *gaudi = hdev->asic_specific;

	hw_queue_prop = &hdev->asic_prop.hw_queues_props[wait_queue_id];
	if (hw_queue_prop->collective_mode != HL_COLLECTIVE_MASTER) {
		dev_err(hdev->dev,
		return -EINVAL;

		dev_err(hdev->dev,
		return -EINVAL;

	/*
	 * First monitor for NICs 0-7, second monitor for NICs 8-9 and the
	 *
	 * all wait for the user to signal sob 'cs_cmpl->sob_val'.
	 */

	if (gaudi->hw_cap_initialized &
/* gaudi_late_init() */
	struct gaudi_device *gaudi = hdev->asic_specific;

	rc = gaudi->cpucp_info_get(hdev);

		dev_err(hdev->dev, "Failed to get cpucp info\n");

	if ((hdev->card_type == cpucp_card_type_pci) &&
			(hdev->nic_ports_mask & 0x3)) {
		dev_info(hdev->dev,
		hdev->nic_ports_mask &= ~0x3;

	gaudi->hw_cap_initialized &= ~(HW_CAP_NIC0 | HW_CAP_NIC1);

		dev_err(hdev->dev, "Failed to enable PCI access from CPU\n");

	rc = hdev->asic_funcs->scrub_device_mem(hdev);

		dev_err(hdev->dev, "Failed to fetch psoc frequency\n");

		dev_err(hdev->dev, "Failed to clear MMU page tables range\n");

		dev_err(hdev->dev, "Failed to initialize TPC memories\n");

		dev_err(hdev->dev, "Failed to init collective\n");

/* gaudi_alloc_cpu_accessible_dma_mem() */
	/*
	 * The device CPU works with 40-bits addresses, while bit 39 must be set
	 */

		rc = -ENOMEM;

	end_addr = dma_addr_arr[i] + HL_CPU_ACCESSIBLE_MEM_SIZE - 1;

		dev_err(hdev->dev,
		rc = -EFAULT;

	hdev->cpu_accessible_dma_mem = virt_addr_arr[i];
	hdev->cpu_accessible_dma_address = dma_addr_arr[i];
	hdev->cpu_pci_msb_addr =
			GAUDI_CPU_PCI_MSB_ADDR(hdev->cpu_accessible_dma_address);

	if (!hdev->asic_prop.fw_security_enabled)
		GAUDI_PCI_TO_CPU_ADDR(hdev->cpu_accessible_dma_address);
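/*
 * Conceptual sketch only (the real GAUDI_CPU_PCI_MSB_ADDR and
 * GAUDI_PCI_TO_CPU_ADDR macros are defined elsewhere in the driver): since
 * the device CPU works with 40-bit addresses with bit 39 set, the driver
 * saves the PCI address bits above bit 39 and hands the CPU a 40-bit view,
 * roughly:
 *
 *	msb = (u32)(dma_addr >> 39);		// kept in hdev->cpu_pci_msb_addr
 *	cpu_view = (dma_addr & (BIT_ULL(39) - 1)) | BIT_ULL(39);
 */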
/* gaudi_free_internal_qmans_pq_mem() */
	struct gaudi_device *gaudi = hdev->asic_specific;

		q = &gaudi->internal_qmans[i];
		if (!q->pq_kernel_addr)

		hl_asic_dma_free_coherent(hdev, q->pq_size, q->pq_kernel_addr, q->pq_dma_addr);

/* gaudi_alloc_internal_qmans_pq_mem() */
	struct gaudi_device *gaudi = hdev->asic_specific;

		q = &gaudi->internal_qmans[i];

			q->pq_size = HBM_DMA_QMAN_SIZE_IN_BYTES;

			q->pq_size = MME_QMAN_SIZE_IN_BYTES;

			q->pq_size = TPC_QMAN_SIZE_IN_BYTES;

			q->pq_size = NIC_QMAN_SIZE_IN_BYTES;

			dev_err(hdev->dev, "Bad internal queue index %d", i);
			rc = -EINVAL;

		q->pq_kernel_addr = hl_asic_dma_alloc_coherent(hdev, q->pq_size, &q->pq_dma_addr,
		if (!q->pq_kernel_addr) {
			rc = -ENOMEM;
/* gaudi_set_pci_memory_regions() */
	struct asic_fixed_properties *prop = &hdev->asic_prop;

	/* CFG */
	region = &hdev->pci_mem_region[PCI_REGION_CFG];
	region->region_base = CFG_BASE;
	region->region_size = CFG_SIZE;
	region->offset_in_bar = CFG_BASE - SPI_FLASH_BASE_ADDR;
	region->bar_size = CFG_BAR_SIZE;
	region->bar_id = CFG_BAR_ID;
	region->used = 1;

	/* SRAM */
	region = &hdev->pci_mem_region[PCI_REGION_SRAM];
	region->region_base = SRAM_BASE_ADDR;
	region->region_size = SRAM_SIZE;
	region->offset_in_bar = 0;
	region->bar_size = SRAM_BAR_SIZE;
	region->bar_id = SRAM_BAR_ID;
	region->used = 1;

	/* DRAM */
	region = &hdev->pci_mem_region[PCI_REGION_DRAM];
	region->region_base = DRAM_PHYS_BASE;
	region->region_size = hdev->asic_prop.dram_size;
	region->offset_in_bar = 0;
	region->bar_size = prop->dram_pci_bar_size;
	region->bar_id = HBM_BAR_ID;
	region->used = 1;

	/* SP SRAM */
	region = &hdev->pci_mem_region[PCI_REGION_SP_SRAM];
	region->region_base = PSOC_SCRATCHPAD_ADDR;
	region->region_size = PSOC_SCRATCHPAD_SIZE;
	region->offset_in_bar = PSOC_SCRATCHPAD_ADDR - SPI_FLASH_BASE_ADDR;
	region->bar_size = CFG_BAR_SIZE;
	region->bar_id = CFG_BAR_ID;
	region->used = 1;
/* gaudi_sw_init() */
		return -ENOMEM;

		dev_err(hdev->dev,
		rc = -EINVAL;

		gaudi->events[event_id++] =

	gaudi->cpucp_info_get = gaudi_cpucp_info_get;

	hdev->asic_specific = gaudi;

	hdev->dma_pool = dma_pool_create(dev_name(hdev->dev),
			&hdev->pdev->dev, GAUDI_DMA_POOL_BLK_SIZE, 8, 0);
	if (!hdev->dma_pool) {
		dev_err(hdev->dev, "failed to create DMA pool\n");
		rc = -ENOMEM;

	hdev->cpu_accessible_dma_pool = gen_pool_create(ilog2(32), -1);
	if (!hdev->cpu_accessible_dma_pool) {
		dev_err(hdev->dev,
		rc = -ENOMEM;

	rc = gen_pool_add(hdev->cpu_accessible_dma_pool,
			(uintptr_t) hdev->cpu_accessible_dma_mem,
			HL_CPU_ACCESSIBLE_MEM_SIZE, -1);

		dev_err(hdev->dev,
		rc = -EFAULT;

	spin_lock_init(&gaudi->hw_queues_lock);

	hdev->supports_sync_stream = true;
	hdev->supports_coresight = true;
	hdev->supports_staged_submission = true;
	hdev->supports_wait_for_multi_cs = true;

	hdev->asic_funcs->set_pci_memory_regions(hdev);
	hdev->stream_master_qid_arr =
			hdev->asic_funcs->get_stream_master_qid_arr();
	hdev->stream_master_qid_arr_size = GAUDI_STREAM_MASTER_ARR_SIZE;

	gen_pool_destroy(hdev->cpu_accessible_dma_pool);

	if (!hdev->asic_prop.fw_security_enabled)
		GAUDI_CPU_TO_PCI_ADDR(hdev->cpu_accessible_dma_address,
				hdev->cpu_pci_msb_addr);
	hl_asic_dma_free_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE, hdev->cpu_accessible_dma_mem,
			hdev->cpu_accessible_dma_address);

	dma_pool_destroy(hdev->dma_pool);

/* gaudi_sw_fini() */
	struct gaudi_device *gaudi = hdev->asic_specific;

	gen_pool_destroy(hdev->cpu_accessible_dma_pool);

	if (!hdev->asic_prop.fw_security_enabled)
		GAUDI_CPU_TO_PCI_ADDR(hdev->cpu_accessible_dma_address,
				hdev->cpu_pci_msb_addr);

	hl_asic_dma_free_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE, hdev->cpu_accessible_dma_mem,
			hdev->cpu_accessible_dma_address);

	dma_pool_destroy(hdev->dma_pool);
/* gaudi_irq_handler_single() */
	if (hdev->disabled)

	for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
		hl_irq_handler_cq(irq, &hdev->completion_queue[i]);

	hl_irq_handler_eq(irq, &hdev->event_queue);

/* gaudi_pci_irq_vector() */
		dev_crit(hdev->dev, "CPU EQ must use IRQ %d\n",

	return pci_irq_vector(hdev->pdev, msi_vec);

/* gaudi_enable_msi_single() */
	dev_dbg(hdev->dev, "Working in single MSI IRQ mode\n");

		dev_err(hdev->dev,

/* gaudi_enable_msi() */
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (gaudi->hw_cap_initialized & HW_CAP_MSI)

	rc = pci_alloc_irq_vectors(hdev->pdev, 1, 1, PCI_IRQ_MSI);

		dev_err(hdev->dev, "MSI: Failed to enable support %d\n", rc);

	gaudi->hw_cap_initialized |= HW_CAP_MSI;

	pci_free_irq_vectors(hdev->pdev);

/* gaudi_sync_irqs() */
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (!(gaudi->hw_cap_initialized & HW_CAP_MSI))

/* gaudi_disable_msi() */
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (!(gaudi->hw_cap_initialized & HW_CAP_MSI))

	pci_free_irq_vectors(hdev->pdev);

	gaudi->hw_cap_initialized &= ~HW_CAP_MSI;
/* gaudi_init_scrambler_sram() */
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (hdev->asic_prop.fw_security_enabled)

	if (hdev->asic_prop.fw_app_cpu_boot_dev_sts0 &

	if (gaudi->hw_cap_initialized & HW_CAP_SRAM_SCRAMBLER)

	gaudi->hw_cap_initialized |= HW_CAP_SRAM_SCRAMBLER;

/* gaudi_init_scrambler_hbm() */
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (hdev->asic_prop.fw_security_enabled)

	if (hdev->asic_prop.fw_bootfit_cpu_boot_dev_sts0 &

	if (gaudi->hw_cap_initialized & HW_CAP_HBM_SCRAMBLER)

	gaudi->hw_cap_initialized |= HW_CAP_HBM_SCRAMBLER;

/* gaudi_init_e2e() */
	if (hdev->asic_prop.fw_security_enabled)

	if (hdev->asic_prop.fw_bootfit_cpu_boot_dev_sts0 &

/* gaudi_init_hbm_cred() */
	if (hdev->asic_prop.fw_security_enabled)

	if (hdev->asic_prop.fw_bootfit_cpu_boot_dev_sts0 &

/* gaudi_init_golden_registers() */
	writeq(0, hdev->pcie_bar[SRAM_BAR_ID] + i);
/* gaudi_init_pci_dma_qman() */
		&hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;

	irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ?

			le32_to_cpu(dyn_regs->gic_dma_qm_irq_ctrl);

	if (hdev->stop_on_err)

	/* Set timeout to maximum */

/* gaudi_init_dma_core() */
		&hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;

	/* WA for H/W bug H3-2116 */

	if (hdev->stop_on_err)

	irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ?

			le32_to_cpu(dyn_regs->gic_dma_core_irq_ctrl);

/* gaudi_init_pci_dma_qmans() */
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (gaudi->hw_cap_initialized & HW_CAP_PCI_DMA)

	q = &hdev->kernel_queues[q_idx];
	q->cq_id = cq_id++;
	q->msi_vec = nic_skip + cpu_skip + msi_vec++;
			q->bus_address);

	gaudi->hw_cap_initialized |= HW_CAP_PCI_DMA;

/* gaudi_init_hbm_dma_qman() */
		&hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;

	irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ?

			le32_to_cpu(dyn_regs->gic_dma_qm_irq_ctrl);

	if (hdev->stop_on_err)

	/* Set timeout to maximum */

/* gaudi_init_hbm_dma_qmans() */
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (gaudi->hw_cap_initialized & HW_CAP_HBM_DMA)

	q = &gaudi->internal_qmans[internal_q_index];
	qman_base_addr = (u64) q->pq_dma_addr;

	gaudi->hw_cap_initialized |= HW_CAP_HBM_DMA;
/* gaudi_init_mme_qman() */
		&hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;

	irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ?

			le32_to_cpu(dyn_regs->gic_mme_qm_irq_ctrl);

			(mmMME1_QM_GLBL_CFG0 - mmMME0_QM_GLBL_CFG0) / 2;

	if (hdev->stop_on_err)

	/* Set timeout to maximum */

/* gaudi_init_mme_qmans() */
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (gaudi->hw_cap_initialized & HW_CAP_MME)

	mme_offset = mmMME2_QM_GLBL_CFG0 - mmMME0_QM_GLBL_CFG0;

	q = &gaudi->internal_qmans[internal_q_index];
	qman_base_addr = (u64) q->pq_dma_addr;

	mme_offset = mmMME2_QM_GLBL_CFG0 - mmMME0_QM_GLBL_CFG0;

	gaudi->hw_cap_initialized |= HW_CAP_MME;

/* gaudi_init_tpc_qman() */
		&hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;

		(mmTPC1_QM_GLBL_CFG0 - mmTPC0_QM_GLBL_CFG0);

	irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ?

			le32_to_cpu(dyn_regs->gic_tpc_qm_irq_ctrl);

	if (hdev->stop_on_err)

	/* Set timeout to maximum */

/* gaudi_init_tpc_qmans() */
	struct gaudi_device *gaudi = hdev->asic_specific;
	u32 tpc_delta = mmTPC1_CFG_SM_BASE_ADDRESS_HIGH -

	if (gaudi->hw_cap_initialized & HW_CAP_TPC_MASK)

	q = &gaudi->internal_qmans[internal_q_index];
	qman_base_addr = (u64) q->pq_dma_addr;

	tpc_offset += mmTPC1_QM_GLBL_CFG0 - mmTPC0_QM_GLBL_CFG0;

	gaudi->hw_cap_initialized |=
/* gaudi_init_nic_qman() */
		&hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;

	irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ?

			le32_to_cpu(dyn_regs->gic_nic_qm_irq_ctrl);

	if (hdev->stop_on_err)

	/* Set timeout to maximum */

/* gaudi_init_nic_qmans() */
	struct gaudi_device *gaudi = hdev->asic_specific;
			mmNIC0_QM1_GLBL_CFG0 - mmNIC0_QM0_GLBL_CFG0;
			mmNIC1_QM0_GLBL_CFG0 - mmNIC0_QM0_GLBL_CFG0;

	if (!hdev->nic_ports_mask)

	if (gaudi->hw_cap_initialized & HW_CAP_NIC_MASK)

	dev_dbg(hdev->dev, "Initializing NIC QMANs\n");

	if (!(hdev->nic_ports_mask & (1 << nic_id))) {

		nic_offset -= (nic_delta_between_qmans * 2);

	q = &gaudi->internal_qmans[internal_q_index];
	qman_base_addr = (u64) q->pq_dma_addr;

	nic_offset -= (nic_delta_between_qmans * 2);

	gaudi->hw_cap_initialized |= 1 << (HW_CAP_NIC_SHIFT + nic_id);
/* gaudi_disable_pci_dma_qmans() */
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (!(gaudi->hw_cap_initialized & HW_CAP_PCI_DMA))

/* gaudi_disable_hbm_dma_qmans() */
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (!(gaudi->hw_cap_initialized & HW_CAP_HBM_DMA))

/* gaudi_disable_mme_qmans() */
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (!(gaudi->hw_cap_initialized & HW_CAP_MME))

/* gaudi_disable_tpc_qmans() */
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (!(gaudi->hw_cap_initialized & HW_CAP_TPC_MASK))

	tpc_offset += mmTPC1_QM_GLBL_CFG0 - mmTPC0_QM_GLBL_CFG0;

/* gaudi_disable_nic_qmans() */
	struct gaudi_device *gaudi = hdev->asic_specific;
			mmNIC0_QM1_GLBL_CFG0 - mmNIC0_QM0_GLBL_CFG0;
			mmNIC1_QM0_GLBL_CFG0 - mmNIC0_QM0_GLBL_CFG0;

	if (gaudi->hw_cap_initialized & nic_mask)

		nic_offset -= (nic_delta_between_qmans * 2);

/* gaudi_stop_pci_dma_qmans() */
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (!(gaudi->hw_cap_initialized & HW_CAP_PCI_DMA))

/* gaudi_stop_hbm_dma_qmans() */
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (!(gaudi->hw_cap_initialized & HW_CAP_HBM_DMA))

/* gaudi_stop_mme_qmans() */
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (!(gaudi->hw_cap_initialized & HW_CAP_MME))

/* gaudi_stop_tpc_qmans() */
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (!(gaudi->hw_cap_initialized & HW_CAP_TPC_MASK))

/* gaudi_stop_nic_qmans() */
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (gaudi->hw_cap_initialized & HW_CAP_NIC0)

	if (gaudi->hw_cap_initialized & HW_CAP_NIC1)

	if (gaudi->hw_cap_initialized & HW_CAP_NIC2)

	if (gaudi->hw_cap_initialized & HW_CAP_NIC3)

	if (gaudi->hw_cap_initialized & HW_CAP_NIC4)

	if (gaudi->hw_cap_initialized & HW_CAP_NIC5)

	if (gaudi->hw_cap_initialized & HW_CAP_NIC6)

	if (gaudi->hw_cap_initialized & HW_CAP_NIC7)

	if (gaudi->hw_cap_initialized & HW_CAP_NIC8)

	if (gaudi->hw_cap_initialized & HW_CAP_NIC9)
/* gaudi_pci_dma_stall() */
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (!(gaudi->hw_cap_initialized & HW_CAP_PCI_DMA))

/* gaudi_hbm_dma_stall() */
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (!(gaudi->hw_cap_initialized & HW_CAP_HBM_DMA))

/* gaudi_mme_stall() */
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (!(gaudi->hw_cap_initialized & HW_CAP_MME))

	/* WA for H3-1800 bug: do ACC and SBAB writes twice */

/* gaudi_tpc_stall() */
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (!(gaudi->hw_cap_initialized & HW_CAP_TPC_MASK))

/* gaudi_disable_clock_gating() */
	if (hdev->asic_prop.fw_security_enabled)

		qman_offset += (mmDMA1_QM_CGM_CFG - mmDMA0_QM_CGM_CFG);

		qman_offset += (mmTPC1_QM_CGM_CFG - mmTPC0_QM_CGM_CFG);

/* gaudi_enable_timestamp() */
	/* Disable the timestamp counter before touching it */
	WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0);

	/* Zero the lower/upper parts of the 64-bit counter */
	WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0xC, 0);
	WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0x8, 0);

	/* Re-enable the counter */
	WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 1);
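/*
 * A minimal sketch (not from this file) of reading back the free-running
 * counter zeroed above: sample the high word twice around the low word to
 * guard against rollover. The 0x8/0xC offsets mirror the writes above;
 * which offset is the high vs. low half is an assumption here.
 */
static u64 gaudi_read_timestamp_sketch(void)
{
	u32 hi, lo, hi2;

	do {
		hi  = RREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0xC);
		lo  = RREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0x8);
		hi2 = RREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE + 0xC);
	} while (hi != hi2);	/* retry if the low word wrapped mid-read */

	return ((u64)hi << 32) | lo;
}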
/* gaudi_disable_timestamp() */
	WREG32(mmPSOC_TIMESTAMP_BASE - CFG_BASE, 0);

/* gaudi_halt_engines() */
	if (hdev->pldm)

/* gaudi_mmu_init() */
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (gaudi->hw_cap_initialized & HW_CAP_MMU)

	for (i = 0 ; i < prop->max_asid ; i++) {
		hop0_addr = prop->mmu_pgt_addr +
				(i * prop->mmu_hop_table_size);

			dev_err(hdev->dev,

	WREG32(mmSTLB_CACHE_INV_BASE_39_8, prop->mmu_cache_mng_addr >> 8);
	WREG32(mmSTLB_CACHE_INV_BASE_49_40, prop->mmu_cache_mng_addr >> 40);

	gaudi->mmu_cache_inv_pi = 1;

	gaudi->hw_cap_initialized |= HW_CAP_MMU;
/* gaudi_load_firmware_to_device() */
	dst = hdev->pcie_bar[HBM_BAR_ID] + LINUX_FW_OFFSET;

/* gaudi_load_boot_fit_to_device() */
	dst = hdev->pcie_bar[SRAM_BAR_ID] + BOOT_FIT_SRAM_OFFSET;

/* gaudi_init_dynamic_firmware_loader() */
	dynamic_loader = &hdev->fw_loader.dynamic_loader;

	/*
	 * hard-coded) in later stages of the protocol those values will be
	 * will always be up-to-date
	 */
	dyn_regs = &dynamic_loader->comm_desc.cpu_dyn_regs;
	dyn_regs->kmd_msg_to_cpu =
	dyn_regs->cpu_cmd_status_to_host =

	dynamic_loader->wait_for_bl_timeout = GAUDI_WAIT_FOR_BL_TIMEOUT_USEC;

/* gaudi_init_static_firmware_loader() */
	static_loader = &hdev->fw_loader.static_loader;

	static_loader->preboot_version_max_off = SRAM_SIZE - VERSION_MAX_LEN;
	static_loader->boot_fit_version_max_off = SRAM_SIZE - VERSION_MAX_LEN;
	static_loader->kmd_msg_to_cpu_reg = mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU;
	static_loader->cpu_cmd_status_to_host_reg = mmCPU_CMD_STATUS_TO_HOST;
	static_loader->cpu_boot_status_reg = mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS;
	static_loader->cpu_boot_dev_status0_reg = mmCPU_BOOT_DEV_STS0;
	static_loader->cpu_boot_dev_status1_reg = mmCPU_BOOT_DEV_STS1;
	static_loader->boot_err0_reg = mmCPU_BOOT_ERR0;
	static_loader->boot_err1_reg = mmCPU_BOOT_ERR1;
	static_loader->preboot_version_offset_reg = mmPREBOOT_VER_OFFSET;
	static_loader->boot_fit_version_offset_reg = mmUBOOT_VER_OFFSET;
	static_loader->sram_offset_mask = ~(lower_32_bits(SRAM_BASE_ADDR));
	static_loader->cpu_reset_wait_msec = hdev->pldm ?
/* gaudi_init_firmware_preload_params() */
	struct pre_fw_load_props *pre_fw_load = &hdev->fw_loader.pre_fw_load;

	pre_fw_load->cpu_boot_status_reg = mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS;
	pre_fw_load->sts_boot_dev_sts0_reg = mmCPU_BOOT_DEV_STS0;
	pre_fw_load->sts_boot_dev_sts1_reg = mmCPU_BOOT_DEV_STS1;
	pre_fw_load->boot_err0_reg = mmCPU_BOOT_ERR0;
	pre_fw_load->boot_err1_reg = mmCPU_BOOT_ERR1;
	pre_fw_load->wait_for_preboot_timeout = GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC;

/* gaudi_init_firmware_loader() */
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct fw_load_mgr *fw_loader = &hdev->fw_loader;

	fw_loader->fw_comp_loaded = FW_TYPE_NONE;
	fw_loader->boot_fit_img.image_name = GAUDI_BOOT_FIT_FILE;
	fw_loader->linux_img.image_name = GAUDI_LINUX_FW_FILE;
	fw_loader->cpu_timeout = GAUDI_CPU_TIMEOUT_USEC;
	fw_loader->boot_fit_timeout = GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC;
	fw_loader->skip_bmc = !hdev->bmc_enable;
	fw_loader->sram_bar_id = SRAM_BAR_ID;
	fw_loader->dram_bar_id = HBM_BAR_ID;

	if (prop->dynamic_fw_load)

/* gaudi_init_cpu() */
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (!(hdev->fw_components & FW_TYPE_PREBOOT_CPU))

	if (gaudi->hw_cap_initialized & HW_CAP_CPU)

	if (!hdev->asic_prop.fw_security_enabled)
		WREG32(mmCPU_IF_CPU_MSB_ADDR, hdev->cpu_pci_msb_addr);

	gaudi->hw_cap_initialized |= HW_CAP_CPU;
/* gaudi_init_cpu_queues() */
		&hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct gaudi_device *gaudi = hdev->asic_specific;
		&hdev->kernel_queues[GAUDI_QUEUE_ID_CPU_PQ];

	if (!hdev->cpu_queues_enable)

	if (gaudi->hw_cap_initialized & HW_CAP_CPU_Q)

	eq = &hdev->event_queue;

	WREG32(mmCPU_IF_PQ_BASE_ADDR_LOW, lower_32_bits(cpu_pq->bus_address));
	WREG32(mmCPU_IF_PQ_BASE_ADDR_HIGH, upper_32_bits(cpu_pq->bus_address));

	WREG32(mmCPU_IF_EQ_BASE_ADDR_LOW, lower_32_bits(eq->bus_address));
	WREG32(mmCPU_IF_EQ_BASE_ADDR_HIGH, upper_32_bits(eq->bus_address));

			lower_32_bits(hdev->cpu_accessible_dma_address));
			upper_32_bits(hdev->cpu_accessible_dma_address));

	irq_handler_offset = prop->gic_interrupts_enable ?

			le32_to_cpu(dyn_regs->gic_host_pi_upd_irq);

		dev_err(hdev->dev,
			"Failed to communicate with Device CPU (CPU-CP timeout)\n");
		return -EIO;

	if (prop->fw_cpu_boot_dev_sts0_valid)
		prop->fw_app_cpu_boot_dev_sts0 = RREG32(mmCPU_BOOT_DEV_STS0);
	if (prop->fw_cpu_boot_dev_sts1_valid)
		prop->fw_app_cpu_boot_dev_sts1 = RREG32(mmCPU_BOOT_DEV_STS1);

	gaudi->hw_cap_initialized |= HW_CAP_CPU_Q;
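/*
 * Illustrative helper (hypothetical name, not driver code) capturing the
 * pattern above: a 64-bit bus address is programmed into a pair of 32-bit
 * registers, low half first, as done for the PQ/EQ/CQ bases.
 */
static void write_addr64_sketch(u32 reg_lo, u32 reg_hi, u64 addr)
{
	WREG32(reg_lo, lower_32_bits(addr));
	WREG32(reg_hi, upper_32_bits(addr));
}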
/* gaudi_pre_hw_init() */
	if (!hdev->asic_prop.fw_security_enabled) {

/* gaudi_hw_init() */
	struct gaudi_device *gaudi = hdev->asic_specific;

	if (hdev->asic_prop.iatu_done_by_fw)
		gaudi->hbm_bar_cur_addr = DRAM_PHYS_BASE;

	/*
	 * Before pushing u-boot/linux to device, need to set the hbm bar to
	 */
		dev_err(hdev->dev,
		return -EIO;

		dev_err(hdev->dev, "failed to initialize CPU\n");

		dev_err(hdev->dev, "failed to initialize CPU H/W queues %d\n",
/* gaudi_hw_fini() */
		&hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs;
	struct gaudi_device *gaudi = hdev->asic_specific;

		dev_err(hdev->dev, "GAUDI doesn't support soft-reset\n");

	if (hdev->pldm) {

	dev_dbg(hdev->dev,

	driver_performs_reset = !hdev->asic_prop.fw_security_enabled &&
				!hdev->asic_prop.hard_reset_done_by_fw;

	/*
	 * via the GIC. Otherwise, we need to use COMMS or the MSG_TO_CPU
	 */
	if (hdev->fw_loader.fw_comp_loaded & FW_TYPE_LINUX) {
		irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ?

				le32_to_cpu(dyn_regs->gic_host_halt_irq);

	/*
	 * This is a hail-mary attempt to revive the card in the small chance that the
	 */
	if (hdev->reset_info.curr_reset_cause == HL_RESET_CAUSE_HEARTBEAT) {
		if (hdev->asic_prop.hard_reset_done_by_fw)

	if (hdev->asic_prop.hard_reset_done_by_fw)

	/* Tell ASIC not to re-initialize PCIe */

	/* Restart BTL/BLR upon hard-reset */

	dev_dbg(hdev->dev,

	dev_dbg(hdev->dev,

		dev_err(hdev->dev, "Timeout while waiting for device to reset 0x%x\n", status);
		return -ETIMEDOUT;

	gaudi->hw_cap_initialized &= ~(HW_CAP_CPU | HW_CAP_CPU_Q | HW_CAP_HBM |

	memset(gaudi->events_stat, 0, sizeof(gaudi->events_stat));

	hdev->device_cpu_is_halted = false;
4158 dev_err(hdev->dev, "Failed to disable PCI access from CPU\n"); in gaudi_suspend()
4176 rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr, in gaudi_mmap()
4177 (dma_addr - HOST_PHYS_BASE), size); in gaudi_mmap()
4179 dev_err(hdev->dev, "dma_mmap_coherent error %d", rc); in gaudi_mmap()
4187 &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs; in gaudi_ring_doorbell()
4189 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_ring_doorbell()
4211 q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4; in gaudi_ring_doorbell()
4218 q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4; in gaudi_ring_doorbell()
4225 q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4; in gaudi_ring_doorbell()
4232 q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4; in gaudi_ring_doorbell()
4239 q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4; in gaudi_ring_doorbell()
4246 q_off = dma_qm_offset + ((hw_queue_id - 1) & 0x3) * 4; in gaudi_ring_doorbell()
4251 if (gaudi->hw_cap_initialized & HW_CAP_CPU_Q) in gaudi_ring_doorbell()
4418 if (!(gaudi->hw_cap_initialized & HW_CAP_NIC0)) in gaudi_ring_doorbell()
4421 q_off = ((hw_queue_id - 1) & 0x3) * 4; in gaudi_ring_doorbell()
4426 if (!(gaudi->hw_cap_initialized & HW_CAP_NIC1)) in gaudi_ring_doorbell()
4429 q_off = ((hw_queue_id - 1) & 0x3) * 4; in gaudi_ring_doorbell()
4434 if (!(gaudi->hw_cap_initialized & HW_CAP_NIC2)) in gaudi_ring_doorbell()
4437 q_off = ((hw_queue_id - 1) & 0x3) * 4; in gaudi_ring_doorbell()
4442 if (!(gaudi->hw_cap_initialized & HW_CAP_NIC3)) in gaudi_ring_doorbell()
4445 q_off = ((hw_queue_id - 1) & 0x3) * 4; in gaudi_ring_doorbell()
4450 if (!(gaudi->hw_cap_initialized & HW_CAP_NIC4)) in gaudi_ring_doorbell()
4453 q_off = ((hw_queue_id - 1) & 0x3) * 4; in gaudi_ring_doorbell()
4458 if (!(gaudi->hw_cap_initialized & HW_CAP_NIC5)) in gaudi_ring_doorbell()
4461 q_off = ((hw_queue_id - 1) & 0x3) * 4; in gaudi_ring_doorbell()
4466 if (!(gaudi->hw_cap_initialized & HW_CAP_NIC6)) in gaudi_ring_doorbell()
4469 q_off = ((hw_queue_id - 1) & 0x3) * 4; in gaudi_ring_doorbell()
4474 if (!(gaudi->hw_cap_initialized & HW_CAP_NIC7)) in gaudi_ring_doorbell()
4477 q_off = ((hw_queue_id - 1) & 0x3) * 4; in gaudi_ring_doorbell()
4482 if (!(gaudi->hw_cap_initialized & HW_CAP_NIC8)) in gaudi_ring_doorbell()
4485 q_off = ((hw_queue_id - 1) & 0x3) * 4; in gaudi_ring_doorbell()
4490 if (!(gaudi->hw_cap_initialized & HW_CAP_NIC9)) in gaudi_ring_doorbell()
4493 q_off = ((hw_queue_id - 1) & 0x3) * 4; in gaudi_ring_doorbell()
4503 dev_err(hdev->dev, "h/w queue %d is invalid. Can't set pi\n", in gaudi_ring_doorbell()
4517 irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ? in gaudi_ring_doorbell()
4519 le32_to_cpu(dyn_regs->gic_host_pi_upd_irq); in gaudi_ring_doorbell()
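
/*
 * Editor's sketch (illustrative only): each QMAN exposes four per-stream
 * doorbell registers, 4 bytes apart, and the cases above reduce a global
 * queue id to a stream slot with "(hw_queue_id - 1) & 0x3" before scaling
 * by the register stride. A stand-alone check of that arithmetic; the base
 * offset is invented for the demo.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t dma_qm_offset = 0x1000;	/* hypothetical QMAN base */
	uint32_t hw_queue_id;

	for (hw_queue_id = 1; hw_queue_id <= 8; hw_queue_id++) {
		uint32_t stream = (hw_queue_id - 1) & 0x3;
		uint32_t q_off = dma_qm_offset + stream * 4;

		printf("queue %u -> stream %u, doorbell offset %#x\n",
		       hw_queue_id, stream, q_off);
	}
	return 0;
}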
4539 void *kernel_addr = dma_alloc_coherent(&hdev->pdev->dev, size, in gaudi_dma_alloc_coherent()
4553 dma_addr_t fixed_dma_handle = dma_handle - HOST_PHYS_BASE; in gaudi_dma_free_coherent()
4555 dma_free_coherent(&hdev->pdev->dev, size, cpu_addr, fixed_dma_handle); in gaudi_dma_free_coherent()
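
/*
 * Editor's sketch (not from the driver): host coherent buffers are handed
 * to the device with HOST_PHYS_BASE added, and gaudi_dma_free_coherent()
 * above strips that offset again before calling dma_free_coherent(). A
 * stand-alone round-trip of the translation; the base value here is a
 * stand-in, not the driver's constant.
 */
#include <assert.h>
#include <stdint.h>

#define HOST_PHYS_BASE_SKETCH 0x8000000000000000ULL	/* hypothetical */

static uint64_t to_device_addr(uint64_t dma_handle)
{
	return dma_handle + HOST_PHYS_BASE_SKETCH;
}

static uint64_t from_device_addr(uint64_t device_addr)
{
	return device_addr - HOST_PHYS_BASE_SKETCH;
}

int main(void)
{
	uint64_t dma_handle = 0x1f2e3d4c000ULL;	/* made-up handle */

	assert(from_device_addr(to_device_addr(dma_handle)) == dma_handle);
	return 0;
}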
4560 struct asic_fixed_properties *prop = &hdev->asic_prop; in gaudi_scrub_device_dram()
4561 u64 cur_addr = prop->dram_user_base_address; in gaudi_scrub_device_dram()
4565 while (cur_addr < prop->dram_end_address) { in gaudi_scrub_device_dram()
4570 min((u64)SZ_2G, prop->dram_end_address - cur_addr); in gaudi_scrub_device_dram()
4572 dev_dbg(hdev->dev, in gaudi_scrub_device_dram()
4573 "Doing HBM scrubbing for 0x%09llx - 0x%09llx\n", in gaudi_scrub_device_dram()
4592 if (cur_addr == prop->dram_end_address) in gaudi_scrub_device_dram()
4608 dev_err(hdev->dev, in gaudi_scrub_device_dram()
4609 "DMA Timeout during HBM scrubbing of DMA #%d\n", in gaudi_scrub_device_dram()
4611 return -EIO; in gaudi_scrub_device_dram()
4621 struct asic_fixed_properties *prop = &hdev->asic_prop; in gaudi_scrub_device_mem()
4622 u64 wait_to_idle_time = hdev->pdev ? HBM_SCRUBBING_TIMEOUT_US : in gaudi_scrub_device_mem()
4624 u64 addr, size, val = hdev->memory_scrub_val; in gaudi_scrub_device_mem()
4625 ktime_t timeout; in gaudi_scrub_device_mem() local
4628 if (!hdev->memory_scrub) in gaudi_scrub_device_mem()
4631 timeout = ktime_add_us(ktime_get(), wait_to_idle_time); in gaudi_scrub_device_mem()
4632 while (!hdev->asic_funcs->is_device_idle(hdev, NULL, 0, NULL)) { in gaudi_scrub_device_mem()
4633 if (ktime_compare(ktime_get(), timeout) > 0) { in gaudi_scrub_device_mem()
4634 dev_err(hdev->dev, "waiting for idle timeout\n"); in gaudi_scrub_device_mem()
4635 return -ETIMEDOUT; in gaudi_scrub_device_mem()
4641 addr = prop->sram_user_base_address; in gaudi_scrub_device_mem()
4642 size = hdev->pldm ? 0x10000 : prop->sram_size - SRAM_USER_BASE_OFFSET; in gaudi_scrub_device_mem()
4644 dev_dbg(hdev->dev, "Scrubbing SRAM: 0x%09llx - 0x%09llx val: 0x%llx\n", in gaudi_scrub_device_mem()
4648 dev_err(hdev->dev, "Failed to clear SRAM (%d)\n", rc); in gaudi_scrub_device_mem()
4655 dev_err(hdev->dev, "Failed to clear HBM (%d)\n", rc); in gaudi_scrub_device_mem()
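
/*
 * Editor's sketch (user-space analogue): the idle wait at the top of
 * gaudi_scrub_device_mem() computes a ktime deadline once, then polls the
 * condition until it holds or the deadline passes. The same shape with
 * clock_gettime(); the idle condition is a stand-in.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <time.h>

static uint64_t now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000ULL + ts.tv_nsec / 1000;
}

static bool device_is_idle(void)	/* stand-in condition */
{
	return false;
}

static int wait_for_idle(uint64_t budget_us)
{
	uint64_t deadline = now_us() + budget_us;	/* compute once */

	while (!device_is_idle()) {
		if (now_us() > deadline)
			return -ETIMEDOUT;
	}
	return 0;
}

int main(void)
{
	return wait_for_idle(1000) == -ETIMEDOUT ? 0 : 1;
}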
4666 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_get_int_queue_base()
4671 dev_err(hdev->dev, "Got invalid queue id %d\n", queue_id); in gaudi_get_int_queue_base()
4675 q = &gaudi->internal_qmans[queue_id]; in gaudi_get_int_queue_base()
4676 *dma_handle = q->pq_dma_addr; in gaudi_get_int_queue_base()
4677 *queue_len = q->pq_size / QMAN_PQ_ENTRY_SIZE; in gaudi_get_int_queue_base()
4679 return q->pq_kernel_addr; in gaudi_get_int_queue_base()
4683 u16 len, u32 timeout, u64 *result) in gaudi_send_cpu_message() argument
4685 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_send_cpu_message()
4687 if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q)) { in gaudi_send_cpu_message()
4693 if (!timeout) in gaudi_send_cpu_message()
4694 timeout = GAUDI_MSG_TO_CPU_TIMEOUT_USEC; in gaudi_send_cpu_message()
4697 timeout, result); in gaudi_send_cpu_message()
4709 if (hdev->pldm) in gaudi_test_queue()
4718 dev_err(hdev->dev, in gaudi_test_queue()
4721 return -ENOMEM; in gaudi_test_queue()
4729 dev_err(hdev->dev, in gaudi_test_queue()
4732 rc = -ENOMEM; in gaudi_test_queue()
4740 fence_pkt->ctl = cpu_to_le32(tmp); in gaudi_test_queue()
4741 fence_pkt->value = cpu_to_le32(fence_val); in gaudi_test_queue()
4742 fence_pkt->addr = cpu_to_le64(fence_dma_addr); in gaudi_test_queue()
4748 dev_err(hdev->dev, in gaudi_test_queue()
4759 if (rc == -ETIMEDOUT) { in gaudi_test_queue()
4760 dev_err(hdev->dev, in gaudi_test_queue()
4763 rc = -EIO; in gaudi_test_queue()
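
/*
 * Editor's sketch (mock, not the driver's packet layout): the queue
 * self-test builds a MSG_PROT-style packet meaning "write this value to
 * that host address", submits it, and then polls the host buffer for the
 * value. Here "executing" the packet is a plain store and the field
 * layout is simplified.
 */
#include <stdint.h>
#include <stdio.h>

struct msg_prot_sketch {
	uint32_t ctl;		/* opcode/flags, omitted in the sketch */
	uint32_t value;		/* payload the engine writes */
	uint64_t addr;		/* host address it writes to */
};

static void mock_engine_execute(const struct msg_prot_sketch *pkt)
{
	*(uint32_t *)(uintptr_t)pkt->addr = pkt->value;
}

int main(void)
{
	uint32_t fence_buf = 0;
	struct msg_prot_sketch pkt = {
		.ctl = 0,
		.value = 0xcafecafe,		/* made-up fence value */
		.addr = (uintptr_t)&fence_buf,	/* fence buffer analogue */
	};

	mock_engine_execute(&pkt);
	printf("fence %s\n", fence_buf == pkt.value ? "matched" : "MISSING");
	return fence_buf == pkt.value ? 0 : 1;
}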
4775 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_test_cpu_queue()
4781 if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q)) in gaudi_test_cpu_queue()
4791 for (i = 0 ; i < hdev->asic_prop.max_queues ; i++) { in gaudi_test_queues()
4792 if (hdev->asic_prop.hw_queues_props[i].type == QUEUE_TYPE_EXT) { in gaudi_test_queues()
4795 ret_val = -EINVAL; in gaudi_test_queues()
4801 ret_val = -EINVAL; in gaudi_test_queues()
4814 kernel_addr = dma_pool_zalloc(hdev->dma_pool, mem_flags, dma_handle); in gaudi_dma_pool_zalloc()
4827 dma_addr_t fixed_dma_addr = dma_addr - HOST_PHYS_BASE; in gaudi_dma_pool_free()
4829 dma_pool_free(hdev->dma_pool, vaddr, fixed_dma_addr); in gaudi_dma_pool_free()
4860 while ((count + 1) < sgt->nents) { in gaudi_get_dma_desc_list_size()
4892 if (hl_userptr_is_pinned(hdev, addr, le32_to_cpu(user_dma_pkt->tsize), in gaudi_pin_memory_before_cs()
4893 parser->job_userptr_list, &userptr)) in gaudi_pin_memory_before_cs()
4898 return -ENOMEM; in gaudi_pin_memory_before_cs()
4900 rc = hl_pin_host_memory(hdev, addr, le32_to_cpu(user_dma_pkt->tsize), in gaudi_pin_memory_before_cs()
4905 list_add_tail(&userptr->job_node, parser->job_userptr_list); in gaudi_pin_memory_before_cs()
4907 rc = hdev->asic_funcs->asic_dma_map_sgtable(hdev, userptr->sgt, dir); in gaudi_pin_memory_before_cs()
4909 dev_err(hdev->dev, "failed to map sgt with DMA region\n"); in gaudi_pin_memory_before_cs()
4913 userptr->dma_mapped = true; in gaudi_pin_memory_before_cs()
4914 userptr->dir = dir; in gaudi_pin_memory_before_cs()
4917 parser->patched_cb_size += in gaudi_pin_memory_before_cs()
4918 gaudi_get_dma_desc_list_size(hdev, userptr->sgt); in gaudi_pin_memory_before_cs()
4923 list_del(&userptr->job_node); in gaudi_pin_memory_before_cs()
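
/*
 * Editor's sketch (illustrative): the patched-CB size estimate above
 * merges physically contiguous scatter-gather entries so one LIN_DMA
 * packet can cover several of them. A stand-alone packet count over a
 * segment list; the merge rule (contiguity only) is simplified - the
 * driver also caps the merged transfer size.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct seg { uint64_t addr; uint32_t len; };

static size_t count_dma_packets(const struct seg *s, size_t n)
{
	size_t pkts = 0, i = 0;

	while (i < n) {
		uint64_t end = s[i].addr + s[i].len;
		size_t j = i + 1;

		/* extend this packet while the next entry is contiguous */
		while (j < n && s[j].addr == end) {
			end += s[j].len;
			j++;
		}
		pkts++;
		i = j;
	}
	return pkts;
}

int main(void)
{
	struct seg segs[] = {
		{ 0x1000, 0x1000 }, { 0x2000, 0x1000 },	/* merge into one */
		{ 0x8000, 0x1000 },			/* gap: new packet */
	};

	printf("%zu packets\n", count_dma_packets(segs, 3));	/* 2 */
	return 0;
}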
4940 user_memset = (le32_to_cpu(user_dma_pkt->ctl) & in gaudi_validate_dma_pkt_host()
4948 dev_dbg(hdev->dev, "DMA direction is HOST --> DEVICE\n"); in gaudi_validate_dma_pkt_host()
4950 addr = le64_to_cpu(user_dma_pkt->src_addr); in gaudi_validate_dma_pkt_host()
4952 dev_dbg(hdev->dev, "DMA direction is DEVICE --> HOST\n"); in gaudi_validate_dma_pkt_host()
4954 addr = (le64_to_cpu(user_dma_pkt->dst_addr) & in gaudi_validate_dma_pkt_host()
4960 parser->patched_cb_size += sizeof(*user_dma_pkt); in gaudi_validate_dma_pkt_host()
4973 u64 dst_addr = (le64_to_cpu(user_dma_pkt->dst_addr) & in gaudi_validate_dma_pkt_no_mmu()
4977 dev_dbg(hdev->dev, "DMA packet details:\n"); in gaudi_validate_dma_pkt_no_mmu()
4978 dev_dbg(hdev->dev, "source == 0x%llx\n", in gaudi_validate_dma_pkt_no_mmu()
4979 le64_to_cpu(user_dma_pkt->src_addr)); in gaudi_validate_dma_pkt_no_mmu()
4980 dev_dbg(hdev->dev, "destination == 0x%llx\n", dst_addr); in gaudi_validate_dma_pkt_no_mmu()
4981 dev_dbg(hdev->dev, "size == %u\n", le32_to_cpu(user_dma_pkt->tsize)); in gaudi_validate_dma_pkt_no_mmu()
4988 if (!le32_to_cpu(user_dma_pkt->tsize)) { in gaudi_validate_dma_pkt_no_mmu()
4989 parser->patched_cb_size += sizeof(*user_dma_pkt); in gaudi_validate_dma_pkt_no_mmu()
4993 if (parser->hw_queue_id <= GAUDI_QUEUE_ID_DMA_0_3) in gaudi_validate_dma_pkt_no_mmu()
5006 cfg = le32_to_cpu(user_pkt->cfg); in gaudi_validate_load_and_exe_pkt()
5009 dev_err(hdev->dev, in gaudi_validate_load_and_exe_pkt()
5011 return -EPERM; in gaudi_validate_load_and_exe_pkt()
5014 parser->patched_cb_size += sizeof(struct packet_load_and_exe); in gaudi_validate_load_and_exe_pkt()
5025 parser->patched_cb_size = 0; in gaudi_validate_cb()
5028 while (cb_parsed_length < parser->user_cb_size) { in gaudi_validate_cb()
5033 user_pkt = parser->user_cb->kernel_address + cb_parsed_length; in gaudi_validate_cb()
5036 (le64_to_cpu(user_pkt->header) & in gaudi_validate_cb()
5041 dev_err(hdev->dev, "Invalid packet id %u\n", pkt_id); in gaudi_validate_cb()
5042 rc = -EINVAL; in gaudi_validate_cb()
5048 if (cb_parsed_length > parser->user_cb_size) { in gaudi_validate_cb()
5049 dev_err(hdev->dev, in gaudi_validate_cb()
5051 rc = -EINVAL; in gaudi_validate_cb()
5057 dev_err(hdev->dev, in gaudi_validate_cb()
5059 rc = -EPERM; in gaudi_validate_cb()
5063 dev_err(hdev->dev, "User not allowed to use CP_DMA\n"); in gaudi_validate_cb()
5064 rc = -EPERM; in gaudi_validate_cb()
5068 dev_err(hdev->dev, "User not allowed to use STOP\n"); in gaudi_validate_cb()
5069 rc = -EPERM; in gaudi_validate_cb()
5073 dev_err(hdev->dev, in gaudi_validate_cb()
5075 rc = -EPERM; in gaudi_validate_cb()
5084 parser->contains_dma_pkt = true; in gaudi_validate_cb()
5086 parser->patched_cb_size += pkt_size; in gaudi_validate_cb()
5099 parser->patched_cb_size += pkt_size; in gaudi_validate_cb()
5103 dev_err(hdev->dev, "Invalid packet header 0x%x\n", in gaudi_validate_cb()
5105 rc = -EINVAL; in gaudi_validate_cb()
5119 if (parser->completion) in gaudi_validate_cb()
5120 parser->patched_cb_size += gaudi_get_patched_cb_extra_size( in gaudi_validate_cb()
5121 parser->patched_cb_size); in gaudi_validate_cb()
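
/*
 * Editor's sketch (mock): gaudi_validate_cb() above walks the buffer
 * packet by packet - read the opcode from the header, look up that
 * packet's size, reject unknown opcodes, and reject any packet that would
 * run past the declared CB size. The same walk over an invented one-byte
 * header format; opcodes and sizes here are demo values.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

enum { PKT_NOP = 0, PKT_MSG = 1, PKT_MAX };

static const size_t pkt_size[PKT_MAX] = { 4, 16 };

static int validate_cb(const uint8_t *cb, size_t cb_size)
{
	size_t parsed = 0;

	while (parsed < cb_size) {
		uint8_t opcode = cb[parsed];	/* header byte in the mock */

		if (opcode >= PKT_MAX) {
			fprintf(stderr, "invalid packet id %u\n",
				(unsigned)opcode);
			return -1;
		}
		parsed += pkt_size[opcode];
		if (parsed > cb_size) {
			fprintf(stderr, "packet exceeds CB size\n");
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	uint8_t cb[20] = { PKT_MSG, [16] = PKT_NOP };

	return validate_cb(cb, sizeof(cb));
}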
5144 ctl = le32_to_cpu(user_dma_pkt->ctl); in gaudi_patch_dma_packet()
5146 if (parser->hw_queue_id <= GAUDI_QUEUE_ID_DMA_0_3) in gaudi_patch_dma_packet()
5153 addr = le64_to_cpu(user_dma_pkt->src_addr); in gaudi_patch_dma_packet()
5154 device_memory_addr = le64_to_cpu(user_dma_pkt->dst_addr); in gaudi_patch_dma_packet()
5159 addr = le64_to_cpu(user_dma_pkt->dst_addr); in gaudi_patch_dma_packet()
5160 device_memory_addr = le64_to_cpu(user_dma_pkt->src_addr); in gaudi_patch_dma_packet()
5166 le32_to_cpu(user_dma_pkt->tsize), in gaudi_patch_dma_packet()
5167 parser->job_userptr_list, &userptr))) { in gaudi_patch_dma_packet()
5168 dev_err(hdev->dev, "Userptr 0x%llx + 0x%x NOT mapped\n", in gaudi_patch_dma_packet()
5169 addr, user_dma_pkt->tsize); in gaudi_patch_dma_packet()
5170 return -EFAULT; in gaudi_patch_dma_packet()
5181 sgt = userptr->sgt; in gaudi_patch_dma_packet()
5191 while ((count + 1) < sgt->nents) { in gaudi_patch_dma_packet()
5209 ctl = le32_to_cpu(user_dma_pkt->ctl); in gaudi_patch_dma_packet()
5213 new_dma_pkt->ctl = cpu_to_le32(ctl); in gaudi_patch_dma_packet()
5214 new_dma_pkt->tsize = cpu_to_le32(len); in gaudi_patch_dma_packet()
5217 new_dma_pkt->src_addr = cpu_to_le64(dma_addr); in gaudi_patch_dma_packet()
5218 new_dma_pkt->dst_addr = cpu_to_le64(device_memory_addr); in gaudi_patch_dma_packet()
5220 new_dma_pkt->src_addr = cpu_to_le64(device_memory_addr); in gaudi_patch_dma_packet()
5221 new_dma_pkt->dst_addr = cpu_to_le64(dma_addr); in gaudi_patch_dma_packet()
5231 dev_err(hdev->dev, in gaudi_patch_dma_packet()
5233 return -EFAULT; in gaudi_patch_dma_packet()
5236 /* Fix the last dma packet - wrcomp must be as user set it */ in gaudi_patch_dma_packet()
5237 new_dma_pkt--; in gaudi_patch_dma_packet()
5238 new_dma_pkt->ctl |= cpu_to_le32(user_wrcomp_en_mask); in gaudi_patch_dma_packet()
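
/*
 * Editor's sketch (simplified layout): patching expands one user LIN_DMA
 * packet into one packet per mapped segment, and only the final packet
 * keeps the user's write-completion bit - the "new_dma_pkt--; ctl |= ..."
 * fix-up above. A stand-alone version of that expansion.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define WRCOMP_BIT 0x1u	/* stand-in for the real ctl mask */

struct dma_pkt_sketch { uint32_t ctl; uint32_t len; uint64_t src, dst; };
struct seg { uint64_t addr; uint32_t len; };

static size_t expand(struct dma_pkt_sketch *out, const struct seg *s,
		     size_t n, uint64_t dev_addr, uint32_t user_wrcomp)
{
	size_t i;

	for (i = 0; i < n; i++) {
		out[i].ctl = 0;		/* wrcomp cleared on middle packets */
		out[i].src = s[i].addr;
		out[i].dst = dev_addr;
		out[i].len = s[i].len;
		dev_addr += s[i].len;
	}
	out[n - 1].ctl |= user_wrcomp;	/* restore it on the last packet */
	return n;
}

int main(void)
{
	struct seg segs[] = { { 0x1000, 0x100 }, { 0x9000, 0x200 } };
	struct dma_pkt_sketch pkts[2];

	expand(pkts, segs, 2, 0x20000000ULL, WRCOMP_BIT);
	printf("pkt0 ctl=%u pkt1 ctl=%u\n", pkts[0].ctl, pkts[1].ctl);
	return 0;
}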
5253 while (cb_parsed_length < parser->user_cb_size) { in gaudi_patch_cb()
5259 user_pkt = parser->user_cb->kernel_address + cb_parsed_length; in gaudi_patch_cb()
5260 kernel_pkt = parser->patched_cb->kernel_address + in gaudi_patch_cb()
5264 (le64_to_cpu(user_pkt->header) & in gaudi_patch_cb()
5269 dev_err(hdev->dev, "Invalid packet id %u\n", pkt_id); in gaudi_patch_cb()
5270 rc = -EINVAL; in gaudi_patch_cb()
5276 if (cb_parsed_length > parser->user_cb_size) { in gaudi_patch_cb()
5277 dev_err(hdev->dev, in gaudi_patch_cb()
5279 rc = -EINVAL; in gaudi_patch_cb()
5293 dev_err(hdev->dev, in gaudi_patch_cb()
5295 rc = -EPERM; in gaudi_patch_cb()
5299 dev_err(hdev->dev, "User not allowed to use CP_DMA\n"); in gaudi_patch_cb()
5300 rc = -EPERM; in gaudi_patch_cb()
5304 dev_err(hdev->dev, "User not allowed to use STOP\n"); in gaudi_patch_cb()
5305 rc = -EPERM; in gaudi_patch_cb()
5322 dev_err(hdev->dev, "Invalid packet header 0x%x\n", in gaudi_patch_cb()
5324 rc = -EINVAL; in gaudi_patch_cb()
5349 if (parser->completion) in gaudi_parse_cb_mmu()
5350 parser->patched_cb_size = parser->user_cb_size + in gaudi_parse_cb_mmu()
5351 gaudi_get_patched_cb_extra_size(parser->user_cb_size); in gaudi_parse_cb_mmu()
5353 parser->patched_cb_size = parser->user_cb_size; in gaudi_parse_cb_mmu()
5355 rc = hl_cb_create(hdev, &hdev->kernel_mem_mgr, hdev->kernel_ctx, in gaudi_parse_cb_mmu()
5356 parser->patched_cb_size, false, false, in gaudi_parse_cb_mmu()
5360 dev_err(hdev->dev, in gaudi_parse_cb_mmu()
5366 parser->patched_cb = hl_cb_get(&hdev->kernel_mem_mgr, handle); in gaudi_parse_cb_mmu()
5368 if (!parser->patched_cb) { in gaudi_parse_cb_mmu()
5369 dev_crit(hdev->dev, "DMA CB handle invalid 0x%llx\n", handle); in gaudi_parse_cb_mmu()
5370 rc = -EFAULT; in gaudi_parse_cb_mmu()
5376 * "parser->user_cb_size <= parser->user_cb->size" was done in get_cb_from_cs_chunk() in gaudi_parse_cb_mmu()
5384 memcpy(parser->patched_cb->kernel_address, in gaudi_parse_cb_mmu()
5385 parser->user_cb->kernel_address, in gaudi_parse_cb_mmu()
5386 parser->user_cb_size); in gaudi_parse_cb_mmu()
5388 patched_cb_size = parser->patched_cb_size; in gaudi_parse_cb_mmu()
5391 user_cb = parser->user_cb; in gaudi_parse_cb_mmu()
5392 parser->user_cb = parser->patched_cb; in gaudi_parse_cb_mmu()
5394 parser->user_cb = user_cb; in gaudi_parse_cb_mmu()
5397 hl_cb_put(parser->patched_cb); in gaudi_parse_cb_mmu()
5401 if (patched_cb_size != parser->patched_cb_size) { in gaudi_parse_cb_mmu()
5402 dev_err(hdev->dev, "user CB size mismatch\n"); in gaudi_parse_cb_mmu()
5403 hl_cb_put(parser->patched_cb); in gaudi_parse_cb_mmu()
5404 rc = -EINVAL; in gaudi_parse_cb_mmu()
5415 hl_cb_destroy(&hdev->kernel_mem_mgr, handle); in gaudi_parse_cb_mmu()
5431 rc = hl_cb_create(hdev, &hdev->kernel_mem_mgr, hdev->kernel_ctx, in gaudi_parse_cb_no_mmu()
5432 parser->patched_cb_size, false, false, in gaudi_parse_cb_no_mmu()
5435 dev_err(hdev->dev, in gaudi_parse_cb_no_mmu()
5440 parser->patched_cb = hl_cb_get(&hdev->kernel_mem_mgr, handle); in gaudi_parse_cb_no_mmu()
5442 if (!parser->patched_cb) { in gaudi_parse_cb_no_mmu()
5443 dev_crit(hdev->dev, "DMA CB handle invalid 0x%llx\n", handle); in gaudi_parse_cb_no_mmu()
5444 rc = -EFAULT; in gaudi_parse_cb_no_mmu()
5451 hl_cb_put(parser->patched_cb); in gaudi_parse_cb_no_mmu()
5460 hl_cb_destroy(&hdev->kernel_mem_mgr, handle); in gaudi_parse_cb_no_mmu()
5464 hl_userptr_delete_list(hdev, parser->job_userptr_list); in gaudi_parse_cb_no_mmu()
5471 struct asic_fixed_properties *asic_prop = &hdev->asic_prop; in gaudi_parse_cb_no_ext_queue()
5472 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_parse_cb_no_ext_queue()
5475 if ((parser->hw_queue_id >= GAUDI_QUEUE_ID_NIC_0_0) && in gaudi_parse_cb_no_ext_queue()
5476 (parser->hw_queue_id <= GAUDI_QUEUE_ID_NIC_9_3)) { in gaudi_parse_cb_no_ext_queue()
5477 nic_queue_offset = parser->hw_queue_id - GAUDI_QUEUE_ID_NIC_0_0; in gaudi_parse_cb_no_ext_queue()
5480 if (!(gaudi->hw_cap_initialized & nic_mask_q_id)) { in gaudi_parse_cb_no_ext_queue()
5481 dev_err(hdev->dev, "h/w queue %d is disabled\n", parser->hw_queue_id); in gaudi_parse_cb_no_ext_queue()
5482 return -EINVAL; in gaudi_parse_cb_no_ext_queue()
5487 if (hl_mem_area_inside_range((u64) (uintptr_t) parser->user_cb, in gaudi_parse_cb_no_ext_queue()
5488 parser->user_cb_size, in gaudi_parse_cb_no_ext_queue()
5489 asic_prop->sram_user_base_address, in gaudi_parse_cb_no_ext_queue()
5490 asic_prop->sram_end_address)) in gaudi_parse_cb_no_ext_queue()
5493 if (hl_mem_area_inside_range((u64) (uintptr_t) parser->user_cb, in gaudi_parse_cb_no_ext_queue()
5494 parser->user_cb_size, in gaudi_parse_cb_no_ext_queue()
5495 asic_prop->dram_user_base_address, in gaudi_parse_cb_no_ext_queue()
5496 asic_prop->dram_end_address)) in gaudi_parse_cb_no_ext_queue()
5500 if (hl_mem_area_inside_range((u64) (uintptr_t) parser->user_cb, in gaudi_parse_cb_no_ext_queue()
5501 parser->user_cb_size, in gaudi_parse_cb_no_ext_queue()
5502 asic_prop->pmmu.start_addr, in gaudi_parse_cb_no_ext_queue()
5503 asic_prop->pmmu.end_addr)) in gaudi_parse_cb_no_ext_queue()
5506 dev_err(hdev->dev, in gaudi_parse_cb_no_ext_queue()
5508 parser->user_cb, parser->user_cb_size); in gaudi_parse_cb_no_ext_queue()
5510 return -EFAULT; in gaudi_parse_cb_no_ext_queue()
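
/*
 * Editor's sketch (idea only): the three checks above all ask "does
 * [addr, addr + size) sit entirely inside [base, end)?". A stand-alone
 * predicate in that shape, with an explicit wrap-around guard; the real
 * hl_mem_area_inside_range() may differ in edge handling.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool area_inside_range(uint64_t addr, uint64_t size,
			      uint64_t base, uint64_t end)
{
	if (addr + size < addr)		/* reject wrap-around first */
		return false;
	return addr >= base && (addr + size) <= end;
}

int main(void)
{
	/* made-up SRAM window for the demo */
	const uint64_t sram_base = 0x7ff0000000ULL;
	const uint64_t sram_end  = 0x7ff1000000ULL;

	printf("%d %d\n",
	       area_inside_range(sram_base + 0x100, 0x200, sram_base, sram_end),
	       area_inside_range(sram_end - 0x10, 0x20, sram_base, sram_end));
	return 0;
}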
5515 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_cs_parser()
5517 if (parser->queue_type == QUEUE_TYPE_INT) in gaudi_cs_parser()
5520 if (gaudi->hw_cap_initialized & HW_CAP_MMU) in gaudi_cs_parser()
5536 cq_pkt = kernel_address + len - (sizeof(struct packet_msg_prot) * 2); in gaudi_add_end_of_cb_packets()
5539 cq_padding->ctl = cpu_to_le32(FIELD_PREP(GAUDI_PKT_CTL_OPCODE_MASK, PACKET_NOP)); in gaudi_add_end_of_cb_packets()
5549 cq_pkt->ctl = cpu_to_le32(tmp); in gaudi_add_end_of_cb_packets()
5550 cq_pkt->value = cpu_to_le32(cq_val); in gaudi_add_end_of_cb_packets()
5551 cq_pkt->addr = cpu_to_le64(cq_addr); in gaudi_add_end_of_cb_packets()
5557 cq_pkt->ctl = cpu_to_le32(tmp); in gaudi_add_end_of_cb_packets()
5558 cq_pkt->value = cpu_to_le32(1); in gaudi_add_end_of_cb_packets()
5559 msi_addr = hdev->pdev ? mmPCIE_CORE_MSI_REQ : mmPCIE_MSI_INTR_0 + msi_vec * 4; in gaudi_add_end_of_cb_packets()
5560 cq_pkt->addr = cpu_to_le64(CFG_BASE + msi_addr); in gaudi_add_end_of_cb_packets()
5579 return -EFAULT; in gaudi_memset_device_memory()
5581 lin_dma_pkt = cb->kernel_address; in gaudi_memset_device_memory()
5591 lin_dma_pkt->ctl = cpu_to_le32(ctl); in gaudi_memset_device_memory()
5592 lin_dma_pkt->src_addr = cpu_to_le64(val); in gaudi_memset_device_memory()
5593 lin_dma_pkt->dst_addr |= cpu_to_le64(addr); in gaudi_memset_device_memory()
5594 lin_dma_pkt->tsize = cpu_to_le32(size); in gaudi_memset_device_memory()
5598 dev_err(hdev->dev, "Failed to allocate a new job\n"); in gaudi_memset_device_memory()
5599 rc = -ENOMEM; in gaudi_memset_device_memory()
5605 if (err_cause && !hdev->init_done) { in gaudi_memset_device_memory()
5606 dev_dbg(hdev->dev, in gaudi_memset_device_memory()
5612 job->id = 0; in gaudi_memset_device_memory()
5613 job->user_cb = cb; in gaudi_memset_device_memory()
5614 atomic_inc(&job->user_cb->cs_cnt); in gaudi_memset_device_memory()
5615 job->user_cb_size = cb_size; in gaudi_memset_device_memory()
5616 job->hw_queue_id = GAUDI_QUEUE_ID_DMA_0_0; in gaudi_memset_device_memory()
5617 job->patched_cb = job->user_cb; in gaudi_memset_device_memory()
5618 job->job_cb_size = job->user_cb_size + sizeof(struct packet_msg_prot); in gaudi_memset_device_memory()
5625 atomic_dec(&cb->cs_cnt); in gaudi_memset_device_memory()
5630 dev_err(hdev->dev, "DMA Failed, cause 0x%x\n", err_cause); in gaudi_memset_device_memory()
5631 rc = -EIO; in gaudi_memset_device_memory()
5632 if (!hdev->init_done) { in gaudi_memset_device_memory()
5633 dev_dbg(hdev->dev, in gaudi_memset_device_memory()
5642 hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle); in gaudi_memset_device_memory()
5659 dev_err(hdev->dev, "CB size must be smaller than %u bytes", SZ_2M); in gaudi_memset_registers()

5660 return -ENOMEM; in gaudi_memset_registers()
5665 return -EFAULT; in gaudi_memset_registers()
5667 pkt = cb->kernel_address; in gaudi_memset_registers()
5676 pkt->ctl = cpu_to_le32(ctl); in gaudi_memset_registers()
5677 pkt->value = cpu_to_le32(val); in gaudi_memset_registers()
5678 pkt->addr = cpu_to_le64(reg_base + (i * 4)); in gaudi_memset_registers()
5683 dev_err(hdev->dev, "Failed to allocate a new job\n"); in gaudi_memset_registers()
5684 rc = -ENOMEM; in gaudi_memset_registers()
5688 job->id = 0; in gaudi_memset_registers()
5689 job->user_cb = cb; in gaudi_memset_registers()
5690 atomic_inc(&job->user_cb->cs_cnt); in gaudi_memset_registers()
5691 job->user_cb_size = cb_size; in gaudi_memset_registers()
5692 job->hw_queue_id = GAUDI_QUEUE_ID_DMA_0_0; in gaudi_memset_registers()
5693 job->patched_cb = job->user_cb; in gaudi_memset_registers()
5694 job->job_cb_size = cb_size; in gaudi_memset_registers()
5701 atomic_dec(&cb->cs_cnt); in gaudi_memset_registers()
5705 hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle); in gaudi_memset_registers()
5720 dev_err(hdev->dev, "failed resetting SM registers"); in gaudi_restore_sm_registers()
5721 return -ENOMEM; in gaudi_restore_sm_registers()
5728 dev_err(hdev->dev, "failed resetting SM registers"); in gaudi_restore_sm_registers()
5729 return -ENOMEM; in gaudi_restore_sm_registers()
5736 dev_err(hdev->dev, "failed resetting SM registers"); in gaudi_restore_sm_registers()
5737 return -ENOMEM; in gaudi_restore_sm_registers()
5744 dev_err(hdev->dev, "failed resetting SM registers"); in gaudi_restore_sm_registers()
5745 return -ENOMEM; in gaudi_restore_sm_registers()
5752 dev_err(hdev->dev, "failed resetting SM registers"); in gaudi_restore_sm_registers()
5753 return -ENOMEM; in gaudi_restore_sm_registers()
5760 dev_err(hdev->dev, "failed resetting SM registers"); in gaudi_restore_sm_registers()
5761 return -ENOMEM; in gaudi_restore_sm_registers()
5766 num_regs = NUM_OF_SOB_IN_BLOCK - GAUDI_FIRST_AVAILABLE_W_S_SYNC_OBJECT; in gaudi_restore_sm_registers()
5769 dev_err(hdev->dev, "failed resetting SM registers"); in gaudi_restore_sm_registers()
5770 return -ENOMEM; in gaudi_restore_sm_registers()
5775 num_regs = NUM_OF_MONITORS_IN_BLOCK - GAUDI_FIRST_AVAILABLE_W_S_MONITOR; in gaudi_restore_sm_registers()
5778 dev_err(hdev->dev, "failed resetting SM registers"); in gaudi_restore_sm_registers()
5779 return -ENOMEM; in gaudi_restore_sm_registers()
5787 u32 sob_delta = mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_1 - in gaudi_restore_dma_registers()
5803 /* For DMAs 2-7, need to restore WR_AWUSER_31_11 as it can be in gaudi_restore_dma_registers()
5823 qman_offset = i * (mmMME2_QM_BASE - mmMME0_QM_BASE); in gaudi_restore_qm_registers()
5860 u32 size = hdev->asic_prop.mmu_pgt_size + in gaudi_mmu_clear_pgt_range()
5861 hdev->asic_prop.mmu_cache_mng_size; in gaudi_mmu_clear_pgt_range()
5862 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_mmu_clear_pgt_range()
5863 u64 addr = hdev->asic_prop.mmu_pgt_addr; in gaudi_mmu_clear_pgt_range()
5865 if (!(gaudi->hw_cap_initialized & HW_CAP_MMU)) in gaudi_mmu_clear_pgt_range()
5902 dev_err(hdev->dev, in gaudi_dma_core_transfer()
5903 "DMA %d timed-out during reading of 0x%llx\n", in gaudi_dma_core_transfer()
5905 return -EIO; in gaudi_dma_core_transfer()
5911 dev_err(hdev->dev, "DMA Failed, cause 0x%x\n", err_cause); in gaudi_dma_core_transfer()
5912 dev_dbg(hdev->dev, in gaudi_dma_core_transfer()
5917 return -EIO; in gaudi_dma_core_transfer()
5937 return -ENOMEM; in gaudi_debugfs_read_dma()
5939 hdev->asic_funcs->hw_queues_lock(hdev); in gaudi_debugfs_read_dma()
5961 dev_err_ratelimited(hdev->dev, in gaudi_debugfs_read_dma()
5963 rc = -EAGAIN; in gaudi_debugfs_read_dma()
5981 dev_dbg(hdev->dev, in gaudi_debugfs_read_dma()
6008 size_left -= SZ_2M; in gaudi_debugfs_read_dma()
6021 hdev->asic_funcs->hw_queues_unlock(hdev); in gaudi_debugfs_read_dma()
6030 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_read_pte()
6032 if (hdev->reset_info.hard_reset_pending) in gaudi_read_pte()
6035 return readq(hdev->pcie_bar[HBM_BAR_ID] + in gaudi_read_pte()
6036 (addr - gaudi->hbm_bar_cur_addr)); in gaudi_read_pte()
6041 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_write_pte()
6043 if (hdev->reset_info.hard_reset_pending) in gaudi_write_pte()
6046 writeq(val, hdev->pcie_bar[HBM_BAR_ID] + in gaudi_write_pte()
6047 (addr - gaudi->hbm_bar_cur_addr)); in gaudi_write_pte()
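
/*
 * Editor's sketch (illustrative): the PTE accessors above go through the
 * HBM BAR window - the driver tracks the device address the BAR currently
 * maps (hbm_bar_cur_addr) and accesses "bar_va + (addr - cur)". A
 * stand-alone translation, with a bounds check this sketch adds for
 * clarity; the window size is invented.
 */
#include <stdint.h>
#include <stdio.h>

#define BAR_SIZE_SKETCH (256ULL << 20)	/* hypothetical window size */

static long bar_offset(uint64_t addr, uint64_t bar_cur_addr)
{
	if (addr < bar_cur_addr || addr - bar_cur_addr >= BAR_SIZE_SKETCH)
		return -1;	/* the window would need to be moved first */
	return (long)(addr - bar_cur_addr);
}

int main(void)
{
	uint64_t cur = 0x100000000ULL;	/* made-up current window base */

	printf("%ld %ld\n",
	       bar_offset(cur + 0x2000, cur),	/* inside: 0x2000 */
	       bar_offset(cur - 8, cur));	/* outside: -1 */
	return 0;
}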
6059 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_mmu_prepare()
6061 if (!(gaudi->hw_cap_initialized & HW_CAP_MMU)) in gaudi_mmu_prepare()
6065 dev_crit(hdev->dev, "asid %u is too big\n", asid); in gaudi_mmu_prepare()
6214 if (gaudi->hw_cap_initialized & HW_CAP_NIC0) { in gaudi_mmu_prepare()
6227 if (gaudi->hw_cap_initialized & HW_CAP_NIC1) { in gaudi_mmu_prepare()
6240 if (gaudi->hw_cap_initialized & HW_CAP_NIC2) { in gaudi_mmu_prepare()
6253 if (gaudi->hw_cap_initialized & HW_CAP_NIC3) { in gaudi_mmu_prepare()
6266 if (gaudi->hw_cap_initialized & HW_CAP_NIC4) { in gaudi_mmu_prepare()
6279 if (gaudi->hw_cap_initialized & HW_CAP_NIC5) { in gaudi_mmu_prepare()
6292 if (gaudi->hw_cap_initialized & HW_CAP_NIC6) { in gaudi_mmu_prepare()
6305 if (gaudi->hw_cap_initialized & HW_CAP_NIC7) { in gaudi_mmu_prepare()
6318 if (gaudi->hw_cap_initialized & HW_CAP_NIC8) { in gaudi_mmu_prepare()
6331 if (gaudi->hw_cap_initialized & HW_CAP_NIC9) { in gaudi_mmu_prepare()
6355 u32 tmp, timeout, dma_offset; in gaudi_send_job_on_qman0() local
6358 if (hdev->pldm) in gaudi_send_job_on_qman0()
6359 timeout = GAUDI_PLDM_QMAN0_TIMEOUT_USEC; in gaudi_send_job_on_qman0()
6361 timeout = HL_DEVICE_TIMEOUT_USEC; in gaudi_send_job_on_qman0()
6365 dev_err(hdev->dev, in gaudi_send_job_on_qman0()
6367 return -ENOMEM; in gaudi_send_job_on_qman0()
6370 cb = job->patched_cb; in gaudi_send_job_on_qman0()
6372 fence_pkt = cb->kernel_address + in gaudi_send_job_on_qman0()
6373 job->job_cb_size - sizeof(struct packet_msg_prot); in gaudi_send_job_on_qman0()
6379 fence_pkt->ctl = cpu_to_le32(tmp); in gaudi_send_job_on_qman0()
6380 fence_pkt->value = cpu_to_le32(GAUDI_QMAN0_FENCE_VAL); in gaudi_send_job_on_qman0()
6381 fence_pkt->addr = cpu_to_le64(fence_dma_addr); in gaudi_send_job_on_qman0()
6389 job->job_cb_size, cb->bus_address); in gaudi_send_job_on_qman0()
6391 dev_err(hdev->dev, "Failed to send CB on QMAN0, %d\n", rc); in gaudi_send_job_on_qman0()
6397 timeout, true); in gaudi_send_job_on_qman0()
6401 if (rc == -ETIMEDOUT) { in gaudi_send_job_on_qman0()
6402 dev_err(hdev->dev, "QMAN0 Job timeout (0x%x)\n", tmp); in gaudi_send_job_on_qman0()
6633 dev_err(hdev->dev, in gaudi_get_razwi_initiator_name()
6649 dev_err_ratelimited(hdev->dev, in gaudi_print_and_get_razwi_info()
6657 dev_err_ratelimited(hdev->dev, in gaudi_print_and_get_razwi_info()
6667 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_print_and_get_mmu_error_info()
6670 if (!(gaudi->hw_cap_initialized & HW_CAP_MMU)) in gaudi_print_and_get_mmu_error_info()
6679 dev_err_ratelimited(hdev->dev, "MMU page fault on va 0x%llx\n", *addr); in gaudi_print_and_get_mmu_error_info()
6691 dev_err_ratelimited(hdev->dev, "MMU access error on va 0x%llx\n", *addr); in gaudi_print_and_get_mmu_error_info()
6698 * +-------------------+------------------------------------------------------+
6701 * +-------------------+------------------------------------------------------+
6702 * | 0xF30 - 0xF3F |ECC single error indication (1 bit per memory wrapper)|
6707 * +-------------------+------------------------------------------------------+
6708 * | 0xF40 - 0xF4F |ECC double error indication (1 bit per memory wrapper)|
6713 * +-------------------+------------------------------------------------------+
6722 num_mem_regs = params->num_memories / 32 + in gaudi_extract_ecc_info()
6723 ((params->num_memories % 32) ? 1 : 0); in gaudi_extract_ecc_info()
6725 if (params->block_address >= CFG_BASE) in gaudi_extract_ecc_info()
6726 params->block_address -= CFG_BASE; in gaudi_extract_ecc_info()
6728 if (params->derr) in gaudi_extract_ecc_info()
6729 err_addr = params->block_address + GAUDI_ECC_DERR0_OFFSET; in gaudi_extract_ecc_info()
6731 err_addr = params->block_address + GAUDI_ECC_SERR0_OFFSET; in gaudi_extract_ecc_info()
6748 dev_err(hdev->dev, "ECC error information cannot be found\n"); in gaudi_extract_ecc_info()
6749 return -EINVAL; in gaudi_extract_ecc_info()
6752 WREG32(params->block_address + GAUDI_ECC_MEM_SEL_OFFSET, in gaudi_extract_ecc_info()
6756 RREG32(params->block_address + GAUDI_ECC_ADDRESS_OFFSET); in gaudi_extract_ecc_info()
6758 RREG32(params->block_address + GAUDI_ECC_SYNDROME_OFFSET); in gaudi_extract_ecc_info()
6761 reg = RREG32(params->block_address + GAUDI_ECC_MEM_INFO_CLR_OFFSET); in gaudi_extract_ecc_info()
6762 if (params->derr) in gaudi_extract_ecc_info()
6767 WREG32(params->block_address + GAUDI_ECC_MEM_INFO_CLR_OFFSET, reg); in gaudi_extract_ecc_info()
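
/*
 * Editor's sketch (stand-alone): per the register map in the comment
 * above, the failing memory wrapper is found by scanning
 * ceil(num_memories / 32) 32-bit indication registers for the set bit.
 * Here an array stands in for the register reads.
 */
#include <stdint.h>
#include <stdio.h>

static int find_wrapper_idx(const uint32_t *regs, unsigned int num_memories)
{
	unsigned int num_mem_regs = num_memories / 32 +
				    ((num_memories % 32) ? 1 : 0);
	unsigned int i, bit;

	for (i = 0; i < num_mem_regs; i++)
		for (bit = 0; bit < 32; bit++)
			if (regs[i] & (1u << bit))
				return (int)(i * 32 + bit);
	return -1;	/* no error indication found */
}

int main(void)
{
	uint32_t regs[2] = { 0, 1u << 5 };	/* wrapper 37 flagged */

	printf("wrapper %d\n", find_wrapper_idx(regs, 40));
	return 0;
}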
6773 * gaudi_queue_idx_dec - decrement queue index (pi/ci) and handle wrap
6782 u32 mask = q_len - 1; in gaudi_queue_idx_dec()
6785 * modular decrement is equivalent to adding (queue_size - 1) in gaudi_queue_idx_dec()
6787 * range [0, queue_len - 1] in gaudi_queue_idx_dec()
6789 return (idx + q_len - 1) & mask; in gaudi_queue_idx_dec()
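
/*
 * Editor's sketch (runnable check of the identity above): for a
 * power-of-two queue length, "(idx + q_len - 1) & (q_len - 1)" is
 * decrement-with-wrap, including the 0 -> q_len - 1 case.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t queue_idx_dec(uint32_t idx, uint32_t q_len)
{
	return (idx + q_len - 1) & (q_len - 1);
}

int main(void)
{
	const uint32_t q_len = 8;
	uint32_t idx;

	for (idx = 0; idx < q_len; idx++)
		printf("%u -> %u\n", idx, queue_idx_dec(idx, q_len));
	/* prints 0 -> 7, 1 -> 0, 2 -> 1, ... */
	return 0;
}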
6793 * gaudi_handle_sw_config_stream_data - print SW config stream data
6806 cq_ptr_lo_off = mmTPC0_QM_CQ_PTR_LO_1 - mmTPC0_QM_CQ_PTR_LO_0; in gaudi_handle_sw_config_stream_data()
6808 cq_ptr_lo = qman_base + (mmTPC0_QM_CQ_PTR_LO_0 - mmTPC0_QM_BASE) + in gaudi_handle_sw_config_stream_data()
6811 (mmTPC0_QM_CQ_PTR_HI_0 - mmTPC0_QM_CQ_PTR_LO_0); in gaudi_handle_sw_config_stream_data()
6813 (mmTPC0_QM_CQ_TSIZE_0 - mmTPC0_QM_CQ_PTR_LO_0); in gaudi_handle_sw_config_stream_data()
6817 dev_info(hdev->dev, "stop on err: stream: %u, addr: %#llx, size: %u\n", in gaudi_handle_sw_config_stream_data()
6821 hdev->captured_err_info.undef_opcode.cq_addr = cq_ptr; in gaudi_handle_sw_config_stream_data()
6822 hdev->captured_err_info.undef_opcode.cq_size = size; in gaudi_handle_sw_config_stream_data()
6823 hdev->captured_err_info.undef_opcode.stream_id = stream; in gaudi_handle_sw_config_stream_data()
6828 * gaudi_handle_last_pqes_on_err - print last PQEs on error
6847 q = &hdev->kernel_queues[qid_base + stream]; in gaudi_handle_last_pqes_on_err()
6849 qm_ci_stream_off = mmTPC0_QM_PQ_CI_1 - mmTPC0_QM_PQ_CI_0; in gaudi_handle_last_pqes_on_err()
6850 pq_ci = qman_base + (mmTPC0_QM_PQ_CI_0 - mmTPC0_QM_BASE) + in gaudi_handle_last_pqes_on_err()
6853 queue_len = (q->queue_type == QUEUE_TYPE_INT) ? in gaudi_handle_last_pqes_on_err()
6854 q->int_queue_len : HL_QUEUE_LENGTH; in gaudi_handle_last_pqes_on_err()
6856 hdev->asic_funcs->hw_queues_lock(hdev); in gaudi_handle_last_pqes_on_err()
6863 /* we should start printing from ci - 1 */ in gaudi_handle_last_pqes_on_err()
6871 bd = q->kernel_address; in gaudi_handle_last_pqes_on_err()
6874 len = le32_to_cpu(bd->len); in gaudi_handle_last_pqes_on_err()
6875 /* len 0 means uninitialized entry - break */ in gaudi_handle_last_pqes_on_err()
6879 addr[i] = le64_to_cpu(bd->ptr); in gaudi_handle_last_pqes_on_err()
6881 dev_info(hdev->dev, "stop on err PQE(stream %u): ci: %u, addr: %#llx, size: %u\n", in gaudi_handle_last_pqes_on_err()
6889 struct undefined_opcode_info *undef_opcode = &hdev->captured_err_info.undef_opcode; in gaudi_handle_last_pqes_on_err()
6890 u32 arr_idx = undef_opcode->cb_addr_streams_len; in gaudi_handle_last_pqes_on_err()
6893 undef_opcode->timestamp = ktime_get(); in gaudi_handle_last_pqes_on_err()
6894 undef_opcode->engine_id = gaudi_queue_id_to_engine_id[qid_base]; in gaudi_handle_last_pqes_on_err()
6897 memcpy(undef_opcode->cb_addr_streams[arr_idx], addr, sizeof(addr)); in gaudi_handle_last_pqes_on_err()
6898 undef_opcode->cb_addr_streams_len++; in gaudi_handle_last_pqes_on_err()
6901 hdev->asic_funcs->hw_queues_unlock(hdev); in gaudi_handle_last_pqes_on_err()
6905 * handle_qman_data_on_err - extract QMAN data on error
6928 /* handle Lower-CP */ in handle_qman_data_on_err()
6946 glbl_sts_addr = qman_base + (mmTPC0_QM_GLBL_STS1_0 - mmTPC0_QM_BASE); in gaudi_handle_qman_err_generic()
6947 arb_err_addr = qman_base + (mmTPC0_QM_ARB_ERR_CAUSE - mmTPC0_QM_BASE); in gaudi_handle_qman_err_generic()
6964 dev_err_ratelimited(hdev->dev, in gaudi_handle_qman_err_generic()
6973 hdev->captured_err_info.undef_opcode.write_enable) { in gaudi_handle_qman_err_generic()
6974 memset(&hdev->captured_err_info.undef_opcode, 0, in gaudi_handle_qman_err_generic()
6975 sizeof(hdev->captured_err_info.undef_opcode)); in gaudi_handle_qman_err_generic()
6977 hdev->captured_err_info.undef_opcode.write_enable = false; in gaudi_handle_qman_err_generic()
6982 if (!hdev->stop_on_err) in gaudi_handle_qman_err_generic()
6995 dev_err_ratelimited(hdev->dev, in gaudi_handle_qman_err_generic()
7006 u32 index = event_type - GAUDI_EVENT_DMA_IF_SEI_0; in gaudi_print_sm_sei_info()
7011 switch (sei_data->sei_cause) { in gaudi_print_sm_sei_info()
7013 dev_err_ratelimited(hdev->dev, in gaudi_print_sm_sei_info()
7016 le32_to_cpu(sei_data->sei_log)); in gaudi_print_sm_sei_info()
7019 dev_err_ratelimited(hdev->dev, in gaudi_print_sm_sei_info()
7020 "%s SEI Error: Unaligned 4B LBW access, monitor agent address low - %#x", in gaudi_print_sm_sei_info()
7022 le32_to_cpu(sei_data->sei_log)); in gaudi_print_sm_sei_info()
7025 dev_err_ratelimited(hdev->dev, in gaudi_print_sm_sei_info()
7028 le32_to_cpu(sei_data->sei_log)); in gaudi_print_sm_sei_info()
7031 dev_err_ratelimited(hdev->dev, "Unknown SM SEI cause %u", in gaudi_print_sm_sei_info()
7032 le32_to_cpu(sei_data->sei_log)); in gaudi_print_sm_sei_info()
7046 if (hdev->asic_prop.fw_security_enabled) { in gaudi_handle_ecc_event()
7057 index = event_type - GAUDI_EVENT_TPC0_SERR; in gaudi_handle_ecc_event()
7064 index = event_type - GAUDI_EVENT_TPC0_DERR; in gaudi_handle_ecc_event()
7075 index = (event_type - GAUDI_EVENT_MME0_ACC_SERR) / 4; in gaudi_handle_ecc_event()
7085 index = (event_type - GAUDI_EVENT_MME0_ACC_DERR) / 4; in gaudi_handle_ecc_event()
7095 index = (event_type - GAUDI_EVENT_MME0_SBAB_SERR) / 4; in gaudi_handle_ecc_event()
7106 index = (event_type - GAUDI_EVENT_MME0_SBAB_DERR) / 4; in gaudi_handle_ecc_event()
7119 ecc_address = le64_to_cpu(ecc_data->ecc_address); in gaudi_handle_ecc_event()
7120 ecc_syndrom = le64_to_cpu(ecc_data->ecc_syndrom); in gaudi_handle_ecc_event()
7121 memory_wrapper_idx = ecc_data->memory_wrapper_idx; in gaudi_handle_ecc_event()
7129 dev_err(hdev->dev, in gaudi_handle_ecc_event()
7143 index = event_type - GAUDI_EVENT_TPC0_QM; in gaudi_handle_qman_err()
7160 index = event_type - GAUDI_EVENT_DMA0_QM; in gaudi_handle_qman_err()
7242 dev_err_ratelimited(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n", in gaudi_print_irq_info()
7271 struct hl_hw_queue *q = &hdev->kernel_queues[GAUDI_QUEUE_ID_CPU_PQ]; in gaudi_print_out_of_sync_info()
7273 dev_err(hdev->dev, "Out of sync with FW, FW: pi=%u, ci=%u, LKD: pi=%u, ci=%d\n", in gaudi_print_out_of_sync_info()
7274 le32_to_cpu(sync_err->pi), le32_to_cpu(sync_err->ci), q->pi, atomic_read(&q->ci)); in gaudi_print_out_of_sync_info()
7280 dev_err(hdev->dev, in gaudi_print_fw_alive_info()
7282 (fw_alive->severity == FW_ALIVE_SEVERITY_MINOR) ? "Minor" : "Critical", in gaudi_print_fw_alive_info()
7283 le32_to_cpu(fw_alive->process_id), in gaudi_print_fw_alive_info()
7284 le32_to_cpu(fw_alive->thread_id), in gaudi_print_fw_alive_info()
7285 le64_to_cpu(fw_alive->uptime_seconds)); in gaudi_print_fw_alive_info()
7293 u16 nic_id = event_type - GAUDI_EVENT_NIC_SEI_0; in gaudi_print_nic_axi_irq_info()
7295 switch (eq_nic_sei->axi_error_cause) { in gaudi_print_nic_axi_irq_info()
7318 dev_err(hdev->dev, "unknown NIC AXI cause %d\n", in gaudi_print_nic_axi_irq_info()
7319 eq_nic_sei->axi_error_cause); in gaudi_print_nic_axi_irq_info()
7325 eq_nic_sei->id); in gaudi_print_nic_axi_irq_info()
7326 dev_err_ratelimited(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n", in gaudi_print_nic_axi_irq_info()
7332 /* GAUDI doesn't support any reset except hard-reset */ in gaudi_compute_reset_late_init()
7333 return -EPERM; in gaudi_compute_reset_late_init()
7342 if (hdev->asic_prop.fw_app_cpu_boot_dev_sts0 & in gaudi_hbm_read_interrupts()
7345 dev_err(hdev->dev, "No FW ECC data"); in gaudi_hbm_read_interrupts()
7350 le32_to_cpu(hbm_ecc_data->hbm_ecc_info)); in gaudi_hbm_read_interrupts()
7352 le32_to_cpu(hbm_ecc_data->hbm_ecc_info)); in gaudi_hbm_read_interrupts()
7354 le32_to_cpu(hbm_ecc_data->hbm_ecc_info)); in gaudi_hbm_read_interrupts()
7356 le32_to_cpu(hbm_ecc_data->hbm_ecc_info)); in gaudi_hbm_read_interrupts()
7358 le32_to_cpu(hbm_ecc_data->hbm_ecc_info)); in gaudi_hbm_read_interrupts()
7360 le32_to_cpu(hbm_ecc_data->hbm_ecc_info)); in gaudi_hbm_read_interrupts()
7362 le32_to_cpu(hbm_ecc_data->hbm_ecc_info)); in gaudi_hbm_read_interrupts()
7364 dev_err(hdev->dev, in gaudi_hbm_read_interrupts()
7367 dev_err(hdev->dev, in gaudi_hbm_read_interrupts()
7369 device, ch, hbm_ecc_data->first_addr, type, in gaudi_hbm_read_interrupts()
7370 hbm_ecc_data->sec_cont_cnt, hbm_ecc_data->sec_cnt, in gaudi_hbm_read_interrupts()
7371 hbm_ecc_data->dec_cnt); in gaudi_hbm_read_interrupts()
7375 if (hdev->asic_prop.fw_security_enabled) { in gaudi_hbm_read_interrupts()
7376 dev_info(hdev->dev, "Cannot access MC regs for ECC data while security is enabled\n"); in gaudi_hbm_read_interrupts()
7385 rc = -EIO; in gaudi_hbm_read_interrupts()
7386 dev_err(hdev->dev, in gaudi_hbm_read_interrupts()
7393 dev_err(hdev->dev, in gaudi_hbm_read_interrupts()
7405 rc = -EIO; in gaudi_hbm_read_interrupts()
7406 dev_err(hdev->dev, in gaudi_hbm_read_interrupts()
7413 dev_err(hdev->dev, in gaudi_hbm_read_interrupts()
7434 rc = -EIO; in gaudi_hbm_read_interrupts()
7435 dev_err(hdev->dev, in gaudi_hbm_read_interrupts()
7442 rc = -EIO; in gaudi_hbm_read_interrupts()
7443 dev_err(hdev->dev, in gaudi_hbm_read_interrupts()
7485 dev_err_ratelimited(hdev->dev, in gaudi_tpc_read_interrupts()
7489 /* If this is QM error, we need to soft-reset */ in gaudi_tpc_read_interrupts()
7502 return (tpc_dec_event_type - GAUDI_EVENT_TPC0_DEC) >> 1; in tpc_dec_event_to_tpc_id()
7507 return (tpc_dec_event_type - GAUDI_EVENT_TPC0_KRN_ERR) / 6; in tpc_krn_event_to_tpc_id()
7514 mutex_lock(&hdev->clk_throttling.lock); in gaudi_print_clk_change_info()
7518 hdev->clk_throttling.current_reason |= HL_CLK_THROTTLE_POWER; in gaudi_print_clk_change_info()
7519 hdev->clk_throttling.aggregated_reason |= HL_CLK_THROTTLE_POWER; in gaudi_print_clk_change_info()
7520 hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].start = ktime_get(); in gaudi_print_clk_change_info()
7521 hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = zero_time; in gaudi_print_clk_change_info()
7522 dev_info_ratelimited(hdev->dev, in gaudi_print_clk_change_info()
7527 hdev->clk_throttling.current_reason &= ~HL_CLK_THROTTLE_POWER; in gaudi_print_clk_change_info()
7528 hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = ktime_get(); in gaudi_print_clk_change_info()
7529 dev_info_ratelimited(hdev->dev, in gaudi_print_clk_change_info()
7534 hdev->clk_throttling.current_reason |= HL_CLK_THROTTLE_THERMAL; in gaudi_print_clk_change_info()
7535 hdev->clk_throttling.aggregated_reason |= HL_CLK_THROTTLE_THERMAL; in gaudi_print_clk_change_info()
7536 hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].start = ktime_get(); in gaudi_print_clk_change_info()
7537 hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = zero_time; in gaudi_print_clk_change_info()
7539 dev_info_ratelimited(hdev->dev, in gaudi_print_clk_change_info()
7544 hdev->clk_throttling.current_reason &= ~HL_CLK_THROTTLE_THERMAL; in gaudi_print_clk_change_info()
7545 hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = ktime_get(); in gaudi_print_clk_change_info()
7547 dev_info_ratelimited(hdev->dev, in gaudi_print_clk_change_info()
7552 dev_err(hdev->dev, "Received invalid clock change event %d\n", in gaudi_print_clk_change_info()
7557 mutex_unlock(&hdev->clk_throttling.lock); in gaudi_print_clk_change_info()
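
/*
 * Editor's sketch (illustrative, locking elided): the throttling state
 * above is a pair of bitmasks - reasons active right now vs. every reason
 * seen since reset - plus per-type start/end timestamps, all updated under
 * a lock. A stand-alone version of that bookkeeping.
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define THROTTLE_POWER   (1u << 0)
#define THROTTLE_THERMAL (1u << 1)

struct clk_throttle_sketch {
	uint32_t current_reason;	/* reasons active right now */
	uint32_t aggregated_reason;	/* every reason seen so far */
	time_t start[2], end[2];	/* [0]=power, [1]=thermal */
};

static void throttle_start(struct clk_throttle_sketch *s, uint32_t bit, int i)
{
	s->current_reason |= bit;
	s->aggregated_reason |= bit;	/* never cleared here */
	s->start[i] = time(NULL);
	s->end[i] = 0;			/* event still in progress */
}

static void throttle_end(struct clk_throttle_sketch *s, uint32_t bit, int i)
{
	s->current_reason &= ~bit;
	s->end[i] = time(NULL);
}

int main(void)
{
	struct clk_throttle_sketch s = { 0 };

	throttle_start(&s, THROTTLE_POWER, 0);
	throttle_end(&s, THROTTLE_POWER, 0);
	printf("current=%#x aggregated=%#x\n",
	       s.current_reason, s.aggregated_reason);	/* 0x0, 0x1 */
	return 0;
}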
7562 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_handle_eqe()
7564 u64 data = le64_to_cpu(eq_entry->data[0]), event_mask = 0; in gaudi_handle_eqe()
7565 u32 ctl = le32_to_cpu(eq_entry->hdr.ctl); in gaudi_handle_eqe()
7574 dev_err(hdev->dev, "Event type %u exceeds maximum of %u", in gaudi_handle_eqe()
7575 event_type, GAUDI_EVENT_SIZE - 1); in gaudi_handle_eqe()
7579 gaudi->events_stat[event_type]++; in gaudi_handle_eqe()
7580 gaudi->events_stat_aggregate[event_type]++; in gaudi_handle_eqe()
7607 gaudi_handle_ecc_event(hdev, event_type, &eq_entry->ecc_data); in gaudi_handle_eqe()
7628 &eq_entry->hbm_ecc_data); in gaudi_handle_eqe()
7640 &eq_entry->hbm_ecc_data); in gaudi_handle_eqe()
7665 dev_err(hdev->dev, "reset required due to %s\n", in gaudi_handle_eqe()
7690 dev_err(hdev->dev, "reset required due to %s\n", in gaudi_handle_eqe()
7724 gaudi_handle_ecc_event(hdev, event_type, &eq_entry->ecc_data); in gaudi_handle_eqe()
7803 &eq_entry->sm_sei_data); in gaudi_handle_eqe()
7807 dev_err(hdev->dev, in gaudi_handle_eqe()
7821 cause = le64_to_cpu(eq_entry->data[0]) & 0xFF; in gaudi_handle_eqe()
7822 dev_err(hdev->dev, in gaudi_handle_eqe()
7835 gaudi_print_out_of_sync_info(hdev, &eq_entry->pkt_sync_err); in gaudi_handle_eqe()
7841 gaudi_print_fw_alive_info(hdev, &eq_entry->fw_alive); in gaudi_handle_eqe()
7849 dev_err(hdev->dev, "Received invalid H/W interrupt %d\n", in gaudi_handle_eqe()
7862 if (hdev->asic_prop.fw_security_enabled && !reset_direct) { in gaudi_handle_eqe()
7868 } else if (hdev->hard_reset_on_fw_events) { in gaudi_handle_eqe()
7891 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_get_events_stat()
7894 *size = (u32) sizeof(gaudi->events_stat_aggregate); in gaudi_get_events_stat()
7895 return gaudi->events_stat_aggregate; in gaudi_get_events_stat()
7898 *size = (u32) sizeof(gaudi->events_stat); in gaudi_get_events_stat()
7899 return gaudi->events_stat; in gaudi_get_events_stat()
7904 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_mmu_invalidate_cache()
7908 if (!(gaudi->hw_cap_initialized & HW_CAP_MMU) || in gaudi_mmu_invalidate_cache()
7909 hdev->reset_info.hard_reset_pending) in gaudi_mmu_invalidate_cache()
7912 if (hdev->pldm) in gaudi_mmu_invalidate_cache()
7919 WREG32(mmSTLB_CACHE_INV, gaudi->mmu_cache_inv_pi++); in gaudi_mmu_invalidate_cache()
7942 return hdev->asic_funcs->mmu_invalidate_cache(hdev, is_hard, flags); in gaudi_mmu_invalidate_cache_range()
7950 if (hdev->pldm) in gaudi_mmu_update_asid_hop0_addr()
7969 dev_err(hdev->dev, in gaudi_mmu_update_asid_hop0_addr()
7970 "Timeout during MMU hop0 config of asid %d\n", asid); in gaudi_mmu_update_asid_hop0_addr()
7979 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_send_heartbeat()
7981 if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q)) in gaudi_send_heartbeat()
7989 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_cpucp_info_get()
7990 struct asic_fixed_properties *prop = &hdev->asic_prop; in gaudi_cpucp_info_get()
7993 if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q)) in gaudi_cpucp_info_get()
8002 if (!strlen(prop->cpucp_info.card_name)) in gaudi_cpucp_info_get()
8003 strncpy(prop->cpucp_info.card_name, GAUDI_DEFAULT_CARD_NAME, in gaudi_cpucp_info_get()
8006 hdev->card_type = le32_to_cpu(hdev->asic_prop.cpucp_info.card_type); in gaudi_cpucp_info_get()
8016 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_is_device_idle()
8017 const char *fmt = "%-5d%-9s%#-14x%#-12x%#x\n"; in gaudi_is_device_idle()
8018 const char *mme_slave_fmt = "%-5d%-9s%-14s%-12s%#x\n"; in gaudi_is_device_idle()
8019 const char *nic_fmt = "%-5d%-9s%#-14x%#x\n"; in gaudi_is_device_idle()
8029 "--- ------- ------------ ---------- -------------\n"); in gaudi_is_device_idle()
8053 "--- ------- ------------ ---------- ----------\n"); in gaudi_is_device_idle()
8075 "--- ------- ------------ ---------- -----------\n"); in gaudi_is_device_idle()
8101 is_eng_idle ? "Y" : "N", "-", in gaudi_is_device_idle()
8102 "-", mme_arch_sts); in gaudi_is_device_idle()
8109 "--- ------- ------------ ----------\n"); in gaudi_is_device_idle()
8114 if (gaudi->hw_cap_initialized & BIT(HW_CAP_NIC_SHIFT + port)) { in gaudi_is_device_idle()
8129 if (gaudi->hw_cap_initialized & BIT(HW_CAP_NIC_SHIFT + port)) { in gaudi_is_device_idle()
8151 __acquires(&gaudi->hw_queues_lock) in gaudi_hw_queues_lock()
8153 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_hw_queues_lock()
8155 spin_lock(&gaudi->hw_queues_lock); in gaudi_hw_queues_lock()
8159 __releases(&gaudi->hw_queues_lock) in gaudi_hw_queues_unlock()
8161 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_hw_queues_unlock()
8163 spin_unlock(&gaudi->hw_queues_lock); in gaudi_hw_queues_unlock()
8168 return hdev->pdev->device; in gaudi_get_pci_id()
8174 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_get_eeprom_data()
8176 if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q)) in gaudi_get_eeprom_data()
8184 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_get_monitor_dump()
8186 if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q)) in gaudi_get_monitor_dump()
8202 offset = tpc_id * (mmTPC1_CFG_STATUS - mmTPC0_CFG_STATUS); in gaudi_run_tpc_kernel()
8204 if (hdev->pldm) in gaudi_run_tpc_kernel()
8245 dev_err(hdev->dev, in gaudi_run_tpc_kernel()
8246 "Timeout while waiting for TPC%d icache prefetch\n", in gaudi_run_tpc_kernel()
8248 return -EIO; in gaudi_run_tpc_kernel()
8268 dev_err(hdev->dev, in gaudi_run_tpc_kernel()
8269 "Timeout while waiting for TPC%d vector pipe\n", in gaudi_run_tpc_kernel()
8271 return -EIO; in gaudi_run_tpc_kernel()
8283 dev_err(hdev->dev, in gaudi_run_tpc_kernel()
8284 "Timeout while waiting for TPC%d kernel to execute\n", in gaudi_run_tpc_kernel()
8286 return -EIO; in gaudi_run_tpc_kernel()
8295 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_internal_cb_pool_init()
8298 if (!(gaudi->hw_cap_initialized & HW_CAP_MMU)) in gaudi_internal_cb_pool_init()
8301 hdev->internal_cb_pool_virt_addr = hl_asic_dma_alloc_coherent(hdev, in gaudi_internal_cb_pool_init()
8303 &hdev->internal_cb_pool_dma_addr, in gaudi_internal_cb_pool_init()
8306 if (!hdev->internal_cb_pool_virt_addr) in gaudi_internal_cb_pool_init()
8307 return -ENOMEM; in gaudi_internal_cb_pool_init()
8313 hdev->internal_cb_pool = gen_pool_create(min_alloc_order, -1); in gaudi_internal_cb_pool_init()
8314 if (!hdev->internal_cb_pool) { in gaudi_internal_cb_pool_init()
8315 dev_err(hdev->dev, in gaudi_internal_cb_pool_init()
8317 rc = -ENOMEM; in gaudi_internal_cb_pool_init()
8321 rc = gen_pool_add(hdev->internal_cb_pool, in gaudi_internal_cb_pool_init()
8322 (uintptr_t) hdev->internal_cb_pool_virt_addr, in gaudi_internal_cb_pool_init()
8323 HOST_SPACE_INTERNAL_CB_SZ, -1); in gaudi_internal_cb_pool_init()
8325 dev_err(hdev->dev, in gaudi_internal_cb_pool_init()
8327 rc = -EFAULT; in gaudi_internal_cb_pool_init()
8331 hdev->internal_cb_va_base = hl_reserve_va_block(hdev, ctx, in gaudi_internal_cb_pool_init()
8335 if (!hdev->internal_cb_va_base) { in gaudi_internal_cb_pool_init()
8336 rc = -ENOMEM; in gaudi_internal_cb_pool_init()
8340 mutex_lock(&hdev->mmu_lock); in gaudi_internal_cb_pool_init()
8342 rc = hl_mmu_map_contiguous(ctx, hdev->internal_cb_va_base, in gaudi_internal_cb_pool_init()
8343 hdev->internal_cb_pool_dma_addr, in gaudi_internal_cb_pool_init()
8352 mutex_unlock(&hdev->mmu_lock); in gaudi_internal_cb_pool_init()
8357 hl_mmu_unmap_contiguous(ctx, hdev->internal_cb_va_base, in gaudi_internal_cb_pool_init()
8360 mutex_unlock(&hdev->mmu_lock); in gaudi_internal_cb_pool_init()
8361 hl_unreserve_va_block(hdev, ctx, hdev->internal_cb_va_base, in gaudi_internal_cb_pool_init()
8364 gen_pool_destroy(hdev->internal_cb_pool); in gaudi_internal_cb_pool_init()
8366 hl_asic_dma_free_coherent(hdev, HOST_SPACE_INTERNAL_CB_SZ, hdev->internal_cb_pool_virt_addr, in gaudi_internal_cb_pool_init()
8367 hdev->internal_cb_pool_dma_addr); in gaudi_internal_cb_pool_init()
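
/*
 * Editor's sketch (skeleton): the error path above is the kernel's
 * goto-unwind idiom - acquire resources in order, and on failure jump to
 * a label that releases only what was already acquired, in reverse order.
 * Dummy resources stand in for the pool, VA block, and mapping.
 */
#include <stdlib.h>

static int init_resources(int fail_late)
{
	void *pool, *mapping;
	int rc = 0;

	pool = malloc(64);			/* step 1 */
	if (!pool)
		return -1;

	mapping = malloc(64);			/* step 2 */
	if (!mapping) {
		rc = -1;
		goto destroy_pool;
	}

	if (fail_late) {			/* step 3 can fail too */
		rc = -1;
		goto unmap;
	}

	free(mapping);	/* demo only: the real driver keeps both alive */
	free(pool);
	return 0;

unmap:
	free(mapping);		/* undo step 2 */
destroy_pool:
	free(pool);		/* undo step 1, reverse acquisition order */
	return rc;
}

int main(void)
{
	return init_resources(1) == -1 ? 0 : 1;
}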
8375 struct gaudi_device *gaudi = hdev->asic_specific; in gaudi_internal_cb_pool_fini()
8377 if (!(gaudi->hw_cap_initialized & HW_CAP_MMU)) in gaudi_internal_cb_pool_fini()
8380 mutex_lock(&hdev->mmu_lock); in gaudi_internal_cb_pool_fini()
8381 hl_mmu_unmap_contiguous(ctx, hdev->internal_cb_va_base, in gaudi_internal_cb_pool_fini()
8383 hl_unreserve_va_block(hdev, ctx, hdev->internal_cb_va_base, in gaudi_internal_cb_pool_fini()
8386 mutex_unlock(&hdev->mmu_lock); in gaudi_internal_cb_pool_fini()
8388 gen_pool_destroy(hdev->internal_cb_pool); in gaudi_internal_cb_pool_fini()
8390 hl_asic_dma_free_coherent(hdev, HOST_SPACE_INTERNAL_CB_SZ, hdev->internal_cb_pool_virt_addr, in gaudi_internal_cb_pool_fini()
8391 hdev->internal_cb_pool_dma_addr); in gaudi_internal_cb_pool_fini()
8398 if (ctx->asid == HL_KERNEL_ASID_ID) in gaudi_ctx_init()
8401 rc = gaudi_internal_cb_pool_init(ctx->hdev, ctx); in gaudi_ctx_init()
8405 rc = gaudi_restore_user_registers(ctx->hdev); in gaudi_ctx_init()
8407 gaudi_internal_cb_pool_fini(ctx->hdev, ctx); in gaudi_ctx_init()
8414 if (ctx->asid == HL_KERNEL_ASID_ID) in gaudi_ctx_fini()
8417 gaudi_internal_cb_pool_fini(ctx->hdev, ctx); in gaudi_ctx_fini()
8455 pkt = cb->kernel_address + size; in gaudi_gen_signal_cb()
8470 pkt->value = cpu_to_le32(value); in gaudi_gen_signal_cb()
8471 pkt->ctl = cpu_to_le32(ctl); in gaudi_gen_signal_cb()
8490 pkt->value = cpu_to_le32(value); in gaudi_add_mon_msg_short()
8491 pkt->ctl = cpu_to_le32(ctl); in gaudi_add_mon_msg_short()
8506 dev_err(hdev->dev, in gaudi_add_arm_monitor_pkt()
8519 (mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_ARM_0 + mon_id * 4) - in gaudi_add_arm_monitor_pkt()
8539 pkt->value = cpu_to_le32(value); in gaudi_add_arm_monitor_pkt()
8540 pkt->ctl = cpu_to_le32(ctl); in gaudi_add_arm_monitor_pkt()
8560 pkt->cfg = cpu_to_le32(cfg); in gaudi_add_fence_pkt()
8561 pkt->ctl = cpu_to_le32(ctl); in gaudi_add_fence_pkt()
8629 nic_index = (queue_id - GAUDI_QUEUE_ID_NIC_0_0) >> 2; in gaudi_get_fence_addr()
8644 nic_index = (queue_id - GAUDI_QUEUE_ID_NIC_0_1) >> 2; in gaudi_get_fence_addr()
8659 nic_index = (queue_id - GAUDI_QUEUE_ID_NIC_0_2) >> 2; in gaudi_get_fence_addr()
8674 nic_index = (queue_id - GAUDI_QUEUE_ID_NIC_0_3) >> 2; in gaudi_get_fence_addr()
8680 return -EINVAL; in gaudi_get_fence_addr()
8702 (mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0 + mon_id * 4) - in gaudi_add_mon_pkts()
8710 (mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_ADDRH_0 + mon_id * 4) - in gaudi_add_mon_pkts()
8721 (mmSYNC_MNGR_W_S_SYNC_MNGR_OBJS_MON_PAY_DATA_0 + mon_id * 4) - in gaudi_add_mon_pkts()
8732 struct hl_cb *cb = (struct hl_cb *) prop->data; in gaudi_gen_wait_cb()
8733 void *buf = cb->kernel_address; in gaudi_gen_wait_cb()
8735 u32 size = prop->size; in gaudi_gen_wait_cb()
8737 if (gaudi_get_fence_addr(hdev, prop->q_idx, &fence_addr)) { in gaudi_gen_wait_cb()
8738 dev_crit(hdev->dev, "wrong queue id %d for wait packet\n", in gaudi_gen_wait_cb()
8739 prop->q_idx); in gaudi_gen_wait_cb()
8743 size += gaudi_add_mon_pkts(buf + size, prop->mon_id, fence_addr); in gaudi_gen_wait_cb()
8744 size += gaudi_add_arm_monitor_pkt(hdev, buf + size, prop->sob_base, in gaudi_gen_wait_cb()
8745 prop->sob_mask, prop->sob_val, prop->mon_id); in gaudi_gen_wait_cb()
8755 dev_dbg(hdev->dev, "reset SOB, q_idx: %d, sob_id: %d\n", hw_sob->q_idx, in gaudi_reset_sob()
8756 hw_sob->sob_id); in gaudi_reset_sob()
8759 hw_sob->sob_id * 4, 0); in gaudi_reset_sob()
8761 kref_init(&hw_sob->kref); in gaudi_reset_sob()
8774 return -EPERM; in gaudi_get_hw_block_id()
8781 return -EPERM; in gaudi_block_mmap()
8787 &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs; in gaudi_enable_events_from_fw()
8788 u32 irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ? in gaudi_enable_events_from_fw()
8790 le32_to_cpu(dyn_regs->gic_host_ints_irq); in gaudi_enable_events_from_fw()
8798 return -EINVAL; in gaudi_ack_mmu_page_fault_or_access_error()
8814 default: return -EINVAL; in gaudi_map_pll_idx_to_fw_idx()
8830 reg_value -= lower_32_bits(CFG_BASE); in gaudi_add_sync_to_engine_map_entry()
8835 return -ENOMEM; in gaudi_add_sync_to_engine_map_entry()
8836 entry->engine_type = engine_type; in gaudi_add_sync_to_engine_map_entry()
8837 entry->engine_id = engine_id; in gaudi_add_sync_to_engine_map_entry()
8838 entry->sync_id = reg_value; in gaudi_add_sync_to_engine_map_entry()
8839 hash_add(map->tb, &entry->node, reg_value); in gaudi_add_sync_to_engine_map_entry()
8847 struct hl_state_dump_specs *sds = &hdev->state_dump_specs; in gaudi_gen_sync_to_engine_map()
8852 for (i = 0; i < sds->props[SP_NUM_OF_TPC_ENGINES]; ++i) { in gaudi_gen_sync_to_engine_map()
8854 reg_value = RREG32(sds->props[SP_TPC0_CFG_SO] + in gaudi_gen_sync_to_engine_map()
8855 sds->props[SP_NEXT_TPC] * i); in gaudi_gen_sync_to_engine_map()
8864 for (i = 0; i < sds->props[SP_NUM_OF_MME_ENGINES]; ++i) { in gaudi_gen_sync_to_engine_map()
8865 for (j = 0; j < sds->props[SP_SUB_MME_ENG_NUM]; ++j) { in gaudi_gen_sync_to_engine_map()
8867 reg_value = RREG32(sds->props[SP_MME_CFG_SO] + in gaudi_gen_sync_to_engine_map()
8868 sds->props[SP_NEXT_MME] * i + in gaudi_gen_sync_to_engine_map()
8873 i * sds->props[SP_SUB_MME_ENG_NUM] + j); in gaudi_gen_sync_to_engine_map()
8880 for (i = 0; i < sds->props[SP_NUM_OF_DMA_ENGINES]; ++i) { in gaudi_gen_sync_to_engine_map()
8881 reg_value = RREG32(sds->props[SP_DMA_CFG_SO] + in gaudi_gen_sync_to_engine_map()
8882 sds->props[SP_DMA_QUEUES_OFFSET] * i); in gaudi_gen_sync_to_engine_map()
8901 mon->status); in gaudi_monitor_valid()
8914 mon->arm_data); in gaudi_fill_sobs_from_mon()
8916 mon->arm_data); in gaudi_fill_sobs_from_mon()
8918 for (i = 0, offset = 0; mask && offset < MONITOR_SOB_STRING_SIZE - in gaudi_fill_sobs_from_mon()
8950 mon->id, name, in gaudi_print_single_monitor()
8952 mon->arm_data), in gaudi_print_single_monitor()
8957 mon->arm_data)), in gaudi_print_single_monitor()
8959 mon->arm_data), in gaudi_print_single_monitor()
8960 mon->wr_data, in gaudi_print_single_monitor()
8961 (((u64)mon->wr_addr_high) << 32) | mon->wr_addr_low, in gaudi_print_single_monitor()
8966 mon->status)), in gaudi_print_single_monitor()
8976 struct hl_state_dump_specs *sds = &hdev->state_dump_specs; in gaudi_print_fences_single_engine()
8977 int rc = -ENOMEM, i; in gaudi_print_fences_single_engine()
8980 statuses = kcalloc(sds->props[SP_ENGINE_NUM_OF_QUEUES], in gaudi_print_fences_single_engine()
8985 fences = kcalloc(sds->props[SP_ENGINE_NUM_OF_FENCES] * in gaudi_print_fences_single_engine()
8986 sds->props[SP_ENGINE_NUM_OF_QUEUES], in gaudi_print_fences_single_engine()
8991 for (i = 0; i < sds->props[SP_ENGINE_NUM_OF_FENCES]; ++i) in gaudi_print_fences_single_engine()
8994 for (i = 0; i < sds->props[SP_ENGINE_NUM_OF_FENCES] * in gaudi_print_fences_single_engine()
8995 sds->props[SP_ENGINE_NUM_OF_QUEUES]; ++i) in gaudi_print_fences_single_engine()
8999 for (i = 0; i < sds->props[SP_ENGINE_NUM_OF_QUEUES]; ++i) { in gaudi_print_fences_single_engine()
9012 (i + fence_id * sds->props[SP_ENGINE_NUM_OF_QUEUES]); in gaudi_print_fences_single_engine()
9013 fence_rdata = fence_cnt - sds->props[SP_FENCE0_CNT_OFFSET] + in gaudi_print_fences_single_engine()
9014 sds->props[SP_FENCE0_RDATA_OFFSET]; in gaudi_print_fences_single_engine()
9050 struct hl_state_dump_specs *sds = &hdev->state_dump_specs; in gaudi_state_dump_init()
9054 hash_add(sds->so_id_to_str_tb, in gaudi_state_dump_init()
9059 hash_add(sds->monitor_id_to_str_tb, in gaudi_state_dump_init()
9063 sds->props = gaudi_state_dump_specs_props; in gaudi_state_dump_init()
9065 sds->sync_namager_names = gaudi_sync_manager_names; in gaudi_state_dump_init()
9067 sds->funcs = gaudi_state_dump_funcs; in gaudi_state_dump_init()
9094 cpucp_info = &hdev->asic_prop.cpucp_info; in infineon_ver_show()
9096 return sprintf(buf, "%#04x\n", le32_to_cpu(cpucp_info->infineon_version)); in infineon_ver_show()
9110 dev_vrm_attr_grp->attrs = gaudi_vrm_dev_attrs; in gaudi_add_device_attr()
9216 * gaudi_set_asic_funcs - set GAUDI function pointers
9223 hdev->asic_funcs = &gaudi_funcs; in gaudi_set_asic_funcs()