Lines matching refs: dev — cross-reference of uses of `dev` in drivers/gpu/drm/amd/amdkfd/kfd_topology.c. Each entry shows the source line number, the matching code, and the enclosing function; "argument" / "local" note how dev is bound in that scope.

129 static void kfd_release_topology_device(struct kfd_topology_device *dev)  in kfd_release_topology_device()  argument
137 list_del(&dev->list); in kfd_release_topology_device()
139 while (dev->mem_props.next != &dev->mem_props) { in kfd_release_topology_device()
140 mem = container_of(dev->mem_props.next, in kfd_release_topology_device()
146 while (dev->cache_props.next != &dev->cache_props) { in kfd_release_topology_device()
147 cache = container_of(dev->cache_props.next, in kfd_release_topology_device()
153 while (dev->io_link_props.next != &dev->io_link_props) { in kfd_release_topology_device()
154 iolink = container_of(dev->io_link_props.next, in kfd_release_topology_device()
160 while (dev->p2p_link_props.next != &dev->p2p_link_props) { in kfd_release_topology_device()
161 p2plink = container_of(dev->p2p_link_props.next, in kfd_release_topology_device()
167 while (dev->perf_props.next != &dev->perf_props) { in kfd_release_topology_device()
168 perf = container_of(dev->perf_props.next, in kfd_release_topology_device()
174 kfree(dev); in kfd_release_topology_device()
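The release path above repeats a single idiom five times, once per property list (source lines 139-174): pop list.next through container_of() until the head points back at itself, freeing each entry. Below is a standalone, compilable illustration of that idiom; the list helpers and the mem_properties type are hand-rolled stand-ins for the kernel's <linux/list.h> and kfd's kfd_mem_properties, not the driver's code.

```c
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

static void list_del(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

struct mem_properties {		/* stand-in for kfd_mem_properties */
	int bank;
	struct list_head list;	/* links the entry into mem_props */
};

int main(void)
{
	struct list_head mem_props;
	struct mem_properties *mem;
	int i;

	list_init(&mem_props);
	for (i = 0; i < 3; i++) {
		mem = malloc(sizeof(*mem));
		mem->bank = i;
		list_add_tail(&mem->list, &mem_props);
	}

	/* the drain loop mirrored from source lines 139-144 */
	while (mem_props.next != &mem_props) {
		mem = container_of(mem_props.next,
				   struct mem_properties, list);
		list_del(&mem->list);
		printf("freeing bank %d\n", mem->bank);
		free(mem);
	}
	return 0;
}
```

In the driver the same loop runs over cache_props, io_link_props, p2p_link_props and perf_props before the terminal kfree(dev).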
179 struct kfd_topology_device *dev; in kfd_release_topology_device_list() local
182 dev = list_first_entry(device_list, in kfd_release_topology_device_list()
184 kfd_release_topology_device(dev); in kfd_release_topology_device_list()
197 struct kfd_topology_device *dev; in kfd_create_topology_device() local
199 dev = kfd_alloc_struct(dev); in kfd_create_topology_device()
200 if (!dev) { in kfd_create_topology_device()
205 INIT_LIST_HEAD(&dev->mem_props); in kfd_create_topology_device()
206 INIT_LIST_HEAD(&dev->cache_props); in kfd_create_topology_device()
207 INIT_LIST_HEAD(&dev->io_link_props); in kfd_create_topology_device()
208 INIT_LIST_HEAD(&dev->p2p_link_props); in kfd_create_topology_device()
209 INIT_LIST_HEAD(&dev->perf_props); in kfd_create_topology_device()
211 list_add_tail(&dev->list, device_list); in kfd_create_topology_device()
213 return dev; in kfd_create_topology_device()
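The constructor at source lines 197-213 mirrors that destructor. kfd_alloc_struct() is a kfd-local wrapper that zero-allocates the struct (roughly kzalloc(sizeof(*dev), GFP_KERNEL)); each sub-list head is then initialized, and the node is linked onto the caller's device_list. A condensed sketch, assuming kernel context, with the wrapper spelled out:

```c
/* Condensed constructor sketch (source lines 197-213); kernel context
 * assumed. kfd_alloc_struct(dev) expands to approximately the kzalloc
 * below. */
struct kfd_topology_device *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

if (!dev)
	return NULL;

INIT_LIST_HEAD(&dev->mem_props);	/* one head per property kind;  */
INIT_LIST_HEAD(&dev->cache_props);	/* each pairs with a drain loop */
INIT_LIST_HEAD(&dev->io_link_props);	/* in kfd_release_topology_device() */
INIT_LIST_HEAD(&dev->p2p_link_props);
INIT_LIST_HEAD(&dev->perf_props);

list_add_tail(&dev->list, device_list);
return dev;
```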
423 struct kfd_topology_device *dev; in node_show() local
430 dev = container_of(attr, struct kfd_topology_device, in node_show()
432 if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu)) in node_show()
434 return sysfs_show_32bit_val(buffer, offs, dev->gpu_id); in node_show()
438 dev = container_of(attr, struct kfd_topology_device, in node_show()
441 if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu)) in node_show()
443 return sysfs_show_str_val(buffer, offs, dev->node_props.name); in node_show()
446 dev = container_of(attr, struct kfd_topology_device, in node_show()
448 if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu)) in node_show()
451 dev->node_props.cpu_cores_count); in node_show()
453 dev->gpu ? dev->node_props.simd_count : 0); in node_show()
455 dev->node_props.mem_banks_count); in node_show()
457 dev->node_props.caches_count); in node_show()
459 dev->node_props.io_links_count); in node_show()
461 dev->node_props.p2p_links_count); in node_show()
463 dev->node_props.cpu_core_id_base); in node_show()
465 dev->node_props.simd_id_base); in node_show()
467 dev->node_props.max_waves_per_simd); in node_show()
469 dev->node_props.lds_size_in_kb); in node_show()
471 dev->node_props.gds_size_in_kb); in node_show()
473 dev->node_props.num_gws); in node_show()
475 dev->node_props.wave_front_size); in node_show()
477 dev->gpu ? (dev->node_props.array_count * in node_show()
478 NUM_XCC(dev->gpu->xcc_mask)) : 0); in node_show()
480 dev->node_props.simd_arrays_per_engine); in node_show()
482 dev->node_props.cu_per_simd_array); in node_show()
484 dev->node_props.simd_per_cu); in node_show()
486 dev->node_props.max_slots_scratch_cu); in node_show()
488 dev->node_props.gfx_target_version); in node_show()
490 dev->node_props.vendor_id); in node_show()
492 dev->node_props.device_id); in node_show()
494 dev->node_props.location_id); in node_show()
496 dev->node_props.domain); in node_show()
498 dev->node_props.drm_render_minor); in node_show()
500 dev->node_props.hive_id); in node_show()
502 dev->node_props.num_sdma_engines); in node_show()
504 dev->node_props.num_sdma_xgmi_engines); in node_show()
506 dev->node_props.num_sdma_queues_per_engine); in node_show()
508 dev->node_props.num_cp_queues); in node_show()
510 if (dev->gpu) { in node_show()
512 __ilog2_u32(dev->gpu->kfd->device_info.num_of_watch_points); in node_show()
515 dev->node_props.capability |= in node_show()
518 dev->node_props.capability |= in node_show()
524 if (dev->gpu->adev->asic_type == CHIP_TONGA) in node_show()
525 dev->node_props.capability |= in node_show()
529 dev->node_props.max_engine_clk_fcompute); in node_show()
534 dev->gpu->kfd->mec_fw_version); in node_show()
536 dev->node_props.capability); in node_show()
538 dev->node_props.debug_prop); in node_show()
540 dev->gpu->kfd->sdma_fw_version); in node_show()
542 dev->gpu->adev->unique_id); in node_show()
544 NUM_XCC(dev->gpu->xcc_mask)); in node_show()
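node_show() never receives the topology device directly. Because attr_gpuid, attr_name and attr_props are attributes embedded in struct kfd_topology_device, container_of() on the attribute pointer recovers the owning device, and a device-cgroup check gates GPU nodes before anything is emitted. A hedged sketch of the gpu_id branch (source lines 423-434); sysfs_show_32bit_val()/sysfs_show_str_val() are kfd-local helpers that append "name value\n" to the sysfs page:

```c
/* Sketch of the dispatch shape, not the verbatim driver source. */
static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
			 char *buffer)
{
	struct kfd_topology_device *dev;
	int offs = 0;

	if (strcmp(attr->name, "gpu_id") == 0) {
		/* the attribute is a member of the device struct, so
		 * container_of() walks back to the device */
		dev = container_of(attr, struct kfd_topology_device,
				   attr_gpuid);
		if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu))
			return -EPERM;
		return sysfs_show_32bit_val(buffer, offs, dev->gpu_id);
	}
	/* "name" and "properties" follow the same recover-check-emit
	 * shape via attr_name and attr_props; the long run of
	 * node_props fields above (lines 448-544) is one "properties"
	 * file built from repeated sysfs_show_*_val() calls. */
	return -EINVAL;
}
```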
567 static void kfd_remove_sysfs_node_entry(struct kfd_topology_device *dev) in kfd_remove_sysfs_node_entry() argument
575 if (dev->kobj_iolink) { in kfd_remove_sysfs_node_entry()
576 list_for_each_entry(iolink, &dev->io_link_props, list) in kfd_remove_sysfs_node_entry()
582 kobject_del(dev->kobj_iolink); in kfd_remove_sysfs_node_entry()
583 kobject_put(dev->kobj_iolink); in kfd_remove_sysfs_node_entry()
584 dev->kobj_iolink = NULL; in kfd_remove_sysfs_node_entry()
587 if (dev->kobj_p2plink) { in kfd_remove_sysfs_node_entry()
588 list_for_each_entry(p2plink, &dev->p2p_link_props, list) in kfd_remove_sysfs_node_entry()
594 kobject_del(dev->kobj_p2plink); in kfd_remove_sysfs_node_entry()
595 kobject_put(dev->kobj_p2plink); in kfd_remove_sysfs_node_entry()
596 dev->kobj_p2plink = NULL; in kfd_remove_sysfs_node_entry()
599 if (dev->kobj_cache) { in kfd_remove_sysfs_node_entry()
600 list_for_each_entry(cache, &dev->cache_props, list) in kfd_remove_sysfs_node_entry()
606 kobject_del(dev->kobj_cache); in kfd_remove_sysfs_node_entry()
607 kobject_put(dev->kobj_cache); in kfd_remove_sysfs_node_entry()
608 dev->kobj_cache = NULL; in kfd_remove_sysfs_node_entry()
611 if (dev->kobj_mem) { in kfd_remove_sysfs_node_entry()
612 list_for_each_entry(mem, &dev->mem_props, list) in kfd_remove_sysfs_node_entry()
617 kobject_del(dev->kobj_mem); in kfd_remove_sysfs_node_entry()
618 kobject_put(dev->kobj_mem); in kfd_remove_sysfs_node_entry()
619 dev->kobj_mem = NULL; in kfd_remove_sysfs_node_entry()
622 if (dev->kobj_perf) { in kfd_remove_sysfs_node_entry()
623 list_for_each_entry(perf, &dev->perf_props, list) { in kfd_remove_sysfs_node_entry()
627 kobject_del(dev->kobj_perf); in kfd_remove_sysfs_node_entry()
628 kobject_put(dev->kobj_perf); in kfd_remove_sysfs_node_entry()
629 dev->kobj_perf = NULL; in kfd_remove_sysfs_node_entry()
632 if (dev->kobj_node) { in kfd_remove_sysfs_node_entry()
633 sysfs_remove_file(dev->kobj_node, &dev->attr_gpuid); in kfd_remove_sysfs_node_entry()
634 sysfs_remove_file(dev->kobj_node, &dev->attr_name); in kfd_remove_sysfs_node_entry()
635 sysfs_remove_file(dev->kobj_node, &dev->attr_props); in kfd_remove_sysfs_node_entry()
636 kobject_del(dev->kobj_node); in kfd_remove_sysfs_node_entry()
637 kobject_put(dev->kobj_node); in kfd_remove_sysfs_node_entry()
638 dev->kobj_node = NULL; in kfd_remove_sysfs_node_entry()
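All five child-kobject branches of the teardown (source lines 575-638) follow one shape, sketched below for mem_banks. Order matters: per-entry attribute files go first, then kobject_del() unlinks the directory from sysfs, kobject_put() drops the last reference, and the pointer is cleared so a second call is a no-op. kfd_remove_sysfs_file() is a kfd-local helper bundling sysfs_remove_file() with kobject_del()/kobject_put() on the entry's own kobject.

```c
/* One branch of the teardown, sketched (source lines 611-619). */
if (dev->kobj_mem) {
	list_for_each_entry(mem, &dev->mem_props, list)
		if (mem->kobj) {
			kfd_remove_sysfs_file(mem->kobj, &mem->attr);
			mem->kobj = NULL;
		}
	kobject_del(dev->kobj_mem);	/* unlink from sysfs */
	kobject_put(dev->kobj_mem);	/* drop the final reference */
	dev->kobj_mem = NULL;		/* make re-entry safe */
}
```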
642 static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev, in kfd_build_sysfs_node_entry() argument
654 if (WARN_ON(dev->kobj_node)) in kfd_build_sysfs_node_entry()
660 dev->kobj_node = kfd_alloc_struct(dev->kobj_node); in kfd_build_sysfs_node_entry()
661 if (!dev->kobj_node) in kfd_build_sysfs_node_entry()
664 ret = kobject_init_and_add(dev->kobj_node, &node_type, in kfd_build_sysfs_node_entry()
667 kobject_put(dev->kobj_node); in kfd_build_sysfs_node_entry()
671 dev->kobj_mem = kobject_create_and_add("mem_banks", dev->kobj_node); in kfd_build_sysfs_node_entry()
672 if (!dev->kobj_mem) in kfd_build_sysfs_node_entry()
675 dev->kobj_cache = kobject_create_and_add("caches", dev->kobj_node); in kfd_build_sysfs_node_entry()
676 if (!dev->kobj_cache) in kfd_build_sysfs_node_entry()
679 dev->kobj_iolink = kobject_create_and_add("io_links", dev->kobj_node); in kfd_build_sysfs_node_entry()
680 if (!dev->kobj_iolink) in kfd_build_sysfs_node_entry()
683 dev->kobj_p2plink = kobject_create_and_add("p2p_links", dev->kobj_node); in kfd_build_sysfs_node_entry()
684 if (!dev->kobj_p2plink) in kfd_build_sysfs_node_entry()
687 dev->kobj_perf = kobject_create_and_add("perf", dev->kobj_node); in kfd_build_sysfs_node_entry()
688 if (!dev->kobj_perf) in kfd_build_sysfs_node_entry()
694 dev->attr_gpuid.name = "gpu_id"; in kfd_build_sysfs_node_entry()
695 dev->attr_gpuid.mode = KFD_SYSFS_FILE_MODE; in kfd_build_sysfs_node_entry()
696 sysfs_attr_init(&dev->attr_gpuid); in kfd_build_sysfs_node_entry()
697 dev->attr_name.name = "name"; in kfd_build_sysfs_node_entry()
698 dev->attr_name.mode = KFD_SYSFS_FILE_MODE; in kfd_build_sysfs_node_entry()
699 sysfs_attr_init(&dev->attr_name); in kfd_build_sysfs_node_entry()
700 dev->attr_props.name = "properties"; in kfd_build_sysfs_node_entry()
701 dev->attr_props.mode = KFD_SYSFS_FILE_MODE; in kfd_build_sysfs_node_entry()
702 sysfs_attr_init(&dev->attr_props); in kfd_build_sysfs_node_entry()
703 ret = sysfs_create_file(dev->kobj_node, &dev->attr_gpuid); in kfd_build_sysfs_node_entry()
706 ret = sysfs_create_file(dev->kobj_node, &dev->attr_name); in kfd_build_sysfs_node_entry()
709 ret = sysfs_create_file(dev->kobj_node, &dev->attr_props); in kfd_build_sysfs_node_entry()
714 list_for_each_entry(mem, &dev->mem_props, list) { in kfd_build_sysfs_node_entry()
719 dev->kobj_mem, "%d", i); in kfd_build_sysfs_node_entry()
735 list_for_each_entry(cache, &dev->cache_props, list) { in kfd_build_sysfs_node_entry()
740 dev->kobj_cache, "%d", i); in kfd_build_sysfs_node_entry()
756 list_for_each_entry(iolink, &dev->io_link_props, list) { in kfd_build_sysfs_node_entry()
761 dev->kobj_iolink, "%d", i); in kfd_build_sysfs_node_entry()
777 list_for_each_entry(p2plink, &dev->p2p_link_props, list) { in kfd_build_sysfs_node_entry()
782 dev->kobj_p2plink, "%d", i); in kfd_build_sysfs_node_entry()
799 list_for_each_entry(perf, &dev->perf_props, list) { in kfd_build_sysfs_node_entry()
818 ret = sysfs_create_group(dev->kobj_perf, perf->attr_group); in kfd_build_sysfs_node_entry()
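The setup half (source lines 654-818) is the mirror image. Two details are worth calling out: once kobject_init_and_add() has run, the kobject's refcount owns the memory, so the error path must be kobject_put(), never kfree(); and each statically embedded attribute is passed through sysfs_attr_init() before sysfs_create_file() (this initializes the lockdep key for the attribute). A hedged sketch, assuming kernel context and kfd's read-only KFD_SYSFS_FILE_MODE:

```c
/* Sketch of the node-directory setup, not verbatim driver source. */
dev->kobj_node = kzalloc(sizeof(*dev->kobj_node), GFP_KERNEL);
if (!dev->kobj_node)
	return -ENOMEM;

ret = kobject_init_and_add(dev->kobj_node, &node_type,
			   sys_props.kobj_nodes, "%d", id);
if (ret < 0) {
	kobject_put(dev->kobj_node);	/* refcount owns it now */
	return ret;
}

dev->kobj_mem = kobject_create_and_add("mem_banks", dev->kobj_node);
if (!dev->kobj_mem)
	return -ENOMEM;
/* ... caches, io_links, p2p_links, perf created the same way ... */

dev->attr_gpuid.name = "gpu_id";
dev->attr_gpuid.mode = KFD_SYSFS_FILE_MODE;
sysfs_attr_init(&dev->attr_gpuid);
ret = sysfs_create_file(dev->kobj_node, &dev->attr_gpuid);
```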
829 struct kfd_topology_device *dev; in kfd_build_sysfs_node_tree() local
833 list_for_each_entry(dev, &topology_device_list, list) { in kfd_build_sysfs_node_tree()
834 ret = kfd_build_sysfs_node_entry(dev, i); in kfd_build_sysfs_node_tree()
846 struct kfd_topology_device *dev; in kfd_remove_sysfs_node_tree() local
848 list_for_each_entry(dev, &topology_device_list, list) in kfd_remove_sysfs_node_tree()
849 kfd_remove_sysfs_node_entry(dev); in kfd_remove_sysfs_node_tree()
928 struct kfd_topology_device *dev; in kfd_debug_print_topology() local
932 dev = list_last_entry(&topology_device_list, in kfd_debug_print_topology()
934 if (dev) { in kfd_debug_print_topology()
935 if (dev->node_props.cpu_cores_count && in kfd_debug_print_topology()
936 dev->node_props.simd_count) { in kfd_debug_print_topology()
938 dev->node_props.device_id, in kfd_debug_print_topology()
939 dev->node_props.vendor_id); in kfd_debug_print_topology()
940 } else if (dev->node_props.cpu_cores_count) in kfd_debug_print_topology()
942 else if (dev->node_props.simd_count) in kfd_debug_print_topology()
944 dev->node_props.device_id, in kfd_debug_print_topology()
945 dev->node_props.vendor_id); in kfd_debug_print_topology()
955 struct kfd_topology_device *dev; in kfd_update_system_properties() local
958 dev = list_last_entry(&topology_device_list, in kfd_update_system_properties()
960 if (dev) { in kfd_update_system_properties()
961 sys_props.platform_id = dev->oem_id64; in kfd_update_system_properties()
962 sys_props.platform_oem = *((uint64_t *)dev->oem_table_id); in kfd_update_system_properties()
963 sys_props.platform_rev = dev->oem_revision; in kfd_update_system_properties()
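Source line 962 type-puns the 8-byte oem_table_id char array into a u64, while line 961 reads the matching oem_id64 union member directly. The cast is safe here only because the struct layout keeps the array naturally aligned; an alignment-agnostic equivalent, shown for illustration, would be:

```c
/* Alignment-safe equivalent of the cast at source line 962
 * (illustration only). */
uint64_t oem;

memcpy(&oem, dev->oem_table_id, sizeof(oem));
sys_props.platform_oem = oem;
```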
1126 struct kfd_topology_device *dev; in kfd_assign_gpu() local
1133 list_for_each_entry(dev, &topology_device_list, list) { in kfd_assign_gpu()
1137 if (dev->node_props.cpu_cores_count) in kfd_assign_gpu()
1140 if (!dev->gpu && (dev->node_props.simd_count > 0)) { in kfd_assign_gpu()
1141 dev->gpu = gpu; in kfd_assign_gpu()
1142 out_dev = dev; in kfd_assign_gpu()
1144 list_for_each_entry(mem, &dev->mem_props, list) in kfd_assign_gpu()
1145 mem->gpu = dev->gpu; in kfd_assign_gpu()
1146 list_for_each_entry(cache, &dev->cache_props, list) in kfd_assign_gpu()
1147 cache->gpu = dev->gpu; in kfd_assign_gpu()
1148 list_for_each_entry(iolink, &dev->io_link_props, list) in kfd_assign_gpu()
1149 iolink->gpu = dev->gpu; in kfd_assign_gpu()
1150 list_for_each_entry(p2plink, &dev->p2p_link_props, list) in kfd_assign_gpu()
1151 p2plink->gpu = dev->gpu; in kfd_assign_gpu()
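kfd_assign_gpu() claims the first CRAT-discovered node that is GPU-shaped (simd_count > 0) but not yet bound (gpu == NULL), then backfills the same gpu pointer into every property entry so the sysfs show functions can reach the device. Condensed from the fragments at source lines 1133-1151:

```c
/* Claim-and-backfill step, condensed from the listing above. */
if (!dev->gpu && dev->node_props.simd_count > 0) {
	dev->gpu = gpu;
	out_dev = dev;

	list_for_each_entry(mem, &dev->mem_props, list)
		mem->gpu = dev->gpu;
	list_for_each_entry(cache, &dev->cache_props, list)
		cache->gpu = dev->gpu;
	list_for_each_entry(iolink, &dev->io_link_props, list)
		iolink->gpu = dev->gpu;
	list_for_each_entry(p2plink, &dev->p2p_link_props, list)
		p2plink->gpu = dev->gpu;
	break;	/* one topology node per GPU */
}
```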
1169 static void kfd_fill_mem_clk_max_info(struct kfd_topology_device *dev) in kfd_fill_mem_clk_max_info() argument
1174 if (!dev) in kfd_fill_mem_clk_max_info()
1183 amdgpu_amdkfd_get_local_mem_info(dev->gpu->adev, &local_mem_info, in kfd_fill_mem_clk_max_info()
1184 dev->gpu->xcp); in kfd_fill_mem_clk_max_info()
1186 list_for_each_entry(mem, &dev->mem_props, list) in kfd_fill_mem_clk_max_info()
1190 static void kfd_set_iolink_no_atomics(struct kfd_topology_device *dev, in kfd_set_iolink_no_atomics() argument
1211 if (!dev->gpu->kfd->pci_atomic_requested || in kfd_set_iolink_no_atomics()
1212 dev->gpu->adev->asic_type == CHIP_HAWAII) in kfd_set_iolink_no_atomics()
1240 static void kfd_fill_iolink_non_crat_info(struct kfd_topology_device *dev) in kfd_fill_iolink_non_crat_info() argument
1245 if (!dev || !dev->gpu) in kfd_fill_iolink_non_crat_info()
1249 list_for_each_entry(link, &dev->io_link_props, list) { in kfd_fill_iolink_non_crat_info()
1251 kfd_set_iolink_no_atomics(dev, NULL, link); in kfd_fill_iolink_non_crat_info()
1265 if (!dev->node_props.hive_id) in kfd_fill_iolink_non_crat_info()
1266 dev->node_props.hive_id = pci_dev_id(dev->gpu->adev->pdev); in kfd_fill_iolink_non_crat_info()
1267 peer_dev->node_props.hive_id = dev->node_props.hive_id; in kfd_fill_iolink_non_crat_info()
1276 kfd_set_iolink_no_atomics(peer_dev, dev, inbound_link); in kfd_fill_iolink_non_crat_info()
1282 list_for_each_entry(link, &dev->p2p_link_props, list) { in kfd_fill_iolink_non_crat_info()
1284 kfd_set_iolink_no_atomics(dev, NULL, link); in kfd_fill_iolink_non_crat_info()
1297 kfd_set_iolink_no_atomics(peer_dev, dev, inbound_link); in kfd_fill_iolink_non_crat_info()
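Source lines 1265-1267 bootstrap XGMI hive grouping: if CRAT supplied no hive_id, the PCI device ID of the first GPU seen seeds one, and every peer reached over an XGMI io_link inherits the same value, so the whole hive ends up sharing a single non-zero ID.

```c
/* Hive-ID seeding and propagation (source lines 1265-1267). */
if (!dev->node_props.hive_id)
	dev->node_props.hive_id = pci_dev_id(dev->gpu->adev->pdev);
peer_dev->node_props.hive_id = dev->node_props.hive_id;
```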
1303 static int kfd_build_p2p_node_entry(struct kfd_topology_device *dev, in kfd_build_p2p_node_entry() argument
1313 dev->kobj_p2plink, "%d", dev->node_props.p2p_links_count - 1); in kfd_build_p2p_node_entry()
1484 struct kfd_topology_device *dev; in kfd_dev_create_p2p_links() local
1493 list_for_each_entry(dev, &topology_device_list, list) in kfd_dev_create_p2p_links()
1512 list_for_each_entry(dev, &topology_device_list, list) { in kfd_dev_create_p2p_links()
1513 if (dev == new_dev) in kfd_dev_create_p2p_links()
1515 if (!dev->gpu || !dev->gpu->adev || in kfd_dev_create_p2p_links()
1516 (dev->gpu->kfd->hive_id && in kfd_dev_create_p2p_links()
1517 dev->gpu->kfd->hive_id == new_dev->gpu->kfd->hive_id)) in kfd_dev_create_p2p_links()
1521 ret = kfd_add_peer_prop(new_dev, dev, i, k); in kfd_dev_create_p2p_links()
1525 ret = kfd_add_peer_prop(dev, new_dev, k, i); in kfd_dev_create_p2p_links()
1674 static void kfd_fill_cache_non_crat_info(struct kfd_topology_device *dev, struct kfd_node *kdev) in kfd_fill_cache_non_crat_info() argument
1693 gpu_processor_id = dev->node_props.simd_id_base; in kfd_fill_cache_non_crat_info()
1731 list_add_tail(&props_ext->list, &dev->cache_props); in kfd_fill_cache_non_crat_info()
1753 list_add_tail(&props_ext->list, &dev->cache_props); in kfd_fill_cache_non_crat_info()
1757 dev->node_props.caches_count += num_of_entries; in kfd_fill_cache_non_crat_info()
1762 struct kfd_topology_device **dev) in kfd_topology_add_device_locked() argument
1795 *dev = kfd_assign_gpu(gpu); in kfd_topology_add_device_locked()
1796 if (WARN_ON(!*dev)) { in kfd_topology_add_device_locked()
1804 kfd_fill_cache_non_crat_info(*dev, gpu); in kfd_topology_add_device_locked()
1821 static void kfd_topology_set_dbg_firmware_support(struct kfd_topology_device *dev) in kfd_topology_set_dbg_firmware_support() argument
1825 if (KFD_GC_VERSION(dev->gpu) >= IP_VERSION(11, 0, 0) && in kfd_topology_set_dbg_firmware_support()
1826 KFD_GC_VERSION(dev->gpu) < IP_VERSION(12, 0, 0)) { in kfd_topology_set_dbg_firmware_support()
1827 uint32_t mes_api_rev = (dev->gpu->adev->mes.sched_version & in kfd_topology_set_dbg_firmware_support()
1830 uint32_t mes_rev = dev->gpu->adev->mes.sched_version & in kfd_topology_set_dbg_firmware_support()
1841 switch (KFD_GC_VERSION(dev->gpu)) { in kfd_topology_set_dbg_firmware_support()
1843 firmware_supported = dev->gpu->kfd->mec_fw_version >= 459 + 32768; in kfd_topology_set_dbg_firmware_support()
1850 firmware_supported = dev->gpu->kfd->mec_fw_version >= 459; in kfd_topology_set_dbg_firmware_support()
1853 firmware_supported = dev->gpu->kfd->mec_fw_version >= 60; in kfd_topology_set_dbg_firmware_support()
1856 firmware_supported = dev->gpu->kfd->mec_fw_version >= 51; in kfd_topology_set_dbg_firmware_support()
1861 firmware_supported = dev->gpu->kfd->mec_fw_version >= 144; in kfd_topology_set_dbg_firmware_support()
1868 firmware_supported = dev->gpu->kfd->mec_fw_version >= 89; in kfd_topology_set_dbg_firmware_support()
1880 dev->node_props.capability |= HSA_CAP_TRAP_DEBUG_FIRMWARE_SUPPORTED; in kfd_topology_set_dbg_firmware_support()
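The firmware gate above compares the loaded MEC firmware version against a per-GPU-generation minimum and only then advertises the trap-debug capability. The IP_VERSION case labels are not visible in this listing, so the label in the sketch below is a placeholder; the thresholds (459 + 32768, 459, 60, 51, 144, 89) are the ones shown at source lines 1843-1868.

```c
/* Shape of the gating switch (source lines 1841-1880), sketched. */
bool firmware_supported;

switch (KFD_GC_VERSION(dev->gpu)) {
case IP_VERSION(9, 4, 2):	/* placeholder label */
	firmware_supported = dev->gpu->kfd->mec_fw_version >= 51;
	break;
/* ... one case (or range) per generation for the other thresholds ... */
default:
	firmware_supported = false;
	break;
}

if (firmware_supported)
	dev->node_props.capability |= HSA_CAP_TRAP_DEBUG_FIRMWARE_SUPPORTED;
```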
1883 static void kfd_topology_set_capabilities(struct kfd_topology_device *dev) in kfd_topology_set_capabilities() argument
1885 dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_2_0 << in kfd_topology_set_capabilities()
1889 dev->node_props.capability |= HSA_CAP_TRAP_DEBUG_SUPPORT | in kfd_topology_set_capabilities()
1893 if (kfd_dbg_has_ttmps_always_setup(dev->gpu)) in kfd_topology_set_capabilities()
1894 dev->node_props.debug_prop |= HSA_DBG_DISPATCH_INFO_ALWAYS_VALID; in kfd_topology_set_capabilities()
1896 if (KFD_GC_VERSION(dev->gpu) < IP_VERSION(10, 0, 0)) { in kfd_topology_set_capabilities()
1897 if (KFD_GC_VERSION(dev->gpu) == IP_VERSION(9, 4, 3)) in kfd_topology_set_capabilities()
1898 dev->node_props.debug_prop |= in kfd_topology_set_capabilities()
1902 dev->node_props.debug_prop |= in kfd_topology_set_capabilities()
1906 if (KFD_GC_VERSION(dev->gpu) >= IP_VERSION(9, 4, 2)) in kfd_topology_set_capabilities()
1907 dev->node_props.capability |= in kfd_topology_set_capabilities()
1910 dev->node_props.debug_prop |= HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX10 | in kfd_topology_set_capabilities()
1913 if (KFD_GC_VERSION(dev->gpu) >= IP_VERSION(11, 0, 0)) in kfd_topology_set_capabilities()
1914 dev->node_props.capability |= in kfd_topology_set_capabilities()
1918 kfd_topology_set_dbg_firmware_support(dev); in kfd_topology_set_capabilities()
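The capability and debug_prop words are composed with one recurring idiom, visible throughout lines 1885-1914: multi-bit fields are positioned with their _SHIFT constant and clipped with the matching _MASK before being ORed in, while single-bit features are ORed in directly.

```c
/* The shift-and-mask idiom behind source lines 1885-1887. */
dev->node_props.capability |=
	((HSA_CAP_DOORBELL_TYPE_2_0 << HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) &
	 HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);
dev->node_props.capability |= HSA_CAP_TRAP_DEBUG_SUPPORT;
```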
1924 struct kfd_topology_device *dev; in kfd_topology_add_device() local
1932 dev_warn(gpu->adev->dev, in kfd_topology_add_device()
1947 dev = kfd_assign_gpu(gpu); in kfd_topology_add_device()
1948 if (!dev) in kfd_topology_add_device()
1949 res = kfd_topology_add_device_locked(gpu, gpu_id, &dev); in kfd_topology_add_device()
1954 dev->gpu_id = gpu_id; in kfd_topology_add_device()
1967 amdgpu_amdkfd_get_cu_info(dev->gpu->adev, &cu_info); in kfd_topology_add_device()
1970 dev->node_props.name[i] = __tolower(asic_name[i]); in kfd_topology_add_device()
1974 dev->node_props.name[i] = '\0'; in kfd_topology_add_device()
1976 dev->node_props.simd_arrays_per_engine = in kfd_topology_add_device()
1979 dev->node_props.gfx_target_version = in kfd_topology_add_device()
1981 dev->node_props.vendor_id = gpu->adev->pdev->vendor; in kfd_topology_add_device()
1982 dev->node_props.device_id = gpu->adev->pdev->device; in kfd_topology_add_device()
1983 dev->node_props.capability |= in kfd_topology_add_device()
1984 ((dev->gpu->adev->rev_id << HSA_CAP_ASIC_REVISION_SHIFT) & in kfd_topology_add_device()
1987 dev->node_props.location_id = pci_dev_id(gpu->adev->pdev); in kfd_topology_add_device()
1988 if (KFD_GC_VERSION(dev->gpu->kfd) == IP_VERSION(9, 4, 3)) in kfd_topology_add_device()
1989 dev->node_props.location_id |= dev->gpu->node_id; in kfd_topology_add_device()
1991 dev->node_props.domain = pci_domain_nr(gpu->adev->pdev->bus); in kfd_topology_add_device()
1992 dev->node_props.max_engine_clk_fcompute = in kfd_topology_add_device()
1993 amdgpu_amdkfd_get_max_engine_clock_in_mhz(dev->gpu->adev); in kfd_topology_add_device()
1994 dev->node_props.max_engine_clk_ccompute = in kfd_topology_add_device()
1998 dev->node_props.drm_render_minor = gpu->xcp->ddev->render->index; in kfd_topology_add_device()
2000 dev->node_props.drm_render_minor = in kfd_topology_add_device()
2003 dev->node_props.hive_id = gpu->kfd->hive_id; in kfd_topology_add_device()
2004 dev->node_props.num_sdma_engines = kfd_get_num_sdma_engines(gpu); in kfd_topology_add_device()
2005 dev->node_props.num_sdma_xgmi_engines = in kfd_topology_add_device()
2007 dev->node_props.num_sdma_queues_per_engine = in kfd_topology_add_device()
2010 dev->node_props.num_gws = (dev->gpu->gws && in kfd_topology_add_device()
2011 dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ? in kfd_topology_add_device()
2012 dev->gpu->adev->gds.gws_size : 0; in kfd_topology_add_device()
2013 dev->node_props.num_cp_queues = get_cp_queues_num(dev->gpu->dqm); in kfd_topology_add_device()
2015 kfd_fill_mem_clk_max_info(dev); in kfd_topology_add_device()
2016 kfd_fill_iolink_non_crat_info(dev); in kfd_topology_add_device()
2018 switch (dev->gpu->adev->asic_type) { in kfd_topology_add_device()
2022 dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_PRE_1_0 << in kfd_topology_add_device()
2033 dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_1_0 << in kfd_topology_add_device()
2038 if (KFD_GC_VERSION(dev->gpu) < IP_VERSION(9, 0, 1)) in kfd_topology_add_device()
2040 dev->gpu->adev->asic_type); in kfd_topology_add_device()
2042 kfd_topology_set_capabilities(dev); in kfd_topology_add_device()
2049 dev->node_props.capability &= ~HSA_CAP_ATS_PRESENT; in kfd_topology_add_device()
2056 if (dev->gpu->adev->asic_type == CHIP_CARRIZO) { in kfd_topology_add_device()
2057 dev->node_props.simd_count = in kfd_topology_add_device()
2059 dev->node_props.max_waves_per_simd = 10; in kfd_topology_add_device()
2063 dev->node_props.capability |= in kfd_topology_add_device()
2064 ((dev->gpu->adev->ras_enabled & BIT(AMDGPU_RAS_BLOCK__GFX)) != 0) ? in kfd_topology_add_device()
2066 dev->node_props.capability |= in kfd_topology_add_device()
2067 ((dev->gpu->adev->ras_enabled & BIT(AMDGPU_RAS_BLOCK__UMC)) != 0) ? in kfd_topology_add_device()
2070 if (KFD_GC_VERSION(dev->gpu) != IP_VERSION(9, 0, 1)) in kfd_topology_add_device()
2071 dev->node_props.capability |= (dev->gpu->adev->ras_enabled != 0) ? in kfd_topology_add_device()
2074 if (KFD_IS_SVM_API_SUPPORTED(dev->gpu->adev)) in kfd_topology_add_device()
2075 dev->node_props.capability |= HSA_CAP_SVMAPI_SUPPORTED; in kfd_topology_add_device()
2077 if (dev->gpu->adev->gmc.is_app_apu || in kfd_topology_add_device()
2078 dev->gpu->adev->gmc.xgmi.connected_to_cpu) in kfd_topology_add_device()
2079 dev->node_props.capability |= HSA_CAP_FLAGS_COHERENTHOSTACCESS; in kfd_topology_add_device()
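kfd_topology_add_device() is mostly straight property fill-in; one small piece worth sketching is the name normalization at source lines 1970-1974, which copies the ASIC name string into the node properties lower-cased and NUL-terminated. KFD_TOPOLOGY_PUBLIC_NAME_SIZE is the kfd header constant bounding the public name; the loop shape here is a hedged reconstruction from the fragments above.

```c
/* Name fill sketch (source lines 1970-1974). */
for (i = 0; asic_name[i] && i < KFD_TOPOLOGY_PUBLIC_NAME_SIZE - 1; i++)
	dev->node_props.name[i] = __tolower(asic_name[i]);
dev->node_props.name[i] = '\0';
```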
2109 struct kfd_topology_device *dev; in kfd_topology_update_io_links() local
2112 list_for_each_entry(dev, &topology_device_list, list) { in kfd_topology_update_io_links()
2113 if (dev->proximity_domain > proximity_domain) in kfd_topology_update_io_links()
2114 dev->proximity_domain--; in kfd_topology_update_io_links()
2116 list_for_each_entry_safe(iolink, tmp, &dev->io_link_props, list) { in kfd_topology_update_io_links()
2123 dev->node_props.io_links_count--; in kfd_topology_update_io_links()
2132 list_for_each_entry_safe(p2plink, tmp, &dev->p2p_link_props, list) { in kfd_topology_update_io_links()
2139 dev->node_props.p2p_links_count--; in kfd_topology_update_io_links()
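When a node leaves the topology, every surviving device's links are rewritten in one pass: _safe iteration is mandatory because entries are unlinked mid-walk, links pointing at the departing proximity domain are dropped, and link endpoints numbered past it are shifted down to match the renumbered node list. A hedged sketch of the io_link pass (source lines 2116-2131); entry cleanup after list_del() is elided since the driver's ownership rules are not visible in the listing:

```c
/* Removal-and-renumber pass, sketched. */
list_for_each_entry_safe(iolink, tmp, &dev->io_link_props, list) {
	if (iolink->node_to == proximity_domain) {
		list_del(&iolink->list);	/* link to removed node */
		dev->node_props.io_links_count--;
	} else {
		if (iolink->node_from > proximity_domain)
			iolink->node_from--;
		if (iolink->node_to > proximity_domain)
			iolink->node_to--;
	}
}
```

The p2p_link pass at source lines 2132-2139 repeats the same logic over p2p_link_props.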
2152 struct kfd_topology_device *dev, *tmp; in kfd_topology_remove_device() local
2159 list_for_each_entry_safe(dev, tmp, &topology_device_list, list) { in kfd_topology_remove_device()
2160 if (dev->gpu == gpu) { in kfd_topology_remove_device()
2161 gpu_id = dev->gpu_id; in kfd_topology_remove_device()
2162 kfd_remove_sysfs_node_entry(dev); in kfd_topology_remove_device()
2163 kfd_release_topology_device(dev); in kfd_topology_remove_device()
2248 struct kfd_topology_device *dev; in kfd_debugfs_hqds_by_device() local
2254 list_for_each_entry(dev, &topology_device_list, list) { in kfd_debugfs_hqds_by_device()
2255 if (!dev->gpu) { in kfd_debugfs_hqds_by_device()
2260 seq_printf(m, "Node %u, gpu_id %x:\n", i++, dev->gpu->id); in kfd_debugfs_hqds_by_device()
2261 r = dqm_debugfs_hqds(m, dev->gpu->dqm); in kfd_debugfs_hqds_by_device()
2273 struct kfd_topology_device *dev; in kfd_debugfs_rls_by_device() local
2279 list_for_each_entry(dev, &topology_device_list, list) { in kfd_debugfs_rls_by_device()
2280 if (!dev->gpu) { in kfd_debugfs_rls_by_device()
2285 seq_printf(m, "Node %u, gpu_id %x:\n", i++, dev->gpu->id); in kfd_debugfs_rls_by_device()
2286 r = pm_debugfs_runlist(m, &dev->gpu->dqm->packet_mgr); in kfd_debugfs_rls_by_device()