
Searched refs:dqm (Results 1 – 24 of 24) sorted by relevance

/openbmc/linux/drivers/gpu/drm/amd/amdkfd/
kfd_device_queue_manager.c
576 dqm->asic_ops.init_sdma_vm(dqm, q, qpd); in create_queue_nocpsch()
1445 r = pm_init(&dqm->packet_mgr, dqm); in start_nocpsch()
1636 retval = pm_init(&dqm->packet_mgr, dqm); in start_cpsch()
1679 dqm->dev->adev, dqm->wait_times, in start_cpsch()
1719 kfd_gtt_sa_free(dqm->dev, dqm->fence_mem); in stop_cpsch()
1810 dqm->asic_ops.init_sdma_vm(dqm, q, qpd); in create_queue_cpsch()
1943 if (dqm->is_hws_hang || dqm->is_resetting) in unmap_queues_cpsch()
2488 dqm = kzalloc(sizeof(*dqm), GFP_KERNEL); in device_queue_manager_init()
2598 if (!dqm->ops.initialize(dqm)) { in device_queue_manager_init()
2618 dqm->ops.stop(dqm); in device_queue_manager_uninit()
[all …]
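
Note: the hits above trace the dqm lifecycle, allocation with kzalloc, ops.initialize() on create (0 means success, per the hit at line 2598), and ops.stop() on teardown. The following standalone C sketch is not kernel code; every toy_* name is invented for illustration, but it shows that shape.

#include <stdlib.h>

struct toy_dqm;

struct toy_dqm_ops {
        int (*initialize)(struct toy_dqm *dqm);        /* returns 0 on success */
        int (*start)(struct toy_dqm *dqm);
        int (*stop)(struct toy_dqm *dqm);
};

struct toy_dqm {
        struct toy_dqm_ops ops;
        int started;
};

static int toy_initialize(struct toy_dqm *dqm) { dqm->started = 0; return 0; }
static int toy_start(struct toy_dqm *dqm) { dqm->started = 1; return 0; }
static int toy_stop(struct toy_dqm *dqm) { dqm->started = 0; return 0; }

/* Mirrors the kzalloc-then-initialize shape in the hits above: a zeroed
 * allocation, an ops table, then initialize(), freeing on failure. */
static struct toy_dqm *toy_dqm_init(void)
{
        struct toy_dqm *dqm = calloc(1, sizeof(*dqm));

        if (!dqm)
                return NULL;
        dqm->ops.initialize = toy_initialize;
        dqm->ops.start = toy_start;
        dqm->ops.stop = toy_stop;
        if (!dqm->ops.initialize(dqm))
                return dqm;
        free(dqm);
        return NULL;
}

/* Mirrors device_queue_manager_uninit(): stop, then release. */
static void toy_dqm_uninit(struct toy_dqm *dqm)
{
        dqm->ops.stop(dqm);
        free(dqm);
}
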
kfd_device_queue_manager.h
132 int (*create_queue)(struct device_queue_manager *dqm,
143 int (*update_queue)(struct device_queue_manager *dqm,
152 int (*initialize)(struct device_queue_manager *dqm);
153 int (*start)(struct device_queue_manager *dqm);
154 int (*stop)(struct device_queue_manager *dqm);
155 void (*pre_reset)(struct device_queue_manager *dqm);
199 int (*update_qpd)(struct device_queue_manager *dqm,
323 mutex_lock(&dqm->lock_hidden); in dqm_lock()
324 dqm->saved_flags = memalloc_noreclaim_save(); in dqm_lock()
328 memalloc_noreclaim_restore(dqm->saved_flags); in dqm_unlock()
[all …]
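
Note: the header hits show both the ops function-pointer table and the dqm_lock()/dqm_unlock() helpers, which pair a hidden mutex with memalloc_noreclaim_save()/_restore() so allocations made while holding the lock cannot recurse into reclaim. A standalone sketch of that pairing follows; thread_alloc_flags is a stand-in for the task flags the real API touches.

#include <pthread.h>

static _Thread_local unsigned int thread_alloc_flags;
#define TOY_NORECLAIM 0x1u

struct toy_locked_dqm {
        pthread_mutex_t lock_hidden;
        unsigned int saved_flags;
};

static void toy_dqm_lock(struct toy_locked_dqm *dqm)
{
        pthread_mutex_lock(&dqm->lock_hidden);
        /* memalloc_noreclaim_save() analogue: remember, then set the bit. */
        dqm->saved_flags = thread_alloc_flags;
        thread_alloc_flags |= TOY_NORECLAIM;
}

static void toy_dqm_unlock(struct toy_locked_dqm *dqm)
{
        /* memalloc_noreclaim_restore() analogue: put the old flags back. */
        thread_alloc_flags = dqm->saved_flags;
        pthread_mutex_unlock(&dqm->lock_hidden);
}
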
kfd_device_queue_manager_v9.c
29 static int update_qpd_v9(struct device_queue_manager *dqm,
31 static void init_sdma_vm_v9(struct device_queue_manager *dqm, struct queue *q,
51 static int update_qpd_v9(struct device_queue_manager *dqm, in update_qpd_v9() argument
63 if (dqm->dev->kfd->noretry) in update_qpd_v9()
66 if (KFD_GC_VERSION(dqm->dev->kfd) == IP_VERSION(9, 4, 3)) in update_qpd_v9()
74 if (KFD_SUPPORT_XNACK_PER_PROCESS(dqm->dev)) { in update_qpd_v9()
89 static void init_sdma_vm_v9(struct device_queue_manager *dqm, struct queue *q, in init_sdma_vm_v9() argument
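
Note: each GPU generation file in these results registers its own update_qpd/init_sdma_vm callbacks; the v9 hits above additionally fold device settings such as noretry and per-process XNACK into the qpd. A trimmed sketch of that hook shape, where the field names and the register bit are placeholders, not the real SH_MEM_CONFIG layout:

struct toy_qpd {
        unsigned int sh_mem_config;
};

struct toy_asic_dev {
        int noretry;
};

static int toy_update_qpd_v9(const struct toy_asic_dev *dev,
                             struct toy_qpd *qpd)
{
        qpd->sh_mem_config = 0;
        if (dev->noretry)
                qpd->sh_mem_config |= 0x1u;     /* placeholder retry-disable bit */
        return 0;
}
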
kfd_packet_manager.c
48 struct kfd_node *dev = pm->dqm->dev; in pm_calc_rlib_size()
50 process_count = pm->dqm->processes_count; in pm_calc_rlib_size()
51 queue_count = pm->dqm->active_queue_count; in pm_calc_rlib_size()
52 compute_queue_count = pm->dqm->active_cp_queue_count; in pm_calc_rlib_size()
53 gws_queue_count = pm->dqm->gws_queue_count; in pm_calc_rlib_size()
146 pm->dqm->processes_count, pm->dqm->active_queue_count); in pm_create_runlist_ib()
152 if (processes_mapped >= pm->dqm->processes_count) { in pm_create_runlist_ib()
227 switch (dqm->dev->adev->asic_type) { in pm_init()
248 dqm->dev->adev->asic_type); in pm_init()
253 pm->dqm = dqm; in pm_init()
[all …]
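
Note: pm_calc_rlib_size() above sizes the runlist IB from the dqm's process and queue counts. A sketch of that arithmetic; the packet sizes here are placeholders rather than real PM4 sizes, and the kernel additionally accounts for over-subscription and GWS queues.

#include <stddef.h>

#define TOY_MAP_PROCESS_BYTES 64u       /* placeholder */
#define TOY_MAP_QUEUE_BYTES   56u       /* placeholder */

static size_t toy_calc_rlib_size(unsigned int process_count,
                                 unsigned int queue_count)
{
        /* One map-process packet per process, one map-queue per queue. */
        return (size_t)process_count * TOY_MAP_PROCESS_BYTES +
               (size_t)queue_count * TOY_MAP_QUEUE_BYTES;
}
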
kfd_process_queue_manager.c
90 dev->dqm->ops.process_termination(dev->dqm, &pdd->qpd); in kfd_process_dequeue_from_device()
155 return pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm, in pqm_set_gws()
331 dev->dqm->ops.register_process(dev->dqm, &pdd->qpd); in pqm_create_queue()
391 retval = dev->dqm->ops.create_kernel_queue(dev->dqm, in pqm_create_queue()
443 dev->dqm->ops.unregister_process(dev->dqm, &pdd->qpd); in pqm_create_queue()
455 dqm = NULL; in pqm_destroy_queue()
481 dqm = pqn->kq->dev->dqm; in pqm_destroy_queue()
482 dqm->ops.destroy_kernel_queue(dqm, pqn->kq, &pdd->qpd); in pqm_destroy_queue()
488 dqm = pqn->q->device->dqm; in pqm_destroy_queue()
489 retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q); in pqm_destroy_queue()
[all …]
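
Note: pqm_destroy_queue() above dispatches on whether the per-process queue node holds a kernel queue (pqn->kq) or a user queue (pqn->q) and calls the matching dqm op. A sketch of that dispatch with stand-in types:

#include <stddef.h>

struct toy_queue;
struct toy_kernel_queue;

struct toy_pqm_ops {
        int  (*destroy_queue)(struct toy_queue *q);
        void (*destroy_kernel_queue)(struct toy_kernel_queue *kq);
};

struct toy_queue_node {
        struct toy_queue *q;            /* user queue, or NULL */
        struct toy_kernel_queue *kq;    /* kernel queue, or NULL */
};

static int toy_destroy(struct toy_queue_node *pqn, struct toy_pqm_ops *ops)
{
        if (pqn->kq) {
                ops->destroy_kernel_queue(pqn->kq);
                return 0;
        }
        if (pqn->q)
                return ops->destroy_queue(pqn->q);
        return -1;      /* stands in for -EINVAL */
}
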
kfd_device_queue_manager_vi.c
30 static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
36 static int update_qpd_vi(struct device_queue_manager *dqm,
38 static void init_sdma_vm(struct device_queue_manager *dqm,
79 static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm, in set_cache_memory_policy_vi() argument
106 static int update_qpd_vi(struct device_queue_manager *dqm, in update_qpd_vi() argument
140 static void init_sdma_vm(struct device_queue_manager *dqm, in init_sdma_vm() argument
kfd_mqd_manager.c
57 mqd_mem_obj->gtt_mem = dev->dqm->hiq_sdma_mqd.gtt_mem; in allocate_hiq_mqd()
58 mqd_mem_obj->gpu_addr = dev->dqm->hiq_sdma_mqd.gpu_addr; in allocate_hiq_mqd()
59 mqd_mem_obj->cpu_ptr = dev->dqm->hiq_sdma_mqd.cpu_ptr; in allocate_hiq_mqd()
77 dev->dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size; in allocate_sdma_mqd()
79 offset += dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size * in allocate_sdma_mqd()
84 mqd_mem_obj->gpu_addr = dev->dqm->hiq_sdma_mqd.gpu_addr + offset; in allocate_sdma_mqd()
86 dev->dqm->hiq_sdma_mqd.cpu_ptr + offset); in allocate_sdma_mqd()
271 return dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size; in kfd_hiq_mqd_stride()
282 dev->dqm->hiq_sdma_mqd.gtt_mem : NULL; in kfd_get_hiq_xcc_mqd()
283 mqd_mem_obj->gpu_addr = dev->dqm->hiq_sdma_mqd.gpu_addr + offset; in kfd_get_hiq_xcc_mqd()
[all …]
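
Note: allocate_sdma_mqd() above hands out SDMA MQDs by offsetting into the dqm's preallocated hiq_sdma_mqd chunk rather than allocating fresh GTT memory per queue. A simplified sketch; the real offset also accounts for XCC count and engine/queue indexing.

#include <stdint.h>

struct toy_mem_chunk {
        uint64_t gpu_addr;
        void *cpu_ptr;
};

static void toy_get_sdma_mqd(const struct toy_mem_chunk *hiq_sdma,
                             uint32_t hiq_mqd_size, uint32_t sdma_mqd_size,
                             uint32_t sdma_slot, struct toy_mem_chunk *out)
{
        /* HIQ MQD(s) first, then one SDMA MQD slot per engine/queue pair. */
        uint64_t offset = hiq_mqd_size + (uint64_t)sdma_slot * sdma_mqd_size;

        out->gpu_addr = hiq_sdma->gpu_addr + offset;
        out->cpu_ptr = (char *)hiq_sdma->cpu_ptr + offset;
}
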
kfd_device_queue_manager_cik.c
30 static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
36 static int update_qpd_cik(struct device_queue_manager *dqm,
38 static void init_sdma_vm(struct device_queue_manager *dqm,
78 static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm, in set_cache_memory_policy_cik() argument
104 static int update_qpd_cik(struct device_queue_manager *dqm, in update_qpd_cik() argument
134 static void init_sdma_vm(struct device_queue_manager *dqm, in init_sdma_vm() argument
kfd_device_queue_manager_v11.c
29 static int update_qpd_v11(struct device_queue_manager *dqm,
31 static void init_sdma_vm_v11(struct device_queue_manager *dqm, struct queue *q,
51 static int update_qpd_v11(struct device_queue_manager *dqm, in update_qpd_v11() argument
76 static void init_sdma_vm_v11(struct device_queue_manager *dqm, struct queue *q, in init_sdma_vm_v11() argument
kfd_device_queue_manager_v10.c
30 static int update_qpd_v10(struct device_queue_manager *dqm,
32 static void init_sdma_vm_v10(struct device_queue_manager *dqm, struct queue *q,
52 static int update_qpd_v10(struct device_queue_manager *dqm, in update_qpd_v10() argument
76 static void init_sdma_vm_v10(struct device_queue_manager *dqm, struct queue *q, in init_sdma_vm_v10() argument
kfd_packet_manager_v9.c
37 struct kfd_node *kfd = pm->dqm->dev; in pm_map_process_v9()
55 if (kfd->dqm->trap_debug_vmid && pdd->process->debug_trap_enabled && in pm_map_process_v9()
57 packet->bitfields2.debug_vmid = kfd->dqm->trap_debug_vmid; in pm_map_process_v9()
91 struct kfd_dev *kfd = pm->dqm->dev->kfd; in pm_map_process_aldebaran()
146 struct kfd_node *kfd = pm->dqm->dev; in pm_runlist_v9()
157 concurrent_proc_cnt = min(pm->dqm->processes_count, in pm_runlist_v9()
297 pm->dqm->dev->kfd2kgd->build_grace_period_packet_info( in pm_set_grace_period_v9()
298 pm->dqm->dev->adev, in pm_set_grace_period_v9()
299 pm->dqm->wait_times, in pm_set_grace_period_v9()
305 reg_data = pm->dqm->wait_times; in pm_set_grace_period_v9()
[all …]
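
Note: pm_set_grace_period_v9() above either derives register data through the kfd2kgd build_grace_period_packet_info() callback or, when restoring the default, writes back the cached dqm->wait_times verbatim. A sketch of that choice; the sentinel and the encoding are placeholders.

#include <stdint.h>

#define TOY_DEFAULT_GRACE_PERIOD 0xffffffffu    /* placeholder sentinel */

static uint32_t toy_grace_period_reg(uint32_t cached_wait_times,
                                     uint32_t grace_period)
{
        if (grace_period == TOY_DEFAULT_GRACE_PERIOD)
                return cached_wait_times;       /* restore the saved default */
        /* Placeholder encoding; the real packing lives in the kfd2kgd
         * build_grace_period_packet_info() callback. */
        return grace_period & 0xfffu;
}
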
kfd_debug.c
240 kfd_dqm_evict_pasid(dev->dqm, p->pasid); in kfd_set_dbg_ev_from_interrupt()
315 err = q->device->dqm->ops.update_queue(q->device->dqm, q, &minfo); in kfd_dbg_set_queue_workaround()
421 r = debug_lock_and_unmap(pdd->dev->dqm); in kfd_dbg_trap_clear_dev_address_watch()
433 r = debug_map_and_unlock(pdd->dev->dqm); in kfd_dbg_trap_clear_dev_address_watch()
455 r = debug_lock_and_unmap(pdd->dev->dqm); in kfd_dbg_trap_set_dev_address_watch()
475 r = debug_map_and_unlock(pdd->dev->dqm); in kfd_dbg_trap_set_dev_address_watch()
517 r = debug_refresh_runlist(pdd->dev->dqm); in kfd_dbg_trap_set_flags()
540 debug_refresh_runlist(pdd->dev->dqm); in kfd_dbg_trap_set_flags()
602 debug_refresh_runlist(pdd->dev->dqm); in kfd_dbg_trap_deactivate()
719 r = debug_refresh_runlist(pdd->dev->dqm); in kfd_dbg_trap_activate()
[all …]
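
Note: the address-watch hits above follow a consistent bracket, debug_lock_and_unmap() before touching watchpoints and debug_map_and_unlock() after, so the queues are off the hardware while the watch state changes. A sketch of that bracket with stand-in callbacks:

struct toy_dbg_dqm;

static int toy_set_address_watch(struct toy_dbg_dqm *dqm,
                                 int (*lock_and_unmap)(struct toy_dbg_dqm *),
                                 int (*map_and_unlock)(struct toy_dbg_dqm *),
                                 void (*update_watch)(struct toy_dbg_dqm *))
{
        int r = lock_and_unmap(dqm);

        if (r)
                return r;
        update_watch(dqm);      /* queues are unmapped at this point */
        return map_and_unlock(dqm);
}
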
kfd_device.c
489 if (node->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) in kfd_gws_init()
529 node->dqm = device_queue_manager_init(node); in kfd_init_node()
530 if (!node->dqm) { in kfd_init_node()
556 device_queue_manager_uninit(node->dqm); in kfd_init_node()
575 device_queue_manager_uninit(knode->dqm); in kfd_cleanup_nodes()
830 node->dqm->sched_policy); in kgd2kfd_device_init()
876 node->dqm->ops.pre_reset(node->dqm); in kgd2kfd_pre_reset()
946 node->dqm->ops.stop(node->dqm); in kgd2kfd_suspend()
979 err = node->dqm->ops.start(node->dqm); in kfd_resume()
1386 if (dev->dqm->sched_policy != KFD_SCHED_POLICY_HWS) { in kfd_debugfs_hang_hws()
[all …]
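
Note: kgd2kfd_suspend() and kfd_resume() above delegate straight to the node's dqm ops. A minimal sketch of that delegation with stand-in types:

struct toy_node_dqm {
        int (*start)(struct toy_node_dqm *dqm);
        int (*stop)(struct toy_node_dqm *dqm);
};

struct toy_node {
        struct toy_node_dqm *dqm;
};

static void toy_node_suspend(struct toy_node *node)
{
        node->dqm->stop(node->dqm);
}

static int toy_node_resume(struct toy_node *node)
{
        return node->dqm->start(node->dqm);
}
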
kfd_process.c
118 dqm = pdd->dev->dqm; in kfd_sdma_activity_worker()
120 if (!dqm || !qpd) in kfd_sdma_activity_worker()
149 dqm_lock(dqm); in kfd_sdma_activity_worker()
158 dqm_unlock(dqm); in kfd_sdma_activity_worker()
175 dqm_unlock(dqm); in kfd_sdma_activity_worker()
179 dqm_unlock(dqm); in kfd_sdma_activity_worker()
209 dqm_lock(dqm); in kfd_sdma_activity_worker()
231 dqm_unlock(dqm); in kfd_sdma_activity_worker()
1584 pdd->qpd.dqm = dev->dqm; in kfd_create_process_device_data()
1830 r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm, in kfd_process_evict_queues()
[all …]
kfd_int_process_v11.c
211 if (dev->dqm->ops.reset_queues) in event_interrupt_poison_consumption_v11()
212 ret = dev->dqm->ops.reset_queues(dev->dqm, pasid); in event_interrupt_poison_consumption_v11()
kfd_int_process_v9.c
164 ret = kfd_dqm_evict_pasid(dev->dqm, pasid); in event_interrupt_poison_consumption_v9()
257 if (!pasid && dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) { in event_interrupt_isr_v9()
264 pasid = dev->dqm->vmid_pasid[vmid]; in event_interrupt_isr_v9()
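
Note: event_interrupt_isr_v9() above recovers a missing PASID from dqm->vmid_pasid[vmid] when the node runs without the hardware scheduler. A sketch; the table size and the hws flag are illustrative stand-ins.

#include <stdint.h>

#define TOY_NUM_VMID 16

struct toy_isr_dqm {
        int hws_enabled;
        uint32_t vmid_pasid[TOY_NUM_VMID];
};

static uint32_t toy_resolve_pasid(const struct toy_isr_dqm *dqm,
                                  uint32_t pasid, unsigned int vmid)
{
        if (!pasid && !dqm->hws_enabled && vmid < TOY_NUM_VMID)
                pasid = dqm->vmid_pasid[vmid];
        return pasid;
}
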
kfd_priv.h
287 struct device_queue_manager *dqm; member
641 struct device_queue_manager *dqm; member
1297 void device_queue_manager_uninit(struct device_queue_manager *dqm);
1301 int kfd_dqm_evict_pasid(struct device_queue_manager *dqm, u32 pasid);
1360 struct device_queue_manager *dqm; member
1405 int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
1522 int dqm_debugfs_hang_hws(struct device_queue_manager *dqm);
kfd_packet_manager_vi.c
80 struct kfd_node *kfd = pm->dqm->dev; in pm_runlist_vi()
94 concurrent_proc_cnt = min(pm->dqm->processes_count, in pm_runlist_vi()
cik_event_interrupt.c
113 kfd_dqm_evict_pasid(dev->dqm, pasid); in cik_event_interrupt_wq()
kfd_kernel_queue.c
65 kq->mqd_mgr = dev->dqm->mqd_mgrs[KFD_MQD_TYPE_DIQ]; in kq_initialize()
68 kq->mqd_mgr = dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]; in kq_initialize()
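
Note: kq_initialize() above picks the kernel queue's MQD manager from the dqm's type-indexed mqd_mgrs table, DIQ for debug queues and HIQ otherwise. A trimmed sketch of that selection:

struct toy_mqd_mgr;

enum toy_mqd_type { TOY_MQD_HIQ, TOY_MQD_DIQ, TOY_MQD_MAX };

static struct toy_mqd_mgr *
toy_pick_mqd_mgr(struct toy_mqd_mgr *mgrs[TOY_MQD_MAX], int is_diq)
{
        return mgrs[is_diq ? TOY_MQD_DIQ : TOY_MQD_HIQ];
}
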
kfd_int_process_v10.c
153 ret = kfd_dqm_evict_pasid(dev->dqm, pasid); in event_interrupt_poison_consumption()
kfd_topology.c
2011 dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ? in kfd_topology_add_device()
2013 dev->node_props.num_cp_queues = get_cp_queues_num(dev->gpu->dqm); in kfd_topology_add_device()
2261 r = dqm_debugfs_hqds(m, dev->gpu->dqm); in kfd_debugfs_hqds_by_device()
2286 r = pm_debugfs_runlist(m, &dev->gpu->dqm->packet_mgr); in kfd_debugfs_rls_by_device()
kfd_chardev.c
610 if (!pdd->dev->dqm->ops.set_cache_memory_policy(pdd->dev->dqm, in kfd_ioctl_set_memory_policy()
924 if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS && in kfd_ioctl_set_scratch_backing_va()
1489 if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) { in kfd_ioctl_alloc_queue_gws()
2867 debug_refresh_runlist(pdd->dev->dqm); in runtime_disable()
kfd_mqd_manager_v9.c
622 uint32_t local_xcc_start = mm->dev->dqm->current_logical_xcc_start++; in init_mqd_v9_4_3()
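
Note: init_mqd_v9_4_3() above assigns each new MQD a starting XCC from a post-incremented per-dqm counter, spreading queues across a partition's XCCs. A sketch; the modulo wrap is an assumption, not shown in the hit.

#include <stdint.h>

static uint32_t toy_next_xcc_start(uint32_t *counter, uint32_t num_xccs)
{
        /* Round-robin: each caller gets the next starting XCC index. */
        return (*counter)++ % num_xccs;
}
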