Lines Matching refs:pm
41 static void pm_calc_rlib_size(struct packet_manager *pm, in pm_calc_rlib_size() argument
48 struct kfd_node *dev = pm->dqm->dev; in pm_calc_rlib_size()
50 process_count = pm->dqm->processes_count; in pm_calc_rlib_size()
51 queue_count = pm->dqm->active_queue_count; in pm_calc_rlib_size()
52 compute_queue_count = pm->dqm->active_cp_queue_count; in pm_calc_rlib_size()
53 gws_queue_count = pm->dqm->gws_queue_count; in pm_calc_rlib_size()
66 compute_queue_count > get_cp_queues_num(pm->dqm) || in pm_calc_rlib_size()
72 map_queue_size = pm->pmf->map_queues_size; in pm_calc_rlib_size()
74 *rlib_size = process_count * pm->pmf->map_process_size + in pm_calc_rlib_size()
82 *rlib_size += pm->pmf->runlist_size; in pm_calc_rlib_size()
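The matches above cover pm_calc_rlib_size(), which sizes the runlist indirect buffer from the process and queue counts tracked by the device queue manager, using the per-packet sizes published in pm->pmf, and reserves room for one extra chained runlist packet when the hardware scheduler would be over-subscribed. A minimal standalone sketch of that arithmetic; the struct and the byte sizes below are hypothetical stand-ins, not the driver's definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the per-ASIC packet sizes held in pm->pmf. */
struct pm_sizes {
    uint32_t map_process_size;
    uint32_t map_queues_size;
    uint32_t runlist_size;
};

/* Sizing rule sketched from the matches: one map-process packet per
 * process, one map-queues packet per active queue, plus a chained
 * runlist packet if the run list is over-subscribed. */
static uint32_t calc_rlib_size(const struct pm_sizes *s,
                               uint32_t process_count, uint32_t queue_count,
                               bool over_subscription)
{
    uint32_t size = process_count * s->map_process_size +
                    queue_count * s->map_queues_size;

    if (over_subscription)
        size += s->runlist_size;    /* room for the chained runlist packet */

    return size;
}

int main(void)
{
    struct pm_sizes s = { 80, 64, 24 };    /* made-up byte sizes */

    printf("%u bytes\n", calc_rlib_size(&s, 2, 8, true));
    return 0;
}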
87 static int pm_allocate_runlist_ib(struct packet_manager *pm, in pm_allocate_runlist_ib() argument
95 if (WARN_ON(pm->allocated)) in pm_allocate_runlist_ib()
98 pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription); in pm_allocate_runlist_ib()
100 mutex_lock(&pm->lock); in pm_allocate_runlist_ib()
102 retval = kfd_gtt_sa_allocate(pm->dqm->dev, *rl_buffer_size, in pm_allocate_runlist_ib()
103 &pm->ib_buffer_obj); in pm_allocate_runlist_ib()
110 *(void **)rl_buffer = pm->ib_buffer_obj->cpu_ptr; in pm_allocate_runlist_ib()
111 *rl_gpu_buffer = pm->ib_buffer_obj->gpu_addr; in pm_allocate_runlist_ib()
114 pm->allocated = true; in pm_allocate_runlist_ib()
117 mutex_unlock(&pm->lock); in pm_allocate_runlist_ib()
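pm_allocate_runlist_ib() refuses to allocate twice (WARN_ON(pm->allocated)), sizes the buffer via pm_calc_rlib_size(), and takes pm->lock around the GTT sub-allocation before publishing the buffer's CPU and GPU addresses and setting pm->allocated. A simplified userspace sketch of that guarded, allocate-once pattern; a pthread mutex and calloc() stand in for the kernel mutex and kfd_gtt_sa_allocate():

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

/* Hypothetical model of the packet manager's runlist-IB state. */
struct rlib_state {
    pthread_mutex_t lock;
    bool allocated;
    void *ib;
    size_t ib_size;
};

/* Allocate the runlist buffer exactly once; further calls fail until
 * the buffer has been released again. */
static int rlib_alloc(struct rlib_state *st, size_t size)
{
    int ret = 0;

    if (st->allocated)          /* mirrors WARN_ON(pm->allocated) */
        return -EINVAL;

    pthread_mutex_lock(&st->lock);
    st->ib = calloc(1, size);   /* stands in for kfd_gtt_sa_allocate() */
    if (!st->ib) {
        ret = -ENOMEM;
        goto out;
    }
    st->ib_size = size;
    st->allocated = true;
out:
    pthread_mutex_unlock(&st->lock);
    return ret;
}

int main(void)
{
    struct rlib_state st = { .lock = PTHREAD_MUTEX_INITIALIZER };

    if (rlib_alloc(&st, 4096))
        return 1;
    if (rlib_alloc(&st, 4096) != -EINVAL)   /* double allocation is rejected */
        return 1;
    free(st.ib);
    return 0;
}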
121 static int pm_create_runlist_ib(struct packet_manager *pm, in pm_create_runlist_ib() argument
137 retval = pm_allocate_runlist_ib(pm, &rl_buffer, rl_gpu_addr, in pm_create_runlist_ib()
143 pm->ib_size_bytes = alloc_size_bytes; in pm_create_runlist_ib()
146 pm->dqm->processes_count, pm->dqm->active_queue_count); in pm_create_runlist_ib()
152 if (processes_mapped >= pm->dqm->processes_count) { in pm_create_runlist_ib()
154 pm_release_ib(pm); in pm_create_runlist_ib()
158 retval = pm->pmf->map_process(pm, &rl_buffer[rl_wptr], qpd); in pm_create_runlist_ib()
163 inc_wptr(&rl_wptr, pm->pmf->map_process_size, in pm_create_runlist_ib()
173 retval = pm->pmf->map_queues(pm, in pm_create_runlist_ib()
181 pm->pmf->map_queues_size, in pm_create_runlist_ib()
192 retval = pm->pmf->map_queues(pm, in pm_create_runlist_ib()
201 pm->pmf->map_queues_size, in pm_create_runlist_ib()
209 if (!pm->is_over_subscription) in pm_create_runlist_ib()
211 retval = pm->pmf->runlist(pm, &rl_buffer[rl_wptr], in pm_create_runlist_ib()
216 pm->is_over_subscription = is_over_subscription; in pm_create_runlist_ib()
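pm_create_runlist_ib() then fills that buffer: for each process it emits one map_process packet followed by map_queues packets for the process's kernel and user queues, advancing a dword write pointer (inc_wptr) by each packet's size, and appends a chained runlist packet at the end when the run list is over-subscribed. A compact sketch of that build loop over a flat packet buffer; the packet tags and byte sizes are invented for illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define IB_DWORDS 256

/* Invented packet sizes in bytes, standing in for pm->pmf->*_size. */
#define MAP_PROCESS_BYTES 16
#define MAP_QUEUES_BYTES  12

/* Write a fake packet at the current position and return the advanced
 * dword write pointer (the driver splits this between the pmf->map_*()
 * writers and inc_wptr()). */
static unsigned int emit(uint32_t *ib, unsigned int wptr, uint32_t tag,
                         unsigned int bytes)
{
    ib[wptr] = tag;             /* pretend the first dword identifies the packet */
    return wptr + bytes / sizeof(uint32_t);
}

int main(void)
{
    uint32_t ib[IB_DWORDS];
    unsigned int wptr = 0, p, q;
    const unsigned int processes = 2, queues_per_process = 3;

    memset(ib, 0, sizeof(ib));

    /* One map-process packet per process, then map-queues packets for
     * each of its active queues, mirroring the nested loops in
     * pm_create_runlist_ib(). */
    for (p = 0; p < processes; p++) {
        wptr = emit(ib, wptr, 0xA0 | p, MAP_PROCESS_BYTES);
        for (q = 0; q < queues_per_process; q++)
            wptr = emit(ib, wptr, 0xB0 | q, MAP_QUEUES_BYTES);
    }

    printf("runlist IB uses %zu bytes\n", wptr * sizeof(uint32_t));
    return 0;
}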
225 int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm) in pm_init() argument
238 pm->pmf = &kfd_vi_pm_funcs; in pm_init()
243 pm->pmf = &kfd_aldebaran_pm_funcs; in pm_init()
245 pm->pmf = &kfd_v9_pm_funcs; in pm_init()
253 pm->dqm = dqm; in pm_init()
254 mutex_init(&pm->lock); in pm_init()
255 pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ); in pm_init()
256 if (!pm->priv_queue) { in pm_init()
257 mutex_destroy(&pm->lock); in pm_init()
260 pm->allocated = false; in pm_init()
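pm_init() binds a per-ASIC packet writer table to pm->pmf (kfd_vi_pm_funcs, kfd_aldebaran_pm_funcs or kfd_v9_pm_funcs), records the DQM pointer, initialises pm->lock and creates the HIQ kernel queue the packets are written to, destroying the mutex again if kernel_queue_init() fails. The selection below is only an illustrative sketch of that table-binding idea; the enum and tables are made up, and the real driver keys off the device's IP version rather than these names:

#include <stdio.h>

/* Illustrative ASIC families; placeholders, not the driver's identifiers. */
enum asic_family { FAMILY_VI, FAMILY_ALDEBARAN, FAMILY_V9 };

struct pm_funcs {
    const char *name;
    unsigned int map_process_size;  /* bytes; made-up values */
};

static const struct pm_funcs vi_funcs        = { "vi",        64 };
static const struct pm_funcs aldebaran_funcs = { "aldebaran", 80 };
static const struct pm_funcs v9_funcs        = { "v9",        80 };

/* Bind the packet writer table once, as pm_init() does for pm->pmf. */
static const struct pm_funcs *select_pm_funcs(enum asic_family f)
{
    switch (f) {
    case FAMILY_VI:        return &vi_funcs;
    case FAMILY_ALDEBARAN: return &aldebaran_funcs;
    default:               return &v9_funcs;
    }
}

int main(void)
{
    printf("using %s packet writers\n", select_pm_funcs(FAMILY_ALDEBARAN)->name);
    return 0;
}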
265 void pm_uninit(struct packet_manager *pm, bool hanging) in pm_uninit() argument
267 mutex_destroy(&pm->lock); in pm_uninit()
268 kernel_queue_uninit(pm->priv_queue, hanging); in pm_uninit()
269 pm->priv_queue = NULL; in pm_uninit()
272 int pm_send_set_resources(struct packet_manager *pm, in pm_send_set_resources() argument
278 size = pm->pmf->set_resources_size; in pm_send_set_resources()
279 mutex_lock(&pm->lock); in pm_send_set_resources()
280 kq_acquire_packet_buffer(pm->priv_queue, in pm_send_set_resources()
289 retval = pm->pmf->set_resources(pm, buffer, res); in pm_send_set_resources()
291 kq_submit_packet(pm->priv_queue); in pm_send_set_resources()
293 kq_rollback_packet(pm->priv_queue); in pm_send_set_resources()
296 mutex_unlock(&pm->lock); in pm_send_set_resources()
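pm_send_set_resources() shows the submission pattern that pm_send_query_status(), pm_update_grace_period() and pm_send_unmap_queue() below repeat: take pm->lock, reserve space on the HIQ with kq_acquire_packet_buffer(), build the packet through the pmf callback, then kq_submit_packet() on success or kq_rollback_packet() on failure, and drop the lock. A userspace sketch of that submit-or-rollback flow; the toy ring and the packet builder are invented stand-ins for the kernel-queue helpers, not their real interfaces:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RING_DWORDS 64

/* Toy ring standing in for the HIQ kernel queue. */
struct ring {
    pthread_mutex_t lock;
    uint32_t buf[RING_DWORDS];
    unsigned int wptr, pending;
};

static uint32_t *acquire(struct ring *r, unsigned int dwords)
{
    if (r->wptr + dwords > RING_DWORDS)
        return NULL;            /* no space left in this simplified ring */
    r->pending = dwords;
    return &r->buf[r->wptr];
}

static void submit(struct ring *r)   { r->wptr += r->pending; r->pending = 0; }
static void rollback(struct ring *r) { r->pending = 0; }

/* Hypothetical packet builder; returns non-zero on failure, like the
 * pm->pmf->set_resources() callback it imitates. */
static int build_set_resources(uint32_t *pkt, unsigned int dwords)
{
    memset(pkt, 0, dwords * sizeof(*pkt));
    pkt[0] = 0xC0DE;
    return 0;
}

static int send_set_resources(struct ring *r)
{
    unsigned int dwords = 4;
    uint32_t *pkt;
    int ret = -1;

    pthread_mutex_lock(&r->lock);
    pkt = acquire(r, dwords);
    if (!pkt)
        goto out;               /* nothing reserved, nothing to roll back */

    ret = build_set_resources(pkt, dwords);
    if (!ret)
        submit(r);              /* packet is valid: make it visible */
    else
        rollback(r);            /* building failed: give the space back */
out:
    pthread_mutex_unlock(&r->lock);
    return ret;
}

int main(void)
{
    struct ring r = { .lock = PTHREAD_MUTEX_INITIALIZER };

    printf("send: %d, wptr=%u\n", send_set_resources(&r), r.wptr);
    return 0;
}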
301 int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues) in pm_send_runlist() argument
308 retval = pm_create_runlist_ib(pm, dqm_queues, &rl_gpu_ib_addr, in pm_send_runlist()
315 packet_size_dwords = pm->pmf->runlist_size / sizeof(uint32_t); in pm_send_runlist()
316 mutex_lock(&pm->lock); in pm_send_runlist()
318 retval = kq_acquire_packet_buffer(pm->priv_queue, in pm_send_runlist()
323 retval = pm->pmf->runlist(pm, rl_buffer, rl_gpu_ib_addr, in pm_send_runlist()
328 kq_submit_packet(pm->priv_queue); in pm_send_runlist()
330 mutex_unlock(&pm->lock); in pm_send_runlist()
335 kq_rollback_packet(pm->priv_queue); in pm_send_runlist()
337 mutex_unlock(&pm->lock); in pm_send_runlist()
339 pm_release_ib(pm); in pm_send_runlist()
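pm_send_runlist() first builds the runlist IB through pm_create_runlist_ib(), then writes a runlist packet pointing at that IB's GPU address into the HIQ; if anything fails after the IB exists, the reserved packet is rolled back, the lock is dropped and pm_release_ib() frees the IB again. A compressed sketch of that ordering; the stubs below are hypothetical stand-ins, not the driver's functions, and the forced failure is only there to exercise the cleanup path:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Minimal stand-ins: an "IB" allocation plus a pretend HIQ packet slot. */
static uint32_t *ib;

static int create_runlist_ib(size_t dwords) { ib = calloc(dwords, sizeof(*ib)); return ib ? 0 : -1; }
static void release_ib(void)                { free(ib); ib = NULL; }
static int acquire_packet(void)             { return 0; }                 /* reserve HIQ space */
static void submit_packet(void)             { puts("runlist submitted"); }
static void rollback_packet(void)           { puts("packet rolled back"); }
static int build_runlist_packet(bool fail)  { return fail ? -1 : 0; }     /* pretend building can fail */

/* Sketch of the pm_send_runlist() flow: build the IB, then a HIQ packet
 * that points at it; any failure after the IB exists rolls the packet
 * back and releases the IB. On success the IB stays live (the driver
 * frees it later via pm_release_ib()). */
static int send_runlist(bool force_fail)
{
    int ret = create_runlist_ib(256);

    if (ret)
        return ret;

    ret = acquire_packet();
    if (ret)
        goto fail;

    ret = build_runlist_packet(force_fail);
    if (ret) {
        rollback_packet();
        goto fail;
    }

    submit_packet();
    return 0;

fail:
    release_ib();
    return ret;
}

int main(void)
{
    printf("ok path: %d\n", send_runlist(false));
    release_ib();   /* in the driver this happens later, when the runlist is torn down */
    printf("error path: %d\n", send_runlist(true));
    return 0;
}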
343 int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address, in pm_send_query_status() argument
352 size = pm->pmf->query_status_size; in pm_send_query_status()
353 mutex_lock(&pm->lock); in pm_send_query_status()
354 kq_acquire_packet_buffer(pm->priv_queue, in pm_send_query_status()
362 retval = pm->pmf->query_status(pm, buffer, fence_address, fence_value); in pm_send_query_status()
364 kq_submit_packet(pm->priv_queue); in pm_send_query_status()
366 kq_rollback_packet(pm->priv_queue); in pm_send_query_status()
369 mutex_unlock(&pm->lock); in pm_send_query_status()
373 int pm_update_grace_period(struct packet_manager *pm, uint32_t grace_period) in pm_update_grace_period() argument
378 size = pm->pmf->set_grace_period_size; in pm_update_grace_period()
380 mutex_lock(&pm->lock); in pm_update_grace_period()
383 kq_acquire_packet_buffer(pm->priv_queue, in pm_update_grace_period()
393 retval = pm->pmf->set_grace_period(pm, buffer, grace_period); in pm_update_grace_period()
395 kq_submit_packet(pm->priv_queue); in pm_update_grace_period()
397 kq_rollback_packet(pm->priv_queue); in pm_update_grace_period()
401 mutex_unlock(&pm->lock); in pm_update_grace_period()
405 int pm_send_unmap_queue(struct packet_manager *pm, in pm_send_unmap_queue() argument
412 size = pm->pmf->unmap_queues_size; in pm_send_unmap_queue()
413 mutex_lock(&pm->lock); in pm_send_unmap_queue()
414 kq_acquire_packet_buffer(pm->priv_queue, in pm_send_unmap_queue()
422 retval = pm->pmf->unmap_queues(pm, buffer, filter, filter_param, reset); in pm_send_unmap_queue()
424 kq_submit_packet(pm->priv_queue); in pm_send_unmap_queue()
426 kq_rollback_packet(pm->priv_queue); in pm_send_unmap_queue()
429 mutex_unlock(&pm->lock); in pm_send_unmap_queue()
433 void pm_release_ib(struct packet_manager *pm) in pm_release_ib() argument
435 mutex_lock(&pm->lock); in pm_release_ib()
436 if (pm->allocated) { in pm_release_ib()
437 kfd_gtt_sa_free(pm->dqm->dev, pm->ib_buffer_obj); in pm_release_ib()
438 pm->allocated = false; in pm_release_ib()
440 mutex_unlock(&pm->lock); in pm_release_ib()
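pm_release_ib() frees the runlist IB under pm->lock, but only when pm->allocated says there is something to free, which makes it safe to call from error paths whether or not allocation ever happened. A tiny sketch of that flag-guarded, effectively idempotent release; a mutex and free() stand in for the kernel mutex and kfd_gtt_sa_free():

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct ib_state {
    pthread_mutex_t lock;
    bool allocated;
    void *ib;
};

/* Idempotent release: callers on error paths don't need to know whether
 * the buffer was ever allocated. */
static void release_ib(struct ib_state *st)
{
    pthread_mutex_lock(&st->lock);
    if (st->allocated) {
        free(st->ib);
        st->ib = NULL;
        st->allocated = false;
    }
    pthread_mutex_unlock(&st->lock);
}

int main(void)
{
    struct ib_state st = { .lock = PTHREAD_MUTEX_INITIALIZER };

    st.ib = malloc(64);
    st.allocated = true;
    release_ib(&st);
    release_ib(&st);    /* second call is a harmless no-op */
    return 0;
}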
447 struct packet_manager *pm = data; in pm_debugfs_runlist() local
449 mutex_lock(&pm->lock); in pm_debugfs_runlist()
451 if (!pm->allocated) { in pm_debugfs_runlist()
457 pm->ib_buffer_obj->cpu_ptr, pm->ib_size_bytes, false); in pm_debugfs_runlist()
460 mutex_unlock(&pm->lock); in pm_debugfs_runlist()
464 int pm_debugfs_hang_hws(struct packet_manager *pm) in pm_debugfs_hang_hws() argument
469 if (!pm->priv_queue) in pm_debugfs_hang_hws()
472 size = pm->pmf->query_status_size; in pm_debugfs_hang_hws()
473 mutex_lock(&pm->lock); in pm_debugfs_hang_hws()
474 kq_acquire_packet_buffer(pm->priv_queue, in pm_debugfs_hang_hws()
482 kq_submit_packet(pm->priv_queue); in pm_debugfs_hang_hws()
488 mutex_unlock(&pm->lock); in pm_debugfs_hang_hws()