Lines matching the identifier "pm" (full matches only) in the amdkfd packet manager, drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c. Each line below is prefixed with its line number in that file; gaps in the numbering are intervening lines the search did not match.
41 static void pm_calc_rlib_size(struct packet_manager *pm,
48 struct kfd_node *node = pm->dqm->dev;
51 process_count = pm->dqm->processes_count;
52 queue_count = pm->dqm->active_queue_count;
53 compute_queue_count = pm->dqm->active_cp_queue_count;
54 gws_queue_count = pm->dqm->gws_queue_count;
67 compute_queue_count > get_cp_queues_num(pm->dqm) ||
73 map_queue_size = pm->pmf->map_queues_size;
75 *rlib_size = process_count * pm->pmf->map_process_size +
83 *rlib_size += pm->pmf->runlist_size;
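
Taken together, these fragments give the runlist-IB sizing rule: one map_process packet per process, one map_queues packet per queue, plus one extra runlist packet when the scheduler is oversubscribed (line 67 is part of that check; line 83 adds the slack). A minimal sketch of the arithmetic, reconstructed from the lines above:

static void calc_rlib_size_sketch(struct packet_manager *pm,
				  unsigned int *rlib_size,
				  bool over_subscription)
{
	unsigned int process_count = pm->dqm->processes_count;
	unsigned int queue_count = pm->dqm->active_queue_count;

	/* One map_process packet per process, one map_queues per queue. */
	*rlib_size = process_count * pm->pmf->map_process_size +
		     queue_count * pm->pmf->map_queues_size;

	/*
	 * An oversubscribed runlist ends with a runlist packet that chains
	 * the IB back onto itself, so reserve room for it (line 83).
	 */
	if (over_subscription)
		*rlib_size += pm->pmf->runlist_size;
}
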
88 static int pm_allocate_runlist_ib(struct packet_manager *pm,
94 struct kfd_node *node = pm->dqm->dev;
98 if (WARN_ON(pm->allocated))
101 pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);
103 mutex_lock(&pm->lock);
105 retval = kfd_gtt_sa_allocate(node, *rl_buffer_size, &pm->ib_buffer_obj);
112 *(void **)rl_buffer = pm->ib_buffer_obj->cpu_ptr;
113 *rl_gpu_buffer = pm->ib_buffer_obj->gpu_addr;
116 pm->allocated = true;
119 mutex_unlock(&pm->lock);
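
The allocation path (lines 88-119) refuses to build a second IB while one is live (the WARN_ON at line 98) and takes pm->lock around the GTT sub-allocation. A condensed sketch of the flow, using the same kfd_gtt_sa_* calls the fragments show; buffer clearing and error reporting are trimmed:

static int allocate_runlist_ib_sketch(struct packet_manager *pm,
				      unsigned int **rl_buffer,
				      uint64_t *rl_gpu_buffer,
				      unsigned int *rl_buffer_size,
				      bool *is_over_subscription)
{
	int retval;

	if (WARN_ON(pm->allocated))	/* only one runlist IB at a time */
		return -EINVAL;

	pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);

	mutex_lock(&pm->lock);
	retval = kfd_gtt_sa_allocate(pm->dqm->dev, *rl_buffer_size,
				     &pm->ib_buffer_obj);
	if (!retval) {
		/* CPU pointer for writing packets, GPU VA for the HIQ. */
		*rl_buffer = pm->ib_buffer_obj->cpu_ptr;
		*rl_gpu_buffer = pm->ib_buffer_obj->gpu_addr;
		pm->allocated = true;
	}
	mutex_unlock(&pm->lock);
	return retval;
}
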
123 static int pm_create_runlist_ib(struct packet_manager *pm,
130 struct kfd_node *node = pm->dqm->dev;
141 retval = pm_allocate_runlist_ib(pm, &rl_buffer, rl_gpu_addr,
147 pm->ib_size_bytes = alloc_size_bytes;
150 pm->dqm->processes_count, pm->dqm->active_queue_count);
156 if (processes_mapped >= pm->dqm->processes_count) {
158 pm_release_ib(pm);
162 retval = pm->pmf->map_process(pm, &rl_buffer[rl_wptr], qpd);
167 inc_wptr(&rl_wptr, pm->pmf->map_process_size,
178 retval = pm->pmf->map_queues(pm,
186 pm->pmf->map_queues_size,
198 retval = pm->pmf->map_queues(pm,
207 pm->pmf->map_queues_size,
215 if (!pm->is_over_subscription)
219 retval = pm->pmf->runlist(pm, &rl_buffer[rl_wptr],
224 pm->is_over_subscription = is_over_subscription;
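
pm_create_runlist_ib (lines 123-224) fills that IB: per process it writes one map_process packet, then one map_queues packet per active queue, advancing a write pointer after each; the two map_queues call sites (lines 178 and 198) serve a process's kernel-queue and user-queue lists respectively. If sizing said "oversubscribed", the IB is closed with a runlist packet that chains back to its own start (line 219), and the flag is cached at line 224 so the warning at line 215 fires only on the transition. A heavily condensed sketch of the loop structure, with error handling and the kernel-queue pass trimmed:

list_for_each_entry(cur, queues, list) {
	struct qcm_process_device *qpd = cur->qpd;

	/* One map_process packet per process address space. */
	retval = pm->pmf->map_process(pm, &rl_buffer[rl_wptr], qpd);
	inc_wptr(&rl_wptr, pm->pmf->map_process_size, alloc_size_bytes);

	/* One map_queues packet per active queue of that process. */
	list_for_each_entry(q, &qpd->queues_list, list) {
		if (!q->properties.is_active)
			continue;
		retval = pm->pmf->map_queues(pm, &rl_buffer[rl_wptr],
					     q, qpd->is_debug);
		inc_wptr(&rl_wptr, pm->pmf->map_queues_size,
			 alloc_size_bytes);
	}
}

/* Oversubscribed: end with a self-chaining runlist packet (line 219). */
if (is_over_subscription)
	retval = pm->pmf->runlist(pm, &rl_buffer[rl_wptr], *rl_gpu_addr,
				  alloc_size_bytes / sizeof(uint32_t),
				  true /* chain */);
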
233 int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
246 pm->pmf = &kfd_vi_pm_funcs;
251 pm->pmf = &kfd_aldebaran_pm_funcs;
253 pm->pmf = &kfd_v9_pm_funcs;
261 pm->dqm = dqm;
262 mutex_init(&pm->lock);
263 pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ);
264 if (!pm->priv_queue) {
265 mutex_destroy(&pm->lock);
268 pm->allocated = false;
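
pm_init selects the per-generation packet-format table (VI-era parts get kfd_vi_pm_funcs at line 246; Aldebaran and other GFX9+ parts get theirs at lines 251 and 253), then initializes the lock and creates the HIQ kernel queue the packets ride on; the one failure path (lines 264-265) destroys the mutex before returning. Its mirror image, pm_uninit, appears in the next group. A usage sketch of the lifecycle; note that in the driver the packet_manager is embedded in the dqm rather than declared standalone as here:

struct packet_manager pm = {};

if (pm_init(&pm, dqm))		/* -ENOMEM when HIQ creation fails */
	return -ENOMEM;

/* ... pm_send_runlist() / pm_send_query_status() / pm_send_unmap_queue() ... */

/*
 * Teardown mirrors init (lines 275-277). The hanging flag is threaded
 * into kernel_queue_uninit, presumably so teardown does not wait on a
 * wedged HWS.
 */
pm_uninit(&pm, false);
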
273 void pm_uninit(struct packet_manager *pm, bool hanging)
275 mutex_destroy(&pm->lock);
276 kernel_queue_uninit(pm->priv_queue, hanging);
277 pm->priv_queue = NULL;
280 int pm_send_set_resources(struct packet_manager *pm,
283 struct kfd_node *node = pm->dqm->dev;
288 size = pm->pmf->set_resources_size;
289 mutex_lock(&pm->lock);
290 kq_acquire_packet_buffer(pm->priv_queue,
299 retval = pm->pmf->set_resources(pm, buffer, res);
301 kq_submit_packet(pm->priv_queue);
303 kq_rollback_packet(pm->priv_queue);
306 mutex_unlock(&pm->lock);
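
Every pm_send_* helper here shares one submission pattern, repeated at lines 365-378, 397-412, and 431-443: take pm->lock, reserve ring space on the HIQ with kq_acquire_packet_buffer(), let the per-ASIC pmf hook format the packet in place, then publish it with kq_submit_packet() on success or retract the reservation with kq_rollback_packet() on failure. The skeleton, reconstructed from the set_resources fragments:

size = pm->pmf->set_resources_size;
mutex_lock(&pm->lock);
kq_acquire_packet_buffer(pm->priv_queue, size / sizeof(uint32_t),
			 (unsigned int **)&buffer);
if (!buffer) {			/* no ring space available */
	retval = -ENOMEM;
	goto out;
}

retval = pm->pmf->set_resources(pm, buffer, res);
if (!retval)
	kq_submit_packet(pm->priv_queue);	/* advance wptr, ring doorbell */
else
	kq_rollback_packet(pm->priv_queue);	/* give the space back */

out:
	mutex_unlock(&pm->lock);
	return retval;
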
311 int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
318 retval = pm_create_runlist_ib(pm, dqm_queues, &rl_gpu_ib_addr,
325 packet_size_dwords = pm->pmf->runlist_size / sizeof(uint32_t);
326 mutex_lock(&pm->lock);
328 retval = kq_acquire_packet_buffer(pm->priv_queue,
333 retval = pm->pmf->runlist(pm, rl_buffer, rl_gpu_ib_addr,
338 kq_submit_packet(pm->priv_queue);
340 mutex_unlock(&pm->lock);
345 kq_rollback_packet(pm->priv_queue);
347 mutex_unlock(&pm->lock);
349 pm_release_ib(pm);
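
pm_send_runlist composes the pieces above: build the runlist IB (line 318), then submit a single runlist packet on the HIQ pointing the CP at it; the chain argument is false here (the call at line 333) because the IB chains itself when oversubscribed. The unwind order on failure matters: roll back the HIQ reservation, drop the lock, then pm_release_ib() (line 349) so pm->allocated returns to false and a retry can rebuild. Roughly:

static int send_runlist_sketch(struct packet_manager *pm,
			       struct list_head *dqm_queues)
{
	uint64_t rl_gpu_ib_addr;
	unsigned int *rl_buffer;
	size_t rl_ib_size;
	int retval;

	retval = pm_create_runlist_ib(pm, dqm_queues, &rl_gpu_ib_addr,
				      &rl_ib_size);
	if (retval)
		return retval;

	mutex_lock(&pm->lock);
	retval = kq_acquire_packet_buffer(pm->priv_queue,
					  pm->pmf->runlist_size / sizeof(uint32_t),
					  &rl_buffer);
	if (retval)
		goto fail_unlock;

	/* chain=false: the IB chains itself when oversubscribed. */
	retval = pm->pmf->runlist(pm, rl_buffer, rl_gpu_ib_addr,
				  rl_ib_size / sizeof(uint32_t), false);
	if (retval)
		goto fail_rollback;

	kq_submit_packet(pm->priv_queue);
	mutex_unlock(&pm->lock);
	return 0;

fail_rollback:
	kq_rollback_packet(pm->priv_queue);
fail_unlock:
	mutex_unlock(&pm->lock);
	pm_release_ib(pm);	/* clear pm->allocated so a retry can rebuild */
	return retval;
}
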
353 int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
356 struct kfd_node *node = pm->dqm->dev;
364 size = pm->pmf->query_status_size;
365 mutex_lock(&pm->lock);
366 kq_acquire_packet_buffer(pm->priv_queue,
374 retval = pm->pmf->query_status(pm, buffer, fence_address, fence_value);
376 kq_submit_packet(pm->priv_queue);
378 kq_rollback_packet(pm->priv_queue);
381 mutex_unlock(&pm->lock);
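
pm_send_query_status emits a fence packet: once the HWS has processed everything ahead of it, it writes fence_value to fence_address. The caller then waits on that memory to learn that, for example, a preemption completed. A hypothetical caller-side wait loop (the driver uses its own fence-wait helper with a preemption timeout; this function name and loop are illustrative only):

static int wait_fence_sketch(uint64_t *fence_addr, uint64_t fence_value,
			     unsigned int timeout_ms)
{
	unsigned long end = jiffies + msecs_to_jiffies(timeout_ms);

	while (READ_ONCE(*fence_addr) != fence_value) {
		if (time_after(jiffies, end))
			return -ETIME;	/* HWS likely hung; caller escalates */
		usleep_range(500, 1000);
	}
	return 0;
}
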
385 int pm_update_grace_period(struct packet_manager *pm, uint32_t grace_period)
387 struct kfd_node *node = pm->dqm->dev;
392 size = pm->pmf->set_grace_period_size;
394 mutex_lock(&pm->lock);
397 kq_acquire_packet_buffer(pm->priv_queue,
408 retval = pm->pmf->set_grace_period(pm, buffer, grace_period);
410 kq_submit_packet(pm->priv_queue);
412 kq_rollback_packet(pm->priv_queue);
416 mutex_unlock(&pm->lock);
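
Note the jump from line 392 to 397: the acquire/submit block in pm_update_grace_period is guarded, since set_grace_period_size is 0 in pmf tables that do not implement the packet. My reading of the elided control flow, hedged:

size = pm->pmf->set_grace_period_size;
mutex_lock(&pm->lock);
if (size) {
	/* same acquire / format / submit-or-rollback block as in
	 * pm_send_set_resources above */
}
mutex_unlock(&pm->lock);
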
420 int pm_send_unmap_queue(struct packet_manager *pm,
424 struct kfd_node *node = pm->dqm->dev;
429 size = pm->pmf->unmap_queues_size;
430 mutex_lock(&pm->lock);
431 kq_acquire_packet_buffer(pm->priv_queue,
439 retval = pm->pmf->unmap_queues(pm, buffer, filter, filter_param, reset);
441 kq_submit_packet(pm->priv_queue);
443 kq_rollback_packet(pm->priv_queue);
446 mutex_unlock(&pm->lock);
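
pm_send_unmap_queue asks the HWS to preempt queues matching a filter; filter_param qualifies filters that take an argument (a PASID, say), and reset requests a hard queue reset instead of a graceful preemption. A usage sketch with the filter enum amdkfd defines in kfd_priv.h:

/* Gracefully preempt everything (the usual "stop scheduling" path): */
retval = pm_send_unmap_queue(pm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES,
			     0, false);

/* Evict one process's queues by PASID, resetting them on a hang: */
retval = pm_send_unmap_queue(pm, KFD_UNMAP_QUEUES_FILTER_BY_PASID,
			     pasid, true);
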
450 void pm_release_ib(struct packet_manager *pm)
452 mutex_lock(&pm->lock);
453 if (pm->allocated) {
454 kfd_gtt_sa_free(pm->dqm->dev, pm->ib_buffer_obj);
455 pm->allocated = false;
457 mutex_unlock(&pm->lock);
464 struct packet_manager *pm = data;
466 mutex_lock(&pm->lock);
468 if (!pm->allocated) {
474 pm->ib_buffer_obj->cpu_ptr, pm->ib_size_bytes, false);
477 mutex_unlock(&pm->lock);
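
The remaining fragments are debugfs hooks. This one, pm_debugfs_runlist, hex-dumps the live runlist IB through a seq_file; line 474 is the tail of that dump call over ib_buffer_obj->cpu_ptr, and the pm->allocated check at line 468 makes an idle scheduler print a short note instead. Reconstructed, approximately:

mutex_lock(&pm->lock);
if (!pm->allocated) {
	seq_puts(m, "  No active runlist\n");	/* wording approximate */
	goto out;
}
/* Dump the IB words the HWS is executing, 4-byte groups, 32 per row. */
seq_hex_dump(m, "  ", DUMP_PREFIX_OFFSET, 32, 4,
	     pm->ib_buffer_obj->cpu_ptr, pm->ib_size_bytes, false);
out:
	mutex_unlock(&pm->lock);
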
481 int pm_debugfs_hang_hws(struct packet_manager *pm)
483 struct kfd_node *node = pm->dqm->dev;
488 if (!pm->priv_queue)
491 size = pm->pmf->query_status_size;
492 mutex_lock(&pm->lock);
493 kq_acquire_packet_buffer(pm->priv_queue,
501 kq_submit_packet(pm->priv_queue);
507 mutex_unlock(&pm->lock);
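
pm_debugfs_hang_hws deliberately wedges the scheduler for testing: after checking the HIQ exists (line 488), it reserves a packet's worth of ring space and, per the surrounding source, fills it with a junk byte pattern rather than a valid packet before submitting, so the CP firmware stalls on it. A hedged sketch of the core:

mutex_lock(&pm->lock);
kq_acquire_packet_buffer(pm->priv_queue, size / sizeof(uint32_t),
			 (unsigned int **)&buffer);
if (!buffer) {
	retval = -ENOMEM;
	goto out;
}
memset(buffer, 0x55, size);	/* not a valid packet: HWS hangs on it */
kq_submit_packet(pm->priv_queue);
out:
	mutex_unlock(&pm->lock);
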