/openbmc/linux/include/drm/
    gpu_scheduler.h
         88  struct drm_sched_entity {  [struct]
        260  struct drm_sched_entity *current_entity;  [argument]
        354  struct drm_sched_entity *entity;
        406  struct drm_sched_entity *s_entity);
        529  struct drm_sched_entity *entity,
        559  struct drm_sched_entity *entity);
        563  struct drm_sched_entity *entity);
        565  struct drm_sched_entity *entity);
        569  int drm_sched_entity_init(struct drm_sched_entity *entity,
        586  struct drm_sched_entity *s_entity, void *owner);
        [all …]
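The gpu_scheduler.h hits above cover the core of the entity API, including drm_sched_entity_init() at line 569. As orientation, below is a minimal sketch of the usual entity lifecycle; it assumes the 6.x-era signatures visible in this tree (init against a scheduler list, job init/arm/push, destroy on teardown), and the my_sched and example_* names are made up for illustration.

    #include <linux/kernel.h>
    #include <drm/gpu_scheduler.h>

    static struct drm_gpu_scheduler my_sched;        /* hypothetical scheduler instance */
    static struct drm_gpu_scheduler *sched_list[] = { &my_sched };

    /* Bind an entity to one (or more) schedulers at a fixed priority,
     * typically once per open file or per context. */
    static int example_entity_open(struct drm_sched_entity *entity)
    {
            return drm_sched_entity_init(entity, DRM_SCHED_PRIORITY_NORMAL,
                                         sched_list, ARRAY_SIZE(sched_list),
                                         NULL /* no shared "guilty" counter */);
    }

    /* Attach a job to the entity and hand it to the scheduler. */
    static int example_entity_submit(struct drm_sched_job *job,
                                     struct drm_sched_entity *entity, void *owner)
    {
            int ret = drm_sched_job_init(job, entity, owner);

            if (ret)
                    return ret;

            drm_sched_job_arm(job);
            drm_sched_entity_push_job(job);
            return 0;
    }

    /* Flush queued jobs (up to a timeout) and tear the entity down on close. */
    static void example_entity_close(struct drm_sched_entity *entity)
    {
            drm_sched_entity_destroy(entity);
    }
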
/openbmc/linux/drivers/gpu/drm/scheduler/
    sched_entity.c
         59  int drm_sched_entity_init(struct drm_sched_entity *entity,  [in drm_sched_entity_init()]
         68  memset(entity, 0, sizeof(struct drm_sched_entity));  [in drm_sched_entity_init()]
        134  bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)  [in drm_sched_entity_is_ready()]
        152  int drm_sched_entity_error(struct drm_sched_entity *entity)  [in drm_sched_entity_error()]
        307  void drm_sched_entity_fini(struct drm_sched_entity *entity)  [in drm_sched_entity_fini()]
        345  struct drm_sched_entity *entity =  [in drm_sched_entity_clear_dep()]
        346  container_of(cb, struct drm_sched_entity, cb);  [in drm_sched_entity_clear_dep()]
        359  struct drm_sched_entity *entity =  [in drm_sched_entity_wakeup()]
        360  container_of(cb, struct drm_sched_entity, cb);  [in drm_sched_entity_wakeup()]
        434  struct drm_sched_entity *entity)  [in drm_sched_job_dependency()]
        [all …]
    sched_main.c
         82  struct drm_sched_entity *ent_a = rb_entry((a), struct drm_sched_entity, rb_tree_node);  [in drm_sched_entity_compare_before()]
         83  struct drm_sched_entity *ent_b = rb_entry((b), struct drm_sched_entity, rb_tree_node);  [in drm_sched_entity_compare_before()]
        168  struct drm_sched_entity *entity)  [in drm_sched_rq_remove_entity()]
        194  static struct drm_sched_entity *
        197  struct drm_sched_entity *entity;  [in drm_sched_rq_select_entity_rr()]
        238  static struct drm_sched_entity *
        245  struct drm_sched_entity *entity;  [in drm_sched_rq_select_entity_fifo()]
        881  static struct drm_sched_entity *
        884  struct drm_sched_entity *entity;  [in drm_sched_select_entity()]
       1178  struct drm_sched_entity *tmp;  [in drm_sched_increase_karma()]
        [all …]
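Lines 82-83 belong to the comparator used by the FIFO run-queue policy: entities are kept in a per-runqueue rb-tree ordered by the submit time of their oldest queued job, and drm_sched_rq_select_entity_fifo() picks the leftmost node. A sketch of such a comparator follows; the oldest_job_waiting field is taken from the 6.x scheduler and should be treated as an assumption about this exact tree.

    #include <linux/ktime.h>
    #include <linux/rbtree.h>
    #include <drm/gpu_scheduler.h>

    /* Returns true when entity a has an older pending job than entity b,
     * so the rb-tree keeps the longest-waiting entity leftmost and the
     * FIFO policy can service it first. */
    static __always_inline bool
    drm_sched_entity_compare_before(struct rb_node *a, const struct rb_node *b)
    {
            struct drm_sched_entity *ent_a = rb_entry(a, struct drm_sched_entity, rb_tree_node);
            struct drm_sched_entity *ent_b = rb_entry(b, struct drm_sched_entity, rb_tree_node);

            return ktime_before(ent_a->oldest_job_waiting, ent_b->oldest_job_waiting);
    }
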
    gpu_scheduler_trace.h
         36  TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
         39  __field(struct drm_sched_entity *, entity)
         63  TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
         68  TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),

    sched_fence.c
        208  struct drm_sched_fence *drm_sched_fence_alloc(struct drm_sched_entity *entity,  [in drm_sched_fence_alloc()]
        224  struct drm_sched_entity *entity)  [in drm_sched_fence_init()]

/openbmc/linux/drivers/gpu/drm/amd/amdgpu/
    amdgpu_ctx.h
         41  struct drm_sched_entity entity;
         77  u32 ring, struct drm_sched_entity **entity);
         79  struct drm_sched_entity *entity,
         82  struct drm_sched_entity *entity,
         91  struct drm_sched_entity *entity);

    amdgpu_job.h
         89  struct drm_sched_entity *entity, void *owner,
         92  struct drm_sched_entity *entity, void *owner,

    amdgpu_job.c
         94  struct drm_sched_entity *entity, void *owner,  [in amdgpu_job_alloc()]
        122  struct drm_sched_entity *entity, void *owner,  [in amdgpu_job_alloc_with_ib()]
        254  struct drm_sched_entity *s_entity)  [in amdgpu_job_prepare_job()]
        326  struct drm_sched_entity *s_entity = NULL;  [in amdgpu_job_stop_all_jobs_on_sched()]

    amdgpu_ctx.c
        434  u32 ring, struct drm_sched_entity **entity)  [in amdgpu_ctx_get_entity()]
        437  struct drm_sched_entity *ctx_entity;  [in amdgpu_ctx_get_entity()]
        757  struct drm_sched_entity *entity,  [in amdgpu_ctx_add_fence()]
        784  struct drm_sched_entity *entity,  [in amdgpu_ctx_get_fence()]
        859  struct drm_sched_entity *entity)  [in amdgpu_ctx_wait_prev_fence()]
        907  struct drm_sched_entity *entity;  [in amdgpu_ctx_mgr_entity_flush()]
        937  struct drm_sched_entity *entity;  [in amdgpu_ctx_mgr_entity_fini()]

    amdgpu_ttm.h
         67  struct drm_sched_entity high_pr;
         69  struct drm_sched_entity low_pr;

    amdgpu_cs.h
         61  struct drm_sched_entity *entities[AMDGPU_CS_GANG_SIZE];

    amdgpu_vce.h
         51  struct drm_sched_entity entity;

    amdgpu_uvd.h
         65  struct drm_sched_entity entity;

    amdgpu_vm.h
        299  struct drm_sched_entity immediate;
        300  struct drm_sched_entity delayed;

    amdgpu_vm_sdma.c
         56  struct drm_sched_entity *entity = p->immediate ? &p->vm->immediate  [in amdgpu_vm_sdma_alloc_job()]

    amdgpu_cs.c
         76  struct drm_sched_entity *entity;  [in amdgpu_cs_job_idx()]
        398  struct drm_sched_entity *entity;  [in amdgpu_cs_p2_dependencies()]
       1109  struct drm_sched_entity *entity = p->entities[i];  [in amdgpu_cs_vm_handling()]
       1483  struct drm_sched_entity *entity;  [in amdgpu_cs_wait_ioctl()]
       1531  struct drm_sched_entity *entity;  [in amdgpu_cs_get_fence()]

/openbmc/linux/drivers/gpu/drm/msm/
    msm_submitqueue.c
        121  static struct drm_sched_entity *
        137  struct drm_sched_entity *entity;  [in get_sched_entity()]

    msm_gpu.h
        430  struct drm_sched_entity *entities[NR_SCHED_PRIORITIES * MSM_GPU_MAX_RINGS];
        516  struct drm_sched_entity *entity;

/openbmc/linux/drivers/gpu/drm/nouveau/
    nouveau_sched.h
        102  struct drm_sched_entity base;

/openbmc/linux/drivers/gpu/drm/lima/
    lima_sched.h
         37  struct drm_sched_entity base;

/openbmc/linux/drivers/gpu/drm/etnaviv/
    etnaviv_drv.h
         34  struct drm_sched_entity sched_entity[ETNA_MAX_PIPES];

/openbmc/linux/drivers/gpu/drm/panfrost/
    panfrost_device.h
        141  struct drm_sched_entity sched_entity[NUM_JOB_SLOTS];

    panfrost_job.c
        901  struct drm_sched_entity *entity = &panfrost_priv->sched_entity[i];  [in panfrost_job_close()]

/openbmc/linux/drivers/gpu/drm/v3d/
    v3d_drv.h
        169  struct drm_sched_entity sched_entity[V3D_MAX_QUEUES];

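The driver hits above (amdgpu, msm, nouveau, lima, etnaviv, panfrost, v3d) all embed struct drm_sched_entity directly, typically one entity per open file, per priority level, or per hardware queue. Below is a hedged sketch of that pattern; the my_* names and the queue count are invented for illustration.

    #include <linux/kernel.h>
    #include <drm/gpu_scheduler.h>

    #define MY_NUM_QUEUES 2    /* invented; drivers use e.g. V3D_MAX_QUEUES, NUM_JOB_SLOTS */

    /* Per-open-file driver state: one embedded entity per hardware queue. */
    struct my_file_priv {
            struct drm_sched_entity sched_entity[MY_NUM_QUEUES];
    };

    static int my_file_open(struct my_file_priv *priv,
                            struct drm_gpu_scheduler **sched /* one per queue */)
    {
            int i, ret;

            for (i = 0; i < MY_NUM_QUEUES; i++) {
                    ret = drm_sched_entity_init(&priv->sched_entity[i],
                                                DRM_SCHED_PRIORITY_NORMAL,
                                                &sched[i], 1, NULL);
                    if (ret)
                            goto err_fini;
            }
            return 0;

    err_fini:
            while (--i >= 0)
                    drm_sched_entity_destroy(&priv->sched_entity[i]);
            return ret;
    }

    static void my_file_close(struct my_file_priv *priv)
    {
            int i;

            for (i = 0; i < MY_NUM_QUEUES; i++)
                    drm_sched_entity_destroy(&priv->sched_entity[i]);
    }
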
/openbmc/linux/Documentation/gpu/rfc/
    xe.rst
         85  drm_sched_entity.
        175  drm_sched_entity) and making sure drm_scheduler can cope with the lack of job