/openbmc/linux/drivers/gpu/drm/msm/
msm_gem_submit.c

    23  #define SUBMIT_ERROR(submit, fmt, ...) \
    24      DRM_DEV_DEBUG_DRIVER((submit)->dev->dev, fmt, ##__VA_ARGS__)
    36  struct msm_gem_submit *submit;  [in submit_create()]
    40  sz = struct_size(submit, bos, nr_bos) +  [in submit_create()]
    41      ((u64)nr_cmds * sizeof(submit->cmd[0]));  [in submit_create()]
    46  submit = kzalloc(sz, GFP_KERNEL);  [in submit_create()]
    47  if (!submit)  [in submit_create()]
    50  submit->hw_fence = msm_fence_alloc();  [in submit_create()]
    51  if (IS_ERR(submit->hw_fence)) {  [in submit_create()]
    52  ret = PTR_ERR(submit->hw_fence);  [in submit_create()]
   [all …]
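Note: submit_create() above sizes one allocation for the struct plus its trailing bos[] array with struct_size() from <linux/overflow.h>, which saturates instead of wrapping on overflow. A minimal sketch of that idiom, using a hypothetical struct rather than the real msm_gem_submit layout:

    #include <linux/overflow.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    /* Hypothetical layout, for illustration only. */
    struct demo_submit {
            unsigned int nr_bos;
            struct {
                    void *obj;
                    u64 iova;
            } bos[];        /* flexible array, sized at allocation time */
    };

    static struct demo_submit *demo_submit_create(unsigned int nr_bos)
    {
            struct demo_submit *submit;

            /* struct_size() returns SIZE_MAX on overflow, so a huge
             * nr_bos makes kzalloc() fail instead of under-allocating. */
            submit = kzalloc(struct_size(submit, bos, nr_bos), GFP_KERNEL);
            if (submit)
                    submit->nr_bos = nr_bos;
            return submit;
    }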
msm_gpu.c

   258  struct msm_gem_submit *submit, char *comm, char *cmd)  [in msm_gpu_crashstate_capture()]
   279  if (submit) {  [in msm_gpu_crashstate_capture()]
   282  state->bos = kcalloc(submit->nr_bos,  [in msm_gpu_crashstate_capture()]
   285  for (i = 0; state->bos && i < submit->nr_bos; i++) {  [in msm_gpu_crashstate_capture()]
   286  msm_gpu_crashstate_get_bo(state, submit->bos[i].obj,  [in msm_gpu_crashstate_capture()]
   287  submit->bos[i].iova,  [in msm_gpu_crashstate_capture()]
   288  should_dump(submit, i));  [in msm_gpu_crashstate_capture()]
   301  struct msm_gem_submit *submit, char *comm, char *cmd)  [in msm_gpu_crashstate_capture()]
   313  struct msm_gem_submit *submit;  [in find_submit()]
   317  list_for_each_entry(submit, &ring->submits, node) {  [in find_submit()]
   [all …]
msm_rd.c

   310  struct msm_gem_submit *submit, int idx,  [in snapshot_buf()]
   313  struct drm_gem_object *obj = submit->bos[idx].obj;  [in snapshot_buf()]
   318  offset = iova - submit->bos[idx].iova;  [in snapshot_buf()]
   320  iova = submit->bos[idx].iova;  [in snapshot_buf()]
   335  if (!(submit->bos[idx].flags & MSM_SUBMIT_BO_READ))  [in snapshot_buf()]
   350  void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,  [in msm_rd_dump_submit()]
   373  task = pid_task(submit->pid, PIDTYPE_PID);  [in msm_rd_dump_submit()]
   377  pid_nr(submit->pid), submit->seqno);  [in msm_rd_dump_submit()]
   380  pid_nr(submit->pid), submit->seqno);  [in msm_rd_dump_submit()]
   386  for (i = 0; i < submit->nr_bos; i++)  [in msm_rd_dump_submit()]
   [all …]
msm_ringbuffer.c

    16  struct msm_gem_submit *submit = to_msm_submit(job);  [in msm_job_run()]
    17  struct msm_fence_context *fctx = submit->ring->fctx;  [in msm_job_run()]
    18  struct msm_gpu *gpu = submit->gpu;  [in msm_job_run()]
    22  msm_fence_init(submit->hw_fence, fctx);  [in msm_job_run()]
    26  for (i = 0; i < submit->nr_bos; i++) {  [in msm_job_run()]
    27  struct drm_gem_object *obj = submit->bos[i].obj;  [in msm_job_run()]
    30  submit->bos[i].flags &= ~BO_PINNED;  [in msm_job_run()]
    38  msm_gpu_submit(gpu, submit);  [in msm_job_run()]
    42  return dma_fence_get(submit->hw_fence);  [in msm_job_run()]
    47  struct msm_gem_submit *submit = to_msm_submit(job);  [in msm_job_free()]
   [all …]
msm_gpu_trace.h

    34  TP_PROTO(struct msm_gem_submit *submit, u64 ticks),
    35  TP_ARGS(submit, ticks),
    44  __entry->pid = pid_nr(submit->pid);
    45  __entry->id = submit->ident;
    46  __entry->ringid = submit->ring->id;
    47  __entry->seqno = submit->seqno;
    57  TP_PROTO(struct msm_gem_submit *submit, u64 elapsed, u64 clock,
    59  TP_ARGS(submit, elapsed, clock, start, end),
    71  __entry->pid = pid_nr(submit->pid);
    72  __entry->id = submit->ident;
   [all …]
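Note: the TP_PROTO/TP_ARGS/__entry fragments above are pieces of TRACE_EVENT() definitions. As a rough sketch of how such an event is declared (the event name here is invented, struct msm_gem_submit is assumed visible, and a real trace header additionally needs the TRACE_SYSTEM/TRACE_INCLUDE boilerplate plus a CREATE_TRACE_POINTS site):

    #include <linux/tracepoint.h>

    TRACE_EVENT(demo_submit,  /* hypothetical event name */
            TP_PROTO(struct msm_gem_submit *submit, u64 ticks),
            TP_ARGS(submit, ticks),
            TP_STRUCT__entry(
                    __field(pid_t, pid)
                    __field(u32, seqno)
                    __field(u64, ticks)
            ),
            TP_fast_assign(
                    __entry->pid = pid_nr(submit->pid);
                    __entry->seqno = submit->seqno;
                    __entry->ticks = ticks;
            ),
            TP_printk("pid=%d seqno=%u ticks=%llu",
                      __entry->pid, __entry->seqno,
                      (unsigned long long)__entry->ticks)
    );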
msm_gem.h

   310  static inline void msm_gem_submit_get(struct msm_gem_submit *submit)  [in msm_gem_submit_get()]
   312  kref_get(&submit->ref);  [in msm_gem_submit_get()]
   315  static inline void msm_gem_submit_put(struct msm_gem_submit *submit)  [in msm_gem_submit_put()]
   317  kref_put(&submit->ref, __msm_gem_submit_destroy);  [in msm_gem_submit_put()]
   320  void msm_submit_retire(struct msm_gem_submit *submit);
   326  should_dump(struct msm_gem_submit *submit, int idx)  [in should_dump()]
   329  return rd_full || (submit->bos[idx].flags & MSM_SUBMIT_BO_DUMP);  [in should_dump()]
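Note: msm_gem_submit_get()/put() above are thin wrappers around the generic kref API. A sketch of that refcounting pattern with demo names (not the msm code):

    #include <linux/kref.h>
    #include <linux/slab.h>

    struct demo_obj {
            struct kref ref;
            /* ... payload ... */
    };

    static void demo_obj_release(struct kref *ref)
    {
            /* container_of() recovers the object holding the kref. */
            struct demo_obj *obj = container_of(ref, struct demo_obj, ref);

            kfree(obj);
    }

    static void demo_obj_get(struct demo_obj *obj)
    {
            kref_get(&obj->ref);    /* one get per additional holder */
    }

    static void demo_obj_put(struct demo_obj *obj)
    {
            /* The release callback runs exactly once, when the last
             * reference is dropped. */
            kref_put(&obj->ref, demo_obj_release);
    }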
/openbmc/linux/drivers/gpu/drm/virtio/ |
virtgpu_submit.c

    48  static int virtio_gpu_do_fence_wait(struct virtio_gpu_submit *submit,  [in virtio_gpu_do_fence_wait()]
    51  u64 context = submit->fence_ctx + submit->ring_idx;  [in virtio_gpu_do_fence_wait()]
    59  static int virtio_gpu_dma_fence_wait(struct virtio_gpu_submit *submit,  [in virtio_gpu_dma_fence_wait()]
    67  err = virtio_gpu_do_fence_wait(submit, f);  [in virtio_gpu_dma_fence_wait()]
    89  virtio_gpu_parse_deps(struct virtio_gpu_submit *submit)  [in virtio_gpu_parse_deps()]
    91  struct drm_virtgpu_execbuffer *exbuf = submit->exbuf;  [in virtio_gpu_parse_deps()]
   129  ret = drm_syncobj_find_fence(submit->file, syncobj_desc.handle,  [in virtio_gpu_parse_deps()]
   134  ret = virtio_gpu_dma_fence_wait(submit, fence);  [in virtio_gpu_parse_deps()]
   141  syncobjs[i] = drm_syncobj_find(submit->file,  [in virtio_gpu_parse_deps()]
   155  submit->num_in_syncobjs = num_in_syncobjs;  [in virtio_gpu_parse_deps()]
   [all …]
/openbmc/linux/drivers/gpu/drm/etnaviv/ |
etnaviv_gem_submit.c

    34  struct etnaviv_gem_submit *submit;  [in submit_create()]
    35  size_t sz = size_vstruct(nr_bos, sizeof(submit->bos[0]), sizeof(*submit));  [in submit_create()]
    37  submit = kzalloc(sz, GFP_KERNEL);  [in submit_create()]
    38  if (!submit)  [in submit_create()]
    41  submit->pmrs = kcalloc(nr_pmrs, sizeof(struct etnaviv_perfmon_request),  [in submit_create()]
    43  if (!submit->pmrs) {  [in submit_create()]
    44  kfree(submit);  [in submit_create()]
    47  submit->nr_pmrs = nr_pmrs;  [in submit_create()]
    49  submit->gpu = gpu;  [in submit_create()]
    50  kref_init(&submit->refcount);  [in submit_create()]
   [all …]
etnaviv_sched.c

    22  struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);  [in etnaviv_sched_run_job()]
    26  fence = etnaviv_gpu_submit(submit);  [in etnaviv_sched_run_job()]
    28  dev_dbg(submit->gpu->dev, "skipping bad job\n");  [in etnaviv_sched_run_job()]
    36  struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);  [in etnaviv_sched_timedout_job()]
    37  struct etnaviv_gpu *gpu = submit->gpu;  [in etnaviv_sched_timedout_job()]
    45  if (dma_fence_is_signaled(submit->out_fence))  [in etnaviv_sched_timedout_job()]
    70  etnaviv_core_dump(submit);  [in etnaviv_sched_timedout_job()]
    71  etnaviv_gpu_recover_hang(submit);  [in etnaviv_sched_timedout_job()]
    85  struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);  [in etnaviv_sched_free_job()]
    89  etnaviv_submit_put(submit);  [in etnaviv_sched_free_job()]
   [all …]
etnaviv_dump.c

   118  void etnaviv_core_dump(struct etnaviv_gem_submit *submit)  [in etnaviv_core_dump()]
   120  struct etnaviv_gpu *gpu = submit->gpu;  [in etnaviv_core_dump()]
   133  mutex_lock(&submit->mmu_context->lock);  [in etnaviv_core_dump()]
   135  mmu_size = etnaviv_iommu_dump_size(submit->mmu_context);  [in etnaviv_core_dump()]
   142  mmu_size + gpu->buffer.size + submit->cmdbuf.size;  [in etnaviv_core_dump()]
   145  for (i = 0; i < submit->nr_bos; i++) {  [in etnaviv_core_dump()]
   146  obj = submit->bos[i].obj;  [in etnaviv_core_dump()]
   165  mutex_unlock(&submit->mmu_context->lock);  [in etnaviv_core_dump()]
   177  etnaviv_core_dump_mmu(&iter, submit->mmu_context, mmu_size);  [in etnaviv_core_dump()]
   181  &submit->mmu_context->cmdbuf_mapping));  [in etnaviv_core_dump()]
   [all …]
etnaviv_gpu.c

  1287  const struct etnaviv_gem_submit *submit = event->submit;  [in sync_point_perfmon_sample()]
  1290  for (i = 0; i < submit->nr_pmrs; i++) {  [in sync_point_perfmon_sample()]
  1291  const struct etnaviv_perfmon_request *pmr = submit->pmrs + i;  [in sync_point_perfmon_sample()]
  1294  etnaviv_perfmon_process(gpu, pmr, submit->exec_state);  [in sync_point_perfmon_sample()]
  1323  const struct etnaviv_gem_submit *submit = event->submit;  [in sync_point_perfmon_sample_post()]
  1343  for (i = 0; i < submit->nr_pmrs; i++) {  [in sync_point_perfmon_sample_post()]
  1344  const struct etnaviv_perfmon_request *pmr = submit->pmrs + i;  [in sync_point_perfmon_sample_post()]
  1352  struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)  [in etnaviv_gpu_submit()]
  1354  struct etnaviv_gpu *gpu = submit->gpu;  [in etnaviv_gpu_submit()]
  1366  if (submit->nr_pmrs)  [in etnaviv_gpu_submit()]
   [all …]
/openbmc/linux/crypto/async_tx/ |
async_raid6_recov.c

    20  size_t len, struct async_submit_ctl *submit)  [in async_sum_product()]
    22  struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,  [in async_sum_product()]
    39  if (submit->flags & ASYNC_TX_FENCE)  [in async_sum_product()]
    58  async_tx_submit(chan, tx, submit);  [in async_sum_product()]
    70  async_tx_quiesce(&submit->depend_tx);  [in async_sum_product()]
    89  struct async_submit_ctl *submit)  [in async_mult()]
    91  struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,  [in async_mult()]
   107  if (submit->flags & ASYNC_TX_FENCE)  [in async_mult()]
   128  async_tx_submit(chan, tx, submit);  [in async_mult()]
   141  async_tx_quiesce(&submit->depend_tx);  [in async_mult()]
   [all …]
async_xor.c

    24  struct async_submit_ctl *submit)  [in do_async_xor()]
    28  dma_async_tx_callback cb_fn_orig = submit->cb_fn;  [in do_async_xor()]
    29  void *cb_param_orig = submit->cb_param;  [in do_async_xor()]
    30  enum async_tx_flags flags_orig = submit->flags;  [in do_async_xor()]
    40  submit->flags = flags_orig;  [in do_async_xor()]
    46  submit->flags &= ~ASYNC_TX_ACK;  [in do_async_xor()]
    47  submit->flags |= ASYNC_TX_FENCE;  [in do_async_xor()]
    48  submit->cb_fn = NULL;  [in do_async_xor()]
    49  submit->cb_param = NULL;  [in do_async_xor()]
    51  submit->cb_fn = cb_fn_orig;  [in do_async_xor()]
   [all …]
async_pq.c

    39  struct async_submit_ctl *submit)  [in do_async_gen_syndrome()]
    43  enum async_tx_flags flags_orig = submit->flags;  [in do_async_gen_syndrome()]
    44  dma_async_tx_callback cb_fn_orig = submit->cb_fn;  [in do_async_gen_syndrome()]
    45  dma_async_tx_callback cb_param_orig = submit->cb_param;  [in do_async_gen_syndrome()]
    52  submit->flags = flags_orig;  [in do_async_gen_syndrome()]
    59  submit->flags &= ~ASYNC_TX_ACK;  [in do_async_gen_syndrome()]
    60  submit->flags |= ASYNC_TX_FENCE;  [in do_async_gen_syndrome()]
    61  submit->cb_fn = NULL;  [in do_async_gen_syndrome()]
    62  submit->cb_param = NULL;  [in do_async_gen_syndrome()]
    64  submit->cb_fn = cb_fn_orig;  [in do_async_gen_syndrome()]
   [all …]
raid6test.c

    60  struct async_submit_ctl submit;  [in raid6_dual_recov()]
    71  init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);  [in raid6_dual_recov()]
    73  disks, bytes, &submit);  [in raid6_dual_recov()]
    91  init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL,  [in raid6_dual_recov()]
    93  tx = async_xor(dest, blocks, 0, count, bytes, &submit);  [in raid6_dual_recov()]
    95  init_async_submit(&submit, 0, tx, NULL, NULL, addr_conv);  [in raid6_dual_recov()]
    97  disks, bytes, &submit);  [in raid6_dual_recov()]
   102  init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);  [in raid6_dual_recov()]
   104  faila, ptrs, offs, &submit);  [in raid6_dual_recov()]
   107  init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);  [in raid6_dual_recov()]
   [all …]
async_tx.c

    43  __async_tx_find_channel(struct async_submit_ctl *submit,  [in __async_tx_find_channel()]
    46  struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;  [in __async_tx_find_channel()]
   144  struct async_submit_ctl *submit)  [in async_tx_submit()]
   146  struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;  [in async_tx_submit()]
   148  tx->callback = submit->cb_fn;  [in async_tx_submit()]
   149  tx->callback_param = submit->cb_param;  [in async_tx_submit()]
   204  if (submit->flags & ASYNC_TX_ACK)  [in async_tx_submit()]
   221  async_trigger_callback(struct async_submit_ctl *submit)  [in async_trigger_callback()]
   226  struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;  [in async_trigger_callback()]
   245  async_tx_submit(chan, tx, submit);  [in async_trigger_callback()]
   [all …]
async_memcpy.c

    34  struct async_submit_ctl *submit)  [in async_memcpy()]
    36  struct dma_chan *chan = async_tx_find_channel(submit, DMA_MEMCPY,  [in async_memcpy()]
    48  if (submit->cb_fn)  [in async_memcpy()]
    50  if (submit->flags & ASYNC_TX_FENCE)  [in async_memcpy()]
    70  async_tx_submit(chan, tx, submit);  [in async_memcpy()]
    76  async_tx_quiesce(&submit->depend_tx);  [in async_memcpy()]
    86  async_tx_sync_epilog(submit);  [in async_memcpy()]
/openbmc/linux/include/linux/ |
async_tx.h

   103  __async_tx_find_channel(struct async_submit_ctl *submit,
   118  async_tx_find_channel(struct async_submit_ctl *submit,  [in async_tx_find_channel()]
   133  async_tx_sync_epilog(struct async_submit_ctl *submit)  [in async_tx_sync_epilog()]
   135  if (submit->cb_fn)  [in async_tx_sync_epilog()]
   136  submit->cb_fn(submit->cb_param);  [in async_tx_sync_epilog()]
   159  struct async_submit_ctl *submit);
   163  int src_cnt, size_t len, struct async_submit_ctl *submit);
   168  int src_cnt, size_t len, struct async_submit_ctl *submit);
   173  struct async_submit_ctl *submit);
   179  struct async_submit_ctl *submit);
   [all …]
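Note: every prototype above threads a struct async_submit_ctl through the call; it carries the dependency descriptor, flags, and completion callback. A hedged sketch of caller-side usage, modeled on the raid6test.c lines earlier (the buffers, prev_tx, and addr_conv are assumed to be set up elsewhere, not taken from a real caller):

    #include <linux/async_tx.h>

    /* Run an XOR that must not start before prev_tx completes. */
    static void demo_xor_after(struct page *dest, struct page **blocks,
                               int src_cnt, size_t len,
                               struct dma_async_tx_descriptor *prev_tx,
                               addr_conv_t *addr_conv)
    {
            struct async_submit_ctl submit;
            struct dma_async_tx_descriptor *tx;

            /* ASYNC_TX_XOR_ZERO_DST zeroes the destination first;
             * passing prev_tx orders this op after its dependency. */
            init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, prev_tx,
                              NULL, NULL, addr_conv);
            tx = async_xor(dest, blocks, 0, src_cnt, len, &submit);

            /* Block until the chain finishes if nothing else will
             * depend on tx. */
            async_tx_quiesce(&tx);
    }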
/openbmc/linux/drivers/dma/ti/ |
cppi41.c

   119  u16 submit;  [member]
   157  [ 0] = { .submit = 32, .complete = 93},
   158  [ 1] = { .submit = 34, .complete = 94},
   159  [ 2] = { .submit = 36, .complete = 95},
   160  [ 3] = { .submit = 38, .complete = 96},
   161  [ 4] = { .submit = 40, .complete = 97},
   162  [ 5] = { .submit = 42, .complete = 98},
   163  [ 6] = { .submit = 44, .complete = 99},
   164  [ 7] = { .submit = 46, .complete = 100},
   165  [ 8] = { .submit = 48, .complete = 101},
   [all …]
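Note: the initializers above pair each DMA channel with a hardware submit queue (where the driver pushes descriptors) and a completion queue (where the hardware posts them back); line 119's u16 submit is one half of that pair. A sketch of how such a table might be declared and consumed (names are illustrative, values copied from the listing):

    #include <linux/types.h>

    struct demo_chan_queues {
            u16 submit;     /* queue the driver feeds descriptors into */
            u16 complete;   /* queue the hardware posts completions to */
    };

    static const struct demo_chan_queues demo_queues_tx[] = {
            [0] = { .submit = 32, .complete = 93 },
            [1] = { .submit = 34, .complete = 94 },
            /* ... one entry per channel ... */
    };

    /* Caller guarantees chan_num indexes a populated entry. */
    static u16 demo_submit_queue(unsigned int chan_num)
    {
            return demo_queues_tx[chan_num].submit;
    }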
/openbmc/linux/drivers/gpu/drm/lima/ |
lima_gem.c

   280  static int lima_gem_add_deps(struct drm_file *file, struct lima_submit *submit)  [in lima_gem_add_deps()]
   284  for (i = 0; i < ARRAY_SIZE(submit->in_sync); i++) {  [in lima_gem_add_deps()]
   285  if (!submit->in_sync[i])  [in lima_gem_add_deps()]
   288  err = drm_sched_job_add_syncobj_dependency(&submit->task->base, file,  [in lima_gem_add_deps()]
   289  submit->in_sync[i], 0);  [in lima_gem_add_deps()]
   297  int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)  [in lima_gem_submit()]
   305  struct lima_bo **bos = submit->lbos;  [in lima_gem_submit()]
   307  if (submit->out_sync) {  [in lima_gem_submit()]
   308  out_sync = drm_syncobj_find(file, submit->out_sync);  [in lima_gem_submit()]
   313  for (i = 0; i < submit->nr_bos; i++) {  [in lima_gem_submit()]
   [all …]
lima_drv.c

   111  struct lima_submit submit = {0};  [in lima_ioctl_gem_submit()]
   125  bos = kvcalloc(args->nr_bos, sizeof(*submit.bos) + sizeof(*submit.lbos), GFP_KERNEL);  [in lima_ioctl_gem_submit()]
   129  size = args->nr_bos * sizeof(*submit.bos);  [in lima_ioctl_gem_submit()]
   157  submit.pipe = args->pipe;  [in lima_ioctl_gem_submit()]
   158  submit.bos = bos;  [in lima_ioctl_gem_submit()]
   159  submit.lbos = (void *)bos + size;  [in lima_ioctl_gem_submit()]
   160  submit.nr_bos = args->nr_bos;  [in lima_ioctl_gem_submit()]
   161  submit.task = task;  [in lima_ioctl_gem_submit()]
   162  submit.ctx = ctx;  [in lima_ioctl_gem_submit()]
   163  submit.flags = args->flags;  [in lima_ioctl_gem_submit()]
   [all …]
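Note: lima_ioctl_gem_submit() above makes a single kvcalloc() call sized for two parallel arrays and carves the second one (lbos) out of the tail of the first, so both live in one allocation freed by one kvfree(). A sketch of that co-allocation trick with hypothetical element types:

    #include <linux/slab.h>
    #include <linux/types.h>

    struct demo_bo_desc { u32 handle; u32 flags; };  /* hypothetical */
    struct demo_bo;                                  /* hypothetical */

    static int demo_alloc_bo_arrays(u32 nr_bos,
                                    struct demo_bo_desc **descs_out,
                                    struct demo_bo ***bos_out)
    {
            /* One zeroed, overflow-checked allocation holds nr_bos
             * descriptors followed by nr_bos pointers. */
            struct demo_bo_desc *descs =
                    kvcalloc(nr_bos,
                             sizeof(*descs) + sizeof(struct demo_bo *),
                             GFP_KERNEL);

            if (!descs)
                    return -ENOMEM;

            *descs_out = descs;
            *bos_out = (void *)descs + nr_bos * sizeof(*descs);
            return 0;       /* later: one kvfree(descs) frees both */
    }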
/openbmc/linux/fs/iomap/ |
direct-io.c

    46  } submit;  [member]
   175  struct task_struct *waiter = dio->submit.waiter;  [in iomap_dio_bio_end_io()]
   177  WRITE_ONCE(dio->submit.waiter, NULL);  [in iomap_dio_bio_end_io()]
   292  !bdev_iter_is_aligned(iomap->bdev, dio->submit.iter))  [in iomap_dio_bio_iter()]
   330  orig_count = iov_iter_count(dio->submit.iter);  [in iomap_dio_bio_iter()]
   331  iov_iter_truncate(dio->submit.iter, length);  [in iomap_dio_bio_iter()]
   333  if (!iov_iter_count(dio->submit.iter))  [in iomap_dio_bio_iter()]
   370  nr_pages = bio_iov_vecs_to_alloc(dio->submit.iter, BIO_MAX_VECS);  [in iomap_dio_bio_iter()]
   374  iov_iter_revert(dio->submit.iter, copied);  [in iomap_dio_bio_iter()]
   387  ret = bio_iov_iter_get_pages(bio, dio->submit.iter);  [in iomap_dio_bio_iter()]
   [all …]
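Note: the waiter lines above are the two halves of a lock-free completion handshake: the submitting task parks itself in dio->submit.waiter, and the bio end_io handler clears the field and wakes it. A reduced sketch of both sides (struct and function names simplified; the real logic lives in iomap_dio_bio_end_io() and the iomap_dio_rw() wait path):

    #include <linux/sched.h>

    struct demo_dio {
            struct task_struct *waiter;     /* NULL once I/O completes */
    };

    /* Completion side, typically bio end_io context; runs once. */
    static void demo_dio_complete(struct demo_dio *dio)
    {
            struct task_struct *waiter = dio->waiter;

            /* WRITE_ONCE pairs with the READ_ONCE poll below:
             * publish completion, then wake the parked task. */
            WRITE_ONCE(dio->waiter, NULL);
            wake_up_process(waiter);
    }

    /* Submission side: sleep until end_io clears ->waiter. */
    static void demo_dio_wait(struct demo_dio *dio)
    {
            for (;;) {
                    set_current_state(TASK_UNINTERRUPTIBLE);
                    if (!READ_ONCE(dio->waiter))
                            break;
                    io_schedule();
            }
            __set_current_state(TASK_RUNNING);
    }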
/openbmc/linux/drivers/gpu/drm/msm/adreno/ |
a2xx_gpu.c

    13  static void a2xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)  [in a2xx_submit()]
    15  struct msm_ringbuffer *ring = submit->ring;  [in a2xx_submit()]
    18  for (i = 0; i < submit->nr_cmds; i++) {  [in a2xx_submit()]
    19  switch (submit->cmd[i].type) {  [in a2xx_submit()]
    25  if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)  [in a2xx_submit()]
    30  OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));  [in a2xx_submit()]
    31  OUT_RING(ring, submit->cmd[i].size);  [in a2xx_submit()]
    38  OUT_RING(ring, submit->seqno);  [in a2xx_submit()]
    47  OUT_RING(ring, submit->seqno);  [in a2xx_submit()]
   498  .submit = a2xx_submit,
a5xx_gpu.c

    66  static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit)  [in a5xx_submit_in_rb()]
    70  struct msm_ringbuffer *ring = submit->ring;  [in a5xx_submit_in_rb()]
    75  for (i = 0; i < submit->nr_cmds; i++) {  [in a5xx_submit_in_rb()]
    76  switch (submit->cmd[i].type) {  [in a5xx_submit_in_rb()]
    80  if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)  [in a5xx_submit_in_rb()]
    85  obj = submit->bos[submit->cmd[i].idx].obj;  [in a5xx_submit_in_rb()]
    86  dwords = submit->cmd[i].size;  [in a5xx_submit_in_rb()]
   114  a5xx_gpu->last_seqno[ring->id] = submit->seqno;  [in a5xx_submit_in_rb()]
   123  ring->memptrs->fence = submit->seqno;  [in a5xx_submit_in_rb()]
   127  static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)  [in a5xx_submit()]
   [all …]
/openbmc/linux/drivers/gpu/drm/i915/selftests/ |
i915_active.c

    81  struct i915_sw_fence *submit;  [in __live_active_setup()]
    90  submit = heap_fence_create(GFP_KERNEL);  [in __live_active_setup()]
    91  if (!submit) {  [in __live_active_setup()]
   109  err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,  [in __live_active_setup()]
   110  submit,  [in __live_active_setup()]
   135  i915_sw_fence_commit(submit);  [in __live_active_setup()]
   136  heap_fence_put(submit);  [in __live_active_setup()]
|