Lines matching full:slice (identifier search results from drivers/accel/qaic/qaic_data.c, the Linux kernel's QAIC accelerator DRM driver)
155 struct bo_slice *slice = container_of(kref, struct bo_slice, ref_count); in free_slice() local
157 list_del(&slice->slice); in free_slice()
158 drm_gem_object_put(&slice->bo->base); in free_slice()
159 sg_free_table(slice->sgt); in free_slice()
160 kfree(slice->sgt); in free_slice()
161 kfree(slice->reqs); in free_slice()
162 kfree(slice); in free_slice()
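
The matches above cover free_slice(), the kref release callback: it unlinks the slice from its BO's list, drops the GEM object reference that pinned the parent buffer, frees the scatter-gather table and the request array, and finally frees the slice itself. A standalone sketch of that teardown order (plain C with hypothetical types; the real code uses kref, list_del(), drm_gem_object_put() and sg_free_table()):

#include <stdlib.h>

struct table { int *entries; };

struct slice_like {
	struct slice_like *next, *prev;  /* intrusive list node */
	struct table *tbl;               /* owned, like slice->sgt */
	int *reqs;                       /* owned, like slice->reqs */
};

/* Release in the same order as free_slice(): unlink, drop the parent
 * reference, free owned sub-allocations, then free the object. */
static void slice_release(struct slice_like *s)
{
	s->prev->next = s->next;    /* 1. unlink (list_del analogue) */
	s->next->prev = s->prev;
	/* 2. parent ref drop would go here (drm_gem_object_put) */
	free(s->tbl->entries);      /* 3. sg_free_table analogue */
	free(s->tbl);               /*    kfree(slice->sgt) */
	free(s->reqs);              /*    kfree(slice->reqs) */
	free(s);                    /* 4. kfree(slice) */
}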
248 static int encode_reqs(struct qaic_device *qdev, struct bo_slice *slice, in encode_reqs() argument
260 if (!slice->no_xfer) in encode_reqs()
261 cmd |= (slice->dir == DMA_TO_DEVICE ? INBOUND_XFER : OUTBOUND_XFER); in encode_reqs()
291 * When we end up splitting up a single request (i.e. a buf slice) into in encode_reqs()
300 for_each_sgtable_dma_sg(slice->sgt, sg, i) { in encode_reqs()
301 slice->reqs[i].cmd = cmd; in encode_reqs()
302 slice->reqs[i].src_addr = cpu_to_le64(slice->dir == DMA_TO_DEVICE ? in encode_reqs()
304 slice->reqs[i].dest_addr = cpu_to_le64(slice->dir == DMA_TO_DEVICE ? in encode_reqs()
312 slice->reqs[i].len = cpu_to_le32((u32)sg_dma_len(sg)); in encode_reqs()
315 slice->reqs[i].sem_cmd0 = cpu_to_le32(ENCODE_SEM(req->sem0.val, in encode_reqs()
322 slice->reqs[i].sem_cmd1 = cpu_to_le32(ENCODE_SEM(req->sem1.val, in encode_reqs()
329 slice->reqs[i].sem_cmd2 = cpu_to_le32(ENCODE_SEM(req->sem2.val, in encode_reqs()
336 slice->reqs[i].sem_cmd3 = cpu_to_le32(ENCODE_SEM(req->sem3.val, in encode_reqs()
347 slice->reqs[i].cmd |= GEN_COMPLETION; in encode_reqs()
348 slice->reqs[i].db_addr = db_addr; in encode_reqs()
349 slice->reqs[i].db_len = db_len; in encode_reqs()
350 slice->reqs[i].db_data = db_data; in encode_reqs()
365 req->sem0.flags |= (slice->dir == DMA_TO_DEVICE ? in encode_reqs()
367 slice->reqs[i].sem_cmd0 = cpu_to_le32(ENCODE_SEM(req->sem0.val, req->sem0.index, in encode_reqs()
370 slice->reqs[i].sem_cmd1 = cpu_to_le32(ENCODE_SEM(req->sem1.val, req->sem1.index, in encode_reqs()
373 slice->reqs[i].sem_cmd2 = cpu_to_le32(ENCODE_SEM(req->sem2.val, req->sem2.index, in encode_reqs()
376 slice->reqs[i].sem_cmd3 = cpu_to_le32(ENCODE_SEM(req->sem3.val, req->sem3.index, in encode_reqs()
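
These encode_reqs() matches show one DBC request being built per DMA scatter-gather segment: the transfer direction decides which side of the copy gets the host DMA address, each segment records its length, semaphore commands are encoded into sem_cmd0..3, and only the slice's final request gets GEN_COMPLETION plus the doorbell fields. That last detail is what the comment at line 291 is about: a slice split across several DMA requests must still signal exactly one completion. A rough standalone analogue of that shape (hypothetical seg/req types and command bits; endianness conversion and semaphore encoding omitted):

#include <stdint.h>
#include <stddef.h>

enum dir { TO_DEVICE, FROM_DEVICE };

struct seg { uint64_t dma_addr; uint32_t len; };
struct req { uint64_t src, dst; uint32_t len; uint32_t cmd; };

#define CMD_XFER       0x1u
#define CMD_COMPLETION 0x2u   /* ring the doorbell on this entry */

static void encode(struct req *reqs, const struct seg *segs, size_t n,
		   enum dir d, uint64_t dev_addr)
{
	for (size_t i = 0; i < n; i++) {
		reqs[i].cmd = CMD_XFER;
		/* direction picks which end is the host DMA address */
		reqs[i].src = d == TO_DEVICE ? segs[i].dma_addr : dev_addr;
		reqs[i].dst = d == TO_DEVICE ? dev_addr : segs[i].dma_addr;
		reqs[i].len = segs[i].len;
		dev_addr += segs[i].len;
	}
	/* only the slice's final request carries the completion bits */
	if (n)
		reqs[n - 1].cmd |= CMD_COMPLETION;
}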
387 struct bo_slice *slice; in qaic_map_one_slice() local
394 slice = kmalloc(sizeof(*slice), GFP_KERNEL); in qaic_map_one_slice()
395 if (!slice) { in qaic_map_one_slice()
400 slice->reqs = kcalloc(sgt->nents, sizeof(*slice->reqs), GFP_KERNEL); in qaic_map_one_slice()
401 if (!slice->reqs) { in qaic_map_one_slice()
406 slice->no_xfer = !slice_ent->size; in qaic_map_one_slice()
407 slice->sgt = sgt; in qaic_map_one_slice()
408 slice->nents = sgt->nents; in qaic_map_one_slice()
409 slice->dir = bo->dir; in qaic_map_one_slice()
410 slice->bo = bo; in qaic_map_one_slice()
411 slice->size = slice_ent->size; in qaic_map_one_slice()
412 slice->offset = slice_ent->offset; in qaic_map_one_slice()
414 ret = encode_reqs(qdev, slice, slice_ent); in qaic_map_one_slice()
419 kref_init(&slice->ref_count); in qaic_map_one_slice()
421 list_add_tail(&slice->slice, &bo->slices); in qaic_map_one_slice()
426 kfree(slice->reqs); in qaic_map_one_slice()
428 kfree(slice); in qaic_map_one_slice()
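
qaic_map_one_slice() allocates the slice and its per-segment request array, fills in the bookkeeping fields, calls encode_reqs(), and only then takes its reference and links the slice into bo->slices; lines 426/428 are the reverse-order error unwinding. A minimal sketch of that kernel-style goto-unwind pattern (user-space C, hypothetical names; encode_stub stands in for encode_reqs()):

#include <stdint.h>
#include <stdlib.h>
#include <errno.h>

struct req { uint32_t cmd; };
struct slice { struct req *reqs; size_t nents; };

static int encode_stub(struct slice *s) { (void)s; return 0; }

/* Each failure jumps to the label that frees exactly what was set up
 * before it, in reverse order. */
static int map_one_slice(struct slice **out, size_t nents)
{
	struct slice *s;
	int ret;

	s = malloc(sizeof(*s));
	if (!s)
		return -ENOMEM;

	s->reqs = calloc(nents, sizeof(*s->reqs));
	if (!s->reqs) {
		ret = -ENOMEM;
		goto free_slice;
	}
	s->nents = nents;

	ret = encode_stub(s);       /* stands in for encode_reqs() */
	if (ret)
		goto free_reqs;

	*out = s;
	return 0;

free_reqs:
	free(s->reqs);
free_slice:
	free(s);
	return ret;
}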
894 struct bo_slice *slice, *temp; in qaic_free_slices_bo() local
896 list_for_each_entry_safe(slice, temp, &bo->slices, slice) in qaic_free_slices_bo()
897 kref_put(&slice->ref_count, free_slice); in qaic_free_slices_bo()
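
qaic_free_slices_bo() uses list_for_each_entry_safe() because kref_put() may invoke free_slice(), which list_del()s the very node being iterated; the _safe variant caches the next pointer first. The same idiom in plain C (hypothetical node type):

#include <stdlib.h>

struct node { struct node *next; };

/* Analogue of list_for_each_entry_safe(): save the next pointer
 * before the body frees the current node, so iteration survives
 * the deletion. */
static void free_all(struct node *head)
{
	struct node *n = head, *tmp;

	while (n) {
		tmp = n->next;   /* saved before n may be freed */
		free(n);         /* stands in for kref_put -> free_slice */
		n = tmp;
	}
}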
1048 static inline int copy_exec_reqs(struct qaic_device *qdev, struct bo_slice *slice, u32 dbc_id, in copy_exec_reqs() argument
1052 struct dbc_req *reqs = slice->reqs; in copy_exec_reqs()
1062 if (avail < slice->nents) in copy_exec_reqs()
1065 if (tail + slice->nents > dbc->nelem) { in copy_exec_reqs()
1067 avail = min_t(u32, avail, slice->nents); in copy_exec_reqs()
1071 avail = slice->nents - avail; in copy_exec_reqs()
1076 sizeof(*reqs) * slice->nents); in copy_exec_reqs()
1079 *ptail = (tail + slice->nents) % dbc->nelem; in copy_exec_reqs()
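
copy_exec_reqs() pushes a slice's requests into the DBC ring buffer: it first checks that enough slots are free (line 1062), splits the copy into two memcpy()s around the wrap point if the requests would run past the end of the ring (line 1065), and advances the tail modulo the ring size (line 1079). A standalone sketch of both steps (uint32_t elements instead of struct dbc_req; the reserved-slot convention below is a common ring design and my reading of the driver, since the exact accounting is not visible in the matched lines):

#include <stdint.h>
#include <string.h>

/* Free slots between tail (producer) and head (consumer); one slot is
 * kept unused so a full ring is distinguishable from an empty one. */
static int have_space(uint32_t head, uint32_t tail, uint32_t nelem,
		      uint32_t need)
{
	uint32_t avail = head - tail;

	if (head <= tail)
		avail += nelem;
	avail--;
	return avail >= need;
}

/* Copy n elements at tail, splitting the memcpy at the wrap point;
 * returns the new tail. */
static uint32_t ring_copy(uint32_t *ring, uint32_t nelem, uint32_t tail,
			  const uint32_t *src, uint32_t n)
{
	if (tail + n > nelem) {
		uint32_t first = nelem - tail;   /* slots before the wrap */

		memcpy(ring + tail, src, first * sizeof(*ring));
		memcpy(ring, src + first, (n - first) * sizeof(*ring));
	} else {
		memcpy(ring + tail, src, n * sizeof(*ring));
	}
	return (tail + n) % nelem;
}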
1089 static inline int copy_partial_exec_reqs(struct qaic_device *qdev, struct bo_slice *slice, in copy_partial_exec_reqs() argument
1093 struct dbc_req *reqs = slice->reqs; in copy_partial_exec_reqs()
1110 for (i = 0; i < slice->nents; i++) { in copy_partial_exec_reqs()
1148 memcpy(last_req, reqs + slice->nents - 1, sizeof(*reqs)); in copy_partial_exec_reqs()
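
copy_partial_exec_reqs() handles a slice that a resize cuts short: it walks the requests up to the byte limit, and line 1148 clones the slice's last request for the final entry so the completion/doorbell fields stay on the tail before its length is clamped. A simplified sketch of that walk (hypothetical types; ring-wrap handling omitted; assumes 0 < limit <= the slice's total byte count, which is the only case this path is taken for):

#include <stdint.h>

struct req { uint32_t len; uint32_t cmd; /* completion bits live here */ };

/* Emit requests covering only the first `limit` bytes of a slice.
 * The final entry is cloned from the slice's LAST request so the
 * completion/doorbell fields stay on the tail, then its length is
 * clamped to the boundary. */
static uint32_t emit_partial(struct req *out, const struct req *reqs,
			     uint32_t nents, uint32_t limit)
{
	uint32_t n = 0, covered = 0;

	while (n < nents && covered + reqs[n].len < limit) {
		out[n] = reqs[n];            /* whole segment fits */
		covered += reqs[n].len;
		n++;
	}
	out[n] = reqs[nents - 1];            /* clone the tail request */
	out[n].len = limit - covered;        /* clamp to the boundary */
	return n + 1;                        /* entries emitted */
}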
1171 struct bo_slice *slice; in send_bo_list_to_device() local
1213 list_for_each_entry(slice, &bo->slices, slice) { in send_bo_list_to_device()
1215 * If this slice does not fall under the given in send_bo_list_to_device()
1216 * resize then skip this slice and continue the loop in send_bo_list_to_device()
1218 if (is_partial && pexec[i].resize && pexec[i].resize <= slice->offset) in send_bo_list_to_device()
1221 for (j = 0; j < slice->nents; j++) in send_bo_list_to_device()
1222 slice->reqs[j].req_id = cpu_to_le16(bo->req_id); in send_bo_list_to_device()
1226 * resize has cut this slice short then do a partial copy in send_bo_list_to_device()
1230 pexec[i].resize < slice->offset + slice->size) in send_bo_list_to_device()
1231 ret = copy_partial_exec_reqs(qdev, slice, in send_bo_list_to_device()
1232 pexec[i].resize - slice->offset, in send_bo_list_to_device()
1235 ret = copy_exec_reqs(qdev, slice, dbc->id, head, tail); in send_bo_list_to_device()
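
send_bo_list_to_device() stamps the BO's req_id into every request of every slice, then classifies each slice against the partial-execute resize: fully past the new end (line 1218) means skip, straddling it (line 1230) means copy_partial_exec_reqs(), otherwise a full copy_exec_reqs(). That three-way decision, as I read the conditions above (hypothetical enum; the driver also special-cases resize == 0 as a no-data-transfer submit, which this sketch ignores):

#include <stdint.h>

enum action { SKIP, PARTIAL, FULL };

static enum action classify(uint64_t offset, uint64_t size, uint64_t resize)
{
	if (resize <= offset)
		return SKIP;     /* slice starts at or past the new end */
	if (resize < offset + size)
		return PARTIAL;  /* the new end lands inside this slice */
	return FULL;             /* slice lies entirely before the end */
}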
1863 struct bo_slice *slice, *slice_temp; in release_dbc() local
1881 list_for_each_entry_safe(slice, slice_temp, &bo->slices, slice) in release_dbc()
1882 kref_put(&slice->ref_count, free_slice); in release_dbc()
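
release_dbc() walks the same slice lists when the DMA bridge channel itself is torn down, again dropping each slice's reference rather than freeing directly. Reference counting is what lets more than one teardown path put an object without coordinating over who frees it; a toy demonstration (plain C, hypothetical names; not a claim about how many references the driver actually holds per slice):

#include <stdlib.h>
#include <stdatomic.h>

struct obj { atomic_int ref; };

static void put(struct obj *o)
{
	/* fetch_sub returns the prior value: 1 means this was the last ref */
	if (atomic_fetch_sub(&o->ref, 1) == 1)
		free(o);
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	if (!o)
		return 1;
	atomic_init(&o->ref, 2);
	put(o);   /* first path drops its ref: no free yet */
	put(o);   /* last ref gone: freed exactly once */
	return 0;
}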