Lines Matching full:cb (habanalabs command buffer management, command_buffer.c)
17 static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb) in cb_map_mem() argument
26 "Mapping a CB to the device's MMU is not supported\n"); in cb_map_mem()
30 if (cb->is_mmu_mapped) in cb_map_mem()
33 cb->roundup_size = roundup(cb->size, page_size); in cb_map_mem()
35 cb->virtual_addr = (u64) gen_pool_alloc(ctx->cb_va_pool, cb->roundup_size); in cb_map_mem()
36 if (!cb->virtual_addr) { in cb_map_mem()
37 dev_err(hdev->dev, "Failed to allocate device virtual address for CB\n"); in cb_map_mem()
43 rc = hl_mmu_map_contiguous(ctx, cb->virtual_addr, cb->bus_address, cb->roundup_size); in cb_map_mem()
45 dev_err(hdev->dev, "Failed to map VA %#llx to CB\n", cb->virtual_addr); in cb_map_mem()
55 cb->is_mmu_mapped = true; in cb_map_mem()
60 hl_mmu_unmap_contiguous(ctx, cb->virtual_addr, cb->roundup_size); in cb_map_mem()
63 gen_pool_free(ctx->cb_va_pool, cb->virtual_addr, cb->roundup_size); in cb_map_mem()
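The matches above are from cb_map_mem(): it rounds the CB size up to a whole MMU page, reserves a device virtual address from the per-context cb_va_pool (a gen_pool), and maps the CB's contiguous DMA buffer at that address, unwinding the VA reservation if the mapping fails. A minimal sketch of that pattern, with locking and the capability check omitted and the page-size field assumed:

/* Sketch only: reserve a device VA from the context's pool, then map the CB. */
static int cb_map_mem_sketch(struct hl_ctx *ctx, struct hl_cb *cb)
{
	struct hl_device *hdev = ctx->hdev;
	u32 page_size = hdev->asic_prop.pmmu.page_size;	/* field name assumed */
	int rc;

	if (cb->is_mmu_mapped)
		return 0;

	/* The MMU maps whole pages, so cover the CB with a rounded-up range. */
	cb->roundup_size = roundup(cb->size, page_size);

	cb->virtual_addr = (u64) gen_pool_alloc(ctx->cb_va_pool, cb->roundup_size);
	if (!cb->virtual_addr) {
		dev_err(hdev->dev, "Failed to allocate device virtual address for CB\n");
		return -ENOMEM;
	}

	rc = hl_mmu_map_contiguous(ctx, cb->virtual_addr, cb->bus_address, cb->roundup_size);
	if (rc) {
		dev_err(hdev->dev, "Failed to map VA %#llx to CB\n", cb->virtual_addr);
		/* Unwind: return the reserved VA range to the pool. */
		gen_pool_free(ctx->cb_va_pool, cb->virtual_addr, cb->roundup_size);
		return rc;
	}

	cb->is_mmu_mapped = true;
	return 0;
}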
68 static void cb_unmap_mem(struct hl_ctx *ctx, struct hl_cb *cb) in cb_unmap_mem() argument
73 hl_mmu_unmap_contiguous(ctx, cb->virtual_addr, cb->roundup_size); in cb_unmap_mem()
77 gen_pool_free(ctx->cb_va_pool, cb->virtual_addr, cb->roundup_size); in cb_unmap_mem()
80 static void cb_fini(struct hl_device *hdev, struct hl_cb *cb) in cb_fini() argument
82 if (cb->is_internal) in cb_fini()
84 (uintptr_t)cb->kernel_address, cb->size); in cb_fini()
86 hl_asic_dma_free_coherent(hdev, cb->size, cb->kernel_address, cb->bus_address); in cb_fini()
88 kfree(cb); in cb_fini()
91 static void cb_do_release(struct hl_device *hdev, struct hl_cb *cb) in cb_do_release() argument
93 if (cb->is_pool) { in cb_do_release()
94 atomic_set(&cb->is_handle_destroyed, 0); in cb_do_release()
96 list_add(&cb->pool_list, &hdev->cb_pool); in cb_do_release()
99 cb_fini(hdev, cb); in cb_do_release()
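cb_do_release() recycles pool-owned CBs back onto hdev->cb_pool instead of freeing them; only non-pool CBs go through cb_fini(). A short sketch of that decision, with the pool lock name assumed:

/* Sketch only: recycle pool CBs, free everything else. */
static void cb_do_release_sketch(struct hl_device *hdev, struct hl_cb *cb)
{
	if (cb->is_pool) {
		/* Re-arm the destroy-once guard before the CB is reused. */
		atomic_set(&cb->is_handle_destroyed, 0);

		spin_lock(&hdev->cb_pool_lock);		/* lock name assumed */
		list_add(&cb->pool_list, &hdev->cb_pool);
		spin_unlock(&hdev->cb_pool_lock);
	} else {
		cb_fini(hdev, cb);
	}
}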
106 struct hl_cb *cb = NULL; in hl_cb_alloc() local
113 * limitations in some of the ASICs, the kernel must copy the user CB in hl_cb_alloc()
119 cb = kzalloc(sizeof(*cb), GFP_ATOMIC); in hl_cb_alloc()
121 if (!cb) in hl_cb_alloc()
122 cb = kzalloc(sizeof(*cb), GFP_KERNEL); in hl_cb_alloc()
124 if (!cb) in hl_cb_alloc()
130 kfree(cb); in hl_cb_alloc()
135 cb->is_internal = true; in hl_cb_alloc()
136 cb->bus_address = hdev->internal_cb_va_base + cb_offset; in hl_cb_alloc()
138 p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address, GFP_ATOMIC); in hl_cb_alloc()
140 p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address, GFP_KERNEL); in hl_cb_alloc()
142 p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address, in hl_cb_alloc()
148 "failed to allocate %d of dma memory for CB\n", in hl_cb_alloc()
150 kfree(cb); in hl_cb_alloc()
154 cb->kernel_address = p; in hl_cb_alloc()
155 cb->size = cb_size; in hl_cb_alloc()
157 return cb; in hl_cb_alloc()
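hl_cb_alloc() tries GFP_ATOMIC first and retries with GFP_KERNEL only if the atomic allocation fails, for both the hl_cb struct and the coherent DMA buffer behind it. A condensed sketch of that fallback (the internal-CB branch and the ctx_id bookkeeping visible above are left out):

/* Sketch only: atomic-first allocation with a sleeping GFP_KERNEL fallback. */
static struct hl_cb *hl_cb_alloc_sketch(struct hl_device *hdev, u32 cb_size)
{
	struct hl_cb *cb;
	void *p;

	cb = kzalloc(sizeof(*cb), GFP_ATOMIC);
	if (!cb)
		cb = kzalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address, GFP_ATOMIC);
	if (!p)
		p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address, GFP_KERNEL);
	if (!p) {
		dev_err(hdev->dev, "failed to allocate %d of dma memory for CB\n", cb_size);
		kfree(cb);
		return NULL;
	}

	cb->kernel_address = p;
	cb->size = cb_size;

	return cb;
}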
170 struct hl_cb *cb = buf->private; in hl_cb_mmap_mem_release() local
172 hl_debugfs_remove_cb(cb); in hl_cb_mmap_mem_release()
174 if (cb->is_mmu_mapped) in hl_cb_mmap_mem_release()
175 cb_unmap_mem(cb->ctx, cb); in hl_cb_mmap_mem_release()
177 hl_ctx_put(cb->ctx); in hl_cb_mmap_mem_release()
179 cb_do_release(cb->hdev, cb); in hl_cb_mmap_mem_release()
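hl_cb_mmap_mem_release() tears the CB down in roughly the reverse order it was set up: remove the debugfs entry, undo the MMU mapping if one exists, drop the context reference, and finally release the CB itself. A condensed sketch of that ordering:

/* Sketch only: release in reverse order of acquisition. */
static void cb_release_sketch(struct hl_cb *cb)
{
	hl_debugfs_remove_cb(cb);

	if (cb->is_mmu_mapped)
		cb_unmap_mem(cb->ctx, cb);

	hl_ctx_put(cb->ctx);		/* drop the reference taken at alloc time */

	cb_do_release(cb->hdev, cb);
}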
185 struct hl_cb *cb; in hl_cb_mmap_mem_alloc() local
199 cb = list_first_entry(&cb_args->hdev->cb_pool, in hl_cb_mmap_mem_alloc()
200 typeof(*cb), pool_list); in hl_cb_mmap_mem_alloc()
201 list_del(&cb->pool_list); in hl_cb_mmap_mem_alloc()
206 dev_dbg(cb_args->hdev->dev, "CB pool is empty\n"); in hl_cb_mmap_mem_alloc()
212 cb = hl_cb_alloc(cb_args->hdev, cb_args->cb_size, ctx_id, cb_args->internal_cb); in hl_cb_mmap_mem_alloc()
213 if (!cb) in hl_cb_mmap_mem_alloc()
217 cb->hdev = cb_args->hdev; in hl_cb_mmap_mem_alloc()
218 cb->ctx = cb_args->ctx; in hl_cb_mmap_mem_alloc()
219 cb->buf = buf; in hl_cb_mmap_mem_alloc()
220 cb->buf->mappable_size = cb->size; in hl_cb_mmap_mem_alloc()
221 cb->buf->private = cb; in hl_cb_mmap_mem_alloc()
223 hl_ctx_get(cb->ctx); in hl_cb_mmap_mem_alloc()
228 "CB mapping is not supported for kernel context\n"); in hl_cb_mmap_mem_alloc()
233 rc = cb_map_mem(cb_args->ctx, cb); in hl_cb_mmap_mem_alloc()
238 hl_debugfs_add_cb(cb); in hl_cb_mmap_mem_alloc()
243 hl_ctx_put(cb->ctx); in hl_cb_mmap_mem_alloc()
244 cb_do_release(cb_args->hdev, cb); in hl_cb_mmap_mem_alloc()
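hl_cb_mmap_mem_alloc() prefers pulling a recycled CB off hdev->cb_pool and only calls hl_cb_alloc() when the pool is empty. The pool-first part, condensed into a helper-style sketch (lock name and parameter types assumed):

/* Sketch only: reuse a pooled CB when available, otherwise allocate a new one. */
static struct hl_cb *cb_get_from_pool_or_alloc(struct hl_device *hdev, u32 cb_size,
					       u64 ctx_id, bool internal_cb)
{
	struct hl_cb *cb = NULL;

	spin_lock(&hdev->cb_pool_lock);		/* lock name assumed */
	if (!list_empty(&hdev->cb_pool)) {
		cb = list_first_entry(&hdev->cb_pool, typeof(*cb), pool_list);
		list_del(&cb->pool_list);
	}
	spin_unlock(&hdev->cb_pool_lock);

	if (!cb) {
		dev_dbg(hdev->dev, "CB pool is empty\n");
		cb = hl_cb_alloc(hdev, cb_size, ctx_id, internal_cb);
	}

	return cb;
}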
252 struct hl_cb *cb = buf->private; in hl_cb_mmap() local
254 return cb->hdev->asic_funcs->mmap(cb->hdev, vma, cb->kernel_address, in hl_cb_mmap()
255 cb->bus_address, cb->size); in hl_cb_mmap()
259 .topic = "CB",
287 dev_err(hdev->dev, "CB size %d must be less than %d\n", in hl_cb_create()
305 struct hl_cb *cb; in hl_cb_destroy() local
308 cb = hl_cb_get(mmg, cb_handle); in hl_cb_destroy()
309 if (!cb) { in hl_cb_destroy()
310 dev_dbg(mmg->dev, "CB destroy failed, no CB was found for handle %#llx\n", in hl_cb_destroy()
315 /* Make sure that CB handle isn't destroyed more than once */ in hl_cb_destroy()
316 rc = atomic_cmpxchg(&cb->is_handle_destroyed, 0, 1); in hl_cb_destroy()
317 hl_cb_put(cb); in hl_cb_destroy()
319 dev_dbg(mmg->dev, "CB destroy failed, handle %#llx was already destroyed\n", in hl_cb_destroy()
329 dev_dbg(mmg->dev, "CB 0x%llx is destroyed while still in use\n", cb_handle); in hl_cb_destroy()
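hl_cb_destroy() guards against a handle being destroyed twice: atomic_cmpxchg() on is_handle_destroyed flips 0 to 1 atomically and returns the old value, so exactly one caller sees 0 and proceeds while any racing caller sees 1 and gets an error. A minimal sketch of that guard (the hl_mem_mgr type name and errno are assumptions):

/* Sketch only: only the caller that flips 0 -> 1 may destroy the handle. */
static int cb_destroy_once(struct hl_mem_mgr *mmg, struct hl_cb *cb, u64 cb_handle)
{
	/* atomic_cmpxchg() returns the previous value: 0 for the winner. */
	if (atomic_cmpxchg(&cb->is_handle_destroyed, 0, 1)) {
		dev_dbg(mmg->dev, "CB destroy failed, handle %#llx was already destroyed\n",
			cb_handle);
		return -EINVAL;		/* errno assumed */
	}

	return 0;
}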
337 struct hl_cb *cb; in hl_cb_info() local
340 cb = hl_cb_get(mmg, handle); in hl_cb_info()
341 if (!cb) { in hl_cb_info()
343 "CB info failed, no match to handle 0x%llx\n", handle); in hl_cb_info()
348 if (cb->is_mmu_mapped) { in hl_cb_info()
349 *device_va = cb->virtual_addr; in hl_cb_info()
351 dev_err(mmg->dev, "CB is not mapped to the device's MMU\n"); in hl_cb_info()
356 *usage_cnt = atomic_read(&cb->cs_cnt); in hl_cb_info()
360 hl_cb_put(cb); in hl_cb_info()
375 "Device is %s. Can't execute CB IOCTL\n", in hl_cb_ioctl()
384 "User requested CB size %d must be less than %d\n", in hl_cb_ioctl()
438 void hl_cb_put(struct hl_cb *cb) in hl_cb_put() argument
440 hl_mmap_mem_buf_put(cb->buf); in hl_cb_put()
447 struct hl_cb *cb; in hl_cb_kernel_create() local
454 "Failed to allocate CB for the kernel driver %d\n", rc); in hl_cb_kernel_create()
458 cb = hl_cb_get(&hdev->kernel_mem_mgr, cb_handle); in hl_cb_kernel_create()
460 if (!cb) { in hl_cb_kernel_create()
461 dev_crit(hdev->dev, "Kernel CB handle invalid 0x%x\n", in hl_cb_kernel_create()
466 return cb; in hl_cb_kernel_create()
476 struct hl_cb *cb; in hl_cb_pool_init() local
483 cb = hl_cb_alloc(hdev, hdev->asic_prop.cb_pool_cb_size, in hl_cb_pool_init()
485 if (cb) { in hl_cb_pool_init()
486 cb->is_pool = true; in hl_cb_pool_init()
487 list_add(&cb->pool_list, &hdev->cb_pool); in hl_cb_pool_init()
499 struct hl_cb *cb, *tmp; in hl_cb_pool_fini() local
501 list_for_each_entry_safe(cb, tmp, &hdev->cb_pool, pool_list) { in hl_cb_pool_fini()
502 list_del(&cb->pool_list); in hl_cb_pool_fini()
503 cb_fini(hdev, cb); in hl_cb_pool_fini()
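hl_cb_pool_init() pre-fills hdev->cb_pool with CBs of cb_pool_cb_size, and hl_cb_pool_fini() drains the list with list_for_each_entry_safe(), which tolerates entries being deleted and freed during iteration. A condensed sketch of the drain:

/* Sketch only: drain the CB pool, freeing every pre-allocated CB. */
static void cb_pool_drain(struct hl_device *hdev)
{
	struct hl_cb *cb, *tmp;

	/* _safe variant: cb can be freed because tmp keeps the iteration alive. */
	list_for_each_entry_safe(cb, tmp, &hdev->cb_pool, pool_list) {
		list_del(&cb->pool_list);
		cb_fini(hdev, cb);
	}
}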
521 "Failed to create VA gen pool for CB mapping\n"); in hl_cb_va_pool_init()
534 "Failed to add memory to VA gen pool for CB mapping\n"); in hl_cb_va_pool_init()