Lines matching full:op (references to the identifier "op" in the nouveau uvmm VM_BIND code; a minimal usage sketch follows the listing)
62 enum vm_bind_op op; member
457 struct drm_gpuva_op *op = last; in nouveau_uvmm_sm_prepare_unwind() local
462 drm_gpuva_for_each_op_from_reverse(op, ops) { in nouveau_uvmm_sm_prepare_unwind()
463 switch (op->op) { in nouveau_uvmm_sm_prepare_unwind()
468 struct drm_gpuva_op_remap *r = &op->remap; in nouveau_uvmm_sm_prepare_unwind()
480 op_unmap_prepare_unwind(op->unmap.va); in nouveau_uvmm_sm_prepare_unwind()
493 drm_gpuva_for_each_op(op, ops) { in nouveau_uvmm_sm_prepare_unwind()
494 switch (op->op) { in nouveau_uvmm_sm_prepare_unwind()
504 struct drm_gpuva_op_remap *r = &op->remap; in nouveau_uvmm_sm_prepare_unwind()
522 struct drm_gpuva_op_unmap *u = &op->unmap; in nouveau_uvmm_sm_prepare_unwind()
546 if (op == last) in nouveau_uvmm_sm_prepare_unwind()
579 struct drm_gpuva_op_map *op, in op_map_prepare() argument
592 drm_gpuva_map(&uvmm->umgr, &uvma->va, op); in op_map_prepare()
613 struct drm_gpuva_op *op; in nouveau_uvmm_sm_prepare() local
618 drm_gpuva_for_each_op(op, ops) { in nouveau_uvmm_sm_prepare()
619 switch (op->op) { in nouveau_uvmm_sm_prepare()
623 ret = op_map_prepare(uvmm, &new->map, &op->map, args); in nouveau_uvmm_sm_prepare()
638 struct drm_gpuva_op_remap *r = &op->remap; in nouveau_uvmm_sm_prepare()
679 struct drm_gpuva_op_unmap *u = &op->unmap; in nouveau_uvmm_sm_prepare()
718 if (op != drm_gpuva_first_op(ops)) in nouveau_uvmm_sm_prepare()
720 drm_gpuva_prev_op(op), in nouveau_uvmm_sm_prepare()
751 op_gem_obj(struct drm_gpuva_op *op) in op_gem_obj() argument
753 switch (op->op) { in op_gem_obj()
755 return op->map.gem.obj; in op_gem_obj()
761 return op->remap.unmap->va->gem.obj; in op_gem_obj()
763 return op->unmap.va->gem.obj; in op_gem_obj()
823 struct drm_gpuva_op *op; in nouveau_uvmm_sm() local
825 drm_gpuva_for_each_op(op, ops) { in nouveau_uvmm_sm()
826 switch (op->op) { in nouveau_uvmm_sm()
831 op_remap(&op->remap, new); in nouveau_uvmm_sm()
834 op_unmap(&op->unmap); in nouveau_uvmm_sm()
865 struct drm_gpuva_op *op; in nouveau_uvmm_sm_cleanup() local
867 drm_gpuva_for_each_op(op, ops) { in nouveau_uvmm_sm_cleanup()
868 switch (op->op) { in nouveau_uvmm_sm_cleanup()
872 struct drm_gpuva_op_remap *r = &op->remap; in nouveau_uvmm_sm_cleanup()
896 struct drm_gpuva_op_unmap *u = &op->unmap; in nouveau_uvmm_sm_cleanup()
992 struct bind_job_op *op) in bind_validate_op() argument
995 struct drm_gem_object *obj = op->gem.obj; in bind_validate_op()
997 if (op->op == OP_MAP) { in bind_validate_op()
998 if (op->gem.offset & ~PAGE_MASK) in bind_validate_op()
1001 if (obj->size <= op->gem.offset) in bind_validate_op()
1004 if (op->va.range > (obj->size - op->gem.offset)) in bind_validate_op()
1008 return nouveau_uvmm_validate_range(uvmm, op->va.addr, op->va.range); in bind_validate_op()
1016 struct bind_job_op *op; in bind_validate_map_sparse() local
1022 list_for_each_op(op, &bind_job->ops) { in bind_validate_map_sparse()
1023 if (op->op == OP_UNMAP) { in bind_validate_map_sparse()
1024 u64 op_addr = op->va.addr; in bind_validate_map_sparse()
1025 u64 op_end = op_addr + op->va.range; in bind_validate_map_sparse()
1088 struct bind_job_op *op; in bind_validate_region() local
1091 list_for_each_op(op, &bind_job->ops) { in bind_validate_region()
1092 u64 op_addr = op->va.addr; in bind_validate_region()
1093 u64 op_range = op->va.range; in bind_validate_region()
1096 switch (op->op) { in bind_validate_region()
1118 struct drm_gpuva_op *op; in bind_link_gpuvas() local
1120 drm_gpuva_for_each_op(op, ops) { in bind_link_gpuvas()
1121 switch (op->op) { in bind_link_gpuvas()
1126 if (op->remap.prev) in bind_link_gpuvas()
1128 if (op->remap.next) in bind_link_gpuvas()
1130 drm_gpuva_unlink(op->remap.unmap->va); in bind_link_gpuvas()
1133 drm_gpuva_unlink(op->unmap.va); in bind_link_gpuvas()
1148 struct bind_job_op *op; in nouveau_uvmm_bind_job_submit() local
1151 list_for_each_op(op, &bind_job->ops) { in nouveau_uvmm_bind_job_submit()
1152 if (op->op == OP_MAP) { in nouveau_uvmm_bind_job_submit()
1153 op->gem.obj = drm_gem_object_lookup(job->file_priv, in nouveau_uvmm_bind_job_submit()
1154 op->gem.handle); in nouveau_uvmm_bind_job_submit()
1155 if (!op->gem.obj) in nouveau_uvmm_bind_job_submit()
1159 ret = bind_validate_op(job, op); in nouveau_uvmm_bind_job_submit()
1179 list_for_each_op(op, &bind_job->ops) { in nouveau_uvmm_bind_job_submit()
1180 switch (op->op) { in nouveau_uvmm_bind_job_submit()
1183 op->va.addr, in nouveau_uvmm_bind_job_submit()
1184 op->va.range); in nouveau_uvmm_bind_job_submit()
1190 op->reg = nouveau_uvma_region_find(uvmm, op->va.addr, in nouveau_uvmm_bind_job_submit()
1191 op->va.range); in nouveau_uvmm_bind_job_submit()
1192 if (!op->reg || op->reg->dirty) { in nouveau_uvmm_bind_job_submit()
1197 op->ops = drm_gpuva_sm_unmap_ops_create(&uvmm->umgr, in nouveau_uvmm_bind_job_submit()
1198 op->va.addr, in nouveau_uvmm_bind_job_submit()
1199 op->va.range); in nouveau_uvmm_bind_job_submit()
1200 if (IS_ERR(op->ops)) { in nouveau_uvmm_bind_job_submit()
1201 ret = PTR_ERR(op->ops); in nouveau_uvmm_bind_job_submit()
1205 ret = nouveau_uvmm_sm_unmap_prepare(uvmm, &op->new, in nouveau_uvmm_bind_job_submit()
1206 op->ops); in nouveau_uvmm_bind_job_submit()
1208 drm_gpuva_ops_free(&uvmm->umgr, op->ops); in nouveau_uvmm_bind_job_submit()
1209 op->ops = NULL; in nouveau_uvmm_bind_job_submit()
1210 op->reg = NULL; in nouveau_uvmm_bind_job_submit()
1214 nouveau_uvma_region_dirty(op->reg); in nouveau_uvmm_bind_job_submit()
1221 op->va.addr, in nouveau_uvmm_bind_job_submit()
1222 op->va.range); in nouveau_uvmm_bind_job_submit()
1226 u64 op_addr = op->va.addr; in nouveau_uvmm_bind_job_submit()
1227 u64 op_end = op_addr + op->va.range; in nouveau_uvmm_bind_job_submit()
1243 op->ops = drm_gpuva_sm_map_ops_create(&uvmm->umgr, in nouveau_uvmm_bind_job_submit()
1244 op->va.addr, in nouveau_uvmm_bind_job_submit()
1245 op->va.range, in nouveau_uvmm_bind_job_submit()
1246 op->gem.obj, in nouveau_uvmm_bind_job_submit()
1247 op->gem.offset); in nouveau_uvmm_bind_job_submit()
1248 if (IS_ERR(op->ops)) { in nouveau_uvmm_bind_job_submit()
1249 ret = PTR_ERR(op->ops); in nouveau_uvmm_bind_job_submit()
1253 ret = nouveau_uvmm_sm_map_prepare(uvmm, &op->new, in nouveau_uvmm_bind_job_submit()
1254 reg, op->ops, in nouveau_uvmm_bind_job_submit()
1255 op->va.addr, in nouveau_uvmm_bind_job_submit()
1256 op->va.range, in nouveau_uvmm_bind_job_submit()
1257 op->flags & 0xff); in nouveau_uvmm_bind_job_submit()
1259 drm_gpuva_ops_free(&uvmm->umgr, op->ops); in nouveau_uvmm_bind_job_submit()
1260 op->ops = NULL; in nouveau_uvmm_bind_job_submit()
1267 op->ops = drm_gpuva_sm_unmap_ops_create(&uvmm->umgr, in nouveau_uvmm_bind_job_submit()
1268 op->va.addr, in nouveau_uvmm_bind_job_submit()
1269 op->va.range); in nouveau_uvmm_bind_job_submit()
1270 if (IS_ERR(op->ops)) { in nouveau_uvmm_bind_job_submit()
1271 ret = PTR_ERR(op->ops); in nouveau_uvmm_bind_job_submit()
1275 ret = nouveau_uvmm_sm_unmap_prepare(uvmm, &op->new, in nouveau_uvmm_bind_job_submit()
1276 op->ops); in nouveau_uvmm_bind_job_submit()
1278 drm_gpuva_ops_free(&uvmm->umgr, op->ops); in nouveau_uvmm_bind_job_submit()
1279 op->ops = NULL; in nouveau_uvmm_bind_job_submit()
1293 list_for_each_op(op, &bind_job->ops) { in nouveau_uvmm_bind_job_submit()
1296 if (IS_ERR_OR_NULL(op->ops)) in nouveau_uvmm_bind_job_submit()
1299 drm_gpuva_for_each_op(va_op, op->ops) { in nouveau_uvmm_bind_job_submit()
1308 op = list_last_op(&bind_job->ops); in nouveau_uvmm_bind_job_submit()
1315 list_for_each_op(op, &bind_job->ops) { in nouveau_uvmm_bind_job_submit()
1318 if (IS_ERR_OR_NULL(op->ops)) in nouveau_uvmm_bind_job_submit()
1321 drm_gpuva_for_each_op(va_op, op->ops) { in nouveau_uvmm_bind_job_submit()
1331 if (unlikely(va_op->op == DRM_GPUVA_OP_UNMAP)) in nouveau_uvmm_bind_job_submit()
1338 op = list_last_op(&bind_job->ops); in nouveau_uvmm_bind_job_submit()
1364 list_for_each_op(op, &bind_job->ops) { in nouveau_uvmm_bind_job_submit()
1365 switch (op->op) { in nouveau_uvmm_bind_job_submit()
1369 bind_link_gpuvas(op->ops, &op->new); in nouveau_uvmm_bind_job_submit()
1384 op = list_prev_op(op); in nouveau_uvmm_bind_job_submit()
1386 list_for_each_op_from_reverse(op, &bind_job->ops) { in nouveau_uvmm_bind_job_submit()
1387 switch (op->op) { in nouveau_uvmm_bind_job_submit()
1389 nouveau_uvma_region_destroy(uvmm, op->va.addr, in nouveau_uvmm_bind_job_submit()
1390 op->va.range); in nouveau_uvmm_bind_job_submit()
1393 __nouveau_uvma_region_insert(uvmm, op->reg); in nouveau_uvmm_bind_job_submit()
1394 nouveau_uvmm_sm_unmap_prepare_unwind(uvmm, &op->new, in nouveau_uvmm_bind_job_submit()
1395 op->ops); in nouveau_uvmm_bind_job_submit()
1398 nouveau_uvmm_sm_map_prepare_unwind(uvmm, &op->new, in nouveau_uvmm_bind_job_submit()
1399 op->ops, in nouveau_uvmm_bind_job_submit()
1400 op->va.addr, in nouveau_uvmm_bind_job_submit()
1401 op->va.range); in nouveau_uvmm_bind_job_submit()
1404 nouveau_uvmm_sm_unmap_prepare_unwind(uvmm, &op->new, in nouveau_uvmm_bind_job_submit()
1405 op->ops); in nouveau_uvmm_bind_job_submit()
1409 drm_gpuva_ops_free(&uvmm->umgr, op->ops); in nouveau_uvmm_bind_job_submit()
1410 op->ops = NULL; in nouveau_uvmm_bind_job_submit()
1411 op->reg = NULL; in nouveau_uvmm_bind_job_submit()
1437 struct bind_job_op *op; in nouveau_uvmm_bind_job_run() local
1440 list_for_each_op(op, &bind_job->ops) { in nouveau_uvmm_bind_job_run()
1441 switch (op->op) { in nouveau_uvmm_bind_job_run()
1446 ret = nouveau_uvmm_sm_map(uvmm, &op->new, op->ops); in nouveau_uvmm_bind_job_run()
1453 ret = nouveau_uvmm_sm_unmap(uvmm, &op->new, op->ops); in nouveau_uvmm_bind_job_run()
1474 struct bind_job_op *op, *next; in nouveau_uvmm_bind_job_free_work_fn() local
1476 list_for_each_op(op, &bind_job->ops) { in nouveau_uvmm_bind_job_free_work_fn()
1477 struct drm_gem_object *obj = op->gem.obj; in nouveau_uvmm_bind_job_free_work_fn()
1479 /* When nouveau_uvmm_bind_job_submit() fails op->ops and op->reg in nouveau_uvmm_bind_job_free_work_fn()
1482 switch (op->op) { in nouveau_uvmm_bind_job_free_work_fn()
1487 if (!IS_ERR_OR_NULL(op->ops)) in nouveau_uvmm_bind_job_free_work_fn()
1488 nouveau_uvmm_sm_unmap_cleanup(uvmm, &op->new, in nouveau_uvmm_bind_job_free_work_fn()
1489 op->ops); in nouveau_uvmm_bind_job_free_work_fn()
1491 if (op->reg) { in nouveau_uvmm_bind_job_free_work_fn()
1492 nouveau_uvma_region_sparse_unref(op->reg); in nouveau_uvmm_bind_job_free_work_fn()
1494 nouveau_uvma_region_remove(op->reg); in nouveau_uvmm_bind_job_free_work_fn()
1496 nouveau_uvma_region_complete(op->reg); in nouveau_uvmm_bind_job_free_work_fn()
1497 nouveau_uvma_region_put(op->reg); in nouveau_uvmm_bind_job_free_work_fn()
1502 if (!IS_ERR_OR_NULL(op->ops)) in nouveau_uvmm_bind_job_free_work_fn()
1503 nouveau_uvmm_sm_map_cleanup(uvmm, &op->new, in nouveau_uvmm_bind_job_free_work_fn()
1504 op->ops); in nouveau_uvmm_bind_job_free_work_fn()
1507 if (!IS_ERR_OR_NULL(op->ops)) in nouveau_uvmm_bind_job_free_work_fn()
1508 nouveau_uvmm_sm_unmap_cleanup(uvmm, &op->new, in nouveau_uvmm_bind_job_free_work_fn()
1509 op->ops); in nouveau_uvmm_bind_job_free_work_fn()
1513 if (!IS_ERR_OR_NULL(op->ops)) in nouveau_uvmm_bind_job_free_work_fn()
1514 drm_gpuva_ops_free(&uvmm->umgr, op->ops); in nouveau_uvmm_bind_job_free_work_fn()
1530 list_for_each_op_safe(op, next, &bind_job->ops) { in nouveau_uvmm_bind_job_free_work_fn()
1531 list_del(&op->entry); in nouveau_uvmm_bind_job_free_work_fn()
1532 kfree(op); in nouveau_uvmm_bind_job_free_work_fn()
1558 struct bind_job_op *op; in bind_job_op_from_uop() local
1560 op = *pop = kzalloc(sizeof(*op), GFP_KERNEL); in bind_job_op_from_uop()
1561 if (!op) in bind_job_op_from_uop()
1564 switch (uop->op) { in bind_job_op_from_uop()
1566 op->op = uop->flags & DRM_NOUVEAU_VM_BIND_SPARSE ? in bind_job_op_from_uop()
1570 op->op = uop->flags & DRM_NOUVEAU_VM_BIND_SPARSE ? in bind_job_op_from_uop()
1574 op->op = uop->op; in bind_job_op_from_uop()
1578 op->flags = uop->flags; in bind_job_op_from_uop()
1579 op->va.addr = uop->addr; in bind_job_op_from_uop()
1580 op->va.range = uop->range; in bind_job_op_from_uop()
1581 op->gem.handle = uop->handle; in bind_job_op_from_uop()
1582 op->gem.offset = uop->bo_offset; in bind_job_op_from_uop()
1590 struct bind_job_op *op, *next; in bind_job_ops_free() local
1592 list_for_each_op_safe(op, next, ops) { in bind_job_ops_free()
1593 list_del(&op->entry); in bind_job_ops_free()
1594 kfree(op); in bind_job_ops_free()
1604 struct bind_job_op *op; in nouveau_uvmm_bind_job_init() local
1614 for (i = 0; i < __args->op.count; i++) { in nouveau_uvmm_bind_job_init()
1615 ret = bind_job_op_from_uop(&op, &__args->op.s[i]); in nouveau_uvmm_bind_job_init()
1619 list_add_tail(&op->entry, &job->ops); in nouveau_uvmm_bind_job_init()
1702 args->op.count = opc; in nouveau_uvmm_vm_bind_ucopy()
1703 args->op.s = u_memcpya(ops, opc, in nouveau_uvmm_vm_bind_ucopy()
1704 sizeof(*args->op.s)); in nouveau_uvmm_vm_bind_ucopy()
1705 if (IS_ERR(args->op.s)) in nouveau_uvmm_vm_bind_ucopy()
1706 return PTR_ERR(args->op.s); in nouveau_uvmm_vm_bind_ucopy()
1734 u_free(args->op.s); in nouveau_uvmm_vm_bind_ucopy()
1743 u_free(args->op.s); in nouveau_uvmm_vm_bind_ufree()
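Two distinct objects share the name "op" in the matches above: the driver-local struct bind_job_op describing one userspace VM_BIND request (fields visible in the listing: op, flags, va.addr, va.range, gem.handle, gem.offset, gem.obj, reg, ops, new, entry), and the GPUVA manager's struct drm_gpuva_op entries produced by drm_gpuva_sm_map_ops_create() / drm_gpuva_sm_unmap_ops_create(). The sketch below is not the driver code; it is a minimal illustration of the iteration pattern the nouveau_uvmm_sm_*() matches reflect, assuming the pre-GPUVM drm_gpuva_* API and the <drm/drm_gpuva_mgr.h> header visible in this kernel version. The function name sketch_walk_gpuva_ops() is invented for illustration.

#include <drm/drm_gpuva_mgr.h>
#include <linux/errno.h>

/*
 * Minimal sketch, not the nouveau code: walk a drm_gpuva_ops list and
 * dispatch on op->op the way the nouveau_uvmm_sm_*() helpers above do.
 */
static int sketch_walk_gpuva_ops(struct drm_gpuva_ops *ops)
{
	struct drm_gpuva_op *op;

	drm_gpuva_for_each_op(op, ops) {
		switch (op->op) {
		case DRM_GPUVA_OP_MAP:
			/* op->map: the new mapping (va.addr, va.range, gem.obj, gem.offset) */
			break;
		case DRM_GPUVA_OP_REMAP:
			/*
			 * op->remap: optional prev/next split mappings plus
			 * op->remap.unmap->va, the original mapping being
			 * partially replaced.
			 */
			break;
		case DRM_GPUVA_OP_UNMAP:
			/* op->unmap.va: the mapping to tear down */
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}

The bind_job_op side follows the same shape at a coarser granularity: nouveau_uvmm_bind_job_submit() and _run() iterate the job's list with list_for_each_op() and switch on op->op (OP_MAP / OP_UNMAP and their sparse variants), with each bind_job_op carrying its own drm_gpuva_ops list in op->ops for the per-VA split/merge steps shown above.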