// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */

#include <linux/mm.h>
#include <linux/sync_file.h>
#include <linux/pagemap.h>

#include <drm/drm_file.h>
#include <drm/drm_syncobj.h>
#include <drm/drm_utils.h>

#include <drm/lima_drm.h>

#include "lima_drv.h"
#include "lima_gem.h"
#include "lima_vm.h"

/*
 * Allocate a shmem-backed GEM object of @size bytes and return a handle
 * to it in @handle.  On success the handle owns the only reference to the
 * object; on any failure after creation the object is released before
 * returning.
 *
 * Returns 0 on success or a negative errno.
 */
int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
			   u32 size, u32 flags, u32 *handle)
{
	int err;
	gfp_t mask;
	struct drm_gem_shmem_object *shmem;
	struct drm_gem_object *obj;
	struct sg_table *sgt;

	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return PTR_ERR(shmem);

	obj = &shmem->base;

	/*
	 * Mali Utgard GPU can only support 32bit address space, so force
	 * the backing pages into the DMA32 zone and never into highmem.
	 */
	mask = mapping_gfp_mask(obj->filp->f_mapping);
	mask &= ~__GFP_HIGHMEM;
	mask |= __GFP_DMA32;
	mapping_set_gfp_mask(obj->filp->f_mapping, mask);

	/* Populate the pages and build the sg table up front. */
	sgt = drm_gem_shmem_get_pages_sgt(obj);
	if (IS_ERR(sgt)) {
		err = PTR_ERR(sgt);
		goto out;
	}

	err = drm_gem_handle_create(file, obj, handle);

out:
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(obj);

	return err;
}

/*
 * GEM .free callback: complain if the BO still has GPU VA mappings (they
 * should all have been removed by object_close / task completion), then
 * let the shmem helper do the actual teardown.
 */
static void lima_gem_free_object(struct drm_gem_object *obj)
{
	struct lima_bo *bo = to_lima_bo(obj);

	if (!list_empty(&bo->va))
		dev_err(obj->dev->dev, "lima gem free bo still has va\n");

	drm_gem_shmem_free_object(obj);
}

/* GEM .open callback: map the BO into the opening client's GPU VM. */
static int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file)
{
	struct lima_bo *bo = to_lima_bo(obj);
	struct lima_drm_priv *priv = to_lima_drm_priv(file);
	struct lima_vm *vm = priv->vm;

	return lima_vm_bo_add(vm, bo, true);
}

/* GEM .close callback: drop the closing client's GPU VM mapping. */
static void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file)
{
	struct lima_bo *bo = to_lima_bo(obj);
	struct lima_drm_priv *priv = to_lima_drm_priv(file);
	struct lima_vm *vm = priv->vm;

	lima_vm_bo_del(vm, bo);
}

/*
 * GEM object callbacks: free/open/close are lima-specific (VM mapping
 * bookkeeping); everything else is the stock drm_gem_shmem helper.
 */
static const struct drm_gem_object_funcs lima_gem_funcs = {
	.free = lima_gem_free_object,
	.open = lima_gem_object_open,
	.close = lima_gem_object_close,
	.print_info = drm_gem_shmem_print_info,
	.pin = drm_gem_shmem_pin,
	.unpin = drm_gem_shmem_unpin,
	.get_sg_table = drm_gem_shmem_get_sg_table,
	.vmap = drm_gem_shmem_vmap,
	.vunmap = drm_gem_shmem_vunmap,
	.mmap = drm_gem_shmem_mmap,
};

/*
 * driver gem_create_object hook: allocate our lima_bo wrapper so the GEM
 * object is embedded in the driver-private structure, and point it at
 * lima_gem_funcs.  Returns NULL on allocation failure.
 */
struct drm_gem_object *lima_gem_create_object(struct drm_device *dev, size_t size)
{
	struct lima_bo *bo;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return NULL;

	mutex_init(&bo->lock);
	INIT_LIST_HEAD(&bo->va);

	bo->base.base.funcs = &lima_gem_funcs;

	return &bo->base.base;
}

/*
 * Look up @handle in @file and report the BO's GPU virtual address in
 * this client's VM (*va) and its mmap fake offset (*offset).
 *
 * Returns 0 on success, -ENOENT if the handle is invalid.
 */
int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset)
{
	struct drm_gem_object *obj;
	struct lima_bo *bo;
	struct lima_drm_priv *priv = to_lima_drm_priv(file);
	struct lima_vm *vm = priv->vm;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	bo = to_lima_bo(obj);

	*va = lima_vm_get_va(vm, bo);

	*offset = drm_vma_node_offset_addr(&obj->vma_node);

	drm_gem_object_put_unlocked(obj);
	return 0;
}

/*
 * Prepare one BO's reservation object for @task.
 *
 * For a read access, reserve a shared-fence slot now (we add the task's
 * fence after it is queued).  With implicit sync, also collect the fences
 * the task must wait on into task->deps; with explicit sync the user
 * supplies dependencies via in_sync, so nothing more is needed here.
 *
 * Caller must hold the BO's reservation lock.
 */
static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo,
			    bool write, bool explicit)
{
	int err = 0;

	if (!write) {
		err = dma_resv_reserve_shared(lima_bo_resv(bo), 1);
		if (err)
			return err;
	}

	/* explicit sync use user passed dep fence */
	if (explicit)
		return 0;

	return drm_gem_fence_array_add_implicit(&task->deps, &bo->base.base, write);
}

/*
 * Resolve the user-supplied in-fence syncobjs and add their fences to the
 * task's dependency array.  A zero in_sync slot means "unused".  On the
 * failure path we drop the fence reference ourselves; on success
 * drm_gem_fence_array_add() has taken ownership of it.
 */
static int lima_gem_add_deps(struct drm_file *file, struct lima_submit *submit)
{
	int i, err;

	for (i = 0; i < ARRAY_SIZE(submit->in_sync); i++) {
		struct dma_fence *fence = NULL;

		if (!submit->in_sync[i])
			continue;

		err = drm_syncobj_find_fence(file, submit->in_sync[i],
					     0, 0, &fence);
		if (err)
			return err;

		err = drm_gem_fence_array_add(&submit->task->deps, fence);
		if (err) {
			dma_fence_put(fence);
			return err;
		}
	}

	return 0;
}

/*
 * Submit a task to the GPU scheduler.
 *
 * Sequence: look up every BO and pin its VM mapping, lock all BO
 * reservations together (ww_acquire), init the scheduler task, gather
 * explicit (syncobj) and implicit (reservation) dependencies, queue the
 * task, publish its fence to each BO's reservation object and to the
 * optional out-syncobj, then unlock and drop the lookup references (the
 * task holds its own references until it completes).
 *
 * Returns 0 on success or a negative errno; on failure everything taken
 * here (BO references, VM mapping refcounts, the task, the out-syncobj
 * reference) is unwound.
 */
int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
{
	int i, err = 0;
	struct ww_acquire_ctx ctx;
	struct lima_drm_priv *priv = to_lima_drm_priv(file);
	struct lima_vm *vm = priv->vm;
	struct drm_syncobj *out_sync = NULL;
	struct dma_fence *fence;
	struct lima_bo **bos = submit->lbos;

	if (submit->out_sync) {
		out_sync = drm_syncobj_find(file, submit->out_sync);
		if (!out_sync)
			return -ENOENT;
	}

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj;
		struct lima_bo *bo;

		obj = drm_gem_object_lookup(file, submit->bos[i].handle);
		if (!obj) {
			err = -ENOENT;
			goto err_out0;
		}

		bo = to_lima_bo(obj);

		/* increase refcnt of gpu va map to prevent unmapped when executing,
		 * will be decreased when task done
		 */
		err = lima_vm_bo_add(vm, bo, false);
		if (err) {
			drm_gem_object_put_unlocked(obj);
			goto err_out0;
		}

		bos[i] = bo;
	}

	err = drm_gem_lock_reservations((struct drm_gem_object **)bos,
					submit->nr_bos, &ctx);
	if (err)
		goto err_out0;

	err = lima_sched_task_init(
		submit->task, submit->ctx->context + submit->pipe,
		bos, submit->nr_bos, vm);
	if (err)
		goto err_out1;

	err = lima_gem_add_deps(file, submit);
	if (err)
		goto err_out2;

	/* Collect per-BO implicit-sync deps and reserve shared-fence slots. */
	for (i = 0; i < submit->nr_bos; i++) {
		err = lima_gem_sync_bo(
			submit->task, bos[i],
			submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE,
			submit->flags & LIMA_SUBMIT_FLAG_EXPLICIT_FENCE);
		if (err)
			goto err_out2;
	}

	/* No failure allowed past this point: the task is now queued. */
	fence = lima_sched_context_queue_task(
		submit->ctx->context + submit->pipe, submit->task);

	/* Publish the task's fence while the reservations are still held. */
	for (i = 0; i < submit->nr_bos; i++) {
		if (submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE)
			dma_resv_add_excl_fence(lima_bo_resv(bos[i]), fence);
		else
			dma_resv_add_shared_fence(lima_bo_resv(bos[i]), fence);
	}

	drm_gem_unlock_reservations((struct drm_gem_object **)bos,
				    submit->nr_bos, &ctx);

	/* Drop the lookup references; the queued task keeps its own. */
	for (i = 0; i < submit->nr_bos; i++)
		drm_gem_object_put_unlocked(&bos[i]->base.base);

	if (out_sync) {
		drm_syncobj_replace_fence(out_sync, fence);
		drm_syncobj_put(out_sync);
	}

	dma_fence_put(fence);

	return 0;

err_out2:
	lima_sched_task_fini(submit->task);
err_out1:
	drm_gem_unlock_reservations((struct drm_gem_object **)bos,
				    submit->nr_bos, &ctx);
err_out0:
	/* bos[] is filled in order, so stop at the first empty slot. */
	for (i = 0; i < submit->nr_bos; i++) {
		if (!bos[i])
			break;
		lima_vm_bo_del(vm, bos[i]);
		drm_gem_object_put_unlocked(&bos[i]->base.base);
	}
	if (out_sync)
		drm_syncobj_put(out_sync);
	return err;
}

/*
 * Wait for fences on @handle's reservation object.  @op selects whether
 * to wait for writers only or for all access (LIMA_GEM_WAIT_WRITE);
 * op == 0 is a no-op.  @timeout_ns is an absolute timeout converted to
 * jiffies by drm_timeout_abs_to_jiffies().
 *
 * Returns 0 on success, -ETIMEDOUT (or -EBUSY for a zero timeout) when
 * the fences did not signal in time, or another negative errno.
 */
int lima_gem_wait(struct drm_file *file, u32 handle, u32 op, s64 timeout_ns)
{
	bool write = op & LIMA_GEM_WAIT_WRITE;
	long ret, timeout;

	if (!op)
		return 0;

	timeout = drm_timeout_abs_to_jiffies(timeout_ns);

	ret = drm_gem_dma_resv_wait(file, handle, write, timeout);
	if (ret == -ETIME)
		ret = timeout ? -ETIMEDOUT : -EBUSY;

	return ret;
}