/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/dma-fence-array.h>
#include <linux/reservation.h>
#include <linux/sync_file.h>
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"

/*
 * Cmdstream submission:
 */

#define BO_INVALID_FLAGS ~(ETNA_SUBMIT_BO_READ | ETNA_SUBMIT_BO_WRITE)
/* make sure these don't conflict w/ ETNAVIV_SUBMIT_BO_x */
#define BO_LOCKED   0x4000
#define BO_PINNED   0x2000

static struct etnaviv_gem_submit *submit_create(struct drm_device *dev,
		struct etnaviv_gpu *gpu, size_t nr)
{
	struct etnaviv_gem_submit *submit;
	size_t sz = size_vstruct(nr, sizeof(submit->bos[0]), sizeof(*submit));

	submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	if (submit) {
		submit->dev = dev;
		submit->gpu = gpu;

		/* initially, until copy_from_user() and bo lookup succeeds: */
		submit->nr_bos = 0;

		ww_acquire_init(&submit->ticket, &reservation_ww_class);
	}

	return submit;
}

static int submit_lookup_objects(struct etnaviv_gem_submit *submit,
	struct drm_file *file, struct drm_etnaviv_gem_submit_bo *submit_bos,
	unsigned nr_bos)
{
	struct drm_etnaviv_gem_submit_bo *bo;
	unsigned i;
	int ret = 0;

	spin_lock(&file->table_lock);

	for (i = 0, bo = submit_bos; i < nr_bos; i++, bo++) {
		struct drm_gem_object *obj;

		if (bo->flags & BO_INVALID_FLAGS) {
			DRM_ERROR("invalid flags: %x\n", bo->flags);
			ret = -EINVAL;
			goto out_unlock;
		}

		submit->bos[i].flags = bo->flags;

		/* normally use drm_gem_object_lookup(), but for bulk lookup
		 * all under single table_lock just hit object_idr directly:
		 */
		obj = idr_find(&file->object_idr, bo->handle);
		if (!obj) {
			DRM_ERROR("invalid handle %u at index %u\n",
				  bo->handle, i);
			ret = -EINVAL;
			goto out_unlock;
		}

		/*
		 * Take a refcount on the object. The file table lock
		 * prevents the object_idr's refcount on this being dropped.
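		 * The reference taken here is dropped again in
		 * submit_cleanup() once the submit has been processed.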
		 */
		drm_gem_object_reference(obj);

		submit->bos[i].obj = to_etnaviv_bo(obj);
	}

out_unlock:
	submit->nr_bos = i;
	spin_unlock(&file->table_lock);

	return ret;
}

static void submit_unlock_object(struct etnaviv_gem_submit *submit, int i)
{
	if (submit->bos[i].flags & BO_LOCKED) {
		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;

		ww_mutex_unlock(&etnaviv_obj->resv->lock);
		submit->bos[i].flags &= ~BO_LOCKED;
	}
}

static int submit_lock_objects(struct etnaviv_gem_submit *submit)
{
	int contended, slow_locked = -1, i, ret = 0;

retry:
	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;

		if (slow_locked == i)
			slow_locked = -1;

		contended = i;

		if (!(submit->bos[i].flags & BO_LOCKED)) {
			ret = ww_mutex_lock_interruptible(&etnaviv_obj->resv->lock,
					&submit->ticket);
			if (ret == -EALREADY)
				DRM_ERROR("BO at index %u already on submit list\n",
					  i);
			if (ret)
				goto fail;
			submit->bos[i].flags |= BO_LOCKED;
		}
	}

	ww_acquire_done(&submit->ticket);

	return 0;

fail:
	for (; i >= 0; i--)
		submit_unlock_object(submit, i);

	if (slow_locked > 0)
		submit_unlock_object(submit, slow_locked);

	if (ret == -EDEADLK) {
		struct etnaviv_gem_object *etnaviv_obj;

		etnaviv_obj = submit->bos[contended].obj;

		/* we lost out in a seqno race, lock and retry.. */
		ret = ww_mutex_lock_slow_interruptible(&etnaviv_obj->resv->lock,
				&submit->ticket);
		if (!ret) {
			submit->bos[contended].flags |= BO_LOCKED;
			slow_locked = contended;
			goto retry;
		}
	}

	return ret;
}

static int submit_fence_sync(const struct etnaviv_gem_submit *submit)
{
	unsigned int context = submit->gpu->fence_context;
	int i, ret = 0;

	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
		bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE;
		bool explicit = !(submit->flags & ETNA_SUBMIT_NO_IMPLICIT);

		ret = etnaviv_gpu_fence_sync_obj(etnaviv_obj, context, write,
						 explicit);
		if (ret)
			break;
	}

	return ret;
}

static void submit_unpin_objects(struct etnaviv_gem_submit *submit)
{
	int i;

	for (i = 0; i < submit->nr_bos; i++) {
		if (submit->bos[i].flags & BO_PINNED)
			etnaviv_gem_mapping_unreference(submit->bos[i].mapping);

		submit->bos[i].mapping = NULL;
		submit->bos[i].flags &= ~BO_PINNED;
	}
}

static int submit_pin_objects(struct etnaviv_gem_submit *submit)
{
	int i, ret = 0;

	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
		struct etnaviv_vram_mapping *mapping;

		mapping = etnaviv_gem_mapping_get(&etnaviv_obj->base,
						  submit->gpu);
		if (IS_ERR(mapping)) {
			ret = PTR_ERR(mapping);
			break;
		}

		submit->bos[i].flags |= BO_PINNED;
		submit->bos[i].mapping = mapping;
	}

	return ret;
}

static int submit_bo(struct etnaviv_gem_submit *submit, u32 idx,
	struct etnaviv_gem_submit_bo **bo)
{
	if (idx >= submit->nr_bos) {
		DRM_ERROR("invalid buffer index: %u (out of %u)\n",
			  idx, submit->nr_bos);
		return -EINVAL;
	}

	*bo = &submit->bos[idx];

	return 0;
}

/* process the reloc's and patch up the cmdstream as needed: */
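/*
 * Each reloc entry names a dword offset into the command stream
 * (submit_offset), a buffer in the bo list (reloc_idx) and a byte offset
 * into that buffer (reloc_offset).  The dword at submit_offset is rewritten
 * to the buffer's GPU address plus reloc_offset; offsets must be given in
 * non-decreasing order.
 */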
static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream,
		u32 size, const struct drm_etnaviv_gem_submit_reloc *relocs,
		u32 nr_relocs)
{
	u32 i, last_offset = 0;
	u32 *ptr = stream;
	int ret;

	for (i = 0; i < nr_relocs; i++) {
		const struct drm_etnaviv_gem_submit_reloc *r = relocs + i;
		struct etnaviv_gem_submit_bo *bo;
		u32 off;

		if (unlikely(r->flags)) {
			DRM_ERROR("invalid reloc flags\n");
			return -EINVAL;
		}

		if (r->submit_offset % 4) {
			DRM_ERROR("non-aligned reloc offset: %u\n",
				  r->submit_offset);
			return -EINVAL;
		}

		/* offset in dwords: */
		off = r->submit_offset / 4;

		if ((off >= size) ||
		    (off < last_offset)) {
			DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
			return -EINVAL;
		}

		ret = submit_bo(submit, r->reloc_idx, &bo);
		if (ret)
			return ret;

		if (r->reloc_offset >= bo->obj->base.size - sizeof(*ptr)) {
			DRM_ERROR("relocation %u outside object\n", i);
			return -EINVAL;
		}

		ptr[off] = bo->mapping->iova + r->reloc_offset;

		last_offset = off;
	}

	return 0;
}

static void submit_cleanup(struct etnaviv_gem_submit *submit)
{
	unsigned i;

	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;

		submit_unlock_object(submit, i);
		drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
	}

	ww_acquire_fini(&submit->ticket);
	dma_fence_put(submit->fence);
	kfree(submit);
}

int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_gem_submit *args = data;
	struct drm_etnaviv_gem_submit_reloc *relocs;
	struct drm_etnaviv_gem_submit_bo *bos;
	struct etnaviv_gem_submit *submit;
	struct etnaviv_cmdbuf *cmdbuf;
	struct etnaviv_gpu *gpu;
	struct dma_fence *in_fence = NULL;
	struct sync_file *sync_file = NULL;
	int out_fence_fd = -1;
	void *stream;
	int ret;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	if (args->stream_size % 4) {
		DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
			  args->stream_size);
		return -EINVAL;
	}

	if (args->exec_state != ETNA_PIPE_3D &&
	    args->exec_state != ETNA_PIPE_2D &&
	    args->exec_state != ETNA_PIPE_VG) {
		DRM_ERROR("invalid exec_state: 0x%x\n", args->exec_state);
		return -EINVAL;
	}

	if (args->flags & ~ETNA_SUBMIT_FLAGS) {
		DRM_ERROR("invalid flags: 0x%x\n", args->flags);
		return -EINVAL;
	}

	/*
	 * Copy the command submission and bo array to kernel space in
	 * one go, and do this outside of any locks.
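	 * copy_from_user() may fault while paging in the user buffers, which
	 * must not happen under the reservation ww_mutexes taken later.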
	 */
	bos = drm_malloc_ab(args->nr_bos, sizeof(*bos));
	relocs = drm_malloc_ab(args->nr_relocs, sizeof(*relocs));
	stream = drm_malloc_ab(1, args->stream_size);
	cmdbuf = etnaviv_cmdbuf_new(gpu->cmdbuf_suballoc,
				    ALIGN(args->stream_size, 8) + 8,
				    args->nr_bos);
	if (!bos || !relocs || !stream || !cmdbuf) {
		ret = -ENOMEM;
		goto err_submit_cmds;
	}

	cmdbuf->exec_state = args->exec_state;
	cmdbuf->ctx = file->driver_priv;

	ret = copy_from_user(bos, u64_to_user_ptr(args->bos),
			     args->nr_bos * sizeof(*bos));
	if (ret) {
		ret = -EFAULT;
		goto err_submit_cmds;
	}

	ret = copy_from_user(relocs, u64_to_user_ptr(args->relocs),
			     args->nr_relocs * sizeof(*relocs));
	if (ret) {
		ret = -EFAULT;
		goto err_submit_cmds;
	}

	ret = copy_from_user(stream, u64_to_user_ptr(args->stream),
			     args->stream_size);
	if (ret) {
		ret = -EFAULT;
		goto err_submit_cmds;
	}

	if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			ret = out_fence_fd;
			goto err_submit_cmds;
		}
	}

	submit = submit_create(dev, gpu, args->nr_bos);
	if (!submit) {
		ret = -ENOMEM;
		goto err_submit_cmds;
	}

	submit->flags = args->flags;

	ret = submit_lookup_objects(submit, file, bos, args->nr_bos);
	if (ret)
		goto err_submit_objects;

	ret = submit_lock_objects(submit);
	if (ret)
		goto err_submit_objects;

	if (!etnaviv_cmd_validate_one(gpu, stream, args->stream_size / 4,
				      relocs, args->nr_relocs)) {
		ret = -EINVAL;
		goto err_submit_objects;
	}

	if (args->flags & ETNA_SUBMIT_FENCE_FD_IN) {
		in_fence = sync_file_get_fence(args->fence_fd);
		if (!in_fence) {
			ret = -EINVAL;
			goto err_submit_objects;
		}

		/*
		 * Wait if the fence is from a foreign context, or if the fence
		 * array contains any fence from a foreign context.
		 */
		if (!dma_fence_match_context(in_fence, gpu->fence_context)) {
			ret = dma_fence_wait(in_fence, true);
			if (ret)
				goto err_submit_objects;
		}
	}

	ret = submit_fence_sync(submit);
	if (ret)
		goto err_submit_objects;

	ret = submit_pin_objects(submit);
	if (ret)
		goto out;

	ret = submit_reloc(submit, stream, args->stream_size / 4,
			   relocs, args->nr_relocs);
	if (ret)
		goto out;

	memcpy(cmdbuf->vaddr, stream, args->stream_size);
	cmdbuf->user_size = ALIGN(args->stream_size, 8);

	ret = etnaviv_gpu_submit(gpu, submit, cmdbuf);
	if (ret == 0)
		cmdbuf = NULL;

	if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
		/*
		 * This can be improved: ideally we want to allocate the sync
		 * file before kicking off the GPU job and just attach the
		 * fence to the sync file here, eliminating the ENOMEM
		 * possibility at this stage.
		 */
		sync_file = sync_file_create(submit->fence);
		if (!sync_file) {
			ret = -ENOMEM;
			goto out;
		}
		fd_install(out_fence_fd, sync_file->file);
	}

	args->fence_fd = out_fence_fd;
	args->fence = submit->fence->seqno;

out:
	submit_unpin_objects(submit);

	/*
	 * If we're returning -EAGAIN, it may be due to the userptr code
	 * wanting to run its workqueue outside of any locks. Flush our
	 * workqueue to ensure that it is run in a timely manner.
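	 * The caller is expected to retry the submit on -EAGAIN; drmIoctl()
	 * in libdrm restarts the ioctl automatically in that case.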
	 */
	if (ret == -EAGAIN)
		flush_workqueue(priv->wq);

err_submit_objects:
	if (in_fence)
		dma_fence_put(in_fence);
	submit_cleanup(submit);

err_submit_cmds:
	if (ret && (out_fence_fd >= 0))
		put_unused_fd(out_fence_fd);
	/* if we still own the cmdbuf */
	if (cmdbuf)
		etnaviv_cmdbuf_free(cmdbuf);
	if (stream)
		drm_free_large(stream);
	if (bos)
		drm_free_large(bos);
	if (relocs)
		drm_free_large(relocs);

	return ret;
}