// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright © 2011-2023 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"

#include <drm/ttm/ttm_placement.h>

static void vmw_bo_release(struct vmw_bo *vbo)
{
        WARN_ON(vbo->tbo.base.funcs &&
                kref_read(&vbo->tbo.base.refcount) != 0);
        vmw_bo_unmap(vbo);
        drm_gem_object_release(&vbo->tbo.base);
}

/**
 * vmw_bo_free - vmw_bo destructor
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object
 */
static void vmw_bo_free(struct ttm_buffer_object *bo)
{
        struct vmw_bo *vbo = to_vmw_bo(&bo->base);

        WARN_ON(vbo->dirty);
        WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
        vmw_bo_release(vbo);
        kfree(vbo);
}

/**
 * vmw_bo_pin_in_placement - Validate a buffer to placement.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @placement: The placement to pin it.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
static int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
                                   struct vmw_bo *buf,
                                   struct ttm_placement *placement,
                                   bool interruptible)
{
        struct ttm_operation_ctx ctx = { interruptible, false };
        struct ttm_buffer_object *bo = &buf->tbo;
        int ret;

        vmw_execbuf_release_pinned_bo(dev_priv);

        ret = ttm_bo_reserve(bo, interruptible, false, NULL);
        if (unlikely(ret != 0))
                goto err;

        ret = ttm_bo_validate(bo, placement, &ctx);
        if (!ret)
                vmw_bo_pin_reserved(buf, true);

        ttm_bo_unreserve(bo);
err:
        return ret;
}

/**
 * vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
                              struct vmw_bo *buf,
                              bool interruptible)
{
        struct ttm_operation_ctx ctx = { interruptible, false };
        struct ttm_buffer_object *bo = &buf->tbo;
        int ret;

        vmw_execbuf_release_pinned_bo(dev_priv);

        ret = ttm_bo_reserve(bo, interruptible, false, NULL);
        if (unlikely(ret != 0))
                goto err;

        vmw_bo_placement_set(buf,
                             VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
                             VMW_BO_DOMAIN_GMR);
        ret = ttm_bo_validate(bo, &buf->placement, &ctx);
        if (likely(ret == 0) || ret == -ERESTARTSYS)
                goto out_unreserve;

        vmw_bo_placement_set(buf,
                             VMW_BO_DOMAIN_VRAM,
                             VMW_BO_DOMAIN_VRAM);
        ret = ttm_bo_validate(bo, &buf->placement, &ctx);

out_unreserve:
        if (!ret)
                vmw_bo_pin_reserved(buf, true);

        ttm_bo_unreserve(bo);
err:
        return ret;
}

/**
 * vmw_bo_pin_in_vram - Move a buffer to vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
                       struct vmw_bo *buf,
                       bool interruptible)
{
        return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
                                       interruptible);
}

/**
 * vmw_bo_pin_in_start_of_vram - Move a buffer to start of vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to pin.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
                                struct vmw_bo *buf,
                                bool interruptible)
{
        struct ttm_operation_ctx ctx = { interruptible, false };
        struct ttm_buffer_object *bo = &buf->tbo;
        int ret = 0;

        vmw_execbuf_release_pinned_bo(dev_priv);
        ret = ttm_bo_reserve(bo, interruptible, false, NULL);
        if (unlikely(ret != 0))
                goto err_unlock;

        /*
         * Is this buffer already in vram but not at the start of it?
         * In that case, evict it first because TTM isn't good at handling
         * that situation.
         */
        if (bo->resource->mem_type == TTM_PL_VRAM &&
            bo->resource->start < PFN_UP(bo->resource->size) &&
            bo->resource->start > 0 &&
            buf->tbo.pin_count == 0) {
                ctx.interruptible = false;
                vmw_bo_placement_set(buf,
                                     VMW_BO_DOMAIN_SYS,
                                     VMW_BO_DOMAIN_SYS);
                (void)ttm_bo_validate(bo, &buf->placement, &ctx);
        }

        vmw_bo_placement_set(buf,
                             VMW_BO_DOMAIN_VRAM,
                             VMW_BO_DOMAIN_VRAM);
        buf->places[0].lpfn = PFN_UP(bo->resource->size);
        buf->busy_places[0].lpfn = PFN_UP(bo->resource->size);
        ret = ttm_bo_validate(bo, &buf->placement, &ctx);

        /* For some reason we didn't end up at the start of vram */
        WARN_ON(ret == 0 && bo->resource->start != 0);
        if (!ret)
                vmw_bo_pin_reserved(buf, true);

        ttm_bo_unreserve(bo);
err_unlock:
        return ret;
}

/**
 * vmw_bo_unpin - Unpin the given buffer; does not move the buffer.
 *
 * This function takes the reservation_sem in write mode.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to unpin.
 * @interruptible: Use interruptible wait.
 * Return: Zero on success, negative error code on failure. In particular,
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_unpin(struct vmw_private *dev_priv,
                 struct vmw_bo *buf,
                 bool interruptible)
{
        struct ttm_buffer_object *bo = &buf->tbo;
        int ret;

        ret = ttm_bo_reserve(bo, interruptible, false, NULL);
        if (unlikely(ret != 0))
                goto err;

        vmw_bo_pin_reserved(buf, false);

        ttm_bo_unreserve(bo);

err:
        return ret;
}
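
/*
 * Illustrative sketch (not part of the driver): a typical pin/unpin
 * cycle around work that needs @buf resident and immovable. The helper
 * name is hypothetical; the calls are the ones defined above.
 */
static int __maybe_unused vmw_bo_example_pin_cycle(struct vmw_private *dev_priv,
                                                   struct vmw_bo *buf)
{
        int ret;

        ret = vmw_bo_pin_in_vram_or_gmr(dev_priv, buf, true);
        if (ret)
                return ret;

        /* ... access @buf while it is guaranteed not to move ... */

        return vmw_bo_unpin(dev_priv, buf, true);
}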

/**
 * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
 * of a buffer.
 *
 * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
 * @ptr: SVGAGuestPtr returning the result.
 */
void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
                          SVGAGuestPtr *ptr)
{
        if (bo->resource->mem_type == TTM_PL_VRAM) {
                ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
                ptr->offset = bo->resource->start << PAGE_SHIFT;
        } else {
                ptr->gmrId = bo->resource->start;
                ptr->offset = 0;
        }
}

/**
 * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
 *
 * @vbo: The buffer object. Must be reserved.
 * @pin: Whether to pin or unpin.
 */
void vmw_bo_pin_reserved(struct vmw_bo *vbo, bool pin)
{
        struct ttm_operation_ctx ctx = { false, true };
        struct ttm_place pl;
        struct ttm_placement placement;
        struct ttm_buffer_object *bo = &vbo->tbo;
        uint32_t old_mem_type = bo->resource->mem_type;
        int ret;

        dma_resv_assert_held(bo->base.resv);

        if (pin == !!bo->pin_count)
                return;

        pl.fpfn = 0;
        pl.lpfn = 0;
        pl.mem_type = bo->resource->mem_type;
        pl.flags = bo->resource->placement;

        memset(&placement, 0, sizeof(placement));
        placement.num_placement = 1;
        placement.placement = &pl;

        ret = ttm_bo_validate(bo, &placement, &ctx);

        BUG_ON(ret != 0 || bo->resource->mem_type != old_mem_type);

        if (pin)
                ttm_bo_pin(bo);
        else
                ttm_bo_unpin(bo);
}

/**
 * vmw_bo_map_and_cache - Map a buffer object and cache the map
 *
 * @vbo: The buffer object to map
 * Return: A kernel virtual address or NULL if mapping failed.
 *
 * This function maps a buffer object into the kernel address space, or
 * returns the virtual kernel address of an already existing map. The virtual
 * address remains valid as long as the buffer object is pinned or reserved.
 * The cached map is torn down on either
 * 1) Buffer object move
 * 2) Buffer object swapout
 * 3) Buffer object destruction
 */
void *vmw_bo_map_and_cache(struct vmw_bo *vbo)
{
        struct ttm_buffer_object *bo = &vbo->tbo;
        bool not_used;
        void *virtual;
        int ret;

        atomic_inc(&vbo->map_count);

        virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
        if (virtual)
                return virtual;

        ret = ttm_bo_kmap(bo, 0, PFN_UP(bo->base.size), &vbo->map);
        if (ret)
                DRM_ERROR("Buffer object map failed: %d.\n", ret);

        return ttm_kmap_obj_virtual(&vbo->map, &not_used);
}
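
/*
 * Illustrative sketch (not part of the driver): accessing buffer
 * contents through the cached kernel map. The helper name is
 * hypothetical; per the rules documented above, @vbo must stay
 * reserved (or pinned) while the map is used.
 */
static int __maybe_unused vmw_bo_example_cpu_access(struct vmw_bo *vbo)
{
        void *virtual;
        int ret;

        ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL);
        if (ret)
                return ret;

        virtual = vmw_bo_map_and_cache(vbo);
        if (!virtual) {
                ttm_bo_unreserve(&vbo->tbo);
                return -ENOMEM;
        }

        /* ... read or write through @virtual ... */

        vmw_bo_unmap(vbo);
        ttm_bo_unreserve(&vbo->tbo);
        return 0;
}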

/**
 * vmw_bo_unmap - Tear down a cached buffer object map.
 *
 * @vbo: The buffer object whose map we are tearing down.
 *
 * This function tears down a cached map set up using
 * vmw_bo_map_and_cache().
 */
void vmw_bo_unmap(struct vmw_bo *vbo)
{
        int map_count;

        if (vbo->map.bo == NULL)
                return;

        map_count = atomic_dec_return(&vbo->map_count);

        if (!map_count) {
                ttm_bo_kunmap(&vbo->map);
                vbo->map.bo = NULL;
        }
}

/**
 * vmw_bo_init - Initialize a vmw buffer object
 *
 * @dev_priv: Pointer to the device private struct
 * @vmw_bo: Buffer object to initialize
 * @params: Parameters used to initialize the buffer object
 * @destroy: The function used to delete the buffer object
 * Returns: Zero on success, negative error code on error.
 */
static int vmw_bo_init(struct vmw_private *dev_priv,
                       struct vmw_bo *vmw_bo,
                       struct vmw_bo_params *params,
                       void (*destroy)(struct ttm_buffer_object *))
{
        struct ttm_operation_ctx ctx = {
                .interruptible = params->bo_type != ttm_bo_type_kernel,
                .no_wait_gpu = false,
                .resv = params->resv,
        };
        struct ttm_device *bdev = &dev_priv->bdev;
        struct drm_device *vdev = &dev_priv->drm;
        int ret;

        memset(vmw_bo, 0, sizeof(*vmw_bo));

        BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
        vmw_bo->tbo.priority = 3;
        vmw_bo->res_tree = RB_ROOT;
        atomic_set(&vmw_bo->map_count, 0);

        params->size = ALIGN(params->size, PAGE_SIZE);
        drm_gem_private_object_init(vdev, &vmw_bo->tbo.base, params->size);

        vmw_bo_placement_set(vmw_bo, params->domain, params->busy_domain);
        ret = ttm_bo_init_reserved(bdev, &vmw_bo->tbo, params->bo_type,
                                   &vmw_bo->placement, 0, &ctx,
                                   params->sg, params->resv, destroy);
        if (unlikely(ret))
                return ret;

        if (params->pin)
                ttm_bo_pin(&vmw_bo->tbo);
        ttm_bo_unreserve(&vmw_bo->tbo);

        return 0;
}

int vmw_bo_create(struct vmw_private *vmw,
                  struct vmw_bo_params *params,
                  struct vmw_bo **p_bo)
{
        int ret;

        *p_bo = kmalloc(sizeof(**p_bo), GFP_KERNEL);
        if (unlikely(!*p_bo)) {
                DRM_ERROR("Failed to allocate a buffer.\n");
                return -ENOMEM;
        }

        /*
         * vmw_bo_init will delete the *p_bo object if it fails
         */
        ret = vmw_bo_init(vmw, *p_bo, params, vmw_bo_free);
        if (unlikely(ret != 0))
                goto out_error;

        return ret;
out_error:
        *p_bo = NULL;
        return ret;
}
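
/*
 * Illustrative sketch (not part of the driver): creating a kernel-side
 * buffer object. The helper name is hypothetical; the vmw_bo_params
 * fields are the ones consumed by vmw_bo_init() above (the remaining
 * fields default to zero/NULL).
 */
static int __maybe_unused vmw_bo_example_create(struct vmw_private *vmw,
                                                size_t size,
                                                struct vmw_bo **out)
{
        struct vmw_bo_params params = {
                .domain = VMW_BO_DOMAIN_SYS,
                .busy_domain = VMW_BO_DOMAIN_SYS,
                .bo_type = ttm_bo_type_kernel,
                .size = size,
                .pin = false,
        };

        return vmw_bo_create(vmw, &params, out);
}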

/**
 * vmw_user_bo_synccpu_grab - Grab a struct vmw_bo for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @vmw_bo: Pointer to the buffer object being grabbed for CPU access
 * @flags: Flags indicating how the grab should be performed.
 * Return: Zero on success, negative error code on error. In particular,
 * -EBUSY will be returned if a dontblock operation is requested and the
 * buffer object is busy, and -ERESTARTSYS will be returned if a wait is
 * interrupted by a signal.
 *
 * A blocking grab will be automatically released when the file handle
 * it was performed through is closed.
 */
static int vmw_user_bo_synccpu_grab(struct vmw_bo *vmw_bo,
                                    uint32_t flags)
{
        bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
        struct ttm_buffer_object *bo = &vmw_bo->tbo;
        int ret;

        if (flags & drm_vmw_synccpu_allow_cs) {
                long lret;

                lret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_READ,
                                             true, nonblock ? 0 :
                                             MAX_SCHEDULE_TIMEOUT);
                if (!lret)
                        return -EBUSY;
                else if (lret < 0)
                        return lret;
                return 0;
        }

        ret = ttm_bo_reserve(bo, true, nonblock, NULL);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_bo_wait(bo, true, nonblock);
        if (likely(ret == 0))
                atomic_inc(&vmw_bo->cpu_writers);

        ttm_bo_unreserve(bo);

        return ret;
}

/**
 * vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
 * and unblock command submission on the buffer if blocked.
 *
 * @filp: Identifying the caller.
 * @handle: Handle identifying the buffer object.
 * @flags: Flags indicating the type of release.
 */
static int vmw_user_bo_synccpu_release(struct drm_file *filp,
                                       uint32_t handle,
                                       uint32_t flags)
{
        struct vmw_bo *vmw_bo;
        int ret = vmw_user_bo_lookup(filp, handle, &vmw_bo);

        if (!ret) {
                if (!(flags & drm_vmw_synccpu_allow_cs))
                        atomic_dec(&vmw_bo->cpu_writers);
                vmw_user_bo_unref(&vmw_bo);
        }

        return ret;
}

/**
 * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */
int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_vmw_synccpu_arg *arg =
                (struct drm_vmw_synccpu_arg *) data;
        struct vmw_bo *vbo;
        int ret;

        if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
            || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
                               drm_vmw_synccpu_dontblock |
                               drm_vmw_synccpu_allow_cs)) != 0) {
                DRM_ERROR("Illegal synccpu flags.\n");
                return -EINVAL;
        }

        switch (arg->op) {
        case drm_vmw_synccpu_grab:
                ret = vmw_user_bo_lookup(file_priv, arg->handle, &vbo);
                if (unlikely(ret != 0))
                        return ret;

                ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
                vmw_user_bo_unref(&vbo);
                if (unlikely(ret != 0)) {
                        if (ret == -ERESTARTSYS || ret == -EBUSY)
                                return -EBUSY;
                        DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
                                  (unsigned int) arg->handle);
                        return ret;
                }
                break;
        case drm_vmw_synccpu_release:
                ret = vmw_user_bo_synccpu_release(file_priv,
                                                  arg->handle,
                                                  arg->flags);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
                                  (unsigned int) arg->handle);
                        return ret;
                }
                break;
        default:
                DRM_ERROR("Invalid synccpu operation.\n");
                return -EINVAL;
        }

        return 0;
}

/**
 * vmw_bo_unref_ioctl - Generic handle close ioctl.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and closes a
 * handle to a TTM base object, optionally freeing the object.
 */
int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        struct drm_vmw_unref_dmabuf_arg *arg =
                (struct drm_vmw_unref_dmabuf_arg *)data;

        return drm_gem_handle_delete(file_priv, arg->handle);
}

/**
 * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
 *
 * @filp: The file the handle is registered with.
 * @handle: The user buffer object handle
 * @out: Pointer to where a pointer to the embedded
 * struct vmw_bo should be placed.
 * Return: Zero on success, negative error code on error.
 *
 * The vmw buffer object pointer will be refcounted (both ttm and gem)
 */
int vmw_user_bo_lookup(struct drm_file *filp,
                       u32 handle,
                       struct vmw_bo **out)
{
        struct drm_gem_object *gobj;

        gobj = drm_gem_object_lookup(filp, handle);
        if (!gobj) {
                DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
                          (unsigned long)handle);
                return -ESRCH;
        }

        *out = to_vmw_bo(gobj);

        return 0;
}
/**
 * vmw_bo_fence_single - Utility function to fence a single TTM buffer
 * object without unreserving it.
 *
 * @bo: Pointer to the struct ttm_buffer_object to fence.
 * @fence: Pointer to the fence. If NULL, this function will
 * insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_bo_fence_single(struct ttm_buffer_object *bo,
                         struct vmw_fence_obj *fence)
{
        struct ttm_device *bdev = bo->bdev;
        struct vmw_private *dev_priv = vmw_priv_from_ttm(bdev);
        int ret;

        if (fence == NULL)
                vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
        else
                dma_fence_get(&fence->base);

        ret = dma_resv_reserve_fences(bo->base.resv, 1);
        if (!ret)
                dma_resv_add_fence(bo->base.resv, &fence->base,
                                   DMA_RESV_USAGE_KERNEL);
        else
                /* Last resort fallback when we are OOM */
                dma_fence_wait(&fence->base, false);
        dma_fence_put(&fence->base);
}
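
/*
 * Illustrative sketch (not part of the driver): the reserve ->
 * submit -> fence -> unreserve pattern vmw_bo_fence_single() is
 * designed for. The helper name is hypothetical.
 */
static void __maybe_unused vmw_bo_example_fence(struct ttm_buffer_object *bo)
{
        if (ttm_bo_reserve(bo, false, false, NULL))
                return;

        /* ... submit device commands that touch @bo ... */

        /* Passing NULL asks for a new fence on the command stream. */
        vmw_bo_fence_single(bo, NULL);
        ttm_bo_unreserve(bo);
}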

/**
 * vmw_dumb_create - Create a dumb kms buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @args: Pointer to a struct drm_mode_create_dumb structure
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm create_dumb functionality.
 * Note that this is very similar to the vmw_bo_alloc ioctl, except
 * that the arguments have a different format.
 */
int vmw_dumb_create(struct drm_file *file_priv,
                    struct drm_device *dev,
                    struct drm_mode_create_dumb *args)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_bo *vbo;
        int cpp = DIV_ROUND_UP(args->bpp, 8);
        int ret;

        switch (cpp) {
        case 1: /* DRM_FORMAT_C8 */
        case 2: /* DRM_FORMAT_RGB565 */
        case 4: /* DRM_FORMAT_XRGB8888 */
                break;
        default:
                /*
                 * Dumb buffers don't allow anything else.
                 * This is tested via IGT's dumb_buffers
                 */
                return -EINVAL;
        }

        args->pitch = args->width * cpp;
        args->size = ALIGN(args->pitch * args->height, PAGE_SIZE);

        ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
                                                args->size, &args->handle,
                                                &vbo);
        if (ret)
                return ret;

        /* drop reference from allocate - handle holds it now */
        drm_gem_object_put(&vbo->tbo.base);
        return 0;
}

/**
 * vmw_bo_swap_notify - swapout notify callback.
 *
 * @bo: The buffer object to be swapped out.
 */
void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
{
        /* Kill any cached kernel maps before swapout */
        vmw_bo_unmap(to_vmw_bo(&bo->base));
}

/**
 * vmw_bo_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_resource indicating to what memory
 * region the move is taking place.
 *
 * Detaches cached maps and device bindings that require that the
 * buffer doesn't move.
 */
void vmw_bo_move_notify(struct ttm_buffer_object *bo,
                        struct ttm_resource *mem)
{
        struct vmw_bo *vbo = to_vmw_bo(&bo->base);

        /*
         * Kill any cached kernel maps before move to or from VRAM.
         * With other types of moves, the underlying pages stay the same,
         * and the map can be kept.
         */
        if (mem->mem_type == TTM_PL_VRAM || bo->resource->mem_type == TTM_PL_VRAM)
                vmw_bo_unmap(vbo);

        /*
         * If we're moving a backup MOB out of MOB placement, then make sure we
         * read back all resource content first, and unbind the MOB from
         * the resource.
         */
        if (mem->mem_type != VMW_PL_MOB && bo->resource->mem_type == VMW_PL_MOB)
                vmw_resource_unbind_list(vbo);
}

static u32
set_placement_list(struct ttm_place *pl, u32 domain)
{
        u32 n = 0;

        /*
         * The placements are ordered according to our preferences
         */
        if (domain & VMW_BO_DOMAIN_MOB) {
                pl[n].mem_type = VMW_PL_MOB;
                pl[n].flags = 0;
                pl[n].fpfn = 0;
                pl[n].lpfn = 0;
                n++;
        }
        if (domain & VMW_BO_DOMAIN_GMR) {
                pl[n].mem_type = VMW_PL_GMR;
                pl[n].flags = 0;
                pl[n].fpfn = 0;
                pl[n].lpfn = 0;
                n++;
        }
        if (domain & VMW_BO_DOMAIN_VRAM) {
                pl[n].mem_type = TTM_PL_VRAM;
                pl[n].flags = 0;
                pl[n].fpfn = 0;
                pl[n].lpfn = 0;
                n++;
        }
        if (domain & VMW_BO_DOMAIN_WAITABLE_SYS) {
                pl[n].mem_type = VMW_PL_SYSTEM;
                pl[n].flags = 0;
                pl[n].fpfn = 0;
                pl[n].lpfn = 0;
                n++;
        }
        if (domain & VMW_BO_DOMAIN_SYS) {
                pl[n].mem_type = TTM_PL_SYSTEM;
                pl[n].flags = 0;
                pl[n].fpfn = 0;
                pl[n].lpfn = 0;
                n++;
        }

        WARN_ON(!n);
        if (!n) {
                pl[n].mem_type = TTM_PL_SYSTEM;
                pl[n].flags = 0;
                pl[n].fpfn = 0;
                pl[n].lpfn = 0;
                n++;
        }
        return n;
}

void vmw_bo_placement_set(struct vmw_bo *bo, u32 domain, u32 busy_domain)
{
        struct ttm_device *bdev = bo->tbo.bdev;
        struct vmw_private *vmw = vmw_priv_from_ttm(bdev);
        struct ttm_placement *pl = &bo->placement;
        bool mem_compatible = false;
        u32 i;

        pl->placement = bo->places;
        pl->num_placement = set_placement_list(bo->places, domain);

        if (drm_debug_enabled(DRM_UT_DRIVER) && bo->tbo.resource) {
                for (i = 0; i < pl->num_placement; ++i) {
                        if (bo->tbo.resource->mem_type == TTM_PL_SYSTEM ||
                            bo->tbo.resource->mem_type == pl->placement[i].mem_type)
                                mem_compatible = true;
                }
                if (!mem_compatible)
                        drm_warn(&vmw->drm,
                                 "%s: Incompatible transition from "
                                 "bo->base.resource->mem_type = %u to domain = %u\n",
                                 __func__, bo->tbo.resource->mem_type, domain);
        }

        pl->busy_placement = bo->busy_places;
        pl->num_busy_placement = set_placement_list(bo->busy_places, busy_domain);
}

void vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo)
{
        struct ttm_device *bdev = bo->tbo.bdev;
        struct vmw_private *vmw = vmw_priv_from_ttm(bdev);
        u32 domain = VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM;

        if (vmw->has_mob)
                domain = VMW_BO_DOMAIN_MOB;

        vmw_bo_placement_set(bo, domain, domain);
}