/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */
#ifndef QXL_OBJECT_H
#define QXL_OBJECT_H

#include "qxl_drv.h"

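/*
 * qxl_bo_reserve() - lock the underlying TTM buffer object.
 * @no_wait selects a non-blocking attempt: -EBUSY is returned instead of
 * sleeping when the BO is already reserved.  Failures other than
 * -ERESTARTSYS (interrupted by a signal) are reported with dev_err().
 */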
static inline int qxl_bo_reserve(struct qxl_bo *bo, bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS) {
			struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
			dev_err(qdev->dev, "%p reserve failed\n", bo);
		}
		return r;
	}
	return 0;
}

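/* Drop a reservation taken with qxl_bo_reserve(). */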
static inline void qxl_bo_unreserve(struct qxl_bo *bo)
{
	ttm_bo_unreserve(&bo->tbo);
}

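/* GPU-visible offset of the BO within its current TTM placement. */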
static inline u64 qxl_bo_gpu_offset(struct qxl_bo *bo)
{
	return bo->tbo.offset;
}

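/* Size of the BO's backing storage in bytes. */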
static inline unsigned long qxl_bo_size(struct qxl_bo *bo)
{
	return bo->tbo.num_pages << PAGE_SHIFT;
}

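/* Fake offset that userspace passes to mmap() to map this BO. */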
static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo)
{
	return drm_vma_node_offset_addr(&bo->tbo.vma_node);
}

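/*
 * qxl_bo_wait() - wait for outstanding GPU work on a buffer object.
 * Reserves the BO, optionally reports its current memory type through
 * @mem_type, waits on the attached sync object (if any) under the fence
 * lock, then unreserves the BO.  @no_wait makes both the reservation and
 * the fence wait non-blocking.
 */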
static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
			      bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS) {
			struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
			dev_err(qdev->dev, "%p reserve failed for wait\n",
				bo);
		}
		return r;
	}
	spin_lock(&bo->tbo.bdev->fence_lock);
	if (mem_type)
		*mem_type = bo->tbo.mem.mem_type;
	if (bo->tbo.sync_obj)
		r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
	spin_unlock(&bo->tbo.bdev->fence_lock);
	ttm_bo_unreserve(&bo->tbo);
	return r;
}

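/* BO creation, mapping, pinning and reference helpers (see qxl_object.c). */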
extern int qxl_bo_create(struct qxl_device *qdev,
			 unsigned long size,
			 bool kernel, bool pinned, u32 domain,
			 struct qxl_surface *surf,
			 struct qxl_bo **bo_ptr);
extern int qxl_bo_kmap(struct qxl_bo *bo, void **ptr);
extern void qxl_bo_kunmap(struct qxl_bo *bo);
void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, int page_offset);
void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, void *map);
extern struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo);
extern void qxl_bo_unref(struct qxl_bo **bo);
extern int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr);
extern int qxl_bo_unpin(struct qxl_bo *bo);
extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned);
extern bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo);
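
/*
 * Illustrative usage sketch (not taken from the driver sources), built only
 * from the helpers declared above; the variable names and the
 * QXL_GEM_DOMAIN_VRAM domain are assumptions made for the example:
 *
 *	struct qxl_bo *bo = NULL;
 *	u64 gpu_addr;
 *	int r;
 *
 *	r = qxl_bo_create(qdev, size, true, false, QXL_GEM_DOMAIN_VRAM,
 *			  NULL, &bo);
 *	if (r)
 *		return r;
 *
 *	r = qxl_bo_reserve(bo, false);
 *	if (likely(r == 0)) {
 *		r = qxl_bo_pin(bo, QXL_GEM_DOMAIN_VRAM, &gpu_addr);
 *		qxl_bo_unreserve(bo);
 *	}
 *
 *	qxl_bo_unref(&bo);
 */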

#endif