#ifndef __DRM_GEM_H__
#define __DRM_GEM_H__

/*
 * GEM Graphics Execution Manager Driver Interfaces
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * Copyright (c) 2009-2010, Code Aurora Forum.
 * All rights reserved.
 * Copyright © 2014 Intel Corporation
 *   Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 * Author: Rickard E. (Rik) Faith <faith@valinux.com>
 * Author: Gareth Hughes <gareth@valinux.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

/**
 * This structure defines the generic parts of a GEM buffer object, which
 * drivers embed in their own, larger buffer object structures.
 */
struct drm_gem_object {
	/** Reference count of this object */
	struct kref refcount;

	/**
	 * handle_count - gem file_priv handle count of this object
	 *
	 * Each handle also holds a reference. Note that when the handle_count
	 * drops to 0 any global names (e.g. the id in the flink namespace) will
	 * be cleared.
	 *
	 * Protected by dev->object_name_lock.
	 */
	unsigned handle_count;

	/** Related drm device */
	struct drm_device *dev;

	/** File representing the shmem storage */
	struct file *filp;

	/* Mapping info for this object */
	struct drm_vma_offset_node vma_node;

	/**
	 * Size of the object, in bytes.  Immutable over the object's
	 * lifetime.
	 */
	size_t size;

	/**
	 * Global name for this object, starts at 1. 0 means unnamed.
	 * Access is covered by the object_name_lock in the related drm_device
	 */
	int name;

	/**
	 * Memory domains. These monitor which caches contain read/write data
	 * related to the object. When transitioning from one set of domains
	 * to another, the driver is called to ensure that caches are suitably
	 * flushed and invalidated
	 */
	uint32_t read_domains;
	uint32_t write_domain;

	/**
	 * While validating an exec operation, the
	 * new read/write domain values are computed here.
	 * They will be transferred to the above values
	 * at the point that any cache flushing occurs
	 */
	uint32_t pending_read_domains;
	uint32_t pending_write_domain;

	/**
	 * dma_buf - dma buf associated with this GEM object
	 *
	 * Pointer to the dma-buf associated with this gem object (either
	 * through importing or exporting). We break the resulting reference
	 * loop when the last gem handle for this object is released.
	 *
	 * Protected by dev->object_name_lock.
	 */
	struct dma_buf *dma_buf;

	/**
	 * import_attach - dma buf attachment backing this object
	 *
	 * Any foreign dma_buf imported as a gem object has this set to the
	 * attachment point for the device. This is invariant over the lifetime
	 * of a gem object.
	 *
	 * The driver's ->gem_free_object callback is responsible for cleaning
	 * up the dma_buf attachment and references acquired at import time.
	 *
	 * Note that the drm gem/prime core no longer depends upon drivers
	 * setting this field, so drivers where it doesn't make sense (e.g.
	 * virtual devices or a DisplayLink device behind a USB bus) can
	 * simply leave it as NULL.
	 */
	struct dma_buf_attachment *import_attach;
};
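
/*
 * A minimal sketch of how a driver typically embeds struct drm_gem_object
 * in its own buffer object; "foo_bo" and "to_foo_bo" are made-up names used
 * purely for illustration:
 *
 *	struct foo_bo {
 *		struct drm_gem_object base;
 *		struct sg_table *sgt;
 *	};
 *
 *	#define to_foo_bo(gem) container_of(gem, struct foo_bo, base)
 *
 * The embedded object is set up with drm_gem_object_init() (or
 * drm_gem_private_object_init() for objects without shmem backing) and torn
 * down again with drm_gem_object_release() from the driver's
 * ->gem_free_object callback.
 */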

void drm_gem_object_release(struct drm_gem_object *obj);
void drm_gem_object_free(struct kref *kref);
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size);
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size);
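
/*
 * A rough sketch of a create path, assuming the hypothetical foo_bo wrapper
 * sketched above (the object size is expected to be page aligned):
 *
 *	static struct foo_bo *foo_bo_create(struct drm_device *dev, size_t size)
 *	{
 *		struct foo_bo *bo;
 *		int ret;
 *
 *		bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *		if (!bo)
 *			return ERR_PTR(-ENOMEM);
 *
 *		ret = drm_gem_object_init(dev, &bo->base, PAGE_ALIGN(size));
 *		if (ret) {
 *			kfree(bo);
 *			return ERR_PTR(ret);
 *		}
 *		return bo;
 *	}
 *
 * drm_gem_object_init() allocates the shmem file backing the object; drivers
 * that manage their own backing storage would use
 * drm_gem_private_object_init() instead and skip the shmem file.
 */
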
void drm_gem_vm_open(struct vm_area_struct *vma);
void drm_gem_vm_close(struct vm_area_struct *vma);
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma);
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
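
/*
 * drm_gem_mmap() resolves the fake mmap offset against the device's vma
 * offset manager and then maps the object through drm_gem_mmap_obj(), using
 * the vm_ops provided by the driver. It is usually wired directly into the
 * driver's file_operations; a sketch, with "foo_driver_fops" being a
 * placeholder name:
 *
 *	static const struct file_operations foo_driver_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= drm_open,
 *		.release	= drm_release,
 *		.unlocked_ioctl	= drm_ioctl,
 *		.mmap		= drm_gem_mmap,
 *		.poll		= drm_poll,
 *		.read		= drm_read,
 *	};
 */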

/* Acquire a reference on @obj; may be called without holding any locks. */
static inline void
drm_gem_object_reference(struct drm_gem_object *obj)
{
	kref_get(&obj->refcount);
}

/*
 * Drop a reference on @obj (NULL is allowed). The caller must hold
 * dev->struct_mutex, which drm_gem_object_free() expects to be held.
 */
static inline void
drm_gem_object_unreference(struct drm_gem_object *obj)
{
	if (obj != NULL) {
		WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

		kref_put(&obj->refcount, drm_gem_object_free);
	}
}

/*
 * Drop a reference on @obj (NULL is allowed) without the caller holding
 * dev->struct_mutex; the mutex is acquired internally if the last reference
 * is dropped and the object needs to be freed.
 */
static inline void
drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev;

	if (!obj)
		return;

	dev = obj->dev;
	if (kref_put_mutex(&obj->refcount, drm_gem_object_free, &dev->struct_mutex))
		mutex_unlock(&dev->struct_mutex);
	else
		might_lock(&dev->struct_mutex);
}

int drm_gem_handle_create(struct drm_file *file_priv,
			  struct drm_gem_object *obj,
			  u32 *handlep);
int drm_gem_handle_delete(struct drm_file *filp, u32 handle);
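
/*
 * Handles are what userspace gets back from buffer creation ioctls. A sketch
 * of the usual pattern (e.g. in a dumb_create implementation), reusing the
 * hypothetical foo_bo_create() from above:
 *
 *	struct foo_bo *bo = foo_bo_create(dev, args->size);
 *	u32 handle;
 *	int ret;
 *
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 *
 *	ret = drm_gem_handle_create(file_priv, &bo->base, &handle);
 *	// the handle now holds its own reference; drop the initial one
 *	drm_gem_object_unreference_unlocked(&bo->base);
 *	if (ret)
 *		return ret;
 *
 *	args->handle = handle;
 *	return 0;
 */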


void drm_gem_free_mmap_offset(struct drm_gem_object *obj);
int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
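
/*
 * These helpers manage the fake mmap offset an object is reachable under. A
 * sketch of a dumb_map_offset style lookup, where "offset" is the caller's
 * output parameter: allocate the offset and read it back from obj->vma_node:
 *
 *	int ret = drm_gem_create_mmap_offset(obj);
 *	if (ret)
 *		return ret;
 *
 *	*offset = drm_vma_node_offset_addr(&obj->vma_node);
 *	return 0;
 */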

struct page **drm_gem_get_pages(struct drm_gem_object *obj);
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		bool dirty, bool accessed);
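
/*
 * drm_gem_get_pages() pins the shmem pages backing an object created with
 * drm_gem_object_init() and returns them as a page array;
 * drm_gem_put_pages() releases that array again. A sketch:
 *
 *	struct page **pages;
 *
 *	pages = drm_gem_get_pages(obj);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *
 *	// ... use the pages, e.g. build an sg table for dma ...
 *
 *	// dirty/accessed tell the mm whether the pages were written to or
 *	// referenced while they were in use
 *	drm_gem_put_pages(obj, pages, true, true);
 */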

struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev,
					     struct drm_file *filp,
					     u32 handle);
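
/*
 * drm_gem_object_lookup() translates a userspace handle back into an object
 * and acquires a reference which the caller must drop again. A sketch of a
 * typical ioctl body:
 *
 *	struct drm_gem_object *obj;
 *
 *	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 *	if (!obj)
 *		return -ENOENT;
 *
 *	// ... operate on the object ...
 *
 *	drm_gem_object_unreference_unlocked(obj);
 *	return 0;
 */
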
int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 uint32_t handle);

#endif /* __DRM_GEM_H__ */