/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#ifndef __UAPI_IVPU_DRM_H__
#define __UAPI_IVPU_DRM_H__

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

#define DRM_IVPU_DRIVER_MAJOR 1
#define DRM_IVPU_DRIVER_MINOR 0

#define DRM_IVPU_GET_PARAM		  0x00
#define DRM_IVPU_SET_PARAM		  0x01
#define DRM_IVPU_BO_CREATE		  0x02
#define DRM_IVPU_BO_INFO		  0x03
#define DRM_IVPU_SUBMIT			  0x05
#define DRM_IVPU_BO_WAIT		  0x06

#define DRM_IOCTL_IVPU_GET_PARAM                                               \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_GET_PARAM, struct drm_ivpu_param)

#define DRM_IOCTL_IVPU_SET_PARAM                                               \
	DRM_IOW(DRM_COMMAND_BASE + DRM_IVPU_SET_PARAM, struct drm_ivpu_param)

#define DRM_IOCTL_IVPU_BO_CREATE                                               \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_BO_CREATE, struct drm_ivpu_bo_create)

#define DRM_IOCTL_IVPU_BO_INFO                                                 \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_BO_INFO, struct drm_ivpu_bo_info)

#define DRM_IOCTL_IVPU_SUBMIT                                                  \
	DRM_IOW(DRM_COMMAND_BASE + DRM_IVPU_SUBMIT, struct drm_ivpu_submit)

#define DRM_IOCTL_IVPU_BO_WAIT                                                 \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_IVPU_BO_WAIT, struct drm_ivpu_bo_wait)

/**
 * DOC: contexts
 *
 * VPU contexts have a private virtual address space, job queues and priority.
 * Each context is identified by a unique ID. A context is created on open().
 */

#define DRM_IVPU_PARAM_DEVICE_ID	    0
#define DRM_IVPU_PARAM_DEVICE_REVISION	    1
#define DRM_IVPU_PARAM_PLATFORM_TYPE	    2
#define DRM_IVPU_PARAM_CORE_CLOCK_RATE	    3
#define DRM_IVPU_PARAM_NUM_CONTEXTS	    4
#define DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS 5
#define DRM_IVPU_PARAM_CONTEXT_PRIORITY	    6
#define DRM_IVPU_PARAM_CONTEXT_ID	    7
#define DRM_IVPU_PARAM_FW_API_VERSION	    8
#define DRM_IVPU_PARAM_ENGINE_HEARTBEAT	    9
#define DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID  10
#define DRM_IVPU_PARAM_TILE_CONFIG	    11
#define DRM_IVPU_PARAM_SKU		    12

#define DRM_IVPU_PLATFORM_TYPE_SILICON	    0

#define DRM_IVPU_CONTEXT_PRIORITY_IDLE	    0
#define DRM_IVPU_CONTEXT_PRIORITY_NORMAL    1
#define DRM_IVPU_CONTEXT_PRIORITY_FOCUS	    2
#define DRM_IVPU_CONTEXT_PRIORITY_REALTIME  3

/**
 * struct drm_ivpu_param - Get/Set VPU parameters
 */
struct drm_ivpu_param {
	/**
	 * @param:
	 *
	 * Supported params:
	 *
	 * %DRM_IVPU_PARAM_DEVICE_ID:
	 * PCI Device ID of the VPU device (read-only)
	 *
	 * %DRM_IVPU_PARAM_DEVICE_REVISION:
	 * VPU device revision (read-only)
	 *
	 * %DRM_IVPU_PARAM_PLATFORM_TYPE:
	 * Returns %DRM_IVPU_PLATFORM_TYPE_SILICON on real hardware or a device-specific
	 * platform type when executing on a simulator or emulator (read-only)
	 *
	 * %DRM_IVPU_PARAM_CORE_CLOCK_RATE:
	 * Current PLL frequency (read-only)
	 *
	 * %DRM_IVPU_PARAM_NUM_CONTEXTS:
	 * Maximum number of simultaneously existing contexts (read-only)
	 *
	 * %DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
	 * Lowest VPU virtual address available in the current context (read-only)
	 *
	 * %DRM_IVPU_PARAM_CONTEXT_PRIORITY:
	 * Value of the current context scheduling priority (read-write).
	 * See DRM_IVPU_CONTEXT_PRIORITY_* for possible values.
	 *
	 * %DRM_IVPU_PARAM_CONTEXT_ID:
	 * Current context ID, always greater than 0 (read-only)
	 *
	 * %DRM_IVPU_PARAM_FW_API_VERSION:
	 * Firmware API version array (read-only)
	 *
	 * %DRM_IVPU_PARAM_ENGINE_HEARTBEAT:
	 * Heartbeat value from an engine (read-only).
	 * The engine ID (e.g. DRM_IVPU_ENGINE_COMPUTE) is given via @index.
	 *
	 * %DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID:
	 * Device-unique inference ID (read-only)
	 *
	 * %DRM_IVPU_PARAM_TILE_CONFIG:
	 * VPU tile configuration (read-only)
	 *
	 * %DRM_IVPU_PARAM_SKU:
	 * VPU SKU ID (read-only)
	 *
	 */
	__u32 param;

	/** @index: Index for params that have multiple instances */
	__u32 index;

	/** @value: Param value */
	__u64 value;
};
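
/*
 * A minimal usage sketch (not part of the UAPI): query the PCI device ID and
 * raise the current context scheduling priority via the param ioctls. It
 * assumes "fd" is an already opened VPU render node and that <sys/ioctl.h>,
 * <errno.h> and <stdio.h> are included; the helper name is illustrative only.
 *
 *	static int ivpu_example_params(int fd)
 *	{
 *		struct drm_ivpu_param args = {};
 *
 *		args.param = DRM_IVPU_PARAM_DEVICE_ID;
 *		if (ioctl(fd, DRM_IOCTL_IVPU_GET_PARAM, &args))
 *			return -errno;
 *		printf("VPU PCI device ID: 0x%llx\n", (unsigned long long)args.value);
 *
 *		args.param = DRM_IVPU_PARAM_CONTEXT_PRIORITY;
 *		args.value = DRM_IVPU_CONTEXT_PRIORITY_FOCUS;
 *		if (ioctl(fd, DRM_IOCTL_IVPU_SET_PARAM, &args))
 *			return -errno;
 *
 *		return 0;
 *	}
 */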

#define DRM_IVPU_BO_HIGH_MEM   0x00000001
#define DRM_IVPU_BO_MAPPABLE   0x00000002

#define DRM_IVPU_BO_CACHED     0x00000000
#define DRM_IVPU_BO_UNCACHED   0x00010000
#define DRM_IVPU_BO_WC	       0x00020000
#define DRM_IVPU_BO_CACHE_MASK 0x00030000

#define DRM_IVPU_BO_FLAGS \
	(DRM_IVPU_BO_HIGH_MEM | \
	 DRM_IVPU_BO_MAPPABLE | \
	 DRM_IVPU_BO_CACHE_MASK)

/**
 * struct drm_ivpu_bo_create - Create a BO backed by SHMEM
 *
 * Create a GEM buffer object allocated in SHMEM memory.
 */
struct drm_ivpu_bo_create {
	/** @size: The size in bytes of the allocated memory */
	__u64 size;

	/**
	 * @flags:
	 *
	 * Supported flags:
	 *
	 * %DRM_IVPU_BO_HIGH_MEM:
	 *
	 * Allocate the VPU address from the >4GB range.
	 * A buffer object with a VPU address >4GB can always be accessed by the
	 * VPU DMA engine, but some HW generations may not be able to access this
	 * memory from the firmware running on the VPU management processor.
	 * Suitable for input, output and some scratch buffers.
	 *
	 * %DRM_IVPU_BO_MAPPABLE:
	 *
	 * Buffer object can be mapped using mmap().
	 *
	 * %DRM_IVPU_BO_CACHED:
	 *
	 * Allocated BO will be cached on host side (WB) and snooped on the VPU side.
	 * This is the default caching mode.
	 *
	 * %DRM_IVPU_BO_UNCACHED:
	 *
	 * Allocated BO will not be cached on host side nor snooped on the VPU side.
	 *
	 * %DRM_IVPU_BO_WC:
	 *
	 * Allocated BO will use a write-combining buffer for writes, but reads will
	 * be uncached.
	 */
	__u32 flags;

	/** @handle: Returned GEM object handle */
	__u32 handle;

	/** @vpu_addr: Returned VPU virtual address */
	__u64 vpu_addr;
};
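
/*
 * A minimal usage sketch (not part of the UAPI): allocate a 1 MiB buffer that
 * is CPU-mappable and placed in the >4GB VPU address range. It assumes "fd" is
 * an open VPU render node and that <sys/ioctl.h> and <errno.h> are included;
 * the helper name is illustrative only.
 *
 *	static int ivpu_example_bo_create(int fd, __u32 *handle, __u64 *vpu_addr)
 *	{
 *		struct drm_ivpu_bo_create args = {
 *			.size = 1024 * 1024,
 *			.flags = DRM_IVPU_BO_HIGH_MEM | DRM_IVPU_BO_MAPPABLE |
 *				 DRM_IVPU_BO_CACHED,
 *		};
 *
 *		if (ioctl(fd, DRM_IOCTL_IVPU_BO_CREATE, &args))
 *			return -errno;
 *
 *		*handle = args.handle;
 *		*vpu_addr = args.vpu_addr;
 *		return 0;
 *	}
 */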

/**
 * struct drm_ivpu_bo_info - Query buffer object info
 */
struct drm_ivpu_bo_info {
	/** @handle: Handle of the queried BO */
	__u32 handle;

	/** @flags: Returned flags used to create the BO */
	__u32 flags;

	/** @vpu_addr: Returned VPU virtual address */
	__u64 vpu_addr;

	/**
	 * @mmap_offset:
	 *
	 * Returned offset to be used in mmap(). 0 in case the BO is not mappable.
	 */
	__u64 mmap_offset;

	/** @size: Returned GEM object size, aligned to PAGE_SIZE */
	__u64 size;
};
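
/*
 * A minimal usage sketch (not part of the UAPI): look up the mmap offset of a
 * previously created, mappable BO and map it into the process. It assumes "fd"
 * is an open VPU render node and that <sys/mman.h>, <sys/ioctl.h> and
 * <errno.h> are included; the helper name is illustrative only.
 *
 *	static void *ivpu_example_bo_map(int fd, __u32 handle)
 *	{
 *		struct drm_ivpu_bo_info info = { .handle = handle };
 *		void *ptr;
 *
 *		if (ioctl(fd, DRM_IOCTL_IVPU_BO_INFO, &info))
 *			return NULL;
 *		if (!info.mmap_offset)
 *			return NULL; // BO was not created with DRM_IVPU_BO_MAPPABLE
 *
 *		ptr = mmap(NULL, info.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			   fd, info.mmap_offset);
 *		return ptr == MAP_FAILED ? NULL : ptr;
 *	}
 */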

/* drm_ivpu_submit engines */
#define DRM_IVPU_ENGINE_COMPUTE 0
#define DRM_IVPU_ENGINE_COPY    1

/**
 * struct drm_ivpu_submit - Submit commands to the VPU
 *
 * Execute a single command buffer on a given VPU engine.
 * Handles to all referenced buffer objects have to be provided in @buffers_ptr.
 *
 * User space may wait for job completion using the %DRM_IVPU_BO_WAIT ioctl.
 */
struct drm_ivpu_submit {
	/**
	 * @buffers_ptr:
	 *
	 * A pointer to a u32 array of GEM handles of the BOs required for this job.
	 * The number of elements in the array must be equal to the value given by @buffer_count.
	 *
	 * The first BO is the command buffer. The rest of the array has to contain
	 * all BOs referenced from the command buffer.
	 */
	__u64 buffers_ptr;

	/** @buffer_count: Number of elements in the @buffers_ptr array */
	__u32 buffer_count;

	/**
	 * @engine: Select the engine this job should be executed on
	 *
	 * %DRM_IVPU_ENGINE_COMPUTE:
	 *
	 * Performs Deep Learning Neural Compute Inference Operations
	 *
	 * %DRM_IVPU_ENGINE_COPY:
	 *
	 * Performs memory copy operations to/from system memory allocated for VPU
	 */
	__u32 engine;

	/** @flags: Reserved for future use - must be zero */
	__u32 flags;

	/**
	 * @commands_offset:
	 *
	 * Offset inside the first buffer in @buffers_ptr containing commands
	 * to be executed. The offset has to be 8-byte aligned.
	 */
	__u32 commands_offset;
};
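
/*
 * A minimal usage sketch (not part of the UAPI): submit a single command
 * buffer to the compute engine. "cmdbuf_handle" is assumed to be the handle of
 * a BO that already contains the commands at offset 0 and references no other
 * BOs; <sys/ioctl.h>, <stdint.h> and <errno.h> are assumed to be included and
 * the helper name is illustrative only.
 *
 *	static int ivpu_example_submit(int fd, __u32 cmdbuf_handle)
 *	{
 *		__u32 handles[1] = { cmdbuf_handle };
 *		struct drm_ivpu_submit args = {
 *			.buffers_ptr = (__u64)(uintptr_t)handles,
 *			.buffer_count = 1,
 *			.engine = DRM_IVPU_ENGINE_COMPUTE,
 *			.commands_offset = 0, // must be 8-byte aligned
 *		};
 *
 *		return ioctl(fd, DRM_IOCTL_IVPU_SUBMIT, &args) ? -errno : 0;
 *	}
 */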

/* drm_ivpu_bo_wait job status codes */
#define DRM_IVPU_JOB_STATUS_SUCCESS 0

/**
 * struct drm_ivpu_bo_wait - Wait for BO to become inactive
 *
 * Blocks until a given buffer object becomes inactive.
 * With @timeout_ns set to 0 returns immediately.
 */
struct drm_ivpu_bo_wait {
	/** @handle: Handle to the buffer object to be waited on */
	__u32 handle;

	/** @flags: Reserved for future use - must be zero */
	__u32 flags;

	/** @timeout_ns: Absolute timeout in nanoseconds (may be zero) */
	__s64 timeout_ns;

	/**
	 * @job_status:
	 *
	 * Job status code, updated after the job is completed.
	 * %DRM_IVPU_JOB_STATUS_SUCCESS on success or a device-specific error otherwise.
	 * Valid only if @handle points to a command buffer.
	 */
	__u32 job_status;

	/** @pad: Padding - must be zero */
	__u32 pad;
};
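
/*
 * A minimal usage sketch (not part of the UAPI): wait up to roughly one second
 * for a previously submitted command buffer to complete, then check its job
 * status. The absolute timeout is derived here from CLOCK_MONOTONIC, which is
 * an assumption about the clock base; <time.h>, <sys/ioctl.h> and <errno.h>
 * are assumed to be included and the helper name is illustrative only.
 *
 *	static int ivpu_example_wait(int fd, __u32 cmdbuf_handle)
 *	{
 *		struct drm_ivpu_bo_wait args = { .handle = cmdbuf_handle };
 *		struct timespec ts;
 *
 *		clock_gettime(CLOCK_MONOTONIC, &ts);
 *		args.timeout_ns = (ts.tv_sec + 1) * 1000000000ll + ts.tv_nsec;
 *
 *		if (ioctl(fd, DRM_IOCTL_IVPU_BO_WAIT, &args))
 *			return -errno; // wait failed or timed out
 *
 *		return args.job_status == DRM_IVPU_JOB_STATUS_SUCCESS ? 0 : -EIO;
 *	}
 */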

#if defined(__cplusplus)
}
#endif

#endif /* __UAPI_IVPU_DRM_H__ */