/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef KFD_IOCTL_H_INCLUDED
#define KFD_IOCTL_H_INCLUDED

#include <drm/drm.h>
#include <linux/ioctl.h>

/*
 * - 1.1 - initial version
 * - 1.3 - Add SMI events support
 * - 1.4 - Indicate new SRAM EDC bit in device properties
 * - 1.5 - Add SVM API
 * - 1.6 - Query clear flags in SVM get_attr API
 * - 1.7 - Checkpoint Restore (CRIU) API
 * - 1.8 - CRIU - Support for SDMA transfers with GTT BOs
 * - 1.9 - Add available memory ioctl
 * - 1.10 - Add SMI profiler event log
 * - 1.11 - Add unified memory for ctx save/restore area
 * - 1.12 - Add DMA buf export ioctl
 * - 1.13 - Add debugger API
 * - 1.14 - Update kfd_event_data
 */
#define KFD_IOCTL_MAJOR_VERSION 1
#define KFD_IOCTL_MINOR_VERSION 14

struct kfd_ioctl_get_version_args {
	__u32 major_version;	/* from KFD */
	__u32 minor_version;	/* from KFD */
};
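
/*
 * Example: a minimal user-mode sketch (not part of this UAPI) that opens the
 * KFD device node and queries the interface version. "/dev/kfd" is the usual
 * device node; error handling is elided. Needs <fcntl.h>, <stdio.h>,
 * <sys/ioctl.h> and this header.
 *
 *	int fd = open("/dev/kfd", O_RDWR | O_CLOEXEC);
 *	struct kfd_ioctl_get_version_args args = {0};
 *
 *	if (fd >= 0 && ioctl(fd, AMDKFD_IOC_GET_VERSION, &args) == 0)
 *		printf("KFD ioctl interface %u.%u\n",
 *		       args.major_version, args.minor_version);
 */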

/* For kfd_ioctl_create_queue_args.queue_type. */
#define KFD_IOC_QUEUE_TYPE_COMPUTE		0x0
#define KFD_IOC_QUEUE_TYPE_SDMA			0x1
#define KFD_IOC_QUEUE_TYPE_COMPUTE_AQL		0x2
#define KFD_IOC_QUEUE_TYPE_SDMA_XGMI		0x3

#define KFD_MAX_QUEUE_PERCENTAGE	100
#define KFD_MAX_QUEUE_PRIORITY		15

#define KFD_MIN_QUEUE_RING_SIZE		1024

struct kfd_ioctl_create_queue_args {
	__u64 ring_base_address;	/* to KFD */
	__u64 write_pointer_address;	/* from KFD */
	__u64 read_pointer_address;	/* from KFD */
	__u64 doorbell_offset;	/* from KFD */

	__u32 ring_size;		/* to KFD */
	__u32 gpu_id;		/* to KFD */
	__u32 queue_type;		/* to KFD */
	__u32 queue_percentage;	/* to KFD */
	__u32 queue_priority;	/* to KFD */
	__u32 queue_id;		/* from KFD */

	__u64 eop_buffer_address;	/* to KFD */
	__u64 eop_buffer_size;	/* to KFD */
	__u64 ctx_save_restore_address; /* to KFD */
	__u32 ctx_save_restore_size;	/* to KFD */
	__u32 ctl_stack_size;		/* to KFD */
};

struct kfd_ioctl_destroy_queue_args {
	__u32 queue_id;		/* to KFD */
	__u32 pad;
};

struct kfd_ioctl_update_queue_args {
	__u64 ring_base_address;	/* to KFD */

	__u32 queue_id;		/* to KFD */
	__u32 ring_size;		/* to KFD */
	__u32 queue_percentage;	/* to KFD */
	__u32 queue_priority;	/* to KFD */
};

struct kfd_ioctl_set_cu_mask_args {
	__u32 queue_id;		/* to KFD */
	__u32 num_cu_mask;		/* to KFD */
	__u64 cu_mask_ptr;		/* to KFD */
};

struct kfd_ioctl_get_queue_wave_state_args {
	__u64 ctl_stack_address;	/* to KFD */
	__u32 ctl_stack_used_size;	/* from KFD */
	__u32 save_area_used_size;	/* from KFD */
	__u32 queue_id;			/* to KFD */
	__u32 pad;
};

struct kfd_ioctl_get_available_memory_args {
	__u64 available;	/* from KFD */
	__u32 gpu_id;		/* to KFD */
	__u32 pad;
};

struct kfd_dbg_device_info_entry {
	__u64 exception_status;
	__u64 lds_base;
	__u64 lds_limit;
	__u64 scratch_base;
	__u64 scratch_limit;
	__u64 gpuvm_base;
	__u64 gpuvm_limit;
	__u32 gpu_id;
	__u32 location_id;
	__u32 vendor_id;
	__u32 device_id;
	__u32 revision_id;
	__u32 subsystem_vendor_id;
	__u32 subsystem_device_id;
	__u32 fw_version;
	__u32 gfx_target_version;
	__u32 simd_count;
	__u32 max_waves_per_simd;
	__u32 array_count;
	__u32 simd_arrays_per_engine;
	__u32 num_xcc;
	__u32 capability;
	__u32 debug_prop;
};

/* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */
#define KFD_IOC_CACHE_POLICY_COHERENT 0
#define KFD_IOC_CACHE_POLICY_NONCOHERENT 1

struct kfd_ioctl_set_memory_policy_args {
	__u64 alternate_aperture_base;	/* to KFD */
	__u64 alternate_aperture_size;	/* to KFD */

	__u32 gpu_id;			/* to KFD */
	__u32 default_policy;		/* to KFD */
	__u32 alternate_policy;		/* to KFD */
	__u32 pad;
};

/*
 * All counters are monotonic. They are used for profiling of compute jobs.
 * The profiling is done by userspace.
 *
 * In case of GPU reset, the counter should not be affected.
 */

struct kfd_ioctl_get_clock_counters_args {
	__u64 gpu_clock_counter;	/* from KFD */
	__u64 cpu_clock_counter;	/* from KFD */
	__u64 system_clock_counter;	/* from KFD */
	__u64 system_clock_freq;	/* from KFD */

	__u32 gpu_id;		/* to KFD */
	__u32 pad;
};

struct kfd_process_device_apertures {
	__u64 lds_base;		/* from KFD */
	__u64 lds_limit;		/* from KFD */
	__u64 scratch_base;		/* from KFD */
	__u64 scratch_limit;		/* from KFD */
	__u64 gpuvm_base;		/* from KFD */
	__u64 gpuvm_limit;		/* from KFD */
	__u32 gpu_id;		/* from KFD */
	__u32 pad;
};

/*
 * AMDKFD_IOC_GET_PROCESS_APERTURES is deprecated. Use
 * AMDKFD_IOC_GET_PROCESS_APERTURES_NEW instead, which supports an
 * unlimited number of GPUs.
 */
#define NUM_OF_SUPPORTED_GPUS 7
struct kfd_ioctl_get_process_apertures_args {
	struct kfd_process_device_apertures
			process_apertures[NUM_OF_SUPPORTED_GPUS];/* from KFD */

	/* from KFD, should be in the range [1 - NUM_OF_SUPPORTED_GPUS] */
	__u32 num_of_nodes;
	__u32 pad;
};

struct kfd_ioctl_get_process_apertures_new_args {
	/* User allocated. Pointer to struct kfd_process_device_apertures
	 * filled in by Kernel
	 */
	__u64 kfd_process_device_apertures_ptr;
	/* to KFD - indicates amount of memory present in
	 *  kfd_process_device_apertures_ptr
	 * from KFD - Number of entries filled by KFD.
	 */
	__u32 num_of_nodes;
	__u32 pad;
};
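
/*
 * Example: a hedged sketch (not part of this UAPI) of the usual pattern for
 * AMDKFD_IOC_GET_PROCESS_APERTURES_NEW: the caller sizes the array and KFD
 * reports how many entries it filled. "kfd_fd" is an already-open /dev/kfd
 * descriptor and MAX_NODES is a caller-chosen bound.
 *
 *	struct kfd_process_device_apertures ap[MAX_NODES];
 *	struct kfd_ioctl_get_process_apertures_new_args args = {
 *		.kfd_process_device_apertures_ptr = (__u64)(uintptr_t)ap,
 *		.num_of_nodes = MAX_NODES,
 *	};
 *
 *	if (ioctl(kfd_fd, AMDKFD_IOC_GET_PROCESS_APERTURES_NEW, &args) == 0)
 *		for (unsigned int i = 0; i < args.num_of_nodes; i++)
 *			use_aperture(&ap[i]);	// hypothetical consumer
 */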

#define MAX_ALLOWED_NUM_POINTS    100
#define MAX_ALLOWED_AW_BUFF_SIZE 4096
#define MAX_ALLOWED_WAC_BUFF_SIZE  128

struct kfd_ioctl_dbg_register_args {
	__u32 gpu_id;		/* to KFD */
	__u32 pad;
};

struct kfd_ioctl_dbg_unregister_args {
	__u32 gpu_id;		/* to KFD */
	__u32 pad;
};

struct kfd_ioctl_dbg_address_watch_args {
	__u64 content_ptr;		/* a pointer to the actual content */
	__u32 gpu_id;		/* to KFD */
	__u32 buf_size_in_bytes;	/* including gpu_id and buf_size */
};

struct kfd_ioctl_dbg_wave_control_args {
	__u64 content_ptr;		/* a pointer to the actual content */
	__u32 gpu_id;		/* to KFD */
	__u32 buf_size_in_bytes;	/* including gpu_id and buf_size */
};

#define KFD_INVALID_FD     0xffffffff

/* Matching HSA_EVENTTYPE */
#define KFD_IOC_EVENT_SIGNAL			0
#define KFD_IOC_EVENT_NODECHANGE		1
#define KFD_IOC_EVENT_DEVICESTATECHANGE		2
#define KFD_IOC_EVENT_HW_EXCEPTION		3
#define KFD_IOC_EVENT_SYSTEM_EVENT		4
#define KFD_IOC_EVENT_DEBUG_EVENT		5
#define KFD_IOC_EVENT_PROFILE_EVENT		6
#define KFD_IOC_EVENT_QUEUE_EVENT		7
#define KFD_IOC_EVENT_MEMORY			8

#define KFD_IOC_WAIT_RESULT_COMPLETE		0
#define KFD_IOC_WAIT_RESULT_TIMEOUT		1
#define KFD_IOC_WAIT_RESULT_FAIL		2

#define KFD_SIGNAL_EVENT_LIMIT			4096

/* For kfd_event_data.hw_exception_data.reset_type. */
#define KFD_HW_EXCEPTION_WHOLE_GPU_RESET	0
#define KFD_HW_EXCEPTION_PER_ENGINE_RESET	1

/* For kfd_event_data.hw_exception_data.reset_cause. */
#define KFD_HW_EXCEPTION_GPU_HANG	0
#define KFD_HW_EXCEPTION_ECC		1

/* For kfd_hsa_memory_exception_data.ErrorType */
#define KFD_MEM_ERR_NO_RAS		0
#define KFD_MEM_ERR_SRAM_ECC		1
#define KFD_MEM_ERR_POISON_CONSUMED	2
#define KFD_MEM_ERR_GPU_HANG		3

struct kfd_ioctl_create_event_args {
	__u64 event_page_offset;	/* from KFD */
	__u32 event_trigger_data;	/* from KFD - signal events only */
	__u32 event_type;		/* to KFD */
	__u32 auto_reset;		/* to KFD */
	__u32 node_id;		/* to KFD - only valid for certain
						event types */
	__u32 event_id;		/* from KFD */
	__u32 event_slot_index;	/* from KFD */
};

struct kfd_ioctl_destroy_event_args {
	__u32 event_id;		/* to KFD */
	__u32 pad;
};

struct kfd_ioctl_set_event_args {
	__u32 event_id;		/* to KFD */
	__u32 pad;
};

struct kfd_ioctl_reset_event_args {
	__u32 event_id;		/* to KFD */
	__u32 pad;
};

struct kfd_memory_exception_failure {
	__u32 NotPresent;	/* Page not present or supervisor privilege */
	__u32 ReadOnly;	/* Write access to a read-only page */
	__u32 NoExecute;	/* Execute access to a page marked NX */
	__u32 imprecise;	/* Can't determine the exact fault address */
};

/* memory exception data */
struct kfd_hsa_memory_exception_data {
	struct kfd_memory_exception_failure failure;
	__u64 va;
	__u32 gpu_id;
	__u32 ErrorType; /* 0 = no RAS error,
			  * 1 = ECC_SRAM,
			  * 2 = Link_SYNFLOOD (poison),
			  * 3 = GPU hang (not attributable to a specific cause),
			  * other values reserved
			  */
};

/* hw exception data */
struct kfd_hsa_hw_exception_data {
	__u32 reset_type;
	__u32 reset_cause;
	__u32 memory_lost;
	__u32 gpu_id;
};

/* hsa signal event data */
struct kfd_hsa_signal_event_data {
	__u64 last_event_age;	/* to and from KFD */
};

/* Event data */
struct kfd_event_data {
	union {
		/* From KFD */
		struct kfd_hsa_memory_exception_data memory_exception_data;
		struct kfd_hsa_hw_exception_data hw_exception_data;
		/* To and From KFD */
		struct kfd_hsa_signal_event_data signal_event_data;
	};
	__u64 kfd_event_data_ext;	/* pointer to an extension structure
					   for future exception types */
	__u32 event_id;		/* to KFD */
	__u32 pad;
};

struct kfd_ioctl_wait_events_args {
	__u64 events_ptr;		/* pointer to struct
					   kfd_event_data array, to KFD */
	__u32 num_events;		/* to KFD */
	__u32 wait_for_all;		/* to KFD */
	__u32 timeout;		/* to KFD */
	__u32 wait_result;		/* from KFD */
};
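
/*
 * Example: a hedged sketch (not part of this UAPI) of waiting on previously
 * created events. The "id0"/"id1" values come from AMDKFD_IOC_CREATE_EVENT;
 * "kfd_fd" is an open /dev/kfd descriptor. The timeout is in milliseconds.
 *
 *	struct kfd_event_data data[2] = {
 *		{ .event_id = id0 }, { .event_id = id1 },
 *	};
 *	struct kfd_ioctl_wait_events_args args = {
 *		.events_ptr = (__u64)(uintptr_t)data,
 *		.num_events = 2,
 *		.wait_for_all = 1,	// wait until both are signaled
 *		.timeout = 1000,
 *	};
 *
 *	if (ioctl(kfd_fd, AMDKFD_IOC_WAIT_EVENTS, &args) == 0 &&
 *	    args.wait_result == KFD_IOC_WAIT_RESULT_COMPLETE)
 *		handle_signaled_events();	// hypothetical consumer
 */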

struct kfd_ioctl_set_scratch_backing_va_args {
	__u64 va_addr;	/* to KFD */
	__u32 gpu_id;	/* to KFD */
	__u32 pad;
};

struct kfd_ioctl_get_tile_config_args {
	/* to KFD: pointer to tile array */
	__u64 tile_config_ptr;
	/* to KFD: pointer to macro tile array */
	__u64 macro_tile_config_ptr;
	/* to KFD: array size allocated by user mode
	 * from KFD: array size filled by kernel
	 */
	__u32 num_tile_configs;
	/* to KFD: array size allocated by user mode
	 * from KFD: array size filled by kernel
	 */
	__u32 num_macro_tile_configs;

	__u32 gpu_id;		/* to KFD */
	__u32 gb_addr_config;	/* from KFD */
	__u32 num_banks;		/* from KFD */
	__u32 num_ranks;		/* from KFD */
	/* struct size can be extended later if needed
	 * without breaking ABI compatibility
	 */
};

struct kfd_ioctl_set_trap_handler_args {
	__u64 tba_addr;		/* to KFD */
	__u64 tma_addr;		/* to KFD */
	__u32 gpu_id;		/* to KFD */
	__u32 pad;
};

struct kfd_ioctl_acquire_vm_args {
	__u32 drm_fd;	/* to KFD */
	__u32 gpu_id;	/* to KFD */
};

/* Allocation flags: memory types */
#define KFD_IOC_ALLOC_MEM_FLAGS_VRAM		(1 << 0)
#define KFD_IOC_ALLOC_MEM_FLAGS_GTT		(1 << 1)
#define KFD_IOC_ALLOC_MEM_FLAGS_USERPTR		(1 << 2)
#define KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL	(1 << 3)
#define KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP	(1 << 4)
/* Allocation flags: attributes/access options */
#define KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE	(1 << 31)
#define KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE	(1 << 30)
#define KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC		(1 << 29)
#define KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE	(1 << 28)
#define KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM	(1 << 27)
#define KFD_IOC_ALLOC_MEM_FLAGS_COHERENT	(1 << 26)
#define KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED	(1 << 25)

/* Allocate memory for later SVM (shared virtual memory) mapping.
 *
 * @va_addr:     virtual address of the memory to be allocated
 *               all later mappings on all GPUs will use this address
 * @size:        size in bytes
 * @handle:      buffer handle returned to user mode, used to refer to
 *               this allocation for mapping, unmapping and freeing
 * @mmap_offset: for CPU-mapping the allocation by mmapping a render node
 *               for userptrs this is overloaded to specify the CPU address
 * @gpu_id:      device identifier
 * @flags:       memory type and attributes. See KFD_IOC_ALLOC_MEM_FLAGS above
 */
struct kfd_ioctl_alloc_memory_of_gpu_args {
	__u64 va_addr;		/* to KFD */
	__u64 size;		/* to KFD */
	__u64 handle;		/* from KFD */
	__u64 mmap_offset;	/* to KFD (userptr), from KFD (mmap offset) */
	__u32 gpu_id;		/* to KFD */
	__u32 flags;
};
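
/*
 * Example: a hedged sketch (not part of this UAPI) allocating writable VRAM
 * on one GPU. "va" and "sz" are a page-aligned virtual address and size the
 * caller reserved; "gpu" is a gpu_id discovered through topology. The
 * returned handle can then be mapped with AMDKFD_IOC_MAP_MEMORY_TO_GPU
 * (see below).
 *
 *	struct kfd_ioctl_alloc_memory_of_gpu_args alloc = {
 *		.va_addr = va,
 *		.size = sz,
 *		.gpu_id = gpu,
 *		.flags = KFD_IOC_ALLOC_MEM_FLAGS_VRAM |
 *			 KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE,
 *	};
 *
 *	if (ioctl(kfd_fd, AMDKFD_IOC_ALLOC_MEMORY_OF_GPU, &alloc) == 0)
 *		handle = alloc.handle;
 */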

/* Free memory allocated with kfd_ioctl_alloc_memory_of_gpu
 *
 * @handle: memory handle returned by alloc
 */
struct kfd_ioctl_free_memory_of_gpu_args {
	__u64 handle;		/* to KFD */
};

/* Map memory to one or more GPUs
 *
 * @handle:                memory handle returned by alloc
 * @device_ids_array_ptr:  array of gpu_ids (__u32 per device)
 * @n_devices:             number of devices in the array
 * @n_success:             number of devices mapped successfully
 *
 * @n_success returns to the caller how many devices from the start of
 * the array have mapped the buffer successfully. It can be passed into
 * a subsequent retry call to skip those devices. For the first call the
 * caller should initialize it to 0.
 *
 * If the ioctl completes with return code 0 (success), n_success ==
 * n_devices.
 */
struct kfd_ioctl_map_memory_to_gpu_args {
	__u64 handle;			/* to KFD */
	__u64 device_ids_array_ptr;	/* to KFD */
	__u32 n_devices;		/* to KFD */
	__u32 n_success;		/* to/from KFD */
};
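
/*
 * Example: a hedged sketch (not part of this UAPI) of the n_success retry
 * pattern described above. "gpu_ids" is a caller-built array of gpu_id
 * values; "handle" came from AMDKFD_IOC_ALLOC_MEMORY_OF_GPU; "retryable"
 * is a hypothetical errno policy check. Needs <errno.h>.
 *
 *	struct kfd_ioctl_map_memory_to_gpu_args map = {
 *		.handle = handle,
 *		.device_ids_array_ptr = (__u64)(uintptr_t)gpu_ids,
 *		.n_devices = n,
 *		.n_success = 0,		// must be 0 on the first call
 *	};
 *
 *	while (ioctl(kfd_fd, AMDKFD_IOC_MAP_MEMORY_TO_GPU, &map) != 0) {
 *		if (!retryable(errno))
 *			break;
 *		// map.n_success devices are already mapped; the retry
 *		// skips them and continues with the rest.
 *	}
 */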

/* Unmap memory from one or more GPUs
 *
 * same arguments as for mapping
 */
struct kfd_ioctl_unmap_memory_from_gpu_args {
	__u64 handle;			/* to KFD */
	__u64 device_ids_array_ptr;	/* to KFD */
	__u32 n_devices;		/* to KFD */
	__u32 n_success;		/* to/from KFD */
};

/* Allocate GWS for specific queue
 *
 * @queue_id:    id of the queue that GWS is allocated for
 * @num_gws:     how many GWS to allocate
 * @first_gws:   index of the first GWS allocated.
 *               only contiguous GWS allocations are supported
 */
struct kfd_ioctl_alloc_queue_gws_args {
	__u32 queue_id;		/* to KFD */
	__u32 num_gws;		/* to KFD */
	__u32 first_gws;	/* from KFD */
	__u32 pad;
};

struct kfd_ioctl_get_dmabuf_info_args {
	__u64 size;		/* from KFD */
	__u64 metadata_ptr;	/* to KFD */
	__u32 metadata_size;	/* to KFD (space allocated by user)
				 * from KFD (actual metadata size)
				 */
	__u32 gpu_id;	/* from KFD */
	__u32 flags;		/* from KFD (KFD_IOC_ALLOC_MEM_FLAGS) */
	__u32 dmabuf_fd;	/* to KFD */
};

struct kfd_ioctl_import_dmabuf_args {
	__u64 va_addr;	/* to KFD */
	__u64 handle;	/* from KFD */
	__u32 gpu_id;	/* to KFD */
	__u32 dmabuf_fd;	/* to KFD */
};

struct kfd_ioctl_export_dmabuf_args {
	__u64 handle;		/* to KFD */
	__u32 flags;		/* to KFD */
	__u32 dmabuf_fd;	/* from KFD */
};

/*
 * KFD SMI (System Management Interface) events
 */
enum kfd_smi_event {
	KFD_SMI_EVENT_NONE = 0, /* not used */
	KFD_SMI_EVENT_VMFAULT = 1, /* event numbering starts at 1 */
	KFD_SMI_EVENT_THERMAL_THROTTLE = 2,
	KFD_SMI_EVENT_GPU_PRE_RESET = 3,
	KFD_SMI_EVENT_GPU_POST_RESET = 4,
	KFD_SMI_EVENT_MIGRATE_START = 5,
	KFD_SMI_EVENT_MIGRATE_END = 6,
	KFD_SMI_EVENT_PAGE_FAULT_START = 7,
	KFD_SMI_EVENT_PAGE_FAULT_END = 8,
	KFD_SMI_EVENT_QUEUE_EVICTION = 9,
	KFD_SMI_EVENT_QUEUE_RESTORE = 10,
	KFD_SMI_EVENT_UNMAP_FROM_GPU = 11,

	/*
	 * Maximum event number, used as a flag bit to request events from
	 * all processes. This requires super user permission; without it,
	 * no events are received from any process. Without this flag, only
	 * events from the calling process are received.
	 */
	KFD_SMI_EVENT_ALL_PROCESS = 64
};

enum KFD_MIGRATE_TRIGGERS {
	KFD_MIGRATE_TRIGGER_PREFETCH,
	KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
	KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU,
	KFD_MIGRATE_TRIGGER_TTM_EVICTION
};

enum KFD_QUEUE_EVICTION_TRIGGERS {
	KFD_QUEUE_EVICTION_TRIGGER_SVM,
	KFD_QUEUE_EVICTION_TRIGGER_USERPTR,
	KFD_QUEUE_EVICTION_TRIGGER_TTM,
	KFD_QUEUE_EVICTION_TRIGGER_SUSPEND,
	KFD_QUEUE_EVICTION_CRIU_CHECKPOINT,
	KFD_QUEUE_EVICTION_CRIU_RESTORE
};

enum KFD_SVM_UNMAP_TRIGGERS {
	KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY,
	KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY_MIGRATE,
	KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU
};

#define KFD_SMI_EVENT_MASK_FROM_INDEX(i) (1ULL << ((i) - 1))
#define KFD_SMI_EVENT_MSG_SIZE	96

struct kfd_ioctl_smi_events_args {
	__u32 gpuid;	/* to KFD */
	__u32 anon_fd;	/* from KFD */
};
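
/*
 * Example: a hedged sketch (not part of this UAPI) of obtaining the SMI
 * event file descriptor for one GPU and waiting for an event message. The
 * exact enable/read protocol on the returned anon_fd (including how the
 * event mask is supplied) is defined by the kernel implementation, not by
 * this header; the sketch only polls and reads one message. Needs <poll.h>.
 *
 *	struct kfd_ioctl_smi_events_args args = { .gpuid = gpu };
 *	char msg[KFD_SMI_EVENT_MSG_SIZE];
 *
 *	if (ioctl(kfd_fd, AMDKFD_IOC_SMI_EVENTS, &args) == 0) {
 *		struct pollfd pfd = { .fd = args.anon_fd, .events = POLLIN };
 *
 *		if (poll(&pfd, 1, -1) > 0)
 *			read(args.anon_fd, msg, sizeof(msg));
 *	}
 */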

/**************************************************************************************************
 * CRIU IOCTLs (Checkpoint Restore In Userspace)
 *
 * When checkpointing a process, the userspace application will perform:
 * 1. PROCESS_INFO op to determine current process information. This pauses execution and evicts
 *    all the queues.
 * 2. CHECKPOINT op to checkpoint process contents (BOs, queues, events, svm-ranges)
 * 3. UNPAUSE op to un-evict all the queues
 *
 * When restoring a process, the CRIU userspace application will perform:
 *
 * 1. RESTORE op to restore process contents
 * 2. RESUME op to start the process
 *
 * Note: Queues are forced into an evicted state after a successful PROCESS_INFO. The user
 * application needs to perform an UNPAUSE operation after calling PROCESS_INFO. The checkpoint
 * sequence is sketched after struct kfd_ioctl_criu_args below.
 */

enum kfd_criu_op {
	KFD_CRIU_OP_PROCESS_INFO,
	KFD_CRIU_OP_CHECKPOINT,
	KFD_CRIU_OP_UNPAUSE,
	KFD_CRIU_OP_RESTORE,
	KFD_CRIU_OP_RESUME,
};

/**
 * kfd_ioctl_criu_args - Arguments to perform a CRIU operation
 * @devices:		[in/out] User pointer to memory location for devices information.
 * 			This is an array of type kfd_criu_device_bucket.
 * @bos:		[in/out] User pointer to memory location for BOs information
 * 			This is an array of type kfd_criu_bo_bucket.
 * @priv_data:		[in/out] User pointer to memory location for private data
 * @priv_data_size:	[in/out] Size of priv_data in bytes
 * @num_devices:	[in/out] Number of GPUs used by process. Size of @devices array.
 * @num_bos:		[in/out] Number of BOs used by process. Size of @bos array.
 * @num_objects:	[in/out] Number of objects used by process. Objects are opaque to
 *				 user application.
 * @pid:		[in/out] PID of the process being checkpointed
 * @op:			[in] Type of operation (kfd_criu_op)
 *
 * Return: 0 on success, -errno on failure
 */
struct kfd_ioctl_criu_args {
	__u64 devices;		/* Used during ops: CHECKPOINT, RESTORE */
	__u64 bos;		/* Used during ops: CHECKPOINT, RESTORE */
	__u64 priv_data;	/* Used during ops: CHECKPOINT, RESTORE */
	__u64 priv_data_size;	/* Used during ops: PROCESS_INFO, RESTORE */
	__u32 num_devices;	/* Used during ops: PROCESS_INFO, RESTORE */
	__u32 num_bos;		/* Used during ops: PROCESS_INFO, RESTORE */
	__u32 num_objects;	/* Used during ops: PROCESS_INFO, RESTORE */
	__u32 pid;		/* Used during ops: PROCESS_INFO, RESUME */
	__u32 op;
};
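
/*
 * Example: a hedged sketch (not part of this UAPI) of the checkpoint
 * sequence documented above. Buffer sizing from the PROCESS_INFO results
 * and all error handling are elided.
 *
 *	struct kfd_ioctl_criu_args args = {
 *		.op = KFD_CRIU_OP_PROCESS_INFO,
 *		.pid = target_pid,
 *	};
 *
 *	ioctl(kfd_fd, AMDKFD_IOC_CRIU_OP, &args);	// pauses, evicts queues
 *
 *	// Allocate the devices, bos and priv_data buffers from the counts
 *	// and priv_data_size returned above, store their pointers in args,
 *	// then:
 *	args.op = KFD_CRIU_OP_CHECKPOINT;
 *	ioctl(kfd_fd, AMDKFD_IOC_CRIU_OP, &args);
 *
 *	args.op = KFD_CRIU_OP_UNPAUSE;			// un-evict queues
 *	ioctl(kfd_fd, AMDKFD_IOC_CRIU_OP, &args);
 */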

struct kfd_criu_device_bucket {
	__u32 user_gpu_id;
	__u32 actual_gpu_id;
	__u32 drm_fd;
	__u32 pad;
};

struct kfd_criu_bo_bucket {
	__u64 addr;
	__u64 size;
	__u64 offset;
	__u64 restored_offset;    /* During restore, updated offset for BO */
	__u32 gpu_id;             /* This is the user_gpu_id */
	__u32 alloc_flags;
	__u32 dmabuf_fd;
	__u32 pad;
};

/* CRIU IOCTLs - END */
/**************************************************************************************************/

/* Register offset inside the remapped mmio page
 */
enum kfd_mmio_remap {
	KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL = 0,
	KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL = 4,
};

/* Guarantee host access to memory */
#define KFD_IOCTL_SVM_FLAG_HOST_ACCESS 0x00000001
/* Fine grained coherency between all devices with access */
#define KFD_IOCTL_SVM_FLAG_COHERENT    0x00000002
/* Use any GPU in same hive as preferred device */
#define KFD_IOCTL_SVM_FLAG_HIVE_LOCAL  0x00000004
/* GPUs only read, allows replication */
#define KFD_IOCTL_SVM_FLAG_GPU_RO      0x00000008
/* Allow execution on GPU */
#define KFD_IOCTL_SVM_FLAG_GPU_EXEC    0x00000010
/* GPUs mostly read, may allow similar optimizations as RO, but writes fault */
#define KFD_IOCTL_SVM_FLAG_GPU_READ_MOSTLY     0x00000020
/* Keep GPU memory mapping always valid as if XNACK is disabled */
#define KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED   0x00000040

/**
 * kfd_ioctl_svm_op - SVM ioctl operations
 *
 * @KFD_IOCTL_SVM_OP_SET_ATTR: Modify one or more attributes
 * @KFD_IOCTL_SVM_OP_GET_ATTR: Query one or more attributes
 */
enum kfd_ioctl_svm_op {
	KFD_IOCTL_SVM_OP_SET_ATTR,
	KFD_IOCTL_SVM_OP_GET_ATTR
};

/** kfd_ioctl_svm_location - Enum for preferred and prefetch locations
 *
 * GPU IDs are used to specify GPUs as preferred and prefetch locations.
 * Below definitions are used for system memory or for leaving the preferred
 * location unspecified.
 */
enum kfd_ioctl_svm_location {
	KFD_IOCTL_SVM_LOCATION_SYSMEM = 0,
	KFD_IOCTL_SVM_LOCATION_UNDEFINED = 0xffffffff
};

/**
 * kfd_ioctl_svm_attr_type - SVM attribute types
 *
 * @KFD_IOCTL_SVM_ATTR_PREFERRED_LOC: gpuid of the preferred location, 0 for
 *                                    system memory
 * @KFD_IOCTL_SVM_ATTR_PREFETCH_LOC: gpuid of the prefetch location, 0 for
 *                                   system memory. Setting this triggers an
 *                                   immediate prefetch (migration).
 * @KFD_IOCTL_SVM_ATTR_ACCESS:
 * @KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
 * @KFD_IOCTL_SVM_ATTR_NO_ACCESS: specify memory access for the gpuid given
 *                                by the attribute value
 * @KFD_IOCTL_SVM_ATTR_SET_FLAGS: bitmask of flags to set (see
 *                                KFD_IOCTL_SVM_FLAG_...)
 * @KFD_IOCTL_SVM_ATTR_CLR_FLAGS: bitmask of flags to clear
 * @KFD_IOCTL_SVM_ATTR_GRANULARITY: migration granularity
 *                                  (log2 num pages)
 */
enum kfd_ioctl_svm_attr_type {
	KFD_IOCTL_SVM_ATTR_PREFERRED_LOC,
	KFD_IOCTL_SVM_ATTR_PREFETCH_LOC,
	KFD_IOCTL_SVM_ATTR_ACCESS,
	KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE,
	KFD_IOCTL_SVM_ATTR_NO_ACCESS,
	KFD_IOCTL_SVM_ATTR_SET_FLAGS,
	KFD_IOCTL_SVM_ATTR_CLR_FLAGS,
	KFD_IOCTL_SVM_ATTR_GRANULARITY
};

/**
 * kfd_ioctl_svm_attribute - Attributes as pairs of type and value
 *
 * The meaning of the @value depends on the attribute type.
 *
 * @type: attribute type (see enum @kfd_ioctl_svm_attr_type)
 * @value: attribute value
 */
struct kfd_ioctl_svm_attribute {
	__u32 type;
	__u32 value;
};

/**
 * kfd_ioctl_svm_args - Arguments for SVM ioctl
 *
 * @op specifies the operation to perform (see enum
 * @kfd_ioctl_svm_op).  @start_addr and @size are common for all
 * operations.
 *
 * A variable number of attributes can be given in @attrs.
 * @nattr specifies the number of attributes. New attributes can be
 * added in the future without breaking the ABI. If unknown attributes
 * are given, the function returns -EINVAL.
 *
 * @KFD_IOCTL_SVM_OP_SET_ATTR sets attributes for a virtual address
 * range. It may overlap existing virtual address ranges. If it does,
 * the existing ranges will be split such that the attribute changes
 * only apply to the specified address range.
 *
 * @KFD_IOCTL_SVM_OP_GET_ATTR returns the intersection of attributes
 * over all memory in the given range and returns the result as the
 * attribute value. If different pages have different preferred or
 * prefetch locations, 0xffffffff will be returned for
 * @KFD_IOCTL_SVM_ATTR_PREFERRED_LOC or
 * @KFD_IOCTL_SVM_ATTR_PREFETCH_LOC respectively. For
 * @KFD_IOCTL_SVM_ATTR_SET_FLAGS, flags of all pages will be
 * aggregated by bitwise AND. That means a flag will be set in the
 * output only if it is set for all pages in the range. For
 * @KFD_IOCTL_SVM_ATTR_CLR_FLAGS, flags of all pages will be
 * aggregated by bitwise NOR. That means a flag will be set in the
 * output only if it is clear for all pages in the range.
 * The minimum migration granularity throughout the range will be
 * returned for @KFD_IOCTL_SVM_ATTR_GRANULARITY.
 *
 * Querying of accessibility attributes works by initializing the
 * attribute type to @KFD_IOCTL_SVM_ATTR_ACCESS and the value to the
 * GPUID being queried. Multiple attributes can be given to allow
 * querying multiple GPUIDs. The ioctl function overwrites the
 * attribute type to indicate the access for the specified GPU.
 */
struct kfd_ioctl_svm_args {
	__u64 start_addr;
	__u64 size;
	__u32 op;
	__u32 nattr;
	/* Variable length array of attributes */
	struct kfd_ioctl_svm_attribute attrs[];
};
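
/*
 * Example: a hedged sketch (not part of this UAPI) of setting two SVM
 * attributes on a range. Because @attrs is a flexible array, the caller
 * allocates the struct and the attribute pairs together. Needs <stdlib.h>.
 *
 *	size_t sz = sizeof(struct kfd_ioctl_svm_args) +
 *		    2 * sizeof(struct kfd_ioctl_svm_attribute);
 *	struct kfd_ioctl_svm_args *args = calloc(1, sz);
 *
 *	args->start_addr = range_start;		// page aligned
 *	args->size = range_size;
 *	args->op = KFD_IOCTL_SVM_OP_SET_ATTR;
 *	args->nattr = 2;
 *	args->attrs[0].type = KFD_IOCTL_SVM_ATTR_PREFETCH_LOC;
 *	args->attrs[0].value = gpu;		// migrate to this GPU now
 *	args->attrs[1].type = KFD_IOCTL_SVM_ATTR_SET_FLAGS;
 *	args->attrs[1].value = KFD_IOCTL_SVM_FLAG_COHERENT;
 *
 *	ioctl(kfd_fd, AMDKFD_IOC_SVM, args);
 *	free(args);
 */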

/**
 * kfd_ioctl_set_xnack_mode_args - Arguments for set_xnack_mode
 *
 * @xnack_enabled:       [in/out] Whether to enable XNACK mode for this process
 *
 * @xnack_enabled indicates whether recoverable page faults should be
 * enabled for the current process. 0 means disabled, positive means
 * enabled, negative means leave unchanged. If enabled, virtual address
 * translations on GFXv9 and later AMD GPUs can return XNACK and retry
 * the access until a valid PTE is available. This is used to implement
 * device page faults.
 *
 * On output, @xnack_enabled returns the (new) current mode (0 or
 * positive). Therefore, a negative input value can be used to query
 * the current mode without changing it.
 *
 * The XNACK mode fundamentally changes the way SVM managed memory works
 * in the driver, with subtle effects on application performance and
 * functionality.
 *
 * Enabling XNACK mode requires shader programs to be compiled
 * differently. Furthermore, not all GPUs support changing the mode
 * per-process. Therefore changing the mode is only allowed while no
 * user mode queues exist in the process. This ensures that no shader
 * code is running that may have been compiled for the wrong mode. GPUs
 * that cannot change to the requested mode will prevent the XNACK mode
 * switch from occurring. All GPUs used by the process must be in the
 * same XNACK mode.
 *
 * GFXv8 or older GPUs do not support 48 bit virtual addresses or SVM.
 * Therefore those GPUs are not considered for the XNACK mode switch.
 *
 * Return: 0 on success, -errno on failure
 */
struct kfd_ioctl_set_xnack_mode_args {
	__s32 xnack_enabled;
};
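
/*
 * Example: a hedged sketch (not part of this UAPI) using the negative-input
 * convention described above to query the current XNACK mode without
 * changing it.
 *
 *	struct kfd_ioctl_set_xnack_mode_args args = { .xnack_enabled = -1 };
 *
 *	if (ioctl(kfd_fd, AMDKFD_IOC_SET_XNACK_MODE, &args) == 0)
 *		printf("XNACK %s\n", args.xnack_enabled ? "on" : "off");
 */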

/* Wave launch override modes */
enum kfd_dbg_trap_override_mode {
	KFD_DBG_TRAP_OVERRIDE_OR = 0,
	KFD_DBG_TRAP_OVERRIDE_REPLACE = 1
};

/* Wave launch overrides */
enum kfd_dbg_trap_mask {
	KFD_DBG_TRAP_MASK_FP_INVALID = 1,
	KFD_DBG_TRAP_MASK_FP_INPUT_DENORMAL = 2,
	KFD_DBG_TRAP_MASK_FP_DIVIDE_BY_ZERO = 4,
	KFD_DBG_TRAP_MASK_FP_OVERFLOW = 8,
	KFD_DBG_TRAP_MASK_FP_UNDERFLOW = 16,
	KFD_DBG_TRAP_MASK_FP_INEXACT = 32,
	KFD_DBG_TRAP_MASK_INT_DIVIDE_BY_ZERO = 64,
	KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH = 128,
	KFD_DBG_TRAP_MASK_DBG_MEMORY_VIOLATION = 256,
	KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_START = (1 << 30),
	KFD_DBG_TRAP_MASK_TRAP_ON_WAVE_END = (1 << 31)
};

/* Wave launch modes */
enum kfd_dbg_trap_wave_launch_mode {
	KFD_DBG_TRAP_WAVE_LAUNCH_MODE_NORMAL = 0,
	KFD_DBG_TRAP_WAVE_LAUNCH_MODE_HALT = 1,
	KFD_DBG_TRAP_WAVE_LAUNCH_MODE_DEBUG = 3
};

/* Address watch modes */
enum kfd_dbg_trap_address_watch_mode {
	KFD_DBG_TRAP_ADDRESS_WATCH_MODE_READ = 0,
	KFD_DBG_TRAP_ADDRESS_WATCH_MODE_NONREAD = 1,
	KFD_DBG_TRAP_ADDRESS_WATCH_MODE_ATOMIC = 2,
	KFD_DBG_TRAP_ADDRESS_WATCH_MODE_ALL = 3
};

/* Additional wave settings */
enum kfd_dbg_trap_flags {
	KFD_DBG_TRAP_FLAG_SINGLE_MEM_OP = 1,
};

/* Trap exceptions */
enum kfd_dbg_trap_exception_code {
	EC_NONE = 0,
	/* per queue */
	EC_QUEUE_WAVE_ABORT = 1,
	EC_QUEUE_WAVE_TRAP = 2,
	EC_QUEUE_WAVE_MATH_ERROR = 3,
	EC_QUEUE_WAVE_ILLEGAL_INSTRUCTION = 4,
	EC_QUEUE_WAVE_MEMORY_VIOLATION = 5,
	EC_QUEUE_WAVE_APERTURE_VIOLATION = 6,
	EC_QUEUE_PACKET_DISPATCH_DIM_INVALID = 16,
	EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID = 17,
	EC_QUEUE_PACKET_DISPATCH_CODE_INVALID = 18,
	EC_QUEUE_PACKET_RESERVED = 19,
	EC_QUEUE_PACKET_UNSUPPORTED = 20,
	EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID = 21,
	EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID = 22,
	EC_QUEUE_PACKET_VENDOR_UNSUPPORTED = 23,
	EC_QUEUE_PREEMPTION_ERROR = 30,
	EC_QUEUE_NEW = 31,
	/* per device */
	EC_DEVICE_QUEUE_DELETE = 32,
	EC_DEVICE_MEMORY_VIOLATION = 33,
	EC_DEVICE_RAS_ERROR = 34,
	EC_DEVICE_FATAL_HALT = 35,
	EC_DEVICE_NEW = 36,
	/* per process */
	EC_PROCESS_RUNTIME = 48,
	EC_PROCESS_DEVICE_REMOVE = 49,
	EC_MAX
};

/* Mask generated by ecode in kfd_dbg_trap_exception_code */
#define KFD_EC_MASK(ecode)	(1ULL << (ecode - 1))

/* Masks for exception code type checks below */
#define KFD_EC_MASK_QUEUE	(KFD_EC_MASK(EC_QUEUE_WAVE_ABORT) |	\
				 KFD_EC_MASK(EC_QUEUE_WAVE_TRAP) |	\
				 KFD_EC_MASK(EC_QUEUE_WAVE_MATH_ERROR) |	\
				 KFD_EC_MASK(EC_QUEUE_WAVE_ILLEGAL_INSTRUCTION) |	\
				 KFD_EC_MASK(EC_QUEUE_WAVE_MEMORY_VIOLATION) |	\
				 KFD_EC_MASK(EC_QUEUE_WAVE_APERTURE_VIOLATION) |	\
				 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_DIM_INVALID) |	\
				 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID) |	\
				 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_CODE_INVALID) |	\
				 KFD_EC_MASK(EC_QUEUE_PACKET_RESERVED) |	\
				 KFD_EC_MASK(EC_QUEUE_PACKET_UNSUPPORTED) |	\
				 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID) |	\
				 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID) |	\
				 KFD_EC_MASK(EC_QUEUE_PACKET_VENDOR_UNSUPPORTED)	|	\
				 KFD_EC_MASK(EC_QUEUE_PREEMPTION_ERROR)	|	\
				 KFD_EC_MASK(EC_QUEUE_NEW))
#define KFD_EC_MASK_DEVICE	(KFD_EC_MASK(EC_DEVICE_QUEUE_DELETE) |		\
				 KFD_EC_MASK(EC_DEVICE_RAS_ERROR) |		\
				 KFD_EC_MASK(EC_DEVICE_FATAL_HALT) |		\
				 KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION) |	\
				 KFD_EC_MASK(EC_DEVICE_NEW))
#define KFD_EC_MASK_PROCESS	(KFD_EC_MASK(EC_PROCESS_RUNTIME) |	\
				 KFD_EC_MASK(EC_PROCESS_DEVICE_REMOVE))
#define KFD_EC_MASK_PACKET	(KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_DIM_INVALID) |	\
				 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_GROUP_SEGMENT_SIZE_INVALID) |	\
				 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_CODE_INVALID) |	\
				 KFD_EC_MASK(EC_QUEUE_PACKET_RESERVED) |	\
				 KFD_EC_MASK(EC_QUEUE_PACKET_UNSUPPORTED) |	\
				 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_WORK_GROUP_SIZE_INVALID) |	\
				 KFD_EC_MASK(EC_QUEUE_PACKET_DISPATCH_REGISTER_INVALID) |	\
				 KFD_EC_MASK(EC_QUEUE_PACKET_VENDOR_UNSUPPORTED))

/* Checks for exception code types for KFD search */
#define KFD_DBG_EC_IS_VALID(ecode) (ecode > EC_NONE && ecode < EC_MAX)
#define KFD_DBG_EC_TYPE_IS_QUEUE(ecode)					\
			(KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_QUEUE))
#define KFD_DBG_EC_TYPE_IS_DEVICE(ecode)				\
			(KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_DEVICE))
#define KFD_DBG_EC_TYPE_IS_PROCESS(ecode)				\
			(KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_PROCESS))
#define KFD_DBG_EC_TYPE_IS_PACKET(ecode)				\
			(KFD_DBG_EC_IS_VALID(ecode) && !!(KFD_EC_MASK(ecode) & KFD_EC_MASK_PACKET))
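
/*
 * Example: a hedged sketch showing how an exception_mask for the debug
 * ioctls below can be composed from exception codes with KFD_EC_MASK, and
 * how a reported code can be classified with the type-check macros.
 *
 *	__u64 exception_mask = KFD_EC_MASK(EC_QUEUE_WAVE_TRAP) |
 *			       KFD_EC_MASK(EC_QUEUE_WAVE_MEMORY_VIOLATION) |
 *			       KFD_EC_MASK(EC_PROCESS_RUNTIME);
 *
 *	if (KFD_DBG_EC_TYPE_IS_QUEUE(reported_code))
 *		handle_queue_exception();	// source id identifies a queue
 */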

/* Runtime enable states */
enum kfd_dbg_runtime_state {
	DEBUG_RUNTIME_STATE_DISABLED = 0,
	DEBUG_RUNTIME_STATE_ENABLED = 1,
	DEBUG_RUNTIME_STATE_ENABLED_BUSY = 2,
	DEBUG_RUNTIME_STATE_ENABLED_ERROR = 3
};

/* Runtime enable status */
struct kfd_runtime_info {
	__u64 r_debug;
	__u32 runtime_state;
	__u32 ttmp_setup;
};

/* Enable modes for runtime enable */
#define KFD_RUNTIME_ENABLE_MODE_ENABLE_MASK	1
#define KFD_RUNTIME_ENABLE_MODE_TTMP_SAVE_MASK	2

/**
 * kfd_ioctl_runtime_enable_args - Arguments for runtime enable
 *
 * Coordinates debug exception signalling and debug device enablement with runtime.
 *
 * @r_debug - pointer to user struct for sharing information between ROCr and the debugger
 * @mode_mask - mask to set mode
 *	KFD_RUNTIME_ENABLE_MODE_ENABLE_MASK - enable runtime for debugging, otherwise disable
 *	KFD_RUNTIME_ENABLE_MODE_TTMP_SAVE_MASK - enable trap temporary setup (ignore on disable)
 * @capabilities_mask - mask to notify runtime on what KFD supports
 *
 * Return - 0 on SUCCESS.
 *	  - EBUSY if runtime enable call already pending.
 *	  - EEXIST if user queues already active prior to call.
 *	    If process is debug enabled, runtime enable will enable debug devices and
 *	    wait for debugger process to send runtime exception EC_PROCESS_RUNTIME
 *	    to unblock - see kfd_ioctl_dbg_trap_args.
 *
 */
struct kfd_ioctl_runtime_enable_args {
	__u64 r_debug;
	__u32 mode_mask;
	__u32 capabilities_mask;
};

/* Queue information */
struct kfd_queue_snapshot_entry {
	__u64 exception_status;
	__u64 ring_base_address;
	__u64 write_pointer_address;
	__u64 read_pointer_address;
	__u64 ctx_save_restore_address;
	__u32 queue_id;
	__u32 gpu_id;
	__u32 ring_size;
	__u32 queue_type;
	__u32 ctx_save_restore_area_size;
	__u32 reserved;
};

/* Queue status return for suspend/resume */
#define KFD_DBG_QUEUE_ERROR_BIT		30
#define KFD_DBG_QUEUE_INVALID_BIT	31
#define KFD_DBG_QUEUE_ERROR_MASK	(1 << KFD_DBG_QUEUE_ERROR_BIT)
#define KFD_DBG_QUEUE_INVALID_MASK	(1 << KFD_DBG_QUEUE_INVALID_BIT)

/* Context save area header information */
struct kfd_context_save_area_header {
	struct {
		__u32 control_stack_offset;
		__u32 control_stack_size;
		__u32 wave_state_offset;
		__u32 wave_state_size;
	} wave_state;
	__u32 debug_offset;
	__u32 debug_size;
	__u64 err_payload_addr;
	__u32 err_event_id;
	__u32 reserved1;
};

/*
 * Debug operations
 *
 * For specifics on usage and return values, see documentation per operation
 * below.  Otherwise, generic error returns apply:
 *	- ESRCH if the process to debug does not exist.
 *
 *	- EINVAL (with KFD_IOC_DBG_TRAP_ENABLE exempt) if operation
 *		 KFD_IOC_DBG_TRAP_ENABLE has not succeeded prior.
 *		 Also returns this error if GPU hardware scheduling is not supported.
 *
 *	- EPERM (with KFD_IOC_DBG_TRAP_DISABLE exempt) if target process is not
 *		 PTRACE_ATTACHED.  KFD_IOC_DBG_TRAP_DISABLE is exempt to allow
 *		 clean up of debug mode as long as process is debug enabled.
 *
 *	- EACCES if any DBG_HW_OP (debug hardware operation) is requested when
 *		 AMDKFD_IOC_RUNTIME_ENABLE has not succeeded prior.
 *
 *	- ENODEV if any GPU does not support debugging on a DBG_HW_OP call.
 *
 *	- Other errors may be returned when a DBG_HW_OP occurs while the GPU
 *	  is in a fatal state.
 *
 */
enum kfd_dbg_trap_operations {
	KFD_IOC_DBG_TRAP_ENABLE = 0,
	KFD_IOC_DBG_TRAP_DISABLE = 1,
	KFD_IOC_DBG_TRAP_SEND_RUNTIME_EVENT = 2,
	KFD_IOC_DBG_TRAP_SET_EXCEPTIONS_ENABLED = 3,
	KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE = 4,  /* DBG_HW_OP */
	KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE = 5,      /* DBG_HW_OP */
	KFD_IOC_DBG_TRAP_SUSPEND_QUEUES = 6,		/* DBG_HW_OP */
	KFD_IOC_DBG_TRAP_RESUME_QUEUES = 7,		/* DBG_HW_OP */
	KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH = 8,	/* DBG_HW_OP */
	KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH = 9,	/* DBG_HW_OP */
	KFD_IOC_DBG_TRAP_SET_FLAGS = 10,
	KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT = 11,
	KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO = 12,
	KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT = 13,
	KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT = 14
};

/**
 * kfd_ioctl_dbg_trap_enable_args
 *
 *     Arguments for KFD_IOC_DBG_TRAP_ENABLE.
 *
 *     Enables debug session for target process. Call @op KFD_IOC_DBG_TRAP_DISABLE in
 *     kfd_ioctl_dbg_trap_args to disable debug session.
 *
 *     @exception_mask (IN)	- exceptions to raise to the debugger
 *     @rinfo_ptr      (IN)	- pointer to runtime info buffer (see kfd_runtime_info)
 *     @rinfo_size     (IN/OUT)	- size of runtime info buffer in bytes
 *     @dbg_fd	       (IN)	- fd KFD will use to notify the debugger of raised
 *				  exceptions set in exception_mask.
 *
 *     Generic errors apply (see kfd_dbg_trap_operations).
 *     Return - 0 on SUCCESS.
 *		Copies KFD saved kfd_runtime_info to @rinfo_ptr on enable.
 *		Size of kfd_runtime_info saved by KFD is returned in @rinfo_size.
 *            - EBADF if KFD cannot get a reference to dbg_fd.
 *            - EFAULT if KFD cannot copy runtime info to rinfo_ptr.
 *            - EINVAL if target process is already debug enabled.
 *
 */
struct kfd_ioctl_dbg_trap_enable_args {
	__u64 exception_mask;
	__u64 rinfo_ptr;
	__u32 rinfo_size;
	__u32 dbg_fd;
};

/**
 * kfd_ioctl_dbg_trap_send_runtime_event_args
 *
 *     Arguments for KFD_IOC_DBG_TRAP_SEND_RUNTIME_EVENT.
 *     Raises exceptions to runtime.
 *
 *     @exception_mask (IN) - exceptions to raise to runtime
 *     @gpu_id	       (IN) - target device id
 *     @queue_id       (IN) - target queue id
 *
 *     Generic errors apply (see kfd_dbg_trap_operations).
 *     Return - 0 on SUCCESS.
 *	      - ENODEV if gpu_id not found.
 *		If exception_mask contains EC_PROCESS_RUNTIME, unblocks pending
 *		AMDKFD_IOC_RUNTIME_ENABLE call - see kfd_ioctl_runtime_enable_args.
 *		All other exceptions are raised to runtime through err_payload_addr.
 *		See kfd_context_save_area_header.
 */
struct kfd_ioctl_dbg_trap_send_runtime_event_args {
	__u64 exception_mask;
	__u32 gpu_id;
	__u32 queue_id;
};

/**
 * kfd_ioctl_dbg_trap_set_exceptions_enabled_args
 *
 *     Arguments for KFD_IOC_DBG_TRAP_SET_EXCEPTIONS_ENABLED
 *     Set new exceptions to be raised to the debugger.
 *
 *     @exception_mask (IN) - new exceptions to raise to the debugger
 *
 *     Generic errors apply (see kfd_dbg_trap_operations).
 *     Return - 0 on SUCCESS.
 */
struct kfd_ioctl_dbg_trap_set_exceptions_enabled_args {
	__u64 exception_mask;
};

/**
 * kfd_ioctl_dbg_trap_set_wave_launch_override_args
 *
 *     Arguments for KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE
 *     Enable HW exceptions to raise trap.
 *
 *     @override_mode	     (IN)     - see kfd_dbg_trap_override_mode
 *     @enable_mask	     (IN/OUT) - reference kfd_dbg_trap_mask.
 *					IN is the override modes requested to be enabled.
 *					OUT is referenced in Return below.
 *     @support_request_mask (IN/OUT) - reference kfd_dbg_trap_mask.
 *					IN is the override modes requested for support check.
 *					OUT is referenced in Return below.
 *
 *     Generic errors apply (see kfd_dbg_trap_operations).
 *     Return - 0 on SUCCESS.
 *		Previous enablement is returned in @enable_mask.
 *		Actual override support is returned in @support_request_mask.
 *	      - EINVAL if override mode is not supported.
 *	      - EACCES if trap support requested is not actually supported.
 *		i.e. enable_mask (IN) is not a subset of support_request_mask (OUT).
 *		Otherwise it is considered a generic error (see kfd_dbg_trap_operations).
 */
struct kfd_ioctl_dbg_trap_set_wave_launch_override_args {
	__u32 override_mode;
	__u32 enable_mask;
	__u32 support_request_mask;
	__u32 pad;
};

/**
 * kfd_ioctl_dbg_trap_set_wave_launch_mode_args
 *
 *     Arguments for KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_MODE
 *     Set wave launch mode.
 *
 *     @launch_mode (IN) - see kfd_dbg_trap_wave_launch_mode
 *
 *     Generic errors apply (see kfd_dbg_trap_operations).
 *     Return - 0 on SUCCESS.
 */
struct kfd_ioctl_dbg_trap_set_wave_launch_mode_args {
	__u32 launch_mode;
	__u32 pad;
};

/**
 * kfd_ioctl_dbg_trap_suspend_queues_args
 *
 *     Arguments for KFD_IOC_DBG_TRAP_SUSPEND_QUEUES
 *     Suspend queues.
 *
 *     @exception_mask	(IN) - raised exceptions to clear
 *     @queue_array_ptr (IN) - pointer to array of queue ids (u32 per queue id)
 *			       to suspend
 *     @num_queues	(IN) - number of queues to suspend in @queue_array_ptr
 *     @grace_period	(IN) - wave time allowance before preemption
 *			       per 1K GPU clock cycle unit
 *
 *     Generic errors apply (see kfd_dbg_trap_operations).
 *     Destruction of a suspended queue is blocked until the queue is
 *     resumed.  This allows the debugger to access queue information and
 *     its context save area without running into a race condition on
 *     queue destruction.
 *     Automatically copies per queue context save area header information
 *     into the save area base
 *     (see kfd_queue_snapshot_entry and kfd_context_save_area_header).
 *
 *     Return - Number of queues suspended on SUCCESS.
 *		KFD_DBG_QUEUE_ERROR_MASK and KFD_DBG_QUEUE_INVALID_MASK masked
 *		into each queue id in the @queue_array_ptr array report the
 *		unsuccessful suspend reason.
 *		KFD_DBG_QUEUE_ERROR_MASK = HW failure.
 *		KFD_DBG_QUEUE_INVALID_MASK = queue does not exist, is new or
 *		is being destroyed.
 */
struct kfd_ioctl_dbg_trap_suspend_queues_args {
	__u64 exception_mask;
	__u64 queue_array_ptr;
	__u32 num_queues;
	__u32 grace_period;
};

/**
 * kfd_ioctl_dbg_trap_resume_queues_args
 *
 *     Arguments for KFD_IOC_DBG_TRAP_RESUME_QUEUES
 *     Resume queues.
 *
 *     @queue_array_ptr (IN) - pointer to array of queue ids (u32 per queue id)
 *			       to resume
 *     @num_queues	(IN) - number of queues to resume in @queue_array_ptr
 *
 *     Generic errors apply (see kfd_dbg_trap_operations).
 *     Return - Number of queues resumed on SUCCESS.
 *		KFD_DBG_QUEUE_ERROR_MASK and KFD_DBG_QUEUE_INVALID_MASK masked
 *		into each queue id in the @queue_array_ptr array report the
 *		unsuccessful resume reason.
 *		KFD_DBG_QUEUE_ERROR_MASK = HW failure.
 *		KFD_DBG_QUEUE_INVALID_MASK = queue does not exist.
 */
struct kfd_ioctl_dbg_trap_resume_queues_args {
	__u64 queue_array_ptr;
	__u32 num_queues;
	__u32 pad;
};

/**
 * kfd_ioctl_dbg_trap_set_node_address_watch_args
 *
 *     Arguments for KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH
 *     Sets address watch for device.
 *
 *     @address	(IN)  - watch address to set
 *     @mode    (IN)  - see kfd_dbg_trap_address_watch_mode
 *     @mask    (IN)  - watch address mask
 *     @gpu_id  (IN)  - target gpu to set watch point
 *     @id      (OUT) - watch id allocated
 *
 *     Generic errors apply (see kfd_dbg_trap_operations).
 *     Return - 0 on SUCCESS.
 *		Allocated watch ID returned to @id.
 *	      - ENODEV if gpu_id not found.
 *	      - ENOMEM if watch IDs cannot be allocated.
 */
struct kfd_ioctl_dbg_trap_set_node_address_watch_args {
	__u64 address;
	__u32 mode;
	__u32 mask;
	__u32 gpu_id;
	__u32 id;
};

/**
 * kfd_ioctl_dbg_trap_clear_node_address_watch_args
 *
 *     Arguments for KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH
 *     Clear address watch for device.
 *
 *     @gpu_id  (IN)  - target device to clear watch point
 *     @id      (IN)  - allocated watch id to clear
 *
 *     Generic errors apply (see kfd_dbg_trap_operations).
 *     Return - 0 on SUCCESS.
 *	      - ENODEV if gpu_id not found.
 *	      - EINVAL if watch ID has not been allocated.
 */
struct kfd_ioctl_dbg_trap_clear_node_address_watch_args {
	__u32 gpu_id;
	__u32 id;
};

/**
 * kfd_ioctl_dbg_trap_set_flags_args
 *
 *     Arguments for KFD_IOC_DBG_TRAP_SET_FLAGS
 *     Sets flags for wave behaviour.
 *
 *     @flags (IN/OUT) - IN = flags to enable, OUT = flags previously enabled
 *
 *     Generic errors apply (see kfd_dbg_trap_operations).
 *     Return - 0 on SUCCESS.
 *	      - EACCES if any debug device does not allow flag options.
 */
struct kfd_ioctl_dbg_trap_set_flags_args {
	__u32 flags;
	__u32 pad;
};

/**
 * kfd_ioctl_dbg_trap_query_debug_event_args
 *
 *     Arguments for KFD_IOC_DBG_TRAP_QUERY_DEBUG_EVENT
 *
 *     Find one or more raised exceptions. This function can return multiple
 *     exceptions from a single queue or a single device with one call. To find
 *     all raised exceptions, this function must be called repeatedly until it
 *     returns -EAGAIN. Returned exceptions can optionally be cleared by
 *     setting the corresponding bit in the @exception_mask input parameter.
 *     However, clearing an exception prevents retrieving further information
 *     about it with KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO.
 *
 *     @exception_mask (IN/OUT) - exception to clear (IN) and raised (OUT)
 *     @gpu_id	       (OUT)    - gpu id of exceptions raised
 *     @queue_id       (OUT)    - queue id of exceptions raised
 *
 *     Generic errors apply (see kfd_dbg_trap_operations).
 *     Return - 0 on raised exception found
 *              Raised exceptions found are returned in @exception_mask
 *              with reported source id returned in @gpu_id or @queue_id.
 *            - EAGAIN if no raised exception has been found
 */
struct kfd_ioctl_dbg_trap_query_debug_event_args {
	__u64 exception_mask;
	__u32 gpu_id;
	__u32 queue_id;
};

/**
 * kfd_ioctl_dbg_trap_query_exception_info_args
 *
 *     Arguments for KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO
 *     Get additional info on raised exception.
 *
 *     @info_ptr	(IN)	 - pointer to exception info buffer to copy to
 *     @info_size	(IN/OUT) - exception info buffer size (bytes)
 *     @source_id	(IN)     - target gpu or queue id
 *     @exception_code	(IN)     - target exception
 *     @clear_exception	(IN)     - clear raised @exception_code exception
 *				   (0 = false, 1 = true)
 *
 *     Generic errors apply (see kfd_dbg_trap_operations).
 *     Return - 0 on SUCCESS.
 *              If @exception_code is EC_DEVICE_MEMORY_VIOLATION, copy @info_size(OUT)
 *		bytes of memory exception data to @info_ptr.
 *              If @exception_code is EC_PROCESS_RUNTIME, copy saved
 *              kfd_runtime_info to @info_ptr.
 *              Actual required @info_ptr size (bytes) is returned in @info_size.
 */
struct kfd_ioctl_dbg_trap_query_exception_info_args {
	__u64 info_ptr;
	__u32 info_size;
	__u32 source_id;
	__u32 exception_code;
	__u32 clear_exception;
};

/**
 * kfd_ioctl_dbg_trap_get_queue_snapshot_args
 *
 *     Arguments for KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT
 *     Get queue information.
 *
 *     @exception_mask	 (IN)	  - exceptions raised to clear
 *     @snapshot_buf_ptr (IN)	  - queue snapshot entry buffer (see kfd_queue_snapshot_entry)
 *     @num_queues	 (IN/OUT) - number of queue snapshot entries
 *         The debugger specifies the size of the array allocated in @num_queues.
 *         KFD returns the number of queues that actually existed. If this is
 *         larger than the size specified by the debugger, KFD will not overflow
 *         the array allocated by the debugger.
 *
 *     @entry_size	 (IN/OUT) - size per entry in bytes
 *         The debugger specifies sizeof(struct kfd_queue_snapshot_entry) in
 *         @entry_size. KFD returns the number of bytes actually populated per
 *         entry. The debugger should use the KFD_IOCTL_MINOR_VERSION to determine
 *         which fields in struct kfd_queue_snapshot_entry are valid. This allows
 *         growing the ABI in a backwards compatible manner.
 *         Note that entry_size(IN) should still be used to stride the snapshot buffer in the
 *         event that it's larger than actual kfd_queue_snapshot_entry.
 *
 *     Generic errors apply (see kfd_dbg_trap_operations).
 *     Return - 0 on SUCCESS.
 *              Copies @num_queues(IN) queue snapshot entries of size @entry_size(IN)
 *              into @snapshot_buf_ptr if @num_queues(IN) > 0.
 *              Otherwise return @num_queues(OUT) queue snapshot entries that exist.
 */
struct kfd_ioctl_dbg_trap_queue_snapshot_args {
	__u64 exception_mask;
	__u64 snapshot_buf_ptr;
	__u32 num_queues;
	__u32 entry_size;
};
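
/*
 * Example: a hedged sketch (not part of this UAPI) of the sizing pattern
 * described above: query the queue count with num_queues = 0, then fetch
 * the snapshot. "trap" is a struct kfd_ioctl_dbg_trap_args (defined below)
 * whose pid is already set; error handling is elided. Needs <stdlib.h>.
 *
 *	trap.op = KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT;
 *	trap.queue_snapshot.num_queues = 0;
 *	trap.queue_snapshot.entry_size =
 *		sizeof(struct kfd_queue_snapshot_entry);
 *	ioctl(kfd_fd, AMDKFD_IOC_DBG_TRAP, &trap);
 *
 *	n = trap.queue_snapshot.num_queues;	// queues that existed
 *	buf = calloc(n, sizeof(struct kfd_queue_snapshot_entry));
 *	trap.queue_snapshot.snapshot_buf_ptr = (__u64)(uintptr_t)buf;
 *	trap.queue_snapshot.num_queues = n;
 *	ioctl(kfd_fd, AMDKFD_IOC_DBG_TRAP, &trap);
 */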

/**
 * kfd_ioctl_dbg_trap_get_device_snapshot_args
 *
 *     Arguments for KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT
 *     Get device information.
 *
 *     @exception_mask	 (IN)	  - exceptions raised to clear
 *     @snapshot_buf_ptr (IN)	  - pointer to snapshot buffer (see kfd_dbg_device_info_entry)
 *     @num_devices	 (IN/OUT) - number of debug devices to snapshot
 *         The debugger specifies the size of the array allocated in @num_devices.
 *         KFD returns the number of devices that actually existed. If this is
 *         larger than the size specified by the debugger, KFD will not overflow
 *         the array allocated by the debugger.
 *
 *     @entry_size	 (IN/OUT) - size per entry in bytes
 *         The debugger specifies sizeof(struct kfd_dbg_device_info_entry) in
 *         @entry_size. KFD returns the number of bytes actually populated. The
 *         debugger should use KFD_IOCTL_MINOR_VERSION to determine which fields
 *         in struct kfd_dbg_device_info_entry are valid. This allows growing the
 *         ABI in a backwards compatible manner.
 *         Note that entry_size(IN) should still be used to stride the snapshot buffer in the
 *         event that it's larger than actual kfd_dbg_device_info_entry.
 *
 *     Generic errors apply (see kfd_dbg_trap_operations).
 *     Return - 0 on SUCCESS.
 *              Copies @num_devices(IN) device snapshot entries of size @entry_size(IN)
 *              into @snapshot_buf_ptr if @num_devices(IN) > 0.
 *              Otherwise return @num_devices(OUT) device snapshot entries that exist.
 */
struct kfd_ioctl_dbg_trap_device_snapshot_args {
	__u64 exception_mask;
	__u64 snapshot_buf_ptr;
	__u32 num_devices;
	__u32 entry_size;
};

/**
 * kfd_ioctl_dbg_trap_args
 *
 * Arguments to debug target process.
 *
 *     @pid - target process to debug
 *     @op  - debug operation (see kfd_dbg_trap_operations)
 *
 *     @op determines which union struct args to use.
 *     Refer to kern docs for each kfd_ioctl_dbg_trap_*_args struct.
 */
struct kfd_ioctl_dbg_trap_args {
	__u32 pid;
	__u32 op;

	union {
		struct kfd_ioctl_dbg_trap_enable_args enable;
		struct kfd_ioctl_dbg_trap_send_runtime_event_args send_runtime_event;
		struct kfd_ioctl_dbg_trap_set_exceptions_enabled_args set_exceptions_enabled;
		struct kfd_ioctl_dbg_trap_set_wave_launch_override_args launch_override;
		struct kfd_ioctl_dbg_trap_set_wave_launch_mode_args launch_mode;
		struct kfd_ioctl_dbg_trap_suspend_queues_args suspend_queues;
		struct kfd_ioctl_dbg_trap_resume_queues_args resume_queues;
		struct kfd_ioctl_dbg_trap_set_node_address_watch_args set_node_address_watch;
		struct kfd_ioctl_dbg_trap_clear_node_address_watch_args clear_node_address_watch;
		struct kfd_ioctl_dbg_trap_set_flags_args set_flags;
		struct kfd_ioctl_dbg_trap_query_debug_event_args query_debug_event;
		struct kfd_ioctl_dbg_trap_query_exception_info_args query_exception_info;
		struct kfd_ioctl_dbg_trap_queue_snapshot_args queue_snapshot;
		struct kfd_ioctl_dbg_trap_device_snapshot_args device_snapshot;
	};
};
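
/*
 * Example: a hedged sketch (not part of this UAPI) of a debugger enabling a
 * debug session on a ptrace-attached target process. "ev_fd" is a descriptor
 * the debugger created (e.g. a pipe write end) for exception notification.
 *
 *	struct kfd_runtime_info rinfo = {0};
 *	struct kfd_ioctl_dbg_trap_args trap = {
 *		.pid = target_pid,
 *		.op = KFD_IOC_DBG_TRAP_ENABLE,
 *		.enable = {
 *			.exception_mask = KFD_EC_MASK(EC_QUEUE_WAVE_TRAP),
 *			.rinfo_ptr = (__u64)(uintptr_t)&rinfo,
 *			.rinfo_size = sizeof(rinfo),
 *			.dbg_fd = ev_fd,
 *		},
 *	};
 *
 *	ioctl(kfd_fd, AMDKFD_IOC_DBG_TRAP, &trap);
 */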

#define AMDKFD_IOCTL_BASE 'K'
#define AMDKFD_IO(nr)			_IO(AMDKFD_IOCTL_BASE, nr)
#define AMDKFD_IOR(nr, type)		_IOR(AMDKFD_IOCTL_BASE, nr, type)
#define AMDKFD_IOW(nr, type)		_IOW(AMDKFD_IOCTL_BASE, nr, type)
#define AMDKFD_IOWR(nr, type)		_IOWR(AMDKFD_IOCTL_BASE, nr, type)

#define AMDKFD_IOC_GET_VERSION			\
		AMDKFD_IOR(0x01, struct kfd_ioctl_get_version_args)

#define AMDKFD_IOC_CREATE_QUEUE			\
		AMDKFD_IOWR(0x02, struct kfd_ioctl_create_queue_args)

#define AMDKFD_IOC_DESTROY_QUEUE		\
		AMDKFD_IOWR(0x03, struct kfd_ioctl_destroy_queue_args)

#define AMDKFD_IOC_SET_MEMORY_POLICY		\
		AMDKFD_IOW(0x04, struct kfd_ioctl_set_memory_policy_args)

#define AMDKFD_IOC_GET_CLOCK_COUNTERS		\
		AMDKFD_IOWR(0x05, struct kfd_ioctl_get_clock_counters_args)

#define AMDKFD_IOC_GET_PROCESS_APERTURES	\
		AMDKFD_IOR(0x06, struct kfd_ioctl_get_process_apertures_args)

#define AMDKFD_IOC_UPDATE_QUEUE			\
		AMDKFD_IOW(0x07, struct kfd_ioctl_update_queue_args)

#define AMDKFD_IOC_CREATE_EVENT			\
		AMDKFD_IOWR(0x08, struct kfd_ioctl_create_event_args)

#define AMDKFD_IOC_DESTROY_EVENT		\
		AMDKFD_IOW(0x09, struct kfd_ioctl_destroy_event_args)

#define AMDKFD_IOC_SET_EVENT			\
		AMDKFD_IOW(0x0A, struct kfd_ioctl_set_event_args)

#define AMDKFD_IOC_RESET_EVENT			\
		AMDKFD_IOW(0x0B, struct kfd_ioctl_reset_event_args)

#define AMDKFD_IOC_WAIT_EVENTS			\
		AMDKFD_IOWR(0x0C, struct kfd_ioctl_wait_events_args)

#define AMDKFD_IOC_DBG_REGISTER_DEPRECATED	\
		AMDKFD_IOW(0x0D, struct kfd_ioctl_dbg_register_args)

#define AMDKFD_IOC_DBG_UNREGISTER_DEPRECATED	\
		AMDKFD_IOW(0x0E, struct kfd_ioctl_dbg_unregister_args)

#define AMDKFD_IOC_DBG_ADDRESS_WATCH_DEPRECATED	\
		AMDKFD_IOW(0x0F, struct kfd_ioctl_dbg_address_watch_args)

#define AMDKFD_IOC_DBG_WAVE_CONTROL_DEPRECATED	\
		AMDKFD_IOW(0x10, struct kfd_ioctl_dbg_wave_control_args)

#define AMDKFD_IOC_SET_SCRATCH_BACKING_VA	\
		AMDKFD_IOWR(0x11, struct kfd_ioctl_set_scratch_backing_va_args)

#define AMDKFD_IOC_GET_TILE_CONFIG                                      \
		AMDKFD_IOWR(0x12, struct kfd_ioctl_get_tile_config_args)

#define AMDKFD_IOC_SET_TRAP_HANDLER		\
		AMDKFD_IOW(0x13, struct kfd_ioctl_set_trap_handler_args)

#define AMDKFD_IOC_GET_PROCESS_APERTURES_NEW	\
		AMDKFD_IOWR(0x14,		\
			struct kfd_ioctl_get_process_apertures_new_args)

#define AMDKFD_IOC_ACQUIRE_VM			\
		AMDKFD_IOW(0x15, struct kfd_ioctl_acquire_vm_args)

#define AMDKFD_IOC_ALLOC_MEMORY_OF_GPU		\
		AMDKFD_IOWR(0x16, struct kfd_ioctl_alloc_memory_of_gpu_args)

#define AMDKFD_IOC_FREE_MEMORY_OF_GPU		\
		AMDKFD_IOW(0x17, struct kfd_ioctl_free_memory_of_gpu_args)

#define AMDKFD_IOC_MAP_MEMORY_TO_GPU		\
		AMDKFD_IOWR(0x18, struct kfd_ioctl_map_memory_to_gpu_args)

#define AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU	\
		AMDKFD_IOWR(0x19, struct kfd_ioctl_unmap_memory_from_gpu_args)

#define AMDKFD_IOC_SET_CU_MASK		\
		AMDKFD_IOW(0x1A, struct kfd_ioctl_set_cu_mask_args)

#define AMDKFD_IOC_GET_QUEUE_WAVE_STATE		\
		AMDKFD_IOWR(0x1B, struct kfd_ioctl_get_queue_wave_state_args)

#define AMDKFD_IOC_GET_DMABUF_INFO		\
		AMDKFD_IOWR(0x1C, struct kfd_ioctl_get_dmabuf_info_args)

#define AMDKFD_IOC_IMPORT_DMABUF		\
		AMDKFD_IOWR(0x1D, struct kfd_ioctl_import_dmabuf_args)

#define AMDKFD_IOC_ALLOC_QUEUE_GWS		\
		AMDKFD_IOWR(0x1E, struct kfd_ioctl_alloc_queue_gws_args)

#define AMDKFD_IOC_SMI_EVENTS			\
		AMDKFD_IOWR(0x1F, struct kfd_ioctl_smi_events_args)

#define AMDKFD_IOC_SVM	AMDKFD_IOWR(0x20, struct kfd_ioctl_svm_args)

#define AMDKFD_IOC_SET_XNACK_MODE		\
		AMDKFD_IOWR(0x21, struct kfd_ioctl_set_xnack_mode_args)

#define AMDKFD_IOC_CRIU_OP			\
		AMDKFD_IOWR(0x22, struct kfd_ioctl_criu_args)

#define AMDKFD_IOC_AVAILABLE_MEMORY		\
		AMDKFD_IOWR(0x23, struct kfd_ioctl_get_available_memory_args)

#define AMDKFD_IOC_EXPORT_DMABUF		\
		AMDKFD_IOWR(0x24, struct kfd_ioctl_export_dmabuf_args)

#define AMDKFD_IOC_RUNTIME_ENABLE		\
		AMDKFD_IOWR(0x25, struct kfd_ioctl_runtime_enable_args)

#define AMDKFD_IOC_DBG_TRAP			\
		AMDKFD_IOWR(0x26, struct kfd_ioctl_dbg_trap_args)

#define AMDKFD_COMMAND_START		0x01
#define AMDKFD_COMMAND_END		0x27

#endif