/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef KFD_PRIV_H_INCLUDED
#define KFD_PRIV_H_INCLUDED

#include <linux/hashtable.h>
#include <linux/mmu_notifier.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kfd_ioctl.h>
#include <linux/idr.h>
#include <linux/kfifo.h>
#include <linux/seq_file.h>
#include <linux/kref.h>
#include <kgd_kfd_interface.h>

#include "amd_shared.h"

#define KFD_MAX_RING_ENTRY_SIZE	8

#define KFD_SYSFS_FILE_MODE 0444

/* GPU ID hash width in bits */
#define KFD_GPU_ID_HASH_WIDTH 16
/* Use upper bits of mmap offset to store KFD driver specific information.
 * BITS[63:62] - Encode MMAP type
 * BITS[61:46] - Encode gpu_id, identifying which GPU the offset belongs to
 * BITS[45:0]  - MMAP offset value
 *
 * NOTE: struct vm_area_struct.vm_pgoff uses offset in pages. Hence, these
 *  defines are w.r.t PAGE_SIZE
 */
#define KFD_MMAP_TYPE_SHIFT	(62 - PAGE_SHIFT)
#define KFD_MMAP_TYPE_MASK	(0x3ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_DOORBELL	(0x3ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_EVENTS	(0x2ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_RESERVED_MEM	(0x1ULL << KFD_MMAP_TYPE_SHIFT)

#define KFD_MMAP_GPU_ID_SHIFT (46 - PAGE_SHIFT)
#define KFD_MMAP_GPU_ID_MASK (((1ULL << KFD_GPU_ID_HASH_WIDTH) - 1) \
				<< KFD_MMAP_GPU_ID_SHIFT)
#define KFD_MMAP_GPU_ID(gpu_id) ((((uint64_t)gpu_id) << KFD_MMAP_GPU_ID_SHIFT)\
				& KFD_MMAP_GPU_ID_MASK)
#define KFD_MMAP_GPU_ID_GET(offset)    ((offset & KFD_MMAP_GPU_ID_MASK) \
				>> KFD_MMAP_GPU_ID_SHIFT)

#define KFD_MMAP_OFFSET_VALUE_MASK	(0x3FFFFFFFFFFFULL >> PAGE_SHIFT)
#define KFD_MMAP_OFFSET_VALUE_GET(offset) (offset & KFD_MMAP_OFFSET_VALUE_MASK)
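
/*
 * Illustrative sketch (not part of this header): composing and decoding a
 * doorbell mmap offset with the macros above. The decode side matches what
 * the KFD mmap handler does with vma->vm_pgoff; variable names here are
 * hypothetical.
 *
 *	// Page-based offset handed to user space for a later mmap():
 *	uint64_t offset = KFD_MMAP_TYPE_DOORBELL | KFD_MMAP_GPU_ID(gpu_id);
 *
 *	// Decode inside the mmap handler:
 *	uint32_t gpu_id = KFD_MMAP_GPU_ID_GET(vma->vm_pgoff);
 *	uint64_t value  = KFD_MMAP_OFFSET_VALUE_GET(vma->vm_pgoff);
 */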

/*
 * When working with the cp scheduler, we should assign the HIQ to a fixed
 * hqd slot, either manually or via the amdgpu driver. These are the fixed
 * HIQ hqd slot definitions for Kaveri. On Kaveri, only the queues of the
 * first ME participate in cp scheduling; with that in mind, we set the HIQ
 * slot in the second ME.
 */
#define KFD_CIK_HIQ_PIPE 4
#define KFD_CIK_HIQ_QUEUE 0

/* Macro for allocating structures */
#define kfd_alloc_struct(ptr_to_struct)	\
	((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL))
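
/*
 * Example (illustrative): kfd_alloc_struct() derives the allocation size
 * from the pointer's own type, so the declaration stays the single source
 * of truth:
 *
 *	struct kfd_dev *kfd;
 *
 *	kfd = kfd_alloc_struct(kfd);	// kzalloc(sizeof(*kfd), GFP_KERNEL)
 *	if (!kfd)
 *		return NULL;
 */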

#define KFD_MAX_NUM_OF_PROCESSES 512
#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024

/*
 * Size of the per-process TBA+TMA buffer: 2 pages
 *
 * The first page is the TBA used for the CWSR ISA code. The second
 * page is used as TMA for daisy-chaining a user-mode trap handler.
 */
#define KFD_CWSR_TBA_TMA_SIZE (PAGE_SIZE * 2)
#define KFD_CWSR_TMA_OFFSET PAGE_SIZE
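
/*
 * Layout sketch: the two pages sit back to back, so the TMA is derived from
 * the TBA (this mirrors how the CWSR setup code computes qpd->tba_addr and
 * qpd->tma_addr):
 *
 *	tba_addr = cwsr_base;				// page 0: trap handler code
 *	tma_addr = tba_addr + KFD_CWSR_TMA_OFFSET;	// page 1: trap memory
 */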

/*
 * Kernel module parameter to specify maximum number of supported queues per
 * device
 */
extern int max_num_of_queues_per_device;

#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE		\
	(KFD_MAX_NUM_OF_PROCESSES *			\
			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)

#define KFD_KERNEL_QUEUE_SIZE 2048

/* Kernel module parameter to specify the scheduling policy */
extern int sched_policy;

/*
 * Kernel module parameter to specify the maximum process
 * number per HW scheduler
 */
extern int hws_max_conc_proc;

extern int cwsr_enable;

/*
 * Kernel module parameter to specify whether to send sigterm to HSA process on
 * unhandled exception
 */
extern int send_sigterm;

/*
 * Kernel module parameter used to simulate a large-BAR machine on machines
 * where large BAR is not enabled.
 */
extern int debug_largebar;

/*
 * Ignore CRAT table during KFD initialization, can be used to work around
 * broken CRAT tables on some AMD systems
 */
extern int ignore_crat;

/*
 * Set sh_mem_config.retry_disable on Vega10
 */
extern int noretry;

/*
 * Halt if HWS hang is detected
 */
extern int halt_if_hws_hang;

enum cache_policy {
	cache_policy_coherent,
	cache_policy_noncoherent
};

#define KFD_IS_SOC15(chip) ((chip) >= CHIP_VEGA10)

struct kfd_event_interrupt_class {
	bool (*interrupt_isr)(struct kfd_dev *dev,
			const uint32_t *ih_ring_entry, uint32_t *patched_ihre,
			bool *patched_flag);
	void (*interrupt_wq)(struct kfd_dev *dev,
			const uint32_t *ih_ring_entry);
};

struct kfd_device_info {
	enum amd_asic_type asic_family;
	const struct kfd_event_interrupt_class *event_interrupt_class;
	unsigned int max_pasid_bits;
	unsigned int max_no_of_hqd;
	unsigned int doorbell_size;
	size_t ih_ring_entry_size;
	uint8_t num_of_watch_points;
	uint16_t mqd_size_aligned;
	bool supports_cwsr;
	bool needs_iommu_device;
	bool needs_pci_atomics;
	unsigned int num_sdma_engines;
};

struct kfd_mem_obj {
	uint32_t range_start;
	uint32_t range_end;
	uint64_t gpu_addr;
	uint32_t *cpu_ptr;
	void *gtt_mem;
};

struct kfd_vmid_info {
	uint32_t first_vmid_kfd;
	uint32_t last_vmid_kfd;
	uint32_t vmid_num_kfd;
};

struct kfd_dev {
	struct kgd_dev *kgd;

	const struct kfd_device_info *device_info;
	struct pci_dev *pdev;

	unsigned int id;		/* topology stub index */

	phys_addr_t doorbell_base;	/* Start of actual doorbells used by
					 * KFD. It is aligned for mapping
					 * into user mode
					 */
	size_t doorbell_id_offset;	/* Doorbell offset (from KFD doorbell
					 * to HW doorbell, GFX reserved some
					 * at the start)
					 */
	u32 __iomem *doorbell_kernel_ptr; /* This is a pointer to the doorbell
					   * page used by the kernel queue
					   */

	struct kgd2kfd_shared_resources shared_resources;
	struct kfd_vmid_info vm_info;

	const struct kfd2kgd_calls *kfd2kgd;
	struct mutex doorbell_mutex;
	DECLARE_BITMAP(doorbell_available_index,
			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);

	void *gtt_mem;
	uint64_t gtt_start_gpu_addr;
	void *gtt_start_cpu_ptr;
	void *gtt_sa_bitmap;
	struct mutex gtt_sa_lock;
	unsigned int gtt_sa_chunk_size;
	unsigned int gtt_sa_num_of_chunks;

	/* Interrupts */
	struct kfifo ih_fifo;
	struct workqueue_struct *ih_wq;
	struct work_struct interrupt_work;
	spinlock_t interrupt_lock;

	/* QCM Device instance */
	struct device_queue_manager *dqm;

	bool init_complete;
	/*
	 * Interrupts of interest to KFD are copied
	 * from the HW ring into a SW ring.
	 */
	bool interrupts_active;

	/* Debug manager */
	struct kfd_dbgmgr           *dbgmgr;

	/* Maximum process number mapped to HW scheduler */
	unsigned int max_proc_per_quantum;

	/* CWSR */
	bool cwsr_enabled;
	const void *cwsr_isa;
	unsigned int cwsr_isa_size;

	/* xGMI */
	uint64_t hive_id;
};

/* KGD2KFD callbacks */
void kgd2kfd_exit(void);
struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
			struct pci_dev *pdev, const struct kfd2kgd_calls *f2g);
bool kgd2kfd_device_init(struct kfd_dev *kfd,
			const struct kgd2kfd_shared_resources *gpu_resources);
void kgd2kfd_device_exit(struct kfd_dev *kfd);

enum kfd_mempool {
	KFD_MEMPOOL_SYSTEM_CACHEABLE = 1,
	KFD_MEMPOOL_SYSTEM_WRITECOMBINE = 2,
	KFD_MEMPOOL_FRAMEBUFFER = 3,
};

/* Character device interface */
int kfd_chardev_init(void);
void kfd_chardev_exit(void);
struct device *kfd_chardev(void);

/**
 * enum kfd_unmap_queues_filter
 *
 * @KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE: Preempts a single queue.
 *
 * @KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES: Preempts all queues in the
 *						running queues list.
 *
 * @KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES: Preempts all non-static queues
 *						in the run list.
 *
 * @KFD_UNMAP_QUEUES_FILTER_BY_PASID: Preempts queues that belong to a
 *						specific process.
 */
enum kfd_unmap_queues_filter {
	KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE,
	KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES,
	KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES,
	KFD_UNMAP_QUEUES_FILTER_BY_PASID
};

/**
 * enum kfd_queue_type
 *
 * @KFD_QUEUE_TYPE_COMPUTE: Regular user mode queue type.
 *
 * @KFD_QUEUE_TYPE_SDMA: SDMA user mode queue type.
 *
 * @KFD_QUEUE_TYPE_HIQ: HIQ queue type.
 *
 * @KFD_QUEUE_TYPE_DIQ: DIQ queue type.
 */
enum kfd_queue_type {
	KFD_QUEUE_TYPE_COMPUTE,
	KFD_QUEUE_TYPE_SDMA,
	KFD_QUEUE_TYPE_HIQ,
	KFD_QUEUE_TYPE_DIQ
};

enum kfd_queue_format {
	KFD_QUEUE_FORMAT_PM4,
	KFD_QUEUE_FORMAT_AQL
};

/**
 * struct queue_properties
 *
 * @type: The queue type.
 *
 * @queue_id: Queue identifier.
 *
 * @queue_address: Queue ring buffer address.
 *
 * @queue_size: Queue ring buffer size.
 *
 * @priority: Defines the queue priority relative to other queues in the
 * process.
 * This is just an indication and HW scheduling may override the priority as
 * necessary while keeping the relative prioritization.
 * The priority granularity is from 0 to f, where f is the highest priority.
 * Currently all queues are initialized with the highest priority.
 *
 * @queue_percent: This field is partially implemented; currently, a zero in
 * this field marks the queue as non-active.
 *
 * @read_ptr: User space address which points to the number of dwords the
 * cp has read from the ring buffer. This field is updated automatically by
 * the H/W.
 *
 * @write_ptr: Defines the number of dwords written to the ring buffer.
 *
 * @doorbell_ptr: Notifies the H/W of new packets written to the queue ring
 * buffer. It should hold the same value as @write_ptr, and user space should
 * update it after updating @write_ptr.
 *
 * @doorbell_off: The doorbell offset in the doorbell PCI BAR.
 *
 * @is_interop: Defines if this is an interop queue. An interop queue can
 * access both graphics and compute resources.
 *
 * @is_evicted: Defines if the queue is evicted. Only active queues
 * are evicted, rendering them inactive.
 *
 * @is_active: Defines if the queue is active or not. @is_active and
 * @is_evicted are protected by the DQM lock.
 *
 * @vmid: If the scheduling mode is not cp scheduling, this field defines
 * the vmid of the queue.
 *
 * This structure represents the queue properties for each queue, whether it
 * is a user mode or a kernel mode queue.
 *
 */
struct queue_properties {
	enum kfd_queue_type type;
	enum kfd_queue_format format;
	unsigned int queue_id;
	uint64_t queue_address;
	uint64_t queue_size;
	uint32_t priority;
	uint32_t queue_percent;
	uint32_t *read_ptr;
	uint32_t *write_ptr;
	void __iomem *doorbell_ptr;
	uint32_t doorbell_off;
	bool is_interop;
	bool is_evicted;
	bool is_active;
	/* Not relevant for user mode queues in cp scheduling */
	unsigned int vmid;
	/* Relevant only for sdma queues */
	uint32_t sdma_engine_id;
	uint32_t sdma_queue_id;
	uint32_t sdma_vm_addr;
	/* Relevant only for VI */
	uint64_t eop_ring_buffer_address;
	uint32_t eop_ring_buffer_size;
	uint64_t ctx_save_restore_area_address;
	uint32_t ctx_save_restore_area_size;
	uint32_t ctl_stack_size;
	uint64_t tba_addr;
	uint64_t tma_addr;
	/* Relevant for CU */
	uint32_t cu_mask_count; /* Must be a multiple of 32 */
	uint32_t *cu_mask;
};
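
/*
 * Sketch of how the activity flags above interact; this mirrors the check
 * the MQD managers apply when a queue is updated (see the update_mqd()
 * implementations):
 *
 *	q->is_active = (q->queue_size > 0 &&
 *			q->queue_address != 0 &&
 *			q->queue_percent > 0 &&
 *			!q->is_evicted);
 */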

/**
 * struct queue
 *
 * @list: Queue linked list.
 *
 * @mqd: The queue MQD.
 *
 * @mqd_mem_obj: The MQD local gpu memory object.
 *
 * @gart_mqd_addr: The MQD gart mc address.
 *
 * @properties: The queue properties.
 *
 * @mec: Used only in no cp scheduling mode and identifies the micro engine
 *	 id that the queue should be executed on.
 *
 * @pipe: Used only in no cp scheduling mode and identifies the queue's pipe
 *	  id.
 *
 * @queue: Used only in no cp scheduling mode and identifies the queue's slot.
 *
 * @process: The kfd process that created this queue.
 *
 * @device: The kfd device that created this queue.
 *
 * This structure represents user mode compute queues.
 * It contains all the necessary data to handle such queues.
 *
 */

struct queue {
	struct list_head list;
	void *mqd;
	struct kfd_mem_obj *mqd_mem_obj;
	uint64_t gart_mqd_addr;
	struct queue_properties properties;

	uint32_t mec;
	uint32_t pipe;
	uint32_t queue;

	unsigned int sdma_id;
	unsigned int doorbell_id;

	struct kfd_process	*process;
	struct kfd_dev		*device;
};

/*
 * Please read the kfd_mqd_manager.h description.
 */
enum KFD_MQD_TYPE {
	KFD_MQD_TYPE_COMPUTE = 0,	/* for no cp scheduling */
	KFD_MQD_TYPE_HIQ,		/* for hiq */
	KFD_MQD_TYPE_CP,		/* for cp queues and diq */
	KFD_MQD_TYPE_SDMA,		/* for sdma queues */
	KFD_MQD_TYPE_MAX
};

struct scheduling_resources {
	unsigned int vmid_mask;
	enum kfd_queue_type type;
	uint64_t queue_mask;
	uint64_t gws_mask;
	uint32_t oac_mask;
	uint32_t gds_heap_base;
	uint32_t gds_heap_size;
};

struct process_queue_manager {
	/* data */
	struct kfd_process	*process;
	struct list_head	queues;
	unsigned long		*queue_slot_bitmap;
};

struct qcm_process_device {
	/* The Device Queue Manager that owns this data */
	struct device_queue_manager *dqm;
	struct process_queue_manager *pqm;
	/* Queues list */
	struct list_head queues_list;
	struct list_head priv_queue_list;

	unsigned int queue_count;
	unsigned int vmid;
	bool is_debug;
	unsigned int evicted; /* eviction counter, 0=active */

	/* This flag tells if we should reset all wavefronts on
	 * process termination
	 */
	bool reset_wavefronts;

	/*
	 * All the memory management data should be here too
	 */
	uint64_t gds_context_area;
	uint32_t sh_mem_config;
	uint32_t sh_mem_bases;
	uint32_t sh_mem_ape1_base;
	uint32_t sh_mem_ape1_limit;
	uint32_t page_table_base;
	uint32_t gds_size;
	uint32_t num_gws;
	uint32_t num_oac;
	uint32_t sh_hidden_private_base;

	/* CWSR memory */
	void *cwsr_kaddr;
	uint64_t cwsr_base;
	uint64_t tba_addr;
	uint64_t tma_addr;

	/* IB memory */
	uint64_t ib_base;
	void *ib_kaddr;

	/* doorbell resources per process per device */
	unsigned long *doorbell_bitmap;
};

/* KFD Memory Eviction */

/* Approx. wait time before attempting to restore evicted BOs */
#define PROCESS_RESTORE_TIME_MS 100
/* Approx. back off time if restore fails due to lack of memory */
#define PROCESS_BACK_OFF_TIME_MS 100
/* Approx. time before evicting the process again */
#define PROCESS_ACTIVE_TIME_MS 10

int kgd2kfd_quiesce_mm(struct mm_struct *mm);
int kgd2kfd_resume_mm(struct mm_struct *mm);
int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
					       struct dma_fence *fence);

/* 8 byte handle containing GPU ID in the most significant 4 bytes and
 * idr_handle in the least significant 4 bytes
 */
#define MAKE_HANDLE(gpu_id, idr_handle) \
	(((uint64_t)(gpu_id) << 32) + idr_handle)
#define GET_GPU_ID(handle) (handle >> 32)
#define GET_IDR_HANDLE(handle) (handle & 0xFFFFFFFF)
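
/*
 * Illustrative round trip (names hypothetical): packing a GPU ID and an IDR
 * handle into the 8-byte handle handed to user space, then unpacking it:
 *
 *	uint64_t handle = MAKE_HANDLE(gpu_id, idr_handle);
 *
 *	WARN_ON(GET_GPU_ID(handle) != gpu_id);
 *	WARN_ON(GET_IDR_HANDLE(handle) != idr_handle);
 */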

enum kfd_pdd_bound {
	PDD_UNBOUND = 0,
	PDD_BOUND,
	PDD_BOUND_SUSPENDED,
};

/* Data that is per-process-per device. */
struct kfd_process_device {
	/*
	 * List of all per-device data for a process.
	 * Starts from kfd_process.per_device_data.
	 */
	struct list_head per_device_list;

	/* The device that owns this data. */
	struct kfd_dev *dev;

	/* The process that owns this kfd_process_device. */
	struct kfd_process *process;

	/* per-process-per device QCM data structure */
	struct qcm_process_device qpd;

	/* Apertures */
	uint64_t lds_base;
	uint64_t lds_limit;
	uint64_t gpuvm_base;
	uint64_t gpuvm_limit;
	uint64_t scratch_base;
	uint64_t scratch_limit;

	/* VM context for GPUVM allocations */
	struct file *drm_file;
	void *vm;

	/* GPUVM allocations storage */
	struct idr alloc_idr;

	/* Flag used to tell whether the pdd has been dequeued from the dqm.
	 * This is used to prevent dev->dqm->ops.process_termination() from
	 * being called twice when it is already called in IOMMU callback
	 * function.
	 */
	bool already_dequeued;

	/* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */
	enum kfd_pdd_bound bound;
};

#define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)

/* Process data */
struct kfd_process {
	/*
	 * kfd_process structures are stored in an mm_struct*->kfd_process*
	 * hash table (kfd_processes in kfd_process.c)
	 */
	struct hlist_node kfd_processes;

	/*
	 * Opaque pointer to mm_struct. We don't hold a reference to
	 * it so it should never be dereferenced from here. This is
	 * only used for looking up processes by their mm.
	 */
	void *mm;

	struct kref ref;
	struct work_struct release_work;

	struct mutex mutex;

	/*
	 * In any process, the thread that started main() is the lead
	 * thread and outlives the rest.
	 * It is here because amd_iommu_bind_pasid wants a task_struct.
	 * It can also be used for safely getting a reference to the
	 * mm_struct of the process.
	 */
	struct task_struct *lead_thread;

	/* We want to receive a notification when the mm_struct is destroyed */
	struct mmu_notifier mmu_notifier;

	/* Used for delayed freeing of the kfd_process structure */
	struct rcu_head	rcu;

	unsigned int pasid;
	unsigned int doorbell_index;

	/*
	 * List of kfd_process_device structures,
	 * one for each device the process is using.
	 */
	struct list_head per_device_data;

	struct process_queue_manager pqm;

	/* Is the user space process 32 bit? */
	bool is_32bit_user_mode;

	/* Event-related data */
	struct mutex event_mutex;
	/* Event ID allocator and lookup */
	struct idr event_idr;
	/* Event page */
	struct kfd_signal_page *signal_page;
	size_t signal_mapped_size;
	size_t signal_event_count;
	bool signal_event_limit_reached;

	/* Information used for memory eviction */
	void *kgd_process_info;
	/* Eviction fence that is attached to all the BOs of this process. The
	 * fence will be triggered during eviction and a new one will be
	 * created during restore
	 */
	struct dma_fence *ef;

	/* Work items for evicting and restoring BOs */
	struct delayed_work eviction_work;
	struct delayed_work restore_work;
	/* seqno of the last scheduled eviction */
	unsigned int last_eviction_seqno;
	/* Approx. the last timestamp (in jiffies) when the process was
	 * restored after an eviction
	 */
	unsigned long last_restore_timestamp;
};

#define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */
extern DECLARE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
extern struct srcu_struct kfd_processes_srcu;
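
/*
 * Lookup sketch, mirroring the find-by-mm path in kfd_process.c: processes
 * are hashed by their mm_struct pointer and looked up under SRCU:
 *
 *	struct kfd_process *p;
 *	int idx = srcu_read_lock(&kfd_processes_srcu);
 *
 *	hash_for_each_possible_rcu(kfd_processes_table, p, kfd_processes,
 *				   (uintptr_t)mm)
 *		if (p->mm == mm)
 *			break;	// found it
 *	srcu_read_unlock(&kfd_processes_srcu, idx);
 */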

/**
 * Ioctl function type.
 *
 * \param filep pointer to file structure.
 * \param p amdkfd process pointer.
 * \param data pointer to arg that was copied from user.
 */
typedef int amdkfd_ioctl_t(struct file *filep, struct kfd_process *p,
				void *data);

struct amdkfd_ioctl_desc {
	unsigned int cmd;
	int flags;
	amdkfd_ioctl_t *func;
	unsigned int cmd_drv;
	const char *name;
};
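
/*
 * Sketch of how these descriptors are populated: kfd_chardev.c builds a
 * table indexed by _IOC_NR(); the macro below is a paraphrase, not the
 * verbatim definition:
 *
 *	#define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \
 *		[_IOC_NR(ioctl)] = { .cmd = ioctl, .func = _func, \
 *				     .flags = _flags, .name = #ioctl }
 */
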
bool kfd_dev_is_large_bar(struct kfd_dev *dev);

int kfd_process_create_wq(void);
void kfd_process_destroy_wq(void);
struct kfd_process *kfd_create_process(struct file *filep);
struct kfd_process *kfd_get_process(const struct task_struct *);
struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid);
struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm);
void kfd_unref_process(struct kfd_process *p);
int kfd_process_evict_queues(struct kfd_process *p);
int kfd_process_restore_queues(struct kfd_process *p);
void kfd_suspend_all_processes(void);
int kfd_resume_all_processes(void);

int kfd_process_device_init_vm(struct kfd_process_device *pdd,
			       struct file *drm_file);
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
						struct kfd_process *p);
struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p);
struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p);

int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
			  struct vm_area_struct *vma);

/* KFD process API for creating and translating handles */
int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
					void *mem);
void *kfd_process_device_translate_handle(struct kfd_process_device *p,
					int handle);
void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
					int handle);

/* Process device data iterator */
struct kfd_process_device *kfd_get_first_process_device_data(
							struct kfd_process *p);
struct kfd_process_device *kfd_get_next_process_device_data(
						struct kfd_process *p,
						struct kfd_process_device *pdd);
bool kfd_has_process_device_data(struct kfd_process *p);

/* PASIDs */
int kfd_pasid_init(void);
void kfd_pasid_exit(void);
bool kfd_set_pasid_limit(unsigned int new_limit);
unsigned int kfd_get_pasid_limit(void);
unsigned int kfd_pasid_alloc(void);
void kfd_pasid_free(unsigned int pasid);

/* Doorbells */
size_t kfd_doorbell_process_slice(struct kfd_dev *kfd);
int kfd_doorbell_init(struct kfd_dev *kfd);
void kfd_doorbell_fini(struct kfd_dev *kfd);
int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process,
		      struct vm_area_struct *vma);
void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
					unsigned int *doorbell_off);
void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr);
u32 read_kernel_doorbell(u32 __iomem *db);
void write_kernel_doorbell(void __iomem *db, u32 value);
void write_kernel_doorbell64(void __iomem *db, u64 value);
unsigned int kfd_doorbell_id_to_offset(struct kfd_dev *kfd,
					struct kfd_process *process,
					unsigned int doorbell_id);
phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev,
					struct kfd_process *process);
int kfd_alloc_process_doorbells(struct kfd_process *process);
void kfd_free_process_doorbells(struct kfd_process *process);
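
/*
 * Typical kernel-doorbell lifecycle (a sketch; the kernel queue code follows
 * this pattern when ringing its own queue):
 *
 *	unsigned int doorbell_off;
 *	u32 __iomem *db;
 *
 *	db = kfd_get_kernel_doorbell(kfd, &doorbell_off);
 *	if (db) {
 *		write_kernel_doorbell(db, new_wptr);	// notify the HW
 *		kfd_release_kernel_doorbell(kfd, db);	// when done with it
 *	}
 */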

/* GTT Sub-Allocator */

int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
			struct kfd_mem_obj **mem_obj);

int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj);

extern struct device *kfd_device;

/* Topology */
int kfd_topology_init(void);
void kfd_topology_shutdown(void);
int kfd_topology_add_device(struct kfd_dev *gpu);
int kfd_topology_remove_device(struct kfd_dev *gpu);
struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
						uint32_t proximity_domain);
struct kfd_dev *kfd_device_by_id(uint32_t gpu_id);
struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev);
int kfd_numa_node_to_apic_id(int numa_node_id);

/* Interrupts */
int kfd_interrupt_init(struct kfd_dev *dev);
void kfd_interrupt_exit(struct kfd_dev *dev);
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry);
bool interrupt_is_wanted(struct kfd_dev *dev,
				const uint32_t *ih_ring_entry,
				uint32_t *patched_ihre, bool *flag);

/* Power Management */
void kgd2kfd_suspend(struct kfd_dev *kfd);
int kgd2kfd_resume(struct kfd_dev *kfd);

/* GPU reset */
int kgd2kfd_pre_reset(struct kfd_dev *kfd);
int kgd2kfd_post_reset(struct kfd_dev *kfd);

/* amdkfd Apertures */
int kfd_init_apertures(struct kfd_process *process);

/* Queue Context Management */
int init_queue(struct queue **q, const struct queue_properties *properties);
void uninit_queue(struct queue *q);
void print_queue_properties(struct queue_properties *q);
void print_queue(struct queue *q);

struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
					struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
		struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_cik_hawaii(enum KFD_MQD_TYPE type,
		struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
		struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_vi_tonga(enum KFD_MQD_TYPE type,
		struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
		struct kfd_dev *dev);
struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev);
void device_queue_manager_uninit(struct device_queue_manager *dqm);
struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
					enum kfd_queue_type type);
void kernel_queue_uninit(struct kernel_queue *kq);
int kfd_process_vm_fault(struct device_queue_manager *dqm, unsigned int pasid);

/* Process Queue Manager */
struct process_queue_node {
	struct queue *q;
	struct kernel_queue *kq;
	struct list_head process_queue_list;
};

void kfd_process_dequeue_from_device(struct kfd_process_device *pdd);
void kfd_process_dequeue_from_all_devices(struct kfd_process *p);
int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p);
void pqm_uninit(struct process_queue_manager *pqm);
int pqm_create_queue(struct process_queue_manager *pqm,
			    struct kfd_dev *dev,
			    struct file *f,
			    struct queue_properties *properties,
			    unsigned int *qid);
int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid);
int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
			struct queue_properties *p);
int pqm_set_cu_mask(struct process_queue_manager *pqm, unsigned int qid,
			struct queue_properties *p);
struct kernel_queue *pqm_get_kernel_queue(struct process_queue_manager *pqm,
						unsigned int qid);

int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
				unsigned int fence_value,
				unsigned int timeout_ms);

/* Packet Manager */

#define KFD_FENCE_COMPLETED (100)
#define KFD_FENCE_INIT   (10)
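
/*
 * Fence handshake sketch: this is the pattern the device queue manager uses
 * when draining the run list. The query_status packet makes the scheduler
 * firmware write KFD_FENCE_COMPLETED to the fence address once the packet
 * is processed:
 *
 *	*fence_addr = KFD_FENCE_INIT;
 *	pm_send_query_status(pm, fence_gpu_addr, KFD_FENCE_COMPLETED);
 *	retval = amdkfd_fence_wait_timeout(fence_addr, KFD_FENCE_COMPLETED,
 *					   timeout_ms);
 */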

struct packet_manager {
	struct device_queue_manager *dqm;
	struct kernel_queue *priv_queue;
	struct mutex lock;
	bool allocated;
	struct kfd_mem_obj *ib_buffer_obj;
	unsigned int ib_size_bytes;

	const struct packet_manager_funcs *pmf;
};

struct packet_manager_funcs {
	/* Support ASIC-specific packet formats for PM4 packets */
	int (*map_process)(struct packet_manager *pm, uint32_t *buffer,
			struct qcm_process_device *qpd);
	int (*runlist)(struct packet_manager *pm, uint32_t *buffer,
			uint64_t ib, size_t ib_size_in_dwords, bool chain);
	int (*set_resources)(struct packet_manager *pm, uint32_t *buffer,
			struct scheduling_resources *res);
	int (*map_queues)(struct packet_manager *pm, uint32_t *buffer,
			struct queue *q, bool is_static);
	int (*unmap_queues)(struct packet_manager *pm, uint32_t *buffer,
			enum kfd_queue_type type,
			enum kfd_unmap_queues_filter mode,
			uint32_t filter_param, bool reset,
			unsigned int sdma_engine);
	int (*query_status)(struct packet_manager *pm, uint32_t *buffer,
			uint64_t fence_address, uint32_t fence_value);
	int (*release_mem)(uint64_t gpu_addr, uint32_t *buffer);

	/* Packet sizes */
	int map_process_size;
	int runlist_size;
	int set_resources_size;
	int map_queues_size;
	int unmap_queues_size;
	int query_status_size;
	int release_mem_size;
};

extern const struct packet_manager_funcs kfd_vi_pm_funcs;
extern const struct packet_manager_funcs kfd_v9_pm_funcs;
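
/*
 * Dispatch sketch: the pm_send_*() helpers below are ASIC-independent; they
 * size the packet with the matching pmf->*_size field, build it through the
 * pmf hook, and submit it on the private kernel queue. Roughly (locking and
 * error handling elided):
 *
 *	uint32_t *buffer;
 *	size_t size = pm->pmf->set_resources_size;
 *
 *	pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
 *			size / sizeof(uint32_t), (unsigned int **)&buffer);
 *	pm->pmf->set_resources(pm, buffer, res);
 *	pm->priv_queue->ops.submit_packet(pm->priv_queue);
 */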

int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
void pm_uninit(struct packet_manager *pm);
int pm_send_set_resources(struct packet_manager *pm,
				struct scheduling_resources *res);
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues);
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
				uint32_t fence_value);

int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
			enum kfd_unmap_queues_filter mode,
			uint32_t filter_param, bool reset,
			unsigned int sdma_engine);

void pm_release_ib(struct packet_manager *pm);

/* Following PM funcs can be shared among VI and AI */
unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size);
int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer,
				struct scheduling_resources *res);

uint64_t kfd_get_number_elems(struct kfd_dev *kfd);

/* Events */
extern const struct kfd_event_interrupt_class event_interrupt_class_cik;
extern const struct kfd_event_interrupt_class event_interrupt_class_v9;

extern const struct kfd_device_global_init_class device_global_init_class_cik;

void kfd_event_init_process(struct kfd_process *p);
void kfd_event_free_process(struct kfd_process *p);
int kfd_event_mmap(struct kfd_process *process, struct vm_area_struct *vma);
int kfd_wait_on_events(struct kfd_process *p,
		       uint32_t num_events, void __user *data,
		       bool all, uint32_t user_timeout_ms,
		       uint32_t *wait_result);
void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
				uint32_t valid_id_bits);
void kfd_signal_iommu_event(struct kfd_dev *dev,
		unsigned int pasid, unsigned long address,
		bool is_write_requested, bool is_execute_requested);
void kfd_signal_hw_exception_event(unsigned int pasid);
int kfd_set_event(struct kfd_process *p, uint32_t event_id);
int kfd_reset_event(struct kfd_process *p, uint32_t event_id);
int kfd_event_page_set(struct kfd_process *p, void *kernel_address,
		       uint64_t size);
int kfd_event_create(struct file *devkfd, struct kfd_process *p,
		     uint32_t event_type, bool auto_reset, uint32_t node_id,
		     uint32_t *event_id, uint32_t *event_trigger_data,
		     uint64_t *event_page_offset, uint32_t *event_slot_index);
int kfd_event_destroy(struct kfd_process *p, uint32_t event_id);

void kfd_signal_vm_fault_event(struct kfd_dev *dev, unsigned int pasid,
				struct kfd_vm_fault_info *info);

void kfd_signal_reset_event(struct kfd_dev *dev);

void kfd_flush_tlb(struct kfd_process_device *pdd);

int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p);

bool kfd_is_locked(void);

/* Debugfs */
#if defined(CONFIG_DEBUG_FS)

void kfd_debugfs_init(void);
void kfd_debugfs_fini(void);
int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data);
int pqm_debugfs_mqds(struct seq_file *m, void *data);
int kfd_debugfs_hqds_by_device(struct seq_file *m, void *data);
int dqm_debugfs_hqds(struct seq_file *m, void *data);
int kfd_debugfs_rls_by_device(struct seq_file *m, void *data);
int pm_debugfs_runlist(struct seq_file *m, void *data);

int kfd_debugfs_hang_hws(struct kfd_dev *dev);
int pm_debugfs_hang_hws(struct packet_manager *pm);
int dqm_debugfs_execute_queues(struct device_queue_manager *dqm);

#else

static inline void kfd_debugfs_init(void) {}
static inline void kfd_debugfs_fini(void) {}

#endif

#endif