/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

/*
 * This file defines the private interface between the
 * AMD kernel graphics drivers and the AMD KFD.
 */

#ifndef KGD_KFD_INTERFACE_H_INCLUDED
#define KGD_KFD_INTERFACE_H_INCLUDED

#include <linux/types.h>
#include <linux/bitmap.h>
#include <linux/dma-fence.h>

struct pci_dev;

#define KFD_INTERFACE_VERSION 2
#define KGD_MAX_QUEUES 128

struct kfd_dev;
struct kgd_dev;

struct kgd_mem;

enum kfd_preempt_type {
	KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN = 0,
	KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
};

struct kfd_vm_fault_info {
	uint64_t	page_addr;
	uint32_t	vmid;
	uint32_t	mc_id;
	uint32_t	status;
	bool		prot_valid;
	bool		prot_read;
	bool		prot_write;
	bool		prot_exec;
};

struct kfd_cu_info {
	uint32_t num_shader_engines;
	uint32_t num_shader_arrays_per_engine;
	uint32_t num_cu_per_sh;
	uint32_t cu_active_number;
	uint32_t cu_ao_mask;
	uint32_t simd_per_cu;
	uint32_t max_waves_per_simd;
	uint32_t wave_front_size;
	uint32_t max_scratch_slots_per_cu;
	uint32_t lds_size;
	uint32_t cu_bitmap[4][4];
};
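
/*
 * Illustrative sketch (not part of this interface): cu_bitmap holds one bit
 * per compute unit, indexed here by shader engine and shader array (an
 * assumption about the layout). Summing the set bits, e.g. with hweight32()
 * from <linux/bitops.h>, should agree with the cu_active_number reported by
 * get_cu_info():
 *
 *	uint32_t se, sh, active = 0;
 *
 *	for (se = 0; se < 4; se++)
 *		for (sh = 0; sh < 4; sh++)
 *			active += hweight32(cu_info.cu_bitmap[se][sh]);
 */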

/* For getting GPU local memory information from KGD */
struct kfd_local_mem_info {
	uint64_t local_mem_size_private;
	uint64_t local_mem_size_public;
	uint32_t vram_width;
	uint32_t mem_clk_max;
};

enum kgd_memory_pool {
	KGD_POOL_SYSTEM_CACHEABLE = 1,
	KGD_POOL_SYSTEM_WRITECOMBINE = 2,
	KGD_POOL_FRAMEBUFFER = 3,
};

enum kgd_engine_type {
	KGD_ENGINE_PFP = 1,
	KGD_ENGINE_ME,
	KGD_ENGINE_CE,
	KGD_ENGINE_MEC1,
	KGD_ENGINE_MEC2,
	KGD_ENGINE_RLC,
	KGD_ENGINE_SDMA1,
	KGD_ENGINE_SDMA2,
	KGD_ENGINE_MAX
};

/**
 * enum kfd_sched_policy
 *
 * @KFD_SCHED_POLICY_HWS: H/W scheduling policy, also known as command
 * processor (CP) scheduling. In this mode the firmware schedules the user
 * mode queues and the kernel queues, such as the HIQ and DIQ.
 * The HIQ is a special queue used to dispatch the configuration and the
 * list of currently running user mode queues to the CP.
 * The DIQ is a debugging queue that dispatches debugging commands to the
 * firmware.
 * In this mode oversubscription of user mode queues is enabled.
 *
 * @KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION: The same as above, but with the
 * oversubscription feature disabled.
 *
 * @KFD_SCHED_POLICY_NO_HWS: No H/W scheduling. In this mode the driver
 * programs the command processor registers and sets up the queues
 * "manually". This mode is used *ONLY* for debugging purposes.
 */
enum kfd_sched_policy {
	KFD_SCHED_POLICY_HWS = 0,
	KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION,
	KFD_SCHED_POLICY_NO_HWS
};
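
/*
 * Minimal sketch (hypothetical local variables, not part of this interface)
 * of the two properties a queue scheduler derives from the chosen policy:
 * whether the firmware (CP/HWS) schedules the queues at all, and whether
 * more user mode queues than H/W queue slots may be created:
 *
 *	bool hws_enabled = (policy != KFD_SCHED_POLICY_NO_HWS);
 *	bool oversubscription = (policy == KFD_SCHED_POLICY_HWS);
 */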

struct kgd2kfd_shared_resources {
	/* Bit n == 1 means VMID n is available for KFD. */
	unsigned int compute_vmid_bitmap;

	/* number of pipes per mec */
	uint32_t num_pipe_per_mec;

	/* number of queues per pipe */
	uint32_t num_queue_per_pipe;

	/* Bit n == 1 means Queue n is available for KFD */
	DECLARE_BITMAP(queue_bitmap, KGD_MAX_QUEUES);

	/* Doorbell assignments (SOC15 and later chips only). Only
	 * specific doorbells are routed to each SDMA engine. Others
	 * are routed to IH and VCN. They are not usable by the CP.
	 *
	 * Any doorbell number D that satisfies the following condition
	 * is reserved: (D & reserved_doorbell_mask) == reserved_doorbell_val
	 *
	 * KFD currently uses 1024 doorbells per process (indices
	 * 0x000-0x3ff). If doorbells 0x0e0-0x0ff and 0x2e0-0x2ff are
	 * reserved, the mask would be set to 0x1e0 and the val to 0x0e0
	 * (see the reservation sketch below this struct).
	 */
	unsigned int sdma_doorbell[2][8];
	unsigned int reserved_doorbell_mask;
	unsigned int reserved_doorbell_val;

	/* Base address of doorbell aperture. */
	phys_addr_t doorbell_physical_address;

	/* Size in bytes of doorbell aperture. */
	size_t doorbell_aperture_size;

	/* Number of bytes at start of aperture reserved for KGD. */
	size_t doorbell_start_offset;

	/* GPUVM address space size in bytes */
	uint64_t gpuvm_size;

	/* Minor device number of the render node */
	int drm_render_minor;
};
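
/*
 * Sketch of the reservation test described above (hypothetical helper, not
 * part of this interface). Within a process's 1024 doorbells, with
 * mask = 0x1e0 and val = 0x0e0 it returns true exactly for doorbells
 * 0x0e0-0x0ff and 0x2e0-0x2ff:
 *
 *	static bool doorbell_is_reserved(const struct kgd2kfd_shared_resources *res,
 *					 unsigned int doorbell)
 *	{
 *		return (doorbell & res->reserved_doorbell_mask) ==
 *			res->reserved_doorbell_val;
 *	}
 */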

struct tile_config {
	uint32_t *tile_config_ptr;
	uint32_t *macro_tile_config_ptr;
	uint32_t num_tile_configs;
	uint32_t num_macro_tile_configs;

	uint32_t gb_addr_config;
	uint32_t num_banks;
	uint32_t num_ranks;
};

#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096

/*
 * Allocation flag domains
 * NOTE: This must match the corresponding definitions in kfd_ioctl.h.
 */
#define ALLOC_MEM_FLAGS_VRAM		(1 << 0)
#define ALLOC_MEM_FLAGS_GTT		(1 << 1)
#define ALLOC_MEM_FLAGS_USERPTR		(1 << 2) /* TODO */
#define ALLOC_MEM_FLAGS_DOORBELL	(1 << 3) /* TODO */

/*
 * Allocation flags attributes/access options.
 * NOTE: This must match the corresponding definitions in kfd_ioctl.h.
 */
#define ALLOC_MEM_FLAGS_WRITABLE	(1 << 31)
#define ALLOC_MEM_FLAGS_EXECUTABLE	(1 << 30)
#define ALLOC_MEM_FLAGS_PUBLIC		(1 << 29)
#define ALLOC_MEM_FLAGS_NO_SUBSTITUTE	(1 << 28) /* TODO */
#define ALLOC_MEM_FLAGS_AQL_QUEUE_MEM	(1 << 27)
#define ALLOC_MEM_FLAGS_COHERENT	(1 << 26) /* For GFXv9 or later */
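
/*
 * Illustrative flag combination (assumption: one domain flag plus any
 * access attributes), e.g. for a writable, executable VRAM allocation
 * passed to alloc_memory_of_gpu() below:
 *
 *	uint32_t flags = ALLOC_MEM_FLAGS_VRAM |
 *			 ALLOC_MEM_FLAGS_WRITABLE |
 *			 ALLOC_MEM_FLAGS_EXECUTABLE;
 */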

/**
 * struct kfd2kgd_calls
 *
 * @init_gtt_mem_allocation: Allocates a buffer in the GART aperture.
 * The buffer can be used for MQDs, HPDs, kernel queues, fences and runlists.
 *
 * @free_gtt_mem: Frees a buffer that was allocated in the GART aperture
 *
 * @get_local_mem_info: Retrieves information about GPU local memory
 *
 * @get_gpu_clock_counter: Retrieves the GPU clock counter
 *
 * @get_max_engine_clock_in_mhz: Retrieves the maximum GPU clock in MHz
 *
 * @alloc_pasid: Allocate a PASID
 * @free_pasid: Free a PASID
 *
 * @program_sh_mem_settings: Initializes the memory properties, such as the
 * main aperture memory type (cached / non-cached) and the secondary
 * aperture base address, size and memory type.
 * This function is used only in no cp scheduling mode.
 *
 * @set_pasid_vmid_mapping: Exposes a pasid/vmid pair to the H/W. Only used
 * in no cp scheduling mode.
 *
 * @hqd_load: Loads the mqd structure to a H/W hqd slot. Used only in no cp
 * scheduling mode.
 *
 * @hqd_sdma_load: Loads the SDMA mqd structure to a H/W SDMA hqd slot.
 * Used only in no HWS mode.
 *
 * @hqd_dump: Dumps CPC HQD registers to an array of address-value pairs.
 * The array is allocated with kmalloc and must be freed with kfree by the
 * caller.
 *
 * @hqd_sdma_dump: Dumps SDMA HQD registers to an array of address-value
 * pairs. The array is allocated with kmalloc and must be freed with kfree
 * by the caller.
 *
 * @hqd_is_occupied: Checks if a hqd slot is occupied.
 *
 * @hqd_destroy: Destructs and preempts the queue assigned to that hqd slot.
 *
 * @hqd_sdma_is_occupied: Checks if an SDMA hqd slot is occupied.
 *
 * @hqd_sdma_destroy: Destructs and preempts the SDMA queue assigned to that
 * SDMA hqd slot.
 *
 * @get_fw_version: Returns the FW version from the header
 *
 * @set_scratch_backing_va: Sets the VA for scratch backing memory of a VMID.
 * Only used in no cp scheduling mode.
 *
 * @get_tile_config: Returns GPU-specific tiling mode information
 *
 * @get_cu_info: Retrieves information about activated CUs
 *
 * @get_vram_usage: Returns current VRAM usage
 *
 * @create_process_vm: Create a VM address space for a given process and GPU
 *
 * @destroy_process_vm: Destroy a VM
 *
 * @get_process_page_dir: Get physical address of a VM page directory
 *
 * @set_vm_context_page_table_base: Program page table base for a VMID
 *
 * @alloc_memory_of_gpu: Allocate GPUVM memory
 *
 * @free_memory_of_gpu: Free GPUVM memory
 *
 * @map_memory_to_gpu: Map GPUVM memory into a specific VM address
 * space. Allocates and updates page tables and page directories as
 * needed. This function may return before all page table updates have
 * completed. This allows multiple map operations (on multiple GPUs)
 * to happen concurrently. Use sync_memory to synchronize with all
 * pending updates (see the usage sketch after this struct).
 *
 * @unmap_memory_to_gpu: Unmap GPUVM memory from a specific VM address space
 *
 * @sync_memory: Wait for pending page table updates to complete
 *
 * @map_gtt_bo_to_kernel: Map a GTT BO for kernel access.
 * Pins the BO and maps it to kernel address space. Such BOs are never
 * evicted. The kernel virtual address remains valid until the BO is freed.
 *
 * @restore_process_bos: Restore all BOs that belong to the
 * process. This is intended for restoring memory mappings after a TTM
 * eviction.
 *
 * @invalidate_tlbs: Invalidate TLBs for a specific PASID
 *
 * @invalidate_tlbs_vmid: Invalidate TLBs for a specific VMID
 *
 * @submit_ib: Submits an IB to the specified engine by inserting the IB
 * into the corresponding ring (ring type). The IB is executed with the
 * specified VMID in a user mode context.
 *
 * @get_vm_fault_info: Return information about a recent VM fault on
 * GFXv7 and v8. If multiple VM faults occurred since the last call of
 * this function, it will return information about the first of those
 * faults. On GFXv9 VM fault information is fully contained in the IH
 * packet and this function is not needed.
 *
 * @read_vmid_from_vmfault_reg: On Hawaii the VMID is not set in the
 * IH ring entry. This function allows the KFD ISR to get the VMID
 * from the fault status register as early as possible.
 *
 * @gpu_recover: Lets KGD reset the GPU after KFD detects a CPC hang
 *
 * @set_compute_idle: Indicates whether compute is idle on a device. This
 * can be used to change power profiles depending on compute activity.
 *
 * @get_hive_id: Returns the hive id of the current device, 0 if XGMI is not
 * enabled
 *
 * This structure contains function pointers to services that the kgd driver
 * provides to the amdkfd driver.
 *
 */
struct kfd2kgd_calls {
	int (*init_gtt_mem_allocation)(struct kgd_dev *kgd, size_t size,
					void **mem_obj, uint64_t *gpu_addr,
					void **cpu_ptr, bool mqd_gfx9);

	void (*free_gtt_mem)(struct kgd_dev *kgd, void *mem_obj);

	void (*get_local_mem_info)(struct kgd_dev *kgd,
			struct kfd_local_mem_info *mem_info);
	uint64_t (*get_gpu_clock_counter)(struct kgd_dev *kgd);

	uint32_t (*get_max_engine_clock_in_mhz)(struct kgd_dev *kgd);

	int (*alloc_pasid)(unsigned int bits);
	void (*free_pasid)(unsigned int pasid);

	/* Register access functions */
	void (*program_sh_mem_settings)(struct kgd_dev *kgd, uint32_t vmid,
			uint32_t sh_mem_config,	uint32_t sh_mem_ape1_base,
			uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases);

	int (*set_pasid_vmid_mapping)(struct kgd_dev *kgd, unsigned int pasid,
					unsigned int vmid);

	int (*init_interrupts)(struct kgd_dev *kgd, uint32_t pipe_id);

	int (*hqd_load)(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr,
			uint32_t wptr_shift, uint32_t wptr_mask,
			struct mm_struct *mm);

	int (*hqd_sdma_load)(struct kgd_dev *kgd, void *mqd,
			     uint32_t __user *wptr, struct mm_struct *mm);

	int (*hqd_dump)(struct kgd_dev *kgd,
			uint32_t pipe_id, uint32_t queue_id,
			uint32_t (**dump)[2], uint32_t *n_regs);

	int (*hqd_sdma_dump)(struct kgd_dev *kgd,
			     uint32_t engine_id, uint32_t queue_id,
			     uint32_t (**dump)[2], uint32_t *n_regs);

	bool (*hqd_is_occupied)(struct kgd_dev *kgd, uint64_t queue_address,
				uint32_t pipe_id, uint32_t queue_id);

	int (*hqd_destroy)(struct kgd_dev *kgd, void *mqd, uint32_t reset_type,
				unsigned int timeout, uint32_t pipe_id,
				uint32_t queue_id);

	bool (*hqd_sdma_is_occupied)(struct kgd_dev *kgd, void *mqd);

	int (*hqd_sdma_destroy)(struct kgd_dev *kgd, void *mqd,
				unsigned int timeout);

	int (*address_watch_disable)(struct kgd_dev *kgd);
	int (*address_watch_execute)(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					uint32_t cntl_val,
					uint32_t addr_hi,
					uint32_t addr_lo);
	int (*wave_control_execute)(struct kgd_dev *kgd,
					uint32_t gfx_index_val,
					uint32_t sq_cmd);
	uint32_t (*address_watch_get_offset)(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					unsigned int reg_offset);
	bool (*get_atc_vmid_pasid_mapping_valid)(
					struct kgd_dev *kgd,
					uint8_t vmid);
	uint16_t (*get_atc_vmid_pasid_mapping_pasid)(
					struct kgd_dev *kgd,
					uint8_t vmid);

	uint16_t (*get_fw_version)(struct kgd_dev *kgd,
				enum kgd_engine_type type);
	void (*set_scratch_backing_va)(struct kgd_dev *kgd,
				uint64_t va, uint32_t vmid);
	int (*get_tile_config)(struct kgd_dev *kgd, struct tile_config *config);

	void (*get_cu_info)(struct kgd_dev *kgd,
			struct kfd_cu_info *cu_info);
	uint64_t (*get_vram_usage)(struct kgd_dev *kgd);

	int (*create_process_vm)(struct kgd_dev *kgd, unsigned int pasid, void **vm,
			void **process_info, struct dma_fence **ef);
	int (*acquire_process_vm)(struct kgd_dev *kgd, struct file *filp,
			unsigned int pasid, void **vm, void **process_info,
			struct dma_fence **ef);
	void (*destroy_process_vm)(struct kgd_dev *kgd, void *vm);
	void (*release_process_vm)(struct kgd_dev *kgd, void *vm);
	uint64_t (*get_process_page_dir)(void *vm);
	void (*set_vm_context_page_table_base)(struct kgd_dev *kgd,
			uint32_t vmid, uint64_t page_table_base);
	int (*alloc_memory_of_gpu)(struct kgd_dev *kgd, uint64_t va,
			uint64_t size, void *vm,
			struct kgd_mem **mem, uint64_t *offset,
			uint32_t flags);
	int (*free_memory_of_gpu)(struct kgd_dev *kgd, struct kgd_mem *mem);
	int (*map_memory_to_gpu)(struct kgd_dev *kgd, struct kgd_mem *mem,
			void *vm);
	int (*unmap_memory_to_gpu)(struct kgd_dev *kgd, struct kgd_mem *mem,
			void *vm);
	int (*sync_memory)(struct kgd_dev *kgd, struct kgd_mem *mem, bool intr);
	int (*map_gtt_bo_to_kernel)(struct kgd_dev *kgd, struct kgd_mem *mem,
			void **kptr, uint64_t *size);
	int (*restore_process_bos)(void *process_info, struct dma_fence **ef);

	int (*invalidate_tlbs)(struct kgd_dev *kgd, uint16_t pasid);
	int (*invalidate_tlbs_vmid)(struct kgd_dev *kgd, uint16_t vmid);

	int (*submit_ib)(struct kgd_dev *kgd, enum kgd_engine_type engine,
			uint32_t vmid, uint64_t gpu_addr,
			uint32_t *ib_cmd, uint32_t ib_len);

	int (*get_vm_fault_info)(struct kgd_dev *kgd,
			struct kfd_vm_fault_info *info);
	uint32_t (*read_vmid_from_vmfault_reg)(struct kgd_dev *kgd);

	void (*gpu_recover)(struct kgd_dev *kgd);

	void (*set_compute_idle)(struct kgd_dev *kgd, bool idle);

	uint64_t (*get_hive_id)(struct kgd_dev *kgd);

};
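
/*
 * Usage sketch for the GPUVM memory calls above (hypothetical variables
 * kgd0/kgd1, vm0/vm1, va, size; f2g points at the kfd2kgd_calls table
 * registered by the KGD driver). map_memory_to_gpu() may return before its
 * page table updates complete, so mappings on several GPUs can be issued
 * back to back and then synchronized once with sync_memory():
 *
 *	struct kgd_mem *mem;
 *	int r;
 *
 *	r = f2g->alloc_memory_of_gpu(kgd0, va, size, vm0, &mem, NULL,
 *				     ALLOC_MEM_FLAGS_GTT |
 *				     ALLOC_MEM_FLAGS_WRITABLE);
 *	if (r)
 *		return r;
 *
 *	r = f2g->map_memory_to_gpu(kgd0, mem, vm0);	// may complete later
 *	if (!r)
 *		r = f2g->map_memory_to_gpu(kgd1, mem, vm1);
 *	if (!r)
 *		r = f2g->sync_memory(kgd0, mem, true);	// interruptible wait
 *
 *	// ... use the memory; tear down with unmap_memory_to_gpu() and
 *	// free_memory_of_gpu() when done.
 */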

/**
 * struct kgd2kfd_calls
 *
 * @exit: Notifies amdkfd that the kgd module is being unloaded
 *
 * @probe: Notifies amdkfd about a probe done on a device in the kgd driver.
 *
 * @device_init: Initializes the newly probed device (if it is a device that
 * amdkfd supports)
 *
 * @device_exit: Notifies amdkfd about the removal of a kgd device
 *
 * @suspend: Notifies amdkfd about a suspend action done to a kgd device
 *
 * @resume: Notifies amdkfd about a resume action done to a kgd device
 *
 * @quiesce_mm: Quiesce all user queue access to the specified MM address space
 *
 * @resume_mm: Resume user queue access to the specified MM address space
 *
 * @schedule_evict_and_restore_process: Schedules a work queue that will
 * prepare for safe eviction of KFD BOs that belong to the specified process.
 *
 * @pre_reset: Notifies amdkfd that amdgpu is about to reset the gpu
 *
 * @post_reset: Notifies amdkfd that amdgpu successfully reset the gpu
 *
 * This structure contains the function callback pointers the kgd driver
 * uses to notify amdkfd about certain status changes.
 *
 */
struct kgd2kfd_calls {
	void (*exit)(void);
	struct kfd_dev* (*probe)(struct kgd_dev *kgd, struct pci_dev *pdev,
		const struct kfd2kgd_calls *f2g);
	bool (*device_init)(struct kfd_dev *kfd,
			const struct kgd2kfd_shared_resources *gpu_resources);
	void (*device_exit)(struct kfd_dev *kfd);
	void (*interrupt)(struct kfd_dev *kfd, const void *ih_ring_entry);
	void (*suspend)(struct kfd_dev *kfd);
	int (*resume)(struct kfd_dev *kfd);
	int (*quiesce_mm)(struct mm_struct *mm);
	int (*resume_mm)(struct mm_struct *mm);
	int (*schedule_evict_and_restore_process)(struct mm_struct *mm,
			struct dma_fence *fence);
	int  (*pre_reset)(struct kfd_dev *kfd);
	int  (*post_reset)(struct kfd_dev *kfd);
};

int kgd2kfd_init(unsigned interface_version,
		const struct kgd2kfd_calls **g2f);
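
/*
 * Bring-up sketch from the KGD side (hypothetical variables kgd, pdev and
 * shared_resources; kfd2kgd points at the KGD driver's kfd2kgd_calls table).
 * It assumes kgd2kfd_init() returns 0 on success and device_init() returns
 * true on success:
 *
 *	const struct kgd2kfd_calls *kgd2kfd;
 *	struct kfd_dev *kfd;
 *
 *	if (kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd))
 *		return;		// amdkfd unavailable or interface mismatch
 *
 *	kfd = kgd2kfd->probe(kgd, pdev, kfd2kgd);
 *	if (!kfd)
 *		return;
 *
 *	if (!kgd2kfd->device_init(kfd, &shared_resources))
 *		kgd2kfd->device_exit(kfd);
 */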

#endif	/* KGD_KFD_INTERFACE_H_INCLUDED */