1a538bbe7SJack Xiao /*
2a538bbe7SJack Xiao * Copyright 2019 Advanced Micro Devices, Inc.
3a538bbe7SJack Xiao *
4a538bbe7SJack Xiao * Permission is hereby granted, free of charge, to any person obtaining a
5a538bbe7SJack Xiao * copy of this software and associated documentation files (the "Software"),
6a538bbe7SJack Xiao * to deal in the Software without restriction, including without limitation
7a538bbe7SJack Xiao * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8a538bbe7SJack Xiao * and/or sell copies of the Software, and to permit persons to whom the
9a538bbe7SJack Xiao * Software is furnished to do so, subject to the following conditions:
10a538bbe7SJack Xiao *
11a538bbe7SJack Xiao * The above copyright notice and this permission notice shall be included in
12a538bbe7SJack Xiao * all copies or substantial portions of the Software.
13a538bbe7SJack Xiao *
14a538bbe7SJack Xiao * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15a538bbe7SJack Xiao * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16a538bbe7SJack Xiao * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17a538bbe7SJack Xiao * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18a538bbe7SJack Xiao * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19a538bbe7SJack Xiao * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20a538bbe7SJack Xiao * OTHER DEALINGS IN THE SOFTWARE.
21a538bbe7SJack Xiao *
22a538bbe7SJack Xiao */
23a538bbe7SJack Xiao
24a538bbe7SJack Xiao #ifndef __AMDGPU_MES_H__
25a538bbe7SJack Xiao #define __AMDGPU_MES_H__
26a538bbe7SJack Xiao
273a42c7f3SJack Xiao #include "amdgpu_irq.h"
283a42c7f3SJack Xiao #include "kgd_kfd_interface.h"
293a42c7f3SJack Xiao #include "amdgpu_gfx.h"
30e3cbb1f4SShashank Sharma #include "amdgpu_doorbell.h"
313a42c7f3SJack Xiao #include <linux/sched/mm.h>
323a42c7f3SJack Xiao
333bbd31e0SJack Xiao #define AMDGPU_MES_MAX_COMPUTE_PIPES 8
343bbd31e0SJack Xiao #define AMDGPU_MES_MAX_GFX_PIPES 2
353bbd31e0SJack Xiao #define AMDGPU_MES_MAX_SDMA_PIPES 2
363bbd31e0SJack Xiao
37e77a541fSGraham Sider #define AMDGPU_MES_API_VERSION_SHIFT 12
38e77a541fSGraham Sider #define AMDGPU_MES_FEAT_VERSION_SHIFT 24
39e77a541fSGraham Sider
40e77a541fSGraham Sider #define AMDGPU_MES_VERSION_MASK 0x00000fff
41e77a541fSGraham Sider #define AMDGPU_MES_API_VERSION_MASK 0x00fff000
42e77a541fSGraham Sider #define AMDGPU_MES_FEAT_VERSION_MASK 0xff000000
43e77a541fSGraham Sider
/* Scheduling priority levels that can be assigned to a gang/queue,
 * ordered from lowest to highest urgency.
 */
enum amdgpu_mes_priority_level {
	AMDGPU_MES_PRIORITY_LEVEL_LOW = 0,
	AMDGPU_MES_PRIORITY_LEVEL_NORMAL = 1,
	AMDGPU_MES_PRIORITY_LEVEL_MEDIUM = 2,
	AMDGPU_MES_PRIORITY_LEVEL_HIGH = 3,
	AMDGPU_MES_PRIORITY_LEVEL_REALTIME = 4,
	AMDGPU_MES_PRIORITY_NUM_LEVELS	/* number of levels, not a valid priority */
};
523bbd31e0SJack Xiao
533a42c7f3SJack Xiao #define AMDGPU_MES_PROC_CTX_SIZE 0x1000 /* one page area */
543a42c7f3SJack Xiao #define AMDGPU_MES_GANG_CTX_SIZE 0x1000 /* one page area */
553a42c7f3SJack Xiao
567bbc3676SJack Xiao struct amdgpu_mes_funcs;
57a538bbe7SJack Xiao
/* Hardware pipes the MES firmware runs on (used to index the
 * per-pipe arrays in struct amdgpu_mes).
 * NOTE(review): "admgpu" looks like a typo for "amdgpu", but the tag
 * is part of the public interface, so it is kept as-is.
 */
enum admgpu_mes_pipe {
	AMDGPU_MES_SCHED_PIPE = 0,	/* scheduler pipe */
	AMDGPU_MES_KIQ_PIPE,		/* kernel interface queue pipe */
	AMDGPU_MAX_MES_PIPES = 2,
};
63207e8bbeSJack Xiao
/* Top-level driver state for the MES (Micro Engine Scheduler) block:
 * per-pipe firmware objects, scheduler context buffers, id allocators,
 * and the hardware resource masks handed to the firmware.
 */
struct amdgpu_mes {
	struct amdgpu_device            *adev;

	/* Serializes MES access. Take only through amdgpu_mes_lock(),
	 * which also enters no-reclaim context (hence "hidden" — see the
	 * MMU-notifier deadlock discussion near amdgpu_mes_lock()). */
	struct mutex                    mutex_hidden;

	/* id allocators: pasid -> process, gang ids, queue ids, doorbells */
	struct idr                      pasid_idr;
	struct idr                      gang_id_idr;
	struct idr                      queue_id_idr;
	struct ida                      doorbell_ida;

	spinlock_t                      queue_id_lock;

	/* firmware version words; decode with the AMDGPU_MES_*_VERSION_MASK
	 * and *_SHIFT defines above */
	uint32_t                        sched_version;
	uint32_t                        kiq_version;

	uint32_t                        total_max_queue;
	uint32_t                        max_doorbell_slices;

	/* default scheduling time slices for new processes/gangs */
	uint64_t                        default_process_quantum;
	uint64_t                        default_gang_quantum;

	struct amdgpu_ring              ring;
	spinlock_t                      ring_lock;	/* protects submissions on @ring */

	const struct firmware           *fw[AMDGPU_MAX_MES_PIPES];

	/* mes ucode: BO, GPU address, CPU mapping and entry point per pipe */
	struct amdgpu_bo                *ucode_fw_obj[AMDGPU_MAX_MES_PIPES];
	uint64_t                        ucode_fw_gpu_addr[AMDGPU_MAX_MES_PIPES];
	uint32_t                        *ucode_fw_ptr[AMDGPU_MAX_MES_PIPES];
	uint64_t                        uc_start_addr[AMDGPU_MAX_MES_PIPES];

	/* mes ucode data: same layout as the ucode fields above */
	struct amdgpu_bo                *data_fw_obj[AMDGPU_MAX_MES_PIPES];
	uint64_t                        data_fw_gpu_addr[AMDGPU_MAX_MES_PIPES];
	uint32_t                        *data_fw_ptr[AMDGPU_MAX_MES_PIPES];
	uint64_t                        data_start_addr[AMDGPU_MAX_MES_PIPES];

	/* eop gpu obj (end-of-pipe buffer per pipe) */
	struct amdgpu_bo                *eop_gpu_obj[AMDGPU_MAX_MES_PIPES];
	uint64_t                        eop_gpu_addr[AMDGPU_MAX_MES_PIPES];

	void                            *mqd_backup[AMDGPU_MAX_MES_PIPES];
	struct amdgpu_irq_src           irq[AMDGPU_MAX_MES_PIPES];

	/* hardware resources made available to the MES firmware */
	uint32_t                        vmid_mask_gfxhub;
	uint32_t                        vmid_mask_mmhub;
	uint32_t                        compute_hqd_mask[AMDGPU_MES_MAX_COMPUTE_PIPES];
	uint32_t                        gfx_hqd_mask[AMDGPU_MES_MAX_GFX_PIPES];
	uint32_t                        sdma_hqd_mask[AMDGPU_MES_MAX_SDMA_PIPES];
	/* one aggregated doorbell per priority level */
	uint32_t                        aggregated_doorbells[AMDGPU_MES_PRIORITY_NUM_LEVELS];
	/* scheduler context: offset, GPU VA and CPU pointer */
	uint32_t                        sch_ctx_offs;
	uint64_t                        sch_ctx_gpu_addr;
	uint64_t                        *sch_ctx_ptr;
	/* fence slot used for MES status queries */
	uint32_t                        query_status_fence_offs;
	uint64_t                        query_status_fence_gpu_addr;
	uint64_t                        *query_status_fence_ptr;
	/* scratch slot for register readback (see amdgpu_mes_rreg()) */
	uint32_t                        read_val_offs;
	uint64_t                        read_val_gpu_addr;
	uint32_t                        *read_val_ptr;

	/* reclaim flags saved by amdgpu_mes_lock(); protected by mutex_hidden */
	uint32_t                        saved_flags;

	/* initialize/teardown kiq pipe */
	int                             (*kiq_hw_init)(struct amdgpu_device *adev);
	int                             (*kiq_hw_fini)(struct amdgpu_device *adev);

	/* MES doorbells: dword offset of the MES range, count, allocation bitmap */
	uint32_t                        db_start_dw_offset;
	uint32_t                        num_mes_dbs;
	unsigned long                   *doorbell_bitmap;

	/* ip specific functions */
	const struct amdgpu_mes_funcs   *funcs;
};
1397bbc3676SJack Xiao
/* Per-process MES state, looked up by pasid (see mes->pasid_idr). */
struct amdgpu_mes_process {
	int			pasid;
	struct amdgpu_vm	*vm;
	uint64_t		pd_gpu_addr;		/* page directory GPU address */
	struct amdgpu_bo	*proc_ctx_bo;		/* AMDGPU_MES_PROC_CTX_SIZE buffer */
	uint64_t		proc_ctx_gpu_addr;
	void			*proc_ctx_cpu_ptr;
	uint64_t		process_quantum;	/* scheduling time slice */
	struct list_head	gang_list;		/* gangs belonging to this process */
	uint32_t		doorbell_index;
	struct mutex		doorbell_lock;
};
1523a42c7f3SJack Xiao
/* A gang: a group of queues scheduled together within one process. */
struct amdgpu_mes_gang {
	int				gang_id;
	int				priority;
	int				inprocess_gang_priority;
	int				global_priority_level;
	struct list_head		list;		/* link in process->gang_list */
	struct amdgpu_mes_process	*process;	/* owning process */
	struct amdgpu_bo		*gang_ctx_bo;	/* AMDGPU_MES_GANG_CTX_SIZE buffer */
	uint64_t			gang_ctx_gpu_addr;
	void				*gang_ctx_cpu_ptr;
	uint64_t			gang_quantum;	/* scheduling time slice */
	struct list_head		queue_list;	/* queues in this gang */
};
1663a42c7f3SJack Xiao
/* A single hardware queue managed by MES. */
struct amdgpu_mes_queue {
	struct list_head		list;		/* link in gang->queue_list */
	struct amdgpu_mes_gang		*gang;		/* owning gang */
	int				queue_id;
	uint64_t			doorbell_off;
	struct amdgpu_bo		*mqd_obj;	/* memory queue descriptor BO */
	void				*mqd_cpu_ptr;
	uint64_t			mqd_gpu_addr;
	uint64_t			wptr_gpu_addr;	/* write-pointer GPU address */
	int				queue_type;
	int				paging;
	struct amdgpu_ring		*ring;
};
1803a42c7f3SJack Xiao
/* Caller-supplied properties for amdgpu_mes_add_hw_queue();
 * doorbell_off is filled in on return.
 */
struct amdgpu_mes_queue_properties {
	int 			queue_type;
	uint64_t                hqd_base_gpu_addr;
	uint64_t                rptr_gpu_addr;
	uint64_t                wptr_gpu_addr;
	uint64_t                wptr_mc_addr;	/* MC address of the write pointer */
	uint32_t                queue_size;
	uint64_t                eop_gpu_addr;	/* end-of-pipe buffer address */
	uint32_t                hqd_pipe_priority;
	uint32_t                hqd_queue_priority;
	bool 			paging;
	struct amdgpu_ring 	*ring;
	/* out */
	uint64_t       		doorbell_off;	/* assigned doorbell offset */
};
196be5609deSJack Xiao
/* Caller-supplied properties for amdgpu_mes_add_gang(). */
struct amdgpu_mes_gang_properties {
	uint32_t	priority;
	uint32_t	gang_quantum;			/* scheduling time slice */
	uint32_t	inprocess_gang_priority;
	uint32_t	priority_level;
	int		global_priority_level;
};
2045d0f619fSJack Xiao
/* Parameter block for the add_hw_queue backend callback: identifies
 * the process, the gang and the queue being registered with the MES
 * firmware.
 */
struct mes_add_queue_input {
	uint32_t	process_id;		/* pasid */
	uint64_t	page_table_base_addr;
	uint64_t	process_va_start;
	uint64_t	process_va_end;
	uint64_t	process_quantum;	/* process time slice */
	uint64_t	process_context_addr;
	uint64_t	gang_quantum;		/* gang time slice */
	uint64_t	gang_context_addr;
	uint32_t	inprocess_gang_priority;
	uint32_t	gang_global_priority_level;
	uint32_t	doorbell_offset;
	uint64_t	mqd_addr;		/* memory queue descriptor GPU address */
	uint64_t	wptr_addr;		/* write pointer GPU address */
	uint64_t	wptr_mc_addr;		/* write pointer MC address */
	uint32_t	queue_type;
	uint32_t	paging;
	uint32_t	gws_base;		/* global wave sync allocation */
	uint32_t	gws_size;
	uint64_t	tba_addr;		/* trap base address */
	uint64_t	tma_addr;		/* trap memory address */
	uint32_t	trap_en;
	uint32_t	skip_process_ctx_clear;
	uint32_t	is_kfd_process;
	uint32_t	is_aql_queue;
	uint32_t	queue_size;
	uint32_t	exclusively_scheduled;
};
2337bbc3676SJack Xiao
/* Parameter block for the remove_hw_queue backend callback. */
struct mes_remove_queue_input {
	uint32_t	doorbell_offset;	/* identifies the queue */
	uint64_t	gang_context_addr;	/* identifies the owning gang */
};
2387bbc3676SJack Xiao
/* Parameter block for the unmap_legacy_queue backend callback
 * (queues created outside of MES, addressed by pipe/queue id).
 */
struct mes_unmap_legacy_queue_input {
	enum amdgpu_unmap_queues_action action;
	uint32_t			queue_type;
	uint32_t			doorbell_offset;
	uint32_t			pipe_id;
	uint32_t			queue_id;
	uint64_t			trail_fence_addr;	/* fence signalled after unmap */
	uint64_t			trail_fence_data;
};
24818ee4ce6SJack Xiao
/* Parameter block for the suspend_gang backend callback; either a
 * single gang (by context address) or all gangs.
 */
struct mes_suspend_gang_input {
	bool		suspend_all_gangs;
	uint64_t	gang_context_addr;	/* ignored when suspending all */
	uint64_t	suspend_fence_addr;	/* fence signalled once suspended */
	uint32_t	suspend_fence_value;
};
2557bbc3676SJack Xiao
/* Parameter block for the resume_gang backend callback; counterpart
 * of mes_suspend_gang_input.
 */
struct mes_resume_gang_input {
	bool		resume_all_gangs;
	uint64_t	gang_context_addr;	/* ignored when resuming all */
};
2607bbc3676SJack Xiao
/* Sub-operations multiplexed through the misc_op backend callback;
 * selects which member of the union in mes_misc_op_input is valid.
 */
enum mes_misc_opcode {
	MES_MISC_OP_WRITE_REG,
	MES_MISC_OP_READ_REG,
	MES_MISC_OP_WRM_REG_WAIT,
	MES_MISC_OP_WRM_REG_WR_WAIT,
	MES_MISC_OP_SET_SHADER_DEBUGGER,
};
2686a4a1f60SJack Xiao
/* Parameter block for the misc_op backend callback; @op determines
 * which union member is used.
 */
struct mes_misc_op_input {
	enum mes_misc_opcode op;

	union {
		struct {
			uint32_t reg_offset;
			uint64_t buffer_addr;	/* GPU address the value is read back to */
		} read_reg;

		struct {
			uint32_t reg_offset;
			uint32_t reg_value;
		} write_reg;

		/* write/wait-register ops; cf. amdgpu_mes_reg_write_reg_wait()
		 * (write reg0, wait for reg1 to match ref under mask) */
		struct {
			uint32_t ref;
			uint32_t mask;
			uint32_t reg0;
			uint32_t reg1;
		} wrm_reg;

		struct {
			uint64_t process_context_addr;
			union {
				struct {
					uint32_t single_memop : 1;
					uint32_t single_alu_op : 1;
					uint32_t reserved: 29;
					uint32_t process_ctx_flush: 1;
				};
				uint32_t u32all;	/* raw access to the flag bits */
			} flags;
			uint32_t spi_gdbg_per_vmid_cntl;
			uint32_t tcp_watch_cntl[4];
			uint32_t trap_en;
		} set_shader_debugger;
	};
};
3076a4a1f60SJack Xiao
/* Hardware-specific backend hooks, implemented once per MES IP
 * version and installed in amdgpu_mes.funcs.
 */
struct amdgpu_mes_funcs {
	/* register a hardware queue with the MES firmware */
	int (*add_hw_queue)(struct amdgpu_mes *mes,
			    struct mes_add_queue_input *input);

	/* remove a previously added hardware queue */
	int (*remove_hw_queue)(struct amdgpu_mes *mes,
			       struct mes_remove_queue_input *input);

	/* unmap a queue that was created outside of MES */
	int (*unmap_legacy_queue)(struct amdgpu_mes *mes,
				  struct mes_unmap_legacy_queue_input *input);

	/* suspend one gang or all gangs */
	int (*suspend_gang)(struct amdgpu_mes *mes,
			    struct mes_suspend_gang_input *input);

	/* resume one gang or all gangs */
	int (*resume_gang)(struct amdgpu_mes *mes,
			   struct mes_resume_gang_input *input);

	/* miscellaneous ops (register access, shader debugger setup) */
	int (*misc_op)(struct amdgpu_mes *mes,
		       struct mes_misc_op_input *input);
};
327a538bbe7SJack Xiao
328cf064b45SJack Xiao #define amdgpu_mes_kiq_hw_init(adev) (adev)->mes.kiq_hw_init((adev))
32918ee4ce6SJack Xiao #define amdgpu_mes_kiq_hw_fini(adev) (adev)->mes.kiq_hw_fini((adev))
330cf064b45SJack Xiao
33111ec5b36SJack Xiao int amdgpu_mes_ctx_get_offs(struct amdgpu_ring *ring, unsigned int id_offs);
33211ec5b36SJack Xiao
333cc42e76eSMario Limonciello int amdgpu_mes_init_microcode(struct amdgpu_device *adev, int pipe);
334b04c1d64SJack Xiao int amdgpu_mes_init(struct amdgpu_device *adev);
335b04c1d64SJack Xiao void amdgpu_mes_fini(struct amdgpu_device *adev);
336b04c1d64SJack Xiao
33748dcd2b7SJack Xiao int amdgpu_mes_create_process(struct amdgpu_device *adev, int pasid,
33848dcd2b7SJack Xiao struct amdgpu_vm *vm);
339063a38d6SJack Xiao void amdgpu_mes_destroy_process(struct amdgpu_device *adev, int pasid);
34048dcd2b7SJack Xiao
3415d0f619fSJack Xiao int amdgpu_mes_add_gang(struct amdgpu_device *adev, int pasid,
3425d0f619fSJack Xiao struct amdgpu_mes_gang_properties *gprops,
3435d0f619fSJack Xiao int *gang_id);
344b0306e58SJack Xiao int amdgpu_mes_remove_gang(struct amdgpu_device *adev, int gang_id);
3455d0f619fSJack Xiao
346c8bb1057SJack Xiao int amdgpu_mes_suspend(struct amdgpu_device *adev);
347ea756bd5SJack Xiao int amdgpu_mes_resume(struct amdgpu_device *adev);
348c8bb1057SJack Xiao
349be5609deSJack Xiao int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
350be5609deSJack Xiao struct amdgpu_mes_queue_properties *qprops,
351be5609deSJack Xiao int *queue_id);
352bcc4e1e1SJack Xiao int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id);
353be5609deSJack Xiao
35418ee4ce6SJack Xiao int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
35518ee4ce6SJack Xiao struct amdgpu_ring *ring,
35618ee4ce6SJack Xiao enum amdgpu_unmap_queues_action action,
35718ee4ce6SJack Xiao u64 gpu_addr, u64 seq);
35818ee4ce6SJack Xiao
3596a4a1f60SJack Xiao uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg);
3606a4a1f60SJack Xiao int amdgpu_mes_wreg(struct amdgpu_device *adev,
3616a4a1f60SJack Xiao uint32_t reg, uint32_t val);
3626a4a1f60SJack Xiao int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg,
3636a4a1f60SJack Xiao uint32_t val, uint32_t mask);
3646a4a1f60SJack Xiao int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev,
3656a4a1f60SJack Xiao uint32_t reg0, uint32_t reg1,
3666a4a1f60SJack Xiao uint32_t ref, uint32_t mask);
367a9818854SJonathan Kim int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev,
368a9818854SJonathan Kim uint64_t process_context_addr,
369a9818854SJonathan Kim uint32_t spi_gdbg_per_vmid_cntl,
370a9818854SJonathan Kim const uint32_t *tcp_watch_cntl,
37109d49e14SJonathan Kim uint32_t flags,
37209d49e14SJonathan Kim bool trap_en);
373*3a950c56SJonathan Kim int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev,
374*3a950c56SJonathan Kim uint64_t process_context_addr);
375d0c423b6SJack Xiao int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
376d0c423b6SJack Xiao int queue_type, int idx,
377d0c423b6SJack Xiao struct amdgpu_mes_ctx_data *ctx_data,
378d0c423b6SJack Xiao struct amdgpu_ring **out);
3799cc654c8SJack Xiao void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
3809cc654c8SJack Xiao struct amdgpu_ring *ring);
381d0c423b6SJack Xiao
3822d7a1f71SLe Ma uint32_t amdgpu_mes_get_aggregated_doorbell_index(struct amdgpu_device *adev,
3832d7a1f71SLe Ma enum amdgpu_mes_priority_level prio);
3842d7a1f71SLe Ma
385e3652b09SJack Xiao int amdgpu_mes_ctx_alloc_meta_data(struct amdgpu_device *adev,
386e3652b09SJack Xiao struct amdgpu_mes_ctx_data *ctx_data);
387e3652b09SJack Xiao void amdgpu_mes_ctx_free_meta_data(struct amdgpu_mes_ctx_data *ctx_data);
3887c18b40eSJack Xiao int amdgpu_mes_ctx_map_meta_data(struct amdgpu_device *adev,
3897c18b40eSJack Xiao struct amdgpu_vm *vm,
3907c18b40eSJack Xiao struct amdgpu_mes_ctx_data *ctx_data);
391737dad0bSJack Xiao int amdgpu_mes_ctx_unmap_meta_data(struct amdgpu_device *adev,
392737dad0bSJack Xiao struct amdgpu_mes_ctx_data *ctx_data);
393e3652b09SJack Xiao
3946624d161SJack Xiao int amdgpu_mes_self_test(struct amdgpu_device *adev);
3956624d161SJack Xiao
396464913c0SMukul Joshi int amdgpu_mes_doorbell_process_slice(struct amdgpu_device *adev);
39718ee4ce6SJack Xiao
39818ee4ce6SJack Xiao /*
39918ee4ce6SJack Xiao * MES lock can be taken in MMU notifiers.
40018ee4ce6SJack Xiao *
 * A bit more detail about why to enter no-reclaim context while holding the MES lock:
40218ee4ce6SJack Xiao *
40318ee4ce6SJack Xiao * The purpose of the MMU notifier is to stop GPU access to memory so
40418ee4ce6SJack Xiao * that the Linux VM subsystem can move pages around safely. This is
40518ee4ce6SJack Xiao * done by preempting user mode queues for the affected process. When
40618ee4ce6SJack Xiao * MES is used, MES lock needs to be taken to preempt the queues.
40718ee4ce6SJack Xiao *
40818ee4ce6SJack Xiao * The MMU notifier callback entry point in the driver is
40918ee4ce6SJack Xiao * amdgpu_mn_invalidate_range_start_hsa. The relevant call chain from
41018ee4ce6SJack Xiao * there is:
41118ee4ce6SJack Xiao * amdgpu_amdkfd_evict_userptr -> kgd2kfd_quiesce_mm ->
41218ee4ce6SJack Xiao * kfd_process_evict_queues -> pdd->dev->dqm->ops.evict_process_queues
41318ee4ce6SJack Xiao *
41418ee4ce6SJack Xiao * The last part of the chain is a function pointer where we take the
41518ee4ce6SJack Xiao * MES lock.
41618ee4ce6SJack Xiao *
41718ee4ce6SJack Xiao * The problem with taking locks in the MMU notifier is, that MMU
41818ee4ce6SJack Xiao * notifiers can be called in reclaim-FS context. That's where the
41918ee4ce6SJack Xiao * kernel frees up pages to make room for new page allocations under
42018ee4ce6SJack Xiao * memory pressure. While we are running in reclaim-FS context, we must
42118ee4ce6SJack Xiao * not trigger another memory reclaim operation because that would
42218ee4ce6SJack Xiao * recursively reenter the reclaim code and cause a deadlock. The
 * memalloc_noreclaim_save/restore calls guarantee that.
42418ee4ce6SJack Xiao *
42518ee4ce6SJack Xiao * In addition we also need to avoid lock dependencies on other locks taken
42618ee4ce6SJack Xiao * under the MES lock, for example reservation locks. Here is a possible
42718ee4ce6SJack Xiao * scenario of a deadlock:
42818ee4ce6SJack Xiao * Thread A: takes and holds reservation lock | triggers reclaim-FS |
42918ee4ce6SJack Xiao * MMU notifier | blocks trying to take MES lock
43018ee4ce6SJack Xiao * Thread B: takes and holds MES lock | blocks trying to take reservation lock
43118ee4ce6SJack Xiao *
43218ee4ce6SJack Xiao * In this scenario Thread B gets involved in a deadlock even without
43318ee4ce6SJack Xiao * triggering a reclaim-FS operation itself.
43418ee4ce6SJack Xiao * To fix this and break the lock dependency chain you'd need to either:
43518ee4ce6SJack Xiao * 1. protect reservation locks with memalloc_nofs_save/restore, or
43618ee4ce6SJack Xiao * 2. avoid taking reservation locks under the MES lock.
43718ee4ce6SJack Xiao *
43818ee4ce6SJack Xiao * Reservation locks are taken all over the kernel in different subsystems, we
 * have no control over them and their lock dependencies. So the only workable
44018ee4ce6SJack Xiao * solution is to avoid taking other locks under the MES lock.
44118ee4ce6SJack Xiao * As a result, make sure no reclaim-FS happens while holding this lock anywhere
44218ee4ce6SJack Xiao * to prevent deadlocks when an MMU notifier runs in reclaim-FS context.
44318ee4ce6SJack Xiao */
/* Acquire the global MES lock and enter no-reclaim context (see the
 * MMU-notifier deadlock rationale above). saved_flags is protected by
 * mutex_hidden, so it must only be written after the mutex is held.
 */
static inline void amdgpu_mes_lock(struct amdgpu_mes *mes)
{
	mutex_lock(&mes->mutex_hidden);
	mes->saved_flags = memalloc_noreclaim_save();
}
44918ee4ce6SJack Xiao
/* Leave no-reclaim context and release the global MES lock. The exact
 * mirror of amdgpu_mes_lock(): restore the saved reclaim flags before
 * dropping the mutex that protects saved_flags.
 */
static inline void amdgpu_mes_unlock(struct amdgpu_mes *mes)
{
	memalloc_noreclaim_restore(mes->saved_flags);
	mutex_unlock(&mes->mutex_hidden);
}
455a538bbe7SJack Xiao #endif /* __AMDGPU_MES_H__ */
456