/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef KFD_DEVICE_QUEUE_MANAGER_H_
#define KFD_DEVICE_QUEUE_MANAGER_H_

#include <linux/rwsem.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/sched/mm.h>
#include "kfd_priv.h"
#include "kfd_mqd_manager.h"

#define VMID_NUM 16

struct device_process_node {
	struct qcm_process_device *qpd;
	struct list_head list;
};

/**
 * struct device_queue_manager_ops
 *
 * @create_queue: Queue creation routine.
 *
 * @destroy_queue: Queue destruction routine.
 *
 * @update_queue: Queue update routine.
 *
 * @execute_queues: Dispatches the queues list to the H/W.
 *
 * @register_process: This routine associates a specific process with the
 * device.
 *
 * @unregister_process: Destroys the association between a process and the
 * device.
 *
 * @initialize: Initializes the pipelines and memory module for that device.
 *
 * @start: Initializes the resources/modules the device needs for queue
 * execution. This function is called on device initialization and after the
 * system wakes up from suspend.
 *
 * @stop: This routine stops execution of all the active queues running on
 * the H/W; it is called on system suspend.
 *
 * @pre_reset: Prepares the device queue manager for an upcoming GPU reset.
 *
 * @uninitialize: Destroys all the device queue manager resources allocated
 * in the initialize routine.
 *
 * @create_kernel_queue: Creates a kernel queue. Used for debug queue.
 *
 * @destroy_kernel_queue: Destroys a kernel queue. Used for debug queue.
 *
 * @set_cache_memory_policy: Sets the memory policy (cached/non-cached) for
 * the memory apertures.
 *
 * @process_termination: Clears all process queues belonging to that device.
 *
 * @evict_process_queues: Evicts all active queues of a process.
 *
 * @restore_process_queues: Restores all evicted queues of a process.
 *
 * @get_wave_state: Retrieves context save state and optionally copies the
 * control stack, if kept in the MQD, to the given userspace address.
 */

struct device_queue_manager_ops {
	int (*create_queue)(struct device_queue_manager *dqm,
			    struct queue *q,
			    struct qcm_process_device *qpd);

	int (*destroy_queue)(struct device_queue_manager *dqm,
			     struct qcm_process_device *qpd,
			     struct queue *q);

	int (*update_queue)(struct device_queue_manager *dqm,
			    struct queue *q, struct mqd_update_info *minfo);

	int (*register_process)(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd);

	int (*unregister_process)(struct device_queue_manager *dqm,
				  struct qcm_process_device *qpd);

	int (*initialize)(struct device_queue_manager *dqm);
	int (*start)(struct device_queue_manager *dqm);
	int (*stop)(struct device_queue_manager *dqm);
	void (*pre_reset)(struct device_queue_manager *dqm);
	void (*uninitialize)(struct device_queue_manager *dqm);
	int (*create_kernel_queue)(struct device_queue_manager *dqm,
				   struct kernel_queue *kq,
				   struct qcm_process_device *qpd);

	void (*destroy_kernel_queue)(struct device_queue_manager *dqm,
				     struct kernel_queue *kq,
				     struct qcm_process_device *qpd);

	bool (*set_cache_memory_policy)(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd,
					enum cache_policy default_policy,
					enum cache_policy alternate_policy,
					void __user *alternate_aperture_base,
					uint64_t alternate_aperture_size);

	int (*process_termination)(struct device_queue_manager *dqm,
				   struct qcm_process_device *qpd);

	int (*evict_process_queues)(struct device_queue_manager *dqm,
				    struct qcm_process_device *qpd);
	int (*restore_process_queues)(struct device_queue_manager *dqm,
				      struct qcm_process_device *qpd);

	int (*get_wave_state)(struct device_queue_manager *dqm,
			      struct queue *q,
			      void __user *ctl_stack,
			      u32 *ctl_stack_used_size,
			      u32 *save_area_used_size);
};
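/*
 * Illustrative sketch (not part of the interface): callers such as the
 * process queue manager are expected to dispatch through the ops table of
 * the per-device dqm instance rather than calling a scheduler-specific
 * implementation directly. Assuming an already initialized queue 'q' and a
 * valid 'qpd', queue setup and teardown look roughly like:
 *
 *	retval = dqm->ops.create_queue(dqm, q, qpd);
 *	if (retval)
 *		goto err_create_queue;
 *	...
 *	retval = dqm->ops.destroy_queue(dqm, qpd, q);
 */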

struct device_queue_manager_asic_ops {
	int (*update_qpd)(struct device_queue_manager *dqm,
			  struct qcm_process_device *qpd);
	bool (*set_cache_memory_policy)(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd,
					enum cache_policy default_policy,
					enum cache_policy alternate_policy,
					void __user *alternate_aperture_base,
					uint64_t alternate_aperture_size);
	void (*init_sdma_vm)(struct device_queue_manager *dqm,
			     struct queue *q,
			     struct qcm_process_device *qpd);
	struct mqd_manager *(*mqd_manager_init)(enum KFD_MQD_TYPE type,
						struct kfd_dev *dev);
};
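/*
 * Illustrative sketch (an assumption based on the per-ASIC init helpers
 * declared below): when a dqm is created, the matching
 * device_queue_manager_init_*() helper fills in this table for the
 * detected ASIC generation, roughly:
 *
 *	device_queue_manager_init_v9(&dqm->asic_ops);
 */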

/**
 * struct device_queue_manager
 *
 * This struct is a base class for the kfd queue scheduler at the device
 * level. The device base class exposes the basic operations for queue
 * creation and queue destruction. This base class hides the scheduling
 * mode of the driver and the specific implementation of the concrete
 * device. This class is the only class in the queue scheduler that
 * configures the H/W.
 */

struct device_queue_manager {
	struct device_queue_manager_ops ops;
	struct device_queue_manager_asic_ops asic_ops;

	struct mqd_manager *mqd_mgrs[KFD_MQD_TYPE_MAX];
	struct packet_manager packet_mgr;
	struct kfd_dev *dev;
	struct mutex lock_hidden; /* use dqm_lock/unlock(dqm) */
	struct list_head queues;
	unsigned int saved_flags;
	unsigned int processes_count;
	unsigned int active_queue_count;
	unsigned int active_cp_queue_count;
	unsigned int gws_queue_count;
	unsigned int total_queue_count;
	unsigned int next_pipe_to_allocate;
	unsigned int *allocated_queues;
	uint64_t sdma_bitmap;
	uint64_t xgmi_sdma_bitmap;
	/* the pasid mapping for each kfd vmid */
	uint16_t vmid_pasid[VMID_NUM];
	uint64_t pipelines_addr;
	uint64_t fence_gpu_addr;
	uint64_t *fence_addr;
	struct kfd_mem_obj *fence_mem;
	bool active_runlist;
	int sched_policy;

	/* hw exception */
	bool is_hws_hang;
	bool is_resetting;
	struct work_struct hw_exception_work;
	struct kfd_mem_obj hiq_sdma_mqd;
	bool sched_running;
};

void device_queue_manager_init_cik(
		struct device_queue_manager_asic_ops *asic_ops);
void device_queue_manager_init_cik_hawaii(
		struct device_queue_manager_asic_ops *asic_ops);
void device_queue_manager_init_vi(
		struct device_queue_manager_asic_ops *asic_ops);
void device_queue_manager_init_vi_tonga(
		struct device_queue_manager_asic_ops *asic_ops);
void device_queue_manager_init_v9(
		struct device_queue_manager_asic_ops *asic_ops);
void device_queue_manager_init_v10_navi10(
		struct device_queue_manager_asic_ops *asic_ops);
void program_sh_mem_settings(struct device_queue_manager *dqm,
			     struct qcm_process_device *qpd);
unsigned int get_cp_queues_num(struct device_queue_manager *dqm);
unsigned int get_queues_per_pipe(struct device_queue_manager *dqm);
unsigned int get_pipes_per_mec(struct device_queue_manager *dqm);
unsigned int get_num_sdma_queues(struct device_queue_manager *dqm);
unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm);

static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
{
	return (pdd->lds_base >> 16) & 0xFF;
}

static inline unsigned int
get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
{
	return (pdd->lds_base >> 60) & 0x0E;
}

/* The DQM lock can be taken in MMU notifiers. Make sure no reclaim-FS
 * happens while holding this lock anywhere to prevent deadlocks when
 * an MMU notifier runs in reclaim-FS context.
 */
static inline void dqm_lock(struct device_queue_manager *dqm)
{
	mutex_lock(&dqm->lock_hidden);
	dqm->saved_flags = memalloc_noreclaim_save();
}

static inline void dqm_unlock(struct device_queue_manager *dqm)
{
	memalloc_noreclaim_restore(dqm->saved_flags);
	mutex_unlock(&dqm->lock_hidden);
}

static inline int read_sdma_queue_counter(uint64_t __user *q_rptr, uint64_t *val)
{
	/*
	 * The SDMA activity counter is stored at the queue's RPTR + 0x8
	 * location.
	 */
	return get_user(*val, q_rptr + 1);
}

#endif /* KFD_DEVICE_QUEUE_MANAGER_H_ */