/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/slab.h>
#include <linux/mutex.h>
#include "kfd_device_queue_manager.h"
#include "kfd_kernel_queue.h"
#include "kfd_priv.h"

/* Advance the runlist IB write pointer (in dwords) by increment_bytes,
 * warning if the new position would overflow the allocated buffer.
 */
static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
			unsigned int buffer_size_bytes)
{
	unsigned int temp = *wptr + increment_bytes / sizeof(uint32_t);

	WARN((temp * sizeof(uint32_t)) > buffer_size_bytes,
	     "Runlist IB overflow");
	*wptr = temp;
}
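/*
 * Compute the size of the runlist indirect buffer (IB) and decide whether
 * the runlist is over-subscribed. The IB holds one MAP_PROCESS packet per
 * process and one MAP_QUEUES packet per queue; the packet sizes come from
 * the per-ASIC pm->pmf table.
 *
 * Illustrative sizing example (hypothetical packet sizes, for the
 * arithmetic only; real values vary by ASIC): with 2 processes, 6 queues,
 * a 60-byte MAP_PROCESS packet and a 24-byte MAP_QUEUES packet, the IB
 * needs 2 * 60 + 6 * 24 = 264 bytes, plus room for one RUN_LIST chaining
 * packet when the runlist is over-subscribed.
 */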
static void pm_calc_rlib_size(struct packet_manager *pm,
				unsigned int *rlib_size,
				bool *over_subscription)
{
	unsigned int process_count, queue_count, compute_queue_count;
	unsigned int map_queue_size;
	unsigned int max_proc_per_quantum = 1;
	struct kfd_dev *dev = pm->dqm->dev;

	process_count = pm->dqm->processes_count;
	queue_count = pm->dqm->queue_count;
	compute_queue_count = queue_count - pm->dqm->sdma_queue_count;

	/* Check if there is over-subscription.
	 * Note: the arbitration between the number of VMIDs and
	 * hws_max_conc_proc has been done in kgd2kfd_device_init().
	 */
	*over_subscription = false;

	if (dev->max_proc_per_quantum > 1)
		max_proc_per_quantum = dev->max_proc_per_quantum;

	if ((process_count > max_proc_per_quantum) ||
	    compute_queue_count > get_queues_num(pm->dqm)) {
		*over_subscription = true;
		pr_debug("Over subscribed runlist\n");
	}

	map_queue_size = pm->pmf->map_queues_size;
	/* calculate run list ib allocation size */
	*rlib_size = process_count * pm->pmf->map_process_size +
		     queue_count * map_queue_size;

	/*
	 * Increase the allocation size in case we need a chained run list
	 * when over-subscribed.
	 */
	if (*over_subscription)
		*rlib_size += pm->pmf->runlist_size;

	pr_debug("runlist ib size %d\n", *rlib_size);
}

static int pm_allocate_runlist_ib(struct packet_manager *pm,
				unsigned int **rl_buffer,
				uint64_t *rl_gpu_buffer,
				unsigned int *rl_buffer_size,
				bool *is_over_subscription)
{
	int retval;

	if (WARN_ON(pm->allocated))
		return -EINVAL;

	pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);

	mutex_lock(&pm->lock);

	retval = kfd_gtt_sa_allocate(pm->dqm->dev, *rl_buffer_size,
					&pm->ib_buffer_obj);
	if (retval) {
		pr_err("Failed to allocate runlist IB\n");
		goto out;
	}

	*(void **)rl_buffer = pm->ib_buffer_obj->cpu_ptr;
	*rl_gpu_buffer = pm->ib_buffer_obj->gpu_addr;

	memset(*rl_buffer, 0, *rl_buffer_size);
	pm->allocated = true;

out:
	mutex_unlock(&pm->lock);
	return retval;
}

static int pm_create_runlist_ib(struct packet_manager *pm,
				struct list_head *queues,
				uint64_t *rl_gpu_addr,
				size_t *rl_size_bytes)
{
	unsigned int alloc_size_bytes;
	unsigned int *rl_buffer, rl_wptr, i;
	int retval, processes_mapped;
	struct device_process_node *cur;
	struct qcm_process_device *qpd;
	struct queue *q;
	struct kernel_queue *kq;
	bool is_over_subscription;

	rl_wptr = retval = processes_mapped = 0;

	retval = pm_allocate_runlist_ib(pm, &rl_buffer, rl_gpu_addr,
					&alloc_size_bytes,
					&is_over_subscription);
	if (retval)
		return retval;

	*rl_size_bytes = alloc_size_bytes;
	pm->ib_size_bytes = alloc_size_bytes;

	pr_debug("Building runlist ib, process count: %d, queue count: %d\n",
		pm->dqm->processes_count, pm->dqm->queue_count);

	/* build the run list ib packet */
	list_for_each_entry(cur, queues, list) {
		qpd = cur->qpd;
		/* build map process packet */
		if (processes_mapped >= pm->dqm->processes_count) {
			pr_debug("Not enough space left in runlist IB\n");
			pm_release_ib(pm);
			return -ENOMEM;
		}

		retval = pm->pmf->map_process(pm, &rl_buffer[rl_wptr], qpd);
		if (retval)
			return retval;

		processes_mapped++;
		inc_wptr(&rl_wptr, pm->pmf->map_process_size,
				alloc_size_bytes);

		list_for_each_entry(kq, &qpd->priv_queue_list, list) {
			if (!kq->queue->properties.is_active)
				continue;

			pr_debug("static_queue, mapping kernel q %d, is debug status %d\n",
				kq->queue->queue, qpd->is_debug);

			retval = pm->pmf->map_queues(pm,
						&rl_buffer[rl_wptr],
						kq->queue,
						qpd->is_debug);
			if (retval)
				return retval;

			inc_wptr(&rl_wptr,
					pm->pmf->map_queues_size,
					alloc_size_bytes);
		}

		list_for_each_entry(q, &qpd->queues_list, list) {
			if (!q->properties.is_active)
				continue;

			pr_debug("static_queue, mapping user queue %d, is debug status %d\n",
				q->queue, qpd->is_debug);

			retval = pm->pmf->map_queues(pm,
						&rl_buffer[rl_wptr],
						q,
						qpd->is_debug);
			if (retval)
				return retval;

			inc_wptr(&rl_wptr,
					pm->pmf->map_queues_size,
					alloc_size_bytes);
		}
	}

	pr_debug("Finished map process and queues to runlist\n");

	if (is_over_subscription)
		/* Chain back to the start of the runlist so the HWS keeps
		 * cycling through the over-subscribed queues.
		 */
		retval = pm->pmf->runlist(pm, &rl_buffer[rl_wptr],
					*rl_gpu_addr,
					alloc_size_bytes / sizeof(uint32_t),
					true);

	for (i = 0; i < alloc_size_bytes / sizeof(uint32_t); i++)
		pr_debug("0x%2X ", rl_buffer[i]);
	pr_debug("\n");

	return retval;
}
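/*
 * pm_init() selects the ASIC-specific packet-writing functions and creates
 * the HIQ kernel queue through which the runlist, set-resources,
 * query-status and unmap packets are submitted to the hardware scheduler
 * (HWS).
 */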
int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
{
	switch (dqm->dev->device_info->asic_family) {
	case CHIP_KAVERI:
	case CHIP_HAWAII:
		/* PM4 packet structures on CIK are the same as on VI */
	case CHIP_CARRIZO:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
		pm->pmf = &kfd_vi_pm_funcs;
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RAVEN:
		pm->pmf = &kfd_v9_pm_funcs;
		break;
	default:
		WARN(1, "Unexpected ASIC family %u",
		     dqm->dev->device_info->asic_family);
		return -EINVAL;
	}

	pm->dqm = dqm;
	mutex_init(&pm->lock);
	pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ);
	if (!pm->priv_queue) {
		mutex_destroy(&pm->lock);
		return -ENOMEM;
	}
	pm->allocated = false;

	return 0;
}

void pm_uninit(struct packet_manager *pm)
{
	mutex_destroy(&pm->lock);
	kernel_queue_uninit(pm->priv_queue);
}

int pm_send_set_resources(struct packet_manager *pm,
			struct scheduling_resources *res)
{
	uint32_t *buffer, size;
	int retval = 0;

	size = pm->pmf->set_resources_size;
	mutex_lock(&pm->lock);
	pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
					size / sizeof(uint32_t),
					(unsigned int **)&buffer);
	if (!buffer) {
		pr_err("Failed to allocate buffer on kernel queue\n");
		retval = -ENOMEM;
		goto out;
	}

	retval = pm->pmf->set_resources(pm, buffer, res);
	if (!retval)
		pm->priv_queue->ops.submit_packet(pm->priv_queue);
	else
		pm->priv_queue->ops.rollback_packet(pm->priv_queue);

out:
	mutex_unlock(&pm->lock);

	return retval;
}

int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
{
	uint64_t rl_gpu_ib_addr;
	uint32_t *rl_buffer;
	size_t rl_ib_size, packet_size_dwords;
	int retval;

	retval = pm_create_runlist_ib(pm, dqm_queues, &rl_gpu_ib_addr,
					&rl_ib_size);
	if (retval)
		goto fail_create_runlist_ib;

	pr_debug("runlist IB address: 0x%llX\n", rl_gpu_ib_addr);

	packet_size_dwords = pm->pmf->runlist_size / sizeof(uint32_t);
	mutex_lock(&pm->lock);

	retval = pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
					packet_size_dwords, &rl_buffer);
	if (retval)
		goto fail_acquire_packet_buffer;

	retval = pm->pmf->runlist(pm, rl_buffer, rl_gpu_ib_addr,
					rl_ib_size / sizeof(uint32_t), false);
	if (retval)
		goto fail_create_runlist;

	pm->priv_queue->ops.submit_packet(pm->priv_queue);

	mutex_unlock(&pm->lock);

	return retval;

fail_create_runlist:
	pm->priv_queue->ops.rollback_packet(pm->priv_queue);
fail_acquire_packet_buffer:
	mutex_unlock(&pm->lock);
fail_create_runlist_ib:
	pm_release_ib(pm);
	return retval;
}
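/*
 * Ask the HWS to write fence_value to fence_address once it has processed
 * the preceding packets; callers can then poll the fence location to tell
 * when earlier operations (e.g. an unmap) have completed.
 */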
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
			uint32_t fence_value)
{
	uint32_t *buffer, size;
	int retval = 0;

	if (WARN_ON(!fence_address))
		return -EFAULT;

	size = pm->pmf->query_status_size;
	mutex_lock(&pm->lock);
	pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
			size / sizeof(uint32_t), (unsigned int **)&buffer);
	if (!buffer) {
		pr_err("Failed to allocate buffer on kernel queue\n");
		retval = -ENOMEM;
		goto out;
	}

	retval = pm->pmf->query_status(pm, buffer, fence_address, fence_value);
	if (!retval)
		pm->priv_queue->ops.submit_packet(pm->priv_queue);
	else
		pm->priv_queue->ops.rollback_packet(pm->priv_queue);

out:
	mutex_unlock(&pm->lock);
	return retval;
}

int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
			enum kfd_unmap_queues_filter filter,
			uint32_t filter_param, bool reset,
			unsigned int sdma_engine)
{
	uint32_t *buffer, size;
	int retval = 0;

	size = pm->pmf->unmap_queues_size;
	mutex_lock(&pm->lock);
	pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
			size / sizeof(uint32_t), (unsigned int **)&buffer);
	if (!buffer) {
		pr_err("Failed to allocate buffer on kernel queue\n");
		retval = -ENOMEM;
		goto out;
	}

	retval = pm->pmf->unmap_queues(pm, buffer, type, filter, filter_param,
					reset, sdma_engine);
	if (!retval)
		pm->priv_queue->ops.submit_packet(pm->priv_queue);
	else
		pm->priv_queue->ops.rollback_packet(pm->priv_queue);

out:
	mutex_unlock(&pm->lock);
	return retval;
}

void pm_release_ib(struct packet_manager *pm)
{
	mutex_lock(&pm->lock);
	if (pm->allocated) {
		kfd_gtt_sa_free(pm->dqm->dev, pm->ib_buffer_obj);
		pm->allocated = false;
	}
	mutex_unlock(&pm->lock);
}

#if defined(CONFIG_DEBUG_FS)

int pm_debugfs_runlist(struct seq_file *m, void *data)
{
	struct packet_manager *pm = data;

	mutex_lock(&pm->lock);

	if (!pm->allocated) {
		seq_puts(m, " No active runlist\n");
		goto out;
	}

	seq_hex_dump(m, " ", DUMP_PREFIX_OFFSET, 32, 4,
		     pm->ib_buffer_obj->cpu_ptr, pm->ib_size_bytes, false);

out:
	mutex_unlock(&pm->lock);
	return 0;
}

int pm_debugfs_hang_hws(struct packet_manager *pm)
{
	uint32_t *buffer, size;
	int r = 0;

	size = pm->pmf->query_status_size;
	mutex_lock(&pm->lock);
	pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
			size / sizeof(uint32_t), (unsigned int **)&buffer);
	if (!buffer) {
		pr_err("Failed to allocate buffer on kernel queue\n");
		r = -ENOMEM;
		goto out;
	}

	/* Fill the packet slot with a garbage pattern instead of a valid
	 * PM4 packet and submit it, to deliberately hang the HWS for
	 * debugging.
	 */
	memset(buffer, 0x55, size);
	pm->priv_queue->ops.submit_packet(pm->priv_queue);

	pr_info("Submitting %x %x %x %x %x %x %x to HIQ to hang the HWS.",
		buffer[0], buffer[1], buffer[2], buffer[3],
		buffer[4], buffer[5], buffer[6]);
out:
	mutex_unlock(&pm->lock);
	return r;
}

#endif