/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/slab.h>
#include <linux/mutex.h>
#include "kfd_device_queue_manager.h"
#include "kfd_kernel_queue.h"
#include "kfd_priv.h"
#include "kfd_pm4_headers_vi.h"
#include "kfd_pm4_opcodes.h"

static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
				unsigned int buffer_size_bytes)
{
	unsigned int temp = *wptr + increment_bytes / sizeof(uint32_t);

	WARN((temp * sizeof(uint32_t)) > buffer_size_bytes,
	     "Runlist IB overflow");
	*wptr = temp;
}

static unsigned int build_pm4_header(unsigned int opcode, size_t packet_size)
{
	union PM4_MES_TYPE_3_HEADER header;

	header.u32All = 0;
	header.opcode = opcode;
	header.count = packet_size / sizeof(uint32_t) - 2;
	header.type = PM4_TYPE_3;

	return header.u32All;
}

static void pm_calc_rlib_size(struct packet_manager *pm,
				unsigned int *rlib_size,
				bool *over_subscription)
{
	unsigned int process_count, queue_count;
	unsigned int map_queue_size;

	process_count = pm->dqm->processes_count;
	queue_count = pm->dqm->queue_count;

	/* check if there is over subscription */
	*over_subscription = false;
	if ((process_count > 1) || queue_count > get_queues_num(pm->dqm)) {
		*over_subscription = true;
		pr_debug("Over subscribed runlist\n");
	}

	map_queue_size = sizeof(struct pm4_mes_map_queues);
	/* calculate run list ib allocation size */
	*rlib_size = process_count * sizeof(struct pm4_mes_map_process) +
		     queue_count * map_queue_size;

	/*
	 * Increase the allocation size in case we need a chained run list
	 * when over subscription
	 */
	if (*over_subscription)
		*rlib_size += sizeof(struct pm4_mes_runlist);

	pr_debug("runlist ib size %d\n", *rlib_size);
}

static int pm_allocate_runlist_ib(struct packet_manager *pm,
				unsigned int **rl_buffer,
				uint64_t *rl_gpu_buffer,
				unsigned int *rl_buffer_size,
				bool *is_over_subscription)
{
	int retval;

	if (WARN_ON(pm->allocated))
		return -EINVAL;

	pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);

	retval = kfd_gtt_sa_allocate(pm->dqm->dev, *rl_buffer_size,
					&pm->ib_buffer_obj);

	if (retval) {
		pr_err("Failed to allocate runlist IB\n");
		return retval;
	}

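	/*
	 * Hand the sub-allocated IB back to the caller: the CPU pointer is
	 * used to write the runlist packets, the GPU address is what the
	 * RUN_LIST packet will point the CP at.
	 */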
	*(void **)rl_buffer = pm->ib_buffer_obj->cpu_ptr;
	*rl_gpu_buffer = pm->ib_buffer_obj->gpu_addr;

	memset(*rl_buffer, 0, *rl_buffer_size);
	pm->allocated = true;
	return retval;
}

static int pm_create_runlist(struct packet_manager *pm, uint32_t *buffer,
			uint64_t ib, size_t ib_size_in_dwords, bool chain)
{
	struct pm4_mes_runlist *packet;

	if (WARN_ON(!ib))
		return -EFAULT;

	packet = (struct pm4_mes_runlist *)buffer;

	memset(buffer, 0, sizeof(struct pm4_mes_runlist));
	packet->header.u32All = build_pm4_header(IT_RUN_LIST,
					sizeof(struct pm4_mes_runlist));

	packet->bitfields4.ib_size = ib_size_in_dwords;
	packet->bitfields4.chain = chain ? 1 : 0;
	packet->bitfields4.offload_polling = 0;
	packet->bitfields4.valid = 1;
	packet->ordinal2 = lower_32_bits(ib);
	packet->bitfields3.ib_base_hi = upper_32_bits(ib);

	return 0;
}

static int pm_create_map_process(struct packet_manager *pm, uint32_t *buffer,
				struct qcm_process_device *qpd)
{
	struct pm4_mes_map_process *packet;
	struct queue *cur;
	uint32_t num_queues;

	packet = (struct pm4_mes_map_process *)buffer;

	memset(buffer, 0, sizeof(struct pm4_mes_map_process));

	packet->header.u32All = build_pm4_header(IT_MAP_PROCESS,
					sizeof(struct pm4_mes_map_process));
	packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
	packet->bitfields2.process_quantum = 1;
	packet->bitfields2.pasid = qpd->pqm->process->pasid;
	packet->bitfields3.page_table_base = qpd->page_table_base;
	packet->bitfields10.gds_size = qpd->gds_size;
	packet->bitfields10.num_gws = qpd->num_gws;
	packet->bitfields10.num_oac = qpd->num_oac;
	num_queues = 0;
	list_for_each_entry(cur, &qpd->queues_list, list)
		num_queues++;
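	/*
	 * The queues of a debug process are mapped individually as static
	 * queues by the runlist builder, so the queue count carried in the
	 * MAP_PROCESS packet is forced to zero in that case.
	 */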
	packet->bitfields10.num_queues = (qpd->is_debug) ? 0 : num_queues;

	packet->sh_mem_config = qpd->sh_mem_config;
	packet->sh_mem_bases = qpd->sh_mem_bases;
	packet->sh_mem_ape1_base = qpd->sh_mem_ape1_base;
	packet->sh_mem_ape1_limit = qpd->sh_mem_ape1_limit;

	/* TODO: scratch support */
	packet->sh_hidden_private_base_vmid = 0;

	packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
	packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);

	return 0;
}

static int pm_create_map_queue(struct packet_manager *pm, uint32_t *buffer,
				struct queue *q, bool is_static)
{
	struct pm4_mes_map_queues *packet;
	bool use_static = is_static;

	packet = (struct pm4_mes_map_queues *)buffer;
	memset(buffer, 0, sizeof(struct pm4_mes_map_queues));

	packet->header.u32All = build_pm4_header(IT_MAP_QUEUES,
					sizeof(struct pm4_mes_map_queues));
	packet->bitfields2.alloc_format =
			alloc_format__mes_map_queues__one_per_pipe_vi;
	packet->bitfields2.num_queues = 1;
	packet->bitfields2.queue_sel =
		queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi;

	packet->bitfields2.engine_sel =
			engine_sel__mes_map_queues__compute_vi;
	packet->bitfields2.queue_type =
			queue_type__mes_map_queues__normal_compute_vi;

	switch (q->properties.type) {
	case KFD_QUEUE_TYPE_COMPUTE:
		if (use_static)
			packet->bitfields2.queue_type =
		queue_type__mes_map_queues__normal_latency_static_queue_vi;
		break;
	case KFD_QUEUE_TYPE_DIQ:
		packet->bitfields2.queue_type =
			queue_type__mes_map_queues__debug_interface_queue_vi;
		break;
	case KFD_QUEUE_TYPE_SDMA:
		packet->bitfields2.engine_sel =
				engine_sel__mes_map_queues__sdma0_vi;
		use_static = false; /* no static queues under SDMA */
		break;
	default:
		WARN(1, "queue type %d", q->properties.type);
		return -EINVAL;
	}
	packet->bitfields3.doorbell_offset =
			q->properties.doorbell_off;

	packet->mqd_addr_lo =
			lower_32_bits(q->gart_mqd_addr);

	packet->mqd_addr_hi =
			upper_32_bits(q->gart_mqd_addr);

	packet->wptr_addr_lo =
			lower_32_bits((uint64_t)q->properties.write_ptr);

	packet->wptr_addr_hi =
			upper_32_bits((uint64_t)q->properties.write_ptr);

	return 0;
}

static int pm_create_runlist_ib(struct packet_manager *pm,
				struct list_head *queues,
				uint64_t *rl_gpu_addr,
				size_t *rl_size_bytes)
{
	unsigned int alloc_size_bytes;
	unsigned int *rl_buffer, rl_wptr, i;
	int retval, processes_mapped;
	struct device_process_node *cur;
	struct qcm_process_device *qpd;
	struct queue *q;
	struct kernel_queue *kq;
	bool is_over_subscription;

	rl_wptr = retval = processes_mapped = 0;

	retval = pm_allocate_runlist_ib(pm, &rl_buffer, rl_gpu_addr,
				&alloc_size_bytes, &is_over_subscription);
	if (retval)
		return retval;

	*rl_size_bytes = alloc_size_bytes;

	pr_debug("Building runlist ib, process count: %d, queue count: %d\n",
		pm->dqm->processes_count, pm->dqm->queue_count);

	/* build the run list ib packet */
	list_for_each_entry(cur, queues, list) {
		qpd = cur->qpd;
		/* build map process packet */
		if (processes_mapped >= pm->dqm->processes_count) {
			pr_debug("Not enough space left in runlist IB\n");
			pm_release_ib(pm);
			return -ENOMEM;
		}

		retval = pm_create_map_process(pm, &rl_buffer[rl_wptr], qpd);
		if (retval)
			return retval;

		processes_mapped++;
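		/*
		 * Advance the runlist write pointer past the packet just
		 * written; inc_wptr() WARNs if the IB would overflow.
		 */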
		inc_wptr(&rl_wptr,
			sizeof(struct pm4_mes_map_process),
			alloc_size_bytes);

		list_for_each_entry(kq, &qpd->priv_queue_list, list) {
			if (!kq->queue->properties.is_active)
				continue;

			pr_debug("static_queue, mapping kernel q %d, is debug status %d\n",
				kq->queue->queue, qpd->is_debug);

			retval = pm_create_map_queue(pm,
						&rl_buffer[rl_wptr],
						kq->queue,
						qpd->is_debug);
			if (retval)
				return retval;

			inc_wptr(&rl_wptr,
				sizeof(struct pm4_mes_map_queues),
				alloc_size_bytes);
		}

		list_for_each_entry(q, &qpd->queues_list, list) {
			if (!q->properties.is_active)
				continue;

			pr_debug("static_queue, mapping user queue %d, is debug status %d\n",
				q->queue, qpd->is_debug);

			retval = pm_create_map_queue(pm,
						&rl_buffer[rl_wptr],
						q,
						qpd->is_debug);
			if (retval)
				return retval;

			inc_wptr(&rl_wptr,
				sizeof(struct pm4_mes_map_queues),
				alloc_size_bytes);
		}
	}

	pr_debug("Finished map process and queues to runlist\n");

	if (is_over_subscription)
		retval = pm_create_runlist(pm, &rl_buffer[rl_wptr],
					*rl_gpu_addr,
					alloc_size_bytes / sizeof(uint32_t),
					true);

	for (i = 0; i < alloc_size_bytes / sizeof(uint32_t); i++)
		pr_debug("0x%2X ", rl_buffer[i]);
	pr_debug("\n");

	return retval;
}

int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
{
	pm->dqm = dqm;
	mutex_init(&pm->lock);
	pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ);
	if (!pm->priv_queue) {
		mutex_destroy(&pm->lock);
		return -ENOMEM;
	}
	pm->allocated = false;

	return 0;
}

void pm_uninit(struct packet_manager *pm)
{
	mutex_destroy(&pm->lock);
	kernel_queue_uninit(pm->priv_queue);
}

int pm_send_set_resources(struct packet_manager *pm,
				struct scheduling_resources *res)
{
	/* NULL-init so the check below is safe even if acquisition fails
	 * without touching the pointer.
	 */
	struct pm4_mes_set_resources *packet = NULL;
	int retval = 0;

	mutex_lock(&pm->lock);
	pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
					sizeof(*packet) / sizeof(uint32_t),
					(unsigned int **)&packet);
	if (!packet) {
		pr_err("Failed to allocate buffer on kernel queue\n");
		retval = -ENOMEM;
		goto out;
	}

	memset(packet, 0, sizeof(struct pm4_mes_set_resources));
	packet->header.u32All = build_pm4_header(IT_SET_RESOURCES,
					sizeof(struct pm4_mes_set_resources));

	packet->bitfields2.queue_type =
			queue_type__mes_set_resources__hsa_interface_queue_hiq;
	packet->bitfields2.vmid_mask = res->vmid_mask;
	packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY;
	packet->bitfields7.oac_mask = res->oac_mask;
	packet->bitfields8.gds_heap_base = res->gds_heap_base;
	packet->bitfields8.gds_heap_size = res->gds_heap_size;

	packet->gws_mask_lo = lower_32_bits(res->gws_mask);
	packet->gws_mask_hi = upper_32_bits(res->gws_mask);

	packet->queue_mask_lo = lower_32_bits(res->queue_mask);
	packet->queue_mask_hi = upper_32_bits(res->queue_mask);

	pm->priv_queue->ops.submit_packet(pm->priv_queue);

out:
	mutex_unlock(&pm->lock);

	return retval;
}

int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
{
	uint64_t rl_gpu_ib_addr;
	uint32_t *rl_buffer;
	size_t rl_ib_size, packet_size_dwords;
	int retval;

	retval = pm_create_runlist_ib(pm, dqm_queues, &rl_gpu_ib_addr,
					&rl_ib_size);
	if (retval)
		goto fail_create_runlist_ib;

	pr_debug("runlist IB address: 0x%llX\n", rl_gpu_ib_addr);

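	/*
	 * Submit a single RUN_LIST packet on the HIQ to hand the runlist
	 * IB built above over to the hardware scheduler.
	 */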
	packet_size_dwords = sizeof(struct pm4_mes_runlist) / sizeof(uint32_t);
	mutex_lock(&pm->lock);

	retval = pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
					packet_size_dwords, &rl_buffer);
	if (retval)
		goto fail_acquire_packet_buffer;

	retval = pm_create_runlist(pm, rl_buffer, rl_gpu_ib_addr,
					rl_ib_size / sizeof(uint32_t), false);
	if (retval)
		goto fail_create_runlist;

	pm->priv_queue->ops.submit_packet(pm->priv_queue);

	mutex_unlock(&pm->lock);

	return retval;

fail_create_runlist:
	pm->priv_queue->ops.rollback_packet(pm->priv_queue);
fail_acquire_packet_buffer:
	mutex_unlock(&pm->lock);
fail_create_runlist_ib:
	pm_release_ib(pm);
	return retval;
}

int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
			uint32_t fence_value)
{
	int retval;
	struct pm4_mes_query_status *packet;

	if (WARN_ON(!fence_address))
		return -EFAULT;

	mutex_lock(&pm->lock);
	retval = pm->priv_queue->ops.acquire_packet_buffer(
			pm->priv_queue,
			sizeof(struct pm4_mes_query_status) / sizeof(uint32_t),
			(unsigned int **)&packet);
	if (retval)
		goto fail_acquire_packet_buffer;

	packet->header.u32All = build_pm4_header(IT_QUERY_STATUS,
					sizeof(struct pm4_mes_query_status));

	packet->bitfields2.context_id = 0;
	packet->bitfields2.interrupt_sel =
			interrupt_sel__mes_query_status__completion_status;
	packet->bitfields2.command =
			command__mes_query_status__fence_only_after_write_ack;

	packet->addr_hi = upper_32_bits((uint64_t)fence_address);
	packet->addr_lo = lower_32_bits((uint64_t)fence_address);
	packet->data_hi = upper_32_bits((uint64_t)fence_value);
	packet->data_lo = lower_32_bits((uint64_t)fence_value);

	pm->priv_queue->ops.submit_packet(pm->priv_queue);

fail_acquire_packet_buffer:
	mutex_unlock(&pm->lock);
	return retval;
}

int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
			enum kfd_preempt_type_filter mode,
			uint32_t filter_param, bool reset,
			unsigned int sdma_engine)
{
	int retval;
	uint32_t *buffer;
	struct pm4_mes_unmap_queues *packet;

	mutex_lock(&pm->lock);
	retval = pm->priv_queue->ops.acquire_packet_buffer(
			pm->priv_queue,
			sizeof(struct pm4_mes_unmap_queues) / sizeof(uint32_t),
			&buffer);
	if (retval)
		goto err_acquire_packet_buffer;

	packet = (struct pm4_mes_unmap_queues *)buffer;
	memset(buffer, 0, sizeof(struct pm4_mes_unmap_queues));
	pr_debug("static_queue: unmapping queues: mode is %d, reset is %d, type is %d\n",
		mode, reset, type);
	packet->header.u32All = build_pm4_header(IT_UNMAP_QUEUES,
					sizeof(struct pm4_mes_unmap_queues));
	switch (type) {
	case KFD_QUEUE_TYPE_COMPUTE:
	case KFD_QUEUE_TYPE_DIQ:
		packet->bitfields2.engine_sel =
			engine_sel__mes_unmap_queues__compute;
		break;
	case KFD_QUEUE_TYPE_SDMA:
		packet->bitfields2.engine_sel =
			engine_sel__mes_unmap_queues__sdma0 + sdma_engine;
		break;
	default:
		WARN(1, "queue type %d", type);
		retval = -EINVAL;
		goto err_invalid;
	}

	if (reset)
		packet->bitfields2.action =
			action__mes_unmap_queues__reset_queues;
	else
		packet->bitfields2.action =
			action__mes_unmap_queues__preempt_queues;

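	/*
	 * Translate the KFD preemption filter into the packet's queue
	 * selection: a single queue is addressed by doorbell offset, a
	 * process by PASID, and the "dynamic" filter leaves static queues
	 * mapped.
	 */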
	switch (mode) {
	case KFD_PREEMPT_TYPE_FILTER_SINGLE_QUEUE:
		packet->bitfields2.queue_sel =
			queue_sel__mes_unmap_queues__perform_request_on_specified_queues;
		packet->bitfields2.num_queues = 1;
		packet->bitfields3b.doorbell_offset0 = filter_param;
		break;
	case KFD_PREEMPT_TYPE_FILTER_BY_PASID:
		packet->bitfields2.queue_sel =
			queue_sel__mes_unmap_queues__perform_request_on_pasid_queues;
		packet->bitfields3a.pasid = filter_param;
		break;
	case KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES:
		packet->bitfields2.queue_sel =
			queue_sel__mes_unmap_queues__unmap_all_queues;
		break;
	case KFD_PREEMPT_TYPE_FILTER_DYNAMIC_QUEUES:
		/* in this case, we do not preempt static queues */
		packet->bitfields2.queue_sel =
			queue_sel__mes_unmap_queues__unmap_all_non_static_queues;
		break;
	default:
		WARN(1, "filter %d", mode);
		retval = -EINVAL;
		goto err_invalid;
	}

	pm->priv_queue->ops.submit_packet(pm->priv_queue);

	mutex_unlock(&pm->lock);
	return 0;

err_invalid:
	pm->priv_queue->ops.rollback_packet(pm->priv_queue);
err_acquire_packet_buffer:
	mutex_unlock(&pm->lock);
	return retval;
}

void pm_release_ib(struct packet_manager *pm)
{
	mutex_lock(&pm->lock);
	if (pm->allocated) {
		kfd_gtt_sa_free(pm->dqm->dev, pm->ib_buffer_obj);
		pm->allocated = false;
	}
	mutex_unlock(&pm->lock);
}