/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef KFD_PRIV_H_INCLUDED
#define KFD_PRIV_H_INCLUDED

#include <linux/hashtable.h>
#include <linux/mmu_notifier.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kfd_ioctl.h>
#include <linux/idr.h>
#include <linux/kfifo.h>
#include <linux/seq_file.h>
#include <linux/kref.h>
#include <kgd_kfd_interface.h>

#include "amd_shared.h"

#define KFD_MAX_RING_ENTRY_SIZE	8

#define KFD_SYSFS_FILE_MODE 0444

/* GPU ID hash width in bits */
#define KFD_GPU_ID_HASH_WIDTH 16

/* Use upper bits of mmap offset to store KFD driver-specific information.
 * BITS[63:62] - Encode mmap type
 * BITS[61:46] - Encode gpu_id, identifying which GPU the offset belongs to
 * BITS[45:0]  - mmap offset value
 *
 * NOTE: struct vm_area_struct.vm_pgoff uses an offset in pages. Hence, these
 * defines are w.r.t. PAGE_SIZE
 */
#define KFD_MMAP_TYPE_SHIFT	(62 - PAGE_SHIFT)
#define KFD_MMAP_TYPE_MASK	(0x3ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_DOORBELL	(0x3ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_EVENTS	(0x2ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_RESERVED_MEM	(0x1ULL << KFD_MMAP_TYPE_SHIFT)

#define KFD_MMAP_GPU_ID_SHIFT (46 - PAGE_SHIFT)
#define KFD_MMAP_GPU_ID_MASK (((1ULL << KFD_GPU_ID_HASH_WIDTH) - 1) \
				<< KFD_MMAP_GPU_ID_SHIFT)
#define KFD_MMAP_GPU_ID(gpu_id) ((((uint64_t)gpu_id) << KFD_MMAP_GPU_ID_SHIFT)\
				& KFD_MMAP_GPU_ID_MASK)
#define KFD_MMAP_GPU_ID_GET(offset) ((offset & KFD_MMAP_GPU_ID_MASK) \
				>> KFD_MMAP_GPU_ID_SHIFT)

#define KFD_MMAP_OFFSET_VALUE_MASK	(0x3FFFFFFFFFFFULL >> PAGE_SHIFT)
#define KFD_MMAP_OFFSET_VALUE_GET(offset) (offset & KFD_MMAP_OFFSET_VALUE_MASK)
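
/*
 * Illustrative sketch (not part of the driver interface; variable names
 * are hypothetical): how the mmap offset helpers above compose and
 * decompose. The mmap type and gpu_id are packed into the upper bits of
 * the page offset; the per-type payload occupies the low value bits:
 *
 *	// encode: build the offset handed back to user space
 *	uint64_t offset = KFD_MMAP_TYPE_DOORBELL | KFD_MMAP_GPU_ID(gpu_id);
 *
 *	// decode: recover the fields from vma->vm_pgoff in an mmap handler
 *	uint64_t type   = vma->vm_pgoff & KFD_MMAP_TYPE_MASK;
 *	uint32_t gpu_id = KFD_MMAP_GPU_ID_GET(vma->vm_pgoff);
 *	uint64_t value  = KFD_MMAP_OFFSET_VALUE_GET(vma->vm_pgoff);
 */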

/*
 * When working with the cp scheduler we should assign the HIQ manually or
 * via the amdgpu driver to a fixed hqd slot; here are the fixed HIQ hqd
 * slot definitions for Kaveri. In Kaveri only the first ME participates in
 * the cp scheduling, so with that in mind we set the HIQ slot in the
 * second ME.
 */
#define KFD_CIK_HIQ_PIPE 4
#define KFD_CIK_HIQ_QUEUE 0

/* Macro for allocating structures */
#define kfd_alloc_struct(ptr_to_struct)	\
	((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL))

#define KFD_MAX_NUM_OF_PROCESSES 512
#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024

/*
 * Size of the per-process TBA+TMA buffer: 2 pages
 *
 * The first page is the TBA used for the CWSR ISA code. The second
 * page is used as TMA for daisy-chaining a user-mode trap handler.
 */
#define KFD_CWSR_TBA_TMA_SIZE (PAGE_SIZE * 2)
#define KFD_CWSR_TMA_OFFSET PAGE_SIZE

#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE		\
	(KFD_MAX_NUM_OF_PROCESSES *			\
			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)

#define KFD_KERNEL_QUEUE_SIZE 2048

/*
 * 512 = 0x200
 * The doorbell index distance between SDMA RLC (2*i) and (2*i+1) in the
 * same SDMA engine on SOC15, which has 8-byte doorbells for SDMA.
 * A 512 8-byte doorbell distance (i.e. one page away) ensures that the
 * SDMA RLC (2*i+1) doorbells (in terms of the lower 12-bit address) lie
 * exactly in the OFFSET and SIZE set in registers like
 * BIF_SDMA0_DOORBELL_RANGE.
 */
#define KFD_QUEUE_DOORBELL_MIRROR_OFFSET 512
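
/*
 * Worked example for the constant above: 512 doorbell slots x 8 bytes per
 * SOC15 SDMA doorbell = 4096 bytes, i.e. exactly one 4K page. So the SDMA
 * RLC (2*i+1) doorbell keeps the same low 12-bit address as its (2*i)
 * sibling, one page later:
 *
 *	(2*i+1) doorbell index = (2*i) doorbell index +
 *				 KFD_QUEUE_DOORBELL_MIRROR_OFFSET;
 */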

/*
 * Kernel module parameter to specify the maximum number of supported queues
 * per device
 */
extern int max_num_of_queues_per_device;

/* Kernel module parameter to specify the scheduling policy */
extern int sched_policy;

/*
 * Kernel module parameter to specify the maximum process
 * number per HW scheduler
 */
extern int hws_max_conc_proc;

extern int cwsr_enable;

/*
 * Kernel module parameter to specify whether to send SIGTERM to an HSA
 * process on an unhandled exception
 */
extern int send_sigterm;

/*
 * Kernel module parameter used to simulate a large-BAR machine on machines
 * without large BAR enabled.
 */
extern int debug_largebar;

/*
 * Ignore CRAT table during KFD initialization; can be used to work around
 * broken CRAT tables on some AMD systems
 */
extern int ignore_crat;

/*
 * Set sh_mem_config.retry_disable on Vega10
 */
extern int noretry;

/*
 * Halt if HWS hang is detected
 */
extern int halt_if_hws_hang;

enum cache_policy {
	cache_policy_coherent,
	cache_policy_noncoherent
};

#define KFD_IS_SOC15(chip) ((chip) >= CHIP_VEGA10)

struct kfd_event_interrupt_class {
	bool (*interrupt_isr)(struct kfd_dev *dev,
			const uint32_t *ih_ring_entry, uint32_t *patched_ihre,
			bool *patched_flag);
	void (*interrupt_wq)(struct kfd_dev *dev,
			const uint32_t *ih_ring_entry);
};

struct kfd_device_info {
	enum amd_asic_type asic_family;
	const struct kfd_event_interrupt_class *event_interrupt_class;
	unsigned int max_pasid_bits;
	unsigned int max_no_of_hqd;
	unsigned int doorbell_size;
	size_t ih_ring_entry_size;
	uint8_t num_of_watch_points;
	uint16_t mqd_size_aligned;
	bool supports_cwsr;
	bool needs_iommu_device;
	bool needs_pci_atomics;
	unsigned int num_sdma_engines;
	unsigned int num_sdma_queues_per_engine;
};

struct kfd_mem_obj {
	uint32_t range_start;
	uint32_t range_end;
	uint64_t gpu_addr;
	uint32_t *cpu_ptr;
	void *gtt_mem;
};

struct kfd_vmid_info {
	uint32_t first_vmid_kfd;
	uint32_t last_vmid_kfd;
	uint32_t vmid_num_kfd;
};

struct kfd_dev {
	struct kgd_dev *kgd;

	const struct kfd_device_info *device_info;
	struct pci_dev *pdev;

	unsigned int id;		/* topology stub index */

	phys_addr_t doorbell_base;	/* Start of actual doorbells used by
					 * KFD. It is aligned for mapping
					 * into user mode
					 */
	size_t doorbell_id_offset;	/* Doorbell offset (from KFD doorbell
					 * to HW doorbell, GFX reserved some
					 * at the start)
					 */
	u32 __iomem *doorbell_kernel_ptr; /* This is a pointer for a doorbells
					   * page used by kernel queue
					   */

	struct kgd2kfd_shared_resources shared_resources;
	struct kfd_vmid_info vm_info;

	const struct kfd2kgd_calls *kfd2kgd;
	struct mutex doorbell_mutex;
	DECLARE_BITMAP(doorbell_available_index,
			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);

	void *gtt_mem;
	uint64_t gtt_start_gpu_addr;
	void *gtt_start_cpu_ptr;
	void *gtt_sa_bitmap;
	struct mutex gtt_sa_lock;
	unsigned int gtt_sa_chunk_size;
	unsigned int gtt_sa_num_of_chunks;

	/* Interrupts */
	struct kfifo ih_fifo;
	struct workqueue_struct *ih_wq;
	struct work_struct interrupt_work;
	spinlock_t interrupt_lock;

	/* QCM Device instance */
	struct device_queue_manager *dqm;

	bool init_complete;
	/*
	 * Interrupts of interest to KFD are copied
	 * from the HW ring into a SW ring.
	 */
	bool interrupts_active;

	/* Debug manager */
	struct kfd_dbgmgr *dbgmgr;

	/* Firmware versions */
	uint16_t mec_fw_version;
	uint16_t sdma_fw_version;

	/* Maximum process number mapped to HW scheduler */
	unsigned int max_proc_per_quantum;

	/* CWSR */
	bool cwsr_enabled;
	const void *cwsr_isa;
	unsigned int cwsr_isa_size;

	/* xGMI */
	uint64_t hive_id;

	bool pci_atomic_requested;

	/* SRAM ECC flag */
	atomic_t sram_ecc_flag;

	/* Compute Profile ref. count */
	atomic_t compute_profile;
};
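
/*
 * Usage sketch for the kfd_alloc_struct() helper defined earlier (a
 * minimal, illustrative fragment; error handling elided). The macro infers
 * the allocation size from the pointer's own type, so the pointer is
 * passed as its own argument:
 *
 *	struct kfd_dev *kfd;
 *
 *	kfd = kfd_alloc_struct(kfd);	// kzalloc(sizeof(*kfd), GFP_KERNEL)
 *	if (!kfd)
 *		return NULL;
 */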

enum kfd_mempool {
	KFD_MEMPOOL_SYSTEM_CACHEABLE = 1,
	KFD_MEMPOOL_SYSTEM_WRITECOMBINE = 2,
	KFD_MEMPOOL_FRAMEBUFFER = 3,
};

/* Character device interface */
int kfd_chardev_init(void);
void kfd_chardev_exit(void);
struct device *kfd_chardev(void);

/**
 * enum kfd_unmap_queues_filter
 *
 * @KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE: Preempts a single queue.
 *
 * @KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES: Preempts all queues in the
 * running queues list.
 *
 * @KFD_UNMAP_QUEUES_FILTER_BY_PASID: Preempts queues that belong to
 * a specific process.
 *
 */
enum kfd_unmap_queues_filter {
	KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE,
	KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES,
	KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES,
	KFD_UNMAP_QUEUES_FILTER_BY_PASID
};

/**
 * enum kfd_queue_type
 *
 * @KFD_QUEUE_TYPE_COMPUTE: Regular user mode queue type.
 *
 * @KFD_QUEUE_TYPE_SDMA: SDMA user mode queue type.
 *
 * @KFD_QUEUE_TYPE_HIQ: HIQ queue type.
 *
 * @KFD_QUEUE_TYPE_DIQ: DIQ queue type.
 */
enum kfd_queue_type {
	KFD_QUEUE_TYPE_COMPUTE,
	KFD_QUEUE_TYPE_SDMA,
	KFD_QUEUE_TYPE_HIQ,
	KFD_QUEUE_TYPE_DIQ
};

enum kfd_queue_format {
	KFD_QUEUE_FORMAT_PM4,
	KFD_QUEUE_FORMAT_AQL
};
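
/*
 * Illustrative sketch (a hypothetical call site, not driver code): using
 * the filters above with the pm_send_unmap_queue() helper declared later
 * in this header to preempt every compute queue, e.g. on suspend. The
 * packet_manager pointer and the trailing parameters are assumptions for
 * illustration only:
 *
 *	pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
 *			    KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES,
 *			    0, false, 0);
 */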

/**
 * struct queue_properties
 *
 * @type: The queue type.
 *
 * @queue_id: Queue identifier.
 *
 * @queue_address: Queue ring buffer address.
 *
 * @queue_size: Queue ring buffer size.
 *
 * @priority: Defines the queue priority relative to other queues in the
 * process.
 * This is just an indication and HW scheduling may override the priority as
 * necessary while keeping the relative prioritization.
 * The priority granularity is from 0 to f, where f is the highest priority.
 * Currently all queues are initialized with the highest priority.
 *
 * @queue_percent: This field is partially implemented and currently a zero
 * in this field defines that the queue is non-active.
 *
 * @read_ptr: User space address which points to the number of dwords the
 * cp read from the ring buffer. This field is updated automatically by the
 * H/W.
 *
 * @write_ptr: Defines the number of dwords written to the ring buffer.
 *
 * @doorbell_ptr: Notifies the H/W of new packets written to the queue ring
 * buffer. This field should mirror write_ptr, and user space should update
 * it after updating write_ptr.
 *
 * @doorbell_off: The doorbell offset in the doorbell pci-bar.
 *
 * @is_interop: Defines if this is an interop queue. An interop queue can
 * access both graphics and compute resources.
 *
 * @is_evicted: Defines if the queue is evicted. Only active queues
 * are evicted, rendering them inactive.
 *
 * @is_active: Defines if the queue is active or not. @is_active and
 * @is_evicted are protected by the DQM lock.
 *
 * @vmid: If the scheduling mode is no cp scheduling, this field defines the
 * vmid of the queue.
 *
 * This structure represents the queue properties for each queue, no matter
 * whether it's a user mode or kernel mode queue.
 *
 */
struct queue_properties {
	enum kfd_queue_type type;
	enum kfd_queue_format format;
	unsigned int queue_id;
	uint64_t queue_address;
	uint64_t queue_size;
	uint32_t priority;
	uint32_t queue_percent;
	uint32_t *read_ptr;
	uint32_t *write_ptr;
	void __iomem *doorbell_ptr;
	uint32_t doorbell_off;
	bool is_interop;
	bool is_evicted;
	bool is_active;
	/* Not relevant for user mode queues in cp scheduling */
	unsigned int vmid;
	/* Relevant only for sdma queues */
	uint32_t sdma_engine_id;
	uint32_t sdma_queue_id;
	uint32_t sdma_vm_addr;
	/* Relevant only for VI */
	uint64_t eop_ring_buffer_address;
	uint32_t eop_ring_buffer_size;
	uint64_t ctx_save_restore_area_address;
	uint32_t ctx_save_restore_area_size;
	uint32_t ctl_stack_size;
	uint64_t tba_addr;
	uint64_t tma_addr;
	/* Relevant for CU */
	uint32_t cu_mask_count; /* Must be a multiple of 32 */
	uint32_t *cu_mask;
};

/**
 * struct queue
 *
 * @list: Queue linked list.
 *
 * @mqd: The queue MQD.
 *
 * @mqd_mem_obj: The MQD local gpu memory object.
 *
 * @gart_mqd_addr: The MQD gart mc address.
 *
 * @properties: The queue properties.
 *
 * @mec: Used only in no cp scheduling mode and identifies the micro engine
 * id that the queue should be executed on.
 *
 * @pipe: Used only in no cp scheduling mode and identifies the queue's pipe
 * id.
 *
 * @queue: Used only in no cp scheduling mode and identifies the queue's
 * slot.
 *
 * @process: The kfd process that created this queue.
 *
 * @device: The kfd device that created this queue.
 *
 * This structure represents user mode compute queues.
 * It contains all the necessary data to handle such queues.
 *
 */

struct queue {
	struct list_head list;
	void *mqd;
	struct kfd_mem_obj *mqd_mem_obj;
	uint64_t gart_mqd_addr;
	struct queue_properties properties;

	uint32_t mec;
	uint32_t pipe;
	uint32_t queue;

	unsigned int sdma_id;
	unsigned int doorbell_id;

	struct kfd_process	*process;
	struct kfd_dev		*device;
};
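
/*
 * Sketch of how queue activity is typically derived from the properties
 * above (an assumption modeled on the DQM code, not a definition in this
 * header): a queue counts as active only when it has a ring, a non-zero
 * percentage, and has not been evicted:
 *
 *	q->properties.is_active = q->properties.queue_size > 0 &&
 *				  q->properties.queue_address != 0 &&
 *				  q->properties.queue_percent > 0 &&
 *				  !q->properties.is_evicted;
 */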

/*
 * Please read the kfd_mqd_manager.h description.
 */
enum KFD_MQD_TYPE {
	KFD_MQD_TYPE_COMPUTE = 0,	/* for no cp scheduling */
	KFD_MQD_TYPE_HIQ,		/* for hiq */
	KFD_MQD_TYPE_CP,		/* for cp queues and diq */
	KFD_MQD_TYPE_SDMA,		/* for sdma queues */
	KFD_MQD_TYPE_MAX
};

struct scheduling_resources {
	unsigned int vmid_mask;
	enum kfd_queue_type type;
	uint64_t queue_mask;
	uint64_t gws_mask;
	uint32_t oac_mask;
	uint32_t gds_heap_base;
	uint32_t gds_heap_size;
};

struct process_queue_manager {
	/* data */
	struct kfd_process	*process;
	struct list_head	queues;
	unsigned long		*queue_slot_bitmap;
};

struct qcm_process_device {
	/* The Device Queue Manager that owns this data */
	struct device_queue_manager *dqm;
	struct process_queue_manager *pqm;
	/* Queues list */
	struct list_head queues_list;
	struct list_head priv_queue_list;

	unsigned int queue_count;
	unsigned int vmid;
	bool is_debug;
	unsigned int evicted; /* eviction counter, 0=active */

	/* This flag tells if we should reset all wavefronts on
	 * process termination
	 */
	bool reset_wavefronts;

	/*
	 * All the memory management data should be here too
	 */
	uint64_t gds_context_area;
	/* Contains page table flags such as AMDGPU_PTE_VALID since gfx9 */
	uint64_t page_table_base;
	uint32_t sh_mem_config;
	uint32_t sh_mem_bases;
	uint32_t sh_mem_ape1_base;
	uint32_t sh_mem_ape1_limit;
	uint32_t gds_size;
	uint32_t num_gws;
	uint32_t num_oac;
	uint32_t sh_hidden_private_base;

	/* CWSR memory */
	void *cwsr_kaddr;
	uint64_t cwsr_base;
	uint64_t tba_addr;
	uint64_t tma_addr;

	/* IB memory */
	uint64_t ib_base;
	void *ib_kaddr;

	/* doorbell resources per process per device */
	unsigned long *doorbell_bitmap;
};

/* KFD Memory Eviction */

/* Approx. wait time before attempting to restore evicted BOs */
#define PROCESS_RESTORE_TIME_MS 100
/* Approx. back off time if restore fails due to lack of memory */
#define PROCESS_BACK_OFF_TIME_MS 100
/* Approx. time before evicting the process again */
#define PROCESS_ACTIVE_TIME_MS 10

/* 8 byte handle containing GPU ID in the most significant 4 bytes and
 * idr_handle in the least significant 4 bytes
 */
#define MAKE_HANDLE(gpu_id, idr_handle) \
	(((uint64_t)(gpu_id) << 32) + idr_handle)
#define GET_GPU_ID(handle) (handle >> 32)
#define GET_IDR_HANDLE(handle) (handle & 0xFFFFFFFF)
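
/*
 * Worked example for the handle macros above (the values are hypothetical):
 *
 *	uint64_t handle = MAKE_HANDLE(0x1002, 7);  // 0x0000100200000007
 *
 *	GET_GPU_ID(handle)     == 0x1002;  // upper 4 bytes
 *	GET_IDR_HANDLE(handle) == 7;       // lower 4 bytes
 */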

enum kfd_pdd_bound {
	PDD_UNBOUND = 0,
	PDD_BOUND,
	PDD_BOUND_SUSPENDED,
};

/* Data that is per-process-per device. */
struct kfd_process_device {
	/*
	 * List of all per-device data for a process.
	 * Starts from kfd_process.per_device_data.
	 */
	struct list_head per_device_list;

	/* The device that owns this data. */
	struct kfd_dev *dev;

	/* The process that owns this kfd_process_device. */
	struct kfd_process *process;

	/* per-process-per device QCM data structure */
	struct qcm_process_device qpd;

	/* Apertures */
	uint64_t lds_base;
	uint64_t lds_limit;
	uint64_t gpuvm_base;
	uint64_t gpuvm_limit;
	uint64_t scratch_base;
	uint64_t scratch_limit;

	/* VM context for GPUVM allocations */
	struct file *drm_file;
	void *vm;

	/* GPUVM allocations storage */
	struct idr alloc_idr;

	/* Flag used to tell if the pdd has dequeued from the dqm.
	 * This is used to prevent dev->dqm->ops.process_termination() from
	 * being called twice when it is already called in the IOMMU callback
	 * function.
	 */
	bool already_dequeued;

	/* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */
	enum kfd_pdd_bound bound;
};

#define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)

/* Process data */
struct kfd_process {
	/*
	 * kfd_process are stored in an mm_struct*->kfd_process*
	 * hash table (kfd_processes in kfd_process.c)
	 */
	struct hlist_node kfd_processes;

	/*
	 * Opaque pointer to mm_struct. We don't hold a reference to
	 * it so it should never be dereferenced from here. This is
	 * only used for looking up processes by their mm.
	 */
	void *mm;

	struct kref ref;
	struct work_struct release_work;

	struct mutex mutex;

	/*
	 * In any process, the thread that started main() is the lead
	 * thread and outlives the rest.
	 * It is here because amd_iommu_bind_pasid wants a task_struct.
	 * It can also be used for safely getting a reference to the
	 * mm_struct of the process.
	 */
	struct task_struct *lead_thread;

	/* We want to receive a notification when the mm_struct is destroyed */
	struct mmu_notifier mmu_notifier;

	/* Used for delayed freeing of the kfd_process structure */
	struct rcu_head	rcu;

	unsigned int pasid;
	unsigned int doorbell_index;

	/*
	 * List of kfd_process_device structures,
	 * one for each device the process is using.
	 */
	struct list_head per_device_data;

	struct process_queue_manager pqm;

	/* Is the user space process 32 bit? */
	bool is_32bit_user_mode;

	/* Event-related data */
	struct mutex event_mutex;
	/* Event ID allocator and lookup */
	struct idr event_idr;
	/* Event page */
	struct kfd_signal_page *signal_page;
	size_t signal_mapped_size;
	size_t signal_event_count;
	bool signal_event_limit_reached;

	/* Information used for memory eviction */
	void *kgd_process_info;
	/* Eviction fence that is attached to all the BOs of this process. The
	 * fence will be triggered during eviction and a new one will be
	 * created during restore.
	 */
	struct dma_fence *ef;

	/* Work items for evicting and restoring BOs */
	struct delayed_work eviction_work;
	struct delayed_work restore_work;
	/* seqno of the last scheduled eviction */
	unsigned int last_eviction_seqno;
	/* Approx. the last timestamp (in jiffies) when the process was
	 * restored after an eviction
	 */
	unsigned long last_restore_timestamp;
};

#define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */
extern DECLARE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
extern struct srcu_struct kfd_processes_srcu;
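
/*
 * Illustrative sketch (modeled on kfd_process.c and simplified): how the
 * kfd_processes_table and kfd_processes_srcu declared above are typically
 * used together to look up a process by its mm pointer:
 *
 *	struct kfd_process *process;
 *	int idx = srcu_read_lock(&kfd_processes_srcu);
 *
 *	hash_for_each_possible_rcu(kfd_processes_table, process,
 *				   kfd_processes, (uintptr_t)mm)
 *		if (process->mm == mm)
 *			break;
 *
 *	srcu_read_unlock(&kfd_processes_srcu, idx);
 */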

/**
 * Ioctl function type.
 *
 * \param filep pointer to file structure.
 * \param p amdkfd process pointer.
 * \param data pointer to arg that was copied from user.
 */
typedef int amdkfd_ioctl_t(struct file *filep, struct kfd_process *p,
				void *data);

struct amdkfd_ioctl_desc {
	unsigned int cmd;
	int flags;
	amdkfd_ioctl_t *func;
	unsigned int cmd_drv;
	const char *name;
};
bool kfd_dev_is_large_bar(struct kfd_dev *dev);

int kfd_process_create_wq(void);
void kfd_process_destroy_wq(void);
struct kfd_process *kfd_create_process(struct file *filep);
struct kfd_process *kfd_get_process(const struct task_struct *);
struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid);
struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm);
void kfd_unref_process(struct kfd_process *p);
int kfd_process_evict_queues(struct kfd_process *p);
int kfd_process_restore_queues(struct kfd_process *p);
void kfd_suspend_all_processes(void);
int kfd_resume_all_processes(void);

int kfd_process_device_init_vm(struct kfd_process_device *pdd,
			       struct file *drm_file);
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
						struct kfd_process *p);
struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
						struct kfd_process *p);
struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
						struct kfd_process *p);

int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
			  struct vm_area_struct *vma);

/* KFD process API for creating and translating handles */
int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
					void *mem);
void *kfd_process_device_translate_handle(struct kfd_process_device *p,
					int handle);
void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
					int handle);

/* Process device data iterator */
struct kfd_process_device *kfd_get_first_process_device_data(
				struct kfd_process *p);
struct kfd_process_device *kfd_get_next_process_device_data(
				struct kfd_process *p,
				struct kfd_process_device *pdd);
bool kfd_has_process_device_data(struct kfd_process *p);

/* PASIDs */
int kfd_pasid_init(void);
void kfd_pasid_exit(void);
bool kfd_set_pasid_limit(unsigned int new_limit);
unsigned int kfd_get_pasid_limit(void);
unsigned int kfd_pasid_alloc(void);
void kfd_pasid_free(unsigned int pasid);

/* Doorbells */
size_t kfd_doorbell_process_slice(struct kfd_dev *kfd);
int kfd_doorbell_init(struct kfd_dev *kfd);
void kfd_doorbell_fini(struct kfd_dev *kfd);
int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process,
		      struct vm_area_struct *vma);
void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
					unsigned int *doorbell_off);
void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr);
u32 read_kernel_doorbell(u32 __iomem *db);
void write_kernel_doorbell(void __iomem *db, u32 value);
void write_kernel_doorbell64(void __iomem *db, u64 value);
unsigned int kfd_doorbell_id_to_offset(struct kfd_dev *kfd,
					struct kfd_process *process,
					unsigned int doorbell_id);
phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev,
					struct kfd_process *process);
int kfd_alloc_process_doorbells(struct kfd_process *process);
void kfd_free_process_doorbells(struct kfd_process *process);

/* GTT Sub-Allocator */

int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
			struct kfd_mem_obj **mem_obj);

int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj);
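
/*
 * Usage sketch for the GTT sub-allocator pair above (illustrative; error
 * handling abbreviated): allocate a chunk of device GTT memory, use the
 * returned kfd_mem_obj, then free it:
 *
 *	struct kfd_mem_obj *mem_obj;
 *
 *	if (kfd_gtt_sa_allocate(kfd, size, &mem_obj))
 *		return -ENOMEM;
 *	// ... use mem_obj->gpu_addr / mem_obj->cpu_ptr ...
 *	kfd_gtt_sa_free(kfd, mem_obj);
 */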

extern struct device *kfd_device;

/* Topology */
int kfd_topology_init(void);
void kfd_topology_shutdown(void);
int kfd_topology_add_device(struct kfd_dev *gpu);
int kfd_topology_remove_device(struct kfd_dev *gpu);
struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
						uint32_t proximity_domain);
struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id);
struct kfd_dev *kfd_device_by_id(uint32_t gpu_id);
struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
struct kfd_dev *kfd_device_by_kgd(const struct kgd_dev *kgd);
int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev);
int kfd_numa_node_to_apic_id(int numa_node_id);

/* Interrupts */
int kfd_interrupt_init(struct kfd_dev *dev);
void kfd_interrupt_exit(struct kfd_dev *dev);
bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry);
bool interrupt_is_wanted(struct kfd_dev *dev,
				const uint32_t *ih_ring_entry,
				uint32_t *patched_ihre, bool *flag);

/* amdkfd Apertures */
int kfd_init_apertures(struct kfd_process *process);

/* Queue Context Management */
int init_queue(struct queue **q, const struct queue_properties *properties);
void uninit_queue(struct queue *q);
void print_queue_properties(struct queue_properties *q);
void print_queue(struct queue *q);

struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
					struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
		struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_cik_hawaii(enum KFD_MQD_TYPE type,
		struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
		struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_vi_tonga(enum KFD_MQD_TYPE type,
		struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
		struct kfd_dev *dev);
struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev);
void device_queue_manager_uninit(struct device_queue_manager *dqm);
struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
					enum kfd_queue_type type);
void kernel_queue_uninit(struct kernel_queue *kq);
int kfd_process_vm_fault(struct device_queue_manager *dqm, unsigned int pasid);

/* Process Queue Manager */
struct process_queue_node {
	struct queue *q;
	struct kernel_queue *kq;
	struct list_head process_queue_list;
};

void kfd_process_dequeue_from_device(struct kfd_process_device *pdd);
void kfd_process_dequeue_from_all_devices(struct kfd_process *p);
int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p);
void pqm_uninit(struct process_queue_manager *pqm);
int pqm_create_queue(struct process_queue_manager *pqm,
			struct kfd_dev *dev,
			struct file *f,
			struct queue_properties *properties,
			unsigned int *qid);
int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid);
int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
			struct queue_properties *p);
int pqm_set_cu_mask(struct process_queue_manager *pqm, unsigned int qid,
			struct queue_properties *p);
struct kernel_queue *pqm_get_kernel_queue(struct process_queue_manager *pqm,
						unsigned int qid);
int pqm_get_wave_state(struct process_queue_manager *pqm,
		       unsigned int qid,
		       void __user *ctl_stack,
		       u32 *ctl_stack_used_size,
		       u32 *save_area_used_size);
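
/*
 * Usage sketch for the PQM API above (illustrative; assumes an already
 * populated struct queue_properties and elides error handling): create a
 * queue for a process, then destroy it by the returned qid:
 *
 *	unsigned int qid;
 *	int err;
 *
 *	err = pqm_create_queue(&p->pqm, dev, filep, &properties, &qid);
 *	if (err)
 *		return err;
 *	// ... queue is live; user space rings its doorbell ...
 *	pqm_destroy_queue(&p->pqm, qid);
 */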

int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
			      unsigned int fence_value,
			      unsigned int timeout_ms);

/* Packet Manager */

#define KFD_FENCE_COMPLETED (100)
#define KFD_FENCE_INIT   (10)

struct packet_manager {
	struct device_queue_manager *dqm;
	struct kernel_queue *priv_queue;
	struct mutex lock;
	bool allocated;
	struct kfd_mem_obj *ib_buffer_obj;
	unsigned int ib_size_bytes;

	const struct packet_manager_funcs *pmf;
};

struct packet_manager_funcs {
	/* Support ASIC-specific packet formats for PM4 packets */
	int (*map_process)(struct packet_manager *pm, uint32_t *buffer,
			struct qcm_process_device *qpd);
	int (*runlist)(struct packet_manager *pm, uint32_t *buffer,
			uint64_t ib, size_t ib_size_in_dwords, bool chain);
	int (*set_resources)(struct packet_manager *pm, uint32_t *buffer,
			struct scheduling_resources *res);
	int (*map_queues)(struct packet_manager *pm, uint32_t *buffer,
			struct queue *q, bool is_static);
	int (*unmap_queues)(struct packet_manager *pm, uint32_t *buffer,
			enum kfd_queue_type type,
			enum kfd_unmap_queues_filter mode,
			uint32_t filter_param, bool reset,
			unsigned int sdma_engine);
	int (*query_status)(struct packet_manager *pm, uint32_t *buffer,
			uint64_t fence_address, uint32_t fence_value);
	int (*release_mem)(uint64_t gpu_addr, uint32_t *buffer);

	/* Packet sizes */
	int map_process_size;
	int runlist_size;
	int set_resources_size;
	int map_queues_size;
	int unmap_queues_size;
	int query_status_size;
	int release_mem_size;
};

extern const struct packet_manager_funcs kfd_vi_pm_funcs;
extern const struct packet_manager_funcs kfd_v9_pm_funcs;

int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
void pm_uninit(struct packet_manager *pm);
int pm_send_set_resources(struct packet_manager *pm,
				struct scheduling_resources *res);
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues);
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
				uint32_t fence_value);

int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
			enum kfd_unmap_queues_filter mode,
			uint32_t filter_param, bool reset,
			unsigned int sdma_engine);

void pm_release_ib(struct packet_manager *pm);
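
/*
 * Sketch of the pmf indirection above (an illustration, not the driver's
 * literal code): pm_init() selects the ASIC-specific function table, and
 * callers then build and size packets through it:
 *
 *	pm->pmf = KFD_IS_SOC15(asic) ? &kfd_v9_pm_funcs : &kfd_vi_pm_funcs;
 *
 *	// later, when building a runlist entry for a queue:
 *	retval = pm->pmf->map_queues(pm, buffer, q, is_static);
 *	buffer += pm->pmf->map_queues_size / sizeof(uint32_t);
 */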

/* Following PM funcs can be shared among VI and AI */
unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size);
int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer,
			struct scheduling_resources *res);

uint64_t kfd_get_number_elems(struct kfd_dev *kfd);

/* Events */
extern const struct kfd_event_interrupt_class event_interrupt_class_cik;
extern const struct kfd_event_interrupt_class event_interrupt_class_v9;

extern const struct kfd_device_global_init_class device_global_init_class_cik;

void kfd_event_init_process(struct kfd_process *p);
void kfd_event_free_process(struct kfd_process *p);
int kfd_event_mmap(struct kfd_process *process, struct vm_area_struct *vma);
int kfd_wait_on_events(struct kfd_process *p,
		       uint32_t num_events, void __user *data,
		       bool all, uint32_t user_timeout_ms,
		       uint32_t *wait_result);
void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
				uint32_t valid_id_bits);
void kfd_signal_iommu_event(struct kfd_dev *dev,
		unsigned int pasid, unsigned long address,
		bool is_write_requested, bool is_execute_requested);
void kfd_signal_hw_exception_event(unsigned int pasid);
int kfd_set_event(struct kfd_process *p, uint32_t event_id);
int kfd_reset_event(struct kfd_process *p, uint32_t event_id);
int kfd_event_page_set(struct kfd_process *p, void *kernel_address,
		       uint64_t size);
int kfd_event_create(struct file *devkfd, struct kfd_process *p,
		     uint32_t event_type, bool auto_reset, uint32_t node_id,
		     uint32_t *event_id, uint32_t *event_trigger_data,
		     uint64_t *event_page_offset, uint32_t *event_slot_index);
int kfd_event_destroy(struct kfd_process *p, uint32_t event_id);

void kfd_signal_vm_fault_event(struct kfd_dev *dev, unsigned int pasid,
				struct kfd_vm_fault_info *info);

void kfd_signal_reset_event(struct kfd_dev *dev);

void kfd_flush_tlb(struct kfd_process_device *pdd);

int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p);

bool kfd_is_locked(void);

/* Compute profile */
void kfd_inc_compute_active(struct kfd_dev *dev);
void kfd_dec_compute_active(struct kfd_dev *dev);

/* Debugfs */
#if defined(CONFIG_DEBUG_FS)

void kfd_debugfs_init(void);
void kfd_debugfs_fini(void);
int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data);
int pqm_debugfs_mqds(struct seq_file *m, void *data);
int kfd_debugfs_hqds_by_device(struct seq_file *m, void *data);
int dqm_debugfs_hqds(struct seq_file *m, void *data);
int kfd_debugfs_rls_by_device(struct seq_file *m, void *data);
int pm_debugfs_runlist(struct seq_file *m, void *data);

int kfd_debugfs_hang_hws(struct kfd_dev *dev);
int pm_debugfs_hang_hws(struct packet_manager *pm);
int dqm_debugfs_execute_queues(struct device_queue_manager *dqm);

#else

static inline void kfd_debugfs_init(void) {}
static inline void kfd_debugfs_fini(void) {}

#endif

#endif