/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef KFD_PRIV_H_INCLUDED
#define KFD_PRIV_H_INCLUDED

#include <linux/hashtable.h>
#include <linux/mmu_notifier.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kfd_ioctl.h>
#include <linux/idr.h>
#include <linux/kfifo.h>
#include <linux/seq_file.h>
#include <linux/kref.h>
#include <linux/sysfs.h>
#include <linux/device_cgroup.h>
#include <drm/drm_file.h>
#include <drm/drm_drv.h>
#include <drm/drm_device.h>
#include <drm/drm_ioctl.h>
#include <kgd_kfd_interface.h>
#include <linux/swap.h>

#include "amd_shared.h"
#include "amdgpu.h"

#define KFD_MAX_RING_ENTRY_SIZE	8

#define KFD_SYSFS_FILE_MODE 0444

/* GPU ID hash width in bits */
#define KFD_GPU_ID_HASH_WIDTH 16

/* Use upper bits of mmap offset to store KFD driver specific information.
 * BITS[63:62] - Encode MMAP type
 * BITS[61:46] - Encode gpu_id. Identifies the GPU to which the offset belongs
 * BITS[45:0]  - MMAP offset value
 *
 * NOTE: struct vm_area_struct.vm_pgoff uses an offset in pages. Hence, these
 * defines are w.r.t. PAGE_SIZE
 */
#define KFD_MMAP_TYPE_SHIFT	62
#define KFD_MMAP_TYPE_MASK	(0x3ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_DOORBELL	(0x3ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_EVENTS	(0x2ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_RESERVED_MEM	(0x1ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_MMIO	(0x0ULL << KFD_MMAP_TYPE_SHIFT)

#define KFD_MMAP_GPU_ID_SHIFT 46
#define KFD_MMAP_GPU_ID_MASK (((1ULL << KFD_GPU_ID_HASH_WIDTH) - 1) \
				<< KFD_MMAP_GPU_ID_SHIFT)
#define KFD_MMAP_GPU_ID(gpu_id) ((((uint64_t)gpu_id) << KFD_MMAP_GPU_ID_SHIFT)\
				& KFD_MMAP_GPU_ID_MASK)
#define KFD_MMAP_GET_GPU_ID(offset)	((offset & KFD_MMAP_GPU_ID_MASK) \
				>> KFD_MMAP_GPU_ID_SHIFT)
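
/*
 * Example (editor's sketch, not driver code): composing the mmap offset
 * for a doorbell mapping and decoding the gpu_id back out of a vma.
 * vm_pgoff holds the encoded value in units of pages:
 *
 *	u64 offset = KFD_MMAP_TYPE_DOORBELL | KFD_MMAP_GPU_ID(gpu_id);
 *	...
 *	u64 mmap_offset = (u64)vma->vm_pgoff << PAGE_SHIFT;
 *	u32 gpu_id = KFD_MMAP_GET_GPU_ID(mmap_offset);
 */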

/*
 * When working with the CP scheduler we should assign the HIQ manually, or
 * via the amdgpu driver, to a fixed HQD slot. Here are the fixed HIQ HQD
 * slot definitions for Kaveri. On Kaveri only the first ME's queues
 * participate in CP scheduling; with that in mind we set the HIQ slot in
 * the second ME.
 */
#define KFD_CIK_HIQ_PIPE 4
#define KFD_CIK_HIQ_QUEUE 0

/* Macro for allocating structures */
#define kfd_alloc_struct(ptr_to_struct)	\
	((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL))
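
/*
 * Example (editor's sketch): kfd_alloc_struct() infers both the allocation
 * size and the cast from the pointer it is meant to fill, so the struct
 * type is named only once:
 *
 *	struct kfd_process *process = kfd_alloc_struct(process);
 *	if (!process)
 *		return ERR_PTR(-ENOMEM);
 */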

#define KFD_MAX_NUM_OF_PROCESSES 512
#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024

/*
 * Size of the per-process TBA+TMA buffer: 2 pages
 *
 * The first page is the TBA used for the CWSR ISA code. The second
 * page is used as TMA for user-mode trap handler setup in daisy-chain mode.
 */
#define KFD_CWSR_TBA_TMA_SIZE (PAGE_SIZE * 2)
#define KFD_CWSR_TMA_OFFSET PAGE_SIZE

#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE	\
	(KFD_MAX_NUM_OF_PROCESSES *		\
			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)

#define KFD_KERNEL_QUEUE_SIZE 2048

#define KFD_UNMAP_LATENCY_MS	(4000)

/*
 * 512 = 0x200
 * The doorbell index distance between SDMA RLC (2*i) and (2*i+1) in the
 * same SDMA engine on SOC15, which has 8-byte doorbells for SDMA.
 * A 512 8-byte doorbell distance (i.e. one page away) ensures that the SDMA
 * RLC (2*i+1) doorbells (in terms of the lower 12 bit address) lie exactly
 * in the OFFSET and SIZE set in registers like BIF_SDMA0_DOORBELL_RANGE.
 */
#define KFD_QUEUE_DOORBELL_MIRROR_OFFSET 512

/**
 * enum kfd_ioctl_flags - KFD ioctl flags
 * Various flags that can be set in &amdkfd_ioctl_desc.flags to control how
 * userspace can use a given ioctl.
 */
enum kfd_ioctl_flags {
	/*
	 * @KFD_IOC_FLAG_CHECKPOINT_RESTORE:
	 * Certain KFD ioctls such as AMDKFD_IOC_CRIU_OP can potentially
	 * perform privileged operations and load arbitrary data into MQDs and
	 * eventually HQD registers when the queue is mapped by HWS. In order
	 * to prevent this we should perform additional security checks.
	 *
	 * This is equivalent to callers with the CHECKPOINT_RESTORE
	 * capability.
	 *
	 * Note: Since earlier versions of Docker do not support
	 * CHECKPOINT_RESTORE, we also allow ioctls with SYS_ADMIN capability.
	 */
	KFD_IOC_FLAG_CHECKPOINT_RESTORE = BIT(0),
};
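
/*
 * Example (editor's sketch): how a dispatcher might gate an ioctl marked
 * with KFD_IOC_FLAG_CHECKPOINT_RESTORE, per the note above:
 *
 *	if ((ioctl->flags & KFD_IOC_FLAG_CHECKPOINT_RESTORE) &&
 *	    !capable(CAP_CHECKPOINT_RESTORE) &&
 *	    !capable(CAP_SYS_ADMIN))
 *		return -EACCES;
 */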

/*
 * Kernel module parameter to specify the maximum number of supported queues
 * per device
 */
extern int max_num_of_queues_per_device;


/* Kernel module parameter to specify the scheduling policy */
extern int sched_policy;

/*
 * Kernel module parameter to specify the maximum process
 * number per HW scheduler
 */
extern int hws_max_conc_proc;

extern int cwsr_enable;

/*
 * Kernel module parameter to specify whether to send sigterm to HSA process on
 * unhandled exception
 */
extern int send_sigterm;

/*
 * Kernel module parameter used to simulate a large-BAR machine on machines
 * that are not large-BAR capable.
 */
extern int debug_largebar;

/*
 * Ignore CRAT table during KFD initialization, can be used to work around
 * broken CRAT tables on some AMD systems
 */
extern int ignore_crat;

/* Set sh_mem_config.retry_disable on GFX v9 */
extern int amdgpu_noretry;

/* Halt if HWS hang is detected */
extern int halt_if_hws_hang;

/* Whether the MEC FW supports GWS barriers */
extern bool hws_gws_support;

/* Queue preemption timeout in ms */
extern int queue_preemption_timeout_ms;

/*
 * Don't evict process queues on vm fault
 */
extern int amdgpu_no_queue_eviction_on_vm_fault;

/* Enable eviction debug messages */
extern bool debug_evictions;

enum cache_policy {
	cache_policy_coherent,
	cache_policy_noncoherent
};

#define KFD_GC_VERSION(dev) ((dev)->adev->ip_versions[GC_HWIP][0])
#define KFD_IS_SOC15(dev)   ((KFD_GC_VERSION(dev)) >= (IP_VERSION(9, 0, 1)))
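
/*
 * Example (editor's sketch): gating an ASIC-specific path on the GC IP
 * version. The versions and the use_v9_path() helper are illustrative only:
 *
 *	if (KFD_IS_SOC15(dev) && KFD_GC_VERSION(dev) < IP_VERSION(10, 1, 1))
 *		use_v9_path(dev);
 */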

struct kfd_event_interrupt_class {
	bool (*interrupt_isr)(struct kfd_dev *dev,
			const uint32_t *ih_ring_entry, uint32_t *patched_ihre,
			bool *patched_flag);
	void (*interrupt_wq)(struct kfd_dev *dev,
			const uint32_t *ih_ring_entry);
};

struct kfd_device_info {
	uint32_t gfx_target_version;
	const struct kfd_event_interrupt_class *event_interrupt_class;
	unsigned int max_pasid_bits;
	unsigned int max_no_of_hqd;
	unsigned int doorbell_size;
	size_t ih_ring_entry_size;
	uint8_t num_of_watch_points;
	uint16_t mqd_size_aligned;
	bool supports_cwsr;
	bool needs_iommu_device;
	bool needs_pci_atomics;
	uint32_t no_atomic_fw_version;
	unsigned int num_sdma_queues_per_engine;
};

unsigned int kfd_get_num_sdma_engines(struct kfd_dev *kdev);
unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_dev *kdev);

struct kfd_mem_obj {
	uint32_t range_start;
	uint32_t range_end;
	uint64_t gpu_addr;
	uint32_t *cpu_ptr;
	void *gtt_mem;
};

struct kfd_vmid_info {
	uint32_t first_vmid_kfd;
	uint32_t last_vmid_kfd;
	uint32_t vmid_num_kfd;
};

struct kfd_dev {
	struct amdgpu_device *adev;

	struct kfd_device_info device_info;
	struct pci_dev *pdev;
	struct drm_device *ddev;

	unsigned int id;		/* topology stub index */

	phys_addr_t doorbell_base;	/* Start of actual doorbells used by
					 * KFD. It is aligned for mapping
					 * into user mode
					 */
	size_t doorbell_base_dw_offset;	/* Offset from the start of the PCI
					 * doorbell BAR to the first KFD
					 * doorbell in dwords. GFX reserves
					 * the segment before this offset.
					 */
	u32 __iomem *doorbell_kernel_ptr; /* Pointer to the doorbell page
					 * used by the kernel queue
					 */

	struct kgd2kfd_shared_resources shared_resources;
	struct kfd_vmid_info vm_info;

	const struct kfd2kgd_calls *kfd2kgd;
	struct mutex doorbell_mutex;
	DECLARE_BITMAP(doorbell_available_index,
			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);

	void *gtt_mem;
	uint64_t gtt_start_gpu_addr;
	void *gtt_start_cpu_ptr;
	void *gtt_sa_bitmap;
	struct mutex gtt_sa_lock;
	unsigned int gtt_sa_chunk_size;
	unsigned int gtt_sa_num_of_chunks;

	/* Interrupts */
	struct kfifo ih_fifo;
	struct workqueue_struct *ih_wq;
	struct work_struct interrupt_work;
	spinlock_t interrupt_lock;

	/* QCM Device instance */
	struct device_queue_manager *dqm;

	bool init_complete;
	/*
	 * Interrupts of interest to KFD are copied
	 * from the HW ring into a SW ring.
	 */
	bool interrupts_active;

	/* Debug manager */
	struct kfd_dbgmgr *dbgmgr;

	/* Firmware versions */
	uint16_t mec_fw_version;
	uint16_t mec2_fw_version;
	uint16_t sdma_fw_version;

	/* Maximum process number mapped to HW scheduler */
	unsigned int max_proc_per_quantum;

	/* CWSR */
	bool cwsr_enabled;
	const void *cwsr_isa;
	unsigned int cwsr_isa_size;

	/* xGMI */
	uint64_t hive_id;

	bool pci_atomic_requested;

	/* Use IOMMU v2 flag */
	bool use_iommu_v2;

	/* SRAM ECC flag */
	atomic_t sram_ecc_flag;

	/* Compute Profile ref. count */
	atomic_t compute_profile;

	/* Global GWS resource shared between processes */
	void *gws;

	/* Clients watching SMI events */
	struct list_head smi_clients;
	spinlock_t smi_lock;

	uint32_t reset_seq_num;

	struct ida doorbell_ida;
	unsigned int max_doorbell_slices;

	int noretry;

	/* HMM page migration MEMORY_DEVICE_PRIVATE mapping */
	struct dev_pagemap pgmap;
};

enum kfd_mempool {
	KFD_MEMPOOL_SYSTEM_CACHEABLE = 1,
	KFD_MEMPOOL_SYSTEM_WRITECOMBINE = 2,
	KFD_MEMPOOL_FRAMEBUFFER = 3,
};

/* Character device interface */
int kfd_chardev_init(void);
void kfd_chardev_exit(void);
struct device *kfd_chardev(void);

/**
 * enum kfd_unmap_queues_filter - Enum for queue filters.
 *
 * @KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE: Preempts a single queue.
 *
 * @KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES: Preempts all queues in the
 * running queues list.
 *
 * @KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES: Preempts all non-static queues.
 *
 * @KFD_UNMAP_QUEUES_FILTER_BY_PASID: Preempts queues that belong to a
 * specific process.
 */
enum kfd_unmap_queues_filter {
	KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE,
	KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES,
	KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES,
	KFD_UNMAP_QUEUES_FILTER_BY_PASID
};
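
/*
 * Example (editor's sketch): preempting every queue that belongs to one
 * process, assuming a valid pasid; pm_send_unmap_queue() and the queue
 * type enum are declared further down in this header:
 *
 *	pm_send_unmap_queue(pm, KFD_QUEUE_TYPE_COMPUTE,
 *			    KFD_UNMAP_QUEUES_FILTER_BY_PASID,
 *			    pasid, false, 0);
 */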

/**
 * enum kfd_queue_type - Enum for various queue types.
 *
 * @KFD_QUEUE_TYPE_COMPUTE: Regular user mode queue type.
 *
 * @KFD_QUEUE_TYPE_SDMA: SDMA user mode queue type.
 *
 * @KFD_QUEUE_TYPE_HIQ: HIQ queue type.
 *
 * @KFD_QUEUE_TYPE_DIQ: DIQ queue type.
 *
 * @KFD_QUEUE_TYPE_SDMA_XGMI: Special SDMA queue for XGMI interface.
 */
enum kfd_queue_type {
	KFD_QUEUE_TYPE_COMPUTE,
	KFD_QUEUE_TYPE_SDMA,
	KFD_QUEUE_TYPE_HIQ,
	KFD_QUEUE_TYPE_DIQ,
	KFD_QUEUE_TYPE_SDMA_XGMI
};

enum kfd_queue_format {
	KFD_QUEUE_FORMAT_PM4,
	KFD_QUEUE_FORMAT_AQL
};

enum KFD_QUEUE_PRIORITY {
	KFD_QUEUE_PRIORITY_MINIMUM = 0,
	KFD_QUEUE_PRIORITY_MAXIMUM = 15
};

/**
 * struct queue_properties
 *
 * @type: The queue type.
 *
 * @queue_id: Queue identifier.
 *
 * @queue_address: Queue ring buffer address.
 *
 * @queue_size: Queue ring buffer size.
 *
 * @priority: Defines the queue priority relative to other queues in the
 * process.
 * This is just an indication and HW scheduling may override the priority as
 * necessary while keeping the relative prioritization.
 * The priority granularity is from 0 to f, where f is the highest priority.
 * Currently all queues are initialized with the highest priority.
 *
 * @queue_percent: This field is partially implemented and currently a zero
 * in this field means the queue is not active.
 *
 * @read_ptr: User space address which points to the number of dwords the
 * CP has read from the ring buffer. This field is updated automatically by
 * the HW.
 *
 * @write_ptr: Defines the number of dwords written to the ring buffer.
 *
 * @doorbell_ptr: Notifies the HW of new packets written to the queue ring
 * buffer. This field should mirror write_ptr and the user should update it
 * after updating the write_ptr.
 *
 * @doorbell_off: The doorbell offset in the doorbell pci-bar.
 *
 * @is_interop: Defines if this is an interop queue. An interop queue can
 * access both graphics and compute resources.
 *
 * @is_evicted: Defines if the queue is evicted. Only active queues
 * are evicted, rendering them inactive.
 *
 * @is_active: Defines if the queue is active or not. @is_active and
 * @is_evicted are protected by the DQM lock.
 *
 * @is_gws: Defines if the queue has been updated to be GWS-capable or not.
 * @is_gws should be protected by the DQM lock, since changing it can yield the
 * possibility of updating DQM state on number of GWS queues.
 *
 * @vmid: If the scheduling mode is no-CP scheduling, this field defines the
 * VMID of the queue.
 *
 * This structure represents the queue properties for each queue, no matter
 * if it's a user mode or kernel mode queue.
 */
struct queue_properties {
	enum kfd_queue_type type;
	enum kfd_queue_format format;
	unsigned int queue_id;
	uint64_t queue_address;
	uint64_t queue_size;
	uint32_t priority;
	uint32_t queue_percent;
	uint32_t *read_ptr;
	uint32_t *write_ptr;
	void __iomem *doorbell_ptr;
	uint32_t doorbell_off;
	bool is_interop;
	bool is_evicted;
	bool is_active;
	bool is_gws;
	/* Not relevant for user mode queues in cp scheduling */
	unsigned int vmid;
	/* Relevant only for sdma queues */
	uint32_t sdma_engine_id;
	uint32_t sdma_queue_id;
	uint32_t sdma_vm_addr;
	/* Relevant only for VI */
	uint64_t eop_ring_buffer_address;
	uint32_t eop_ring_buffer_size;
	uint64_t ctx_save_restore_area_address;
	uint32_t ctx_save_restore_area_size;
	uint32_t ctl_stack_size;
	uint64_t tba_addr;
	uint64_t tma_addr;
};

#define QUEUE_IS_ACTIVE(q) ((q).queue_size > 0 &&	\
			    (q).queue_address != 0 &&	\
			    (q).queue_percent > 0 &&	\
			    !(q).is_evicted)

enum mqd_update_flag {
	UPDATE_FLAG_CU_MASK = 0,
};

struct mqd_update_info {
	union {
		struct {
			uint32_t count; /* Must be a multiple of 32 */
			uint32_t *ptr;
		} cu_mask;
	};
	enum mqd_update_flag update_flag;
};
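
/*
 * Example (editor's sketch): describing a CU-mask update for
 * pqm_update_mqd() (declared further down). The mask value is illustrative
 * and covers one 32-bit word, i.e. cu_mask.count = 32:
 *
 *	uint32_t mask = 0x0000ffff;
 *	struct mqd_update_info minfo = {
 *		.cu_mask = { .count = 32, .ptr = &mask },
 *		.update_flag = UPDATE_FLAG_CU_MASK,
 *	};
 *
 *	pqm_update_mqd(&p->pqm, qid, &minfo);
 */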

/**
 * struct queue
 *
 * @list: Queue linked list.
 *
 * @mqd: The queue MQD (memory queue descriptor).
 *
 * @mqd_mem_obj: The MQD local gpu memory object.
 *
 * @gart_mqd_addr: The MQD gart mc address.
 *
 * @properties: The queue properties.
 *
 * @mec: Used only in no-CP scheduling mode and identifies the micro engine
 * id that the queue should be executed on.
 *
 * @pipe: Used only in no-CP scheduling mode and identifies the queue's pipe
 * id.
 *
 * @queue: Used only in no-CP scheduling mode and identifies the queue's
 * slot.
 *
 * @process: The kfd process that created this queue.
 *
 * @device: The kfd device that created this queue.
 *
 * @gws: Points to the gws kgd_mem if this is a gws control queue; NULL
 * otherwise.
 *
 * This structure represents user mode compute queues.
 * It contains all the necessary data to handle such queues.
 */
struct queue {
	struct list_head list;
	void *mqd;
	struct kfd_mem_obj *mqd_mem_obj;
	uint64_t gart_mqd_addr;
	struct queue_properties properties;

	uint32_t mec;
	uint32_t pipe;
	uint32_t queue;

	unsigned int sdma_id;
	unsigned int doorbell_id;

	struct kfd_process *process;
	struct kfd_dev *device;
	void *gws;

	/* procfs */
	struct kobject kobj;
};

enum KFD_MQD_TYPE {
	KFD_MQD_TYPE_HIQ = 0,		/* for hiq */
	KFD_MQD_TYPE_CP,		/* for cp queues and diq */
	KFD_MQD_TYPE_SDMA,		/* for sdma queues */
	KFD_MQD_TYPE_DIQ,		/* for diq */
	KFD_MQD_TYPE_MAX
};

enum KFD_PIPE_PRIORITY {
	KFD_PIPE_PRIORITY_CS_LOW = 0,
	KFD_PIPE_PRIORITY_CS_MEDIUM,
	KFD_PIPE_PRIORITY_CS_HIGH
};

struct scheduling_resources {
	unsigned int vmid_mask;
	enum kfd_queue_type type;
	uint64_t queue_mask;
	uint64_t gws_mask;
	uint32_t oac_mask;
	uint32_t gds_heap_base;
	uint32_t gds_heap_size;
};

struct process_queue_manager {
	/* data */
	struct kfd_process *process;
	struct list_head queues;
	unsigned long *queue_slot_bitmap;
};

struct qcm_process_device {
	/* The Device Queue Manager that owns this data */
	struct device_queue_manager *dqm;
	struct process_queue_manager *pqm;
	/* Queues list */
	struct list_head queues_list;
	struct list_head priv_queue_list;

	unsigned int queue_count;
	unsigned int vmid;
	bool is_debug;
	unsigned int evicted; /* eviction counter, 0=active */

	/* This flag tells whether we should reset all wavefronts on
	 * process termination
	 */
	bool reset_wavefronts;

	/* This flag tells us if this process has a GWS-capable
	 * queue that will be mapped into the runlist. It's
	 * possible to request a GWS BO, but not have the queue
	 * currently mapped, and this changes how the MAP_PROCESS
	 * PM4 packet is configured.
	 */
	bool mapped_gws_queue;

	/* All the memory management data should be here too */
	uint64_t gds_context_area;
	/* Contains page table flags such as AMDGPU_PTE_VALID since gfx9 */
	uint64_t page_table_base;
	uint32_t sh_mem_config;
	uint32_t sh_mem_bases;
	uint32_t sh_mem_ape1_base;
	uint32_t sh_mem_ape1_limit;
	uint32_t gds_size;
	uint32_t num_gws;
	uint32_t num_oac;
	uint32_t sh_hidden_private_base;

	/* CWSR memory */
	struct kgd_mem *cwsr_mem;
	void *cwsr_kaddr;
	uint64_t cwsr_base;
	uint64_t tba_addr;
	uint64_t tma_addr;

	/* IB memory */
	struct kgd_mem *ib_mem;
	uint64_t ib_base;
	void *ib_kaddr;

	/* doorbell resources per process per device */
	unsigned long *doorbell_bitmap;
};

/* KFD Memory Eviction */

/* Approx. wait time before attempting to restore evicted BOs */
#define PROCESS_RESTORE_TIME_MS 100
/* Approx. back off time if restore fails due to lack of memory */
#define PROCESS_BACK_OFF_TIME_MS 100
/* Approx. time before evicting the process again */
#define PROCESS_ACTIVE_TIME_MS 10

/* 8 byte handle containing GPU ID in the most significant 4 bytes and
 * idr_handle in the least significant 4 bytes
 */
#define MAKE_HANDLE(gpu_id, idr_handle) \
	(((uint64_t)(gpu_id) << 32) + idr_handle)
#define GET_GPU_ID(handle) (handle >> 32)
#define GET_IDR_HANDLE(handle) (handle & 0xFFFFFFFF)

enum kfd_pdd_bound {
	PDD_UNBOUND = 0,
	PDD_BOUND,
	PDD_BOUND_SUSPENDED,
};

#define MAX_SYSFS_FILENAME_LEN 15

/*
 * SDMA counter runs at 100MHz frequency.
 * We display SDMA activity in microsecond granularity in sysfs.
 * As a result, the divisor is 100.
 */
#define SDMA_ACTIVITY_DIVISOR  100

/* Data that is per-process-per device. */
struct kfd_process_device {
	/* The device that owns this data. */
	struct kfd_dev *dev;

	/* The process that owns this kfd_process_device. */
	struct kfd_process *process;

	/* per-process-per device QCM data structure */
	struct qcm_process_device qpd;

	/* Apertures */
	uint64_t lds_base;
	uint64_t lds_limit;
	uint64_t gpuvm_base;
	uint64_t gpuvm_limit;
	uint64_t scratch_base;
	uint64_t scratch_limit;

	/* VM context for GPUVM allocations */
	struct file *drm_file;
	void *drm_priv;

	/* GPUVM allocations storage */
	struct idr alloc_idr;

	/* Flag used to tell whether the pdd has been dequeued from the dqm.
	 * This is used to prevent dev->dqm->ops.process_termination() from
	 * being called twice when it is already called in the IOMMU callback
	 * function.
	 */
	bool already_dequeued;
	bool runtime_inuse;

	/* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */
	enum kfd_pdd_bound bound;

	/* VRAM usage */
	uint64_t vram_usage;
	struct attribute attr_vram;
	char vram_filename[MAX_SYSFS_FILENAME_LEN];

	/* SDMA activity tracking */
	uint64_t sdma_past_activity_counter;
	struct attribute attr_sdma;
	char sdma_filename[MAX_SYSFS_FILENAME_LEN];

	/* Eviction activity tracking */
	uint64_t last_evict_timestamp;
	atomic64_t evict_duration_counter;
	struct attribute attr_evict;

	struct kobject *kobj_stats;
	unsigned int doorbell_index;

	/*
	 * @cu_occupancy: Reports occupancy of Compute Units (CU) of a process
	 * that is associated with the device encoded by "this" struct
	 * instance. The value reflects CU usage by all of the waves launched
	 * by this process on this device. A very important property of the
	 * occupancy parameter is that its value is a snapshot of current use.
	 *
	 * The following is to be noted regarding how this parameter is
	 * reported:
	 *
	 * The number of waves that a CU can launch is limited by a couple of
	 * parameters. These are encoded by the struct amdgpu_cu_info instance
	 * that is part of every device definition. For GFX9 devices this
	 * translates to 40 waves (simd_per_cu * max_waves_per_simd) when waves
	 * do not use scratch memory and 32 waves (max_scratch_slots_per_cu)
	 * when they do use scratch memory. This could change for future
	 * devices and therefore this example should be considered as a guide.
	 *
	 * All CUs of a device are available to the process. This may not be
	 * true under certain conditions - e.g. CU masking.
	 *
	 * Finally, the number of CUs occupied by a process is affected by
	 * both the number of CUs a device has and the number of other
	 * competing processes.
	 */
	struct attribute attr_cu_occupancy;

	/* sysfs counters for GPU retry fault and page migration tracking */
	struct kobject *kobj_counters;
	struct attribute attr_faults;
	struct attribute attr_page_in;
	struct attribute attr_page_out;
	uint64_t faults;
	uint64_t page_in;
	uint64_t page_out;
	/*
	 * If this process has been checkpointed before, then the user
	 * application will use the original gpu_id on the
	 * checkpointed node to refer to this device.
	 */
	uint32_t user_gpu_id;
};

#define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)

struct svm_range_list {
	struct mutex lock;
	struct rb_root_cached objects;
	struct list_head list;
	struct work_struct deferred_list_work;
	struct list_head deferred_range_list;
	struct list_head criu_svm_metadata_list;
	spinlock_t deferred_list_lock;
	atomic_t evicted_ranges;
	atomic_t drain_pagefaults;
	struct delayed_work restore_work;
	DECLARE_BITMAP(bitmap_supported, MAX_GPU_INSTANCE);
	struct task_struct *faulting_task;
};

/* Process data */
struct kfd_process {
	/*
	 * kfd_process are stored in an mm_struct*->kfd_process*
	 * hash table (kfd_processes in kfd_process.c)
	 */
	struct hlist_node kfd_processes;

	/*
	 * Opaque pointer to mm_struct. We don't hold a reference to
	 * it so it should never be dereferenced from here. This is
	 * only used for looking up processes by their mm.
	 */
	void *mm;

	struct kref ref;
	struct work_struct release_work;

	struct mutex mutex;

	/*
	 * In any process, the thread that started main() is the lead
	 * thread and outlives the rest.
	 * It is here because amd_iommu_bind_pasid wants a task_struct.
	 * It can also be used for safely getting a reference to the
	 * mm_struct of the process.
	 */
	struct task_struct *lead_thread;

	/* We want to receive a notification when the mm_struct is destroyed */
	struct mmu_notifier mmu_notifier;

	u32 pasid;

	/*
	 * Array of kfd_process_device pointers,
	 * one for each device the process is using.
	 */
	struct kfd_process_device *pdds[MAX_GPU_INSTANCE];
	uint32_t n_pdds;

	struct process_queue_manager pqm;

	/* Is the user space process 32 bit? */
	bool is_32bit_user_mode;

	/* Event-related data */
	struct mutex event_mutex;
	/* Event ID allocator and lookup */
	struct idr event_idr;
	/* Event page */
	u64 signal_handle;
	struct kfd_signal_page *signal_page;
	size_t signal_mapped_size;
	size_t signal_event_count;
	bool signal_event_limit_reached;

	/* Information used for memory eviction */
	void *kgd_process_info;
	/* Eviction fence that is attached to all the BOs of this process. The
	 * fence will be triggered during eviction and a new one will be
	 * created during restore.
	 */
	struct dma_fence *ef;

	/* Work items for evicting and restoring BOs */
	struct delayed_work eviction_work;
	struct delayed_work restore_work;
	/* seqno of the last scheduled eviction */
	unsigned int last_eviction_seqno;
	/* Approx. the last timestamp (in jiffies) when the process was
	 * restored after an eviction
	 */
	unsigned long last_restore_timestamp;

	/* Kobj for our procfs */
	struct kobject *kobj;
	struct kobject *kobj_queues;
	struct attribute attr_pasid;

	/* shared virtual memory registered by this process */
	struct svm_range_list svms;

	bool xnack_enabled;

	atomic_t poison;
	/* Queues are in a paused state because we are in the process of
	 * doing a CRIU checkpoint.
	 */
	bool queues_paused;
};

#define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */
extern DECLARE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
extern struct srcu_struct kfd_processes_srcu;

/**
 * typedef amdkfd_ioctl_t - typedef for ioctl function pointer.
 *
 * @filep: pointer to file structure.
 * @p: amdkfd process pointer.
 * @data: pointer to arg that was copied from user.
 *
 * Return: returns ioctl completion code.
 */
typedef int amdkfd_ioctl_t(struct file *filep, struct kfd_process *p,
				void *data);

struct amdkfd_ioctl_desc {
	unsigned int cmd;
	int flags;
	amdkfd_ioctl_t *func;
	unsigned int cmd_drv;
	const char *name;
};
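
/*
 * Example (editor's sketch): the shape of a handler matching
 * amdkfd_ioctl_t. The handler name and args struct are hypothetical;
 * @data already points to a kernel copy of the user arguments:
 *
 *	static int kfd_ioctl_do_something(struct file *filep,
 *					  struct kfd_process *p, void *data)
 *	{
 *		struct kfd_ioctl_do_something_args *args = data;
 *		...
 *		return 0;
 *	}
 */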

bool kfd_dev_is_large_bar(struct kfd_dev *dev);

int kfd_process_create_wq(void);
void kfd_process_destroy_wq(void);
struct kfd_process *kfd_create_process(struct file *filep);
struct kfd_process *kfd_get_process(const struct task_struct *task);
struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid);
struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm);

int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id);
int kfd_process_gpuid_from_adev(struct kfd_process *p,
			       struct amdgpu_device *adev, uint32_t *gpuid,
			       uint32_t *gpuidx);
/* Fill *gpuid from the pdd at gpuidx; the original ternary returned the id
 * itself and never wrote the out parameter.
 */
static inline int kfd_process_gpuid_from_gpuidx(struct kfd_process *p,
				uint32_t gpuidx, uint32_t *gpuid)
{
	if (gpuidx >= p->n_pdds)
		return -EINVAL;
	*gpuid = p->pdds[gpuidx]->dev->id;
	return 0;
}
static inline struct kfd_process_device *kfd_process_device_from_gpuidx(
				struct kfd_process *p, uint32_t gpuidx)
{
	return gpuidx < p->n_pdds ? p->pdds[gpuidx] : NULL;
}

void kfd_unref_process(struct kfd_process *p);
int kfd_process_evict_queues(struct kfd_process *p);
int kfd_process_restore_queues(struct kfd_process *p);
void kfd_suspend_all_processes(void);
int kfd_resume_all_processes(void);

struct kfd_process_device *kfd_process_device_data_by_id(struct kfd_process *process,
							 uint32_t gpu_id);

int kfd_process_get_user_gpu_id(struct kfd_process *p, uint32_t actual_gpu_id);

int kfd_process_device_init_vm(struct kfd_process_device *pdd,
			       struct file *drm_file);
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
						      struct kfd_process *p);
struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
						       struct kfd_process *p);
struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
							  struct kfd_process *p);

bool kfd_process_xnack_mode(struct kfd_process *p, bool supported);

int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
			  struct vm_area_struct *vma);

/* KFD process API for creating and translating handles */
int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
					 void *mem);
void *kfd_process_device_translate_handle(struct kfd_process_device *pdd,
					  int handle);
void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
					  int handle);
struct kfd_process *kfd_lookup_process_by_pid(struct pid *pid);

/* PASIDs */
int kfd_pasid_init(void);
void kfd_pasid_exit(void);
bool kfd_set_pasid_limit(unsigned int new_limit);
unsigned int kfd_get_pasid_limit(void);
u32 kfd_pasid_alloc(void);
void kfd_pasid_free(u32 pasid);

/* Doorbells */
size_t kfd_doorbell_process_slice(struct kfd_dev *kfd);
int kfd_doorbell_init(struct kfd_dev *kfd);
void kfd_doorbell_fini(struct kfd_dev *kfd);
int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process,
		      struct vm_area_struct *vma);
void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
					unsigned int *doorbell_off);
void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr);
u32 read_kernel_doorbell(u32 __iomem *db);
void write_kernel_doorbell(void __iomem *db, u32 value);
void write_kernel_doorbell64(void __iomem *db, u64 value);
unsigned int kfd_get_doorbell_dw_offset_in_bar(struct kfd_dev *kfd,
					struct kfd_process_device *pdd,
					unsigned int doorbell_id);
phys_addr_t kfd_get_process_doorbells(struct kfd_process_device *pdd);
int kfd_alloc_process_doorbells(struct kfd_dev *kfd,
				unsigned int *doorbell_index);
void kfd_free_process_doorbells(struct kfd_dev *kfd,
				unsigned int doorbell_index);
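
/*
 * Example (editor's sketch): acquiring a kernel-queue doorbell and ringing
 * it after new packets are published. "pending_wptr" stands for whatever
 * write pointer the caller maintains:
 *
 *	unsigned int doorbell_off;
 *	void __iomem *db = kfd_get_kernel_doorbell(kfd, &doorbell_off);
 *
 *	if (!db)
 *		return -ENOMEM;
 *	write_kernel_doorbell(db, pending_wptr);
 */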

/* GTT Sub-Allocator */
int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
			struct kfd_mem_obj **mem_obj);

int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj);

extern struct device *kfd_device;

/* KFD's procfs */
void kfd_procfs_init(void);
void kfd_procfs_shutdown(void);
int kfd_procfs_add_queue(struct queue *q);
void kfd_procfs_del_queue(struct queue *q);

/* Topology */
int kfd_topology_init(void);
void kfd_topology_shutdown(void);
int kfd_topology_add_device(struct kfd_dev *gpu);
int kfd_topology_remove_device(struct kfd_dev *gpu);
struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
						uint32_t proximity_domain);
struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id);
struct kfd_dev *kfd_device_by_id(uint32_t gpu_id);
struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
struct kfd_dev *kfd_device_by_adev(const struct amdgpu_device *adev);
int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev);
int kfd_numa_node_to_apic_id(int numa_node_id);
void kfd_double_confirm_iommu_support(struct kfd_dev *gpu);

/* Interrupts */
int kfd_interrupt_init(struct kfd_dev *dev);
void kfd_interrupt_exit(struct kfd_dev *dev);
bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry);
bool interrupt_is_wanted(struct kfd_dev *dev,
				const uint32_t *ih_ring_entry,
				uint32_t *patched_ihre, bool *flag);

/* amdkfd Apertures */
int kfd_init_apertures(struct kfd_process *process);

void kfd_process_set_trap_handler(struct qcm_process_device *qpd,
				  uint64_t tba_addr,
				  uint64_t tma_addr);

/* CRIU */
/*
 * Need to increment KFD_CRIU_PRIV_VERSION each time a change is made to any
 * of the CRIU private structures:
 * kfd_criu_process_priv_data
 * kfd_criu_device_priv_data
 * kfd_criu_bo_priv_data
 * kfd_criu_queue_priv_data
 * kfd_criu_event_priv_data
 * kfd_criu_svm_range_priv_data
 */

#define KFD_CRIU_PRIV_VERSION 1

struct kfd_criu_process_priv_data {
	uint32_t version;
	uint32_t xnack_mode;
};

struct kfd_criu_device_priv_data {
	/* For future use */
	uint64_t reserved;
};

struct kfd_criu_bo_priv_data {
	uint64_t user_addr;
	uint32_t idr_handle;
	uint32_t mapped_gpuids[MAX_GPU_INSTANCE];
};

/*
 * The first 4 bytes of kfd_criu_queue_priv_data, kfd_criu_event_priv_data,
 * and kfd_criu_svm_range_priv_data are the object type
 */
enum kfd_criu_object_type {
	KFD_CRIU_OBJECT_TYPE_QUEUE,
	KFD_CRIU_OBJECT_TYPE_EVENT,
	KFD_CRIU_OBJECT_TYPE_SVM_RANGE,
};
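
/*
 * Example (editor's sketch): a restore path can dispatch on the object
 * type stored in the first 4 bytes of each private-data blob:
 *
 *	uint32_t object_type = *(uint32_t *)priv_data;
 *
 *	switch (object_type) {
 *	case KFD_CRIU_OBJECT_TYPE_QUEUE:
 *		... restore a queue ...
 *	case KFD_CRIU_OBJECT_TYPE_EVENT:
 *		... restore an event ...
 *	}
 */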

struct kfd_criu_svm_range_priv_data {
	uint32_t object_type;
	uint64_t start_addr;
	uint64_t size;
	/* Variable length array of attributes */
	struct kfd_ioctl_svm_attribute attrs[];
};

struct kfd_criu_queue_priv_data {
	uint32_t object_type;
	uint64_t q_address;
	uint64_t q_size;
	uint64_t read_ptr_addr;
	uint64_t write_ptr_addr;
	uint64_t doorbell_off;
	uint64_t eop_ring_buffer_address;
	uint64_t ctx_save_restore_area_address;
	uint32_t gpu_id;
	uint32_t type;
	uint32_t format;
	uint32_t q_id;
	uint32_t priority;
	uint32_t q_percent;
	uint32_t doorbell_id;
	uint32_t is_gws;
	uint32_t sdma_id;
	uint32_t eop_ring_buffer_size;
	uint32_t ctx_save_restore_area_size;
	uint32_t ctl_stack_size;
	uint32_t mqd_size;
};

struct kfd_criu_event_priv_data {
	uint32_t object_type;
	uint64_t user_handle;
	uint32_t event_id;
	uint32_t auto_reset;
	uint32_t type;
	uint32_t signaled;

	union {
		struct kfd_hsa_memory_exception_data memory_exception_data;
		struct kfd_hsa_hw_exception_data hw_exception_data;
	};
};

int kfd_process_get_queue_info(struct kfd_process *p,
			       uint32_t *num_queues,
			       uint64_t *priv_data_sizes);

int kfd_criu_checkpoint_queues(struct kfd_process *p,
			       uint8_t __user *user_priv_data,
			       uint64_t *priv_data_offset);

int kfd_criu_restore_queue(struct kfd_process *p,
			   uint8_t __user *user_priv_data,
			   uint64_t *priv_data_offset,
			   uint64_t max_priv_data_size);

int kfd_criu_checkpoint_events(struct kfd_process *p,
			       uint8_t __user *user_priv_data,
			       uint64_t *priv_data_offset);

int kfd_criu_restore_event(struct file *devkfd,
			   struct kfd_process *p,
			   uint8_t __user *user_priv_data,
			   uint64_t *priv_data_offset,
			   uint64_t max_priv_data_size);
/* CRIU - End */

/* Queue Context Management */
int init_queue(struct queue **q, const struct queue_properties *properties);
void uninit_queue(struct queue *q);
void print_queue_properties(struct queue_properties *q);
void print_queue(struct queue *q);

struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
		struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_cik_hawaii(enum KFD_MQD_TYPE type,
		struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
		struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_vi_tonga(enum KFD_MQD_TYPE type,
		struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
		struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
		struct kfd_dev *dev);
struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev);
void device_queue_manager_uninit(struct device_queue_manager *dqm);
struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
					enum kfd_queue_type type);
void kernel_queue_uninit(struct kernel_queue *kq, bool hanging);
int kfd_process_vm_fault(struct device_queue_manager *dqm, u32 pasid);

/* Process Queue Manager */
struct process_queue_node {
	struct queue *q;
	struct kernel_queue *kq;
	struct list_head process_queue_list;
};

void kfd_process_dequeue_from_device(struct kfd_process_device *pdd);
void kfd_process_dequeue_from_all_devices(struct kfd_process *p);
int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p);
void pqm_uninit(struct process_queue_manager *pqm);
int pqm_create_queue(struct process_queue_manager *pqm,
			struct kfd_dev *dev,
			struct file *f,
			struct queue_properties *properties,
			unsigned int *qid,
			const struct kfd_criu_queue_priv_data *q_data,
			const void *restore_mqd,
			const void *restore_ctl_stack,
			uint32_t *p_doorbell_offset_in_process);
int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid);
int pqm_update_queue_properties(struct process_queue_manager *pqm,
				unsigned int qid,
				struct queue_properties *p);
int pqm_update_mqd(struct process_queue_manager *pqm, unsigned int qid,
		   struct mqd_update_info *minfo);
int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
		void *gws);
struct kernel_queue *pqm_get_kernel_queue(struct process_queue_manager *pqm,
					  unsigned int qid);
struct queue *pqm_get_user_queue(struct process_queue_manager *pqm,
				 unsigned int qid);
int pqm_get_wave_state(struct process_queue_manager *pqm,
		       unsigned int qid,
		       void __user *ctl_stack,
		       u32 *ctl_stack_used_size,
		       u32 *save_area_used_size);
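
/*
 * Example (editor's sketch): creating a fresh user compute queue through
 * the PQM. The CRIU restore arguments are NULL on the normal create path;
 * error handling is elided:
 *
 *	unsigned int qid;
 *	uint32_t doorbell_offset;
 *
 *	err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, &qid,
 *			       NULL, NULL, NULL, &doorbell_offset);
 */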

int amdkfd_fence_wait_timeout(uint64_t *fence_addr,
			      uint64_t fence_value,
			      unsigned int timeout_ms);

int pqm_get_queue_checkpoint_info(struct process_queue_manager *pqm,
				  unsigned int qid,
				  u32 *mqd_size,
				  u32 *ctl_stack_size);

/* Packet Manager */

#define KFD_FENCE_COMPLETED (100)
#define KFD_FENCE_INIT   (10)

struct packet_manager {
	struct device_queue_manager *dqm;
	struct kernel_queue *priv_queue;
	struct mutex lock;
	bool allocated;
	struct kfd_mem_obj *ib_buffer_obj;
	unsigned int ib_size_bytes;
	bool is_over_subscription;

	const struct packet_manager_funcs *pmf;
};

struct packet_manager_funcs {
	/* Support ASIC-specific packet formats for PM4 packets */
	int (*map_process)(struct packet_manager *pm, uint32_t *buffer,
			struct qcm_process_device *qpd);
	int (*runlist)(struct packet_manager *pm, uint32_t *buffer,
			uint64_t ib, size_t ib_size_in_dwords, bool chain);
	int (*set_resources)(struct packet_manager *pm, uint32_t *buffer,
			struct scheduling_resources *res);
	int (*map_queues)(struct packet_manager *pm, uint32_t *buffer,
			struct queue *q, bool is_static);
	int (*unmap_queues)(struct packet_manager *pm, uint32_t *buffer,
			enum kfd_queue_type type,
			enum kfd_unmap_queues_filter mode,
			uint32_t filter_param, bool reset,
			unsigned int sdma_engine);
	int (*query_status)(struct packet_manager *pm, uint32_t *buffer,
			uint64_t fence_address, uint64_t fence_value);
	int (*release_mem)(uint64_t gpu_addr, uint32_t *buffer);

	/* Packet sizes */
	int map_process_size;
	int runlist_size;
	int set_resources_size;
	int map_queues_size;
	int unmap_queues_size;
	int query_status_size;
	int release_mem_size;
};

extern const struct packet_manager_funcs kfd_vi_pm_funcs;
extern const struct packet_manager_funcs kfd_v9_pm_funcs;
extern const struct packet_manager_funcs kfd_aldebaran_pm_funcs;

int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
void pm_uninit(struct packet_manager *pm, bool hanging);
int pm_send_set_resources(struct packet_manager *pm,
				struct scheduling_resources *res);
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues);
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
				uint64_t fence_value);
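
/*
 * Example (editor's sketch): the usual fence handshake with the HW
 * scheduler: arm the fence, ask HWS to write KFD_FENCE_COMPLETED via a
 * QUERY_STATUS packet, then poll with a timeout:
 *
 *	*fence_addr = KFD_FENCE_INIT;
 *	pm_send_query_status(pm, fence_gpu_addr, KFD_FENCE_COMPLETED);
 *	ret = amdkfd_fence_wait_timeout(fence_addr, KFD_FENCE_COMPLETED,
 *					KFD_UNMAP_LATENCY_MS);
 */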

int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
			enum kfd_unmap_queues_filter mode,
			uint32_t filter_param, bool reset,
			unsigned int sdma_engine);

void pm_release_ib(struct packet_manager *pm);

/* The following PM functions can be shared between VI and AI */
unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size);

uint64_t kfd_get_number_elems(struct kfd_dev *kfd);

/* Events */
extern const struct kfd_event_interrupt_class event_interrupt_class_cik;
extern const struct kfd_event_interrupt_class event_interrupt_class_v9;

extern const struct kfd_device_global_init_class device_global_init_class_cik;

void kfd_event_init_process(struct kfd_process *p);
void kfd_event_free_process(struct kfd_process *p);
int kfd_event_mmap(struct kfd_process *process, struct vm_area_struct *vma);
int kfd_wait_on_events(struct kfd_process *p,
		       uint32_t num_events, void __user *data,
		       bool all, uint32_t user_timeout_ms,
		       uint32_t *wait_result);
void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
				uint32_t valid_id_bits);
void kfd_signal_iommu_event(struct kfd_dev *dev,
			    u32 pasid, unsigned long address,
			    bool is_write_requested, bool is_execute_requested);
void kfd_signal_hw_exception_event(u32 pasid);
int kfd_set_event(struct kfd_process *p, uint32_t event_id);
int kfd_reset_event(struct kfd_process *p, uint32_t event_id);
int kfd_kmap_event_page(struct kfd_process *p, uint64_t event_page_offset);

int kfd_event_create(struct file *devkfd, struct kfd_process *p,
		     uint32_t event_type, bool auto_reset, uint32_t node_id,
		     uint32_t *event_id, uint32_t *event_trigger_data,
		     uint64_t *event_page_offset, uint32_t *event_slot_index);

int kfd_get_num_events(struct kfd_process *p);
int kfd_event_destroy(struct kfd_process *p, uint32_t event_id);

void kfd_signal_vm_fault_event(struct kfd_dev *dev, u32 pasid,
			       struct kfd_vm_fault_info *info);

void kfd_signal_reset_event(struct kfd_dev *dev);

void kfd_signal_poison_consumed_event(struct kfd_dev *dev, u32 pasid);

void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type);

int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p);

bool kfd_is_locked(void);

/* Compute profile */
void kfd_inc_compute_active(struct kfd_dev *dev);
void kfd_dec_compute_active(struct kfd_dev *dev);

/* Cgroup Support */
/* Check with device cgroup if @kfd device is accessible */
static inline int kfd_devcgroup_check_permission(struct kfd_dev *kfd)
{
#if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF)
	struct drm_device *ddev = kfd->ddev;

	return devcgroup_check_permission(DEVCG_DEV_CHAR, DRM_MAJOR,
					  ddev->render->index,
					  DEVCG_ACC_WRITE | DEVCG_ACC_READ);
#else
	return 0;
#endif
}

/* Debugfs */
#if defined(CONFIG_DEBUG_FS)

void kfd_debugfs_init(void);
void kfd_debugfs_fini(void);
int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data);
int pqm_debugfs_mqds(struct seq_file *m, void *data);
int kfd_debugfs_hqds_by_device(struct seq_file *m, void *data);
int dqm_debugfs_hqds(struct seq_file *m, void *data);
int kfd_debugfs_rls_by_device(struct seq_file *m, void *data);
int pm_debugfs_runlist(struct seq_file *m, void *data);

int kfd_debugfs_hang_hws(struct kfd_dev *dev);
int pm_debugfs_hang_hws(struct packet_manager *pm);
int dqm_debugfs_hang_hws(struct device_queue_manager *dqm);

#else

static inline void kfd_debugfs_init(void) {}
static inline void kfd_debugfs_fini(void) {}

#endif

#endif