/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef KFD_PRIV_H_INCLUDED
#define KFD_PRIV_H_INCLUDED

#include <linux/hashtable.h>
#include <linux/mmu_notifier.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kfd_ioctl.h>
#include <linux/idr.h>
#include <linux/kfifo.h>
#include <linux/seq_file.h>
#include <linux/kref.h>
#include <linux/sysfs.h>
#include <linux/device_cgroup.h>
#include <drm/drm_file.h>
#include <drm/drm_drv.h>
#include <drm/drm_device.h>
#include <drm/drm_ioctl.h>
#include <kgd_kfd_interface.h>
#include <linux/swap.h>

#include "amd_shared.h"
#include "amdgpu.h"

#define KFD_MAX_RING_ENTRY_SIZE 8

#define KFD_SYSFS_FILE_MODE 0444

/* GPU ID hash width in bits */
#define KFD_GPU_ID_HASH_WIDTH 16

/* Use upper bits of mmap offset to store KFD driver specific information.
 * BITS[63:62] - Encode MMAP type
 * BITS[61:46] - Encode gpu_id. Identifies which GPU the offset belongs to
 * BITS[45:0]  - MMAP offset value
 *
 * NOTE: struct vm_area_struct.vm_pgoff uses offset in pages. Hence, these
 * defines are w.r.t. PAGE_SIZE
 */
#define KFD_MMAP_TYPE_SHIFT	62
#define KFD_MMAP_TYPE_MASK	(0x3ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_DOORBELL	(0x3ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_EVENTS	(0x2ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_RESERVED_MEM	(0x1ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_MMIO	(0x0ULL << KFD_MMAP_TYPE_SHIFT)

#define KFD_MMAP_GPU_ID_SHIFT 46
#define KFD_MMAP_GPU_ID_MASK (((1ULL << KFD_GPU_ID_HASH_WIDTH) - 1) \
				<< KFD_MMAP_GPU_ID_SHIFT)
#define KFD_MMAP_GPU_ID(gpu_id) ((((uint64_t)gpu_id) << KFD_MMAP_GPU_ID_SHIFT)\
				& KFD_MMAP_GPU_ID_MASK)
#define KFD_MMAP_GET_GPU_ID(offset)	((offset & KFD_MMAP_GPU_ID_MASK) \
				>> KFD_MMAP_GPU_ID_SHIFT)
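
/*
 * Illustrative sketch (hypothetical helper, not used by the driver): how the
 * macros above compose an mmap offset handed to user space and how the
 * driver decodes it again. The low 46 bits stay available for the per-type
 * offset value.
 */
static inline uint64_t kfd_example_doorbell_mmap_offset(uint32_t gpu_id)
{
	/* MMAP type in bits [63:62], hashed gpu_id in bits [61:46] */
	uint64_t offset = KFD_MMAP_TYPE_DOORBELL | KFD_MMAP_GPU_ID(gpu_id);

	/* KFD_MMAP_GET_GPU_ID(offset) now recovers gpu_id */
	return offset;
}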

/*
 * When working with the cp scheduler we should assign the HIQ manually or
 * via the amdgpu driver to a fixed hqd slot. Here are the fixed HIQ hqd slot
 * definitions for Kaveri. On Kaveri only the first ME's queues participate
 * in cp scheduling; with that in mind we put the HIQ slot in the second ME.
 */
#define KFD_CIK_HIQ_PIPE 4
#define KFD_CIK_HIQ_QUEUE 0

/* Macro for allocating structures */
#define kfd_alloc_struct(ptr_to_struct)	\
	((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL))
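
/*
 * Illustrative usage (sketch only): the macro derives both the allocation
 * size and the cast from the pointer's own type, so a caller can write e.g.
 *
 *	dqm = kfd_alloc_struct(dqm);
 *
 * which expands to ((typeof(dqm)) kzalloc(sizeof(*dqm), GFP_KERNEL)).
 */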

#define KFD_MAX_NUM_OF_PROCESSES 512
#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024

/*
 * Size of the per-process TBA+TMA buffer: 2 pages
 *
 * The first page is the TBA used for the CWSR ISA code. The second
 * page is used as TMA for user-mode trap handler setup in daisy-chain mode.
 */
#define KFD_CWSR_TBA_TMA_SIZE (PAGE_SIZE * 2)
#define KFD_CWSR_TMA_OFFSET PAGE_SIZE

#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE		\
	(KFD_MAX_NUM_OF_PROCESSES *			\
			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)

#define KFD_KERNEL_QUEUE_SIZE 2048

#define KFD_UNMAP_LATENCY_MS	(4000)

/*
 * 512 = 0x200
 * The doorbell index distance between SDMA RLC (2*i) and (2*i+1) in the
 * same SDMA engine on SOC15, which has 8-byte doorbells for SDMA.
 * A distance of 512 8-byte doorbells (i.e. one page away) ensures that the
 * SDMA RLC (2*i+1) doorbells (in terms of the lower 12 bit address) lie
 * exactly in the OFFSET and SIZE set in registers like
 * BIF_SDMA0_DOORBELL_RANGE.
 */
#define KFD_QUEUE_DOORBELL_MIRROR_OFFSET 512

/**
 * enum kfd_ioctl_flags - KFD ioctl flags
 * Various flags that can be set in &amdkfd_ioctl_desc.flags to control how
 * userspace can use a given ioctl.
 */
enum kfd_ioctl_flags {
	/*
	 * @KFD_IOC_FLAG_CHECKPOINT_RESTORE:
	 * Certain KFD ioctls such as AMDKFD_IOC_CRIU_OP can potentially
	 * perform privileged operations and load arbitrary data into MQDs
	 * and eventually HQD registers when the queue is mapped by HWS. In
	 * order to prevent this we should perform additional security
	 * checks.
	 *
	 * This is equivalent to callers with the CHECKPOINT_RESTORE
	 * capability.
	 *
	 * Note: Since earlier versions of docker do not support
	 * CHECKPOINT_RESTORE, we also allow ioctls with SYS_ADMIN capability.
	 */
	KFD_IOC_FLAG_CHECKPOINT_RESTORE = BIT(0),
};

/*
 * Kernel module parameter to specify the maximum number of supported queues
 * per device
 */
extern int max_num_of_queues_per_device;

/* Kernel module parameter to specify the scheduling policy */
extern int sched_policy;

/*
 * Kernel module parameter to specify the maximum number of processes
 * per HW scheduler
 */
extern int hws_max_conc_proc;

extern int cwsr_enable;

/*
 * Kernel module parameter to specify whether to send sigterm to an HSA
 * process on unhandled exceptions
 */
extern int send_sigterm;

/*
 * Kernel module parameter used to simulate a large-BAR machine on machines
 * without large-BAR support.
 */
extern int debug_largebar;

/*
 * Ignore CRAT table during KFD initialization; can be used to work around
 * broken CRAT tables on some AMD systems
 */
extern int ignore_crat;

/* Set sh_mem_config.retry_disable on GFX v9 */
extern int amdgpu_noretry;

/* Halt if HWS hang is detected */
extern int halt_if_hws_hang;

/* Whether the MEC FW supports GWS barriers */
extern bool hws_gws_support;

/* Queue preemption timeout in ms */
extern int queue_preemption_timeout_ms;

/* Don't evict process queues on vm fault */
extern int amdgpu_no_queue_eviction_on_vm_fault;

/* Enable eviction debug messages */
extern bool debug_evictions;

enum cache_policy {
	cache_policy_coherent,
	cache_policy_noncoherent
};

#define KFD_GC_VERSION(dev) ((dev)->adev->ip_versions[GC_HWIP][0])
#define KFD_IS_SOC15(dev)   ((KFD_GC_VERSION(dev)) >= (IP_VERSION(9, 0, 1)))
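
/*
 * Illustrative usage (sketch only): KFD_GC_VERSION yields the GC (graphics
 * core) IP version of a device, so version checks can be written as e.g.
 *
 *	if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1))
 *
 * to test for GFX v10.1.1 or newer. KFD_IS_SOC15 is the same comparison
 * against IP_VERSION(9, 0, 1).
 */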

struct kfd_event_interrupt_class {
	bool (*interrupt_isr)(struct kfd_dev *dev,
			const uint32_t *ih_ring_entry, uint32_t *patched_ihre,
			bool *patched_flag);
	void (*interrupt_wq)(struct kfd_dev *dev,
			const uint32_t *ih_ring_entry);
};

struct kfd_device_info {
	uint32_t gfx_target_version;
	const struct kfd_event_interrupt_class *event_interrupt_class;
	unsigned int max_pasid_bits;
	unsigned int max_no_of_hqd;
	unsigned int doorbell_size;
	size_t ih_ring_entry_size;
	uint8_t num_of_watch_points;
	uint16_t mqd_size_aligned;
	bool supports_cwsr;
	bool needs_iommu_device;
	bool needs_pci_atomics;
	uint32_t no_atomic_fw_version;
	unsigned int num_sdma_queues_per_engine;
};

unsigned int kfd_get_num_sdma_engines(struct kfd_dev *kdev);
unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_dev *kdev);

struct kfd_mem_obj {
	uint32_t range_start;
	uint32_t range_end;
	uint64_t gpu_addr;
	uint32_t *cpu_ptr;
	void *gtt_mem;
};

struct kfd_vmid_info {
	uint32_t first_vmid_kfd;
	uint32_t last_vmid_kfd;
	uint32_t vmid_num_kfd;
};

struct kfd_dev {
	struct amdgpu_device *adev;

	struct kfd_device_info device_info;
	struct pci_dev *pdev;
	struct drm_device *ddev;

	unsigned int id;		/* topology stub index */

	phys_addr_t doorbell_base;	/* Start of actual doorbells used by
					 * KFD. It is aligned for mapping
					 * into user mode
					 */
	size_t doorbell_base_dw_offset;	/* Offset from the start of the PCI
					 * doorbell BAR to the first KFD
					 * doorbell in dwords. GFX reserves
					 * the segment before this offset.
					 */
	u32 __iomem *doorbell_kernel_ptr; /* Pointer to the doorbell page
					   * used by the kernel queue
					   */

	struct kgd2kfd_shared_resources shared_resources;
	struct kfd_vmid_info vm_info;

	const struct kfd2kgd_calls *kfd2kgd;
	struct mutex doorbell_mutex;
	DECLARE_BITMAP(doorbell_available_index,
			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);

	void *gtt_mem;
	uint64_t gtt_start_gpu_addr;
	void *gtt_start_cpu_ptr;
	void *gtt_sa_bitmap;
	struct mutex gtt_sa_lock;
	unsigned int gtt_sa_chunk_size;
	unsigned int gtt_sa_num_of_chunks;

	/* Interrupts */
	struct kfifo ih_fifo;
	struct workqueue_struct *ih_wq;
	struct work_struct interrupt_work;
	spinlock_t interrupt_lock;

	/* QCM Device instance */
	struct device_queue_manager *dqm;

	bool init_complete;
	/*
	 * Interrupts of interest to KFD are copied
	 * from the HW ring into a SW ring.
	 */
	bool interrupts_active;

	/* Debug manager */
	struct kfd_dbgmgr *dbgmgr;

	/* Firmware versions */
	uint16_t mec_fw_version;
	uint16_t mec2_fw_version;
	uint16_t sdma_fw_version;

	/* Maximum process number mapped to HW scheduler */
	unsigned int max_proc_per_quantum;

	/* CWSR */
	bool cwsr_enabled;
	const void *cwsr_isa;
	unsigned int cwsr_isa_size;

	/* xGMI */
	uint64_t hive_id;

	bool pci_atomic_requested;

	/* Use IOMMU v2 flag */
	bool use_iommu_v2;

	/* SRAM ECC flag */
	atomic_t sram_ecc_flag;

	/* Compute Profile ref. count */
	atomic_t compute_profile;

	/* Global GWS resource shared between processes */
	void *gws;

	/* Clients watching SMI events */
	struct list_head smi_clients;
	spinlock_t smi_lock;

	uint32_t reset_seq_num;

	struct ida doorbell_ida;
	unsigned int max_doorbell_slices;

	int noretry;

	/* HMM page migration MEMORY_DEVICE_PRIVATE mapping */
	struct dev_pagemap pgmap;
};

enum kfd_mempool {
	KFD_MEMPOOL_SYSTEM_CACHEABLE = 1,
	KFD_MEMPOOL_SYSTEM_WRITECOMBINE = 2,
	KFD_MEMPOOL_FRAMEBUFFER = 3,
};

/* Character device interface */
int kfd_chardev_init(void);
void kfd_chardev_exit(void);
struct device *kfd_chardev(void);

/**
 * enum kfd_unmap_queues_filter - Enum for queue filters.
 *
 * @KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE: Preempts a single queue.
 *
 * @KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES: Preempts all queues in the
 * running queues list.
 *
 * @KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES: Preempts all non-static queues
 * in the running queues list.
 *
 * @KFD_UNMAP_QUEUES_FILTER_BY_PASID: Preempts queues that belong to a
 * specific process.
 */
enum kfd_unmap_queues_filter {
	KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE,
	KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES,
	KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES,
	KFD_UNMAP_QUEUES_FILTER_BY_PASID
};

/**
 * enum kfd_queue_type - Enum for various queue types.
 *
 * @KFD_QUEUE_TYPE_COMPUTE: Regular user mode queue type.
 *
 * @KFD_QUEUE_TYPE_SDMA: SDMA user mode queue type.
 *
 * @KFD_QUEUE_TYPE_HIQ: HIQ queue type.
 *
 * @KFD_QUEUE_TYPE_DIQ: DIQ queue type.
 *
 * @KFD_QUEUE_TYPE_SDMA_XGMI: Special SDMA queue for XGMI interface.
 */
enum kfd_queue_type {
	KFD_QUEUE_TYPE_COMPUTE,
	KFD_QUEUE_TYPE_SDMA,
	KFD_QUEUE_TYPE_HIQ,
	KFD_QUEUE_TYPE_DIQ,
	KFD_QUEUE_TYPE_SDMA_XGMI
};

enum kfd_queue_format {
	KFD_QUEUE_FORMAT_PM4,
	KFD_QUEUE_FORMAT_AQL
};

enum KFD_QUEUE_PRIORITY {
	KFD_QUEUE_PRIORITY_MINIMUM = 0,
	KFD_QUEUE_PRIORITY_MAXIMUM = 15
};

/**
 * struct queue_properties
 *
 * @type: The queue type.
 *
 * @queue_id: Queue identifier.
 *
 * @queue_address: Queue ring buffer address.
 *
 * @queue_size: Queue ring buffer size.
 *
 * @priority: Defines the queue priority relative to other queues in the
 * process.
 * This is just an indication and HW scheduling may override the priority as
 * necessary while keeping the relative prioritization.
 * The priority granularity is from 0 to f, with f being the highest
 * priority. Currently all queues are initialized with the highest priority.
 *
 * @queue_percent: This field is partially implemented; currently a zero in
 * this field defines that the queue is non-active.
 *
 * @read_ptr: User space address which points to the number of dwords the
 * cp has read from the ring buffer. This field is updated automatically by
 * the H/W.
 *
 * @write_ptr: Defines the number of dwords written to the ring buffer.
 *
 * @doorbell_ptr: Notifies the H/W of new packets written to the queue ring
 * buffer. This field should be similar to write_ptr and the user should
 * update this field after updating the write_ptr.
 *
 * @doorbell_off: The doorbell offset in the doorbell pci-bar.
 *
 * @is_interop: Defines if this is an interop queue. An interop queue can
 * access both graphics and compute resources.
 *
 * @is_evicted: Defines if the queue is evicted. Only active queues
 * are evicted, rendering them inactive.
 *
 * @is_active: Defines if the queue is active or not. @is_active and
 * @is_evicted are protected by the DQM lock.
 *
 * @is_gws: Defines if the queue has been updated to be GWS-capable or not.
 * @is_gws should be protected by the DQM lock, since changing it can yield
 * the possibility of updating DQM state on the number of GWS queues.
 *
 * @vmid: If the scheduling mode is no cp scheduling, this field defines the
 * vmid of the queue.
 *
 * This structure represents the queue properties for each queue, whether it
 * is a user mode or kernel mode queue.
 */
struct queue_properties {
	enum kfd_queue_type type;
	enum kfd_queue_format format;
	unsigned int queue_id;
	uint64_t queue_address;
	uint64_t queue_size;
	uint32_t priority;
	uint32_t queue_percent;
	uint32_t *read_ptr;
	uint32_t *write_ptr;
	void __iomem *doorbell_ptr;
	uint32_t doorbell_off;
	bool is_interop;
	bool is_evicted;
	bool is_active;
	bool is_gws;
	/* Not relevant for user mode queues in cp scheduling */
	unsigned int vmid;
	/* Relevant only for sdma queues */
	uint32_t sdma_engine_id;
	uint32_t sdma_queue_id;
	uint32_t sdma_vm_addr;
	/* Relevant only for VI */
	uint64_t eop_ring_buffer_address;
	uint32_t eop_ring_buffer_size;
	uint64_t ctx_save_restore_area_address;
	uint32_t ctx_save_restore_area_size;
	uint32_t ctl_stack_size;
	uint64_t tba_addr;
	uint64_t tma_addr;
};

#define QUEUE_IS_ACTIVE(q) ((q).queue_size > 0 &&	\
			    (q).queue_address != 0 &&	\
			    (q).queue_percent > 0 &&	\
			    !(q).is_evicted)

enum mqd_update_flag {
	UPDATE_FLAG_CU_MASK = 0,
};

struct mqd_update_info {
	union {
		struct {
			uint32_t count; /* Must be a multiple of 32 */
			uint32_t *ptr;
		} cu_mask;
	};
	enum mqd_update_flag update_flag;
};
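
/*
 * Illustrative sketch (hypothetical helper, not used by the driver): filling
 * struct mqd_update_info to update a queue's CU mask. The count is in bits
 * and must be a multiple of 32; the actual update would then be issued
 * through pqm_update_mqd(), declared later in this header.
 */
static inline void kfd_example_fill_cu_mask(struct mqd_update_info *minfo,
					    uint32_t *mask_words,
					    uint32_t num_bits)
{
	minfo->update_flag = UPDATE_FLAG_CU_MASK;
	minfo->cu_mask.count = num_bits;	/* must be a multiple of 32 */
	minfo->cu_mask.ptr = mask_words;	/* one word per 32 CUs */
}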

/**
 * struct queue
 *
 * @list: Queue linked list.
 *
 * @mqd: The queue MQD (memory queue descriptor).
 *
 * @mqd_mem_obj: The MQD local gpu memory object.
 *
 * @gart_mqd_addr: The MQD gart mc address.
 *
 * @properties: The queue properties.
 *
 * @mec: Used only in no cp scheduling mode and identifies the micro engine
 * id that the queue should be executed on.
 *
 * @pipe: Used only in no cp scheduling mode and identifies the queue's pipe
 * id.
 *
 * @queue: Used only in no cp scheduling mode and identifies the queue's
 * slot.
 *
 * @process: The kfd process that created this queue.
 *
 * @device: The kfd device that created this queue.
 *
 * @gws: Pointing to gws kgd_mem if this is a gws control queue; NULL
 * otherwise.
 *
 * This structure represents user mode compute queues.
 * It contains all the necessary data to handle such queues.
 */
struct queue {
	struct list_head list;
	void *mqd;
	struct kfd_mem_obj *mqd_mem_obj;
	uint64_t gart_mqd_addr;
	struct queue_properties properties;

	uint32_t mec;
	uint32_t pipe;
	uint32_t queue;

	unsigned int sdma_id;
	unsigned int doorbell_id;

	struct kfd_process *process;
	struct kfd_dev *device;
	void *gws;

	/* procfs */
	struct kobject kobj;
};

enum KFD_MQD_TYPE {
	KFD_MQD_TYPE_HIQ = 0,		/* for hiq */
	KFD_MQD_TYPE_CP,		/* for cp queues and diq */
	KFD_MQD_TYPE_SDMA,		/* for sdma queues */
	KFD_MQD_TYPE_DIQ,		/* for diq */
	KFD_MQD_TYPE_MAX
};

enum KFD_PIPE_PRIORITY {
	KFD_PIPE_PRIORITY_CS_LOW = 0,
	KFD_PIPE_PRIORITY_CS_MEDIUM,
	KFD_PIPE_PRIORITY_CS_HIGH
};

struct scheduling_resources {
	unsigned int vmid_mask;
	enum kfd_queue_type type;
	uint64_t queue_mask;
	uint64_t gws_mask;
	uint32_t oac_mask;
	uint32_t gds_heap_base;
	uint32_t gds_heap_size;
};

struct process_queue_manager {
	/* data */
	struct kfd_process *process;
	struct list_head queues;
	unsigned long *queue_slot_bitmap;
};

struct qcm_process_device {
	/* The Device Queue Manager that owns this data */
	struct device_queue_manager *dqm;
	struct process_queue_manager *pqm;
	/* Queues list */
	struct list_head queues_list;
	struct list_head priv_queue_list;

	unsigned int queue_count;
	unsigned int vmid;
	bool is_debug;
	unsigned int evicted; /* eviction counter, 0=active */

	/* This flag tells if we should reset all wavefronts on
	 * process termination
	 */
	bool reset_wavefronts;

	/* This flag tells us if this process has a GWS-capable
	 * queue that will be mapped into the runlist. It's
	 * possible to request a GWS BO, but not have the queue
	 * currently mapped, and this changes how the MAP_PROCESS
	 * PM4 packet is configured.
	 */
	bool mapped_gws_queue;

	/* All the memory management data should be here too */
	uint64_t gds_context_area;
	/* Contains page table flags such as AMDGPU_PTE_VALID since gfx9 */
	uint64_t page_table_base;
	uint32_t sh_mem_config;
	uint32_t sh_mem_bases;
	uint32_t sh_mem_ape1_base;
	uint32_t sh_mem_ape1_limit;
	uint32_t gds_size;
	uint32_t num_gws;
	uint32_t num_oac;
	uint32_t sh_hidden_private_base;

	/* CWSR memory */
	struct kgd_mem *cwsr_mem;
	void *cwsr_kaddr;
	uint64_t cwsr_base;
	uint64_t tba_addr;
	uint64_t tma_addr;

	/* IB memory */
	struct kgd_mem *ib_mem;
	uint64_t ib_base;
	void *ib_kaddr;

	/* doorbell resources per process per device */
	unsigned long *doorbell_bitmap;
};

/* KFD Memory Eviction */

/* Approx. wait time before attempting to restore evicted BOs */
#define PROCESS_RESTORE_TIME_MS 100
/* Approx. back off time if restore fails due to lack of memory */
#define PROCESS_BACK_OFF_TIME_MS 100
/* Approx. time before evicting the process again */
#define PROCESS_ACTIVE_TIME_MS 10

/* 8 byte handle containing GPU ID in the most significant 4 bytes and
 * idr_handle in the least significant 4 bytes
 */
#define MAKE_HANDLE(gpu_id, idr_handle) \
	(((uint64_t)(gpu_id) << 32) + idr_handle)
#define GET_GPU_ID(handle) (handle >> 32)
#define GET_IDR_HANDLE(handle) (handle & 0xFFFFFFFF)
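
/*
 * Illustrative sketch (hypothetical, not used by the driver): packing a
 * gpu_id/idr_handle pair into the 64-bit buffer handle and unpacking it
 * again with the macros above.
 */
static inline bool kfd_example_handle_roundtrip(uint32_t gpu_id,
						uint32_t idr_handle)
{
	uint64_t handle = MAKE_HANDLE(gpu_id, idr_handle);

	/* GPU ID comes back from the upper half, IDR handle from the lower */
	return GET_GPU_ID(handle) == gpu_id &&
	       GET_IDR_HANDLE(handle) == idr_handle;
}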

enum kfd_pdd_bound {
	PDD_UNBOUND = 0,
	PDD_BOUND,
	PDD_BOUND_SUSPENDED,
};

#define MAX_SYSFS_FILENAME_LEN 15

/*
 * SDMA counter runs at 100 MHz frequency.
 * We display SDMA activity in microsecond granularity in sysfs.
 * As a result, the divisor is 100.
 */
#define SDMA_ACTIVITY_DIVISOR 100
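
/*
 * Illustrative sketch (hypothetical helper): converting a raw SDMA activity
 * counter value to microseconds for sysfs. At 100 MHz there are exactly
 * SDMA_ACTIVITY_DIVISOR (100) counter ticks per microsecond.
 */
static inline uint64_t kfd_example_sdma_ticks_to_us(uint64_t ticks)
{
	return ticks / SDMA_ACTIVITY_DIVISOR;
}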

/* Data that is per-process-per device. */
struct kfd_process_device {
	/* The device that owns this data. */
	struct kfd_dev *dev;

	/* The process that owns this kfd_process_device. */
	struct kfd_process *process;

	/* per-process-per device QCM data structure */
	struct qcm_process_device qpd;

	/* Apertures */
	uint64_t lds_base;
	uint64_t lds_limit;
	uint64_t gpuvm_base;
	uint64_t gpuvm_limit;
	uint64_t scratch_base;
	uint64_t scratch_limit;

	/* VM context for GPUVM allocations */
	struct file *drm_file;
	void *drm_priv;

	/* GPUVM allocations storage */
	struct idr alloc_idr;

	/* Flag used to tell the pdd has dequeued from the dqm.
	 * This is used to prevent dev->dqm->ops.process_termination() from
	 * being called twice when it is already called in the IOMMU callback
	 * function.
	 */
	bool already_dequeued;
	bool runtime_inuse;

	/* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */
	enum kfd_pdd_bound bound;

	/* VRAM usage */
	uint64_t vram_usage;
	struct attribute attr_vram;
	char vram_filename[MAX_SYSFS_FILENAME_LEN];

	/* SDMA activity tracking */
	uint64_t sdma_past_activity_counter;
	struct attribute attr_sdma;
	char sdma_filename[MAX_SYSFS_FILENAME_LEN];

	/* Eviction activity tracking */
	uint64_t last_evict_timestamp;
	atomic64_t evict_duration_counter;
	struct attribute attr_evict;

	struct kobject *kobj_stats;
	unsigned int doorbell_index;

	/*
	 * @cu_occupancy: Reports occupancy of Compute Units (CU) of a process
	 * that is associated with the device encoded by "this" struct
	 * instance. The value reflects CU usage by all of the waves launched
	 * by this process on this device. An important property of the
	 * occupancy parameter is that its value is a snapshot of current use.
	 *
	 * The following is to be noted regarding how this parameter is
	 * reported:
	 *
	 * The number of waves that a CU can launch is limited by a couple of
	 * parameters. These are encoded by the struct amdgpu_cu_info instance
	 * that is part of every device definition. For GFX9 devices this
	 * translates to 40 waves (simd_per_cu * max_waves_per_simd) when
	 * waves do not use scratch memory and 32 waves
	 * (max_scratch_slots_per_cu) when they do use scratch memory. This
	 * could change for future devices and therefore this example should
	 * be considered as a guide.
	 *
	 * All CUs of a device are available for the process. This may not be
	 * true under certain conditions - e.g. CU masking.
	 *
	 * Finally, the number of CUs that are occupied by a process is
	 * affected by both the number of CUs a device has and the number of
	 * other competing processes.
	 */
	struct attribute attr_cu_occupancy;

	/* sysfs counters for GPU retry fault and page migration tracking */
	struct kobject *kobj_counters;
	struct attribute attr_faults;
	struct attribute attr_page_in;
	struct attribute attr_page_out;
	uint64_t faults;
	uint64_t page_in;
	uint64_t page_out;
	/*
	 * If this process has been checkpointed before, then the user
	 * application will use the original gpu_id on the
	 * checkpointed node to refer to this device.
	 */
	uint32_t user_gpu_id;
};

#define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)

struct svm_range_list {
	struct mutex lock;
	struct rb_root_cached objects;
	struct list_head list;
	struct work_struct deferred_list_work;
	struct list_head deferred_range_list;
	spinlock_t deferred_list_lock;
	atomic_t evicted_ranges;
	atomic_t drain_pagefaults;
	struct delayed_work restore_work;
	DECLARE_BITMAP(bitmap_supported, MAX_GPU_INSTANCE);
	struct task_struct *faulting_task;
};

/* Process data */
struct kfd_process {
	/*
	 * kfd_process are stored in an mm_struct*->kfd_process*
	 * hash table (kfd_processes in kfd_process.c)
	 */
	struct hlist_node kfd_processes;

	/*
	 * Opaque pointer to mm_struct. We don't hold a reference to
	 * it so it should never be dereferenced from here. This is
	 * only used for looking up processes by their mm.
	 */
	void *mm;

	struct kref ref;
	struct work_struct release_work;

	struct mutex mutex;

	/*
	 * In any process, the thread that started main() is the lead
	 * thread and outlives the rest.
	 * It is here because amd_iommu_bind_pasid wants a task_struct.
	 * It can also be used for safely getting a reference to the
	 * mm_struct of the process.
	 */
	struct task_struct *lead_thread;

	/* We want to receive a notification when the mm_struct is destroyed */
	struct mmu_notifier mmu_notifier;

	u32 pasid;

	/*
	 * Array of kfd_process_device pointers,
	 * one for each device the process is using.
	 */
	struct kfd_process_device *pdds[MAX_GPU_INSTANCE];
	uint32_t n_pdds;

	struct process_queue_manager pqm;

	/* Is the user space process 32 bit? */
	bool is_32bit_user_mode;

	/* Event-related data */
	struct mutex event_mutex;
	/* Event ID allocator and lookup */
	struct idr event_idr;
	/* Event page */
	u64 signal_handle;
	struct kfd_signal_page *signal_page;
	size_t signal_mapped_size;
	size_t signal_event_count;
	bool signal_event_limit_reached;

	/* Information used for memory eviction */
	void *kgd_process_info;
	/* Eviction fence that is attached to all the BOs of this process. The
	 * fence will be triggered during eviction and a new one will be
	 * created during restore
	 */
	struct dma_fence *ef;

	/* Work items for evicting and restoring BOs */
	struct delayed_work eviction_work;
	struct delayed_work restore_work;
	/* seqno of the last scheduled eviction */
	unsigned int last_eviction_seqno;
	/* Approx. the last timestamp (in jiffies) when the process was
	 * restored after an eviction
	 */
	unsigned long last_restore_timestamp;

	/* Kobj for our procfs */
	struct kobject *kobj;
	struct kobject *kobj_queues;
	struct attribute attr_pasid;

	/* shared virtual memory registered by this process */
	struct svm_range_list svms;

	bool xnack_enabled;

	atomic_t poison;
	/* Queues are in a paused state because we are in the process of
	 * doing a CRIU checkpoint
	 */
	bool queues_paused;
};

#define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */
extern DECLARE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
extern struct srcu_struct kfd_processes_srcu;

/**
 * typedef amdkfd_ioctl_t - typedef for ioctl function pointer.
 *
 * @filep: pointer to file structure.
 * @p: amdkfd process pointer.
 * @data: pointer to arg that was copied from user.
 *
 * Return: returns ioctl completion code.
 */
typedef int amdkfd_ioctl_t(struct file *filep, struct kfd_process *p,
				void *data);

struct amdkfd_ioctl_desc {
	unsigned int cmd;
	int flags;
	amdkfd_ioctl_t *func;
	unsigned int cmd_drv;
	const char *name;
};
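
/*
 * Illustrative sketch (hypothetical, not from this driver): an ioctl table
 * entry ties a command code to its handler and flags, roughly:
 *
 *	static const struct amdkfd_ioctl_desc desc = {
 *		.cmd = ...,			// ioctl command code
 *		.flags = KFD_IOC_FLAG_CHECKPOINT_RESTORE,
 *		.func = kfd_ioctl_foo,		// an amdkfd_ioctl_t handler
 *		.name = "AMDKFD_IOC_FOO",	// hypothetical name
 *	};
 *
 * The chardev dispatcher then copies the user argument and invokes
 * desc.func(filep, p, data).
 */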

bool kfd_dev_is_large_bar(struct kfd_dev *dev);

int kfd_process_create_wq(void);
void kfd_process_destroy_wq(void);
struct kfd_process *kfd_create_process(struct file *filep);
struct kfd_process *kfd_get_process(const struct task_struct *);
struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid);
struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm);

int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id);
int kfd_process_gpuid_from_adev(struct kfd_process *p,
			       struct amdgpu_device *adev, uint32_t *gpuid,
			       uint32_t *gpuidx);
static inline int kfd_process_gpuid_from_gpuidx(struct kfd_process *p,
				uint32_t gpuidx, uint32_t *gpuid) {
	return gpuidx < p->n_pdds ? p->pdds[gpuidx]->dev->id : -EINVAL;
}
static inline struct kfd_process_device *kfd_process_device_from_gpuidx(
				struct kfd_process *p, uint32_t gpuidx) {
	return gpuidx < p->n_pdds ? p->pdds[gpuidx] : NULL;
}

void kfd_unref_process(struct kfd_process *p);
int kfd_process_evict_queues(struct kfd_process *p);
int kfd_process_restore_queues(struct kfd_process *p);
void kfd_suspend_all_processes(void);
int kfd_resume_all_processes(void);

struct kfd_process_device *kfd_process_device_data_by_id(struct kfd_process *process,
							 uint32_t gpu_id);

int kfd_process_get_user_gpu_id(struct kfd_process *p, uint32_t actual_gpu_id);

int kfd_process_device_init_vm(struct kfd_process_device *pdd,
			       struct file *drm_file);
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
						      struct kfd_process *p);
struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
						       struct kfd_process *p);
struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
							  struct kfd_process *p);

bool kfd_process_xnack_mode(struct kfd_process *p, bool supported);

int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
			  struct vm_area_struct *vma);

/* KFD process API for creating and translating handles */
int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
					 void *mem);
void *kfd_process_device_translate_handle(struct kfd_process_device *p,
					  int handle);
void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
					  int handle);
struct kfd_process *kfd_lookup_process_by_pid(struct pid *pid);

/* PASIDs */
int kfd_pasid_init(void);
void kfd_pasid_exit(void);
bool kfd_set_pasid_limit(unsigned int new_limit);
unsigned int kfd_get_pasid_limit(void);
u32 kfd_pasid_alloc(void);
void kfd_pasid_free(u32 pasid);

/* Doorbells */
size_t kfd_doorbell_process_slice(struct kfd_dev *kfd);
int kfd_doorbell_init(struct kfd_dev *kfd);
void kfd_doorbell_fini(struct kfd_dev *kfd);
int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process,
		      struct vm_area_struct *vma);
void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
				      unsigned int *doorbell_off);
void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr);
u32 read_kernel_doorbell(u32 __iomem *db);
void write_kernel_doorbell(void __iomem *db, u32 value);
void write_kernel_doorbell64(void __iomem *db, u64 value);
unsigned int kfd_get_doorbell_dw_offset_in_bar(struct kfd_dev *kfd,
					       struct kfd_process_device *pdd,
					       unsigned int doorbell_id);
phys_addr_t kfd_get_process_doorbells(struct kfd_process_device *pdd);
int kfd_alloc_process_doorbells(struct kfd_dev *kfd,
				unsigned int *doorbell_index);
void kfd_free_process_doorbells(struct kfd_dev *kfd,
				unsigned int doorbell_index);

/* GTT Sub-Allocator */
int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
			struct kfd_mem_obj **mem_obj);

int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj);

extern struct device *kfd_device;

/* KFD's procfs */
void kfd_procfs_init(void);
void kfd_procfs_shutdown(void);
int kfd_procfs_add_queue(struct queue *q);
void kfd_procfs_del_queue(struct queue *q);

/* Topology */
int kfd_topology_init(void);
void kfd_topology_shutdown(void);
int kfd_topology_add_device(struct kfd_dev *gpu);
int kfd_topology_remove_device(struct kfd_dev *gpu);
struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
		uint32_t proximity_domain);
struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id);
struct kfd_dev *kfd_device_by_id(uint32_t gpu_id);
struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
struct kfd_dev *kfd_device_by_adev(const struct amdgpu_device *adev);
int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev);
int kfd_numa_node_to_apic_id(int numa_node_id);
void kfd_double_confirm_iommu_support(struct kfd_dev *gpu);

/* Interrupts */
int kfd_interrupt_init(struct kfd_dev *dev);
void kfd_interrupt_exit(struct kfd_dev *dev);
bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry);
bool interrupt_is_wanted(struct kfd_dev *dev,
			 const uint32_t *ih_ring_entry,
			 uint32_t *patched_ihre, bool *flag);

/* amdkfd Apertures */
int kfd_init_apertures(struct kfd_process *process);

void kfd_process_set_trap_handler(struct qcm_process_device *qpd,
				  uint64_t tba_addr,
				  uint64_t tma_addr);

/* CRIU */
/*
 * Need to increment KFD_CRIU_PRIV_VERSION each time a change is made to any
 * of the CRIU private structures:
 * kfd_criu_process_priv_data
 * kfd_criu_device_priv_data
 * kfd_criu_bo_priv_data
 * kfd_criu_queue_priv_data
 * kfd_criu_event_priv_data
 * kfd_criu_svm_range_priv_data
 */

#define KFD_CRIU_PRIV_VERSION 1

struct kfd_criu_process_priv_data {
	uint32_t version;
};

struct kfd_criu_device_priv_data {
	/* For future use */
	uint64_t reserved;
};

struct kfd_criu_bo_priv_data {
	uint64_t user_addr;
	uint32_t idr_handle;
	uint32_t mapped_gpuids[MAX_GPU_INSTANCE];
};

/*
 * The first 4 bytes of kfd_criu_queue_priv_data, kfd_criu_event_priv_data
 * and kfd_criu_svm_range_priv_data are the object type
 */
enum kfd_criu_object_type {
	KFD_CRIU_OBJECT_TYPE_QUEUE,
	KFD_CRIU_OBJECT_TYPE_EVENT,
	KFD_CRIU_OBJECT_TYPE_SVM_RANGE,
};

struct kfd_criu_svm_range_priv_data {
	uint32_t object_type;
	uint32_t reserved;
};

struct kfd_criu_queue_priv_data {
	uint32_t object_type;
	uint64_t q_address;
	uint64_t q_size;
	uint64_t read_ptr_addr;
	uint64_t write_ptr_addr;
	uint64_t doorbell_off;
	uint64_t eop_ring_buffer_address;
	uint64_t ctx_save_restore_area_address;
	uint32_t gpu_id;
	uint32_t type;
	uint32_t format;
	uint32_t q_id;
	uint32_t priority;
	uint32_t q_percent;
	uint32_t doorbell_id;
	uint32_t is_gws;
	uint32_t sdma_id;
	uint32_t eop_ring_buffer_size;
	uint32_t ctx_save_restore_area_size;
	uint32_t ctl_stack_size;
	uint32_t mqd_size;
};

struct kfd_criu_event_priv_data {
	uint32_t object_type;
	uint64_t user_handle;
	uint32_t event_id;
	uint32_t auto_reset;
	uint32_t type;
	uint32_t signaled;

	union {
		struct kfd_hsa_memory_exception_data memory_exception_data;
		struct kfd_hsa_hw_exception_data hw_exception_data;
	};
};
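
/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * because kfd_criu_queue_priv_data, kfd_criu_event_priv_data and
 * kfd_criu_svm_range_priv_data all start with a uint32_t object type, a
 * restorer can peek at the first 4 bytes of a priv data blob to dispatch on
 * the object kind.
 */
static inline enum kfd_criu_object_type
kfd_example_criu_object_type(const void *priv_data)
{
	/* The object type is always the first 4 bytes of the blob */
	return *(const uint32_t *)priv_data;
}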

int kfd_process_get_queue_info(struct kfd_process *p,
			       uint32_t *num_queues,
			       uint64_t *priv_data_sizes);

int kfd_criu_checkpoint_queues(struct kfd_process *p,
			       uint8_t __user *user_priv_data,
			       uint64_t *priv_data_offset);

int kfd_criu_restore_queue(struct kfd_process *p,
			   uint8_t __user *user_priv_data,
			   uint64_t *priv_data_offset,
			   uint64_t max_priv_data_size);

int kfd_criu_checkpoint_events(struct kfd_process *p,
			       uint8_t __user *user_priv_data,
			       uint64_t *priv_data_offset);

int kfd_criu_restore_event(struct file *devkfd,
			   struct kfd_process *p,
			   uint8_t __user *user_priv_data,
			   uint64_t *priv_data_offset,
			   uint64_t max_priv_data_size);
/* CRIU - End */

/* Queue Context Management */
int init_queue(struct queue **q, const struct queue_properties *properties);
void uninit_queue(struct queue *q);
void print_queue_properties(struct queue_properties *q);
void print_queue(struct queue *q);

struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
					 struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_cik_hawaii(enum KFD_MQD_TYPE type,
						struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
					struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_vi_tonga(enum KFD_MQD_TYPE type,
					      struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
					struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
					 struct kfd_dev *dev);
struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev);
void device_queue_manager_uninit(struct device_queue_manager *dqm);
struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
				       enum kfd_queue_type type);
void kernel_queue_uninit(struct kernel_queue *kq, bool hanging);
int kfd_process_vm_fault(struct device_queue_manager *dqm, u32 pasid);

/* Process Queue Manager */
struct process_queue_node {
	struct queue *q;
	struct kernel_queue *kq;
	struct list_head process_queue_list;
};

void kfd_process_dequeue_from_device(struct kfd_process_device *pdd);
void kfd_process_dequeue_from_all_devices(struct kfd_process *p);
int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p);
void pqm_uninit(struct process_queue_manager *pqm);
int pqm_create_queue(struct process_queue_manager *pqm,
		     struct kfd_dev *dev,
		     struct file *f,
		     struct queue_properties *properties,
		     unsigned int *qid,
		     const struct kfd_criu_queue_priv_data *q_data,
		     const void *restore_mqd,
		     const void *restore_ctl_stack,
		     uint32_t *p_doorbell_offset_in_process);
int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid);
int pqm_update_queue_properties(struct process_queue_manager *pqm,
				unsigned int qid,
				struct queue_properties *p);
int pqm_update_mqd(struct process_queue_manager *pqm, unsigned int qid,
		   struct mqd_update_info *minfo);
int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
		void *gws);
struct kernel_queue *pqm_get_kernel_queue(struct process_queue_manager *pqm,
					  unsigned int qid);
struct queue *pqm_get_user_queue(struct process_queue_manager *pqm,
				 unsigned int qid);
int pqm_get_wave_state(struct process_queue_manager *pqm,
		       unsigned int qid,
		       void __user *ctl_stack,
		       u32 *ctl_stack_used_size,
		       u32 *save_area_used_size);

int amdkfd_fence_wait_timeout(uint64_t *fence_addr,
			      uint64_t fence_value,
			      unsigned int timeout_ms);

int pqm_get_queue_checkpoint_info(struct process_queue_manager *pqm,
				  unsigned int qid,
				  u32 *mqd_size,
				  u32 *ctl_stack_size);

/* Packet Manager */

#define KFD_FENCE_COMPLETED (100)
#define KFD_FENCE_INIT   (10)
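
/*
 * Illustrative sketch (assumed flow, with hypothetical variable names): the
 * fence protocol behind these values. pm_send_query_status() emits a packet
 * that writes fence_value to fence_address once the CP has caught up, and
 * amdkfd_fence_wait_timeout() then polls the fence memory, roughly:
 *
 *	pm_send_query_status(pm, fence_gpu_addr, KFD_FENCE_COMPLETED);
 *	amdkfd_fence_wait_timeout(fence_cpu_addr, KFD_FENCE_COMPLETED,
 *				  KFD_UNMAP_LATENCY_MS);
 */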

struct packet_manager {
	struct device_queue_manager *dqm;
	struct kernel_queue *priv_queue;
	struct mutex lock;
	bool allocated;
	struct kfd_mem_obj *ib_buffer_obj;
	unsigned int ib_size_bytes;
	bool is_over_subscription;

	const struct packet_manager_funcs *pmf;
};

struct packet_manager_funcs {
	/* Support ASIC-specific packet formats for PM4 packets */
	int (*map_process)(struct packet_manager *pm, uint32_t *buffer,
			   struct qcm_process_device *qpd);
	int (*runlist)(struct packet_manager *pm, uint32_t *buffer,
		       uint64_t ib, size_t ib_size_in_dwords, bool chain);
	int (*set_resources)(struct packet_manager *pm, uint32_t *buffer,
			     struct scheduling_resources *res);
	int (*map_queues)(struct packet_manager *pm, uint32_t *buffer,
			  struct queue *q, bool is_static);
	int (*unmap_queues)(struct packet_manager *pm, uint32_t *buffer,
			    enum kfd_queue_type type,
			    enum kfd_unmap_queues_filter mode,
			    uint32_t filter_param, bool reset,
			    unsigned int sdma_engine);
	int (*query_status)(struct packet_manager *pm, uint32_t *buffer,
			    uint64_t fence_address, uint64_t fence_value);
	int (*release_mem)(uint64_t gpu_addr, uint32_t *buffer);

	/* Packet sizes */
	int map_process_size;
	int runlist_size;
	int set_resources_size;
	int map_queues_size;
	int unmap_queues_size;
	int query_status_size;
	int release_mem_size;
};

extern const struct packet_manager_funcs kfd_vi_pm_funcs;
extern const struct packet_manager_funcs kfd_v9_pm_funcs;
extern const struct packet_manager_funcs kfd_aldebaran_pm_funcs;

int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
void pm_uninit(struct packet_manager *pm, bool hanging);
int pm_send_set_resources(struct packet_manager *pm,
			  struct scheduling_resources *res);
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues);
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
			 uint64_t fence_value);

int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
			enum kfd_unmap_queues_filter mode,
			uint32_t filter_param, bool reset,
			unsigned int sdma_engine);

void pm_release_ib(struct packet_manager *pm);

/* Following PM funcs can be shared among VI and AI */
unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size);

uint64_t kfd_get_number_elems(struct kfd_dev *kfd);

/* Events */
extern const struct kfd_event_interrupt_class event_interrupt_class_cik;
extern const struct kfd_event_interrupt_class event_interrupt_class_v9;

extern const struct kfd_device_global_init_class device_global_init_class_cik;

void kfd_event_init_process(struct kfd_process *p);
void kfd_event_free_process(struct kfd_process *p);
int kfd_event_mmap(struct kfd_process *process, struct vm_area_struct *vma);
int kfd_wait_on_events(struct kfd_process *p,
		       uint32_t num_events, void __user *data,
		       bool all, uint32_t user_timeout_ms,
		       uint32_t *wait_result);
void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
				uint32_t valid_id_bits);
void kfd_signal_iommu_event(struct kfd_dev *dev,
			    u32 pasid, unsigned long address,
			    bool is_write_requested, bool is_execute_requested);
void kfd_signal_hw_exception_event(u32 pasid);
int kfd_set_event(struct kfd_process *p, uint32_t event_id);
int kfd_reset_event(struct kfd_process *p, uint32_t event_id);
int kfd_kmap_event_page(struct kfd_process *p, uint64_t event_page_offset);

int kfd_event_create(struct file *devkfd, struct kfd_process *p,
		     uint32_t event_type, bool auto_reset, uint32_t node_id,
		     uint32_t *event_id, uint32_t *event_trigger_data,
		     uint64_t *event_page_offset, uint32_t *event_slot_index);

int kfd_get_num_events(struct kfd_process *p);
int kfd_event_destroy(struct kfd_process *p, uint32_t event_id);

void kfd_signal_vm_fault_event(struct kfd_dev *dev, u32 pasid,
			       struct kfd_vm_fault_info *info);

void kfd_signal_reset_event(struct kfd_dev *dev);

void kfd_signal_poison_consumed_event(struct kfd_dev *dev, u32 pasid);

void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type);

int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p);

bool kfd_is_locked(void);

/* Compute profile */
void kfd_inc_compute_active(struct kfd_dev *dev);
void kfd_dec_compute_active(struct kfd_dev *dev);

/* Cgroup Support */
/* Check with device cgroup if @kfd device is accessible */
static inline int kfd_devcgroup_check_permission(struct kfd_dev *kfd)
{
#if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF)
	struct drm_device *ddev = kfd->ddev;

	return devcgroup_check_permission(DEVCG_DEV_CHAR, DRM_MAJOR,
					  ddev->render->index,
					  DEVCG_ACC_WRITE | DEVCG_ACC_READ);
#else
	return 0;
#endif
}

/* Debugfs */
#if defined(CONFIG_DEBUG_FS)

void kfd_debugfs_init(void);
void kfd_debugfs_fini(void);
int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data);
int pqm_debugfs_mqds(struct seq_file *m, void *data);
int kfd_debugfs_hqds_by_device(struct seq_file *m, void *data);
int dqm_debugfs_hqds(struct seq_file *m, void *data);
int kfd_debugfs_rls_by_device(struct seq_file *m, void *data);
int pm_debugfs_runlist(struct seq_file *m, void *data);

int kfd_debugfs_hang_hws(struct kfd_dev *dev);
int pm_debugfs_hang_hws(struct packet_manager *pm);
int dqm_debugfs_hang_hws(struct device_queue_manager *dqm);

#else

static inline void kfd_debugfs_init(void) {}
static inline void kfd_debugfs_fini(void) {}

#endif

#endif