/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef KFD_PRIV_H_INCLUDED
#define KFD_PRIV_H_INCLUDED

#include <linux/hashtable.h>
#include <linux/mmu_notifier.h>
#include <linux/memremap.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kfd_ioctl.h>
#include <linux/idr.h>
#include <linux/kfifo.h>
#include <linux/seq_file.h>
#include <linux/kref.h>
#include <linux/sysfs.h>
#include <linux/device_cgroup.h>
#include <drm/drm_file.h>
#include <drm/drm_drv.h>
#include <drm/drm_device.h>
#include <drm/drm_ioctl.h>
#include <kgd_kfd_interface.h>
#include <linux/swap.h>

#include "amd_shared.h"
#include "amdgpu.h"

#define KFD_MAX_RING_ENTRY_SIZE	8

#define KFD_SYSFS_FILE_MODE 0444

/* GPU ID hash width in bits */
#define KFD_GPU_ID_HASH_WIDTH 16

/* Use upper bits of mmap offset to store KFD driver specific information.
 * BITS[63:62] - Encode MMAP type
 * BITS[61:46] - Encode gpu_id. Identifies the GPU to which the offset belongs
 * BITS[45:0]  - MMAP offset value
 *
 * NOTE: struct vm_area_struct.vm_pgoff uses offset in pages. Hence, these
 * defines are w.r.t. PAGE_SIZE
 */
#define KFD_MMAP_TYPE_SHIFT	62
#define KFD_MMAP_TYPE_MASK	(0x3ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_DOORBELL	(0x3ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_EVENTS	(0x2ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_RESERVED_MEM	(0x1ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_MMIO	(0x0ULL << KFD_MMAP_TYPE_SHIFT)

#define KFD_MMAP_GPU_ID_SHIFT 46
#define KFD_MMAP_GPU_ID_MASK (((1ULL << KFD_GPU_ID_HASH_WIDTH) - 1) \
				<< KFD_MMAP_GPU_ID_SHIFT)
#define KFD_MMAP_GPU_ID(gpu_id) ((((uint64_t)gpu_id) << KFD_MMAP_GPU_ID_SHIFT)\
				& KFD_MMAP_GPU_ID_MASK)
#define KFD_MMAP_GET_GPU_ID(offset)    ((offset & KFD_MMAP_GPU_ID_MASK) \
				>> KFD_MMAP_GPU_ID_SHIFT)
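/*
 * Illustrative sketch (not part of the driver) of composing and decoding an
 * mmap offset with the macros above. The type and gpu_id land in the upper
 * bits; remember that vm_area_struct.vm_pgoff stores this value in pages.
 *
 *	u64 offset = KFD_MMAP_TYPE_DOORBELL | KFD_MMAP_GPU_ID(gpu_id);
 *
 *	switch (offset & KFD_MMAP_TYPE_MASK) {
 *	case KFD_MMAP_TYPE_DOORBELL:
 *		gpu_id = KFD_MMAP_GET_GPU_ID(offset);
 *		break;
 *	}
 */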
/*
 * When working with the CP scheduler we should assign the HIQ manually or
 * via the amdgpu driver to a fixed HQD slot. Here are the fixed HIQ HQD slot
 * definitions for Kaveri. On Kaveri only the first ME's queues participate
 * in CP scheduling; with that in mind we set the HIQ slot in the second ME.
 */
#define KFD_CIK_HIQ_PIPE 4
#define KFD_CIK_HIQ_QUEUE 0

/* Macro for allocating structures */
#define kfd_alloc_struct(ptr_to_struct)	\
	((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL))
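/*
 * Usage sketch (hypothetical caller): the pointee type of the pointer passed
 * in drives the allocation size, so the struct name is not repeated.
 *
 *	struct kfd_mem_obj *mem_obj;
 *
 *	mem_obj = kfd_alloc_struct(mem_obj);
 *	if (!mem_obj)
 *		return -ENOMEM;
 */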
#define KFD_MAX_NUM_OF_PROCESSES 512
#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024

/*
 * Size of the per-process TBA+TMA buffer: 2 pages
 *
 * The first page is the TBA used for the CWSR ISA code. The second
 * page is used as TMA for user-mode trap handler setup in daisy-chain mode.
 */
#define KFD_CWSR_TBA_TMA_SIZE (PAGE_SIZE * 2)
#define KFD_CWSR_TMA_OFFSET PAGE_SIZE

#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE		\
	(KFD_MAX_NUM_OF_PROCESSES *			\
			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)

#define KFD_KERNEL_QUEUE_SIZE 2048

#define KFD_UNMAP_LATENCY_MS	(4000)

#define KFD_MAX_SDMA_QUEUES	128

/*
 * 512 = 0x200
 * The doorbell index distance between SDMA RLC (2*i) and (2*i+1) in the
 * same SDMA engine on SOC15, which has 8-byte doorbells for SDMA.
 * 512 8-byte doorbell distance (i.e. one page away) ensures that SDMA RLC
 * (2*i+1) doorbells (in terms of the lower 12 bit address) lie exactly in
 * the OFFSET and SIZE set in registers like BIF_SDMA0_DOORBELL_RANGE.
 */
#define KFD_QUEUE_DOORBELL_MIRROR_OFFSET 512

/**
 * enum kfd_ioctl_flags - KFD ioctl flags
 * Various flags that can be set in &amdkfd_ioctl_desc.flags to control how
 * userspace can use a given ioctl.
 */
enum kfd_ioctl_flags {
	/*
	 * @KFD_IOC_FLAG_CHECKPOINT_RESTORE:
	 * Certain KFD ioctls such as AMDKFD_IOC_CRIU_OP can potentially
	 * perform privileged operations and load arbitrary data into MQDs and
	 * eventually HQD registers when the queue is mapped by HWS. In order
	 * to prevent this we should perform additional security checks. This
	 * is equivalent to requiring callers to hold the CHECKPOINT_RESTORE
	 * capability.
	 *
	 * Note: Since earlier versions of Docker do not support
	 * CHECKPOINT_RESTORE, we also allow ioctls with SYS_ADMIN capability.
	 */
	KFD_IOC_FLAG_CHECKPOINT_RESTORE = BIT(0),
};

/*
 * Kernel module parameter to specify maximum number of supported queues per
 * device
 */
extern int max_num_of_queues_per_device;

/* Kernel module parameter to specify the scheduling policy */
extern int sched_policy;

/*
 * Kernel module parameter to specify the maximum process
 * number per HW scheduler
 */
extern int hws_max_conc_proc;

extern int cwsr_enable;

/*
 * Kernel module parameter to specify whether to send SIGTERM to an HSA
 * process on an unhandled exception
 */
extern int send_sigterm;

/*
 * Kernel module parameter used to simulate a large-BAR machine on machines
 * without large BAR support.
 */
extern int debug_largebar;

/*
 * Ignore CRAT table during KFD initialization; can be used to work around
 * broken CRAT tables on some AMD systems
 */
extern int ignore_crat;

/* Set sh_mem_config.retry_disable on GFX v9 */
extern int amdgpu_noretry;

/* Halt if HWS hang is detected */
extern int halt_if_hws_hang;

/* Whether MEC FW supports GWS barriers */
extern bool hws_gws_support;

/* Queue preemption timeout in ms */
extern int queue_preemption_timeout_ms;

/* Don't evict process queues on vm fault */
extern int amdgpu_no_queue_eviction_on_vm_fault;

/* Enable eviction debug messages */
extern bool debug_evictions;

extern struct mutex kfd_processes_mutex;

enum cache_policy {
	cache_policy_coherent,
	cache_policy_noncoherent
};

#define KFD_GC_VERSION(dev) ((dev)->adev->ip_versions[GC_HWIP][0])
#define KFD_IS_SOC15(dev)   ((KFD_GC_VERSION(dev)) >= (IP_VERSION(9, 0, 1)))
#define KFD_SUPPORT_XNACK_PER_PROCESS(dev)\
	((KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2)) ||	\
	 (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3)))
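/*
 * Illustrative sketch (not from the driver) of gating a code path on the GC
 * IP version with the helpers above; setup_gfx9() is a hypothetical helper.
 *
 *	if (KFD_IS_SOC15(dev) && KFD_GC_VERSION(dev) < IP_VERSION(10, 1, 1))
 *		setup_gfx9(dev);
 */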
struct kfd_node;

struct kfd_event_interrupt_class {
	bool (*interrupt_isr)(struct kfd_node *dev,
			const uint32_t *ih_ring_entry, uint32_t *patched_ihre,
			bool *patched_flag);
	void (*interrupt_wq)(struct kfd_node *dev,
			const uint32_t *ih_ring_entry);
};

struct kfd_device_info {
	uint32_t gfx_target_version;
	const struct kfd_event_interrupt_class *event_interrupt_class;
	unsigned int max_pasid_bits;
	unsigned int max_no_of_hqd;
	unsigned int doorbell_size;
	size_t ih_ring_entry_size;
	uint8_t num_of_watch_points;
	uint16_t mqd_size_aligned;
	bool supports_cwsr;
	bool needs_iommu_device;
	bool needs_pci_atomics;
	uint32_t no_atomic_fw_version;
	unsigned int num_sdma_queues_per_engine;
	unsigned int num_reserved_sdma_queues_per_engine;
	uint64_t reserved_sdma_queues_bitmap;
};

unsigned int kfd_get_num_sdma_engines(struct kfd_node *kdev);
unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_node *kdev);

struct kfd_mem_obj {
	uint32_t range_start;
	uint32_t range_end;
	uint64_t gpu_addr;
	uint32_t *cpu_ptr;
	void *gtt_mem;
};

struct kfd_vmid_info {
	uint32_t first_vmid_kfd;
	uint32_t last_vmid_kfd;
	uint32_t vmid_num_kfd;
};

#define MAX_KFD_NODES	8

struct kfd_dev;

struct kfd_node {
	unsigned int node_id;
	struct amdgpu_device *adev;	/* Duplicated here along with keeping
					 * a copy in kfd_dev to save a hop
					 */
	const struct kfd2kgd_calls *kfd2kgd; /* Duplicated here along with
					      * keeping a copy in kfd_dev to
					      * save a hop
					      */
	struct kfd_vmid_info vm_info;
	unsigned int id;		/* topology stub index */
	uint32_t xcc_mask;		/* Instance mask of XCCs present */
	struct amdgpu_xcp *xcp;

	/* Interrupts */
	struct kfifo ih_fifo;
	struct workqueue_struct *ih_wq;
	struct work_struct interrupt_work;
	spinlock_t interrupt_lock;

	/*
	 * Interrupts of interest to KFD are copied
	 * from the HW ring into a SW ring.
	 */
	bool interrupts_active;
	uint32_t interrupt_bitmap; /* Only used for GFX 9.4.3 */

	/* QCM Device instance */
	struct device_queue_manager *dqm;

	/* Global GWS resource shared between processes */
	void *gws;
	bool gws_debug_workaround;

	/* Clients watching SMI events */
	struct list_head smi_clients;
	spinlock_t smi_lock;
	uint32_t reset_seq_num;

	/* SRAM ECC flag */
	atomic_t sram_ecc_flag;

	/* SPM process id */
	unsigned int spm_pasid;

	/* Maximum process number mapped to HW scheduler */
	unsigned int max_proc_per_quantum;

	unsigned int compute_vmid_bitmap;

	struct kfd_local_mem_info local_mem_info;

	struct kfd_dev *kfd;
};

struct kfd_dev {
	struct amdgpu_device *adev;

	struct kfd_device_info device_info;

	phys_addr_t doorbell_base;	/* Start of actual doorbells used by
					 * KFD. It is aligned for mapping
					 * into user mode
					 */
	size_t doorbell_base_dw_offset;	/* Offset from the start of the PCI
					 * doorbell BAR to the first KFD
					 * doorbell in dwords. GFX reserves
					 * the segment before this offset.
					 */
	u32 __iomem *doorbell_kernel_ptr; /* This is a pointer for a doorbells
					   * page used by kernel queue
					   */

	struct kgd2kfd_shared_resources shared_resources;

	const struct kfd2kgd_calls *kfd2kgd;
	struct mutex doorbell_mutex;
	DECLARE_BITMAP(doorbell_available_index,
			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);

	void *gtt_mem;
	uint64_t gtt_start_gpu_addr;
	void *gtt_start_cpu_ptr;
	void *gtt_sa_bitmap;
	struct mutex gtt_sa_lock;
	unsigned int gtt_sa_chunk_size;
	unsigned int gtt_sa_num_of_chunks;

	bool init_complete;

	/* Firmware versions */
	uint16_t mec_fw_version;
	uint16_t mec2_fw_version;
	uint16_t sdma_fw_version;

	/* CWSR */
	bool cwsr_enabled;
	const void *cwsr_isa;
	unsigned int cwsr_isa_size;

	/* xGMI */
	uint64_t hive_id;

	bool pci_atomic_requested;

	/* Use IOMMU v2 flag */
	bool use_iommu_v2;

	/* Compute Profile ref. count */
	atomic_t compute_profile;

	struct ida doorbell_ida;
	unsigned int max_doorbell_slices;

	int noretry;

	struct kfd_node *nodes[MAX_KFD_NODES];
	unsigned int num_nodes;
};

enum kfd_mempool {
	KFD_MEMPOOL_SYSTEM_CACHEABLE = 1,
	KFD_MEMPOOL_SYSTEM_WRITECOMBINE = 2,
	KFD_MEMPOOL_FRAMEBUFFER = 3,
};

/* Character device interface */
int kfd_chardev_init(void);
void kfd_chardev_exit(void);

/**
 * enum kfd_unmap_queues_filter - Enum for queue filters.
 *
 * @KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES: Preempts all queues in the
 * running queues list.
 *
 * @KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES: Preempts all non-static queues
 * in the run list.
 *
 * @KFD_UNMAP_QUEUES_FILTER_BY_PASID: Preempts queues that belong to a
 * specific process.
 */
enum kfd_unmap_queues_filter {
	KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES = 1,
	KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES = 2,
	KFD_UNMAP_QUEUES_FILTER_BY_PASID = 3
};
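/*
 * Usage sketch (illustrative): preempting every queue of one process by
 * PASID via the packet manager helper declared later in this header.
 *
 *	pm_send_unmap_queue(pm, KFD_UNMAP_QUEUES_FILTER_BY_PASID,
 *			    pasid, false);
 */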
/**
 * enum kfd_queue_type - Enum for various queue types.
 *
 * @KFD_QUEUE_TYPE_COMPUTE: Regular user mode queue type.
 *
 * @KFD_QUEUE_TYPE_SDMA: SDMA user mode queue type.
 *
 * @KFD_QUEUE_TYPE_HIQ: HIQ queue type.
 *
 * @KFD_QUEUE_TYPE_DIQ: DIQ queue type.
 *
 * @KFD_QUEUE_TYPE_SDMA_XGMI: Special SDMA queue for XGMI interface.
 */
enum kfd_queue_type  {
	KFD_QUEUE_TYPE_COMPUTE,
	KFD_QUEUE_TYPE_SDMA,
	KFD_QUEUE_TYPE_HIQ,
	KFD_QUEUE_TYPE_DIQ,
	KFD_QUEUE_TYPE_SDMA_XGMI
};

enum kfd_queue_format {
	KFD_QUEUE_FORMAT_PM4,
	KFD_QUEUE_FORMAT_AQL
};

enum KFD_QUEUE_PRIORITY {
	KFD_QUEUE_PRIORITY_MINIMUM = 0,
	KFD_QUEUE_PRIORITY_MAXIMUM = 15
};

/**
 * struct queue_properties
 *
 * @type: The queue type.
 *
 * @queue_id: Queue identifier.
 *
 * @queue_address: Queue ring buffer address.
 *
 * @queue_size: Queue ring buffer size.
 *
 * @priority: Defines the queue priority relative to other queues in the
 * process.
 * This is just an indication and HW scheduling may override the priority as
 * necessary while keeping the relative prioritization.
 * The priority granularity ranges from 0 to 15, where 15 is the highest
 * priority. Currently all queues are initialized with the highest priority.
 *
 * @queue_percent: This field is partially implemented and currently a zero in
 * this field means that the queue is not active.
 *
 * @read_ptr: User space address which points to the number of dwords the
 * CP has read from the ring buffer. This field is updated automatically by
 * the H/W.
 *
 * @write_ptr: Defines the number of dwords written to the ring buffer.
 *
 * @doorbell_ptr: Notifies the H/W of a new packet written to the queue ring
 * buffer. This field should be similar to write_ptr and the user should
 * update this field after updating the write_ptr.
 *
 * @doorbell_off: The doorbell offset in the doorbell pci-bar.
 *
 * @is_interop: Defines if this is an interop queue. An interop queue can
 * access both graphics and compute resources.
 *
 * @is_evicted: Defines if the queue is evicted. Only active queues
 * are evicted, rendering them inactive.
 *
 * @is_active: Defines if the queue is active or not. @is_active and
 * @is_evicted are protected by the DQM lock.
 *
 * @is_gws: Defines if the queue has been updated to be GWS-capable or not.
 * @is_gws should be protected by the DQM lock, since changing it can yield the
 * possibility of updating DQM state on number of GWS queues.
 *
 * @vmid: If the scheduling mode is no cp scheduling, this field defines the
 * vmid of the queue.
 *
 * This structure represents the queue properties for each queue, no matter
 * whether it's a user mode or kernel mode queue.
 */
struct queue_properties {
	enum kfd_queue_type type;
	enum kfd_queue_format format;
	unsigned int queue_id;
	uint64_t queue_address;
	uint64_t queue_size;
	uint32_t priority;
	uint32_t queue_percent;
	uint32_t *read_ptr;
	uint32_t *write_ptr;
	void __iomem *doorbell_ptr;
	uint32_t doorbell_off;
	bool is_interop;
	bool is_evicted;
	bool is_active;
	bool is_gws;
	uint32_t pm4_target_xcc;
	/* Not relevant for user mode queues in cp scheduling */
	unsigned int vmid;
	/* Relevant only for SDMA queues */
	uint32_t sdma_engine_id;
	uint32_t sdma_queue_id;
	uint32_t sdma_vm_addr;
	/* Relevant only for VI */
	uint64_t eop_ring_buffer_address;
	uint32_t eop_ring_buffer_size;
	uint64_t ctx_save_restore_area_address;
	uint32_t ctx_save_restore_area_size;
	uint32_t ctl_stack_size;
	uint64_t tba_addr;
	uint64_t tma_addr;
};

#define QUEUE_IS_ACTIVE(q) ((q).queue_size > 0 &&	\
			    (q).queue_address != 0 &&	\
			    (q).queue_percent > 0 &&	\
			    !(q).is_evicted)
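/*
 * Note that QUEUE_IS_ACTIVE() takes the properties struct itself, not a
 * pointer. A typical (illustrative) check:
 *
 *	if (QUEUE_IS_ACTIVE(q->properties))
 *		... map the queue to the HW ...
 */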
enum mqd_update_flag {
	UPDATE_FLAG_CU_MASK = 0,
};

struct mqd_update_info {
	union {
		struct {
			uint32_t count; /* Must be a multiple of 32 */
			uint32_t *ptr;
		} cu_mask;
	};
	enum mqd_update_flag update_flag;
};

/**
 * struct queue
 *
 * @list: Queue linked list.
 *
 * @mqd: The queue MQD (memory queue descriptor).
 *
 * @mqd_mem_obj: The MQD local gpu memory object.
 *
 * @gart_mqd_addr: The MQD gart mc address.
 *
 * @properties: The queue properties.
 *
 * @mec: Used only in no cp scheduling mode and identifies the micro engine
 * id that the queue should be executed on.
 *
 * @pipe: Used only in no cp scheduling mode and identifies the queue's pipe
 * id.
 *
 * @queue: Used only in no cp scheduling mode and identifies the queue's slot.
 *
 * @process: The kfd process that created this queue.
 *
 * @device: The kfd device that created this queue.
 *
 * @gws: Pointing to gws kgd_mem if this is a gws control queue; NULL
 * otherwise.
 *
 * This structure represents user mode compute queues.
 * It contains all the necessary data to handle such queues.
 */
struct queue {
	struct list_head list;
	void *mqd;
	struct kfd_mem_obj *mqd_mem_obj;
	uint64_t gart_mqd_addr;
	struct queue_properties properties;

	uint32_t mec;
	uint32_t pipe;
	uint32_t queue;

	unsigned int sdma_id;
	unsigned int doorbell_id;

	struct kfd_process *process;
	struct kfd_node *device;
	void *gws;

	/* procfs */
	struct kobject kobj;

	void *gang_ctx_bo;
	uint64_t gang_ctx_gpu_addr;
	void *gang_ctx_cpu_ptr;

	struct amdgpu_bo *wptr_bo;
};

enum KFD_MQD_TYPE {
	KFD_MQD_TYPE_HIQ = 0,		/* for hiq */
	KFD_MQD_TYPE_CP,		/* for cp queues and diq */
	KFD_MQD_TYPE_SDMA,		/* for sdma queues */
	KFD_MQD_TYPE_DIQ,		/* for diq */
	KFD_MQD_TYPE_MAX
};

enum KFD_PIPE_PRIORITY {
	KFD_PIPE_PRIORITY_CS_LOW = 0,
	KFD_PIPE_PRIORITY_CS_MEDIUM,
	KFD_PIPE_PRIORITY_CS_HIGH
};

struct scheduling_resources {
	unsigned int vmid_mask;
	enum kfd_queue_type type;
	uint64_t queue_mask;
	uint64_t gws_mask;
	uint32_t oac_mask;
	uint32_t gds_heap_base;
	uint32_t gds_heap_size;
};

struct process_queue_manager {
	/* data */
	struct kfd_process *process;
	struct list_head queues;
	unsigned long *queue_slot_bitmap;
};

struct qcm_process_device {
	/* The Device Queue Manager that owns this data */
	struct device_queue_manager *dqm;
	struct process_queue_manager *pqm;
	/* Queues list */
	struct list_head queues_list;
	struct list_head priv_queue_list;

	unsigned int queue_count;
	unsigned int vmid;
	bool is_debug;
	unsigned int evicted; /* eviction counter, 0=active */

	/* This flag tells if we should reset all wavefronts on
	 * process termination
	 */
	bool reset_wavefronts;

	/* This flag tells us if this process has a GWS-capable
	 * queue that will be mapped into the runlist. It's
	 * possible to request a GWS BO, but not have the queue
	 * currently mapped, and this changes how the MAP_PROCESS
	 * PM4 packet is configured.
	 */
	bool mapped_gws_queue;

	/* All the memory management data should be here too */
	uint64_t gds_context_area;
	/* Contains page table flags such as AMDGPU_PTE_VALID since gfx9 */
	uint64_t page_table_base;
	uint32_t sh_mem_config;
	uint32_t sh_mem_bases;
	uint32_t sh_mem_ape1_base;
	uint32_t sh_mem_ape1_limit;
	uint32_t gds_size;
	uint32_t num_gws;
	uint32_t num_oac;
	uint32_t sh_hidden_private_base;

	/* CWSR memory */
	struct kgd_mem *cwsr_mem;
	void *cwsr_kaddr;
	uint64_t cwsr_base;
	uint64_t tba_addr;
	uint64_t tma_addr;

	/* IB memory */
	struct kgd_mem *ib_mem;
	uint64_t ib_base;
	void *ib_kaddr;

	/* doorbell resources per process per device */
	unsigned long *doorbell_bitmap;
};

/* KFD Memory Eviction */

/* Approx. wait time before attempting to restore evicted BOs */
#define PROCESS_RESTORE_TIME_MS 100
/* Approx. back off time if restore fails due to lack of memory */
#define PROCESS_BACK_OFF_TIME_MS 100
/* Approx. time before evicting the process again */
#define PROCESS_ACTIVE_TIME_MS 10

/* 8 byte handle containing GPU ID in the most significant 4 bytes and
 * idr_handle in the least significant 4 bytes
 */
#define MAKE_HANDLE(gpu_id, idr_handle) \
	(((uint64_t)(gpu_id) << 32) + idr_handle)
#define GET_GPU_ID(handle) (handle >> 32)
#define GET_IDR_HANDLE(handle) (handle & 0xFFFFFFFF)
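/*
 * Round-trip sketch with illustrative values: both halves can be recovered
 * independently from the packed 64-bit handle.
 *
 *	uint64_t handle = MAKE_HANDLE(gpu_id, idr_handle);
 *
 *	WARN_ON(GET_GPU_ID(handle) != gpu_id);
 *	WARN_ON(GET_IDR_HANDLE(handle) != idr_handle);
 */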
enum kfd_pdd_bound {
	PDD_UNBOUND = 0,
	PDD_BOUND,
	PDD_BOUND_SUSPENDED,
};

#define MAX_SYSFS_FILENAME_LEN 15

/*
 * SDMA counter runs at 100MHz frequency.
 * We display SDMA activity in microsecond granularity in sysfs.
 * As a result, the divisor is 100.
 */
#define SDMA_ACTIVITY_DIVISOR  100
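/*
 * Worked example (illustrative numbers): a raw counter delta of 250000
 * ticks at 100 MHz corresponds to 250000 / SDMA_ACTIVITY_DIVISOR = 2500 us
 * of SDMA activity.
 */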
/* Data that is per-process-per device. */
struct kfd_process_device {
	/* The device that owns this data. */
	struct kfd_node *dev;

	/* The process that owns this kfd_process_device. */
	struct kfd_process *process;

	/* per-process-per device QCM data structure */
	struct qcm_process_device qpd;

	/* Apertures */
	uint64_t lds_base;
	uint64_t lds_limit;
	uint64_t gpuvm_base;
	uint64_t gpuvm_limit;
	uint64_t scratch_base;
	uint64_t scratch_limit;

	/* VM context for GPUVM allocations */
	struct file *drm_file;
	void *drm_priv;
	atomic64_t tlb_seq;

	/* GPUVM allocations storage */
	struct idr alloc_idr;

	/* Flag used to tell if the pdd has dequeued from the dqm.
	 * This is used to prevent dev->dqm->ops.process_termination() from
	 * being called twice when it is already called in IOMMU callback
	 * function.
	 */
	bool already_dequeued;
	bool runtime_inuse;

	/* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */
	enum kfd_pdd_bound bound;

	/* VRAM usage */
	uint64_t vram_usage;
	struct attribute attr_vram;
	char vram_filename[MAX_SYSFS_FILENAME_LEN];

	/* SDMA activity tracking */
	uint64_t sdma_past_activity_counter;
	struct attribute attr_sdma;
	char sdma_filename[MAX_SYSFS_FILENAME_LEN];

	/* Eviction activity tracking */
	uint64_t last_evict_timestamp;
	atomic64_t evict_duration_counter;
	struct attribute attr_evict;

	struct kobject *kobj_stats;
	unsigned int doorbell_index;

	/*
	 * @cu_occupancy: Reports occupancy of Compute Units (CU) of a process
	 * that is associated with the device encoded by "this" struct
	 * instance. The value reflects CU usage by all of the waves launched
	 * by this process on this device. An important property of this
	 * parameter is that its value is a snapshot of current use.
	 *
	 * The following is to be noted regarding how this parameter is
	 * reported:
	 *
	 * The number of waves that a CU can launch is limited by a couple of
	 * parameters. These are encoded by the struct amdgpu_cu_info instance
	 * that is part of every device definition. For GFX9 devices this
	 * translates to 40 waves (simd_per_cu * max_waves_per_simd) when waves
	 * do not use scratch memory and 32 waves (max_scratch_slots_per_cu)
	 * when they do use scratch memory. This could change for future
	 * devices, so this example should be considered only as a guide.
	 *
	 * All CUs of a device are available to the process. This may not be
	 * true under certain conditions - e.g. CU masking.
	 *
	 * Finally, the number of CUs occupied by a process depends both on
	 * the number of CUs the device has and on the number of other
	 * competing processes.
	 */
	struct attribute attr_cu_occupancy;

	/* sysfs counters for GPU retry fault and page migration tracking */
	struct kobject *kobj_counters;
	struct attribute attr_faults;
	struct attribute attr_page_in;
	struct attribute attr_page_out;
	uint64_t faults;
	uint64_t page_in;
	uint64_t page_out;

	/* Tracks debug per-vmid request settings */
	uint32_t spi_dbg_override;
	uint32_t spi_dbg_launch_mode;
	uint32_t watch_points[4];

	/*
	 * If this process has been checkpointed before, then the user
	 * application will use the original gpu_id on the
	 * checkpointed node to refer to this device.
	 */
	uint32_t user_gpu_id;

	void *proc_ctx_bo;
	uint64_t proc_ctx_gpu_addr;
	void *proc_ctx_cpu_ptr;
};

#define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)

struct svm_range_list {
	struct mutex lock;
	struct rb_root_cached objects;
	struct list_head list;
	struct work_struct deferred_list_work;
	struct list_head deferred_range_list;
	struct list_head criu_svm_metadata_list;
	spinlock_t deferred_list_lock;
	atomic_t evicted_ranges;
	atomic_t drain_pagefaults;
	struct delayed_work restore_work;
	DECLARE_BITMAP(bitmap_supported, MAX_GPU_INSTANCE);
	struct task_struct *faulting_task;
};

/* Process data */
struct kfd_process {
	/*
	 * kfd_process structures are stored in an mm_struct* -> kfd_process*
	 * hash table (kfd_processes in kfd_process.c)
	 */
	struct hlist_node kfd_processes;

	/*
	 * Opaque pointer to mm_struct. We don't hold a reference to
	 * it so it should never be dereferenced from here. This is
	 * only used for looking up processes by their mm.
	 */
	void *mm;

	struct kref ref;
	struct work_struct release_work;

	struct mutex mutex;

	/*
	 * In any process, the thread that started main() is the lead
	 * thread and outlives the rest.
	 * It is here because amd_iommu_bind_pasid wants a task_struct.
	 * It can also be used for safely getting a reference to the
	 * mm_struct of the process.
	 */
	struct task_struct *lead_thread;

	/* We want to receive a notification when the mm_struct is destroyed */
	struct mmu_notifier mmu_notifier;

	u32 pasid;

	/*
	 * Array of kfd_process_device pointers,
	 * one for each device the process is using.
	 */
	struct kfd_process_device *pdds[MAX_GPU_INSTANCE];
	uint32_t n_pdds;

	struct process_queue_manager pqm;

	/* Is the user space process 32 bit? */
	bool is_32bit_user_mode;

	/* Event-related data */
	struct mutex event_mutex;
	/* Event ID allocator and lookup */
	struct idr event_idr;
	/* Event page */
	u64 signal_handle;
	struct kfd_signal_page *signal_page;
	size_t signal_mapped_size;
	size_t signal_event_count;
	bool signal_event_limit_reached;

	/* Information used for memory eviction */
	void *kgd_process_info;
	/* Eviction fence that is attached to all the BOs of this process. The
	 * fence will be triggered during eviction and a new one will be
	 * created during restore.
	 */
	struct dma_fence *ef;

	/* Work items for evicting and restoring BOs */
	struct delayed_work eviction_work;
	struct delayed_work restore_work;
	/* seqno of the last scheduled eviction */
	unsigned int last_eviction_seqno;
	/* Approx. the last timestamp (in jiffies) when the process was
	 * restored after an eviction
	 */
	unsigned long last_restore_timestamp;

	/* Indicates device process is debug attached with reserved vmid. */
	bool debug_trap_enabled;

	/* per-process-per device debug event fd file */
	struct file *dbg_ev_file;

	/* If the process is a kfd debugger, we need to know so we can clean
	 * up at exit time. If a process enables debugging on itself, it does
	 * its own clean-up, so we don't set the flag here. We track this by
	 * counting the number of processes this process is debugging.
	 */
	atomic_t debugged_process_count;

	/* If the process is being debugged, this is the debugger process */
	struct kfd_process *debugger_process;

	/* Kobj for our procfs */
	struct kobject *kobj;
	struct kobject *kobj_queues;
	struct attribute attr_pasid;

	/* Keep track of cwsr init */
	bool has_cwsr;

	/* Exception code enable mask and status */
	uint64_t exception_enable_mask;

	/* shared virtual memory registered by this process */
	struct svm_range_list svms;

	bool xnack_enabled;

	/* Tracks debug per-vmid request for debug flags */
	bool dbg_flags;

	atomic_t poison;
	/* Queues are in paused state because we are in the process of doing
	 * a CRIU checkpoint
	 */
	bool queues_paused;

	/* Tracks runtime enable status */
	struct kfd_runtime_info runtime_info;
};

#define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */
extern DECLARE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
extern struct srcu_struct kfd_processes_srcu;

/**
 * typedef amdkfd_ioctl_t - typedef for ioctl function pointer.
 *
 * @filep: pointer to file structure.
 * @p: amdkfd process pointer.
 * @data: pointer to arg that was copied from user.
 *
 * Return: returns ioctl completion code.
 */
typedef int amdkfd_ioctl_t(struct file *filep, struct kfd_process *p,
				void *data);

struct amdkfd_ioctl_desc {
	unsigned int cmd;
	int flags;
	amdkfd_ioctl_t *func;
	unsigned int cmd_drv;
	const char *name;
};
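/*
 * Sketch of a handler matching amdkfd_ioctl_t; the ioctl name and args
 * struct below are hypothetical. The dispatcher copies the user argument
 * into kernel memory before calling the handler, so @data is safe to use
 * directly.
 *
 *	static int kfd_ioctl_example(struct file *filep,
 *				     struct kfd_process *p, void *data)
 *	{
 *		struct kfd_ioctl_example_args *args = data;
 *
 *		return args ? 0 : -EINVAL;
 *	}
 *
 * Handlers are listed in a table of amdkfd_ioctl_desc entries keyed by
 * command number.
 */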
bool kfd_dev_is_large_bar(struct kfd_node *dev);

int kfd_process_create_wq(void);
void kfd_process_destroy_wq(void);
void kfd_cleanup_processes(void);
struct kfd_process *kfd_create_process(struct task_struct *thread);
struct kfd_process *kfd_get_process(const struct task_struct *task);
struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid);
struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm);

int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id);
int kfd_process_gpuid_from_node(struct kfd_process *p, struct kfd_node *node,
				uint32_t *gpuid, uint32_t *gpuidx);
static inline int kfd_process_gpuid_from_gpuidx(struct kfd_process *p,
				uint32_t gpuidx, uint32_t *gpuid)
{
	return gpuidx < p->n_pdds ? p->pdds[gpuidx]->dev->id : -EINVAL;
}
static inline struct kfd_process_device *kfd_process_device_from_gpuidx(
				struct kfd_process *p, uint32_t gpuidx)
{
	return gpuidx < p->n_pdds ? p->pdds[gpuidx] : NULL;
}

void kfd_unref_process(struct kfd_process *p);
int kfd_process_evict_queues(struct kfd_process *p, uint32_t trigger);
int kfd_process_restore_queues(struct kfd_process *p);
void kfd_suspend_all_processes(void);
int kfd_resume_all_processes(void);

struct kfd_process_device *kfd_process_device_data_by_id(struct kfd_process *process,
							 uint32_t gpu_id);

int kfd_process_get_user_gpu_id(struct kfd_process *p, uint32_t actual_gpu_id);

int kfd_process_device_init_vm(struct kfd_process_device *pdd,
			       struct file *drm_file);
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_node *dev,
						      struct kfd_process *p);
struct kfd_process_device *kfd_get_process_device_data(struct kfd_node *dev,
						       struct kfd_process *p);
struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev,
							  struct kfd_process *p);

bool kfd_process_xnack_mode(struct kfd_process *p, bool supported);

int kfd_reserved_mem_mmap(struct kfd_node *dev, struct kfd_process *process,
			  struct vm_area_struct *vma);

/* KFD process API for creating and translating handles */
int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
					 void *mem);
void *kfd_process_device_translate_handle(struct kfd_process_device *p,
					   int handle);
void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
					  int handle);
struct kfd_process *kfd_lookup_process_by_pid(struct pid *pid);

/* PASIDs */
int kfd_pasid_init(void);
void kfd_pasid_exit(void);
bool kfd_set_pasid_limit(unsigned int new_limit);
unsigned int kfd_get_pasid_limit(void);
u32 kfd_pasid_alloc(void);
void kfd_pasid_free(u32 pasid);

/* Doorbells */
size_t kfd_doorbell_process_slice(struct kfd_dev *kfd);
int kfd_doorbell_init(struct kfd_dev *kfd);
void kfd_doorbell_fini(struct kfd_dev *kfd);
int kfd_doorbell_mmap(struct kfd_node *dev, struct kfd_process *process,
		      struct vm_area_struct *vma);
void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
					unsigned int *doorbell_off);
void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr);
u32 read_kernel_doorbell(u32 __iomem *db);
void write_kernel_doorbell(void __iomem *db, u32 value);
void write_kernel_doorbell64(void __iomem *db, u64 value);
unsigned int kfd_get_doorbell_dw_offset_in_bar(struct kfd_dev *kfd,
					struct kfd_process_device *pdd,
					unsigned int doorbell_id);
phys_addr_t kfd_get_process_doorbells(struct kfd_process_device *pdd);
int kfd_alloc_process_doorbells(struct kfd_dev *kfd,
				unsigned int *doorbell_index);
void kfd_free_process_doorbells(struct kfd_dev *kfd,
				unsigned int doorbell_index);

/* GTT Sub-Allocator */

int kfd_gtt_sa_allocate(struct kfd_node *node, unsigned int size,
			struct kfd_mem_obj **mem_obj);

int kfd_gtt_sa_free(struct kfd_node *node, struct kfd_mem_obj *mem_obj);
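/*
 * Usage sketch (illustrative): sub-allocations come back as a kfd_mem_obj
 * carrying both the GPU address and a CPU pointer, and must be returned
 * with kfd_gtt_sa_free(); mqd_size stands in for a real allocation size.
 *
 *	struct kfd_mem_obj *mem_obj;
 *
 *	if (kfd_gtt_sa_allocate(node, mqd_size, &mem_obj))
 *		return -ENOMEM;
 *	memset(mem_obj->cpu_ptr, 0, mqd_size);
 *	...
 *	kfd_gtt_sa_free(node, mem_obj);
 */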
extern struct device *kfd_device;

/* KFD's procfs */
void kfd_procfs_init(void);
void kfd_procfs_shutdown(void);
int kfd_procfs_add_queue(struct queue *q);
void kfd_procfs_del_queue(struct queue *q);

/* Topology */
int kfd_topology_init(void);
void kfd_topology_shutdown(void);
int kfd_topology_add_device(struct kfd_node *gpu);
int kfd_topology_remove_device(struct kfd_node *gpu);
struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
						uint32_t proximity_domain);
struct kfd_topology_device *kfd_topology_device_by_proximity_domain_no_lock(
						uint32_t proximity_domain);
struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id);
struct kfd_node *kfd_device_by_id(uint32_t gpu_id);
struct kfd_node *kfd_device_by_pci_dev(const struct pci_dev *pdev);
static inline bool kfd_irq_is_from_node(struct kfd_node *node, uint32_t node_id,
					uint32_t vmid)
{
	return (node->interrupt_bitmap & (1 << node_id)) != 0 &&
	       (node->compute_vmid_bitmap & (1 << vmid)) != 0;
}
static inline struct kfd_node *kfd_node_by_irq_ids(struct amdgpu_device *adev,
					uint32_t node_id, uint32_t vmid)
{
	struct kfd_dev *dev = adev->kfd.dev;
	uint32_t i;

	if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 3))
		return dev->nodes[0];

	for (i = 0; i < dev->num_nodes; i++)
		if (kfd_irq_is_from_node(dev->nodes[i], node_id, vmid))
			return dev->nodes[i];

	return NULL;
}
int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_node **kdev);
int kfd_numa_node_to_apic_id(int numa_node_id);
void kfd_double_confirm_iommu_support(struct kfd_dev *gpu);

/* Interrupts */
int kfd_interrupt_init(struct kfd_node *dev);
void kfd_interrupt_exit(struct kfd_node *dev);
bool enqueue_ih_ring_entry(struct kfd_node *kfd, const void *ih_ring_entry);
bool interrupt_is_wanted(struct kfd_node *dev,
				const uint32_t *ih_ring_entry,
				uint32_t *patched_ihre, bool *flag);

/* amdkfd Apertures */
int kfd_init_apertures(struct kfd_process *process);

void kfd_process_set_trap_handler(struct qcm_process_device *qpd,
				  uint64_t tba_addr,
				  uint64_t tma_addr);

/* CWSR initialization */
int kfd_process_init_cwsr_apu(struct kfd_process *process, struct file *filep);

/* CRIU */
/*
 * Need to increment KFD_CRIU_PRIV_VERSION each time a change is made to any of
 * the CRIU private structures:
 * kfd_criu_process_priv_data
 * kfd_criu_device_priv_data
 * kfd_criu_bo_priv_data
 * kfd_criu_queue_priv_data
 * kfd_criu_event_priv_data
 * kfd_criu_svm_range_priv_data
 */

#define KFD_CRIU_PRIV_VERSION 1

struct kfd_criu_process_priv_data {
	uint32_t version;
	uint32_t xnack_mode;
};

struct kfd_criu_device_priv_data {
	/* For future use */
	uint64_t reserved;
};

struct kfd_criu_bo_priv_data {
	uint64_t user_addr;
	uint32_t idr_handle;
	uint32_t mapped_gpuids[MAX_GPU_INSTANCE];
};

/*
 * The first 4 bytes of kfd_criu_queue_priv_data, kfd_criu_event_priv_data and
 * kfd_criu_svm_range_priv_data are the object type
 */
enum kfd_criu_object_type {
	KFD_CRIU_OBJECT_TYPE_QUEUE,
	KFD_CRIU_OBJECT_TYPE_EVENT,
	KFD_CRIU_OBJECT_TYPE_SVM_RANGE,
};
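/*
 * Restore-side sketch (illustrative): because object_type leads each priv
 * struct, restore code can peek at it before deciding how to parse the
 * record. user_priv_data and priv_offset mirror the parameters of the
 * kfd_criu_restore_* helpers declared below.
 *
 *	uint32_t object_type;
 *
 *	if (copy_from_user(&object_type, user_priv_data + *priv_offset,
 *			   sizeof(object_type)))
 *		return -EFAULT;
 *
 *	switch (object_type) {
 *	case KFD_CRIU_OBJECT_TYPE_QUEUE:
 *		... parse a kfd_criu_queue_priv_data record ...
 *		break;
 *	}
 */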
struct kfd_criu_svm_range_priv_data {
	uint32_t object_type;
	uint64_t start_addr;
	uint64_t size;
	/* Variable length array of attributes */
	struct kfd_ioctl_svm_attribute attrs[];
};

struct kfd_criu_queue_priv_data {
	uint32_t object_type;
	uint64_t q_address;
	uint64_t q_size;
	uint64_t read_ptr_addr;
	uint64_t write_ptr_addr;
	uint64_t doorbell_off;
	uint64_t eop_ring_buffer_address;
	uint64_t ctx_save_restore_area_address;
	uint32_t gpu_id;
	uint32_t type;
	uint32_t format;
	uint32_t q_id;
	uint32_t priority;
	uint32_t q_percent;
	uint32_t doorbell_id;
	uint32_t gws;
	uint32_t sdma_id;
	uint32_t eop_ring_buffer_size;
	uint32_t ctx_save_restore_area_size;
	uint32_t ctl_stack_size;
	uint32_t mqd_size;
};

struct kfd_criu_event_priv_data {
	uint32_t object_type;
	uint64_t user_handle;
	uint32_t event_id;
	uint32_t auto_reset;
	uint32_t type;
	uint32_t signaled;

	union {
		struct kfd_hsa_memory_exception_data memory_exception_data;
		struct kfd_hsa_hw_exception_data hw_exception_data;
	};
};

int kfd_process_get_queue_info(struct kfd_process *p,
			       uint32_t *num_queues,
			       uint64_t *priv_data_sizes);

int kfd_criu_checkpoint_queues(struct kfd_process *p,
			       uint8_t __user *user_priv_data,
			       uint64_t *priv_data_offset);

int kfd_criu_restore_queue(struct kfd_process *p,
			   uint8_t __user *user_priv_data,
			   uint64_t *priv_data_offset,
			   uint64_t max_priv_data_size);

int kfd_criu_checkpoint_events(struct kfd_process *p,
			       uint8_t __user *user_priv_data,
			       uint64_t *priv_data_offset);

int kfd_criu_restore_event(struct file *devkfd,
			   struct kfd_process *p,
			   uint8_t __user *user_priv_data,
			   uint64_t *priv_data_offset,
			   uint64_t max_priv_data_size);
/* CRIU - End */

/* Queue Context Management */
int init_queue(struct queue **q, const struct queue_properties *properties);
void uninit_queue(struct queue *q);
void print_queue_properties(struct queue_properties *q);
void print_queue(struct queue *q);

struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
		struct kfd_node *dev);
struct mqd_manager *mqd_manager_init_cik_hawaii(enum KFD_MQD_TYPE type,
		struct kfd_node *dev);
struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
		struct kfd_node *dev);
struct mqd_manager *mqd_manager_init_vi_tonga(enum KFD_MQD_TYPE type,
		struct kfd_node *dev);
struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
		struct kfd_node *dev);
struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
		struct kfd_node *dev);
struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
		struct kfd_node *dev);
struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev);
void device_queue_manager_uninit(struct device_queue_manager *dqm);
struct kernel_queue *kernel_queue_init(struct kfd_node *dev,
					enum kfd_queue_type type);
void kernel_queue_uninit(struct kernel_queue *kq, bool hanging);
int kfd_dqm_evict_pasid(struct device_queue_manager *dqm, u32 pasid);

/* Process Queue Manager */
struct process_queue_node {
	struct queue *q;
	struct kernel_queue *kq;
	struct list_head process_queue_list;
};

void kfd_process_dequeue_from_device(struct kfd_process_device *pdd);
void kfd_process_dequeue_from_all_devices(struct kfd_process *p);
int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p);
void pqm_uninit(struct process_queue_manager *pqm);
int pqm_create_queue(struct process_queue_manager *pqm,
			    struct kfd_node *dev,
			    struct file *f,
			    struct queue_properties *properties,
			    unsigned int *qid,
			    struct amdgpu_bo *wptr_bo,
			    const struct kfd_criu_queue_priv_data *q_data,
			    const void *restore_mqd,
			    const void *restore_ctl_stack,
			    uint32_t *p_doorbell_offset_in_process);
int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid);
int pqm_update_queue_properties(struct process_queue_manager *pqm, unsigned int qid,
			struct queue_properties *p);
int pqm_update_mqd(struct process_queue_manager *pqm, unsigned int qid,
			struct mqd_update_info *minfo);
int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
			void *gws);
struct kernel_queue *pqm_get_kernel_queue(struct process_queue_manager *pqm,
						unsigned int qid);
struct queue *pqm_get_user_queue(struct process_queue_manager *pqm,
						unsigned int qid);
int pqm_get_wave_state(struct process_queue_manager *pqm,
		       unsigned int qid,
		       void __user *ctl_stack,
		       u32 *ctl_stack_used_size,
		       u32 *save_area_used_size);

int amdkfd_fence_wait_timeout(uint64_t *fence_addr,
			      uint64_t fence_value,
			      unsigned int timeout_ms);

int pqm_get_queue_checkpoint_info(struct process_queue_manager *pqm,
				  unsigned int qid,
				  u32 *mqd_size,
				  u32 *ctl_stack_size);

/* Packet Manager */

#define KFD_FENCE_COMPLETED (100)
#define KFD_FENCE_INIT   (10)
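/*
 * Fence handshake sketch (illustrative; fence_addr/fence_gpu_addr are
 * placeholders): the driver seeds the fence with KFD_FENCE_INIT, asks the
 * scheduler to write KFD_FENCE_COMPLETED via a query-status packet, then
 * polls with a timeout using the helpers declared in this header.
 *
 *	*fence_addr = KFD_FENCE_INIT;
 *	pm_send_query_status(pm, fence_gpu_addr, KFD_FENCE_COMPLETED);
 *	ret = amdkfd_fence_wait_timeout(fence_addr, KFD_FENCE_COMPLETED,
 *					queue_preemption_timeout_ms);
 */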
struct packet_manager {
	struct device_queue_manager *dqm;
	struct kernel_queue *priv_queue;
	struct mutex lock;
	bool allocated;
	struct kfd_mem_obj *ib_buffer_obj;
	unsigned int ib_size_bytes;
	bool is_over_subscription;

	const struct packet_manager_funcs *pmf;
};

struct packet_manager_funcs {
	/* Support ASIC-specific packet formats for PM4 packets */
	int (*map_process)(struct packet_manager *pm, uint32_t *buffer,
			struct qcm_process_device *qpd);
	int (*runlist)(struct packet_manager *pm, uint32_t *buffer,
			uint64_t ib, size_t ib_size_in_dwords, bool chain);
	int (*set_resources)(struct packet_manager *pm, uint32_t *buffer,
			struct scheduling_resources *res);
	int (*map_queues)(struct packet_manager *pm, uint32_t *buffer,
			struct queue *q, bool is_static);
	int (*unmap_queues)(struct packet_manager *pm, uint32_t *buffer,
			enum kfd_unmap_queues_filter mode,
			uint32_t filter_param, bool reset);
	int (*set_grace_period)(struct packet_manager *pm, uint32_t *buffer,
			uint32_t grace_period);
	int (*query_status)(struct packet_manager *pm, uint32_t *buffer,
			uint64_t fence_address, uint64_t fence_value);
	int (*release_mem)(uint64_t gpu_addr, uint32_t *buffer);

	/* Packet sizes */
	int map_process_size;
	int runlist_size;
	int set_resources_size;
	int map_queues_size;
	int unmap_queues_size;
	int set_grace_period_size;
	int query_status_size;
	int release_mem_size;
};

extern const struct packet_manager_funcs kfd_vi_pm_funcs;
extern const struct packet_manager_funcs kfd_v9_pm_funcs;
extern const struct packet_manager_funcs kfd_aldebaran_pm_funcs;

int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
void pm_uninit(struct packet_manager *pm, bool hanging);
int pm_send_set_resources(struct packet_manager *pm,
				struct scheduling_resources *res);
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues);
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
				uint64_t fence_value);

int pm_send_unmap_queue(struct packet_manager *pm,
			enum kfd_unmap_queues_filter mode,
			uint32_t filter_param, bool reset);

void pm_release_ib(struct packet_manager *pm);

int pm_update_grace_period(struct packet_manager *pm, uint32_t grace_period);

/* Following PM funcs can be shared among VI and AI */
unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size);

uint64_t kfd_get_number_elems(struct kfd_dev *kfd);

/* Events */
extern const struct kfd_event_interrupt_class event_interrupt_class_cik;
extern const struct kfd_event_interrupt_class event_interrupt_class_v9;
extern const struct kfd_event_interrupt_class event_interrupt_class_v11;

extern const struct kfd_device_global_init_class device_global_init_class_cik;

int kfd_event_init_process(struct kfd_process *p);
void kfd_event_free_process(struct kfd_process *p);
int kfd_event_mmap(struct kfd_process *process, struct vm_area_struct *vma);
int kfd_wait_on_events(struct kfd_process *p,
		       uint32_t num_events, void __user *data,
		       bool all, uint32_t *user_timeout_ms,
		       uint32_t *wait_result);
void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
				uint32_t valid_id_bits);
void kfd_signal_iommu_event(struct kfd_node *dev,
			    u32 pasid, unsigned long address,
			    bool is_write_requested, bool is_execute_requested);
void kfd_signal_hw_exception_event(u32 pasid);
int kfd_set_event(struct kfd_process *p, uint32_t event_id);
int kfd_reset_event(struct kfd_process *p, uint32_t event_id);
int kfd_kmap_event_page(struct kfd_process *p, uint64_t event_page_offset);

int kfd_event_create(struct file *devkfd, struct kfd_process *p,
		     uint32_t event_type, bool auto_reset, uint32_t node_id,
		     uint32_t *event_id, uint32_t *event_trigger_data,
		     uint64_t *event_page_offset, uint32_t *event_slot_index);

int kfd_get_num_events(struct kfd_process *p);
int kfd_event_destroy(struct kfd_process *p, uint32_t event_id);
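/*
 * Flow sketch (illustrative; the out-parameter variables and udata are
 * placeholders): an event is created, later signaled, and waited on from
 * the wait ioctl path.
 *
 *	kfd_event_create(devkfd, p, KFD_IOC_EVENT_SIGNAL, true, 0,
 *			 &event_id, &trigger_data, &page_offset, &slot);
 *	kfd_set_event(p, event_id);
 *	kfd_wait_on_events(p, 1, udata, false, &timeout_ms, &wait_result);
 */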
void kfd_signal_vm_fault_event(struct kfd_node *dev, u32 pasid,
				struct kfd_vm_fault_info *info);

void kfd_signal_reset_event(struct kfd_node *dev);

void kfd_signal_poison_consumed_event(struct kfd_node *dev, u32 pasid);

void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type);

static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev)
{
	return KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3) ||
	       KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2) ||
	       (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1) &&
		dev->sdma_fw_version >= 18) ||
	       KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 0);
}

bool kfd_is_locked(void);

/* Compute profile */
void kfd_inc_compute_active(struct kfd_node *dev);
void kfd_dec_compute_active(struct kfd_node *dev);

/* Cgroup Support */
/* Check with device cgroup if @kfd device is accessible */
static inline int kfd_devcgroup_check_permission(struct kfd_node *kfd)
{
#if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF)
	struct drm_device *ddev = adev_to_drm(kfd->adev);

	return devcgroup_check_permission(DEVCG_DEV_CHAR, DRM_MAJOR,
					  ddev->render->index,
					  DEVCG_ACC_WRITE | DEVCG_ACC_READ);
#else
	return 0;
#endif
}

static inline bool kfd_is_first_node(struct kfd_node *node)
{
	return (node == node->kfd->nodes[0]);
}

/* Debugfs */
#if defined(CONFIG_DEBUG_FS)

void kfd_debugfs_init(void);
void kfd_debugfs_fini(void);
int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data);
int pqm_debugfs_mqds(struct seq_file *m, void *data);
int kfd_debugfs_hqds_by_device(struct seq_file *m, void *data);
int dqm_debugfs_hqds(struct seq_file *m, void *data);
int kfd_debugfs_rls_by_device(struct seq_file *m, void *data);
int pm_debugfs_runlist(struct seq_file *m, void *data);

int kfd_debugfs_hang_hws(struct kfd_node *dev);
int pm_debugfs_hang_hws(struct packet_manager *pm);
int dqm_debugfs_hang_hws(struct device_queue_manager *dqm);

#else

static inline void kfd_debugfs_init(void) {}
static inline void kfd_debugfs_fini(void) {}

#endif

#endif