/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef KFD_PRIV_H_INCLUDED
#define KFD_PRIV_H_INCLUDED

#include <linux/hashtable.h>
#include <linux/mmu_notifier.h>
#include <linux/memremap.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kfd_ioctl.h>
#include <linux/idr.h>
#include <linux/kfifo.h>
#include <linux/seq_file.h>
#include <linux/kref.h>
#include <linux/sysfs.h>
#include <linux/device_cgroup.h>
#include <drm/drm_file.h>
#include <drm/drm_drv.h>
#include <drm/drm_device.h>
#include <drm/drm_ioctl.h>
#include <kgd_kfd_interface.h>
#include <linux/swap.h>

#include "amd_shared.h"
#include "amdgpu.h"

#define KFD_MAX_RING_ENTRY_SIZE	8

#define KFD_SYSFS_FILE_MODE 0444

/* GPU ID hash width in bits */
#define KFD_GPU_ID_HASH_WIDTH 16

/* Use upper bits of mmap offset to store KFD driver specific information.
 * BITS[63:62] - Encode MMAP type
 * BITS[61:46] - Encode gpu_id, identifying the GPU the offset belongs to
 * BITS[45:0]  - MMAP offset value
 *
 * NOTE: struct vm_area_struct.vm_pgoff uses an offset in pages. Hence, these
 * defines are w.r.t. PAGE_SIZE
 */
#define KFD_MMAP_TYPE_SHIFT	62
#define KFD_MMAP_TYPE_MASK	(0x3ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_DOORBELL	(0x3ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_EVENTS	(0x2ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_RESERVED_MEM	(0x1ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_MMIO	(0x0ULL << KFD_MMAP_TYPE_SHIFT)

#define KFD_MMAP_GPU_ID_SHIFT	46
#define KFD_MMAP_GPU_ID_MASK	(((1ULL << KFD_GPU_ID_HASH_WIDTH) - 1) \
				<< KFD_MMAP_GPU_ID_SHIFT)
#define KFD_MMAP_GPU_ID(gpu_id)	((((uint64_t)gpu_id) << KFD_MMAP_GPU_ID_SHIFT)\
				& KFD_MMAP_GPU_ID_MASK)
#define KFD_MMAP_GET_GPU_ID(offset)	((offset & KFD_MMAP_GPU_ID_MASK) \
				>> KFD_MMAP_GPU_ID_SHIFT)
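/*
 * Illustrative sketch, not part of the driver interface: how the
 * KFD_MMAP_* macros above compose and decode an mmap offset. The helper
 * names below are hypothetical and exist only for this example.
 */
static inline uint64_t kfd_mmap_offset_example(uint32_t gpu_id)
{
	/* A doorbell mapping for @gpu_id, with a zero page offset */
	return KFD_MMAP_TYPE_DOORBELL | KFD_MMAP_GPU_ID(gpu_id);
}

static inline uint32_t kfd_mmap_gpu_id_example(uint64_t offset)
{
	/* Recover the gpu_id encoded in BITS[61:46] */
	return KFD_MMAP_GET_GPU_ID(offset);
}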
/*
 * When working with the CP scheduler, the HIQ must be assigned to a fixed
 * HQD slot, either manually or via the amdgpu driver. These are the fixed
 * HIQ HQD slot definitions for Kaveri: only the first ME participates in
 * CP scheduling there, so the HIQ slot is placed in the second ME.
 */
#define KFD_CIK_HIQ_PIPE 4
#define KFD_CIK_HIQ_QUEUE 0

/* Macro for allocating structures */
#define kfd_alloc_struct(ptr_to_struct)	\
	((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL))

#define KFD_MAX_NUM_OF_PROCESSES 512
#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024

/*
 * Size of the per-process TBA+TMA buffer: 2 pages
 *
 * The first page is the TBA used for the CWSR ISA code. The second
 * page is used as TMA for user-mode trap handler setup in daisy-chain mode.
 */
#define KFD_CWSR_TBA_TMA_SIZE (PAGE_SIZE * 2)
#define KFD_CWSR_TMA_OFFSET PAGE_SIZE

#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE		\
	(KFD_MAX_NUM_OF_PROCESSES *			\
			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)

#define KFD_KERNEL_QUEUE_SIZE 2048

#define KFD_UNMAP_LATENCY_MS	(4000)

#define KFD_MAX_SDMA_QUEUES	128

/*
 * 512 = 0x200
 * The doorbell index distance between SDMA RLC (2*i) and (2*i+1) in the
 * same SDMA engine on SOC15, which has 8-byte doorbells for SDMA.
 * A 512 8-byte doorbell distance (i.e. one page away) ensures that the SDMA
 * RLC (2*i+1) doorbells (in terms of the lower 12 bit address) lie exactly
 * in the OFFSET and SIZE set in registers like BIF_SDMA0_DOORBELL_RANGE.
 */
#define KFD_QUEUE_DOORBELL_MIRROR_OFFSET 512

/**
 * enum kfd_ioctl_flags - KFD ioctl flags
 * Various flags that can be set in &amdkfd_ioctl_desc.flags to control how
 * userspace can use a given ioctl.
 */
enum kfd_ioctl_flags {
	/*
	 * @KFD_IOC_FLAG_CHECKPOINT_RESTORE:
	 * Certain KFD ioctls such as AMDKFD_IOC_CRIU_OP can potentially
	 * perform privileged operations and load arbitrary data into MQDs
	 * and eventually HQD registers when the queue is mapped by HWS. To
	 * prevent this, such ioctls require additional security checks and
	 * are restricted to callers with the CHECKPOINT_RESTORE capability.
	 *
	 * Note: Since earlier versions of Docker do not support
	 * CHECKPOINT_RESTORE, we also allow these ioctls for callers with
	 * the SYS_ADMIN capability.
	 */
	KFD_IOC_FLAG_CHECKPOINT_RESTORE = BIT(0),
};
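/*
 * Dispatch sketch (an assumption about the check done in kfd_chardev.c,
 * not a verbatim copy of it): an ioctl flagged with
 * KFD_IOC_FLAG_CHECKPOINT_RESTORE is rejected unless the caller holds
 * CAP_CHECKPOINT_RESTORE, or CAP_SYS_ADMIN as the fallback described above:
 *
 *	if ((ioctl->flags & KFD_IOC_FLAG_CHECKPOINT_RESTORE) &&
 *	    !capable(CAP_CHECKPOINT_RESTORE) && !capable(CAP_SYS_ADMIN))
 *		return -EACCES;
 */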
/*
 * Kernel module parameter to specify the maximum number of supported queues
 * per device
 */
extern int max_num_of_queues_per_device;

/* Kernel module parameter to specify the scheduling policy */
extern int sched_policy;

/*
 * Kernel module parameter to specify the maximum process
 * number per HW scheduler
 */
extern int hws_max_conc_proc;

extern int cwsr_enable;

/*
 * Kernel module parameter to specify whether to send sigterm to HSA process
 * on unhandled exception
 */
extern int send_sigterm;

/*
 * Kernel module parameter to simulate a large-BAR machine on machines
 * without large-BAR support.
 */
extern int debug_largebar;

/*
 * Ignore CRAT table during KFD initialization, can be used to work around
 * broken CRAT tables on some AMD systems
 */
extern int ignore_crat;

/* Set sh_mem_config.retry_disable on GFX v9 */
extern int amdgpu_noretry;

/* Halt if HWS hang is detected */
extern int halt_if_hws_hang;

/* Whether the MEC FW supports GWS barriers */
extern bool hws_gws_support;

/* Queue preemption timeout in ms */
extern int queue_preemption_timeout_ms;

/* Don't evict process queues on vm fault */
extern int amdgpu_no_queue_eviction_on_vm_fault;

/* Enable eviction debug messages */
extern bool debug_evictions;

extern struct mutex kfd_processes_mutex;

enum cache_policy {
	cache_policy_coherent,
	cache_policy_noncoherent
};

#define KFD_GC_VERSION(dev) ((dev)->adev->ip_versions[GC_HWIP][0])
#define KFD_IS_SOC15(dev)   ((KFD_GC_VERSION(dev)) >= (IP_VERSION(9, 0, 1)))
#define KFD_SUPPORT_XNACK_PER_PROCESS(dev)\
		((KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2)) ||	\
		 (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3)))

struct kfd_node;

struct kfd_event_interrupt_class {
	bool (*interrupt_isr)(struct kfd_node *dev,
			const uint32_t *ih_ring_entry, uint32_t *patched_ihre,
			bool *patched_flag);
	void (*interrupt_wq)(struct kfd_node *dev,
			const uint32_t *ih_ring_entry);
};

struct kfd_device_info {
	uint32_t gfx_target_version;
	const struct kfd_event_interrupt_class *event_interrupt_class;
	unsigned int max_pasid_bits;
	unsigned int max_no_of_hqd;
	unsigned int doorbell_size;
	size_t ih_ring_entry_size;
	uint8_t num_of_watch_points;
	uint16_t mqd_size_aligned;
	bool supports_cwsr;
	bool needs_iommu_device;
	bool needs_pci_atomics;
	uint32_t no_atomic_fw_version;
	unsigned int num_sdma_queues_per_engine;
	unsigned int num_reserved_sdma_queues_per_engine;
	DECLARE_BITMAP(reserved_sdma_queues_bitmap, KFD_MAX_SDMA_QUEUES);
};

unsigned int kfd_get_num_sdma_engines(struct kfd_node *kdev);
unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_node *kdev);

struct kfd_mem_obj {
	uint32_t range_start;
	uint32_t range_end;
	uint64_t gpu_addr;
	uint32_t *cpu_ptr;
	void *gtt_mem;
};

struct kfd_vmid_info {
	uint32_t first_vmid_kfd;
	uint32_t last_vmid_kfd;
	uint32_t vmid_num_kfd;
};

#define MAX_KFD_NODES	8

struct kfd_dev;

struct kfd_node {
	unsigned int node_id;
	struct amdgpu_device *adev;	/* Duplicated here along with keeping
					 * a copy in kfd_dev to save a hop
					 */
	const struct kfd2kgd_calls *kfd2kgd; /* Duplicated here along with
					      * keeping a copy in kfd_dev to
					      * save a hop
					      */
	struct kfd_vmid_info vm_info;
	unsigned int id;		/* topology stub index */
	uint32_t xcc_mask;		/* Instance mask of XCCs present */
	struct amdgpu_xcp *xcp;

	/* Interrupts */
	struct kfifo ih_fifo;
	struct workqueue_struct *ih_wq;
	struct work_struct interrupt_work;
	spinlock_t interrupt_lock;

	/*
	 * Interrupts of interest to KFD are copied
	 * from the HW ring into a SW ring.
	 */
	bool interrupts_active;
	uint32_t interrupt_bitmap; /* Only used for GFX 9.4.3 */

	/* QCM Device instance */
	struct device_queue_manager *dqm;

	/* Global GWS resource shared between processes */
	void *gws;
	bool gws_debug_workaround;

	/* Clients watching SMI events */
	struct list_head smi_clients;
	spinlock_t smi_lock;
	uint32_t reset_seq_num;

	/* SRAM ECC flag */
	atomic_t sram_ecc_flag;

	/* SPM process id */
	unsigned int spm_pasid;

	/* Maximum process number mapped to HW scheduler */
	unsigned int max_proc_per_quantum;

	unsigned int compute_vmid_bitmap;

	struct kfd_local_mem_info local_mem_info;

	struct kfd_dev *kfd;
};

struct kfd_dev {
	struct amdgpu_device *adev;

	struct kfd_device_info device_info;

	phys_addr_t doorbell_base;	/* Start of actual doorbells used by
					 * KFD. It is aligned for mapping
					 * into user mode
					 */
	size_t doorbell_base_dw_offset;	/* Offset from the start of the PCI
					 * doorbell BAR to the first KFD
					 * doorbell in dwords. GFX reserves
					 * the segment before this offset.
					 */
	u32 __iomem *doorbell_kernel_ptr; /* This is a pointer for a doorbells
					   * page used by kernel queue
					   */

	struct kgd2kfd_shared_resources shared_resources;

	const struct kfd2kgd_calls *kfd2kgd;
	struct mutex doorbell_mutex;
	DECLARE_BITMAP(doorbell_available_index,
			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);

	void *gtt_mem;
	uint64_t gtt_start_gpu_addr;
	void *gtt_start_cpu_ptr;
	void *gtt_sa_bitmap;
	struct mutex gtt_sa_lock;
	unsigned int gtt_sa_chunk_size;
	unsigned int gtt_sa_num_of_chunks;

	bool init_complete;

	/* Firmware versions */
	uint16_t mec_fw_version;
	uint16_t mec2_fw_version;
	uint16_t sdma_fw_version;

	/* CWSR */
	bool cwsr_enabled;
	const void *cwsr_isa;
	unsigned int cwsr_isa_size;

	/* xGMI */
	uint64_t hive_id;

	bool pci_atomic_requested;

	/* Use IOMMU v2 flag */
	bool use_iommu_v2;

	/* Compute Profile ref. count */
	atomic_t compute_profile;

	struct ida doorbell_ida;
	unsigned int max_doorbell_slices;

	int noretry;

	struct kfd_node *nodes[MAX_KFD_NODES];
	unsigned int num_nodes;

	/* Track per device allocated watch points */
	uint32_t alloc_watch_ids;
	spinlock_t watch_points_lock;

	/* Kernel doorbells for KFD device */
	struct amdgpu_bo *doorbells;

	/* bitmap for dynamic doorbell allocation from doorbell object */
	unsigned long *doorbell_bitmap;
};

enum kfd_mempool {
	KFD_MEMPOOL_SYSTEM_CACHEABLE = 1,
	KFD_MEMPOOL_SYSTEM_WRITECOMBINE = 2,
	KFD_MEMPOOL_FRAMEBUFFER = 3,
};

/* Character device interface */
int kfd_chardev_init(void);
void kfd_chardev_exit(void);

/**
 * enum kfd_unmap_queues_filter - Enum for queue filters.
 *
 * @KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES: Preempts all queues in the
 * running queues list.
 *
 * @KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES: Preempts all non-static queues
 * in the run list.
 *
 * @KFD_UNMAP_QUEUES_FILTER_BY_PASID: Preempts queues that belong to a
 * specific process.
 */
enum kfd_unmap_queues_filter {
	KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES = 1,
	KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES = 2,
	KFD_UNMAP_QUEUES_FILTER_BY_PASID = 3
};
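/*
 * Usage sketch: preempting every queue of one process by PASID.
 * pm_send_unmap_queue() is declared later in this header; filter_param
 * carries the PASID for the BY_PASID filter and is ignored by the other
 * filters:
 *
 *	ret = pm_send_unmap_queue(pm, KFD_UNMAP_QUEUES_FILTER_BY_PASID,
 *				  pasid, false);
 */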
/**
 * enum kfd_queue_type - Enum for various queue types.
 *
 * @KFD_QUEUE_TYPE_COMPUTE: Regular user mode queue type.
 *
 * @KFD_QUEUE_TYPE_SDMA: SDMA user mode queue type.
 *
 * @KFD_QUEUE_TYPE_HIQ: HIQ queue type.
 *
 * @KFD_QUEUE_TYPE_DIQ: DIQ queue type.
 *
 * @KFD_QUEUE_TYPE_SDMA_XGMI: Special SDMA queue for XGMI interface.
 */
enum kfd_queue_type  {
	KFD_QUEUE_TYPE_COMPUTE,
	KFD_QUEUE_TYPE_SDMA,
	KFD_QUEUE_TYPE_HIQ,
	KFD_QUEUE_TYPE_DIQ,
	KFD_QUEUE_TYPE_SDMA_XGMI
};

enum kfd_queue_format {
	KFD_QUEUE_FORMAT_PM4,
	KFD_QUEUE_FORMAT_AQL
};

enum KFD_QUEUE_PRIORITY {
	KFD_QUEUE_PRIORITY_MINIMUM = 0,
	KFD_QUEUE_PRIORITY_MAXIMUM = 15
};
/**
 * struct queue_properties
 *
 * @type: The queue type.
 *
 * @queue_id: Queue identifier.
 *
 * @queue_address: Queue ring buffer address.
 *
 * @queue_size: Queue ring buffer size.
 *
 * @priority: Defines the queue priority relative to other queues in the
 * process.
 * This is just an indication and HW scheduling may override the priority as
 * necessary while keeping the relative prioritization.
 * The priority granularity is from 0 to f, where f is the highest priority.
 * Currently all queues are initialized with the highest priority.
 *
 * @queue_percent: This field is partially implemented; currently a zero in
 * this field marks the queue as inactive.
 *
 * @read_ptr: User space address which points to the number of dwords the
 * CP read from the ring buffer. This field is updated automatically by the
 * HW.
 *
 * @write_ptr: Defines the number of dwords written to the ring buffer.
 *
 * @doorbell_ptr: Notifies the HW of new packets written to the queue ring
 * buffer. This field should be similar to write_ptr and the user should
 * update this field after updating the write_ptr.
 *
 * @doorbell_off: The doorbell offset in the doorbell pci-bar.
 *
 * @is_interop: Defines if this is an interop queue. An interop queue can
 * access both graphics and compute resources.
 *
 * @is_evicted: Defines if the queue is evicted. Only active queues
 * are evicted, rendering them inactive.
 *
 * @is_active: Defines if the queue is active or not. @is_active and
 * @is_evicted are protected by the DQM lock.
 *
 * @is_gws: Defines if the queue has been updated to be GWS-capable or not.
 * @is_gws should be protected by the DQM lock, since changing it can yield
 * the possibility of updating DQM state on the number of GWS queues.
 *
 * @vmid: If the scheduling mode is not CP scheduling, this field defines
 * the VMID of the queue.
 *
 * This structure represents the queue properties for each queue, whether it
 * is a user mode or a kernel mode queue.
 */
struct queue_properties {
	enum kfd_queue_type type;
	enum kfd_queue_format format;
	unsigned int queue_id;
	uint64_t queue_address;
	uint64_t queue_size;
	uint32_t priority;
	uint32_t queue_percent;
	uint32_t *read_ptr;
	uint32_t *write_ptr;
	void __iomem *doorbell_ptr;
	uint32_t doorbell_off;
	bool is_interop;
	bool is_evicted;
	bool is_suspended;
	bool is_being_destroyed;
	bool is_active;
	bool is_gws;
	uint32_t pm4_target_xcc;
	bool is_dbg_wa;
	bool is_user_cu_masked;
	/* Not relevant for user mode queues in cp scheduling */
	unsigned int vmid;
	/* Relevant only for sdma queues */
	uint32_t sdma_engine_id;
	uint32_t sdma_queue_id;
	uint32_t sdma_vm_addr;
	/* Relevant only for VI */
	uint64_t eop_ring_buffer_address;
	uint32_t eop_ring_buffer_size;
	uint64_t ctx_save_restore_area_address;
	uint32_t ctx_save_restore_area_size;
	uint32_t ctl_stack_size;
	uint64_t tba_addr;
	uint64_t tma_addr;
	uint64_t exception_status;
};

#define QUEUE_IS_ACTIVE(q) ((q).queue_size > 0 &&	\
			    (q).queue_address != 0 &&	\
			    (q).queue_percent > 0 &&	\
			    !(q).is_evicted &&		\
			    !(q).is_suspended)

enum mqd_update_flag {
	UPDATE_FLAG_DBG_WA_ENABLE = 1,
	UPDATE_FLAG_DBG_WA_DISABLE = 2,
};

struct mqd_update_info {
	union {
		struct {
			uint32_t count; /* Must be a multiple of 32 */
			uint32_t *ptr;
		} cu_mask;
	};
	enum mqd_update_flag update_flag;
};
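/*
 * Sketch (hypothetical helper, shown only to illustrate the layout):
 * describing a CU-mask update for pqm_update_mqd(). @num_bits counts mask
 * bits and must be a multiple of 32, so each u32 in @mask_words covers
 * 32 CUs.
 */
static inline void kfd_minfo_set_cu_mask_example(struct mqd_update_info *minfo,
						 uint32_t *mask_words,
						 uint32_t num_bits)
{
	minfo->cu_mask.count = num_bits;	/* multiple of 32 */
	minfo->cu_mask.ptr = mask_words;
}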
/**
 * struct queue
 *
 * @list: Queue linked list.
 *
 * @mqd: The queue MQD (memory queue descriptor).
 *
 * @mqd_mem_obj: The MQD local gpu memory object.
 *
 * @gart_mqd_addr: The MQD gart mc address.
 *
 * @properties: The queue properties.
 *
 * @mec: Used only in no cp scheduling mode and identifies the micro engine
 * id that the queue should be executed on.
 *
 * @pipe: Used only in no cp scheduling mode and identifies the queue's pipe
 * id.
 *
 * @queue: Used only in no cp scheduling mode and identifies the queue's
 * slot.
 *
 * @process: The kfd process that created this queue.
 *
 * @device: The kfd device that created this queue.
 *
 * @gws: Pointing to gws kgd_mem if this is a gws control queue; NULL
 * otherwise.
 *
 * This structure represents user mode compute queues.
 * It contains all the necessary data to handle such queues.
 */
struct queue {
	struct list_head list;
	void *mqd;
	struct kfd_mem_obj *mqd_mem_obj;
	uint64_t gart_mqd_addr;
	struct queue_properties properties;

	uint32_t mec;
	uint32_t pipe;
	uint32_t queue;

	unsigned int sdma_id;
	unsigned int doorbell_id;

	struct kfd_process	*process;
	struct kfd_node		*device;
	void *gws;

	/* procfs */
	struct kobject kobj;

	void *gang_ctx_bo;
	uint64_t gang_ctx_gpu_addr;
	void *gang_ctx_cpu_ptr;

	struct amdgpu_bo *wptr_bo;
};

enum KFD_MQD_TYPE {
	KFD_MQD_TYPE_HIQ = 0,		/* for hiq */
	KFD_MQD_TYPE_CP,		/* for cp queues and diq */
	KFD_MQD_TYPE_SDMA,		/* for sdma queues */
	KFD_MQD_TYPE_DIQ,		/* for diq */
	KFD_MQD_TYPE_MAX
};

enum KFD_PIPE_PRIORITY {
	KFD_PIPE_PRIORITY_CS_LOW = 0,
	KFD_PIPE_PRIORITY_CS_MEDIUM,
	KFD_PIPE_PRIORITY_CS_HIGH
};

struct scheduling_resources {
	unsigned int vmid_mask;
	enum kfd_queue_type type;
	uint64_t queue_mask;
	uint64_t gws_mask;
	uint32_t oac_mask;
	uint32_t gds_heap_base;
	uint32_t gds_heap_size;
};

struct process_queue_manager {
	/* data */
	struct kfd_process	*process;
	struct list_head	queues;
	unsigned long		*queue_slot_bitmap;
};

struct qcm_process_device {
	/* The Device Queue Manager that owns this data */
	struct device_queue_manager *dqm;
	struct process_queue_manager *pqm;
	/* Queues list */
	struct list_head queues_list;
	struct list_head priv_queue_list;

	unsigned int queue_count;
	unsigned int vmid;
	bool is_debug;
	unsigned int evicted; /* eviction counter, 0=active */

	/* This flag tells if we should reset all wavefronts on
	 * process termination
	 */
	bool reset_wavefronts;

	/* This flag tells us if this process has a GWS-capable
	 * queue that will be mapped into the runlist. It's
	 * possible to request a GWS BO, but not have the queue
	 * currently mapped, and this changes how the MAP_PROCESS
	 * PM4 packet is configured.
	 */
	bool mapped_gws_queue;

	/* All the memory management data should be here too */
	uint64_t gds_context_area;
	/* Contains page table flags such as AMDGPU_PTE_VALID since gfx9 */
	uint64_t page_table_base;
	uint32_t sh_mem_config;
	uint32_t sh_mem_bases;
	uint32_t sh_mem_ape1_base;
	uint32_t sh_mem_ape1_limit;
	uint32_t gds_size;
	uint32_t num_gws;
	uint32_t num_oac;
	uint32_t sh_hidden_private_base;

	/* CWSR memory */
	struct kgd_mem *cwsr_mem;
	void *cwsr_kaddr;
	uint64_t cwsr_base;
	uint64_t tba_addr;
	uint64_t tma_addr;

	/* IB memory */
	struct kgd_mem *ib_mem;
	uint64_t ib_base;
	void *ib_kaddr;

	/* doorbells for kfd process */
	struct amdgpu_bo *proc_doorbells;

	/* bitmap for dynamic doorbell allocation from the bo */
	unsigned long *doorbell_bitmap;
};

/* KFD Memory Eviction */

/* Approx. wait time before attempting to restore evicted BOs */
#define PROCESS_RESTORE_TIME_MS 100
/* Approx. back off time if restore fails due to lack of memory */
#define PROCESS_BACK_OFF_TIME_MS 100
/* Approx. time before evicting the process again */
#define PROCESS_ACTIVE_TIME_MS 10

/* 8 byte handle containing GPU ID in the most significant 4 bytes and
 * idr_handle in the least significant 4 bytes
 */
#define MAKE_HANDLE(gpu_id, idr_handle) \
	(((uint64_t)(gpu_id) << 32) + idr_handle)
#define GET_GPU_ID(handle) (handle >> 32)
#define GET_IDR_HANDLE(handle) (handle & 0xFFFFFFFF)
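/*
 * Example round trip (illustrative only): the 64-bit handle packs the
 * gpu_id into the upper half and the IDR handle into the lower half, and
 * the GET_* macros recover the original values:
 *
 *	uint64_t handle = MAKE_HANDLE(gpu_id, idr_handle);
 *
 *	GET_GPU_ID(handle)     == gpu_id;
 *	GET_IDR_HANDLE(handle) == idr_handle;
 */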
enum kfd_pdd_bound {
	PDD_UNBOUND = 0,
	PDD_BOUND,
	PDD_BOUND_SUSPENDED,
};

#define MAX_SYSFS_FILENAME_LEN 15

/*
 * SDMA counter runs at 100MHz frequency.
 * We display SDMA activity in microsecond granularity in sysfs.
 * As a result, the divisor is 100.
 */
#define SDMA_ACTIVITY_DIVISOR  100
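/*
 * Sketch (hypothetical helper): converting a raw SDMA activity counter
 * value to microseconds. At 100 MHz the counter advances 100 ticks per
 * microsecond, which is exactly SDMA_ACTIVITY_DIVISOR.
 */
static inline uint64_t kfd_sdma_activity_to_us_example(uint64_t ticks)
{
	return ticks / SDMA_ACTIVITY_DIVISOR;
}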
/* Data that is per-process-per device. */
struct kfd_process_device {
	/* The device that owns this data. */
	struct kfd_node *dev;

	/* The process that owns this kfd_process_device. */
	struct kfd_process *process;

	/* per-process-per device QCM data structure */
	struct qcm_process_device qpd;

	/* Apertures */
	uint64_t lds_base;
	uint64_t lds_limit;
	uint64_t gpuvm_base;
	uint64_t gpuvm_limit;
	uint64_t scratch_base;
	uint64_t scratch_limit;

	/* VM context for GPUVM allocations */
	struct file *drm_file;
	void *drm_priv;
	atomic64_t tlb_seq;

	/* GPUVM allocations storage */
	struct idr alloc_idr;

	/* Flag used to tell the pdd has dequeued from the dqm.
	 * This is used to prevent dev->dqm->ops.process_termination() from
	 * being called twice when it is already called in IOMMU callback
	 * function.
	 */
	bool already_dequeued;
	bool runtime_inuse;

	/* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */
	enum kfd_pdd_bound bound;

	/* VRAM usage */
	uint64_t vram_usage;
	struct attribute attr_vram;
	char vram_filename[MAX_SYSFS_FILENAME_LEN];

	/* SDMA activity tracking */
	uint64_t sdma_past_activity_counter;
	struct attribute attr_sdma;
	char sdma_filename[MAX_SYSFS_FILENAME_LEN];

	/* Eviction activity tracking */
	uint64_t last_evict_timestamp;
	atomic64_t evict_duration_counter;
	struct attribute attr_evict;

	struct kobject *kobj_stats;
	unsigned int doorbell_index;

	/*
	 * @cu_occupancy: Reports occupancy of Compute Units (CU) of a process
	 * that is associated with the device encoded by "this" struct
	 * instance. The value reflects CU usage by all of the waves launched
	 * by this process on this device. A very important property of
	 * occupancy is that its value is a snapshot of current use.
	 *
	 * The following is to be noted regarding how this parameter is
	 * reported:
	 *
	 * The number of waves that a CU can launch is limited by a couple of
	 * parameters. These are encoded by the struct amdgpu_cu_info
	 * instance that is part of every device definition. For GFX9 devices
	 * this translates to 40 waves (simd_per_cu * max_waves_per_simd)
	 * when waves do not use scratch memory and 32 waves
	 * (max_scratch_slots_per_cu) when they do use scratch memory. This
	 * could change for future devices and therefore this example should
	 * be considered as a guide.
	 *
	 * All CUs of a device are available to the process. This may not be
	 * true under certain conditions - e.g. CU masking.
	 *
	 * Finally, the number of CUs occupied by a process is affected both
	 * by the number of CUs the device has and by the number of other
	 * competing processes.
	 */
	struct attribute attr_cu_occupancy;

	/* sysfs counters for GPU retry fault and page migration tracking */
	struct kobject *kobj_counters;
	struct attribute attr_faults;
	struct attribute attr_page_in;
	struct attribute attr_page_out;
	uint64_t faults;
	uint64_t page_in;
	uint64_t page_out;

	/* Exception code status */
	uint64_t exception_status;
	void *vm_fault_exc_data;
	size_t vm_fault_exc_data_size;

	/* Tracks debug per-vmid request settings */
	uint32_t spi_dbg_override;
	uint32_t spi_dbg_launch_mode;
	uint32_t watch_points[4];
	uint32_t alloc_watch_ids;

	/*
	 * If this process has been checkpointed before, then the user
	 * application will use the original gpu_id on the
	 * checkpointed node to refer to this device.
	 */
	uint32_t user_gpu_id;

	void *proc_ctx_bo;
	uint64_t proc_ctx_gpu_addr;
	void *proc_ctx_cpu_ptr;
};

#define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)
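/*
 * Illustrative use of qpd_to_pdd() (hypothetical snippet): DQM code that
 * holds only a qcm_process_device pointer can recover the owning
 * kfd_process_device, since the qpd is embedded in it.
 */
static inline struct kfd_process_device *
kfd_qpd_to_pdd_example(struct qcm_process_device *qpd)
{
	return qpd_to_pdd(qpd);
}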
struct svm_range_list {
	struct mutex			lock;
	struct rb_root_cached		objects;
	struct list_head		list;
	struct work_struct		deferred_list_work;
	struct list_head		deferred_range_list;
	struct list_head		criu_svm_metadata_list;
	spinlock_t			deferred_list_lock;
	atomic_t			evicted_ranges;
	atomic_t			drain_pagefaults;
	struct delayed_work		restore_work;
	DECLARE_BITMAP(bitmap_supported, MAX_GPU_INSTANCE);
	struct task_struct		*faulting_task;
};

/* Process data */
struct kfd_process {
	/*
	 * kfd_process are stored in an mm_struct*->kfd_process*
	 * hash table (kfd_processes in kfd_process.c)
	 */
	struct hlist_node kfd_processes;

	/*
	 * Opaque pointer to mm_struct. We don't hold a reference to
	 * it so it should never be dereferenced from here. This is
	 * only used for looking up processes by their mm.
	 */
	void *mm;

	struct kref ref;
	struct work_struct release_work;

	struct mutex mutex;

	/*
	 * In any process, the thread that started main() is the lead
	 * thread and outlives the rest.
	 * It is here because amd_iommu_bind_pasid wants a task_struct.
	 * It can also be used for safely getting a reference to the
	 * mm_struct of the process.
	 */
	struct task_struct *lead_thread;

	/* We want to receive a notification when the mm_struct is destroyed */
	struct mmu_notifier mmu_notifier;

	u32 pasid;

	/*
	 * Array of kfd_process_device pointers,
	 * one for each device the process is using.
	 */
	struct kfd_process_device *pdds[MAX_GPU_INSTANCE];
	uint32_t n_pdds;

	struct process_queue_manager pqm;

	/* Is the user space process 32 bit? */
	bool is_32bit_user_mode;

	/* Event-related data */
	struct mutex event_mutex;
	/* Event ID allocator and lookup */
	struct idr event_idr;
	/* Event page */
	u64 signal_handle;
	struct kfd_signal_page *signal_page;
	size_t signal_mapped_size;
	size_t signal_event_count;
	bool signal_event_limit_reached;

	/* Information used for memory eviction */
	void *kgd_process_info;
	/* Eviction fence that is attached to all the BOs of this process.
	 * The fence will be triggered during eviction and a new one will be
	 * created during restore.
	 */
	struct dma_fence *ef;

	/* Work items for evicting and restoring BOs */
	struct delayed_work eviction_work;
	struct delayed_work restore_work;
	/* seqno of the last scheduled eviction */
	unsigned int last_eviction_seqno;
	/* Approx. the last timestamp (in jiffies) when the process was
	 * restored after an eviction
	 */
	unsigned long last_restore_timestamp;

	/* Indicates device process is debug attached with reserved vmid. */
	bool debug_trap_enabled;

	/* per-process-per device debug event fd file */
	struct file *dbg_ev_file;

	/* If the process is a kfd debugger, we need to know so we can clean
	 * up at exit time. If a process enables debugging on itself, it does
	 * its own clean-up, so we don't set the flag here. We track this by
	 * counting the number of processes this process is debugging.
	 */
	atomic_t debugged_process_count;

	/* If the process is being debugged, this is the debugger process */
	struct kfd_process *debugger_process;

	/* Kobj for our procfs */
	struct kobject *kobj;
	struct kobject *kobj_queues;
	struct attribute attr_pasid;

	/* Keep track of cwsr init */
	bool has_cwsr;

	/* Exception code enable mask and status */
	uint64_t exception_enable_mask;
	uint64_t exception_status;

	/* Used to drain stale interrupts */
	wait_queue_head_t wait_irq_drain;
	bool irq_drain_is_open;

	/* shared virtual memory registered by this process */
	struct svm_range_list svms;

	bool xnack_enabled;

	/* Work area for debugger event writer worker. */
	struct work_struct debug_event_workarea;

	/* Tracks debug per-vmid request for debug flags */
	bool dbg_flags;

	atomic_t poison;
	/* Queues are in a paused state because we are in the process of
	 * doing a CRIU checkpoint
	 */
	bool queues_paused;

	/* Tracks runtime enable status */
	struct semaphore runtime_enable_sema;
	bool is_runtime_retry;
	struct kfd_runtime_info runtime_info;
};

#define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */
extern DECLARE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
extern struct srcu_struct kfd_processes_srcu;

/**
 * typedef amdkfd_ioctl_t - typedef for ioctl function pointer.
 *
 * @filep: pointer to file structure.
 * @p: amdkfd process pointer.
 * @data: pointer to arg that was copied from user.
 *
 * Return: returns ioctl completion code.
 */
typedef int amdkfd_ioctl_t(struct file *filep, struct kfd_process *p,
				void *data);

struct amdkfd_ioctl_desc {
	unsigned int cmd;
	int flags;
	amdkfd_ioctl_t *func;
	unsigned int cmd_drv;
	const char *name;
};
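/*
 * Sketch of a descriptor entry as the chardev dispatcher might build one
 * (illustrative values and handler name; the actual table and handlers
 * live in kfd_chardev.c):
 *
 *	static const struct amdkfd_ioctl_desc desc = {
 *		.cmd   = AMDKFD_IOC_CRIU_OP,
 *		.func  = kfd_ioctl_criu,
 *		.flags = KFD_IOC_FLAG_CHECKPOINT_RESTORE,
 *		.name  = "AMDKFD_IOC_CRIU_OP",
 *	};
 */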
bool kfd_dev_is_large_bar(struct kfd_node *dev);

int kfd_process_create_wq(void);
void kfd_process_destroy_wq(void);
void kfd_cleanup_processes(void);
struct kfd_process *kfd_create_process(struct task_struct *thread);
struct kfd_process *kfd_get_process(const struct task_struct *task);
struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid);
struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm);

int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id);
int kfd_process_gpuid_from_node(struct kfd_process *p, struct kfd_node *node,
				uint32_t *gpuid, uint32_t *gpuidx);
static inline int kfd_process_gpuid_from_gpuidx(struct kfd_process *p,
				uint32_t gpuidx, uint32_t *gpuid) {
	return gpuidx < p->n_pdds ? p->pdds[gpuidx]->dev->id : -EINVAL;
}
static inline struct kfd_process_device *kfd_process_device_from_gpuidx(
				struct kfd_process *p, uint32_t gpuidx) {
	return gpuidx < p->n_pdds ? p->pdds[gpuidx] : NULL;
}

void kfd_unref_process(struct kfd_process *p);
int kfd_process_evict_queues(struct kfd_process *p, uint32_t trigger);
int kfd_process_restore_queues(struct kfd_process *p);
void kfd_suspend_all_processes(void);
int kfd_resume_all_processes(void);

struct kfd_process_device *kfd_process_device_data_by_id(struct kfd_process *process,
							 uint32_t gpu_id);

int kfd_process_get_user_gpu_id(struct kfd_process *p, uint32_t actual_gpu_id);

int kfd_process_device_init_vm(struct kfd_process_device *pdd,
			       struct file *drm_file);
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_node *dev,
						      struct kfd_process *p);
struct kfd_process_device *kfd_get_process_device_data(struct kfd_node *dev,
						       struct kfd_process *p);
struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev,
							  struct kfd_process *p);

bool kfd_process_xnack_mode(struct kfd_process *p, bool supported);

int kfd_reserved_mem_mmap(struct kfd_node *dev, struct kfd_process *process,
			  struct vm_area_struct *vma);

/* KFD process API for creating and translating handles */
int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
					 void *mem);
void *kfd_process_device_translate_handle(struct kfd_process_device *p,
					  int handle);
void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
					  int handle);
struct kfd_process *kfd_lookup_process_by_pid(struct pid *pid);

/* PASIDs */
int kfd_pasid_init(void);
void kfd_pasid_exit(void);
bool kfd_set_pasid_limit(unsigned int new_limit);
unsigned int kfd_get_pasid_limit(void);
u32 kfd_pasid_alloc(void);
void kfd_pasid_free(u32 pasid);

/* Doorbells */
size_t kfd_doorbell_process_slice(struct kfd_dev *kfd);
int kfd_doorbell_init(struct kfd_dev *kfd);
void kfd_doorbell_fini(struct kfd_dev *kfd);
int kfd_doorbell_mmap(struct kfd_node *dev, struct kfd_process *process,
		      struct vm_area_struct *vma);
void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
				      unsigned int *doorbell_off);
void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr);
u32 read_kernel_doorbell(u32 __iomem *db);
void write_kernel_doorbell(void __iomem *db, u32 value);
void write_kernel_doorbell64(void __iomem *db, u64 value);
unsigned int kfd_get_doorbell_dw_offset_in_bar(struct kfd_dev *kfd,
					struct kfd_process_device *pdd,
					unsigned int doorbell_id);
phys_addr_t kfd_get_process_doorbells(struct kfd_process_device *pdd);
int kfd_alloc_process_doorbells(struct kfd_dev *kfd,
				struct kfd_process_device *pdd);
void kfd_free_process_doorbells(struct kfd_dev *kfd,
				struct kfd_process_device *pdd);

/* GTT Sub-Allocator */
int kfd_gtt_sa_allocate(struct kfd_node *node, unsigned int size,
			struct kfd_mem_obj **mem_obj);
int kfd_gtt_sa_free(struct kfd_node *node, struct kfd_mem_obj *mem_obj);

extern struct device *kfd_device;

/* KFD's procfs */
void kfd_procfs_init(void);
void kfd_procfs_shutdown(void);
int kfd_procfs_add_queue(struct queue *q);
void kfd_procfs_del_queue(struct queue *q);

/* Topology */
int kfd_topology_init(void);
void kfd_topology_shutdown(void);
int kfd_topology_add_device(struct kfd_node *gpu);
int kfd_topology_remove_device(struct kfd_node *gpu);
struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
						uint32_t proximity_domain);
struct kfd_topology_device *kfd_topology_device_by_proximity_domain_no_lock(
						uint32_t proximity_domain);
struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id);
struct kfd_node *kfd_device_by_id(uint32_t gpu_id);
struct kfd_node *kfd_device_by_pci_dev(const struct pci_dev *pdev);

static inline bool kfd_irq_is_from_node(struct kfd_node *node, uint32_t node_id,
					uint32_t vmid)
{
	return (node->interrupt_bitmap & (1 << node_id)) != 0 &&
	       (node->compute_vmid_bitmap & (1 << vmid)) != 0;
}

static inline struct kfd_node *kfd_node_by_irq_ids(struct amdgpu_device *adev,
					uint32_t node_id, uint32_t vmid)
{
	struct kfd_dev *dev = adev->kfd.dev;
	uint32_t i;

	if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 3))
		return dev->nodes[0];

	for (i = 0; i < dev->num_nodes; i++)
		if (kfd_irq_is_from_node(dev->nodes[i], node_id, vmid))
			return dev->nodes[i];

	return NULL;
}

int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_node **kdev);
int kfd_numa_node_to_apic_id(int numa_node_id);
void kfd_double_confirm_iommu_support(struct kfd_dev *gpu);

/* Interrupts */
#define	KFD_IRQ_FENCE_CLIENTID	0xff
#define	KFD_IRQ_FENCE_SOURCEID	0xff
#define	KFD_IRQ_IS_FENCE(client, source)		\
				((client) == KFD_IRQ_FENCE_CLIENTID &&	\
				 (source) == KFD_IRQ_FENCE_SOURCEID)
int kfd_interrupt_init(struct kfd_node *dev);
void kfd_interrupt_exit(struct kfd_node *dev);
bool enqueue_ih_ring_entry(struct kfd_node *kfd, const void *ih_ring_entry);
bool interrupt_is_wanted(struct kfd_node *dev,
			 const uint32_t *ih_ring_entry,
			 uint32_t *patched_ihre, bool *flag);
int kfd_process_drain_interrupts(struct kfd_process_device *pdd);
void kfd_process_close_interrupt_drain(unsigned int pasid);

/* amdkfd Apertures */
int kfd_init_apertures(struct kfd_process *process);

void kfd_process_set_trap_handler(struct qcm_process_device *qpd,
				  uint64_t tba_addr,
				  uint64_t tma_addr);
void kfd_process_set_trap_debug_flag(struct qcm_process_device *qpd,
				     bool enabled);

/* CWSR initialization */
int kfd_process_init_cwsr_apu(struct kfd_process *process, struct file *filep);

/* CRIU */
/*
 * Need to increment KFD_CRIU_PRIV_VERSION each time a change is made to any
 * of the CRIU private structures:
 * kfd_criu_process_priv_data
 * kfd_criu_device_priv_data
 * kfd_criu_bo_priv_data
 * kfd_criu_queue_priv_data
 * kfd_criu_event_priv_data
 * kfd_criu_svm_range_priv_data
 */

#define KFD_CRIU_PRIV_VERSION 1

struct kfd_criu_process_priv_data {
	uint32_t version;
	uint32_t xnack_mode;
};

struct kfd_criu_device_priv_data {
	/* For future use */
	uint64_t reserved;
};

struct kfd_criu_bo_priv_data {
	uint64_t user_addr;
	uint32_t idr_handle;
	uint32_t mapped_gpuids[MAX_GPU_INSTANCE];
};

/*
 * The first 4 bytes of kfd_criu_queue_priv_data, kfd_criu_event_priv_data,
 * kfd_criu_svm_range_priv_data is the object type
 */
enum kfd_criu_object_type {
	KFD_CRIU_OBJECT_TYPE_QUEUE,
	KFD_CRIU_OBJECT_TYPE_EVENT,
	KFD_CRIU_OBJECT_TYPE_SVM_RANGE,
};

struct kfd_criu_svm_range_priv_data {
	uint32_t object_type;
	uint64_t start_addr;
	uint64_t size;
	/* Variable length array of attributes */
	struct kfd_ioctl_svm_attribute attrs[];
};

struct kfd_criu_queue_priv_data {
	uint32_t object_type;
	uint64_t q_address;
	uint64_t q_size;
	uint64_t read_ptr_addr;
	uint64_t write_ptr_addr;
	uint64_t doorbell_off;
	uint64_t eop_ring_buffer_address;
	uint64_t ctx_save_restore_area_address;
	uint32_t gpu_id;
	uint32_t type;
	uint32_t format;
	uint32_t q_id;
	uint32_t priority;
	uint32_t q_percent;
	uint32_t doorbell_id;
	uint32_t gws;
	uint32_t sdma_id;
	uint32_t eop_ring_buffer_size;
	uint32_t ctx_save_restore_area_size;
	uint32_t ctl_stack_size;
	uint32_t mqd_size;
};

struct kfd_criu_event_priv_data {
	uint32_t object_type;
	uint64_t user_handle;
	uint32_t event_id;
	uint32_t auto_reset;
	uint32_t type;
	uint32_t signaled;

	union {
		struct kfd_hsa_memory_exception_data memory_exception_data;
		struct kfd_hsa_hw_exception_data hw_exception_data;
	};
};

int kfd_process_get_queue_info(struct kfd_process *p,
			       uint32_t *num_queues,
			       uint64_t *priv_data_sizes);

int kfd_criu_checkpoint_queues(struct kfd_process *p,
			       uint8_t __user *user_priv_data,
			       uint64_t *priv_data_offset);

int kfd_criu_restore_queue(struct kfd_process *p,
			   uint8_t __user *user_priv_data,
			   uint64_t *priv_data_offset,
			   uint64_t max_priv_data_size);

int kfd_criu_checkpoint_events(struct kfd_process *p,
			       uint8_t __user *user_priv_data,
			       uint64_t *priv_data_offset);

int kfd_criu_restore_event(struct file *devkfd,
			   struct kfd_process *p,
			   uint8_t __user *user_priv_data,
			   uint64_t *priv_data_offset,
			   uint64_t max_priv_data_size);
/* CRIU - End */
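/*
 * Sketch (hypothetical helper; the real checkpoint path lives in
 * kfd_chardev.c): filling the process private data during checkpoint and
 * stamping it with KFD_CRIU_PRIV_VERSION so a later restore can reject a
 * mismatched layout.
 */
static inline void
kfd_criu_fill_process_priv_example(struct kfd_criu_process_priv_data *priv,
				   struct kfd_process *p)
{
	priv->version = KFD_CRIU_PRIV_VERSION;
	priv->xnack_mode = p->xnack_enabled;
}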
/* Queue Context Management */
int init_queue(struct queue **q, const struct queue_properties *properties);
void uninit_queue(struct queue *q);
void print_queue_properties(struct queue_properties *q);
void print_queue(struct queue *q);

struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
		struct kfd_node *dev);
struct mqd_manager *mqd_manager_init_cik_hawaii(enum KFD_MQD_TYPE type,
		struct kfd_node *dev);
struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
		struct kfd_node *dev);
struct mqd_manager *mqd_manager_init_vi_tonga(enum KFD_MQD_TYPE type,
		struct kfd_node *dev);
struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
		struct kfd_node *dev);
struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
		struct kfd_node *dev);
struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
		struct kfd_node *dev);
struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev);
void device_queue_manager_uninit(struct device_queue_manager *dqm);
struct kernel_queue *kernel_queue_init(struct kfd_node *dev,
				       enum kfd_queue_type type);
void kernel_queue_uninit(struct kernel_queue *kq, bool hanging);
int kfd_dqm_evict_pasid(struct device_queue_manager *dqm, u32 pasid);

/* Process Queue Manager */
struct process_queue_node {
	struct queue *q;
	struct kernel_queue *kq;
	struct list_head process_queue_list;
};

void kfd_process_dequeue_from_device(struct kfd_process_device *pdd);
void kfd_process_dequeue_from_all_devices(struct kfd_process *p);
int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p);
void pqm_uninit(struct process_queue_manager *pqm);
int pqm_create_queue(struct process_queue_manager *pqm,
			struct kfd_node *dev,
			struct file *f,
			struct queue_properties *properties,
			unsigned int *qid,
			struct amdgpu_bo *wptr_bo,
			const struct kfd_criu_queue_priv_data *q_data,
			const void *restore_mqd,
			const void *restore_ctl_stack,
			uint32_t *p_doorbell_offset_in_process);
int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid);
int pqm_update_queue_properties(struct process_queue_manager *pqm,
			unsigned int qid, struct queue_properties *p);
int pqm_update_mqd(struct process_queue_manager *pqm, unsigned int qid,
			struct mqd_update_info *minfo);
int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
			void *gws);
struct kernel_queue *pqm_get_kernel_queue(struct process_queue_manager *pqm,
						unsigned int qid);
struct queue *pqm_get_user_queue(struct process_queue_manager *pqm,
						unsigned int qid);
int pqm_get_wave_state(struct process_queue_manager *pqm,
		       unsigned int qid,
		       void __user *ctl_stack,
		       u32 *ctl_stack_used_size,
		       u32 *save_area_used_size);
int pqm_get_queue_snapshot(struct process_queue_manager *pqm,
			   uint64_t exception_clear_mask,
			   void __user *buf,
			   int *num_qss_entries,
			   uint32_t *entry_size);

int amdkfd_fence_wait_timeout(uint64_t *fence_addr,
			      uint64_t fence_value,
			      unsigned int timeout_ms);

int pqm_get_queue_checkpoint_info(struct process_queue_manager *pqm,
				  unsigned int qid,
				  u32 *mqd_size,
				  u32 *ctl_stack_size);

/* Packet Manager */

#define KFD_FENCE_COMPLETED (100)
#define KFD_FENCE_INIT   (10)

struct packet_manager {
	struct device_queue_manager *dqm;
	struct kernel_queue *priv_queue;
	struct mutex lock;
	bool allocated;
	struct kfd_mem_obj *ib_buffer_obj;
	unsigned int ib_size_bytes;
	bool is_over_subscription;

	const struct packet_manager_funcs *pmf;
};
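/*
 * Fence handshake sketch (an assumption drawn from the constants above,
 * not a verbatim copy of the DQM code): the scheduler seeds the fence
 * location with KFD_FENCE_INIT, asks the firmware to write
 * KFD_FENCE_COMPLETED via a query_status packet, then polls for it:
 *
 *	*fence_addr = KFD_FENCE_INIT;
 *	pm_send_query_status(pm, fence_gpu_addr, KFD_FENCE_COMPLETED);
 *	ret = amdkfd_fence_wait_timeout(fence_addr, KFD_FENCE_COMPLETED,
 *					KFD_UNMAP_LATENCY_MS);
 */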
struct packet_manager_funcs {
	/* Support ASIC-specific packet formats for PM4 packets */
	int (*map_process)(struct packet_manager *pm, uint32_t *buffer,
			struct qcm_process_device *qpd);
	int (*runlist)(struct packet_manager *pm, uint32_t *buffer,
			uint64_t ib, size_t ib_size_in_dwords, bool chain);
	int (*set_resources)(struct packet_manager *pm, uint32_t *buffer,
			struct scheduling_resources *res);
	int (*map_queues)(struct packet_manager *pm, uint32_t *buffer,
			struct queue *q, bool is_static);
	int (*unmap_queues)(struct packet_manager *pm, uint32_t *buffer,
			enum kfd_unmap_queues_filter mode,
			uint32_t filter_param, bool reset);
	int (*set_grace_period)(struct packet_manager *pm, uint32_t *buffer,
			uint32_t grace_period);
	int (*query_status)(struct packet_manager *pm, uint32_t *buffer,
			uint64_t fence_address, uint64_t fence_value);
	int (*release_mem)(uint64_t gpu_addr, uint32_t *buffer);

	/* Packet sizes */
	int map_process_size;
	int runlist_size;
	int set_resources_size;
	int map_queues_size;
	int unmap_queues_size;
	int set_grace_period_size;
	int query_status_size;
	int release_mem_size;
};

extern const struct packet_manager_funcs kfd_vi_pm_funcs;
extern const struct packet_manager_funcs kfd_v9_pm_funcs;
extern const struct packet_manager_funcs kfd_aldebaran_pm_funcs;

int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
void pm_uninit(struct packet_manager *pm, bool hanging);
int pm_send_set_resources(struct packet_manager *pm,
			  struct scheduling_resources *res);
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues);
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
			 uint64_t fence_value);

int pm_send_unmap_queue(struct packet_manager *pm,
			enum kfd_unmap_queues_filter mode,
			uint32_t filter_param, bool reset);

void pm_release_ib(struct packet_manager *pm);

int pm_update_grace_period(struct packet_manager *pm, uint32_t grace_period);

/* Following PM funcs can be shared among VI and AI */
unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size);

uint64_t kfd_get_number_elems(struct kfd_dev *kfd);

/* Events */
extern const struct kfd_event_interrupt_class event_interrupt_class_cik;
extern const struct kfd_event_interrupt_class event_interrupt_class_v9;
extern const struct kfd_event_interrupt_class event_interrupt_class_v9_4_3;
extern const struct kfd_event_interrupt_class event_interrupt_class_v10;
extern const struct kfd_event_interrupt_class event_interrupt_class_v11;

extern const struct kfd_device_global_init_class device_global_init_class_cik;

int kfd_event_init_process(struct kfd_process *p);
void kfd_event_free_process(struct kfd_process *p);
int kfd_event_mmap(struct kfd_process *process, struct vm_area_struct *vma);
int kfd_wait_on_events(struct kfd_process *p,
		       uint32_t num_events, void __user *data,
		       bool all, uint32_t *user_timeout_ms,
		       uint32_t *wait_result);
void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
				uint32_t valid_id_bits);
void kfd_signal_iommu_event(struct kfd_node *dev,
			    u32 pasid, unsigned long address,
			    bool is_write_requested, bool is_execute_requested);
void kfd_signal_hw_exception_event(u32 pasid);
int kfd_set_event(struct kfd_process *p, uint32_t event_id);
int kfd_reset_event(struct kfd_process *p, uint32_t event_id);
int kfd_kmap_event_page(struct kfd_process *p, uint64_t event_page_offset);

int kfd_event_create(struct file *devkfd, struct kfd_process *p,
		     uint32_t event_type, bool auto_reset, uint32_t node_id,
		     uint32_t *event_id, uint32_t *event_trigger_data,
		     uint64_t *event_page_offset, uint32_t *event_slot_index);

int kfd_get_num_events(struct kfd_process *p);
int kfd_event_destroy(struct kfd_process *p, uint32_t event_id);

void kfd_signal_vm_fault_event(struct kfd_node *dev, u32 pasid,
			       struct kfd_vm_fault_info *info,
			       struct kfd_hsa_memory_exception_data *data);

void kfd_signal_reset_event(struct kfd_node *dev);

void kfd_signal_poison_consumed_event(struct kfd_node *dev, u32 pasid);

void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type);

static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev)
{
	return KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3) ||
	       KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2) ||
	       (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1) &&
		dev->sdma_fw_version >= 18) ||
	       KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 0);
}

int kfd_send_exception_to_runtime(struct kfd_process *p,
				  unsigned int queue_id,
				  uint64_t error_reason);
bool kfd_is_locked(void);

/* Compute profile */
void kfd_inc_compute_active(struct kfd_node *dev);
void kfd_dec_compute_active(struct kfd_node *dev);

/* Cgroup Support */
/* Check with device cgroup if @kfd device is accessible */
static inline int kfd_devcgroup_check_permission(struct kfd_node *kfd)
{
#if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF)
	struct drm_device *ddev = adev_to_drm(kfd->adev);

	return devcgroup_check_permission(DEVCG_DEV_CHAR, DRM_MAJOR,
					  ddev->render->index,
					  DEVCG_ACC_WRITE | DEVCG_ACC_READ);
#else
	return 0;
#endif
}

static inline bool kfd_is_first_node(struct kfd_node *node)
{
	return (node == node->kfd->nodes[0]);
}

/* Debugfs */
#if defined(CONFIG_DEBUG_FS)

void kfd_debugfs_init(void);
void kfd_debugfs_fini(void);
int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data);
int pqm_debugfs_mqds(struct seq_file *m, void *data);
int kfd_debugfs_hqds_by_device(struct seq_file *m, void *data);
int dqm_debugfs_hqds(struct seq_file *m, void *data);
int kfd_debugfs_rls_by_device(struct seq_file *m, void *data);
int pm_debugfs_runlist(struct seq_file *m, void *data);

int kfd_debugfs_hang_hws(struct kfd_node *dev);
int pm_debugfs_hang_hws(struct packet_manager *pm);
int dqm_debugfs_hang_hws(struct device_queue_manager *dqm);

#else

static inline void kfd_debugfs_init(void) {}
static inline void kfd_debugfs_fini(void) {}

#endif

#endif