/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *    Eddie Dong <eddie.dong@intel.com>
 *
 * Contributors:
 *    Niu Bing <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#ifndef _GVT_H_
#define _GVT_H_

#include <uapi/linux/pci_regs.h>

#include "i915_drv.h"

#include "debug.h"
#include "hypercall.h"
#include "mmio.h"
#include "reg.h"
#include "interrupt.h"
#include "gtt.h"
#include "display.h"
#include "edid.h"
#include "execlist.h"
#include "scheduler.h"
#include "sched_policy.h"
#include "mmio_context.h"
#include "cmd_parser.h"
#include "fb_decoder.h"
#include "dmabuf.h"
#include "page_track.h"

#define GVT_MAX_VGPU 8

struct intel_gvt_host {
	struct device *dev;
	bool initialized;
	int hypervisor_type;
	const struct intel_gvt_mpt *mpt;
};

extern struct intel_gvt_host intel_gvt_host;
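/*
 * Illustrative note (not in the original header): hypervisor services are
 * reached through the intel_gvt_mpt table above. A minimal sketch of the
 * indirection, assuming an mpt hook such as read_gpa exists:
 *
 *	if (intel_gvt_host.mpt->read_gpa)
 *		ret = intel_gvt_host.mpt->read_gpa(vgpu->handle, gpa,
 *						   buf, len);
 *
 * The real wrapper functions live in mpt.h; this only shows the pattern.
 */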
/* Describe per-platform limitations. */
struct intel_gvt_device_info {
	u32 max_support_vgpus;
	u32 cfg_space_size;
	u32 mmio_size;
	u32 mmio_bar;
	unsigned long msi_cap_offset;
	u32 gtt_start_offset;
	u32 gtt_entry_size;
	u32 gtt_entry_size_shift;
	int gmadr_bytes_in_cmd;
	u32 max_surface_size;
};

/* GM resources owned by a vGPU */
struct intel_vgpu_gm {
	u64 aperture_sz;
	u64 hidden_sz;
	struct drm_mm_node low_gm_node;
	struct drm_mm_node high_gm_node;
};

#define INTEL_GVT_MAX_NUM_FENCES 32

/* Fences owned by a vGPU */
struct intel_vgpu_fence {
	struct i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES];
	u32 base;
	u32 size;
};

struct intel_vgpu_mmio {
	void *vreg;
};

#define INTEL_GVT_MAX_BAR_NUM 4

struct intel_vgpu_pci_bar {
	u64 size;
	bool tracked;
};

struct intel_vgpu_cfg_space {
	unsigned char virtual_cfg_space[PCI_CFG_SPACE_EXP_SIZE];
	struct intel_vgpu_pci_bar bar[INTEL_GVT_MAX_BAR_NUM];
	u32 pmcsr_off;
};

#define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)

struct intel_vgpu_irq {
	bool irq_warn_once[INTEL_GVT_EVENT_MAX];
	DECLARE_BITMAP(flip_done_event[I915_MAX_PIPES],
		       INTEL_GVT_EVENT_MAX);
};

struct intel_vgpu_opregion {
	bool mapped;
	void *va;
	u32 gfn[INTEL_GVT_OPREGION_PAGES];
};

#define vgpu_opregion(vgpu) (&(vgpu->opregion))

struct intel_vgpu_display {
	struct intel_vgpu_i2c_edid i2c_edid;
	struct intel_vgpu_port ports[I915_MAX_PORTS];
	struct intel_vgpu_sbi sbi;
	enum port port_num;
};

struct vgpu_sched_ctl {
	int weight;
};

enum {
	INTEL_VGPU_EXECLIST_SUBMISSION = 1,
	INTEL_VGPU_GUC_SUBMISSION,
};

struct intel_vgpu_submission_ops {
	const char *name;
	int (*init)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
	void (*clean)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
	void (*reset)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
};

struct intel_vgpu_submission {
	struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
	struct list_head workload_q_head[I915_NUM_ENGINES];
	struct intel_context *shadow[I915_NUM_ENGINES];
	struct kmem_cache *workloads;
	atomic_t running_workload_num;
	union {
		u64 i915_context_pml4;
		u64 i915_context_pdps[GEN8_3LVL_PDPES];
	};
	DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES);
	DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
	void *ring_scan_buffer[I915_NUM_ENGINES];
	int ring_scan_buffer_size[I915_NUM_ENGINES];
	const struct intel_vgpu_submission_ops *ops;
	int virtual_submission_interface;
	bool active;
	struct {
		u32 lrca;
		bool valid;
		u64 ring_context_gpa;
	} last_ctx[I915_NUM_ENGINES];
};
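/*
 * Illustrative sketch (assumed flow, not part of the original header):
 * the virtual submission backend is switched through the ops vtable
 * above, roughly:
 *
 *	struct intel_vgpu_submission *s = &vgpu->submission;
 *
 *	if (s->ops)
 *		s->ops->clean(vgpu, ALL_ENGINES);
 *	ret = new_ops->init(vgpu, ALL_ENGINES);
 *
 * ALL_ENGINES is the i915 mask covering every engine; the real switch
 * logic lives in the scheduler/submission code, not in this header.
 */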
struct intel_vgpu {
	struct intel_gvt *gvt;
	struct mutex vgpu_lock;
	int id;
	unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
	bool active;
	bool pv_notified;
	bool failsafe;
	unsigned int resetting_eng;

	/* Both sched_data and sched_ctl can be seen as part of the global
	 * GVT scheduler structure, so the two fields below are protected
	 * by sched_lock, not vgpu_lock.
	 */
	void *sched_data;
	struct vgpu_sched_ctl sched_ctl;

	struct intel_vgpu_fence fence;
	struct intel_vgpu_gm gm;
	struct intel_vgpu_cfg_space cfg_space;
	struct intel_vgpu_mmio mmio;
	struct intel_vgpu_irq irq;
	struct intel_vgpu_gtt gtt;
	struct intel_vgpu_opregion opregion;
	struct intel_vgpu_display display;
	struct intel_vgpu_submission submission;
	struct radix_tree_root page_track_tree;
	u32 hws_pga[I915_NUM_ENGINES];
	/* Set on PCI_D3, reset on DMLR, not reflecting the actual PM state */
	bool d3_entered;

	struct dentry *debugfs;

	/* Hypervisor-specific device state. */
	void *vdev;

	struct list_head dmabuf_obj_list_head;
	struct mutex dmabuf_lock;
	struct idr object_idr;
	struct intel_vgpu_vblank_timer vblank_timer;

	u32 scan_nonprivbb;
};

static inline void *intel_vgpu_vdev(struct intel_vgpu *vgpu)
{
	return vgpu->vdev;
}

/* Check whether a return value indicates an unhealthy VM */
#define vgpu_is_vm_unhealthy(ret_val) \
	(((ret_val) == -EBADRQC) || ((ret_val) == -EFAULT))

struct intel_gvt_gm {
	unsigned long vgpu_allocated_low_gm_size;
	unsigned long vgpu_allocated_high_gm_size;
};

struct intel_gvt_fence {
	unsigned long vgpu_allocated_fence_num;
};

/* Special MMIO blocks. */
struct gvt_mmio_block {
	unsigned int device;
	i915_reg_t offset;
	unsigned int size;
	gvt_mmio_func read;
	gvt_mmio_func write;
};

#define INTEL_GVT_MMIO_HASH_BITS 11

struct intel_gvt_mmio {
	u16 *mmio_attribute;
/* Register contains RO bits */
#define F_RO		(1 << 0)
/* Register contains graphics address */
#define F_GMADR		(1 << 1)
/* Mode mask registers with high 16 bits as the mask bits */
#define F_MODE_MASK	(1 << 2)
/* This reg can be accessed by GPU commands */
#define F_CMD_ACCESS	(1 << 3)
/* This reg has been accessed by a VM */
#define F_ACCESSED	(1 << 4)
/* This reg requires save & restore during host PM suspend/resume */
#define F_PM_SAVE	(1 << 5)
/* This reg could be accessed by unaligned address */
#define F_UNALIGN	(1 << 6)
/* This reg is in GVT's mmio save-restore list and in the hardware
 * logical context image
 */
#define F_SR_IN_CTX	(1 << 7)
/* Value of command write of this reg needs to be patched */
#define F_CMD_WRITE_PATCH	(1 << 8)

	struct gvt_mmio_block *mmio_block;
	unsigned int num_mmio_block;

	DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
	unsigned long num_tracked_mmio;
};

struct intel_gvt_firmware {
	void *cfg_space;
	void *mmio;
	bool firmware_loaded;
};

#define NR_MAX_INTEL_VGPU_TYPES 20
struct intel_vgpu_type {
	char name[16];
	unsigned int avail_instance;
	unsigned int low_gm_size;
	unsigned int high_gm_size;
	unsigned int fence;
	unsigned int weight;
	enum intel_vgpu_edid resolution;
};
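/*
 * Illustrative sketch: mmio_attribute above holds one u16 of F_* flags per
 * 32-bit register, indexed by (offset >> 2). A hypothetical accessor,
 * mirroring the real helpers defined near the end of this header:
 *
 *	static inline bool mmio_is_ro(struct intel_gvt *gvt, u32 offset)
 *	{
 *		return gvt->mmio.mmio_attribute[offset >> 2] & F_RO;
 *	}
 */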
struct intel_gvt {
	/* GVT-scope lock, protecting GVT itself and any resources not yet
	 * covered by a more specific lock (the vgpu and scheduler locks).
	 */
	struct mutex lock;
	/* Scheduler-scope lock, protecting GVT and vGPU scheduling data */
	struct mutex sched_lock;

	struct intel_gt *gt;
	struct idr vgpu_idr;	/* vGPU IDR pool */

	struct intel_gvt_device_info device_info;
	struct intel_gvt_gm gm;
	struct intel_gvt_fence fence;
	struct intel_gvt_mmio mmio;
	struct intel_gvt_firmware firmware;
	struct intel_gvt_irq irq;
	struct intel_gvt_gtt gtt;
	struct intel_gvt_workload_scheduler scheduler;
	struct notifier_block shadow_ctx_notifier_block[I915_NUM_ENGINES];
	DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
	struct intel_vgpu_type *types;
	unsigned int num_types;
	struct intel_vgpu *idle_vgpu;

	struct task_struct *service_thread;
	wait_queue_head_t service_thread_wq;

	/* service_request is only manipulated with atomic bit operations,
	 * so the big gvt lock is not needed to protect it.
	 */
	unsigned long service_request;

	struct {
		struct engine_mmio *mmio;
		int ctx_mmio_count[I915_NUM_ENGINES];
		u32 *tlb_mmio_offset_list;
		u32 tlb_mmio_offset_list_cnt;
		u32 *mocs_mmio_offset_list;
		u32 mocs_mmio_offset_list_cnt;
	} engine_mmio_list;
	bool is_reg_whitelist_updated;

	struct dentry *debugfs_root;
};

static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915)
{
	return i915->gvt;
}

enum {
	/* Scheduling triggered by the timer */
	INTEL_GVT_REQUEST_SCHED = 0,

	/* Scheduling triggered by an event */
	INTEL_GVT_REQUEST_EVENT_SCHED = 1,

	/* per-vGPU vblank emulation request */
	INTEL_GVT_REQUEST_EMULATE_VBLANK = 2,
	INTEL_GVT_REQUEST_EMULATE_VBLANK_MAX = INTEL_GVT_REQUEST_EMULATE_VBLANK
		+ GVT_MAX_VGPU,
};

static inline void intel_gvt_request_service(struct intel_gvt *gvt,
					     int service)
{
	set_bit(service, (void *)&gvt->service_request);
	wake_up(&gvt->service_thread_wq);
}

void intel_gvt_free_firmware(struct intel_gvt *gvt);
int intel_gvt_load_firmware(struct intel_gvt *gvt);

/* Aperture/GM space definitions for GVT device */
#define MB_TO_BYTES(mb) ((mb) << 20ULL)
#define BYTES_TO_MB(b) ((b) >> 20ULL)

#define HOST_LOW_GM_SIZE MB_TO_BYTES(128)
#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
#define HOST_FENCE 4

#define gvt_to_ggtt(gvt)	((gvt)->gt->ggtt)

/* Aperture/GM space definitions for GVT device */
#define gvt_aperture_sz(gvt)	  gvt_to_ggtt(gvt)->mappable_end
#define gvt_aperture_pa_base(gvt) gvt_to_ggtt(gvt)->gmadr.start

#define gvt_ggtt_gm_sz(gvt)	gvt_to_ggtt(gvt)->vm.total
#define gvt_ggtt_sz(gvt)	(gvt_to_ggtt(gvt)->vm.total >> PAGE_SHIFT << 3)
#define gvt_hidden_sz(gvt)	(gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))

#define gvt_aperture_gmadr_base(gvt) (0)
#define gvt_aperture_gmadr_end(gvt) (gvt_aperture_gmadr_base(gvt) \
				     + gvt_aperture_sz(gvt) - 1)

#define gvt_hidden_gmadr_base(gvt) (gvt_aperture_gmadr_base(gvt) \
				    + gvt_aperture_sz(gvt))
#define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \
				   + gvt_hidden_sz(gvt) - 1)

#define gvt_fence_sz(gvt) (gvt_to_ggtt(gvt)->num_fences)
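/*
 * Worked example (illustrative; the sizes are assumed, not taken from any
 * particular platform): with a 256MB mappable aperture and a 4GB GGTT, the
 * macros above split the graphics memory address (GMADR) space as
 *
 *	aperture (CPU-mappable): [0, 256MB - 1]
 *	hidden (GPU-only):       [256MB, 4GB - 1]
 *
 * since gvt_hidden_sz() = gvt_ggtt_gm_sz() - gvt_aperture_sz() and the
 * hidden range starts right where the aperture ends.
 */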
/* Aperture/GM space definitions for vGPU */
#define vgpu_aperture_offset(vgpu)	((vgpu)->gm.low_gm_node.start)
#define vgpu_hidden_offset(vgpu)	((vgpu)->gm.high_gm_node.start)
#define vgpu_aperture_sz(vgpu)		((vgpu)->gm.aperture_sz)
#define vgpu_hidden_sz(vgpu)		((vgpu)->gm.hidden_sz)

#define vgpu_aperture_pa_base(vgpu) \
	(gvt_aperture_pa_base(vgpu->gvt) + vgpu_aperture_offset(vgpu))

#define vgpu_ggtt_gm_sz(vgpu) ((vgpu)->gm.aperture_sz + (vgpu)->gm.hidden_sz)

#define vgpu_aperture_pa_end(vgpu) \
	(vgpu_aperture_pa_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)

#define vgpu_aperture_gmadr_base(vgpu) (vgpu_aperture_offset(vgpu))
#define vgpu_aperture_gmadr_end(vgpu) \
	(vgpu_aperture_gmadr_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)

#define vgpu_hidden_gmadr_base(vgpu) (vgpu_hidden_offset(vgpu))
#define vgpu_hidden_gmadr_end(vgpu) \
	(vgpu_hidden_gmadr_base(vgpu) + vgpu_hidden_sz(vgpu) - 1)

#define vgpu_fence_base(vgpu) (vgpu->fence.base)
#define vgpu_fence_sz(vgpu) (vgpu->fence.size)

/* ring context size, i.e. the first 0x50 dwords (320 bytes) */
#define RING_CTX_SIZE 320

struct intel_vgpu_creation_params {
	__u64 handle;
	__u64 low_gm_sz;  /* in MB */
	__u64 high_gm_sz; /* in MB */
	__u64 fence_sz;
	__u64 resolution;
	__s32 primary;
	__u64 vgpu_id;

	__u32 weight;
};

int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
			      struct intel_vgpu_creation_params *param);
void intel_vgpu_reset_resource(struct intel_vgpu *vgpu);
void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
			    u32 fence, u64 value);

/* Macros for easily accessing a vGPU's virtual/shadow registers.
 * Separate variants exist for typed MMIO registers (i915_reg_t) and for
 * raw offsets.
 */
#define vgpu_vreg_t(vgpu, reg) \
	(*(u32 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
#define vgpu_vreg(vgpu, offset) \
	(*(u32 *)(vgpu->mmio.vreg + (offset)))
#define vgpu_vreg64_t(vgpu, reg) \
	(*(u64 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
#define vgpu_vreg64(vgpu, offset) \
	(*(u64 *)(vgpu->mmio.vreg + (offset)))
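/*
 * Illustrative usage (the register and raw offset are assumptions made for
 * the example): both macro flavours resolve to the same backing store,
 * vgpu->mmio.vreg, and can appear on either side of an assignment:
 *
 *	vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) = 0;  // typed i915_reg_t form
 *	val = vgpu_vreg(vgpu, 0x2030);            // raw-offset form
 */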
#define for_each_active_vgpu(gvt, vgpu, id) \
	idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
		for_each_if(vgpu->active)

static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu,
					    u32 offset, u32 val, bool low)
{
	u32 *pval;

	/* BAR offset should be 32-bit aligned */
	offset = rounddown(offset, 4);
	pval = (u32 *)(vgpu_cfg_space(vgpu) + offset);

	if (low) {
		/*
		 * Only update bits 31..4; leave bits 3..0 unchanged.
		 */
		*pval = (val & GENMASK(31, 4)) | (*pval & GENMASK(3, 0));
	} else {
		*pval = val;
	}
}

int intel_gvt_init_vgpu_types(struct intel_gvt *gvt);
void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt);

struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt);
void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu);
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
					 struct intel_vgpu_type *type);
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_release_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
				 intel_engine_mask_t engine_mask);
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu);

/* Validating GM address ranges */
#define vgpu_gmadr_is_aperture(vgpu, gmadr) \
	((gmadr >= vgpu_aperture_gmadr_base(vgpu)) && \
	 (gmadr <= vgpu_aperture_gmadr_end(vgpu)))

#define vgpu_gmadr_is_hidden(vgpu, gmadr) \
	((gmadr >= vgpu_hidden_gmadr_base(vgpu)) && \
	 (gmadr <= vgpu_hidden_gmadr_end(vgpu)))

#define vgpu_gmadr_is_valid(vgpu, gmadr) \
	((vgpu_gmadr_is_aperture(vgpu, gmadr) || \
	  (vgpu_gmadr_is_hidden(vgpu, gmadr))))

#define gvt_gmadr_is_aperture(gvt, gmadr) \
	((gmadr >= gvt_aperture_gmadr_base(gvt)) && \
	 (gmadr <= gvt_aperture_gmadr_end(gvt)))

#define gvt_gmadr_is_hidden(gvt, gmadr) \
	((gmadr >= gvt_hidden_gmadr_base(gvt)) && \
	 (gmadr <= gvt_hidden_gmadr_end(gvt)))

#define gvt_gmadr_is_valid(gvt, gmadr) \
	(gvt_gmadr_is_aperture(gvt, gmadr) || \
	 gvt_gmadr_is_hidden(gvt, gmadr))
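/*
 * Illustrative sketch (hypothetical caller): a guest graphics address is
 * range-checked against the two vGPU ranges before being translated to a
 * host address with the helpers declared just below:
 *
 *	if (!vgpu_gmadr_is_valid(vgpu, gmadr))
 *		return -EINVAL;
 *	ret = intel_gvt_ggtt_gmadr_g2h(vgpu, gmadr, &h_addr);
 */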
bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size);
int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr);
int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr);
int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
			     unsigned long *h_index);
int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
			     unsigned long *g_index);

void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
			       bool primary);
void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu);

int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
				void *p_data, unsigned int bytes);

int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
				 void *p_data, unsigned int bytes);

void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected);

static inline u64 intel_vgpu_get_bar_gpa(struct intel_vgpu *vgpu, int bar)
{
	/* The BAR is 64-bit. */
	return (*(u64 *)(vgpu->cfg_space.virtual_cfg_space + bar)) &
			PCI_BASE_ADDRESS_MEM_MASK;
}

void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
int intel_vgpu_init_opregion(struct intel_vgpu *vgpu);
int intel_vgpu_opregion_base_write_handler(struct intel_vgpu *vgpu, u32 gpa);

int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
void populate_pvinfo_page(struct intel_vgpu *vgpu);

int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason);

struct intel_gvt_ops {
	int (*emulate_cfg_read)(struct intel_vgpu *, unsigned int, void *,
				unsigned int);
	int (*emulate_cfg_write)(struct intel_vgpu *, unsigned int, void *,
				 unsigned int);
	int (*emulate_mmio_read)(struct intel_vgpu *, u64, void *,
				 unsigned int);
	int (*emulate_mmio_write)(struct intel_vgpu *, u64, void *,
				  unsigned int);
	struct intel_vgpu *(*vgpu_create)(struct intel_gvt *,
					  struct intel_vgpu_type *);
	void (*vgpu_destroy)(struct intel_vgpu *vgpu);
	void (*vgpu_release)(struct intel_vgpu *vgpu);
	void (*vgpu_reset)(struct intel_vgpu *);
	void (*vgpu_activate)(struct intel_vgpu *);
	void (*vgpu_deactivate)(struct intel_vgpu *);
	struct intel_vgpu_type *(*gvt_find_vgpu_type)(
		struct intel_gvt *gvt, unsigned int type_group_id);
	bool (*get_gvt_attrs)(struct attribute_group ***intel_vgpu_type_groups);
	int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *);
	int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
	int (*write_protect_handler)(struct intel_vgpu *, u64, void *,
				     unsigned int);
	void (*emulate_hotplug)(struct intel_vgpu *vgpu, bool connected);
};

enum {
	GVT_FAILSAFE_UNSUPPORTED_GUEST,
	GVT_FAILSAFE_INSUFFICIENT_RESOURCE,
	GVT_FAILSAFE_GUEST_ERR,
};

static inline void mmio_hw_access_pre(struct intel_gt *gt)
{
	intel_runtime_pm_get(gt->uncore->rpm);
}

static inline void mmio_hw_access_post(struct intel_gt *gt)
{
	intel_runtime_pm_put_unchecked(gt->uncore->rpm);
}

/**
 * intel_gvt_mmio_set_accessed - mark an MMIO as accessed
 * @gvt: a GVT device
 * @offset: register offset
 *
 */
static inline void intel_gvt_mmio_set_accessed(
			struct intel_gvt *gvt, unsigned int offset)
{
	gvt->mmio.mmio_attribute[offset >> 2] |= F_ACCESSED;
}

/**
 * intel_gvt_mmio_is_cmd_accessible - check if an MMIO can be accessed by
 * GPU commands
 * @gvt: a GVT device
 * @offset: register offset
 *
 * Returns:
 * True if the MMIO can be accessed by GPU commands
 */
static inline bool intel_gvt_mmio_is_cmd_accessible(
			struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_ACCESS;
}

/**
 * intel_gvt_mmio_set_cmd_accessible -
 * mark an MMIO as accessible by GPU commands
 * @gvt: a GVT device
 * @offset: register offset
 *
 */
static inline void intel_gvt_mmio_set_cmd_accessible(
			struct intel_gvt *gvt, unsigned int offset)
{
	gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_ACCESS;
}
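/*
 * Illustrative pairing (hypothetical call sites): a register is typically
 * flagged once while the tracked-MMIO tables are built and then tested on
 * every access, e.g.:
 *
 *	intel_gvt_mmio_set_cmd_accessible(gvt, offset);
 *	...
 *	if (!intel_gvt_mmio_is_cmd_accessible(gvt, offset))
 *		return -EBADRQC;
 */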
/**
 * intel_gvt_mmio_is_unalign - check if an MMIO can be accessed with an
 * unaligned address
 * @gvt: a GVT device
 * @offset: register offset
 *
 */
static inline bool intel_gvt_mmio_is_unalign(
			struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_UNALIGN;
}

/**
 * intel_gvt_mmio_has_mode_mask - check if an MMIO has a mode mask
 * @gvt: a GVT device
 * @offset: register offset
 *
 * Returns:
 * True if the MMIO has a mode mask in its upper 16 bits, false otherwise.
 *
 */
static inline bool intel_gvt_mmio_has_mode_mask(
			struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_MODE_MASK;
}

/**
 * intel_gvt_mmio_is_sr_in_ctx -
 * check if an MMIO has the F_SR_IN_CTX mask
 * @gvt: a GVT device
 * @offset: register offset
 *
 * Returns:
 * True if the MMIO has the F_SR_IN_CTX mask, false otherwise.
 *
 */
static inline bool intel_gvt_mmio_is_sr_in_ctx(
			struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_SR_IN_CTX;
}

/**
 * intel_gvt_mmio_set_sr_in_ctx -
 * mark an MMIO as being in GVT's MMIO save-restore list and in the
 * hardware logical context image
 * @gvt: a GVT device
 * @offset: register offset
 *
 */
static inline void intel_gvt_mmio_set_sr_in_ctx(
			struct intel_gvt *gvt, unsigned int offset)
{
	gvt->mmio.mmio_attribute[offset >> 2] |= F_SR_IN_CTX;
}

void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
/**
 * intel_gvt_mmio_set_cmd_write_patch -
 * mark an MMIO whose command write needs to be patched
 * @gvt: a GVT device
 * @offset: register offset
 *
 */
static inline void intel_gvt_mmio_set_cmd_write_patch(
			struct intel_gvt *gvt, unsigned int offset)
{
	gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_WRITE_PATCH;
}

/**
 * intel_gvt_mmio_is_cmd_write_patch - check if a command write to an MMIO
 * needs to be patched
 * @gvt: a GVT device
 * @offset: register offset
 *
 * Returns:
 * True if a GPU command write to the MMIO should be patched
 */
static inline bool intel_gvt_mmio_is_cmd_write_patch(
			struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_WRITE_PATCH;
}

void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_debugfs_init(struct intel_gvt *gvt);
void intel_gvt_debugfs_clean(struct intel_gvt *gvt);

int intel_gvt_pm_resume(struct intel_gvt *gvt);

#include "trace.h"
#include "mpt.h"

#endif