Lines Matching +full:se +full:- +full:pos
4 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
62 vgpu_gmadr_is_aperture(vgpu, addr + size - 1)) in intel_gvt_ggtt_validate_range()
65 vgpu_gmadr_is_hidden(vgpu, addr + size - 1)) in intel_gvt_ggtt_validate_range()
76 struct drm_i915_private *i915 = vgpu->gvt->gt->i915; in intel_gvt_ggtt_gmadr_g2h()
78 if (drm_WARN(&i915->drm, !vgpu_gmadr_is_valid(vgpu, g_addr), in intel_gvt_ggtt_gmadr_g2h()
80 return -EACCES; in intel_gvt_ggtt_gmadr_g2h()
84 + (g_addr - vgpu_aperture_offset(vgpu)); in intel_gvt_ggtt_gmadr_g2h()
87 + (g_addr - vgpu_hidden_offset(vgpu)); in intel_gvt_ggtt_gmadr_g2h()
94 struct drm_i915_private *i915 = vgpu->gvt->gt->i915; in intel_gvt_ggtt_gmadr_h2g()
96 if (drm_WARN(&i915->drm, !gvt_gmadr_is_valid(vgpu->gvt, h_addr), in intel_gvt_ggtt_gmadr_h2g()
98 return -EACCES; in intel_gvt_ggtt_gmadr_h2g()
100 if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr)) in intel_gvt_ggtt_gmadr_h2g()
102 + (h_addr - gvt_aperture_gmadr_base(vgpu->gvt)); in intel_gvt_ggtt_gmadr_h2g()
105 + (h_addr - gvt_hidden_gmadr_base(vgpu->gvt)); in intel_gvt_ggtt_gmadr_h2g()
154 (e)->type = t; \
155 (e)->pdev = p; \
156 memcpy(&(e)->val64, &v, sizeof(v)); \
162 * - type of next level page table
163 * - type of entry inside this level page table
164 * - type of entry with PSE set
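The three columns listed above are what the later helpers (get_next_pt_type(), get_entry_type(), get_pse_type()) read back out of the type table. A self-contained, hedged sketch of the "entry with PSE set" column only — using a local stand-in enum, not the actual GTT_TYPE_* values or gtt_type_table[] from gtt.c:

/* Illustrative sketch: PSE in a PDE normally selects a 2MB page,
 * PSE in a PDP entry a 1GB page; other types have no huge-page form. */
enum sketch_gtt_type {
	SKETCH_PDE_ENTRY,
	SKETCH_PDP_ENTRY,
	SKETCH_PTE_2M_ENTRY,
	SKETCH_PTE_1G_ENTRY,
	SKETCH_PTE_4K_ENTRY,
};

static enum sketch_gtt_type sketch_pse_type(enum sketch_gtt_type t)
{
	switch (t) {
	case SKETCH_PDE_ENTRY:
		return SKETCH_PTE_2M_ENTRY;	/* PSE in a PDE  => 2MB page */
	case SKETCH_PDP_ENTRY:
		return SKETCH_PTE_1G_ENTRY;	/* PSE in a PDPE => 1GB page */
	default:
		return t;			/* no huge-page form */
	}
}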
281 void __iomem *addr = (gen8_pte_t __iomem *)ggtt->gsm + index; in read_pte64()
289 intel_uncore_write(gt->uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); in ggtt_invalidate()
295 void __iomem *addr = (gen8_pte_t __iomem *)ggtt->gsm + index; in write_pte64()
305 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; in gtt_get_entry64()
308 if (WARN_ON(info->gtt_entry_size != 8)) in gtt_get_entry64()
309 return -EINVAL; in gtt_get_entry64()
313 (index << info->gtt_entry_size_shift), in gtt_get_entry64()
314 &e->val64, 8); in gtt_get_entry64()
318 e->val64 = read_pte64(vgpu->gvt->gt->ggtt, index); in gtt_get_entry64()
320 e->val64 = *((u64 *)pt + index); in gtt_get_entry64()
330 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; in gtt_set_entry64()
333 if (WARN_ON(info->gtt_entry_size != 8)) in gtt_set_entry64()
334 return -EINVAL; in gtt_set_entry64()
338 (index << info->gtt_entry_size_shift), in gtt_set_entry64()
339 &e->val64, 8); in gtt_set_entry64()
343 write_pte64(vgpu->gvt->gt->ggtt, index, e->val64); in gtt_set_entry64()
345 *((u64 *)pt + index) = e->val64; in gtt_set_entry64()
352 #define ADDR_1G_MASK GENMASK_ULL(GTT_HAW - 1, 30)
353 #define ADDR_2M_MASK GENMASK_ULL(GTT_HAW - 1, 21)
354 #define ADDR_64K_MASK GENMASK_ULL(GTT_HAW - 1, 16)
355 #define ADDR_4K_MASK GENMASK_ULL(GTT_HAW - 1, 12)
366 if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) in gen8_gtt_get_pfn()
367 pfn = (e->val64 & ADDR_1G_MASK) >> PAGE_SHIFT; in gen8_gtt_get_pfn()
368 else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) in gen8_gtt_get_pfn()
369 pfn = (e->val64 & ADDR_2M_MASK) >> PAGE_SHIFT; in gen8_gtt_get_pfn()
370 else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY) in gen8_gtt_get_pfn()
371 pfn = (e->val64 & ADDR_64K_MASK) >> PAGE_SHIFT; in gen8_gtt_get_pfn()
373 pfn = (e->val64 & ADDR_4K_MASK) >> PAGE_SHIFT; in gen8_gtt_get_pfn()
379 if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) { in gen8_gtt_set_pfn()
380 e->val64 &= ~ADDR_1G_MASK; in gen8_gtt_set_pfn()
382 } else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) { in gen8_gtt_set_pfn()
383 e->val64 &= ~ADDR_2M_MASK; in gen8_gtt_set_pfn()
385 } else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY) { in gen8_gtt_set_pfn()
386 e->val64 &= ~ADDR_64K_MASK; in gen8_gtt_set_pfn()
389 e->val64 &= ~ADDR_4K_MASK; in gen8_gtt_set_pfn()
393 e->val64 |= (pfn << PAGE_SHIFT); in gen8_gtt_set_pfn()
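The ADDR_*_MASK definitions and the get/set_pfn helpers above are plain mask-and-shift arithmetic on val64. A standalone sketch of the same math, assuming a host address width (GTT_HAW) of 46 bits — that value is an assumption, not quoted from this listing:

/* Minimal user-space sketch of extracting the PFN from a 2MB PTE. */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_HAW	46
#define MASK(hi, lo)	(((~0ULL) >> (63 - (hi))) & ~((1ULL << (lo)) - 1))

int main(void)
{
	uint64_t pte_2m  = 0x00000001e0000093ULL;	/* made-up example PTE   */
	uint64_t addr_2m = MASK(SKETCH_HAW - 1, 21);	/* like ADDR_2M_MASK     */
	uint64_t pfn     = (pte_2m & addr_2m) >> 12;	/* 12 == PAGE_SHIFT      */

	printf("pfn = %#llx\n", (unsigned long long)pfn);
	return 0;
}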
398 return !!(e->val64 & _PAGE_PSE); in gen8_gtt_test_pse()
404 switch (e->type) { in gen8_gtt_clear_pse()
406 e->val64 &= ~_PAGE_PSE; in gen8_gtt_clear_pse()
407 e->type = GTT_TYPE_PPGTT_PDE_ENTRY; in gen8_gtt_clear_pse()
410 e->type = GTT_TYPE_PPGTT_PDP_ENTRY; in gen8_gtt_clear_pse()
411 e->val64 &= ~_PAGE_PSE; in gen8_gtt_clear_pse()
421 if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY)) in gen8_gtt_test_ips()
424 return !!(e->val64 & GEN8_PDE_IPS_64K); in gen8_gtt_test_ips()
429 if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY)) in gen8_gtt_clear_ips()
432 e->val64 &= ~GEN8_PDE_IPS_64K; in gen8_gtt_clear_ips()
442 if (e->type == GTT_TYPE_PPGTT_ROOT_L3_ENTRY in gen8_gtt_test_present()
443 || e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) in gen8_gtt_test_present()
444 return (e->val64 != 0); in gen8_gtt_test_present()
446 return (e->val64 & GEN8_PAGE_PRESENT); in gen8_gtt_test_present()
451 e->val64 &= ~GEN8_PAGE_PRESENT; in gtt_entry_clear_present()
456 e->val64 |= GEN8_PAGE_PRESENT; in gtt_entry_set_present()
461 return !!(e->val64 & GTT_SPTE_FLAG_64K_SPLITED); in gen8_gtt_test_64k_splited()
466 e->val64 |= GTT_SPTE_FLAG_64K_SPLITED; in gen8_gtt_set_64k_splited()
471 e->val64 &= ~GTT_SPTE_FLAG_64K_SPLITED; in gen8_gtt_clear_64k_splited()
475 * Per-platform GMA routines.
529 switch (entry->type) { in update_entry_type_for_real()
532 if (pte_ops->test_pse(entry)) in update_entry_type_for_real()
533 entry->type = get_pse_type(entry->type); in update_entry_type_for_real()
537 entry->type = get_pse_type(entry->type); in update_entry_type_for_real()
540 GEM_BUG_ON(!gtt_type_is_entry(entry->type)); in update_entry_type_for_real()
543 GEM_BUG_ON(entry->type == GTT_TYPE_INVALID); in update_entry_type_for_real()
553 const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; in _ppgtt_get_root_entry()
555 GEM_BUG_ON(mm->type != INTEL_GVT_MM_PPGTT); in _ppgtt_get_root_entry()
557 entry->type = mm->ppgtt_mm.root_entry_type; in _ppgtt_get_root_entry()
558 pte_ops->get_entry(guest ? mm->ppgtt_mm.guest_pdps : in _ppgtt_get_root_entry()
559 mm->ppgtt_mm.shadow_pdps, in _ppgtt_get_root_entry()
560 entry, index, false, 0, mm->vgpu); in _ppgtt_get_root_entry()
580 const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; in _ppgtt_set_root_entry()
582 pte_ops->set_entry(guest ? mm->ppgtt_mm.guest_pdps : in _ppgtt_set_root_entry()
583 mm->ppgtt_mm.shadow_pdps, in _ppgtt_set_root_entry()
584 entry, index, false, 0, mm->vgpu); in _ppgtt_set_root_entry()
596 const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; in ggtt_get_guest_entry()
598 GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT); in ggtt_get_guest_entry()
600 entry->type = GTT_TYPE_GGTT_PTE; in ggtt_get_guest_entry()
601 pte_ops->get_entry(mm->ggtt_mm.virtual_ggtt, entry, index, in ggtt_get_guest_entry()
602 false, 0, mm->vgpu); in ggtt_get_guest_entry()
608 const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; in ggtt_set_guest_entry()
610 GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT); in ggtt_set_guest_entry()
612 pte_ops->set_entry(mm->ggtt_mm.virtual_ggtt, entry, index, in ggtt_set_guest_entry()
613 false, 0, mm->vgpu); in ggtt_set_guest_entry()
619 const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; in ggtt_get_host_entry()
621 GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT); in ggtt_get_host_entry()
623 pte_ops->get_entry(NULL, entry, index, false, 0, mm->vgpu); in ggtt_get_host_entry()
629 const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; in ggtt_set_host_entry()
632 GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT); in ggtt_set_host_entry()
634 if (vgpu_gmadr_is_aperture(mm->vgpu, index << I915_GTT_PAGE_SHIFT)) { in ggtt_set_host_entry()
635 offset -= (vgpu_aperture_gmadr_base(mm->vgpu) >> PAGE_SHIFT); in ggtt_set_host_entry()
636 mm->ggtt_mm.host_ggtt_aperture[offset] = entry->val64; in ggtt_set_host_entry()
637 } else if (vgpu_gmadr_is_hidden(mm->vgpu, index << I915_GTT_PAGE_SHIFT)) { in ggtt_set_host_entry()
638 offset -= (vgpu_hidden_gmadr_base(mm->vgpu) >> PAGE_SHIFT); in ggtt_set_host_entry()
639 mm->ggtt_mm.host_ggtt_hidden[offset] = entry->val64; in ggtt_set_host_entry()
642 pte_ops->set_entry(NULL, entry, index, false, 0, mm->vgpu); in ggtt_set_host_entry()
654 struct intel_gvt *gvt = spt->vgpu->gvt; in ppgtt_spt_get_entry()
655 const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; in ppgtt_spt_get_entry()
658 e->type = get_entry_type(type); in ppgtt_spt_get_entry()
660 if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n")) in ppgtt_spt_get_entry()
661 return -EINVAL; in ppgtt_spt_get_entry()
663 ret = ops->get_entry(page_table, e, index, guest, in ppgtt_spt_get_entry()
664 spt->guest_page.gfn << I915_GTT_PAGE_SHIFT, in ppgtt_spt_get_entry()
665 spt->vgpu); in ppgtt_spt_get_entry()
670 spt->guest_page.pde_ips : false); in ppgtt_spt_get_entry()
673 type, e->type, index, e->val64); in ppgtt_spt_get_entry()
683 struct intel_gvt *gvt = spt->vgpu->gvt; in ppgtt_spt_set_entry()
684 const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; in ppgtt_spt_set_entry()
686 if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n")) in ppgtt_spt_set_entry()
687 return -EINVAL; in ppgtt_spt_set_entry()
690 type, e->type, index, e->val64); in ppgtt_spt_set_entry()
692 return ops->set_entry(page_table, e, index, guest, in ppgtt_spt_set_entry()
693 spt->guest_page.gfn << I915_GTT_PAGE_SHIFT, in ppgtt_spt_set_entry()
694 spt->vgpu); in ppgtt_spt_set_entry()
699 spt->guest_page.type, e, index, true)
703 spt->guest_page.type, e, index, true)
706 ppgtt_spt_get_entry(spt, spt->shadow_page.vaddr, \
707 spt->shadow_page.type, e, index, false)
710 ppgtt_spt_set_entry(spt, spt->shadow_page.vaddr, \
711 spt->shadow_page.type, e, index, false)
721 spt->shadow_page.page = alloc_page(gfp_mask); in alloc_spt()
722 if (!spt->shadow_page.page) { in alloc_spt()
731 __free_page(spt->shadow_page.page); in free_spt()
740 struct device *kdev = spt->vgpu->gvt->gt->i915->drm.dev; in ppgtt_free_spt()
742 trace_spt_free(spt->vgpu->id, spt, spt->guest_page.type); in ppgtt_free_spt()
744 dma_unmap_page(kdev, spt->shadow_page.mfn << I915_GTT_PAGE_SHIFT, 4096, in ppgtt_free_spt()
747 radix_tree_delete(&spt->vgpu->gtt.spt_tree, spt->shadow_page.mfn); in ppgtt_free_spt()
749 if (spt->guest_page.gfn) { in ppgtt_free_spt()
750 if (spt->guest_page.oos_page) in ppgtt_free_spt()
751 detach_oos_page(spt->vgpu, spt->guest_page.oos_page); in ppgtt_free_spt()
753 intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn); in ppgtt_free_spt()
756 list_del_init(&spt->post_shadow_list); in ppgtt_free_spt()
768 radix_tree_for_each_slot(slot, &vgpu->gtt.spt_tree, &iter, 0) { in ppgtt_free_all_spt()
770 list_move(&spt->post_shadow_list, &all_spt); in ppgtt_free_all_spt()
786 struct intel_vgpu_ppgtt_spt *spt = page_track->priv_data; in ppgtt_write_protection_handler()
791 return -EINVAL; in ppgtt_write_protection_handler()
806 if (track && track->handler == ppgtt_write_protection_handler) in intel_vgpu_find_spt_by_gfn()
807 return track->priv_data; in intel_vgpu_find_spt_by_gfn()
816 return radix_tree_lookup(&vgpu->gtt.spt_tree, mfn); in intel_vgpu_find_spt_by_mfn()
825 struct device *kdev = vgpu->gvt->gt->i915->drm.dev; in ppgtt_alloc_spt()
833 if (reclaim_one_ppgtt_mm(vgpu->gvt)) in ppgtt_alloc_spt()
837 return ERR_PTR(-ENOMEM); in ppgtt_alloc_spt()
840 spt->vgpu = vgpu; in ppgtt_alloc_spt()
841 atomic_set(&spt->refcount, 1); in ppgtt_alloc_spt()
842 INIT_LIST_HEAD(&spt->post_shadow_list); in ppgtt_alloc_spt()
847 spt->shadow_page.type = type; in ppgtt_alloc_spt()
848 daddr = dma_map_page(kdev, spt->shadow_page.page, in ppgtt_alloc_spt()
852 ret = -EINVAL; in ppgtt_alloc_spt()
855 spt->shadow_page.vaddr = page_address(spt->shadow_page.page); in ppgtt_alloc_spt()
856 spt->shadow_page.mfn = daddr >> I915_GTT_PAGE_SHIFT; in ppgtt_alloc_spt()
858 ret = radix_tree_insert(&vgpu->gtt.spt_tree, spt->shadow_page.mfn, spt); in ppgtt_alloc_spt()
893 spt->guest_page.type = type; in ppgtt_alloc_spt_gfn()
894 spt->guest_page.gfn = gfn; in ppgtt_alloc_spt_gfn()
895 spt->guest_page.pde_ips = guest_pde_ips; in ppgtt_alloc_spt_gfn()
897 trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn); in ppgtt_alloc_spt_gfn()
903 ((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)
910 i += spt->guest_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \
912 spt->vgpu->gvt->gtt.pte_ops->test_present(e))
916 i += spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \
918 spt->vgpu->gvt->gtt.pte_ops->test_present(e))
922 i += (spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1)) \
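The stride in the iteration macros above reflects the 64KB page layout: when IPS is enabled, sixteen consecutive 4KB PTE slots describe a single 64KB page, so the walk advances by GTT_64K_PTE_STRIDE instead of 1. A small self-contained sketch of that skip pattern; the value 16 (64KB / 4KB) is assumed here rather than taken from the header:

#include <stdbool.h>
#include <stdint.h>

#define SKETCH_64K_PTE_STRIDE	16

static void sketch_walk(const uint64_t *pt, int nr_slots, bool pde_ips_64k)
{
	int step = pde_ips_64k ? SKETCH_64K_PTE_STRIDE : 1;

	for (int i = 0; i < nr_slots; i += step) {
		/* each visited slot covers a 64KB page when IPS is on,
		 * otherwise an ordinary 4KB page */
		(void)pt[i];
	}
}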
927 int v = atomic_read(&spt->refcount); in ppgtt_get_spt()
929 trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1)); in ppgtt_get_spt()
930 atomic_inc(&spt->refcount); in ppgtt_get_spt()
935 int v = atomic_read(&spt->refcount); in ppgtt_put_spt()
937 trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1)); in ppgtt_put_spt()
938 return atomic_dec_return(&spt->refcount); in ppgtt_put_spt()
946 struct drm_i915_private *i915 = vgpu->gvt->gt->i915; in ppgtt_invalidate_spt_by_shadow_entry()
947 const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; in ppgtt_invalidate_spt_by_shadow_entry()
951 GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(e->type))); in ppgtt_invalidate_spt_by_shadow_entry()
953 if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY in ppgtt_invalidate_spt_by_shadow_entry()
954 && e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) { in ppgtt_invalidate_spt_by_shadow_entry()
955 cur_pt_type = get_next_pt_type(e->type); in ppgtt_invalidate_spt_by_shadow_entry()
959 drm_WARN(&i915->drm, 1, in ppgtt_invalidate_spt_by_shadow_entry()
962 return -EINVAL; in ppgtt_invalidate_spt_by_shadow_entry()
967 if (ops->get_pfn(e) == in ppgtt_invalidate_spt_by_shadow_entry()
968 vgpu->gtt.scratch_pt[cur_pt_type].page_mfn) in ppgtt_invalidate_spt_by_shadow_entry()
971 s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e)); in ppgtt_invalidate_spt_by_shadow_entry()
974 ops->get_pfn(e)); in ppgtt_invalidate_spt_by_shadow_entry()
975 return -ENXIO; in ppgtt_invalidate_spt_by_shadow_entry()
983 struct intel_vgpu *vgpu = spt->vgpu; in ppgtt_invalidate_pte()
984 const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; in ppgtt_invalidate_pte()
988 pfn = ops->get_pfn(entry); in ppgtt_invalidate_pte()
989 type = spt->shadow_page.type; in ppgtt_invalidate_pte()
992 if (!pfn || pfn == vgpu->gtt.scratch_pt[type].page_mfn) in ppgtt_invalidate_pte()
1000 struct intel_vgpu *vgpu = spt->vgpu; in ppgtt_invalidate_spt()
1005 trace_spt_change(spt->vgpu->id, "die", spt, in ppgtt_invalidate_spt()
1006 spt->guest_page.gfn, spt->shadow_page.type); in ppgtt_invalidate_spt()
1032 spt->vgpu, &e); in ppgtt_invalidate_spt()
1041 trace_spt_change(spt->vgpu->id, "release", spt, in ppgtt_invalidate_spt()
1042 spt->guest_page.gfn, spt->shadow_page.type); in ppgtt_invalidate_spt()
1053 struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915; in vgpu_ips_enabled()
1072 const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; in ppgtt_populate_spt_by_guest_entry()
1077 GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(we->type))); in ppgtt_populate_spt_by_guest_entry()
1079 if (we->type == GTT_TYPE_PPGTT_PDE_ENTRY) in ppgtt_populate_spt_by_guest_entry()
1080 ips = vgpu_ips_enabled(vgpu) && ops->test_ips(we); in ppgtt_populate_spt_by_guest_entry()
1082 spt = intel_vgpu_find_spt_by_gfn(vgpu, ops->get_pfn(we)); in ppgtt_populate_spt_by_guest_entry()
1086 if (ips != spt->guest_page.pde_ips) { in ppgtt_populate_spt_by_guest_entry()
1087 spt->guest_page.pde_ips = ips; in ppgtt_populate_spt_by_guest_entry()
1090 clear_page(spt->shadow_page.vaddr); in ppgtt_populate_spt_by_guest_entry()
1098 int type = get_next_pt_type(we->type); in ppgtt_populate_spt_by_guest_entry()
1101 ret = -EINVAL; in ppgtt_populate_spt_by_guest_entry()
1105 spt = ppgtt_alloc_spt_gfn(vgpu, type, ops->get_pfn(we), ips); in ppgtt_populate_spt_by_guest_entry()
1111 ret = intel_vgpu_enable_page_track(vgpu, spt->guest_page.gfn); in ppgtt_populate_spt_by_guest_entry()
1119 trace_spt_change(vgpu->id, "new", spt, spt->guest_page.gfn, in ppgtt_populate_spt_by_guest_entry()
1120 spt->shadow_page.type); in ppgtt_populate_spt_by_guest_entry()
1129 spt, we->val64, we->type); in ppgtt_populate_spt_by_guest_entry()
1133 static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se, in ppgtt_generate_shadow_entry() argument
1136 const struct intel_gvt_gtt_pte_ops *ops = s->vgpu->gvt->gtt.pte_ops; in ppgtt_generate_shadow_entry()
1138 se->type = ge->type; in ppgtt_generate_shadow_entry()
1139 se->val64 = ge->val64; in ppgtt_generate_shadow_entry()
1142 if (se->type == GTT_TYPE_PPGTT_PDE_ENTRY) in ppgtt_generate_shadow_entry()
1143 ops->clear_ips(se); in ppgtt_generate_shadow_entry()
1145 ops->set_pfn(se, s->shadow_page.mfn); in ppgtt_generate_shadow_entry()
1150 struct intel_gvt_gtt_entry *se) in split_2MB_gtt_entry() argument
1152 const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; in split_2MB_gtt_entry()
1162 start_gfn = ops->get_pfn(se); in split_2MB_gtt_entry()
1173 sub_se.val64 = se->val64; in split_2MB_gtt_entry()
1177 sub_se.val64 |= (se->val64 & _PAGE_PAT_LARGE) >> 5; in split_2MB_gtt_entry()
1179 ops->set_pfn(&sub_se, dma_addr >> PAGE_SHIFT); in split_2MB_gtt_entry()
1184 se->val64 &= ~_PAGE_DIRTY; in split_2MB_gtt_entry()
1186 ops->clear_pse(se); in split_2MB_gtt_entry()
1187 ops->clear_ips(se); in split_2MB_gtt_entry()
1188 ops->set_pfn(se, sub_spt->shadow_page.mfn); in split_2MB_gtt_entry()
1189 ppgtt_set_shadow_entry(spt, se, index); in split_2MB_gtt_entry()
1198 trace_spt_change(sub_spt->vgpu->id, "release", sub_spt, in split_2MB_gtt_entry()
1199 sub_spt->guest_page.gfn, sub_spt->shadow_page.type); in split_2MB_gtt_entry()
1206 struct intel_gvt_gtt_entry *se) in split_64KB_gtt_entry() argument
1208 const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; in split_64KB_gtt_entry()
1209 struct intel_gvt_gtt_entry entry = *se; in split_64KB_gtt_entry()
1218 start_gfn = ops->get_pfn(se); in split_64KB_gtt_entry()
1221 ops->set_64k_splited(&entry); in split_64KB_gtt_entry()
1229 ops->set_pfn(&entry, dma_addr >> PAGE_SHIFT); in split_64KB_gtt_entry()
1239 const struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops; in ppgtt_populate_shadow_entry()
1240 struct intel_gvt_gtt_entry se = *ge; in ppgtt_populate_shadow_entry() local
1245 if (!pte_ops->test_present(ge)) in ppgtt_populate_shadow_entry()
1248 gfn = pte_ops->get_pfn(ge); in ppgtt_populate_shadow_entry()
1250 switch (ge->type) { in ppgtt_populate_shadow_entry()
1255 return -ENXIO; in ppgtt_populate_shadow_entry()
1264 return split_64KB_gtt_entry(vgpu, spt, index, &se); in ppgtt_populate_shadow_entry()
1267 if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M) || in ppgtt_populate_shadow_entry()
1270 return split_2MB_gtt_entry(vgpu, spt, index, &se); in ppgtt_populate_shadow_entry()
1274 return -EINVAL; in ppgtt_populate_shadow_entry()
1277 return -EINVAL; in ppgtt_populate_shadow_entry()
1281 pte_ops->set_pfn(&se, dma_addr >> PAGE_SHIFT); in ppgtt_populate_shadow_entry()
1282 ppgtt_set_shadow_entry(spt, &se, index); in ppgtt_populate_shadow_entry()
1288 struct intel_vgpu *vgpu = spt->vgpu; in ppgtt_populate_spt()
1290 struct intel_gvt_gtt_entry se, ge; in ppgtt_populate_spt() local
1294 trace_spt_change(spt->vgpu->id, "born", spt, in ppgtt_populate_spt()
1295 spt->guest_page.gfn, spt->shadow_page.type); in ppgtt_populate_spt()
1304 ppgtt_get_shadow_entry(spt, &se, i); in ppgtt_populate_spt()
1305 ppgtt_generate_shadow_entry(&se, s, &ge); in ppgtt_populate_spt()
1306 ppgtt_set_shadow_entry(spt, &se, i); in ppgtt_populate_spt()
1321 struct intel_gvt_gtt_entry *se, unsigned long index) in ppgtt_handle_guest_entry_removal() argument
1323 struct intel_vgpu *vgpu = spt->vgpu; in ppgtt_handle_guest_entry_removal()
1324 const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; in ppgtt_handle_guest_entry_removal()
1327 trace_spt_guest_change(spt->vgpu->id, "remove", spt, in ppgtt_handle_guest_entry_removal()
1328 spt->shadow_page.type, se->val64, index); in ppgtt_handle_guest_entry_removal()
1331 se->type, index, se->val64); in ppgtt_handle_guest_entry_removal()
1333 if (!ops->test_present(se)) in ppgtt_handle_guest_entry_removal()
1336 if (ops->get_pfn(se) == in ppgtt_handle_guest_entry_removal()
1337 vgpu->gtt.scratch_pt[spt->shadow_page.type].page_mfn) in ppgtt_handle_guest_entry_removal()
1340 if (gtt_type_is_pt(get_next_pt_type(se->type))) { in ppgtt_handle_guest_entry_removal()
1342 intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(se)); in ppgtt_handle_guest_entry_removal()
1345 ret = -ENXIO; in ppgtt_handle_guest_entry_removal()
1353 WARN(se->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY, in ppgtt_handle_guest_entry_removal()
1355 ppgtt_invalidate_pte(spt, se); in ppgtt_handle_guest_entry_removal()
1361 spt, se->val64, se->type); in ppgtt_handle_guest_entry_removal()
1368 struct intel_vgpu *vgpu = spt->vgpu; in ppgtt_handle_guest_entry_add()
1373 trace_spt_guest_change(spt->vgpu->id, "add", spt, spt->shadow_page.type, in ppgtt_handle_guest_entry_add()
1374 we->val64, index); in ppgtt_handle_guest_entry_add()
1377 we->type, index, we->val64); in ppgtt_handle_guest_entry_add()
1379 if (gtt_type_is_pt(get_next_pt_type(we->type))) { in ppgtt_handle_guest_entry_add()
1396 spt, we->val64, we->type); in ppgtt_handle_guest_entry_add()
1403 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; in sync_oos_page()
1404 struct intel_gvt *gvt = vgpu->gvt; in sync_oos_page()
1405 const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; in sync_oos_page()
1406 struct intel_vgpu_ppgtt_spt *spt = oos_page->spt; in sync_oos_page()
1411 trace_oos_change(vgpu->id, "sync", oos_page->id, in sync_oos_page()
1412 spt, spt->guest_page.type); in sync_oos_page()
1414 old.type = new.type = get_entry_type(spt->guest_page.type); in sync_oos_page()
1418 info->gtt_entry_size_shift); index++) { in sync_oos_page()
1419 ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu); in sync_oos_page()
1420 ops->get_entry(NULL, &new, index, true, in sync_oos_page()
1421 spt->guest_page.gfn << PAGE_SHIFT, vgpu); in sync_oos_page()
1424 && !test_and_clear_bit(index, spt->post_shadow_bitmap)) in sync_oos_page()
1427 trace_oos_sync(vgpu->id, oos_page->id, in sync_oos_page()
1428 spt, spt->guest_page.type, in sync_oos_page()
1435 ops->set_entry(oos_page->mem, &new, index, false, 0, vgpu); in sync_oos_page()
1438 spt->guest_page.write_cnt = 0; in sync_oos_page()
1439 list_del_init(&spt->post_shadow_list); in sync_oos_page()
1446 struct intel_gvt *gvt = vgpu->gvt; in detach_oos_page()
1447 struct intel_vgpu_ppgtt_spt *spt = oos_page->spt; in detach_oos_page()
1449 trace_oos_change(vgpu->id, "detach", oos_page->id, in detach_oos_page()
1450 spt, spt->guest_page.type); in detach_oos_page()
1452 spt->guest_page.write_cnt = 0; in detach_oos_page()
1453 spt->guest_page.oos_page = NULL; in detach_oos_page()
1454 oos_page->spt = NULL; in detach_oos_page()
1456 list_del_init(&oos_page->vm_list); in detach_oos_page()
1457 list_move_tail(&oos_page->list, &gvt->gtt.oos_page_free_list_head); in detach_oos_page()
1465 struct intel_gvt *gvt = spt->vgpu->gvt; in attach_oos_page()
1468 ret = intel_gvt_read_gpa(spt->vgpu, in attach_oos_page()
1469 spt->guest_page.gfn << I915_GTT_PAGE_SHIFT, in attach_oos_page()
1470 oos_page->mem, I915_GTT_PAGE_SIZE); in attach_oos_page()
1474 oos_page->spt = spt; in attach_oos_page()
1475 spt->guest_page.oos_page = oos_page; in attach_oos_page()
1477 list_move_tail(&oos_page->list, &gvt->gtt.oos_page_use_list_head); in attach_oos_page()
1479 trace_oos_change(spt->vgpu->id, "attach", oos_page->id, in attach_oos_page()
1480 spt, spt->guest_page.type); in attach_oos_page()
1486 struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page; in ppgtt_set_guest_page_sync()
1489 ret = intel_vgpu_enable_page_track(spt->vgpu, spt->guest_page.gfn); in ppgtt_set_guest_page_sync()
1493 trace_oos_change(spt->vgpu->id, "set page sync", oos_page->id, in ppgtt_set_guest_page_sync()
1494 spt, spt->guest_page.type); in ppgtt_set_guest_page_sync()
1496 list_del_init(&oos_page->vm_list); in ppgtt_set_guest_page_sync()
1497 return sync_oos_page(spt->vgpu, oos_page); in ppgtt_set_guest_page_sync()
1502 struct intel_gvt *gvt = spt->vgpu->gvt; in ppgtt_allocate_oos_page()
1503 struct intel_gvt_gtt *gtt = &gvt->gtt; in ppgtt_allocate_oos_page()
1504 struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page; in ppgtt_allocate_oos_page()
1509 if (list_empty(&gtt->oos_page_free_list_head)) { in ppgtt_allocate_oos_page()
1510 oos_page = container_of(gtt->oos_page_use_list_head.next, in ppgtt_allocate_oos_page()
1512 ret = ppgtt_set_guest_page_sync(oos_page->spt); in ppgtt_allocate_oos_page()
1515 ret = detach_oos_page(spt->vgpu, oos_page); in ppgtt_allocate_oos_page()
1519 oos_page = container_of(gtt->oos_page_free_list_head.next, in ppgtt_allocate_oos_page()
1526 struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page; in ppgtt_set_guest_page_oos()
1529 return -EINVAL; in ppgtt_set_guest_page_oos()
1531 trace_oos_change(spt->vgpu->id, "set page out of sync", oos_page->id, in ppgtt_set_guest_page_oos()
1532 spt, spt->guest_page.type); in ppgtt_set_guest_page_oos()
1534 list_add_tail(&oos_page->vm_list, &spt->vgpu->gtt.oos_page_list_head); in ppgtt_set_guest_page_oos()
1535 return intel_vgpu_disable_page_track(spt->vgpu, spt->guest_page.gfn); in ppgtt_set_guest_page_oos()
1539 * intel_vgpu_sync_oos_pages - sync all the out-of-sync shadow pages for a vGPU
1543 * to sync all the out-of-sync shadow pages for a vGPU
1550 struct list_head *pos, *n; in intel_vgpu_sync_oos_pages() local
1557 list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) { in intel_vgpu_sync_oos_pages()
1558 oos_page = container_of(pos, in intel_vgpu_sync_oos_pages()
1560 ret = ppgtt_set_guest_page_sync(oos_page->spt); in intel_vgpu_sync_oos_pages()
1574 struct intel_vgpu *vgpu = spt->vgpu; in ppgtt_handle_guest_write_page_table()
1575 int type = spt->shadow_page.type; in ppgtt_handle_guest_write_page_table()
1576 const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; in ppgtt_handle_guest_write_page_table()
1581 new_present = ops->test_present(we); in ppgtt_handle_guest_write_page_table()
1602 if (ops->test_64k_splited(&old_se) && in ppgtt_handle_guest_write_page_table()
1606 ops->clear_64k_splited(&old_se); in ppgtt_handle_guest_write_page_table()
1607 ops->set_pfn(&old_se, in ppgtt_handle_guest_write_page_table()
1608 vgpu->gtt.scratch_pt[type].page_mfn); in ppgtt_handle_guest_write_page_table()
1613 ops->clear_pse(&old_se); in ppgtt_handle_guest_write_page_table()
1614 ops->set_pfn(&old_se, in ppgtt_handle_guest_write_page_table()
1615 vgpu->gtt.scratch_pt[type].page_mfn); in ppgtt_handle_guest_write_page_table()
1618 ops->set_pfn(&old_se, in ppgtt_handle_guest_write_page_table()
1619 vgpu->gtt.scratch_pt[type].page_mfn); in ppgtt_handle_guest_write_page_table()
1627 spt, we->val64, we->type); in ppgtt_handle_guest_write_page_table()
1636 && gtt_type_is_pte_pt(spt->guest_page.type) in can_do_out_of_sync()
1637 && spt->guest_page.write_cnt >= 2; in can_do_out_of_sync()
1643 set_bit(index, spt->post_shadow_bitmap); in ppgtt_set_post_shadow()
1644 if (!list_empty(&spt->post_shadow_list)) in ppgtt_set_post_shadow()
1647 list_add_tail(&spt->post_shadow_list, in ppgtt_set_post_shadow()
1648 &spt->vgpu->gtt.post_shadow_list_head); in ppgtt_set_post_shadow()
1652 * intel_vgpu_flush_post_shadow - flush the post shadow transactions
1663 struct list_head *pos, *n; in intel_vgpu_flush_post_shadow() local
1669 list_for_each_safe(pos, n, &vgpu->gtt.post_shadow_list_head) { in intel_vgpu_flush_post_shadow()
1670 spt = container_of(pos, struct intel_vgpu_ppgtt_spt, in intel_vgpu_flush_post_shadow()
1673 for_each_set_bit(index, spt->post_shadow_bitmap, in intel_vgpu_flush_post_shadow()
1681 clear_bit(index, spt->post_shadow_bitmap); in intel_vgpu_flush_post_shadow()
1683 list_del_init(&spt->post_shadow_list); in intel_vgpu_flush_post_shadow()
1692 struct intel_vgpu *vgpu = spt->vgpu; in ppgtt_handle_guest_write_page_table_bytes()
1693 const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; in ppgtt_handle_guest_write_page_table_bytes()
1694 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; in ppgtt_handle_guest_write_page_table_bytes()
1695 struct intel_gvt_gtt_entry we, se; in ppgtt_handle_guest_write_page_table_bytes() local
1699 index = (pa & (PAGE_SIZE - 1)) >> info->gtt_entry_size_shift; in ppgtt_handle_guest_write_page_table_bytes()
1715 if (bytes == info->gtt_entry_size) { in ppgtt_handle_guest_write_page_table_bytes()
1720 if (!test_bit(index, spt->post_shadow_bitmap)) { in ppgtt_handle_guest_write_page_table_bytes()
1721 int type = spt->shadow_page.type; in ppgtt_handle_guest_write_page_table_bytes()
1723 ppgtt_get_shadow_entry(spt, &se, index); in ppgtt_handle_guest_write_page_table_bytes()
1724 ret = ppgtt_handle_guest_entry_removal(spt, &se, index); in ppgtt_handle_guest_write_page_table_bytes()
1727 ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn); in ppgtt_handle_guest_write_page_table_bytes()
1728 ppgtt_set_shadow_entry(spt, &se, index); in ppgtt_handle_guest_write_page_table_bytes()
1736 spt->guest_page.write_cnt++; in ppgtt_handle_guest_write_page_table_bytes()
1738 if (spt->guest_page.oos_page) in ppgtt_handle_guest_write_page_table_bytes()
1739 ops->set_entry(spt->guest_page.oos_page->mem, &we, index, in ppgtt_handle_guest_write_page_table_bytes()
1743 if (!spt->guest_page.oos_page) in ppgtt_handle_guest_write_page_table_bytes()
1755 struct intel_vgpu *vgpu = mm->vgpu; in invalidate_ppgtt_mm()
1756 struct intel_gvt *gvt = vgpu->gvt; in invalidate_ppgtt_mm()
1757 struct intel_gvt_gtt *gtt = &gvt->gtt; in invalidate_ppgtt_mm()
1758 const struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops; in invalidate_ppgtt_mm()
1759 struct intel_gvt_gtt_entry se; in invalidate_ppgtt_mm() local
1762 if (!mm->ppgtt_mm.shadowed) in invalidate_ppgtt_mm()
1765 for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.shadow_pdps); index++) { in invalidate_ppgtt_mm()
1766 ppgtt_get_shadow_root_entry(mm, &se, index); in invalidate_ppgtt_mm()
1768 if (!ops->test_present(&se)) in invalidate_ppgtt_mm()
1771 ppgtt_invalidate_spt_by_shadow_entry(vgpu, &se); in invalidate_ppgtt_mm()
1772 se.val64 = 0; in invalidate_ppgtt_mm()
1773 ppgtt_set_shadow_root_entry(mm, &se, index); in invalidate_ppgtt_mm()
1775 trace_spt_guest_change(vgpu->id, "destroy root pointer", in invalidate_ppgtt_mm()
1776 NULL, se.type, se.val64, index); in invalidate_ppgtt_mm()
1779 mm->ppgtt_mm.shadowed = false; in invalidate_ppgtt_mm()
1785 struct intel_vgpu *vgpu = mm->vgpu; in shadow_ppgtt_mm()
1786 struct intel_gvt *gvt = vgpu->gvt; in shadow_ppgtt_mm()
1787 struct intel_gvt_gtt *gtt = &gvt->gtt; in shadow_ppgtt_mm()
1788 const struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops; in shadow_ppgtt_mm()
1790 struct intel_gvt_gtt_entry ge, se; in shadow_ppgtt_mm() local
1793 if (mm->ppgtt_mm.shadowed) in shadow_ppgtt_mm()
1796 if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status)) in shadow_ppgtt_mm()
1797 return -EINVAL; in shadow_ppgtt_mm()
1799 mm->ppgtt_mm.shadowed = true; in shadow_ppgtt_mm()
1801 for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.guest_pdps); index++) { in shadow_ppgtt_mm()
1804 if (!ops->test_present(&ge)) in shadow_ppgtt_mm()
1807 trace_spt_guest_change(vgpu->id, __func__, NULL, in shadow_ppgtt_mm()
1816 ppgtt_generate_shadow_entry(&se, spt, &ge); in shadow_ppgtt_mm()
1817 ppgtt_set_shadow_root_entry(mm, &se, index); in shadow_ppgtt_mm()
1819 trace_spt_guest_change(vgpu->id, "populate root pointer", in shadow_ppgtt_mm()
1820 NULL, se.type, se.val64, index); in shadow_ppgtt_mm()
1837 mm->vgpu = vgpu; in vgpu_alloc_mm()
1838 kref_init(&mm->ref); in vgpu_alloc_mm()
1839 atomic_set(&mm->pincount, 0); in vgpu_alloc_mm()
1850 * intel_vgpu_create_ppgtt_mm - create a ppgtt mm object for a vGPU
1863 struct intel_gvt *gvt = vgpu->gvt; in intel_vgpu_create_ppgtt_mm()
1869 return ERR_PTR(-ENOMEM); in intel_vgpu_create_ppgtt_mm()
1871 mm->type = INTEL_GVT_MM_PPGTT; in intel_vgpu_create_ppgtt_mm()
1875 mm->ppgtt_mm.root_entry_type = root_entry_type; in intel_vgpu_create_ppgtt_mm()
1877 INIT_LIST_HEAD(&mm->ppgtt_mm.list); in intel_vgpu_create_ppgtt_mm()
1878 INIT_LIST_HEAD(&mm->ppgtt_mm.lru_list); in intel_vgpu_create_ppgtt_mm()
1879 INIT_LIST_HEAD(&mm->ppgtt_mm.link); in intel_vgpu_create_ppgtt_mm()
1882 mm->ppgtt_mm.guest_pdps[0] = pdps[0]; in intel_vgpu_create_ppgtt_mm()
1884 memcpy(mm->ppgtt_mm.guest_pdps, pdps, in intel_vgpu_create_ppgtt_mm()
1885 sizeof(mm->ppgtt_mm.guest_pdps)); in intel_vgpu_create_ppgtt_mm()
1894 list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head); in intel_vgpu_create_ppgtt_mm()
1896 mutex_lock(&gvt->gtt.ppgtt_mm_lock); in intel_vgpu_create_ppgtt_mm()
1897 list_add_tail(&mm->ppgtt_mm.lru_list, &gvt->gtt.ppgtt_mm_lru_list_head); in intel_vgpu_create_ppgtt_mm()
1898 mutex_unlock(&gvt->gtt.ppgtt_mm_lock); in intel_vgpu_create_ppgtt_mm()
1910 return ERR_PTR(-ENOMEM); in intel_vgpu_create_ggtt_mm()
1912 mm->type = INTEL_GVT_MM_GGTT; in intel_vgpu_create_ggtt_mm()
1914 nr_entries = gvt_ggtt_gm_sz(vgpu->gvt) >> I915_GTT_PAGE_SHIFT; in intel_vgpu_create_ggtt_mm()
1915 mm->ggtt_mm.virtual_ggtt = in intel_vgpu_create_ggtt_mm()
1917 vgpu->gvt->device_info.gtt_entry_size)); in intel_vgpu_create_ggtt_mm()
1918 if (!mm->ggtt_mm.virtual_ggtt) { in intel_vgpu_create_ggtt_mm()
1920 return ERR_PTR(-ENOMEM); in intel_vgpu_create_ggtt_mm()
1923 mm->ggtt_mm.host_ggtt_aperture = vzalloc((vgpu_aperture_sz(vgpu) >> PAGE_SHIFT) * sizeof(u64)); in intel_vgpu_create_ggtt_mm()
1924 if (!mm->ggtt_mm.host_ggtt_aperture) { in intel_vgpu_create_ggtt_mm()
1925 vfree(mm->ggtt_mm.virtual_ggtt); in intel_vgpu_create_ggtt_mm()
1927 return ERR_PTR(-ENOMEM); in intel_vgpu_create_ggtt_mm()
1930 mm->ggtt_mm.host_ggtt_hidden = vzalloc((vgpu_hidden_sz(vgpu) >> PAGE_SHIFT) * sizeof(u64)); in intel_vgpu_create_ggtt_mm()
1931 if (!mm->ggtt_mm.host_ggtt_hidden) { in intel_vgpu_create_ggtt_mm()
1932 vfree(mm->ggtt_mm.host_ggtt_aperture); in intel_vgpu_create_ggtt_mm()
1933 vfree(mm->ggtt_mm.virtual_ggtt); in intel_vgpu_create_ggtt_mm()
1935 return ERR_PTR(-ENOMEM); in intel_vgpu_create_ggtt_mm()
1942 * _intel_vgpu_mm_release - destroy a mm object
1952 if (GEM_WARN_ON(atomic_read(&mm->pincount))) in _intel_vgpu_mm_release()
1955 if (mm->type == INTEL_GVT_MM_PPGTT) { in _intel_vgpu_mm_release()
1956 list_del(&mm->ppgtt_mm.list); in _intel_vgpu_mm_release()
1958 mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock); in _intel_vgpu_mm_release()
1959 list_del(&mm->ppgtt_mm.lru_list); in _intel_vgpu_mm_release()
1960 mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock); in _intel_vgpu_mm_release()
1964 vfree(mm->ggtt_mm.virtual_ggtt); in _intel_vgpu_mm_release()
1965 vfree(mm->ggtt_mm.host_ggtt_aperture); in _intel_vgpu_mm_release()
1966 vfree(mm->ggtt_mm.host_ggtt_hidden); in _intel_vgpu_mm_release()
1973 * intel_vgpu_unpin_mm - decrease the pin count of a vGPU mm object
1980 atomic_dec_if_positive(&mm->pincount); in intel_vgpu_unpin_mm()
1984 * intel_vgpu_pin_mm - increase the pin count of a vGPU mm object
1998 atomic_inc(&mm->pincount); in intel_vgpu_pin_mm()
2000 if (mm->type == INTEL_GVT_MM_PPGTT) { in intel_vgpu_pin_mm()
2005 mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock); in intel_vgpu_pin_mm()
2006 list_move_tail(&mm->ppgtt_mm.lru_list, in intel_vgpu_pin_mm()
2007 &mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head); in intel_vgpu_pin_mm()
2008 mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock); in intel_vgpu_pin_mm()
2017 struct list_head *pos, *n; in reclaim_one_ppgtt_mm() local
2019 mutex_lock(&gvt->gtt.ppgtt_mm_lock); in reclaim_one_ppgtt_mm()
2021 list_for_each_safe(pos, n, &gvt->gtt.ppgtt_mm_lru_list_head) { in reclaim_one_ppgtt_mm()
2022 mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.lru_list); in reclaim_one_ppgtt_mm()
2024 if (atomic_read(&mm->pincount)) in reclaim_one_ppgtt_mm()
2027 list_del_init(&mm->ppgtt_mm.lru_list); in reclaim_one_ppgtt_mm()
2028 mutex_unlock(&gvt->gtt.ppgtt_mm_lock); in reclaim_one_ppgtt_mm()
2032 mutex_unlock(&gvt->gtt.ppgtt_mm_lock); in reclaim_one_ppgtt_mm()
2042 struct intel_vgpu *vgpu = mm->vgpu; in ppgtt_get_next_level_entry()
2043 const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; in ppgtt_get_next_level_entry()
2046 s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e)); in ppgtt_get_next_level_entry()
2048 return -ENXIO; in ppgtt_get_next_level_entry()
2058 * intel_vgpu_gma_to_gpa - translate a gma to GPA
2070 struct intel_vgpu *vgpu = mm->vgpu; in intel_vgpu_gma_to_gpa()
2071 struct intel_gvt *gvt = vgpu->gvt; in intel_vgpu_gma_to_gpa()
2072 const struct intel_gvt_gtt_pte_ops *pte_ops = gvt->gtt.pte_ops; in intel_vgpu_gma_to_gpa()
2073 const struct intel_gvt_gtt_gma_ops *gma_ops = gvt->gtt.gma_ops; in intel_vgpu_gma_to_gpa()
2080 GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT && in intel_vgpu_gma_to_gpa()
2081 mm->type != INTEL_GVT_MM_PPGTT); in intel_vgpu_gma_to_gpa()
2083 if (mm->type == INTEL_GVT_MM_GGTT) { in intel_vgpu_gma_to_gpa()
2088 gma_ops->gma_to_ggtt_pte_index(gma)); in intel_vgpu_gma_to_gpa()
2090 gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT) in intel_vgpu_gma_to_gpa()
2093 trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa); in intel_vgpu_gma_to_gpa()
2095 switch (mm->ppgtt_mm.root_entry_type) { in intel_vgpu_gma_to_gpa()
2099 gma_index[0] = gma_ops->gma_to_pml4_index(gma); in intel_vgpu_gma_to_gpa()
2100 gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma); in intel_vgpu_gma_to_gpa()
2101 gma_index[2] = gma_ops->gma_to_pde_index(gma); in intel_vgpu_gma_to_gpa()
2102 gma_index[3] = gma_ops->gma_to_pte_index(gma); in intel_vgpu_gma_to_gpa()
2107 gma_ops->gma_to_l3_pdp_index(gma)); in intel_vgpu_gma_to_gpa()
2109 gma_index[0] = gma_ops->gma_to_pde_index(gma); in intel_vgpu_gma_to_gpa()
2110 gma_index[1] = gma_ops->gma_to_pte_index(gma); in intel_vgpu_gma_to_gpa()
2120 (i == levels - 1)); in intel_vgpu_gma_to_gpa()
2124 if (!pte_ops->test_present(&e)) { in intel_vgpu_gma_to_gpa()
2130 gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT) + in intel_vgpu_gma_to_gpa()
2132 trace_gma_translate(vgpu->id, "ppgtt", 0, in intel_vgpu_gma_to_gpa()
2133 mm->ppgtt_mm.root_entry_type, gma, gpa); in intel_vgpu_gma_to_gpa()
2138 gvt_vgpu_err("invalid mm type: %d gma %lx\n", mm->type, gma); in intel_vgpu_gma_to_gpa()
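The PPGTT branch of the walk above splits the guest graphics memory address into one index per page-table level before resolving each level through ppgtt_get_next_level_entry(). A self-contained sketch of the usual gen8 4-level split — 9 bits per level plus a 4KB page offset; these widths are a stated assumption, not quoted from the gma_ops in this listing:

#include <stdint.h>

struct gma_split {
	unsigned int pml4, pdp, pde, pte;
	unsigned int page_off;
};

static struct gma_split split_gma(uint64_t gma)
{
	struct gma_split s = {
		.pml4     = (gma >> 39) & 0x1ff,	/* bits 47..39 */
		.pdp      = (gma >> 30) & 0x1ff,	/* bits 38..30 */
		.pde      = (gma >> 21) & 0x1ff,	/* bits 29..21 */
		.pte      = (gma >> 12) & 0x1ff,	/* bits 20..12 */
		.page_off =  gma        & 0xfff,	/* bits 11..0  */
	};
	return s;
}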
2145 struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm; in emulate_ggtt_mmio_read()
2146 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; in emulate_ggtt_mmio_read()
2147 unsigned long index = off >> info->gtt_entry_size_shift; in emulate_ggtt_mmio_read()
2152 return -EINVAL; in emulate_ggtt_mmio_read()
2163 memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)), in emulate_ggtt_mmio_read()
2169 * intel_vgpu_emulate_ggtt_mmio_read - emulate GTT MMIO register read
2183 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; in intel_vgpu_emulate_ggtt_mmio_read()
2187 return -EINVAL; in intel_vgpu_emulate_ggtt_mmio_read()
2189 off -= info->gtt_start_offset; in intel_vgpu_emulate_ggtt_mmio_read()
2197 const struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops; in ggtt_invalidate_pte()
2200 pfn = pte_ops->get_pfn(entry); in ggtt_invalidate_pte()
2201 if (pfn != vgpu->gvt->gtt.scratch_mfn) in ggtt_invalidate_pte()
2208 struct intel_gvt *gvt = vgpu->gvt; in emulate_ggtt_mmio_write()
2209 const struct intel_gvt_device_info *info = &gvt->device_info; in emulate_ggtt_mmio_write()
2210 struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm; in emulate_ggtt_mmio_write()
2211 const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; in emulate_ggtt_mmio_write()
2212 unsigned long g_gtt_index = off >> info->gtt_entry_size_shift; in emulate_ggtt_mmio_write()
2218 struct intel_gvt_partial_pte *partial_pte, *pos, *n; in emulate_ggtt_mmio_write() local
2222 return -EINVAL; in emulate_ggtt_mmio_write()
2231 memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data, in emulate_ggtt_mmio_write()
2238 if (bytes < info->gtt_entry_size) { in emulate_ggtt_mmio_write()
2241 list_for_each_entry_safe(pos, n, in emulate_ggtt_mmio_write()
2242 &ggtt_mm->ggtt_mm.partial_pte_list, list) { in emulate_ggtt_mmio_write()
2243 if (g_gtt_index == pos->offset >> in emulate_ggtt_mmio_write()
2244 info->gtt_entry_size_shift) { in emulate_ggtt_mmio_write()
2245 if (off != pos->offset) { in emulate_ggtt_mmio_write()
2247 int last_off = pos->offset & in emulate_ggtt_mmio_write()
2248 (info->gtt_entry_size - 1); in emulate_ggtt_mmio_write()
2251 (void *)&pos->data + last_off, in emulate_ggtt_mmio_write()
2254 list_del(&pos->list); in emulate_ggtt_mmio_write()
2255 kfree(pos); in emulate_ggtt_mmio_write()
2261 pos->data = e.val64; in emulate_ggtt_mmio_write()
2271 return -ENOMEM; in emulate_ggtt_mmio_write()
2272 partial_pte->offset = off; in emulate_ggtt_mmio_write()
2273 partial_pte->data = e.val64; in emulate_ggtt_mmio_write()
2274 list_add_tail(&partial_pte->list, in emulate_ggtt_mmio_write()
2275 &ggtt_mm->ggtt_mm.partial_pte_list); in emulate_ggtt_mmio_write()
2280 if (!partial_update && (ops->test_present(&e))) { in emulate_ggtt_mmio_write()
2281 gfn = ops->get_pfn(&e); in emulate_ggtt_mmio_write()
2293 ops->set_pfn(&m, gvt->gtt.scratch_mfn); in emulate_ggtt_mmio_write()
2295 ops->set_pfn(&m, dma_addr >> PAGE_SHIFT); in emulate_ggtt_mmio_write()
2297 ops->set_pfn(&m, gvt->gtt.scratch_mfn); in emulate_ggtt_mmio_write()
2298 ops->clear_present(&m); in emulate_ggtt_mmio_write()
2307 ggtt_invalidate(gvt->gt); in emulate_ggtt_mmio_write()
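The partial_pte list handled above exists because a guest may update one 64-bit GGTT PTE with two separate 32-bit MMIO writes; only when both halves of the same entry have been seen is the full value shadowed. A hedged sketch of that merge step — field and function names here are illustrative, not the gtt.c ones:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

struct partial_write {
	bool     pending;
	uint32_t offset;	/* byte offset of the recorded half */
	uint64_t data;		/* accumulated 64-bit PTE value     */
};

/* Returns true and fills *full once both 32-bit halves of one
 * 8-byte entry have been written. */
static bool merge_partial(struct partial_write *p, uint32_t off,
			  uint32_t val, uint64_t *full)
{
	if (p->pending && (off >> 3) == (p->offset >> 3) && off != p->offset) {
		memcpy((char *)&p->data + (off & 7), &val, sizeof(val));
		*full = p->data;
		p->pending = false;
		return true;		/* second half: entry complete */
	}

	/* first half (or a write to a different entry): record it */
	p->pending = true;
	p->offset = off;
	p->data = 0;
	memcpy((char *)&p->data + (off & 7), &val, sizeof(val));
	return false;
}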
2312 * intel_vgpu_emulate_ggtt_mmio_write - emulate GTT MMIO register write
2326 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; in intel_vgpu_emulate_ggtt_mmio_write()
2328 struct intel_vgpu_submission *s = &vgpu->submission; in intel_vgpu_emulate_ggtt_mmio_write()
2333 return -EINVAL; in intel_vgpu_emulate_ggtt_mmio_write()
2335 off -= info->gtt_start_offset; in intel_vgpu_emulate_ggtt_mmio_write()
2342 for_each_engine(engine, vgpu->gvt->gt, i) { in intel_vgpu_emulate_ggtt_mmio_write()
2343 if (!s->last_ctx[i].valid) in intel_vgpu_emulate_ggtt_mmio_write()
2346 if (s->last_ctx[i].lrca == (off >> info->gtt_entry_size_shift)) in intel_vgpu_emulate_ggtt_mmio_write()
2347 s->last_ctx[i].valid = false; in intel_vgpu_emulate_ggtt_mmio_write()
2355 struct drm_i915_private *i915 = vgpu->gvt->gt->i915; in alloc_scratch_pages()
2356 struct intel_vgpu_gtt *gtt = &vgpu->gtt; in alloc_scratch_pages()
2357 const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; in alloc_scratch_pages()
2359 vgpu->gvt->device_info.gtt_entry_size_shift; in alloc_scratch_pages()
2362 struct device *dev = vgpu->gvt->gt->i915->drm.dev; in alloc_scratch_pages()
2365 if (drm_WARN_ON(&i915->drm, in alloc_scratch_pages()
2367 return -EINVAL; in alloc_scratch_pages()
2372 return -ENOMEM; in alloc_scratch_pages()
2379 return -ENOMEM; in alloc_scratch_pages()
2381 gtt->scratch_pt[type].page_mfn = in alloc_scratch_pages()
2383 gtt->scratch_pt[type].page = virt_to_page(scratch_pt); in alloc_scratch_pages()
2385 vgpu->id, type, gtt->scratch_pt[type].page_mfn); in alloc_scratch_pages()
2396 struct intel_gvt_gtt_entry se; in alloc_scratch_pages() local
2398 memset(&se, 0, sizeof(struct intel_gvt_gtt_entry)); in alloc_scratch_pages()
2399 se.type = get_entry_type(type - 1); in alloc_scratch_pages()
2400 ops->set_pfn(&se, gtt->scratch_pt[type - 1].page_mfn); in alloc_scratch_pages()
2405 se.val64 |= GEN8_PAGE_PRESENT | GEN8_PAGE_RW; in alloc_scratch_pages()
2407 se.val64 |= PPAT_CACHED; in alloc_scratch_pages()
2410 ops->set_entry(scratch_pt, &se, i, false, 0, vgpu); in alloc_scratch_pages()
2419 struct device *dev = vgpu->gvt->gt->i915->drm.dev; in release_scratch_page_tree()
2423 if (vgpu->gtt.scratch_pt[i].page != NULL) { in release_scratch_page_tree()
2424 daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn << in release_scratch_page_tree()
2427 __free_page(vgpu->gtt.scratch_pt[i].page); in release_scratch_page_tree()
2428 vgpu->gtt.scratch_pt[i].page = NULL; in release_scratch_page_tree()
2429 vgpu->gtt.scratch_pt[i].page_mfn = 0; in release_scratch_page_tree()
2454 * intel_vgpu_init_gtt - initialize per-vGPU graphics memory virtualization
2457 * This function is used to initialize per-vGPU graphics memory virtualization
2465 struct intel_vgpu_gtt *gtt = &vgpu->gtt; in intel_vgpu_init_gtt()
2467 INIT_RADIX_TREE(&gtt->spt_tree, GFP_KERNEL); in intel_vgpu_init_gtt()
2469 INIT_LIST_HEAD(&gtt->ppgtt_mm_list_head); in intel_vgpu_init_gtt()
2470 INIT_LIST_HEAD(&gtt->oos_page_list_head); in intel_vgpu_init_gtt()
2471 INIT_LIST_HEAD(&gtt->post_shadow_list_head); in intel_vgpu_init_gtt()
2473 gtt->ggtt_mm = intel_vgpu_create_ggtt_mm(vgpu); in intel_vgpu_init_gtt()
2474 if (IS_ERR(gtt->ggtt_mm)) { in intel_vgpu_init_gtt()
2476 return PTR_ERR(gtt->ggtt_mm); in intel_vgpu_init_gtt()
2481 INIT_LIST_HEAD(&gtt->ggtt_mm->ggtt_mm.partial_pte_list); in intel_vgpu_init_gtt()
2488 struct list_head *pos, *n; in intel_vgpu_destroy_all_ppgtt_mm() local
2491 list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) { in intel_vgpu_destroy_all_ppgtt_mm()
2492 mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list); in intel_vgpu_destroy_all_ppgtt_mm()
2496 if (GEM_WARN_ON(!list_empty(&vgpu->gtt.ppgtt_mm_list_head))) in intel_vgpu_destroy_all_ppgtt_mm()
2499 if (GEM_WARN_ON(!radix_tree_empty(&vgpu->gtt.spt_tree))) { in intel_vgpu_destroy_all_ppgtt_mm()
2507 struct intel_gvt_partial_pte *pos, *next; in intel_vgpu_destroy_ggtt_mm() local
2509 list_for_each_entry_safe(pos, next, in intel_vgpu_destroy_ggtt_mm()
2510 &vgpu->gtt.ggtt_mm->ggtt_mm.partial_pte_list, in intel_vgpu_destroy_ggtt_mm()
2513 pos->offset, pos->data); in intel_vgpu_destroy_ggtt_mm()
2514 kfree(pos); in intel_vgpu_destroy_ggtt_mm()
2516 intel_vgpu_destroy_mm(vgpu->gtt.ggtt_mm); in intel_vgpu_destroy_ggtt_mm()
2517 vgpu->gtt.ggtt_mm = NULL; in intel_vgpu_destroy_ggtt_mm()
2521 * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virtualization
2524 * This function is used to clean up per-vGPU graphics memory virtualization
2539 struct intel_gvt_gtt *gtt = &gvt->gtt; in clean_spt_oos()
2540 struct list_head *pos, *n; in clean_spt_oos() local
2543 WARN(!list_empty(&gtt->oos_page_use_list_head), in clean_spt_oos()
2546 list_for_each_safe(pos, n, &gtt->oos_page_free_list_head) { in clean_spt_oos()
2547 oos_page = container_of(pos, struct intel_vgpu_oos_page, list); in clean_spt_oos()
2548 list_del(&oos_page->list); in clean_spt_oos()
2549 free_page((unsigned long)oos_page->mem); in clean_spt_oos()
2556 struct intel_gvt_gtt *gtt = &gvt->gtt; in setup_spt_oos()
2561 INIT_LIST_HEAD(&gtt->oos_page_free_list_head); in setup_spt_oos()
2562 INIT_LIST_HEAD(&gtt->oos_page_use_list_head); in setup_spt_oos()
2567 ret = -ENOMEM; in setup_spt_oos()
2570 oos_page->mem = (void *)__get_free_pages(GFP_KERNEL, 0); in setup_spt_oos()
2571 if (!oos_page->mem) { in setup_spt_oos()
2572 ret = -ENOMEM; in setup_spt_oos()
2577 INIT_LIST_HEAD(&oos_page->list); in setup_spt_oos()
2578 INIT_LIST_HEAD(&oos_page->vm_list); in setup_spt_oos()
2579 oos_page->id = i; in setup_spt_oos()
2580 list_add_tail(&oos_page->list, &gtt->oos_page_free_list_head); in setup_spt_oos()
2592 * intel_vgpu_find_ppgtt_mm - find a PPGTT mm object
2605 struct list_head *pos; in intel_vgpu_find_ppgtt_mm() local
2607 list_for_each(pos, &vgpu->gtt.ppgtt_mm_list_head) { in intel_vgpu_find_ppgtt_mm()
2608 mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list); in intel_vgpu_find_ppgtt_mm()
2610 switch (mm->ppgtt_mm.root_entry_type) { in intel_vgpu_find_ppgtt_mm()
2612 if (pdps[0] == mm->ppgtt_mm.guest_pdps[0]) in intel_vgpu_find_ppgtt_mm()
2616 if (!memcmp(pdps, mm->ppgtt_mm.guest_pdps, in intel_vgpu_find_ppgtt_mm()
2617 sizeof(mm->ppgtt_mm.guest_pdps))) in intel_vgpu_find_ppgtt_mm()
2628 * intel_vgpu_get_ppgtt_mm - get or create a PPGTT mm object.
2655 * intel_vgpu_put_ppgtt_mm - find and put a PPGTT mm object.
2671 return -EINVAL; in intel_vgpu_put_ppgtt_mm()
2678 * intel_gvt_init_gtt - initialize mm components of a GVT device
2691 struct device *dev = gvt->gt->i915->drm.dev; in intel_gvt_init_gtt()
2696 gvt->gtt.pte_ops = &gen8_gtt_pte_ops; in intel_gvt_init_gtt()
2697 gvt->gtt.gma_ops = &gen8_gtt_gma_ops; in intel_gvt_init_gtt()
2702 return -ENOMEM; in intel_gvt_init_gtt()
2710 return -ENOMEM; in intel_gvt_init_gtt()
2713 gvt->gtt.scratch_page = virt_to_page(page); in intel_gvt_init_gtt()
2714 gvt->gtt.scratch_mfn = (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT); in intel_gvt_init_gtt()
2721 __free_page(gvt->gtt.scratch_page); in intel_gvt_init_gtt()
2725 INIT_LIST_HEAD(&gvt->gtt.ppgtt_mm_lru_list_head); in intel_gvt_init_gtt()
2726 mutex_init(&gvt->gtt.ppgtt_mm_lock); in intel_gvt_init_gtt()
2731 * intel_gvt_clean_gtt - clean up mm components of a GVT device
2740 struct device *dev = gvt->gt->i915->drm.dev; in intel_gvt_clean_gtt()
2741 dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn << in intel_gvt_clean_gtt()
2746 __free_page(gvt->gtt.scratch_page); in intel_gvt_clean_gtt()
2753 * intel_vgpu_invalidate_ppgtt - invalidate PPGTT instances
2761 struct list_head *pos, *n; in intel_vgpu_invalidate_ppgtt() local
2764 list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) { in intel_vgpu_invalidate_ppgtt()
2765 mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list); in intel_vgpu_invalidate_ppgtt()
2766 if (mm->type == INTEL_GVT_MM_PPGTT) { in intel_vgpu_invalidate_ppgtt()
2767 mutex_lock(&vgpu->gvt->gtt.ppgtt_mm_lock); in intel_vgpu_invalidate_ppgtt()
2768 list_del_init(&mm->ppgtt_mm.lru_list); in intel_vgpu_invalidate_ppgtt()
2769 mutex_unlock(&vgpu->gvt->gtt.ppgtt_mm_lock); in intel_vgpu_invalidate_ppgtt()
2770 if (mm->ppgtt_mm.shadowed) in intel_vgpu_invalidate_ppgtt()
2777 * intel_vgpu_reset_ggtt - reset the GGTT entry
2787 struct intel_gvt *gvt = vgpu->gvt; in intel_vgpu_reset_ggtt()
2788 const struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops; in intel_vgpu_reset_ggtt()
2794 pte_ops->set_pfn(&entry, gvt->gtt.scratch_mfn); in intel_vgpu_reset_ggtt()
2795 pte_ops->set_present(&entry); in intel_vgpu_reset_ggtt()
2799 while (num_entries--) { in intel_vgpu_reset_ggtt()
2801 ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index); in intel_vgpu_reset_ggtt()
2804 ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++); in intel_vgpu_reset_ggtt()
2809 while (num_entries--) { in intel_vgpu_reset_ggtt()
2811 ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index); in intel_vgpu_reset_ggtt()
2814 ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++); in intel_vgpu_reset_ggtt()
2817 ggtt_invalidate(gvt->gt); in intel_vgpu_reset_ggtt()
2821 * intel_gvt_restore_ggtt - restore all vGPU's ggtt entries
2837 idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) { in intel_gvt_restore_ggtt()
2838 mm = vgpu->gtt.ggtt_mm; in intel_gvt_restore_ggtt()
2843 pte = mm->ggtt_mm.host_ggtt_aperture[idx]; in intel_gvt_restore_ggtt()
2845 write_pte64(vgpu->gvt->gt->ggtt, offset + idx, pte); in intel_gvt_restore_ggtt()
2851 pte = mm->ggtt_mm.host_ggtt_hidden[idx]; in intel_gvt_restore_ggtt()
2853 write_pte64(vgpu->gvt->gt->ggtt, offset + idx, pte); in intel_gvt_restore_ggtt()