Lines matching full:vm — every occurrence of the identifier vm in the i915 GGTT code below, listed as source line number, matched line, and enclosing function (with "argument"/"member" marking what vm is on that line).
52 struct drm_i915_private *i915 = ggtt->vm.i915; in ggtt_init_hw()
54 i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT); in ggtt_init_hw()
56 ggtt->vm.is_ggtt = true; in ggtt_init_hw()
59 ggtt->vm.has_read_only = IS_VALLEYVIEW(i915); in ggtt_init_hw()
62 ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust; in ggtt_init_hw()
68 ggtt->vm.cleanup(&ggtt->vm); in ggtt_init_hw()
103 * i915_ggtt_suspend_vm - Suspend the memory mappings for a GGTT or DPT VM
104 * @vm: The VM to suspend the mappings for
109 void i915_ggtt_suspend_vm(struct i915_address_space *vm) in i915_ggtt_suspend_vm() argument
114 drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt); in i915_ggtt_suspend_vm()
117 i915_gem_drain_freed_objects(vm->i915); in i915_ggtt_suspend_vm()
119 mutex_lock(&vm->mutex); in i915_ggtt_suspend_vm()
125 save_skip_rewrite = vm->skip_pte_rewrite; in i915_ggtt_suspend_vm()
126 vm->skip_pte_rewrite = true; in i915_ggtt_suspend_vm()
128 list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) { in i915_ggtt_suspend_vm()
144 mutex_unlock(&vm->mutex); in i915_ggtt_suspend_vm()
151 vm->skip_pte_rewrite = save_skip_rewrite; in i915_ggtt_suspend_vm()
165 vm->clear_range(vm, 0, vm->total); in i915_ggtt_suspend_vm()
167 vm->skip_pte_rewrite = save_skip_rewrite; in i915_ggtt_suspend_vm()
169 mutex_unlock(&vm->mutex); in i915_ggtt_suspend_vm()
176 i915_ggtt_suspend_vm(&ggtt->vm); in i915_ggtt_suspend()
185 struct intel_uncore *uncore = ggtt->vm.gt->uncore; in gen6_ggtt_invalidate()
210 struct intel_uncore *uncore = ggtt->vm.gt->uncore; in gen8_ggtt_invalidate()
218 if (needs_wc_ggtt_mapping(ggtt->vm.i915)) in gen8_ggtt_invalidate()
225 struct drm_i915_private *i915 = ggtt->vm.i915; in guc_ggtt_invalidate()
237 intel_uncore_write_fw(ggtt->vm.gt->uncore, in guc_ggtt_invalidate()
279 static void gen8_ggtt_insert_page(struct i915_address_space *vm, in gen8_ggtt_insert_page() argument
285 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); in gen8_ggtt_insert_page()
289 gen8_set_pte(pte, ggtt->vm.pte_encode(addr, pat_index, flags)); in gen8_ggtt_insert_page()
294 static void gen8_ggtt_insert_entries(struct i915_address_space *vm, in gen8_ggtt_insert_entries() argument
299 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); in gen8_ggtt_insert_entries()
300 const gen8_pte_t pte_encode = ggtt->vm.pte_encode(0, pat_index, flags); in gen8_ggtt_insert_entries()
315 gen8_set_pte(gte++, vm->scratch[0]->encode); in gen8_ggtt_insert_entries()
324 gen8_set_pte(gte++, vm->scratch[0]->encode); in gen8_ggtt_insert_entries()
333 static void gen8_ggtt_clear_range(struct i915_address_space *vm, in gen8_ggtt_clear_range() argument
336 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); in gen8_ggtt_clear_range()
339 const gen8_pte_t scratch_pte = vm->scratch[0]->encode; in gen8_ggtt_clear_range()
354 static void gen6_ggtt_insert_page(struct i915_address_space *vm, in gen6_ggtt_insert_page() argument
360 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); in gen6_ggtt_insert_page()
364 iowrite32(vm->pte_encode(addr, pat_index, flags), pte); in gen6_ggtt_insert_page()
375 static void gen6_ggtt_insert_entries(struct i915_address_space *vm, in gen6_ggtt_insert_entries() argument
380 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); in gen6_ggtt_insert_entries()
391 iowrite32(vm->scratch[0]->encode, gte++); in gen6_ggtt_insert_entries()
394 iowrite32(vm->pte_encode(addr, pat_index, flags), gte++); in gen6_ggtt_insert_entries()
399 iowrite32(vm->scratch[0]->encode, gte++); in gen6_ggtt_insert_entries()
408 static void nop_clear_range(struct i915_address_space *vm, in nop_clear_range() argument
413 static void bxt_vtd_ggtt_wa(struct i915_address_space *vm) in bxt_vtd_ggtt_wa() argument
422 intel_uncore_posting_read_fw(vm->gt->uncore, GFX_FLSH_CNTL_GEN6); in bxt_vtd_ggtt_wa()
426 struct i915_address_space *vm; member
436 gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, in bxt_vtd_ggtt_insert_page__cb()
438 bxt_vtd_ggtt_wa(arg->vm); in bxt_vtd_ggtt_insert_page__cb()
443 static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm, in bxt_vtd_ggtt_insert_page__BKL() argument
449 struct insert_page arg = { vm, addr, offset, pat_index }; in bxt_vtd_ggtt_insert_page__BKL()
455 struct i915_address_space *vm; member
465 gen8_ggtt_insert_entries(arg->vm, arg->vma_res, in bxt_vtd_ggtt_insert_entries__cb()
467 bxt_vtd_ggtt_wa(arg->vm); in bxt_vtd_ggtt_insert_entries__cb()
472 static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm, in bxt_vtd_ggtt_insert_entries__BKL() argument
477 struct insert_entries arg = { vm, vma_res, pat_index, flags }; in bxt_vtd_ggtt_insert_entries__BKL()
482 static void gen6_ggtt_clear_range(struct i915_address_space *vm, in gen6_ggtt_clear_range() argument
485 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); in gen6_ggtt_clear_range()
498 scratch_pte = vm->scratch[0]->encode; in gen6_ggtt_clear_range()
503 void intel_ggtt_bind_vma(struct i915_address_space *vm, in intel_ggtt_bind_vma() argument
523 vm->insert_entries(vm, vma_res, pat_index, pte_flags); in intel_ggtt_bind_vma()
527 void intel_ggtt_unbind_vma(struct i915_address_space *vm, in intel_ggtt_unbind_vma() argument
530 vm->clear_range(vm, vma_res->start, vma_res->vma_size); in intel_ggtt_unbind_vma()
549 if (!intel_uc_uses_guc(&ggtt->vm.gt->uc)) in ggtt_reserve_guc_top()
552 GEM_BUG_ON(ggtt->vm.total <= GUC_TOP_RESERVE_SIZE); in ggtt_reserve_guc_top()
553 offset = ggtt->vm.total - GUC_TOP_RESERVE_SIZE; in ggtt_reserve_guc_top()
555 ret = i915_gem_gtt_reserve(&ggtt->vm, NULL, &ggtt->uc_fw, in ggtt_reserve_guc_top()
559 drm_dbg(&ggtt->vm.i915->drm, in ggtt_reserve_guc_top()
602 intel_wopcm_guc_size(&ggtt->vm.gt->wopcm)); in init_ggtt()
634 if (drm_mm_reserve_node(&ggtt->vm.mm, &ggtt->error_capture)) in init_ggtt()
635 drm_mm_insert_node_in_range(&ggtt->vm.mm, in init_ggtt()
646 ggtt->vm.scratch_range(&ggtt->vm, start, size); in init_ggtt()
647 drm_dbg(&ggtt->vm.i915->drm, in init_ggtt()
662 drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) { in init_ggtt()
663 drm_dbg(&ggtt->vm.i915->drm, in init_ggtt()
666 ggtt->vm.clear_range(&ggtt->vm, hole_start, in init_ggtt()
671 ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE); in init_ggtt()
680 static void aliasing_gtt_bind_vma(struct i915_address_space *vm, in aliasing_gtt_bind_vma() argument
694 ppgtt_bind_vma(&i915_vm_to_ggtt(vm)->alias->vm, in aliasing_gtt_bind_vma()
698 vm->insert_entries(vm, vma_res, pat_index, pte_flags); in aliasing_gtt_bind_vma()
703 static void aliasing_gtt_unbind_vma(struct i915_address_space *vm, in aliasing_gtt_unbind_vma() argument
707 vm->clear_range(vm, vma_res->start, vma_res->vma_size); in aliasing_gtt_unbind_vma()
710 ppgtt_unbind_vma(&i915_vm_to_ggtt(vm)->alias->vm, vma_res); in aliasing_gtt_unbind_vma()
719 ppgtt = i915_ppgtt_create(ggtt->vm.gt, 0); in init_aliasing_ppgtt()
723 if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) { in init_aliasing_ppgtt()
728 err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, ggtt->vm.total); in init_aliasing_ppgtt()
732 i915_gem_object_lock(ppgtt->vm.scratch[0], NULL); in init_aliasing_ppgtt()
733 err = i915_vm_map_pt_stash(&ppgtt->vm, &stash); in init_aliasing_ppgtt()
734 i915_gem_object_unlock(ppgtt->vm.scratch[0]); in init_aliasing_ppgtt()
744 ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, ggtt->vm.total); in init_aliasing_ppgtt()
747 ggtt->vm.bind_async_flags |= ppgtt->vm.bind_async_flags; in init_aliasing_ppgtt()
749 GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != intel_ggtt_bind_vma); in init_aliasing_ppgtt()
750 ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma; in init_aliasing_ppgtt()
752 GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != intel_ggtt_unbind_vma); in init_aliasing_ppgtt()
753 ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma; in init_aliasing_ppgtt()
755 i915_vm_free_pt_stash(&ppgtt->vm, &stash); in init_aliasing_ppgtt()
759 i915_vm_free_pt_stash(&ppgtt->vm, &stash); in init_aliasing_ppgtt()
761 i915_vm_put(&ppgtt->vm); in init_aliasing_ppgtt()
773 i915_vm_put(&ppgtt->vm); in fini_aliasing_ppgtt()
775 ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma; in fini_aliasing_ppgtt()
776 ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma; in fini_aliasing_ppgtt()
800 flush_workqueue(ggtt->vm.i915->wq); in ggtt_cleanup_hw()
801 i915_gem_drain_freed_objects(ggtt->vm.i915); in ggtt_cleanup_hw()
803 mutex_lock(&ggtt->vm.mutex); in ggtt_cleanup_hw()
805 ggtt->vm.skip_pte_rewrite = true; in ggtt_cleanup_hw()
807 list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) { in ggtt_cleanup_hw()
826 ggtt->vm.cleanup(&ggtt->vm); in ggtt_cleanup_hw()
828 mutex_unlock(&ggtt->vm.mutex); in ggtt_cleanup_hw()
829 i915_address_space_fini(&ggtt->vm); in ggtt_cleanup_hw()
860 GEM_WARN_ON(kref_read(&ggtt->vm.resv_ref) != 1); in i915_ggtt_driver_late_release()
861 dma_resv_fini(&ggtt->vm._resv); in i915_ggtt_driver_late_release()
915 struct drm_i915_private *i915 = ggtt->vm.i915; in ggtt_probe_common()
934 kref_init(&ggtt->vm.resv_ref); in ggtt_probe_common()
935 ret = setup_scratch_page(&ggtt->vm); in ggtt_probe_common()
944 if (i915_gem_object_is_lmem(ggtt->vm.scratch[0])) in ggtt_probe_common()
947 ggtt->vm.scratch[0]->encode = in ggtt_probe_common()
948 ggtt->vm.pte_encode(px_dma(ggtt->vm.scratch[0]), in ggtt_probe_common()
956 static void gen6_gmch_remove(struct i915_address_space *vm) in gen6_gmch_remove() argument
958 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); in gen6_gmch_remove()
961 free_scratch(vm); in gen6_gmch_remove()
972 struct drm_i915_private *i915 = ggtt->vm.i915; in gen8_gmch_probe()
991 ggtt->vm.alloc_pt_dma = alloc_pt_dma; in gen8_gmch_probe()
992 ggtt->vm.alloc_scratch_dma = alloc_pt_dma; in gen8_gmch_probe()
993 ggtt->vm.lmem_pt_obj_flags = I915_BO_ALLOC_PM_EARLY; in gen8_gmch_probe()
995 ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE; in gen8_gmch_probe()
996 ggtt->vm.cleanup = gen6_gmch_remove; in gen8_gmch_probe()
997 ggtt->vm.insert_page = gen8_ggtt_insert_page; in gen8_gmch_probe()
998 ggtt->vm.clear_range = nop_clear_range; in gen8_gmch_probe()
999 ggtt->vm.scratch_range = gen8_ggtt_clear_range; in gen8_gmch_probe()
1001 ggtt->vm.insert_entries = gen8_ggtt_insert_entries; in gen8_gmch_probe()
1008 ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL; in gen8_gmch_probe()
1009 ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL; in gen8_gmch_probe()
1017 ggtt->vm.raw_insert_page = gen8_ggtt_insert_page; in gen8_gmch_probe()
1018 ggtt->vm.raw_insert_entries = gen8_ggtt_insert_entries; in gen8_gmch_probe()
1020 ggtt->vm.bind_async_flags = in gen8_gmch_probe()
1024 if (intel_uc_wants_guc(&ggtt->vm.gt->uc)) in gen8_gmch_probe()
1029 ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma; in gen8_gmch_probe()
1030 ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma; in gen8_gmch_probe()
1033 ggtt->vm.pte_encode = mtl_ggtt_pte_encode; in gen8_gmch_probe()
1035 ggtt->vm.pte_encode = gen8_ggtt_pte_encode; in gen8_gmch_probe()
1138 struct drm_i915_private *i915 = ggtt->vm.i915; in gen6_gmch_probe()
1163 ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE; in gen6_gmch_probe()
1165 ggtt->vm.alloc_pt_dma = alloc_pt_dma; in gen6_gmch_probe()
1166 ggtt->vm.alloc_scratch_dma = alloc_pt_dma; in gen6_gmch_probe()
1168 ggtt->vm.clear_range = nop_clear_range; in gen6_gmch_probe()
1170 ggtt->vm.clear_range = gen6_ggtt_clear_range; in gen6_gmch_probe()
1171 ggtt->vm.scratch_range = gen6_ggtt_clear_range; in gen6_gmch_probe()
1172 ggtt->vm.insert_page = gen6_ggtt_insert_page; in gen6_gmch_probe()
1173 ggtt->vm.insert_entries = gen6_ggtt_insert_entries; in gen6_gmch_probe()
1174 ggtt->vm.cleanup = gen6_gmch_remove; in gen6_gmch_probe()
1179 ggtt->vm.pte_encode = iris_pte_encode; in gen6_gmch_probe()
1181 ggtt->vm.pte_encode = hsw_pte_encode; in gen6_gmch_probe()
1183 ggtt->vm.pte_encode = byt_pte_encode; in gen6_gmch_probe()
1185 ggtt->vm.pte_encode = ivb_pte_encode; in gen6_gmch_probe()
1187 ggtt->vm.pte_encode = snb_pte_encode; in gen6_gmch_probe()
1189 ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma; in gen6_gmch_probe()
1190 ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma; in gen6_gmch_probe()
1200 ggtt->vm.gt = gt; in ggtt_probe_hw()
1201 ggtt->vm.i915 = i915; in ggtt_probe_hw()
1202 ggtt->vm.dma = i915->drm.dev; in ggtt_probe_hw()
1203 dma_resv_init(&ggtt->vm._resv); in ggtt_probe_hw()
1213 dma_resv_fini(&ggtt->vm._resv); in ggtt_probe_hw()
1217 if ((ggtt->vm.total - 1) >> 32) { in ggtt_probe_hw()
1221 ggtt->vm.total >> 20); in ggtt_probe_hw()
1222 ggtt->vm.total = 1ULL << 32; in ggtt_probe_hw()
1224 min_t(u64, ggtt->mappable_end, ggtt->vm.total); in ggtt_probe_hw()
1227 if (ggtt->mappable_end > ggtt->vm.total) { in ggtt_probe_hw()
1231 &ggtt->mappable_end, ggtt->vm.total); in ggtt_probe_hw()
1232 ggtt->mappable_end = ggtt->vm.total; in ggtt_probe_hw()
1236 drm_dbg(&i915->drm, "GGTT size = %lluM\n", ggtt->vm.total >> 20); in ggtt_probe_hw()
1292 * i915_ggtt_resume_vm - Restore the memory mappings for a GGTT or DPT VM
1293 * @vm: The VM to restore the mappings for
1301 bool i915_ggtt_resume_vm(struct i915_address_space *vm) in i915_ggtt_resume_vm() argument
1306 drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt); in i915_ggtt_resume_vm()
1309 vm->clear_range(vm, 0, vm->total); in i915_ggtt_resume_vm()
1312 list_for_each_entry(vma, &vm->bound_list, vm_link) { in i915_ggtt_resume_vm()
1324 vma->ops->bind_vma(vm, NULL, vma->resource, in i915_ggtt_resume_vm()
1326 i915_gem_get_pat_index(vm->i915, in i915_ggtt_resume_vm()
1347 flush = i915_ggtt_resume_vm(&ggtt->vm); in i915_ggtt_resume()
1350 ggtt->vm.scratch_range(&ggtt->vm, ggtt->error_capture.start, in i915_ggtt_resume()
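The hits in gen8_gmch_probe() and gen6_gmch_probe() above show the recurring pattern behind most of this listing: the GGTT embeds an i915_address_space (ggtt->vm), and the probe path fills in its function pointers (insert_page, insert_entries, clear_range, cleanup, pte_encode, vma_ops.bind_vma/unbind_vma) per platform, so that callers such as intel_ggtt_bind_vma() only ever go through vm->insert_entries()/vm->clear_range(). Below is a minimal, stand-alone C sketch of that vfunc-assignment pattern, not the real driver code: every type, field, and function name in it (address_space, ggtt, probe, gen8_insert_page, and so on) is a simplified stand-in invented for illustration.

/*
 * Minimal sketch (hypothetical names, not kernel code) of the pattern the
 * hits above show: a GGTT embeds an address-space object ("vm") whose
 * operations are function pointers assigned per platform at probe time.
 */
#include <stdio.h>
#include <stdint.h>

struct address_space {
	uint64_t total;                                /* size of the VA range */
	void (*insert_page)(struct address_space *vm, uint64_t addr, uint64_t offset);
	void (*clear_range)(struct address_space *vm, uint64_t start, uint64_t length);
	void (*cleanup)(struct address_space *vm);
};

struct ggtt {
	struct address_space vm;                       /* embedded, like ggtt->vm */
};

/* per-platform implementations, analogous to the gen8_ggtt_* vs gen6_ggtt_* hits */
static void gen8_insert_page(struct address_space *vm, uint64_t addr, uint64_t offset)
{
	(void)vm;
	printf("gen8: map pa=%llx at ggtt offset %llx\n",
	       (unsigned long long)addr, (unsigned long long)offset);
}

static void gen8_clear_range(struct address_space *vm, uint64_t start, uint64_t length)
{
	(void)vm;
	printf("gen8: clear [%llx, %llx)\n",
	       (unsigned long long)start, (unsigned long long)(start + length));
}

static void common_cleanup(struct address_space *vm)
{
	printf("cleanup vm of size %llu bytes\n", (unsigned long long)vm->total);
}

/* analogous to gen8_gmch_probe(): wire up the vfuncs for this platform */
static void probe(struct ggtt *ggtt)
{
	ggtt->vm.total = 1ULL << 32;
	ggtt->vm.insert_page = gen8_insert_page;
	ggtt->vm.clear_range = gen8_clear_range;
	ggtt->vm.cleanup = common_cleanup;
}

int main(void)
{
	struct ggtt ggtt;

	probe(&ggtt);
	ggtt.vm.insert_page(&ggtt.vm, 0x1000, 0x0);    /* callers go through the vm ops */
	ggtt.vm.clear_range(&ggtt.vm, 0x0, 4096);
	ggtt.vm.cleanup(&ggtt.vm);
	return 0;
}

The design point, as far as the listing shows it, is that platform differences (gen6 vs gen8 PTE encoding, VT-d workarounds, aliasing PPGTT binding) stay confined to probe/init time; the rest of the code manipulates the GGTT only through the vm callbacks and shared fields such as vm.total, vm.mutex, and vm.bound_list.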