Lines matching refs:pgd (cross-reference hits for the identifier pgd in the Xen paravirtualized MMU code)

94 pgdval_t xen_pgd_val(pgd_t pgd);
99 pgd_t xen_make_pgd(pgdval_t pgd);
427 __visible pgdval_t xen_pgd_val(pgd_t pgd) in xen_pgd_val() argument
429 return pte_mfn_to_pfn(pgd.pgd); in xen_pgd_val()
441 __visible pgd_t xen_make_pgd(pgdval_t pgd) in xen_make_pgd() argument
443 pgd = pte_pfn_to_mfn(pgd); in xen_make_pgd()
444 return native_make_pgd(pgd); in xen_make_pgd()
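The two fragments at 427-444 compose into a complete pair of converters. A reconstruction, essentially verbatim given the lines above: xen_pgd_val() translates the machine frame number (MFN) stored in the raw entry into the guest's pseudo-physical frame number (PFN), and xen_make_pgd() does the reverse before the entry is handed back toward Xen.

        __visible pgdval_t xen_pgd_val(pgd_t pgd)
        {
                /* MFN -> PFN: give the guest its pseudo-physical view. */
                return pte_mfn_to_pfn(pgd.pgd);
        }

        __visible pgd_t xen_make_pgd(pgdval_t pgd)
        {
                /* PFN -> MFN: Xen only deals in machine frame numbers. */
                pgd = pte_pfn_to_mfn(pgd);
                return native_make_pgd(pgd);
        }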
507 static pgd_t *xen_get_user_pgd(pgd_t *pgd) in xen_get_user_pgd() argument
509 pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK); in xen_get_user_pgd()
510 unsigned offset = pgd - pgd_page; in xen_get_user_pgd()
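Lines 507-510 show only the start of xen_get_user_pgd(). A plausible completion, consistent with xen_pgd_alloc() below (1456-1482), which stashes the user pagetable in page->private of the kernel pgd's struct page; USER_LIMIT and the page->private convention are assumptions drawn from that context:

        static pgd_t *xen_get_user_pgd(pgd_t *pgd)
        {
                pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
                unsigned offset = pgd - pgd_page;
                pgd_t *user_ptr = NULL;

                if (offset < pgd_index(USER_LIMIT)) {
                        struct page *page = virt_to_page(pgd_page);

                        /* xen_pgd_alloc() stores the user pgd here. */
                        user_ptr = (pgd_t *)page->private;
                        if (user_ptr)
                                user_ptr += offset;
                }

                return user_ptr;
        }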
565 pgd_val.pgd = p4d_val_ma(val); in xen_set_p4d()
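Line 565 builds a pgd_t from a p4d value because, with the p4d level folded, the user pagetable's top level is still typed pgd_t. A hedged sketch of the surrounding xen_set_p4d(); the __xen_set_p4d_hyper() helper is recalled from mmu_pv.c-era code rather than shown in the fragments:

        static void xen_set_p4d(p4d_t *ptr, p4d_t val)
        {
                pgd_t *user_ptr = xen_get_user_pgd((pgd_t *)ptr);
                pgd_t pgd_val;

                /* Unpinned tables are plain memory: write directly. */
                if (!xen_page_pinned(ptr)) {
                        *ptr = val;
                        if (user_ptr) {
                                pgd_val.pgd = p4d_val_ma(val);
                                *user_ptr = pgd_val;
                        }
                        return;
                }

                /* Pinned tables are read-only: update via hypercall. */
                __xen_set_p4d_hyper(ptr, val);
                if (user_ptr)
                        __xen_set_p4d_hyper((p4d_t *)user_ptr, val);
        }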
662 static void __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd, in __xen_pgd_walk() argument
688 if (pgd_none(pgd[i])) in __xen_pgd_walk()
691 p4d = p4d_offset(&pgd[i], 0); in __xen_pgd_walk()
697 (*func)(mm, virt_to_page(pgd), PT_PGD); in __xen_pgd_walk()
705 __xen_pgd_walk(mm, mm->pgd, func, limit); in xen_pgd_walk()
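Lines 662-705 outline the generic walker that the pin/unpin paths drive. A hedged reconstruction of its shape; the per-level xen_p4d_walk() descent and the 64-bit guard-hole skip are assumed from context:

        static void __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
                                   void (*func)(struct mm_struct *mm, struct page *,
                                                enum pt_level),
                                   unsigned long limit)
        {
                int i, nr = pgd_index(limit) + 1;

                for (i = 0; i < nr; i++) {
                        p4d_t *p4d;

                        /* (skip the hole reserved for Xen -- elided) */

                        if (pgd_none(pgd[i]))
                                continue;

                        p4d = p4d_offset(&pgd[i], 0);
                        xen_p4d_walk(mm, p4d, func, i == nr - 1, limit);
                }

                /* Visit the top level last, so the callback can use it
                 * as a cue for final work such as TLB flushes. */
                (*func)(mm, virt_to_page(pgd), PT_PGD);
        }

        static void xen_pgd_walk(struct mm_struct *mm,
                                 void (*func)(struct mm_struct *mm, struct page *,
                                              enum pt_level),
                                 unsigned long limit)
        {
                __xen_pgd_walk(mm, mm->pgd, func, limit);
        }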
790 static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd) in __xen_pgd_pin() argument
792 pgd_t *user_pgd = xen_get_user_pgd(pgd); in __xen_pgd_pin()
794 trace_xen_mmu_pgd_pin(mm, pgd); in __xen_pgd_pin()
798 __xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT); in __xen_pgd_pin()
800 xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd))); in __xen_pgd_pin()
813 __xen_pgd_pin(mm, mm->pgd); in xen_pgd_pin()
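The pin path (790-813): every pagetable page is first made read-only via the xen_pin_page callback, then a single hypercall pins the top level as an L4 table; a present user pgd gets the same treatment. A hedged reconstruction; the xen_mc_batch()/xen_mc_issue() multicall bracketing is recalled from context, not shown in the fragments:

        static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
        {
                pgd_t *user_pgd = xen_get_user_pgd(pgd);

                xen_mc_batch();

                /* Make every page of the tree read-only first... */
                __xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT);

                /* ...then ask Xen to pin the root as an L4 table. */
                xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));

                if (user_pgd) {
                        xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
                        xen_do_pin(MMUEXT_PIN_L4_TABLE,
                                   PFN_DOWN(__pa(user_pgd)));
                }

                xen_mc_issue(0);
        }

        static void xen_pgd_pin(struct mm_struct *mm)
        {
                __xen_pgd_pin(mm, mm->pgd);
        }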
907 static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd) in __xen_pgd_unpin() argument
909 pgd_t *user_pgd = xen_get_user_pgd(pgd); in __xen_pgd_unpin()
911 trace_xen_mmu_pgd_unpin(mm, pgd); in __xen_pgd_unpin()
915 xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); in __xen_pgd_unpin()
923 __xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT); in __xen_pgd_unpin()
930 __xen_pgd_unpin(mm, mm->pgd); in xen_pgd_unpin()
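Unpinning (907-930) mirrors the pin path in reverse order: the root is unpinned first, then the walk makes the pages writable again. A hedged reconstruction under the same assumptions as the pin sketch:

        static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
        {
                pgd_t *user_pgd = xen_get_user_pgd(pgd);

                xen_mc_batch();

                /* Release the root pin before touching the pages... */
                xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

                if (user_pgd) {
                        xen_do_pin(MMUEXT_UNPIN_TABLE,
                                   PFN_DOWN(__pa(user_pgd)));
                        xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
                }

                /* ...then walk the tree and make everything RW again. */
                __xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);

                xen_mc_issue(0);
        }

        static void xen_pgd_unpin(struct mm_struct *mm)
        {
                __xen_pgd_unpin(mm, mm->pgd);
        }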
974 if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd)) in drop_mm_ref_this_cpu()
993 if (per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd)) in xen_drop_mm_ref()
1009 if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd)) in xen_drop_mm_ref()
1046 if (xen_page_pinned(mm->pgd)) in xen_exit_mmap()
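Lines 974-1046 are the teardown side: a pagetable cannot be unpinned while any vCPU still references it through cr3, so xen_drop_mm_ref() runs drop_mm_ref_this_cpu() on every CPU whose cached xen_current_cr3 matches __pa(mm->pgd) (the two loops at 993 and 1009). A hedged sketch of the pieces, with the cross-CPU IPI plumbing elided:

        static void drop_mm_ref_this_cpu(void *info)
        {
                struct mm_struct *mm = info;

                /* (switch away from mm if it is loaded here -- elided)
                 * If a stale cr3 reference is still pending in the
                 * multicall batch, force it out. */
                if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd))
                        xen_mc_flush();
        }

        static void xen_exit_mmap(struct mm_struct *mm)
        {
                get_cpu();              /* make sure we don't move around */
                xen_drop_mm_ref(mm);
                put_cpu();

                spin_lock(&mm->page_table_lock);

                /* The pgd may not be pinned on the error exit path of
                 * execve, hence the check at 1046. */
                if (xen_page_pinned(mm->pgd))
                        xen_pgd_unpin(mm);

                spin_unlock(&mm->page_table_lock);
        }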
1181 pgd_t *pgd; in xen_cleanmfnmap() local
1187 pgd = pgd_offset_k(vaddr); in xen_cleanmfnmap()
1188 p4d = p4d_offset(pgd, 0); in xen_cleanmfnmap()
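xen_cleanmfnmap() (1181-1188) tears down boot-time mappings of the Xen-provided MFN list. A hedged reconstruction; the xen_cleanmfnmap_p4d() descent and the 2 * PGDIR_SIZE convention (the address the relocated p2m is mapped at, see xen_relocate_p2m() below) are assumed from context:

        static void __init xen_cleanmfnmap(unsigned long vaddr)
        {
                pgd_t *pgd;
                p4d_t *p4d;
                bool unpin;

                /* Only the copy at 2 * PGDIR_SIZE needs unpinning as
                 * its tables are freed. */
                unpin = (vaddr == 2 * PGDIR_SIZE);
                vaddr &= PMD_MASK;
                pgd = pgd_offset_k(vaddr);
                p4d = p4d_offset(pgd, 0);
                if (!p4d_none(*p4d))
                        xen_cleanmfnmap_p4d(p4d, unpin);
        }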
1456 pgd_t *pgd = mm->pgd; in xen_pgd_alloc() local
1457 struct page *page = virt_to_page(pgd); in xen_pgd_alloc()
1461 BUG_ON(PagePinned(virt_to_page(pgd))); in xen_pgd_alloc()
1475 BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd)))); in xen_pgd_alloc()
1480 static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd) in xen_pgd_free() argument
1482 pgd_t *user_pgd = xen_get_user_pgd(pgd); in xen_pgd_free()
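Allocation and freeing of the user pgd (1456-1482). The BUG_ON at 1475 only makes sense if the freshly allocated user page is reachable through xen_get_user_pgd(), which supports the page->private convention sketched earlier. Hedged reconstruction; the vsyscall fixup of the new user pgd is elided:

        static int xen_pgd_alloc(struct mm_struct *mm)
        {
                pgd_t *pgd = mm->pgd;
                struct page *page = virt_to_page(pgd);
                pgd_t *user_pgd;
                int ret = -ENOMEM;

                BUG_ON(PagePinned(virt_to_page(pgd)));
                BUG_ON(page->private != 0);

                user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
                page->private = (unsigned long)user_pgd;

                if (user_pgd != NULL) {
                        /* (vsyscall entry setup elided) */
                        ret = 0;
                }

                BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));

                return ret;
        }

        static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
        {
                pgd_t *user_pgd = xen_get_user_pgd(pgd);

                if (user_pgd)
                        free_page((unsigned long)user_pgd);
        }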
1592 bool pinned = xen_page_pinned(mm->pgd); in xen_alloc_ptpage()
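Line 1592 is the gate in xen_alloc_ptpage(): a newly allocated pagetable page only needs the read-only/pin treatment when the mm's pgd is already pinned; otherwise it stays ordinary writable memory until pin time. Abbreviated sketch, details elided:

        static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn,
                                     unsigned level)
        {
                bool pinned = xen_page_pinned(mm->pgd);

                if (pinned) {
                        /* Mark the new page RO and, for PTE pages, pin
                         * it, batched through the multicall machinery
                         * (elided). */
                }
        }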
1755 void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) in xen_setup_kernel_pagetable() argument
1793 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd); in xen_setup_kernel_pagetable()
1796 addr[0] = (unsigned long)pgd; in xen_setup_kernel_pagetable()
1844 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); in xen_setup_kernel_pagetable()
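The three fragments from xen_setup_kernel_pagetable() (1755-1844) come from the 64-bit boot path: the kernel reads the mapping levels out of the pagetable Xen built (via the m2v() machine-to-virtual helper), copies what it needs into its own top level, switches cr3, and finally unpins the table Xen handed over so its frames can be reclaimed. Abbreviated sketch with elisions marked; only the three quoted lines are verbatim:

        void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
        {
                pud_t *l3;
                unsigned long addr[3];

                /* ... locate the kernel mapping inside Xen's pagetable ... */
                l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);

                /* ... copy entries into the kernel's own top level and
                 * remember which frames must be remapped later ... */
                addr[0] = (unsigned long)pgd;

                /* ... pin the new top level and switch cr3 to it ... */

                /* Release Xen's pin on the boot pagetable. */
                pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
        }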
1901 pgd_t pgd; in xen_early_virt_to_phys() local
1907 pgd = native_make_pgd(xen_read_phys_ulong(pa + pgd_index(vaddr) * in xen_early_virt_to_phys()
1908 sizeof(pgd))); in xen_early_virt_to_phys()
1909 if (!pgd_present(pgd)) in xen_early_virt_to_phys()
1912 pa = pgd_val(pgd) & PTE_PFN_MASK; in xen_early_virt_to_phys()
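xen_early_virt_to_phys() (1901-1912) runs before the normal mm APIs are usable, so it walks the boot pagetables by hand, reading each level through the physical-address accessor xen_read_phys_ulong() seen at 1907. A hedged reconstruction that continues the pgd-level pattern of the fragments down through pud, pmd and pte, with large-page shortcuts:

        static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
        {
                pgd_t pgd;
                pud_t pud;
                pmd_t pmd;
                pte_t pte;
                phys_addr_t pa;

                pa = read_cr3_pa();
                pgd = native_make_pgd(xen_read_phys_ulong(pa +
                                pgd_index(vaddr) * sizeof(pgd)));
                if (!pgd_present(pgd))
                        BUG();
                pa = pgd_val(pgd) & PTE_PFN_MASK;

                pud = native_make_pud(xen_read_phys_ulong(pa +
                                pud_index(vaddr) * sizeof(pud)));
                if (!pud_present(pud))
                        BUG();
                pa = pud_val(pud) & PTE_PFN_MASK;
                if (pud_large(pud))
                        return pa + (vaddr & ~PUD_MASK);

                pmd = native_make_pmd(xen_read_phys_ulong(pa +
                                pmd_index(vaddr) * sizeof(pmd)));
                if (!pmd_present(pmd))
                        BUG();
                pa = pmd_val(pmd) & PTE_PFN_MASK;
                if (pmd_large(pmd))
                        return pa + (vaddr & ~PMD_MASK);

                pte = native_make_pte(xen_read_phys_ulong(pa +
                                pte_index(vaddr) * sizeof(pte)));
                if (!pte_present(pte))
                        BUG();
                pa = pte_val(pte) & PTE_PFN_MASK;

                return pa | (vaddr & ~PAGE_MASK);
        }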
1950 pgd_t *pgd; in xen_relocate_p2m() local
1979 pgd = __va(read_cr3_pa()); in xen_relocate_p2m()
2019 set_pgd(pgd + 2 + idx_pud, __pgd(_PAGE_TABLE | pud_phys)); in xen_relocate_p2m()
2036 set_pgd(pgd + 1, __pgd(0)); in xen_relocate_p2m()
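The final fragments (1950-2036) are from xen_relocate_p2m(), which moves the p2m (pfn-to-mfn) list to a new virtual area at boot: fresh pud tables are hooked in starting at pgd slot 2 (i.e. virtual address 2 * PGDIR_SIZE, matching the xen_cleanmfnmap() check earlier), and the old mapping at slot 1 is cleared once unused. Abbreviated sketch; sizing, allocation and the lower-level fills are elided:

        static void __init xen_relocate_p2m(void)
        {
                phys_addr_t pud_phys;
                int idx_pud, n_pud;
                pgd_t *pgd;

                /* ... size the new area; allocate frames for it ... */

                pgd = __va(read_cr3_pa());

                for (idx_pud = 0; idx_pud < n_pud; idx_pud++) {
                        /* ... build the pud/pmd/pte levels mapping the
                         * new p2m area; pud_phys points at the filled
                         * pud page ... */

                        set_pgd(pgd + 2 + idx_pud,
                                __pgd(_PAGE_TABLE | pud_phys));
                }

                /* ... copy the p2m contents over; repoint the p2m
                 * pointers at the new location ... */

                /* Drop the old p2m mapping at slot 1 once it is unused. */
                set_pgd(pgd + 1, __pgd(0));
        }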