Lines Matching "architecturally-defined"

1 // SPDX-License-Identifier: GPL-2.0
11 phys_addr_t physical_mask __ro_after_init = (1ULL << __PHYSICAL_MASK_SHIFT) - 1;
39 return -EINVAL; in setup_userpte()
48 return -EINVAL; in setup_userpte()
66 * NOTE! For PAE, any changes to the top page-directory-pointer-table in ___pmd_free_tlb()
70 tlb->need_flush_all = 1; in ___pmd_free_tlb()
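Line 70 above is the PAE-only part of ___pmd_free_tlb(). A minimal sketch of how it sits in that function, assuming the mmu_gather interface of recent kernels (the exact paravirt/ptdesc release path is simplified here):

void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
	/*
	 * NOTE! For PAE, any changes to the top page-directory-pointer-table
	 * need a full cr3 reload to be noticed: the four PDPT entries are
	 * loaded into the CPU when cr3 is written, so a ranged INVLPG of the
	 * covered addresses is not enough.
	 */
#ifdef CONFIG_X86_PAE
	tlb->need_flush_all = 1;	/* force a full flush instead of a ranged one */
#endif
	/* Hand the pmd page back through the mmu_gather (release details omitted). */
	tlb_remove_table(tlb, virt_to_page(pmd));
}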
97 list_add(&ptdesc->pt_list, &pgd_list); in pgd_list_add()
104 list_del(&ptdesc->pt_list); in pgd_list_del()
115 virt_to_ptdesc(pgd)->pt_mm = mm; in pgd_set_mm()
120 return page_ptdesc(page)->pt_mm; in pgd_page_get_mm()
126 ptes in non-PAE, or shared PMD in PAE), then just copy the in pgd_ctor()
154 * List of all pgd's needed for non-PAE so it can invalidate entries
157 * tactic would be needed. This is essentially codepath-based locking
161 * -- nyc
167 * updating the top-level pagetable entries to guarantee the
169 * all 4 top-level entries are used almost immediately in a
170 * new process's life, we just pre-populate them here.
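Lines 97-170 above all belong to the pgd_list machinery described by the comment at 154-161. A hedged sketch of pgd_ctor(), pieced together from those fragments: copy the kernel part of swapper_pg_dir into the new pgd whenever the referenced level is shared, and, when the kernel pmd is not shared, also put the pgd on pgd_list so kernel-mapping updates can be propagated to it later:

static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
{
	/* If the pgd points to a shared pagetable level (either the
	   ptes in non-PAE, or shared PMD in PAE), then just copy the
	   references from swapper_pg_dir. */
	if (CONFIG_PGTABLE_LEVELS == 2 ||
	    (CONFIG_PGTABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
	    CONFIG_PGTABLE_LEVELS >= 4)
		clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
				swapper_pg_dir + KERNEL_PGD_BOUNDARY,
				KERNEL_PGD_PTRS);

	/* List required to sync kernel mapping updates (non-shared kernel pmd). */
	if (!SHARED_KERNEL_PMD) {
		pgd_set_mm(pgd, mm);
		pgd_list_add(pgd);
	}
}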
180 * We allocate separate PMDs for the kernel part of the user page-table
181 * when PTI is enabled. We need them to map the per-process LDT into the
182 * user-space page-table.
197 * According to Intel App note "TLBs, Paging-Structure Caches, in pud_populate()
198 * and Their Invalidation", April 2007, document 317080-001, in pud_populate()
200 * TLB via cr3 if the top-level pgd is changed... in pud_populate()
206 /* No need to prepopulate any pagetable entries in non-PAE modes. */
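The Intel app-note fragment at 197-200 sits in the PAE-only pud_populate() of this file, and the comment at 206 is its non-PAE counterpart, where nothing needs pre-populating at all. A hedged sketch of the PAE path (the paravirt allocation hook is omitted):

#ifdef CONFIG_X86_PAE
void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
	/* Almost everything apart from _PAGE_PRESENT is reserved at the
	   pmd (PDPT) level, so only the present bit is set here. */
	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

	/*
	 * In PAE mode the four PDPT entries are cached when cr3 is loaded,
	 * so a top-level change only takes effect after a cr3 reload; hence
	 * the explicit flush demanded by the app note quoted above.
	 */
	flush_tlb_mm(mm);
}
#endif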
259 return -ENOMEM; in preallocate_pmds()
385 * page for pgd. We are able to just allocate 32 bytes for the pgd. in pgtable_cache_init()
386 * During boot time, we create a 32-byte slab for pgd table allocation. in pgtable_cache_init()
404 * a 32-byte slab for pgd to save memory space. in _pgd_alloc()
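The comments at 385-404 describe the PAE trick of not spending a whole page on the top level: a PAE pgd is just four 8-byte PDPT entries, i.e. 32 bytes, so a dedicated slab created in pgtable_cache_init() is enough whenever the kernel pmd is shared. A hedged sketch of the resulting allocator split; PGD_ALLOCATION_ORDER and GFP_PGTABLE_USER come from the surrounding headers and their exact spelling varies a little across releases:

static inline pgd_t *_pgd_alloc(void)
{
	/* Kernel pmd not shared (e.g. Xen PV): the pgd needs a whole page. */
	if (!SHARED_KERNEL_PMD)
		return (pgd_t *)__get_free_pages(GFP_PGTABLE_USER,
						 PGD_ALLOCATION_ORDER);

	/* Shared kernel pmd: 4 PDPT entries x 8 bytes = 32 bytes from the slab. */
	return kmem_cache_alloc(pgd_cache, GFP_PGTABLE_USER);
}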
441 mm->pgd = pgd; in pgd_alloc()
455 * Make sure that pre-populating the pmds is atomic with in pgd_alloc()
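Line 455 opens the comment in pgd_alloc() explaining why the constructor work happens under pgd_lock: pre-populating the pmds and joining pgd_list must look atomic to code that walks the list to propagate kernel-mapping changes. A hedged sketch of that locked section, using the helpers named in the fragments above:

	spin_lock(&pgd_lock);

	pgd_ctor(mm, pgd);			/* copy kernel mappings / join pgd_list */
	pgd_prepopulate_pmd(mm, pgd, pmds);	/* PAE: install the preallocated pmds */
	pgd_prepopulate_user_pmd(mm, pgd, u_pmds);	/* PTI: kernel part of the user table */

	spin_unlock(&pgd_lock);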
523 * We had a write-protection fault here and changed the pmd in pmdp_set_access_flags()
525 * #PF is architecturally guaranteed to do that and in the in pmdp_set_access_flags()
526 * worst-case we'll generate a spurious fault. in pmdp_set_access_flags()
543 * We had a write-protection fault here and changed the pud in pudp_set_access_flags()
545 * #PF is architecturally guaranteed to do that and in the in pudp_set_access_flags()
546 * worst-case we'll generate a spurious fault. in pudp_set_access_flags()
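The fragments at 523-546 describe the same trick in pmdp_set_access_flags() and pudp_set_access_flags(): after a write-protection fault the entry only ever becomes more permissive, so no TLB flush is needed; a CPU holding a stale, stricter translation takes at worst one spurious fault, which the #PF re-walk then resolves. The PTE-level sibling shows the pattern most compactly; a hedged sketch:

int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);

	/* Upgrade in place (e.g. set Dirty/Writable) and skip the flush:
	 * a stale TLB entry can only cause a spurious fault, never a
	 * missed permission check. */
	if (changed && dirty)
		set_pte(ptep, entry);

	return changed;
}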
561 (unsigned long *) &ptep->pte); in ptep_test_and_clear_young()
566 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG)
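Line 561 is the core of ptep_test_and_clear_young(), and the #if at 566 guards its pmd-level counterparts used by THP and non-leaf accessed-bit tracking. A hedged sketch of the pte-level function around the matched line:

int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *ptep)
{
	int ret = 0;

	/* Cheap unlocked check first; only do the atomic clear when the
	 * Accessed bit is actually set. */
	if (pte_young(*ptep))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *) &ptep->pte);

	return ret;
}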
642 * reserve_top_address - reserves a hole in the top of kernel address space
643 * @reserve - size of hole to reserve
652 __FIXADDR_TOP = round_down(-reserve, 1 << PMD_SHIFT) - PAGE_SIZE; in reserve_top_address()
654 -reserve, __FIXADDR_TOP + PAGE_SIZE); in reserve_top_address()
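A small stand-alone worked example of the arithmetic at 652, assuming the non-PAE 32-bit layout (PMD_SHIFT = 22, i.e. 4 MiB, and 4 KiB pages); the 256 MiB hole size is made up for illustration:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096u
#define PMD_SHIFT	22
#define round_down(x, y)	((x) & ~((y) - 1))	/* y must be a power of two */

int main(void)
{
	uint32_t reserve = 256u << 20;	/* hypothetical 256 MiB hole for a hypervisor */
	uint32_t fixaddr_top =
		round_down((uint32_t)-reserve, 1u << PMD_SHIFT) - PAGE_SIZE;

	/* -256 MiB wraps to 0xf0000000, already 4 MiB aligned, so only the
	 * trailing page is subtracted: __FIXADDR_TOP becomes 0xeffff000. */
	printf("__FIXADDR_TOP = %#x\n", fixaddr_top);
	return 0;
}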
693 * p4d_set_huge - setup kernel P4D mapping
695 * No 512GB pages yet -- always return 0
703 * p4d_clear_huge - clear kernel P4D mapping when it is set
705 * No 512GB pages yet -- always return 0
713 * pud_set_huge - setup kernel PUD mapping
719 * Callers should try to decrease page size (1GB -> 2MB -> 4K) if the bigger
732 /* Bail out if we are on a populated non-leaf entry: */ in pud_set_huge()
744 * pmd_set_huge - setup kernel PMD mapping
756 …pr_warn_once("%s: Cannot satisfy [mem %#010llx-%#010llx] with a huge-page mapping due to MTRR over… in pmd_set_huge()
761 /* Bail out if we are on a populated non-leaf entry: */ in pmd_set_huge()
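The kernel-doc fragments at 693-761 describe a ladder: no 512 GiB leaves at the P4D level, while the 1 GiB (PUD) and 2 MiB (PMD) helpers refuse when the MTRRs are not uniform over the range or a non-leaf entry is already populated, and the caller is expected to drop to the next smaller size. A hedged caller-side sketch of that fallback; map_range_sketch() and map_range_4k() are hypothetical stand-ins, not functions from this file:

/* Hypothetical 4 KiB fallback, not shown here. */
static int map_range_4k(unsigned long vaddr, phys_addr_t paddr,
			unsigned long size, pgprot_t prot);

static int map_range_sketch(pud_t *pud, pmd_t *pmd, unsigned long vaddr,
			    phys_addr_t paddr, unsigned long size, pgprot_t prot)
{
	/* Try the biggest leaf first: 1 GiB -> 2 MiB -> 4 KiB. */
	if (size >= PUD_SIZE && IS_ALIGNED(vaddr | paddr, PUD_SIZE) &&
	    pud_set_huge(pud, paddr, prot))
		return 0;

	if (size >= PMD_SIZE && IS_ALIGNED(vaddr | paddr, PMD_SIZE) &&
	    pmd_set_huge(pmd, paddr, prot))
		return 0;

	return map_range_4k(vaddr, paddr, size, prot);
}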
773 * pud_clear_huge - clear kernel PUD mapping when it is set
788 * pmd_clear_huge - clear kernel PMD mapping when it is set
804 * pud_free_pmd_page - Clear pud entry and free pmd page.
832 /* INVLPG to clear all paging-structure caches */ in pud_free_pmd_page()
833 flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1); in pud_free_pmd_page()
851 * pmd_free_pte_page - Clear pmd entry and free pte page.
865 /* INVLPG to clear all paging-structure caches */ in pmd_free_pte_page()
866 flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1); in pmd_free_pte_page()
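Lines 804-866 come from pud_free_pmd_page() and pmd_free_pte_page(), which tear down a lower-level table so a huge leaf can replace it. The ordering is the point: clear the entry, flush so no paging-structure cache still references the old table, and only then free its page. A hedged sketch of the x86-64 pmd-level variant; the 32-bit PAE build instead returns 0 and keeps the page, as the ioremap() comment below (876) indicates:

int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
	pte_t *pte = (pte_t *)pmd_page_vaddr(*pmd);

	pmd_clear(pmd);

	/* INVLPG to clear all paging-structure caches */
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);

	free_page((unsigned long)pte);

	return 1;
}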
876 * Disable free page handling on x86-PAE. This assures that ioremap()
889 if (vma->vm_flags & VM_SHADOW_STACK) in pte_mkwrite()
899 if (vma->vm_flags & VM_SHADOW_STACK) in pmd_mkwrite()
916 VM_WARN_ON_ONCE(!(vma->vm_flags & VM_SHADOW_STACK) && in arch_check_zapped_pte()
923 VM_WARN_ON_ONCE(!(vma->vm_flags & VM_SHADOW_STACK) && in arch_check_zapped_pmd()
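The last four fragments (889-923) are the shadow-stack hooks: a shadow-stack PTE/PMD is encoded as Write=0 together with Dirty=1, so making an entry writable has to choose between the shadow-stack and the ordinary encoding, and finding a shadow-stack-encoded entry being zapped from a non-shadow-stack VMA is worth a warning. A hedged sketch of the PTE-level mkwrite split; the in-tree version additionally resolves the software SavedDirty bit, which is omitted here:

pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
	/* Shadow-stack VMAs get the Write=0, Dirty=1 encoding... */
	if (vma->vm_flags & VM_SHADOW_STACK)
		return pte_mkwrite_shstk(pte);

	/* ...everything else gets a conventionally writable PTE. */
	return pte_mkwrite_novma(pte);
}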