gmap.c: f8b04488b060b155f8b6769aa70412c3630b03f0 → 907835e6dee6f77ac30ae50bb3f88bd92055c86e

// SPDX-License-Identifier: GPL-2.0
/*
 * KVM guest address space mapping code
 *
 * Copyright IBM Corp. 2007, 2020
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 *               David Hildenbrand <david@redhat.com>
 *               Janosch Frank <frankja@linux.vnet.ibm.com>

--- 582 unchanged lines hidden (view full) ---

	mm = gmap->mm;
	pgd = pgd_offset(mm, vmaddr);
	VM_BUG_ON(pgd_none(*pgd));
	p4d = p4d_offset(pgd, vmaddr);
	VM_BUG_ON(p4d_none(*p4d));
	pud = pud_offset(p4d, vmaddr);
	VM_BUG_ON(pud_none(*pud));
	/* large puds cannot yet be handled */
-	if (pud_large(*pud))
+	if (pud_leaf(*pud))
		return -EFAULT;
	pmd = pmd_offset(pud, vmaddr);
	VM_BUG_ON(pmd_none(*pmd));
	/* Are we allowed to use huge pages? */
	if (pmd_large(*pmd) && !gmap->mm->context.allow_gmap_hpage_1m)
		return -EFAULT;
	/* Link gmap segment table entry location to page table. */
	rc = radix_tree_preload(GFP_KERNEL_ACCOUNT);

--- 2288 unchanged lines hidden ---
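
For context, the hunk above is a standard walk down the host page tables (pgd → p4d → pud → pmd) that bails out when it meets a leaf (large) entry at the PUD level; pud_leaf() tests the same condition the old pud_large() helper did. A minimal sketch of such a walk, using the generic kernel page-table accessors, is shown below — the function name walk_to_pmd, its -EFAULT error convention, and the output parameter are illustrative and not part of gmap.c's actual interface.

/*
 * Illustrative sketch only: walk mm's page tables down to the PMD that
 * covers vmaddr, refusing to continue past a leaf (huge) PUD entry, in
 * the spirit of the hunk above.
 */
#include <linux/mm.h>
#include <linux/pgtable.h>

static int walk_to_pmd(struct mm_struct *mm, unsigned long vmaddr,
		       pmd_t **pmdp)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, vmaddr);
	if (pgd_none(*pgd))
		return -EFAULT;
	p4d = p4d_offset(pgd, vmaddr);
	if (p4d_none(*p4d))
		return -EFAULT;
	pud = pud_offset(p4d, vmaddr);
	if (pud_none(*pud))
		return -EFAULT;
	/* A leaf PUD maps a large page directly; there is no PMD below it. */
	if (pud_leaf(*pud))
		return -EFAULT;
	pmd = pmd_offset(pud, vmaddr);
	if (pmd_none(*pmd))
		return -EFAULT;
	*pmdp = pmd;
	return 0;
}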