/*
 * Copyright IBM Corporation, 2015
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */

#include <linux/mm.h>
#include <asm/machdep.h>
#include <asm/mmu.h>

#include "internal.h"

/*
 * Insert or update the hash page table entry for a 4K linux PTE.
 * Returns 0 on success (or when the PTE is busy, so the access is
 * simply retried), 1 when the permissions don't match and a page
 * fault should be taken, and -1 on hypervisor failure.
 */
int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
		   pte_t *ptep, unsigned long trap, unsigned long flags,
		   int ssize, int subpg_prot)
{
	real_pte_t rpte;
	unsigned long hpte_group;
	unsigned long rflags, pa;
	unsigned long old_pte, new_pte;
	unsigned long vpn, hash, slot;
	unsigned long shift = mmu_psize_defs[MMU_PAGE_4K].shift;

	/*
	 * atomically mark the linux PTE busy and dirty
	 */
	do {
		pte_t pte = READ_ONCE(*ptep);

		old_pte = pte_val(pte);
		/* If PTE busy, retry the access */
		if (unlikely(old_pte & H_PAGE_BUSY))
			return 0;
		/* If PTE permissions don't match, take page fault */
		if (unlikely(!check_pte_access(access, old_pte)))
			return 1;
		/*
		 * Try to lock the PTE, add ACCESSED and, if it was
		 * a write access, DIRTY.
		 */
		new_pte = old_pte | H_PAGE_BUSY | _PAGE_ACCESSED;
		if (access & _PAGE_WRITE)
			new_pte |= _PAGE_DIRTY;
	} while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));

	/*
	 * PP bits. _PAGE_USER is already PP bit 0x2, so we only
	 * need to add in 0x1 if it's a read-only user page
	 */
	rflags = htab_convert_pte_flags(new_pte, flags);
	rpte = __real_pte(__pte(old_pte), ptep, PTRS_PER_PTE);

	if (cpu_has_feature(CPU_FTR_NOEXECUTE) &&
	    !cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);

	vpn = hpt_vpn(ea, vsid, ssize);
	if (unlikely(old_pte & H_PAGE_HASHPTE)) {
		/*
		 * There MIGHT be an HPTE for this pte
		 */
		unsigned long gslot = pte_get_hash_gslot(vpn, shift, ssize,
							 rpte, 0);

		if (mmu_hash_ops.hpte_updatepp(gslot, rflags, vpn, MMU_PAGE_4K,
					       MMU_PAGE_4K, ssize, flags) == -1)
			old_pte &= ~_PAGE_HPTEFLAGS;
	}

	if (likely(!(old_pte & H_PAGE_HASHPTE))) {

		pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;
		hash = hpt_hash(vpn, shift, ssize);

repeat:
		hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;

		/* Insert into the hash table, primary slot */
		slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa, rflags, 0,
						MMU_PAGE_4K, MMU_PAGE_4K, ssize);
		/*
		 * Primary is full, try the secondary
		 */
		if (unlikely(slot == -1)) {
			hpte_group = (~hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot = mmu_hash_ops.hpte_insert(hpte_group, vpn, pa,
							rflags,
							HPTE_V_SECONDARY,
							MMU_PAGE_4K,
							MMU_PAGE_4K, ssize);
			if (slot == -1) {
				/* Both groups full: evict an entry and retry */
				if (mftb() & 0x1)
					hpte_group = (hash & htab_hash_mask) *
							HPTES_PER_GROUP;
				mmu_hash_ops.hpte_remove(hpte_group);
				/*
				 * FIXME!! Should we retry the group from
				 * which we removed an entry?
				 */
				goto repeat;
			}
		}
		/*
		 * Hypervisor failure. Restore the old PTE and return -1,
		 * similar to the other __hash_page_* variants.
		 */
		if (unlikely(slot == -2)) {
			*ptep = __pte(old_pte);
			hash_failure_debug(ea, access, vsid, trap, ssize,
					   MMU_PAGE_4K, MMU_PAGE_4K, old_pte);
			return -1;
		}
		/* Record the hash slot in the linux PTE's hidx bits */
		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | H_PAGE_HASHPTE;
		new_pte |= pte_set_hidx(ptep, rpte, 0, slot, PTRS_PER_PTE);

		if (stress_hpt())
			hpt_do_stress(ea, hpte_group);
	}
	/* Publish the new PTE, clearing the busy bit to unlock it */
	*ptep = __pte(new_pte & ~H_PAGE_BUSY);
	return 0;
}