pgtable-generic.c (db6da59cf27b5661ced03754ae0550f8914eda9e) | pgtable-generic.c (0d940a9b270b9220dcff74d8e9123c9788365751) |
---|---|
1// SPDX-License-Identifier: GPL-2.0 2/* 3 * mm/pgtable-generic.c 4 * 5 * Generic pgtable methods declared in linux/pgtable.h 6 * 7 * Copyright (C) 2010 Linus Torvalds 8 */ 9 10#include <linux/pagemap.h> 11#include <linux/hugetlb.h> 12#include <linux/pgtable.h> | 1// SPDX-License-Identifier: GPL-2.0 2/* 3 * mm/pgtable-generic.c 4 * 5 * Generic pgtable methods declared in linux/pgtable.h 6 * 7 * Copyright (C) 2010 Linus Torvalds 8 */ 9 10#include <linux/pagemap.h> 11#include <linux/hugetlb.h> 12#include <linux/pgtable.h> |
13#include <linux/swap.h> 14#include <linux/swapops.h> |
|
13#include <linux/mm_inline.h> 14#include <asm/tlb.h> 15 16/* 17 * If a p?d_bad entry is found while walking page tables, report 18 * the error, before resetting entry to p?d_none. Usually (but 19 * very seldom) called out from the p?d_none_or_clear_bad macros. 20 */ --- 203 unchanged lines hidden (view full) --- 224 pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp); 225 226 /* collapse entails shooting down ptes not pmd */ 227 flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); 228 return pmd; 229} 230#endif 231#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ | 15#include <linux/mm_inline.h> 16#include <asm/tlb.h> 17 18/* 19 * If a p?d_bad entry is found while walking page tables, report 20 * the error, before resetting entry to p?d_none. Usually (but 21 * very seldom) called out from the p?d_none_or_clear_bad macros. 22 */ --- 203 unchanged lines hidden (view full) --- 226 pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp); 227 228 /* collapse entails shooting down ptes not pmd */ 229 flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); 230 return pmd; 231} 232#endif 233#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ |
/*
 * __pte_offset_map - locklessly map the page table pointed to by *pmd.
 * @pmd:     pointer to the pmd entry to read (read locklessly, not locked).
 * @addr:    the virtual address being looked up.
 * @pmdvalp: if non-NULL, receives the pmd value snapshot that was read.
 *
 * Returns a mapped pte pointer on success, or NULL when *pmd holds no
 * page table: none, a pmd migration entry, transparent-huge, devmap,
 * or a bad entry (which is additionally cleared via pmd_clear_bad()).
 * Callers must therefore be prepared for a NULL return.
 *
 * NOTE(review): the snapshot is taken with pmdp_get_lockless() and is
 * NOT revalidated here — presumably callers needing a stable view use
 * __pte_offset_map_lock() below, or recheck themselves; confirm at the
 * call sites.
 */
pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp)
{
	pmd_t pmdval;

	/* rcu_read_lock() to be added later */
	/* Single lockless read: all checks below act on this snapshot. */
	pmdval = pmdp_get_lockless(pmd);
	if (pmdvalp)
		*pmdvalp = pmdval;
	/* No page table here: empty entry or a migration placeholder. */
	if (unlikely(pmd_none(pmdval) || is_pmd_migration_entry(pmdval)))
		goto nomap;
	/* Huge/devmap pmd: there is no pte level to map. */
	if (unlikely(pmd_trans_huge(pmdval) || pmd_devmap(pmdval)))
		goto nomap;
	if (unlikely(pmd_bad(pmdval))) {
		/* Report and reset the corrupt entry, then fail the map. */
		pmd_clear_bad(pmd);
		goto nomap;
	}
	return __pte_map(&pmdval, addr);
nomap:
	/* rcu_read_unlock() to be added later */
	return NULL;
}

/*
 * pte_offset_map_nolock - map the page table and report its lock pointer
 * without acquiring it.
 * @mm:   the mm_struct the pmd belongs to (used only for pte_lockptr()).
 * @pmd:  pointer to the pmd entry to read.
 * @addr: the virtual address being looked up.
 * @ptlp: out parameter; on success set to the pte spinlock pointer.
 *
 * Returns the mapped pte or NULL (same failure cases as
 * __pte_offset_map()).  *ptlp is written only on success, and the lock
 * is NOT taken — the caller decides if/when to lock.
 */
pte_t *pte_offset_map_nolock(struct mm_struct *mm, pmd_t *pmd,
			     unsigned long addr, spinlock_t **ptlp)
{
	pmd_t pmdval;
	pte_t *pte;

	pte = __pte_offset_map(pmd, addr, &pmdval);
	if (likely(pte))
		/* Lock pointer derived from the snapshot, not from *pmd. */
		*ptlp = pte_lockptr(mm, &pmdval);
	return pte;
}

/*
 * __pte_offset_map_lock - map the page table and acquire its lock,
 * revalidating the pmd afterwards.
 * @mm:   the mm_struct the pmd belongs to.
 * @pmd:  pointer to the pmd entry to read.
 * @addr: the virtual address being looked up.
 * @ptlp: out parameter; on success set to the (now held) pte spinlock.
 *
 * Returns the mapped pte with *ptlp locked, or NULL if no page table
 * is (or remains) mapped at *pmd.  Because the initial read is
 * lockless, the pmd is re-read under the lock and compared with the
 * snapshot; on mismatch the mapping and lock are dropped and the whole
 * sequence retried, so a successful return guarantees the pte page
 * still belongs to this pmd.
 */
pte_t *__pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
			     unsigned long addr, spinlock_t **ptlp)
{
	spinlock_t *ptl;
	pmd_t pmdval;
	pte_t *pte;
again:
	pte = __pte_offset_map(pmd, addr, &pmdval);
	if (unlikely(!pte))
		return pte;
	ptl = pte_lockptr(mm, &pmdval);
	spin_lock(ptl);
	/* Recheck under the lock: did the pmd change since the snapshot? */
	if (likely(pmd_same(pmdval, pmdp_get_lockless(pmd)))) {
		*ptlp = ptl;
		return pte;
	}
	/* Raced with a pmd change: unmap, unlock, and start over. */
	pte_unmap_unlock(pte, ptl);
	goto again;
}
|