#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>

static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pte_t *pte;
        int err = 0;

        /* Map the PTE page (possibly from highmem) and visit every entry. */
        pte = pte_offset_map(pmd, addr);
        for (;;) {
                err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
                if (err)
                        break;
                addr += PAGE_SIZE;
                if (addr == end)
                        break;
                pte++;
        }

        pte_unmap(pte);
        return err;
}

static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pmd_t *pmd;
        unsigned long next;
        int err = 0;

        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                if (pmd_none_or_clear_bad(pmd)) {
                        /* Report the unmapped range through ->pte_hole. */
                        if (walk->pte_hole)
                                err = walk->pte_hole(addr, next, walk);
                        if (err)
                                break;
                        continue;
                }
                if (walk->pmd_entry)
                        err = walk->pmd_entry(pmd, addr, next, walk);
                if (!err && walk->pte_entry)
                        err = walk_pte_range(pmd, addr, next, walk);
                if (err)
                        break;
        } while (pmd++, addr = next, addr != end);

        return err;
}

static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pud_t *pud;
        unsigned long next;
        int err = 0;

        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
                if (pud_none_or_clear_bad(pud)) {
                        if (walk->pte_hole)
                                err = walk->pte_hole(addr, next, walk);
                        if (err)
                                break;
                        continue;
                }
                if (walk->pud_entry)
                        err = walk->pud_entry(pud, addr, next, walk);
                /* Only descend if some lower-level callback wants entries. */
                if (!err && (walk->pmd_entry || walk->pte_entry))
                        err = walk_pmd_range(pud, addr, next, walk);
                if (err)
                        break;
        } while (pud++, addr = next, addr != end);

        return err;
}

/**
 * walk_page_range - walk a memory map's page tables with a callback
 * @addr: starting address
 * @end: ending address
 * @walk: set of callbacks to invoke for each level of the tree;
 *        walk->mm must point to the mm_struct whose page tables are walked
 *
 * Recursively walk the page table tree of walk->mm for the range
 * [addr, end), calling supplied callbacks. Callbacks are called in-order
 * (first PGD, first PUD, first PMD, first PTE, second PTE... second PMD,
 * etc.). If lower-level callbacks are omitted, walking depth is reduced.
 *
 * Each callback receives an entry pointer, the start and end of the
 * associated range, and a copy of the original mm_walk for access to
 * the ->private or ->mm fields.
 *
 * No locks are taken, but the bottom level iterator will map PTE
 * directories from highmem if necessary.
 *
 * If any callback returns a non-zero value, the walk is aborted and
 * the return value is propagated back to the caller. Otherwise 0 is
 * returned. If walk->mm is NULL, -EINVAL is returned.
 */
int walk_page_range(unsigned long addr, unsigned long end,
                    struct mm_walk *walk)
{
        pgd_t *pgd;
        unsigned long next;
        int err = 0;

        if (addr >= end)
                return err;

        if (!walk->mm)
                return -EINVAL;

        pgd = pgd_offset(walk->mm, addr);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd)) {
                        if (walk->pte_hole)
                                err = walk->pte_hole(addr, next, walk);
                        if (err)
                                break;
                        continue;
                }
                if (walk->pgd_entry)
                        err = walk->pgd_entry(pgd, addr, next, walk);
                if (!err &&
                    (walk->pud_entry || walk->pmd_entry || walk->pte_entry))
                        err = walk_pud_range(pgd, addr, next, walk);
                if (err)
                        break;
        } while (pgd++, addr = next, addr != end);

        return err;
}
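
/*
 * Example usage (an illustrative sketch, not part of the walker itself):
 * count_pte_entry() and count_present_ptes() below are hypothetical names
 * invented for this example. A caller fills in a struct mm_walk with the
 * callbacks it needs, sets ->mm (and optionally ->private for its own
 * state), then invokes walk_page_range() over the range of interest.
 * The walker takes no locks, so the caller typically holds mm->mmap_sem
 * to keep the page tables stable. Guarded out so it is never built.
 */
#ifdef PAGEWALK_USAGE_EXAMPLE
static int count_pte_entry(pte_t *pte, unsigned long addr,
                           unsigned long end, struct mm_walk *walk)
{
        /* ->private carries the caller's accumulator through the walk. */
        unsigned long *count = walk->private;

        if (pte_present(*pte))
                (*count)++;
        return 0;       /* returning non-zero would abort the walk */
}

static unsigned long count_present_ptes(struct mm_struct *mm,
                                        unsigned long start,
                                        unsigned long end)
{
        unsigned long count = 0;
        struct mm_walk walk = {
                .pte_entry = count_pte_entry,
                .mm        = mm,
                .private   = &count,
        };

        walk_page_range(start, end, &walk);
        return count;
}
#endif /* PAGEWALK_USAGE_EXAMPLE */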