pgtable.c: diff between commits 2d8ad8719591fa803b0d589ed057fa46f49b7155 (old) and a79e53d85683c6dd9f99c90511028adc2043031f (new). Both hunks replace spin_lock_irqsave()/spin_unlock_irqrestore() on pgd_lock with plain spin_lock()/spin_unlock() and drop the now-unused `flags` locals.
 #include <linux/mm.h>
 #include <linux/gfp.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
 #include <asm/tlb.h>
 #include <asm/fixmap.h>
 
 #define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO

--- 107 unchanged lines hidden ---

 	if (!SHARED_KERNEL_PMD) {
 		pgd_set_mm(pgd, mm);
 		pgd_list_add(pgd);
 	}
 }
 
 static void pgd_dtor(pgd_t *pgd)
 {
-	unsigned long flags; /* can be called from interrupt context */
-
 	if (SHARED_KERNEL_PMD)
 		return;
 
-	spin_lock_irqsave(&pgd_lock, flags);
+	spin_lock(&pgd_lock);
 	pgd_list_del(pgd);
-	spin_unlock_irqrestore(&pgd_lock, flags);
+	spin_unlock(&pgd_lock);
 }
 
 /*
  * List of all pgd's needed for non-PAE so it can invalidate entries
  * in both cached and uncached pgd's; not needed for PAE since the
  * kernel pmd is shared. If PAE were not to share the pmd a similar
  * tactic would be needed. This is essentially codepath-based locking
  * against pageattr.c; it is the unique case in which a valid change
 
--- 115 unchanged lines hidden ---
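A note on what the hunk above changes: the irqsave idiom forces every caller to carry an `unsigned long flags` local that saves and restores the CPU's interrupt state around the critical section; once pgd_lock is never taken from interrupt context, the plain spin_lock()/spin_unlock() pair suffices and the local disappears. Below is a minimal, self-contained userspace sketch of the two calling patterns. The spinlock and IRQ primitives are stubs invented for illustration (in the kernel proper, spin_lock_irqsave() is a macro that writes the saved state into its flags argument rather than returning it):

/* Illustrative stubs only -- NOT the kernel implementation. */
#include <stdio.h>

typedef struct { volatile int locked; } spinlock_t;

static void spin_lock(spinlock_t *l)
{
	while (__sync_lock_test_and_set(&l->locked, 1))
		;	/* spin until the holder releases */
}

static void spin_unlock(spinlock_t *l)
{
	__sync_lock_release(&l->locked);
}

/* Stand-ins for interrupt-state save/restore; they only model the
 * caller-side bookkeeping, since userspace cannot disable interrupts. */
static unsigned long fake_irq_save(void)          { return 1; }
static void fake_irq_restore(unsigned long flags) { (void)flags; }

static spinlock_t pgd_lock;

/* Old shape of pgd_dtor(): a flags local plus save/restore around the
 * lock, required when the lock may be taken from interrupt context. */
static void dtor_irqsave_style(void)
{
	unsigned long flags;

	flags = fake_irq_save();
	spin_lock(&pgd_lock);
	/* ... pgd_list_del(pgd) would run here ... */
	spin_unlock(&pgd_lock);
	fake_irq_restore(flags);
}

/* New shape: no interrupt-context callers, so no flags bookkeeping. */
static void dtor_plain_style(void)
{
	spin_lock(&pgd_lock);
	/* ... pgd_list_del(pgd) would run here ... */
	spin_unlock(&pgd_lock);
}

int main(void)
{
	dtor_irqsave_style();
	dtor_plain_style();
	puts("both locking shapes completed");
	return 0;
}

The deletion of the `flags` locals in both hunks of this diff is exactly the caller-side simplification the second function shows.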

 		pud_populate(mm, pud, pmd);
 	}
 }
 
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
 	pgd_t *pgd;
 	pmd_t *pmds[PREALLOCATED_PMDS];
-	unsigned long flags;
 
 	pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
 
 	if (pgd == NULL)
 		goto out;
 
 	mm->pgd = pgd;
 
 	if (preallocate_pmds(pmds) != 0)
 		goto out_free_pgd;
 
 	if (paravirt_pgd_alloc(mm) != 0)
 		goto out_free_pmds;
 
 	/*
 	 * Make sure that pre-populating the pmds is atomic with
 	 * respect to anything walking the pgd_list, so that they
 	 * never see a partially populated pgd.
 	 */
-	spin_lock_irqsave(&pgd_lock, flags);
+	spin_lock(&pgd_lock);
 
 	pgd_ctor(mm, pgd);
 	pgd_prepopulate_pmd(mm, pgd, pmds);
 
-	spin_unlock_irqrestore(&pgd_lock, flags);
+	spin_unlock(&pgd_lock);
 
 	return pgd;
 
 out_free_pmds:
 	free_pmds(pmds);
 out_free_pgd:
 	free_page((unsigned long)pgd);
 out:

--- 155 unchanged lines hidden ---
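The comment retained in the second hunk ("Make sure that pre-populating the pmds is atomic with respect to anything walking the pgd_list...") describes a publish-under-lock pattern: the new pgd is linked onto the global list and its pmds are populated inside one critical section, and since list walkers take the same lock, they can never observe a half-initialized entry. A small self-contained sketch of that pattern, using a pthread mutex and toy types invented for illustration:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define TOY_PREALLOCATED_PMDS 4	/* invented stand-in for PREALLOCATED_PMDS */

struct toy_pgd {
	struct toy_pgd *next;
	int pmds[TOY_PREALLOCATED_PMDS];
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct toy_pgd *pgd_list;	/* every walker also takes list_lock */

/* Mirror of the pgd_alloc() critical section: link the new entry into
 * the list and populate it under one lock, so the pair is atomic with
 * respect to anyone walking pgd_list. */
static struct toy_pgd *toy_pgd_alloc(void)
{
	struct toy_pgd *p = calloc(1, sizeof(*p));

	if (!p)
		return NULL;

	pthread_mutex_lock(&list_lock);
	p->next = pgd_list;		/* "pgd_ctor": publish on the list... */
	pgd_list = p;
	for (int i = 0; i < TOY_PREALLOCATED_PMDS; i++)
		p->pmds[i] = i + 1;	/* ..."pgd_prepopulate_pmd", same section */
	pthread_mutex_unlock(&list_lock);

	return p;
}

int main(void)
{
	struct toy_pgd *p = toy_pgd_alloc();

	printf("published toy pgd, first pmd slot = %d\n", p ? p->pmds[0] : -1);
	free(p);
	return 0;
}

Walkers serialized on the same lock see either no entry or a fully populated one, never the intermediate state between the two steps; that invariant is unchanged by this diff, which only alters whether interrupts are disabled while the lock is held.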