--- fault.c (7b2d0dbac4890c8ca4a8acc57709639fc8b158e9)
+++ fault.c (019132ff3daf36c97a4006655dfd00ee42f2b590)
/*
 * Copyright (C) 1995  Linus Torvalds
 * Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
 * Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
 */
#include <linux/sched.h>		/* test_thread_flag(), ...	*/
#include <linux/kdebug.h>		/* oops_begin/end, ...		*/
#include <linux/module.h>		/* search_exception_table	*/
#include <linux/bootmem.h>		/* max_low_pfn			*/
#include <linux/kprobes.h>		/* NOKPROBE_SYMBOL, ...		*/
#include <linux/mmiotrace.h>		/* kmmio_handler, ...		*/
#include <linux/perf_event.h>		/* perf_sw_event		*/
#include <linux/hugetlb.h>		/* hstate_index_to_shift	*/
#include <linux/prefetch.h>		/* prefetchw			*/
#include <linux/context_tracking.h>	/* exception_enter(), ...	*/
#include <linux/uaccess.h>		/* faulthandler_disabled()	*/

+#include <asm/cpufeature.h>		/* boot_cpu_has, ...		*/
#include <asm/traps.h>			/* dotraplinkage, ...		*/
#include <asm/pgalloc.h>		/* pgd_*(), ...			*/
#include <asm/kmemcheck.h>		/* kmemcheck_*(), ...		*/
#include <asm/fixmap.h>			/* VSYSCALL_ADDR		*/
#include <asm/vsyscall.h>		/* emulate_vsyscall		*/
#include <asm/vm86.h>			/* struct vm86			*/
+#include <asm/mmu_context.h>		/* vma_pkey()			*/

#define CREATE_TRACE_POINTS
#include <asm/trace/exceptions.h>

/*
 * Page fault error code bits:
 *
 *   bit 0 ==	 0: no page found	1: protection fault

--- 132 unchanged lines hidden ---

		instr++;

		if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
			break;
	}
	return prefetch;
}

+/*
+ * A protection key fault means that the PKRU value did not allow
+ * access to some PTE.  Userspace can figure out what PKRU was
+ * from the XSAVE state, and this function fills out a field in
+ * siginfo so userspace can discover which protection key was set
+ * on the PTE.
+ *
+ * If we get here, we know that the hardware signaled a PF_PK
+ * fault and that there was a VMA once we got in the fault
+ * handler.  It does *not* guarantee that the VMA we find here
+ * was the one that we faulted on.
+ *
+ * 1. T1   : mprotect_key(foo, PAGE_SIZE, pkey=4);
+ * 2. T1   : set PKRU to deny access to pkey=4, touches page
+ * 3. T1   : faults...
+ * 4. T2   : mprotect_key(foo, PAGE_SIZE, pkey=5);
+ * 5. T1   : enters fault handler, takes mmap_sem, etc...
+ * 6. T1   : reaches here, sees vma_pkey(vma)=5, when we really
+ *	      faulted on a pte with its pkey=4.
+ */
+static void fill_sig_info_pkey(int si_code, siginfo_t *info,
+		struct vm_area_struct *vma)
+{
+	/* This is effectively an #ifdef */
+	if (!boot_cpu_has(X86_FEATURE_OSPKE))
+		return;
+
+	/* Fault not from Protection Keys: nothing to do */
+	if (si_code != SEGV_PKUERR)
+		return;
+	/*
+	 * force_sig_info_fault() is called from a number of
+	 * contexts, some of which have a VMA and some of which
+	 * do not.  The PF_PK handling happens after we have a
+	 * valid VMA, so we should never reach this without a
+	 * valid VMA.
+	 */
+	if (!vma) {
+		WARN_ONCE(1, "PKU fault with no VMA passed in");
+		info->si_pkey = 0;
+		return;
+	}
+	/*
+	 * si_pkey should be thought of as a strong hint, but not
+	 * absolutely guaranteed to be 100% accurate because of
+	 * the race explained above.
+	 */
+	info->si_pkey = vma_pkey(vma);
+}
+
static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address,
		     struct task_struct *tsk, struct vm_area_struct *vma,
		     int fault)
{
	unsigned lsb = 0;
	siginfo_t info;

	info.si_signo	= si_signo;
	info.si_errno	= 0;
	info.si_code	= si_code;
	info.si_addr	= (void __user *)address;
	if (fault & VM_FAULT_HWPOISON_LARGE)
		lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
	if (fault & VM_FAULT_HWPOISON)
		lsb = PAGE_SHIFT;
	info.si_addr_lsb = lsb;

+	fill_sig_info_pkey(si_code, &info, vma);
+
	force_sig_info(si_signo, &info, tsk);
}

DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);

#ifdef CONFIG_X86_32
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)

--- 644 unchanged lines hidden ---

{
	__bad_area(regs, error_code, address, NULL, SEGV_MAPERR);
}

static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
		      unsigned long address, struct vm_area_struct *vma)
{
-	__bad_area(regs, error_code, address, vma, SEGV_ACCERR);
+	/*
+	 * This OSPKE check is not strictly necessary at runtime.
+	 * But, doing it this way allows compiler optimizations
+	 * if pkeys are compiled out.
+	 */
+	if (boot_cpu_has(X86_FEATURE_OSPKE) && (error_code & PF_PK))
+		__bad_area(regs, error_code, address, vma, SEGV_PKUERR);
+	else
+		__bad_area(regs, error_code, address, vma, SEGV_ACCERR);
}

static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
	  struct vm_area_struct *vma, unsigned int fault)
{
	struct task_struct *tsk = current;
	int code = BUS_ADRERR;

--- 491 unchanged lines hidden ---
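
For context, the following is a minimal, hypothetical userspace sketch of a
consumer of the si_pkey field this patch fills in; it is not part of the
patch. It assumes a CPU and kernel with OSPKE enabled and a libc that provides
the pkey_alloc()/pkey_set()/pkey_mprotect() wrappers and the si_pkey siginfo
accessor (glibc 2.27 added the pkey_* wrappers; on older headers si_pkey must
be read at its raw offset inside siginfo_t).

#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static void segv_handler(int sig, siginfo_t *si, void *ctx)
{
	char msg[64];
	int len;

	(void)sig;
	(void)ctx;
	/*
	 * si_pkey is a strong hint, not a guarantee: as the comment in
	 * fill_sig_info_pkey() explains, another thread may have changed
	 * the VMA's key between the fault and the kernel filling siginfo.
	 * (snprintf() is not async-signal-safe; fine for a demo only.)
	 */
	len = snprintf(msg, sizeof(msg), "SEGV code=%d pkey=%d\n",
		       si->si_code, (int)si->si_pkey);
	write(STDERR_FILENO, msg, len);
	/*
	 * Returning would retry the faulting access forever: PKRU is
	 * restored from the signal frame on sigreturn, so a pkey_set()
	 * done here would be undone.  Exit instead.
	 */
	_exit(si->si_code == SEGV_PKUERR ? 0 : 1);
}

int main(void)
{
	struct sigaction sa;
	char *page;
	int pkey;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = segv_handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGSEGV, &sa, NULL);

	page = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		    MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	pkey = pkey_alloc(0, 0);
	if (page == MAP_FAILED || pkey < 0)
		return 1;			/* no pkey support */

	pkey_mprotect(page, 4096, PROT_READ | PROT_WRITE, pkey);
	pkey_set(pkey, PKEY_DISABLE_ACCESS);	/* deny access via PKRU */

	return *(volatile char *)page;		/* faults with SEGV_PKUERR */
}

If the handler fires with SEGV_PKUERR, the reported key is whatever
vma_pkey() returned when the kernel built the siginfo, so a concurrent
pkey_mprotect() from another thread can make it stale; this is exactly the
T1/T2 race documented in fill_sig_info_pkey() above.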