--- gup.c (84a73014d86fd660822a20c032625e3afe99ca58)
+++ gup.c (de60f5f10c58d4f34b68622442c0e04180367f3f)
@@ -1,8 +1,8 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/spinlock.h>
 
 #include <linux/mm.h>
 #include <linux/pagemap.h>
 #include <linux/rmap.h>

--- 115 unchanged lines hidden ---

@@ -124,17 +124,17 @@
 		set_page_dirty(page);
 		/*
 		 * pte_mkyoung() would be more correct here, but atomic care
 		 * is needed to avoid losing the dirty bit: it is easier to use
 		 * mark_page_accessed().
 		 */
 		mark_page_accessed(page);
 	}
-	if ((flags & FOLL_POPULATE) && (vma->vm_flags & VM_LOCKED)) {
+	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
 		/*
 		 * The preliminary mapping check is mainly to avoid the
 		 * pointless overhead of lock_page on the ZERO_PAGE
 		 * which might bounce very badly if there is contention.
 		 *
 		 * If the page is already locked, we don't need to
 		 * handle it now - vmscan will handle it later if and
 		 * when it attempts to reclaim the page.

--- 153 unchanged lines hidden ---
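The comment above guards a trylock-based mlock step whose body falls in the hidden lines. A minimal sketch of that pattern, assuming the usual shape of follow_page_pte() in this era (simplified, with LRU draining and the migration caveats elided; an illustration, not the hidden lines verbatim):

	if (page->mapping && trylock_page(page)) {
		/* Page is mapped and was cheap to lock: move it to the
		 * unevictable LRU now. A page we could not trylock is
		 * simply skipped; vmscan will mlock it at reclaim time. */
		mlock_vma_page(page);
		unlock_page(page);
	}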

@@ -294,16 +294,19 @@
  */
 static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
 		unsigned long address, unsigned int *flags, int *nonblocking)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned int fault_flags = 0;
 	int ret;
 
+	/* mlock all present pages, but do not fault in new pages */
+	if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
+		return -ENOENT;
 	/* For mm_populate(), just skip the stack guard page. */
 	if ((*flags & FOLL_POPULATE) &&
 			(stack_guard_page_start(vma, address) ||
 			 stack_guard_page_end(vma, address + PAGE_SIZE)))
 		return -ENOENT;
 	if (*flags & FOLL_WRITE)
 		fault_flags |= FAULT_FLAG_WRITE;
 	if (nonblocking)

--- 575 unchanged lines hidden ---
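The guard added to faultin_page() is a mask test that fires exactly when FOLL_MLOCK is set and FOLL_POPULATE is clear. A standalone sketch of that logic (the bit values below are illustrative placeholders, not the ones in mm.h):

	#include <assert.h>

	#define FOLL_POPULATE (1u << 0)	/* fault in pages that are absent */
	#define FOLL_MLOCK    (1u << 1)	/* mlock pages that are present */

	/* Mirrors the new check: skip the fault (-ENOENT in the kernel)
	 * only when asked to mlock without populating. */
	static int skip_fault(unsigned int flags)
	{
		return (flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK;
	}

	int main(void)
	{
		assert(skip_fault(FOLL_MLOCK));			/* lock-on-fault VMA */
		assert(!skip_fault(FOLL_MLOCK | FOLL_POPULATE));	/* plain mlock */
		assert(!skip_fault(FOLL_POPULATE));		/* plain populate */
		return 0;
	}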

@@ -885,17 +888,20 @@
 	int gup_flags;
 
 	VM_BUG_ON(start & ~PAGE_MASK);
 	VM_BUG_ON(end & ~PAGE_MASK);
 	VM_BUG_ON_VMA(start < vma->vm_start, vma);
 	VM_BUG_ON_VMA(end > vma->vm_end, vma);
 	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
 
-	gup_flags = FOLL_TOUCH | FOLL_POPULATE;
+	gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
+	if (vma->vm_flags & VM_LOCKONFAULT)
+		gup_flags &= ~FOLL_POPULATE;
+
 	/*
 	 * We want to touch writable mappings with a write fault in order
 	 * to break COW, except for shared mappings because these don't COW
 	 * and we would not want to dirty them for nothing.
 	 */
 	if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
 		gup_flags |= FOLL_WRITE;
 

--- 518 unchanged lines hidden ---
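Taken together, this hunk picks gup_flags for populate_vma_page_range() in three steps. A hypothetical helper condensing that logic (populate_gup_flags() does not exist in the kernel; the flag names do):

	static unsigned int populate_gup_flags(unsigned long vm_flags)
	{
		unsigned int gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;

		/* VM_LOCKONFAULT VMAs: mlock what is already resident,
		 * but fault nothing new in. */
		if (vm_flags & VM_LOCKONFAULT)
			gup_flags &= ~FOLL_POPULATE;

		/* Private writable mappings get a write fault up front to
		 * break COW; shared mappings don't COW, so don't dirty
		 * them for nothing. */
		if ((vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
			gup_flags |= FOLL_WRITE;

		return gup_flags;
	}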
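From userspace, VM_LOCKONFAULT is reached through mlock2(2) with the MLOCK_ONFAULT flag, introduced by the same patch series as this change. A hedged usage example, assuming a v4.4+ kernel and a libc that wraps mlock2 (glibc 2.27 or newer):

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <sys/mman.h>

	int main(void)
	{
		size_t len = 16 * 4096;
		char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (buf == MAP_FAILED)
			return 1;
		buf[0] = 1;	/* fault in only the first page */

		/* Lock resident pages now; pages faulted in later get locked
		 * as they arrive. populate_vma_page_range() sees
		 * VM_LOCKONFAULT, clears FOLL_POPULATE, and so leaves the
		 * remaining pages unfaulted here. */
		if (mlock2(buf, len, MLOCK_ONFAULT)) {
			perror("mlock2");
			return 1;
		}
		return 0;
	}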