memory.c (e1fd09e3d1dd4a1a8b3b33bc1fd647eee9f4e475 → ec1c86b25f4bdd9dce6436c0539d2a6ae676e1c4; lines present only in the newer blob are marked with '+')
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/mm/memory.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 */

/*

--- 5103 unchanged lines hidden ---

		return;

	if (major)
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
	else
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
}

+#ifdef CONFIG_LRU_GEN
+static void lru_gen_enter_fault(struct vm_area_struct *vma)
+{
+	/* the LRU algorithm doesn't apply to sequential or random reads */
+	current->in_lru_fault = !(vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ));
+}
+
+static void lru_gen_exit_fault(void)
+{
+	current->in_lru_fault = false;
+}
+#else
+static void lru_gen_enter_fault(struct vm_area_struct *vma)
+{
+}
+
+static void lru_gen_exit_fault(void)
+{
+}
+#endif /* CONFIG_LRU_GEN */
+
/*
 * By the time we get here, we already hold the mm semaphore
 *
 * The mmap_lock may have been released depending on flags and our
 * return value. See filemap_fault() and __folio_lock_or_retry().
 */
vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
			   unsigned int flags, struct pt_regs *regs)

--- 15 unchanged lines hidden ---

	/*
	 * Enable the memcg OOM handling for faults triggered in user
	 * space. Kernel faults are handled more gracefully.
	 */
	if (flags & FAULT_FLAG_USER)
		mem_cgroup_enter_user_fault();

+	lru_gen_enter_fault(vma);
+
	if (unlikely(is_vm_hugetlb_page(vma)))
		ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
	else
		ret = __handle_mm_fault(vma, address, flags);

+	lru_gen_exit_fault();
+
	if (flags & FAULT_FLAG_USER) {
		mem_cgroup_exit_user_fault();
		/*
		 * The task may have entered a memcg OOM situation but
		 * if the allocation error was handled gracefully (no
		 * VM_FAULT_OOM), there is no need to kill anything.
		 * Just clean up the OOM state peacefully.
		 */

--- 625 unchanged lines hidden ---
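
For readers skimming the diff: the added code (CONFIG_LRU_GEN, the multi-generational LRU) gives each task a flag, current->in_lru_fault, that is raised for the duration of a page fault unless the VMA carries the VM_SEQ_READ or VM_RAND_READ access hints, for which the comment notes the LRU algorithm does not apply; handle_mm_fault() now brackets the fault handling with lru_gen_enter_fault()/lru_gen_exit_fault(). The standalone sketch below only illustrates that enter/exit pattern outside the kernel; struct task, struct vm_area, current_task and handle_fault() are simplified stand-ins invented for this example, not kernel APIs.

/*
 * Standalone sketch (not kernel code) of the pattern the diff introduces:
 * a per-task flag is raised around the fault path, except for VMAs whose
 * access hints say the LRU heuristics should not apply.
 */
#include <stdbool.h>
#include <stdio.h>

#define VM_SEQ_READ  0x1UL   /* stand-ins for the kernel's madvise-derived hints */
#define VM_RAND_READ 0x2UL

struct task {
	bool in_lru_fault;
};

struct vm_area {
	unsigned long vm_flags;
};

static struct task current_task;     /* stand-in for the kernel's "current" */

static void lru_gen_enter_fault(const struct vm_area *vma)
{
	/* the LRU algorithm doesn't apply to sequential or random reads */
	current_task.in_lru_fault =
		!(vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ));
}

static void lru_gen_exit_fault(void)
{
	current_task.in_lru_fault = false;
}

static void handle_fault(const struct vm_area *vma)
{
	lru_gen_enter_fault(vma);
	/* real fault handling would run here and could test the flag */
	printf("in_lru_fault = %d\n", current_task.in_lru_fault);
	lru_gen_exit_fault();
}

int main(void)
{
	struct vm_area normal = { .vm_flags = 0 };
	struct vm_area streaming = { .vm_flags = VM_SEQ_READ };

	handle_fault(&normal);     /* prints in_lru_fault = 1 */
	handle_fault(&streaming);  /* prints in_lru_fault = 0 */
	return 0;
}

Running the sketch prints in_lru_fault = 1 for the plain VMA and 0 for the VM_SEQ_READ one, mirroring the check in lru_gen_enter_fault() above.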