memory.c: 5169b844b7dd5934cd4f22ab66de0cc669abf0b0 → 5c041f5d1f23d3a172dd0db3215634c484b4acd6
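Between these two revisions, the core fault and zap paths learn about pte markers, a new kind of non-present pte entry for file-backed memory: a forward declaration of do_fault() is added near the top of the file, zap_pte_range() gains a branch that simply drops marker entries, and do_swap_page() routes marker faults to a new handle_pte_marker() helper.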
```diff
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * linux/mm/memory.c
  *
  * Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
  */

 /*

 [... 86 unchanged lines hidden ...]

 #ifndef CONFIG_NUMA
 unsigned long max_mapnr;
 EXPORT_SYMBOL(max_mapnr);

 struct page *mem_map;
 EXPORT_SYMBOL(mem_map);
 #endif

+static vm_fault_t do_fault(struct vm_fault *vmf);
+
```
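The declaration is needed because do_fault() is defined much further down in the file; a handler placed above that definition which wants to fall back to the regular file-backed fault path could then look roughly like the sketch below. This is hypothetical, not code from this commit, and handle_marker_as_missing() is an invented name:

```c
/*
 * Hypothetical sketch only: a fault handler defined earlier in the file
 * than do_fault(), made possible by the forward declaration above.
 */
static vm_fault_t handle_marker_as_missing(struct vm_fault *vmf)
{
        /* Treat the entry like a missing page: take the normal fault path. */
        return do_fault(vmf);
}
```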
```diff
 /*
  * A number of key systems in x86 including ioremap() rely on the assumption
  * that high_memory defines the upper bound on direct map memory, the end
  * of ZONE_NORMAL.  Under CONFIG_DISCONTIG this means that max_low_pfn and
  * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
  * and ZONE_HIGHMEM.
  */
 void *high_memory;

 [... 1299 unchanged lines hidden ...]

                         rss[MM_SWAPENTS]--;
                         if (unlikely(!free_swap_and_cache(entry)))
                                 print_bad_pte(vma, addr, ptent, NULL);
                 } else if (is_migration_entry(entry)) {
                         page = pfn_swap_entry_to_page(entry);
                         if (!should_zap_page(details, page))
                                 continue;
                         rss[mm_counter(page)]--;
+                } else if (is_pte_marker_entry(entry)) {
+                        /* By default, simply drop all pte markers when zap */
                 } else if (is_hwpoison_entry(entry)) {
                         if (!should_zap_cows(details))
                                 continue;
                 } else {
                         /* We should have covered all the swap entry types */
                         WARN_ON_ONCE(1);
                 }
                 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);

 [... 2124 unchanged lines hidden ...]

          * have to detect via the refcount if we're really the exclusive
          * user. Try freeing the swapcache to get rid of the swapcache
          * reference only in case it's likely that we'll be the exclusive user.
          */
         return (fault_flags & FAULT_FLAG_WRITE) && !PageKsm(page) &&
                 page_count(page) == 2;
 }

+static vm_fault_t handle_pte_marker(struct vm_fault *vmf)
+{
+        swp_entry_t entry = pte_to_swp_entry(vmf->orig_pte);
+        unsigned long marker = pte_marker_get(entry);
+
+        /*
+         * PTE markers should always be with file-backed memories, and the
+         * marker should never be empty.  If anything weird happened, the best
+         * thing to do is to kill the process along with its mm.
+         */
+        if (WARN_ON_ONCE(vma_is_anonymous(vmf->vma) || !marker))
+                return VM_FAULT_SIGBUS;
+
+        /* TODO: handle pte markers */
+        return 0;
+}
+
```
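handle_pte_marker() treats the marker as a bag of bits: pte_marker_get() extracts them from the swap-style entry, and an empty marker is a bug. A minimal sketch of the encoding this implies, assuming the helpers come from swapops.h in the same series and that a dedicated swap type (called SWP_PTE_MARKER here, an assumed name) tags marker entries:

```c
typedef unsigned long pte_marker;

/*
 * Sketch, not the series' exact definitions: a pte marker reuses the
 * swap-entry format, with the type field identifying it as a marker
 * and the offset field carrying the marker flag bits.
 */
static inline swp_entry_t make_pte_marker_entry(pte_marker marker)
{
        return swp_entry(SWP_PTE_MARKER, marker);
}

static inline bool is_pte_marker_entry(swp_entry_t entry)
{
        return swp_type(entry) == SWP_PTE_MARKER;
}

static inline pte_marker pte_marker_get(swp_entry_t entry)
{
        return swp_offset(entry);
}
```

This also explains the zap_pte_range() hunk above: a marker carries no page or swap slot, so zapping it needs no cleanup beyond clearing the pte.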
```diff
 /*
  * We enter with non-exclusive mmap_lock (to exclude vma changes,
  * but allow concurrent faults), and pte mapped but not yet locked.
  * We return with pte unmapped and unlocked.
  *
  * We return with the mmap_lock locked or unlocked in the same cases
  * as does filemap_fault().
  */

 [... 21 unchanged lines hidden ...]

                 } else if (is_device_exclusive_entry(entry)) {
                         vmf->page = pfn_swap_entry_to_page(entry);
                         ret = remove_device_exclusive_entry(vmf);
                 } else if (is_device_private_entry(entry)) {
                         vmf->page = pfn_swap_entry_to_page(entry);
                         ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
                 } else if (is_hwpoison_entry(entry)) {
                         ret = VM_FAULT_HWPOISON;
+                } else if (is_pte_marker_entry(entry)) {
+                        ret = handle_pte_marker(vmf);
                 } else {
                         print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL);
                         ret = VM_FAULT_SIGBUS;
                 }
                 goto out;
         }

         /* Prevent swapoff from happening to us. */

 [... 2001 unchanged lines hidden ...]
```
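Note the dispatch order in do_swap_page(): marker entries are only tested after device-exclusive, device-private, and hwpoison entries, and the final else still reports anything unknown as a bad pte. At this point in the series nothing installs markers yet and the TODO in handle_pte_marker() returns 0, so the path is effectively dormant. A producer would presumably look something like the sketch below; install_pte_marker() is an invented name, and the pte lookup and locking are elided:

```c
/*
 * Hypothetical producer sketch: install a marker into a file-backed
 * VMA's page table so that a later fault reaches handle_pte_marker().
 * The caller must hold the page-table lock and pass a mapped pte.
 */
static void install_pte_marker(struct mm_struct *mm, unsigned long addr,
                               pte_t *pte, pte_marker marker)
{
        swp_entry_t entry = make_pte_marker_entry(marker);

        set_pte_at(mm, addr, pte, swp_entry_to_pte(entry));
}
```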