Lines Matching +full:async +full:- +full:prefix

1 /* SPDX-License-Identifier: GPL-2.0-or-later */
14 #include <linux/tracepoint-defs.h>
62 #define FOLIO_PAGES_MAPPED (COMPOUND_MAPPED - 1)
76 return atomic_read(&folio->_nr_pages_mapped) & FOLIO_PAGES_MAPPED; in folio_nr_pages_mapped()
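The two lines above use a packed-counter idiom: one flag bit (COMPOUND_MAPPED) sits above the per-page mapcount, so masking with FOLIO_PAGES_MAPPED = COMPOUND_MAPPED - 1 strips the flag and leaves only the count. A minimal userspace sketch of that idiom; the DEMO_* names and the flag's bit position are illustrative, not the kernel's:

#include <stdio.h>

/* Illustrative layout: one flag bit above a page-mapped counter. */
#define DEMO_COMPOUND_MAPPED	(1u << 23)			/* "entire folio mapped" flag */
#define DEMO_PAGES_MAPPED_MASK	(DEMO_COMPOUND_MAPPED - 1)	/* low bits: mapped-page count */

int main(void)
{
	unsigned int nr_pages_mapped = DEMO_COMPOUND_MAPPED | 37;

	/* Masking with (flag - 1) drops the flag and leaves the count. */
	printf("pages mapped: %u\n",
	       nr_pages_mapped & DEMO_PAGES_MAPPED_MASK);	/* 37 */
	printf("entire mapping: %s\n",
	       (nr_pages_mapped & DEMO_COMPOUND_MAPPED) ? "yes" : "no");
	return 0;
}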
81 unsigned long mapping = (unsigned long)folio->mapping; in folio_raw_mapping()
87 * This is a file-backed mapping, and is about to be memory mapped - invoke its
108 vma->vm_ops = &vma_dummy_vm_ops; in mmap_file()
120 if (vma->vm_ops && vma->vm_ops->close) { in vma_close()
121 vma->vm_ops->close(vma); in vma_close()
127 vma->vm_ops = &vma_dummy_vm_ops; in vma_close()
136 int nr_throttled = atomic_read(&pgdat->nr_writeback_throttled); in acct_reclaim_writeback()
146 wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_ISOLATED]; in wake_throttle_isolated()
174 DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index); in force_page_cache_readahead()
191 * folio_evictable - Test whether a folio is evictable.
194 * Test whether @folio is evictable -- i.e., should be placed on
214 * Turn a non-refcounted page (->_refcount == 0) into refcounted with
225 * Return true if a folio needs ->release_folio() calling upon it.
260 #define K(x) ((x) << (PAGE_SHIFT-10))
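K(x) converts a page count to kilobytes: shifting left by PAGE_SHIFT multiplies by the page size, and the extra -10 divides by 1024. A standalone check of the arithmetic, assuming 4 KiB pages (PAGE_SHIFT == 12):

#include <stdio.h>

#define DEMO_PAGE_SHIFT	12				/* assume 4 KiB pages */
#define K(x)		((x) << (DEMO_PAGE_SHIFT - 10))	/* pages -> KiB */

int main(void)
{
	/* 256 pages * 4096 bytes = 1048576 bytes = 1024 KiB. */
	printf("%lu KiB\n", K(256UL));			/* prints "1024 KiB" */
	return 0;
}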
309 * general, page_zone(page)->lock must be held by the caller to prevent the
311 * If a caller does not hold page_zone(page)->lock, it must guarantee that the
343 * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
395 * function is used in the performance-critical __free_one_page().
411 buddy = page + (__buddy_pfn - pfn); in find_buddy_page_pfn()
426 if (zone->contiguous) in pageblock_pfn_to_page()
436 zone->contiguous = false; in clear_zone_contiguous()
448 * caller passes in a non-large folio.
455 folio->_flags_1 = (folio->_flags_1 & ~0xffUL) | order; in folio_set_order()
457 folio->_folio_nr_pages = 1U << order; in folio_set_order()
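folio_set_order() packs the order into the low byte of _flags_1 (hence the ~0xffUL mask) and caches the page count as 1 << order. A hedged sketch of the same pack/unpack arithmetic using a plain struct, not the real struct folio:

#include <stdio.h>

struct demo_folio {			/* stand-in for struct folio */
	unsigned long flags_1;		/* low 8 bits hold the order */
	unsigned long nr_pages;		/* cached 2^order */
};

static void demo_set_order(struct demo_folio *f, unsigned int order)
{
	f->flags_1 = (f->flags_1 & ~0xffUL) | order;	/* keep high bits, store order */
	f->nr_pages = 1UL << order;			/* folio spans 2^order pages */
}

int main(void)
{
	struct demo_folio f = { .flags_1 = 0xabcd00 };

	demo_set_order(&f, 9);		/* e.g. a 2 MiB folio of 4 KiB pages */
	printf("order=%lu nr_pages=%lu\n", f.flags_1 & 0xff, f.nr_pages);
	return 0;			/* prints "order=9 nr_pages=512" */
}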
472 if (data_race(list_empty(&folio->_deferred_list))) in folio_unqueue_deferred_split()
491 atomic_set(&folio->_entire_mapcount, -1); in prep_compound_head()
492 atomic_set(&folio->_nr_pages_mapped, 0); in prep_compound_head()
493 atomic_set(&folio->_pincount, 0); in prep_compound_head()
495 INIT_LIST_HEAD(&folio->_deferred_list); in prep_compound_head()
502 p->mapping = TAIL_MAPPING; in prep_compound_tail()
568 enum migrate_mode mode; /* Async or sync migration mode */
613 return list_empty(&area->free_list[migratetype]); in free_area_empty()
621 * Executable code area - executable, not writable, not stack
640 * Data area - private, writable, not stack
677 * 1) VM_IO check prevents migration from double-counting during mlock. in mlock_vma_folio()
680 * file->f_op->mmap() is using vm_insert_page(s), when VM_LOCKED may in mlock_vma_folio()
683 if (unlikely((vma->vm_flags & (VM_LOCKED|VM_SPECIAL)) == VM_LOCKED) && in mlock_vma_folio()
692 if (unlikely(vma->vm_flags & VM_LOCKED) && in munlock_vma_folio()
714 if (pgoff >= vma->vm_pgoff) { in vma_pgoff_address()
715 address = vma->vm_start + in vma_pgoff_address()
716 ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); in vma_pgoff_address()
718 if (address < vma->vm_start || address >= vma->vm_end) in vma_pgoff_address()
719 address = -EFAULT; in vma_pgoff_address()
720 } else if (pgoff + nr_pages - 1 >= vma->vm_pgoff) { in vma_pgoff_address()
721 /* Test above avoids possibility of wrap to 0 on 32-bit */ in vma_pgoff_address()
722 address = vma->vm_start; in vma_pgoff_address()
724 address = -EFAULT; in vma_pgoff_address()
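The excerpt above is the core of vma_pgoff_address(): a file page offset maps to vm_start + ((pgoff - vm_pgoff) << PAGE_SHIFT), with -EFAULT when the result lands outside [vm_start, vm_end). A self-contained sketch of that calculation for the common single-page case; the struct and constant names here are illustrative, not the kernel's:

#include <stdio.h>

#define DEMO_PAGE_SHIFT	12
#define DEMO_EFAULT	14

struct demo_vma {			/* stand-in for vm_area_struct */
	unsigned long vm_start, vm_end;	/* mapping range [vm_start, vm_end) */
	unsigned long vm_pgoff;		/* file offset of vm_start, in pages */
};

/* Virtual address of file page @pgoff inside @vma, or -DEMO_EFAULT. */
static long demo_pgoff_address(unsigned long pgoff, const struct demo_vma *vma)
{
	unsigned long address;

	if (pgoff < vma->vm_pgoff)
		return -DEMO_EFAULT;
	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << DEMO_PAGE_SHIFT);
	if (address < vma->vm_start || address >= vma->vm_end)
		return -DEMO_EFAULT;	/* page lies outside the vma */
	return address;
}

int main(void)
{
	struct demo_vma vma = { 0x400000, 0x500000, 16 };

	/* File page 20 sits 4 pages past vm_start: 0x400000 + 4 * 4096. */
	printf("%lx\n", (unsigned long)demo_pgoff_address(20, &vma));	/* 404000 */
	return 0;
}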
731 * Returns -EFAULT if all of the page is outside the range of vma.
737 VM_BUG_ON_PAGE(PageKsm(page), page); /* KSM page->index unusable */ in vma_address()
747 struct vm_area_struct *vma = pvmw->vma; in vma_address_end()
751 /* Common case, plus ->pgoff is invalid for KSM */ in vma_address_end()
752 if (pvmw->nr_pages == 1) in vma_address_end()
753 return pvmw->address + PAGE_SIZE; in vma_address_end()
755 pgoff = pvmw->pgoff + pvmw->nr_pages; in vma_address_end()
756 address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); in vma_address_end()
758 if (address < vma->vm_start || address > vma->vm_end) in vma_address_end()
759 address = vma->vm_end; in vma_address_end()
766 int flags = vmf->flags; in maybe_unlock_mmap_for_io()
778 fpin = get_file(vmf->vma->vm_file); in maybe_unlock_mmap_for_io()
811 #define mminit_dprintk(level, prefix, fmt, arg...) \ argument
815 pr_warn("mminit::" prefix " " fmt, ##arg); \
817 printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
826 const char *prefix, const char *fmt, ...) in mminit_dprintk() argument
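mminit_dprintk() is a level-gated debug printer: with DEBUG_MEMORY_INIT it expands to a macro that tags messages with "mminit::" plus the caller's prefix, warning at low levels and printing at KERN_DEBUG otherwise; without it, it collapses to an empty inline stub. A userspace approximation of the same gating pattern; the DEMO_* names and the verbosity knob are assumptions, not the kernel's logic:

#include <stdio.h>

#define DEMO_MMINIT_VERBOSITY	1	/* assumed build-time verbosity knob */

#ifdef DEMO_DEBUG_MEMORY_INIT
#define demo_mminit_dprintk(level, prefix, fmt, ...)			\
do {									\
	if ((level) < DEMO_MMINIT_VERBOSITY)				\
		fprintf(stderr, "mminit::" prefix " " fmt, ##__VA_ARGS__); \
} while (0)
#else
/* Debug option off: the call compiles away entirely. */
#define demo_mminit_dprintk(level, prefix, fmt, ...)	do { } while (0)
#endif

int main(void)
{
	demo_mminit_dprintk(0, "memmap_init", "node %d done\n", 0);
	return 0;
}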
839 #define NODE_RECLAIM_NOSCAN -2
840 #define NODE_RECLAIM_FULL -1
860 * mm/memory-failure.c
879 /* The ALLOC_WMARK bits are used as an index to zone->watermark */
886 #define ALLOC_WMARK_MASK (ALLOC_NO_WATERMARKS-1)
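Per the comment, the low ALLOC_WMARK bits double as an index into the zone's watermark array, which is why ALLOC_WMARK_MASK is simply ALLOC_NO_WATERMARKS - 1. A small sketch of indexing a watermark table that way; every value here is illustrative rather than the kernel's:

#include <stdio.h>

/* Illustrative flag layout: the low bits select the watermark. */
#define DEMO_ALLOC_WMARK_MIN		0x00
#define DEMO_ALLOC_WMARK_LOW		0x01
#define DEMO_ALLOC_WMARK_HIGH		0x02
#define DEMO_ALLOC_NO_WATERMARKS	0x04
#define DEMO_ALLOC_WMARK_MASK		(DEMO_ALLOC_NO_WATERMARKS - 1)

int main(void)
{
	unsigned long watermark[3] = { 128, 256, 384 };	/* min, low, high (pages) */
	unsigned int alloc_flags = DEMO_ALLOC_WMARK_HIGH | 0x40;	/* plus unrelated flag bits */

	/* Masking with (NO_WATERMARKS - 1) recovers the array index. */
	printf("wmark = %lu pages\n",
	       watermark[alloc_flags & DEMO_ALLOC_WMARK_MASK]);	/* 384 */
	return 0;
}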
889 * Only MMU archs have async oom victim reclaim - aka oom_reaper so we
990 return -EINVAL; in vmap_pages_range_noflush()
1026 /* we are working on non-current tsk/mm */
1030 /* gup_fast: prevent fall-back to slow gup */
1043 * Indicates for which pages that are write-protected in the page table,
1051 * * GUP-fast and fork(): mm->write_protect_seq
1052 * * GUP-fast and KSM or temporary unmapping (swap, migration): see
1057 * PTE-mapped THP.
1059 * If the vma is NULL, we're coming from the GUP-fast path and might have
1067 * has to be writable -- and if it references (part of) an anonymous in gup_must_unshare()
1078 * We only care about R/O long-term pining: R/O short-term in gup_must_unshare()
1093 return is_cow_mapping(vma->vm_flags); in gup_must_unshare()
1101 * During GUP-fast we might not get called on the head page for a in gup_must_unshare()
1102 * hugetlb page that is mapped using cont-PTE, because GUP-fast does in gup_must_unshare()
1105 * page (as it cannot be partially COW-shared), so lookup the head page. in gup_must_unshare()
1123 * NOTE: we must check this before VM_SOFTDIRTY on soft-dirty in vma_soft_dirty_enabled()
1124 * enablements, because when without soft-dirty being compiled in, in vma_soft_dirty_enabled()
1132 * Soft-dirty is kind of special: its tracking is enabled when the in vma_soft_dirty_enabled()
1135 return !(vma->vm_flags & VM_SOFTDIRTY); in vma_soft_dirty_enabled()
1141 MAS_BUG_ON(&vmi->mas, vmi->mas.node != MAS_START && in vma_iter_config()
1142 (vmi->mas.index > index || vmi->mas.last < index)); in vma_iter_config()
1143 __mas_set_range(&vmi->mas, index, last - 1); in vma_iter_config()
1152 return mas_preallocate(&vmi->mas, vma, GFP_KERNEL); in vma_iter_prealloc()
1157 mas_store_prealloc(&vmi->mas, NULL); in vma_iter_clear()
1163 __mas_set_range(&vmi->mas, start, end - 1); in vma_iter_clear_gfp()
1164 mas_store_gfp(&vmi->mas, NULL, gfp); in vma_iter_clear_gfp()
1165 if (unlikely(mas_is_err(&vmi->mas))) in vma_iter_clear_gfp()
1166 return -ENOMEM; in vma_iter_clear_gfp()
1173 return mas_walk(&vmi->mas); in vma_iter_load()
1182 if (MAS_WARN_ON(&vmi->mas, vmi->mas.node != MAS_START && in vma_iter_store()
1183 vmi->mas.index > vma->vm_start)) { in vma_iter_store()
1184 pr_warn("%lx > %lx\n store vma %lx-%lx\n into slot %lx-%lx\n", in vma_iter_store()
1185 vmi->mas.index, vma->vm_start, vma->vm_start, in vma_iter_store()
1186 vma->vm_end, vmi->mas.index, vmi->mas.last); in vma_iter_store()
1188 if (MAS_WARN_ON(&vmi->mas, vmi->mas.node != MAS_START && in vma_iter_store()
1189 vmi->mas.last < vma->vm_start)) { in vma_iter_store()
1190 pr_warn("%lx < %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n", in vma_iter_store()
1191 vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end, in vma_iter_store()
1192 vmi->mas.index, vmi->mas.last); in vma_iter_store()
1196 if (vmi->mas.node != MAS_START && in vma_iter_store()
1197 ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start))) in vma_iter_store()
1200 __mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1); in vma_iter_store()
1201 mas_store_prealloc(&vmi->mas, vma); in vma_iter_store()
1207 if (vmi->mas.node != MAS_START && in vma_iter_store_gfp()
1208 ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start))) in vma_iter_store_gfp()
1211 __mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1); in vma_iter_store_gfp()
1212 mas_store_gfp(&vmi->mas, vma, gfp); in vma_iter_store_gfp()
1213 if (unlikely(mas_is_err(&vmi->mas))) in vma_iter_store_gfp()
1214 return -ENOMEM; in vma_iter_store_gfp()