Lines matching refs: gfp_t (a brief usage sketch follows the listing)

205 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
1452 static inline bool should_skip_kasan_unpoison(gfp_t flags) in should_skip_kasan_unpoison()
1470 static inline bool should_skip_init(gfp_t flags) in should_skip_init()
1481 gfp_t gfp_flags) in post_alloc_hook()
1540 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags, in prep_new_page()
2764 gfp_t gfp_flags, unsigned int alloc_flags, in rmqueue()
2797 noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in should_fail_alloc_page()
2921 unsigned int alloc_flags, gfp_t gfp_mask) in zone_watermark_fast()
3000 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask) in alloc_flags_nofragment()
3032 static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask, in gfp_to_alloc_flags_cma()
3047 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, in get_page_from_freelist()
3207 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask) in warn_alloc_show_mem()
3226 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...) in warn_alloc()
3252 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order, in __alloc_pages_cpuset_fallback()
3272 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, in __alloc_pages_may_oom()
3367 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_compact()
3492 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_compact()
3532 static bool __need_reclaim(gfp_t gfp_mask) in __need_reclaim()
3558 void fs_reclaim_acquire(gfp_t gfp_mask) in fs_reclaim_acquire()
3575 void fs_reclaim_release(gfp_t gfp_mask) in fs_reclaim_release()
3613 __perform_reclaim(gfp_t gfp_mask, unsigned int order, in __perform_reclaim()
3639 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_reclaim()
3672 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask, in wake_all_kswapds()
3692 gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order) in gfp_to_alloc_flags()
3701 BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_MIN_RESERVE); in gfp_to_alloc_flags()
3702 BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD); in gfp_to_alloc_flags()
3759 static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask) in __gfp_pfmemalloc_flags()
3777 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask) in gfp_pfmemalloc_allowed()
3793 should_reclaim_retry(gfp_t gfp_mask, unsigned order, in should_reclaim_retry()
3898 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, in __alloc_pages_slowpath()
4176 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order, in prepare_alloc_pages()
4178 struct alloc_context *ac, gfp_t *alloc_gfp, in prepare_alloc_pages()
4239 unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid, in __alloc_pages_bulk()
4251 gfp_t alloc_gfp; in __alloc_pages_bulk()
4391 struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid, in __alloc_pages()
4396 gfp_t alloc_gfp; /* The gfp_t that was actually used for allocation */ in __alloc_pages()
4456 struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid, in __folio_alloc()
4474 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) in __get_free_pages()
4485 unsigned long get_zeroed_page(gfp_t gfp_mask) in get_zeroed_page()
4546 gfp_t gfp_mask) in __page_frag_cache_refill()
4549 gfp_t gfp = gfp_mask; in __page_frag_cache_refill()
4576 unsigned int fragsz, gfp_t gfp_mask, in page_frag_alloc_align()
4695 void *alloc_pages_exact(size_t size, gfp_t gfp_mask) in alloc_pages_exact()
4720 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) in alloc_pages_exact_nid()
6111 unsigned migratetype, gfp_t gfp_mask) in alloc_contig_range()
6237 unsigned long nr_pages, gfp_t gfp_mask) in __alloc_contig_pages()
6297 struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask, in alloc_contig_pages()
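For context, every entry point above takes a gfp_t allocation mask. The sketch below is a minimal, hedged illustration of how a caller might feed GFP flags into a few of the allocators listed (__get_free_pages(), get_zeroed_page(), alloc_pages_exact()); it is not taken from this file, and the helper name demo_gfp_usage() plus the 12 KiB size are invented for the example.

#include <linux/gfp.h>		/* gfp_t, GFP_KERNEL, allocator prototypes */
#include <linux/errno.h>	/* -ENOMEM */

/* Hypothetical helper (not from the file above): exercises a few of the
 * gfp_t-taking entry points shown in the listing. */
static int demo_gfp_usage(void)
{
	unsigned long addr, zpage;
	void *buf;

	/* One order-1 block (two contiguous pages); GFP_KERNEL may sleep. */
	addr = __get_free_pages(GFP_KERNEL, 1);
	if (!addr)
		return -ENOMEM;

	/* A single zero-filled page, i.e. __get_free_pages(GFP_KERNEL | __GFP_ZERO, 0). */
	zpage = get_zeroed_page(GFP_KERNEL);

	/* An exact-size buffer; alloc_pages_exact() frees the unused tail pages. */
	buf = alloc_pages_exact(12 * 1024, GFP_KERNEL);

	if (buf)
		free_pages_exact(buf, 12 * 1024);
	if (zpage)
		free_page(zpage);
	free_pages(addr, 1);

	return 0;
}

The gfp_t mask chosen by such a caller is what ultimately flows through the listed internals (gfp_to_alloc_flags(), get_page_from_freelist(), __alloc_pages_slowpath(), and so on) to decide reclaim, compaction, and watermark behaviour.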