Lines Matching defs:gfp_mask (mm/page_alloc.c)
2823 noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
2825 return __should_fail_alloc_page(gfp_mask, order);
2944 unsigned int alloc_flags, gfp_t gfp_mask)
3023 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
3031 alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM);
3054 /* Must be called after current_gfp_context() which can change gfp_mask */
3055 static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask,
3059 if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
3070 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
3093 !__cpuset_zone_allowed(zone, gfp_mask))
3145 gfp_mask)) {
3170 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
3190 gfp_mask, alloc_flags, ac->migratetype);
3192 prep_new_page(page, order, gfp_mask, alloc_flags);
3228 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
3237 if (!(gfp_mask & __GFP_NOMEMALLOC))
3241 if (!in_task() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
3244 __show_mem(filter, nodemask, gfp_zone(gfp_mask));
3247 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
3253 if ((gfp_mask & __GFP_NOWARN) ||
3255 ((gfp_mask & __GFP_DMA) && !has_managed_dma()))
3262 current->comm, &vaf, gfp_mask, &gfp_mask,
3269 warn_alloc_show_mem(gfp_mask, nodemask);
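The warn_alloc() path above is skipped entirely when the caller passes __GFP_NOWARN. A minimal caller-side sketch, assuming the caller has its own fallback so the failure report would only be noise (the function name and order are illustrative, not from the source):

#include <linux/gfp.h>

/* Illustrative: try a higher-order block quietly, then fall back to a single page. */
static struct page *try_big_then_small(void)
{
	struct page *page;

	/* Failure is expected and handled, so suppress the warn_alloc() report. */
	page = alloc_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY, 4);
	if (page)
		return page;

	return alloc_pages(GFP_KERNEL, 0);
}
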
3273 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
3279 page = get_page_from_freelist(gfp_mask, order,
3286 page = get_page_from_freelist(gfp_mask, order,
3293 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
3300 .gfp_mask = gfp_mask,
3324 page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
3344 if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE))
3363 WARN_ON_ONCE_GFP(gfp_mask & __GFP_NOFAIL, gfp_mask)) {
3370 if (gfp_mask & __GFP_NOFAIL)
3371 page = __alloc_pages_cpuset_fallback(gfp_mask, order,
3388 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3403 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
3420 prep_new_page(page, order, gfp_mask, alloc_flags);
3424 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3513 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3553 static bool __need_reclaim(gfp_t gfp_mask)
3556 if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
3563 if (gfp_mask & __GFP_NOLOCKDEP)
3579 void fs_reclaim_acquire(gfp_t gfp_mask)
3581 gfp_mask = current_gfp_context(gfp_mask);
3583 if (__need_reclaim(gfp_mask)) {
3584 if (gfp_mask & __GFP_FS)
3596 void fs_reclaim_release(gfp_t gfp_mask)
3598 gfp_mask = current_gfp_context(gfp_mask);
3600 if (__need_reclaim(gfp_mask)) {
3601 if (gfp_mask & __GFP_FS)
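fs_reclaim_acquire()/fs_reclaim_release() are the lockdep annotations that model "this call may enter reclaim", and __need_reclaim() above shows they only apply to __GFP_DIRECT_RECLAIM masks without __GFP_NOLOCKDEP. A sketch of how a non-allocating path can borrow the same annotation so lockdep still sees the reclaim dependency (everything except the two kernel helpers is illustrative):

#include <linux/gfp.h>
#include <linux/sched/mm.h>

/* Illustrative: tell lockdep this path may behave like a GFP_KERNEL allocation. */
static void annotate_might_reclaim(void)
{
	fs_reclaim_acquire(GFP_KERNEL);
	/* ... take locks that are also taken from the reclaim path ... */
	fs_reclaim_release(GFP_KERNEL);
}
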
3634 __perform_reclaim(gfp_t gfp_mask, unsigned int order,
3644 fs_reclaim_acquire(gfp_mask);
3647 progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
3651 fs_reclaim_release(gfp_mask);
3660 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
3669 *did_some_progress = __perform_reclaim(gfp_mask, order, ac);
3674 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3693 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
3706 wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx);
3713 gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order)
3732 (gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM));
3734 if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) {
3739 if (!(gfp_mask & __GFP_NOMEMALLOC)) {
3756 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags);
3780 static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask)
3782 if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
3784 if (gfp_mask & __GFP_MEMALLOC)
3798 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
3800 return !!__gfp_pfmemalloc_flags(gfp_mask);
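gfp_pfmemalloc_allowed() is how the allocator decides a request may ignore the watermarks: __GFP_MEMALLOC grants it, __GFP_NOMEMALLOC forbids it, and (in the lines of __gfp_pfmemalloc_flags() elided from this listing) a task running with PF_MEMALLOC gets it implicitly. A sketch of that task-flag side using the memalloc_noreclaim_save()/restore() helpers from <linux/sched/mm.h>; the surrounding function is illustrative:

#include <linux/gfp.h>
#include <linux/sched/mm.h>

/* Illustrative: a reclaim-like path that must make progress without recursing into reclaim. */
static struct page *emergency_page(void)
{
	unsigned int noreclaim_flag = memalloc_noreclaim_save();	/* sets PF_MEMALLOC */
	struct page *page = alloc_pages(GFP_ATOMIC, 0);

	memalloc_noreclaim_restore(noreclaim_flag);
	return page;
}
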
3814 should_reclaim_retry(gfp_t gfp_mask, unsigned order,
3919 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
3922 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
3923 bool can_compact = gfp_compaction_allowed(gfp_mask);
3949 alloc_flags = gfp_to_alloc_flags(gfp_mask, order);
3967 if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) {
3976 wake_all_kswapds(order, gfp_mask, ac);
3982 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
3998 && !gfp_pfmemalloc_allowed(gfp_mask)) {
3999 page = __alloc_pages_direct_compact(gfp_mask, order,
4010 if (costly_order && (gfp_mask & __GFP_NORETRY)) {
4044 wake_all_kswapds(order, gfp_mask, ac);
4046 reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
4048 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags) |
4063 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
4076 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
4082 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
4088 if (gfp_mask & __GFP_NORETRY)
4096 !(gfp_mask & __GFP_RETRY_MAYFAIL)))
4099 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
4125 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
4132 (gfp_mask & __GFP_NOMEMALLOC)))
4154 if (gfp_mask & __GFP_NOFAIL) {
4159 if (WARN_ON_ONCE_GFP(!can_direct_reclaim, gfp_mask))
4167 WARN_ON_ONCE_GFP(current->flags & PF_MEMALLOC, gfp_mask);
4175 WARN_ON_ONCE_GFP(costly_order, gfp_mask);
4184 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_MIN_RESERVE, ac);
4192 warn_alloc(gfp_mask, ac->nodemask,
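The slowpath excerpts above branch on three caller policies: __GFP_NORETRY (give up after minimal effort, never invoke the OOM killer), __GFP_RETRY_MAYFAIL (keep retrying but still allow the allocation to fail rather than OOM-kill for it), and __GFP_NOFAIL (loop until success, with a one-time warning for costly orders). A caller-side sketch; the function name, orders and sizes are illustrative:

#include <linux/gfp.h>

/* Illustrative: three failure policies as seen by __alloc_pages_slowpath(). */
static void gfp_retry_policy_examples(void)
{
	/* Opportunistic: minimal effort, no OOM killer. */
	struct page *fast = alloc_pages(GFP_KERNEL | __GFP_NORETRY, 3);

	/* Try hard, but prefer NULL over killing something. */
	struct page *best_effort = alloc_pages(GFP_KERNEL | __GFP_RETRY_MAYFAIL, 3);

	/* Must not fail: the slowpath keeps retrying (order 0 here; costly orders warn once). */
	struct page *required = alloc_pages(GFP_KERNEL | __GFP_NOFAIL, 0);

	if (fast)
		__free_pages(fast, 3);
	if (best_effort)
		__free_pages(best_effort, 3);
	__free_pages(required, 0);
}
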
4198 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
4203 ac->highest_zoneidx = gfp_zone(gfp_mask);
4204 ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
4206 ac->migratetype = gfp_migratetype(gfp_mask);
4220 might_alloc(gfp_mask);
4222 if (should_fail_alloc_page(gfp_mask, order))
4225 *alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags);
4228 ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
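prepare_alloc_pages() derives the whole allocation context from the mask: gfp_zone() picks the highest usable zone, node_zonelist() the zonelist, and gfp_migratetype() the freelist type. Those helpers are ordinary inlines from <linux/gfp.h>; a throwaway sketch of decoding a mask the same way (the printing function is illustrative):

#include <linux/gfp.h>
#include <linux/printk.h>

/* Illustrative: report how a gfp mask maps to zone and migratetype. */
static void decode_gfp(gfp_t gfp_mask)
{
	enum zone_type zidx = gfp_zone(gfp_mask);	/* e.g. ZONE_MOVABLE for GFP_HIGHUSER_MOVABLE */
	int mt = gfp_migratetype(gfp_mask);		/* MIGRATE_MOVABLE when __GFP_MOVABLE is set */

	pr_info("highest zone idx %d, migratetype %d\n", zidx, mt);
}
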
4493 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
4497 page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order);
4504 unsigned long get_zeroed_page(gfp_t gfp_mask)
4506 return __get_free_page(gfp_mask | __GFP_ZERO);
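__get_free_pages() and get_zeroed_page() are the address-returning front ends over alloc_pages(); note how __GFP_HIGHMEM is masked off, since a highmem page may have no kernel mapping whose address could be returned. A small sketch of the matching alloc/free pairs (function name and order are illustrative):

#include <linux/errno.h>
#include <linux/gfp.h>

/* Illustrative: one zeroed page plus an order-2 (four page) block. */
static int page_api_demo(void)
{
	unsigned long zeroed = get_zeroed_page(GFP_KERNEL);
	unsigned long block  = __get_free_pages(GFP_KERNEL, 2);

	if (!zeroed || !block) {
		if (zeroed)
			free_page(zeroed);
		if (block)
			free_pages(block, 2);
		return -ENOMEM;
	}

	/* ... use the memory via the returned kernel virtual addresses ... */

	free_page(zeroed);
	free_pages(block, 2);
	return 0;
}
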
4565 gfp_t gfp_mask)
4568 gfp_t gfp = gfp_mask;
4571 gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
4573 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
4595 unsigned int fragsz, gfp_t gfp_mask,
4604 page = __page_frag_cache_refill(nc, gfp_mask);
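The 4565-4604 block belongs to the page fragment cache behind page_frag_alloc(): the refill path adds __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY to the caller's mask and keeps one higher-order page to carve fragments from. A consumer-side sketch, assuming (as the networking users do) that a zero-initialised struct page_frag_cache is valid and that the caller serialises access to it; the cache name and sizes are illustrative:

#include <linux/gfp.h>
#include <linux/mm_types.h>

static struct page_frag_cache demo_frag_cache;	/* zero-initialised: no backing page yet */

/* Illustrative: carve a 256-byte fragment out of the cached page. */
static void *grab_fragment(void)
{
	return page_frag_alloc(&demo_frag_cache, 256, GFP_ATOMIC);
}

static void drop_fragment(void *frag)
{
	page_frag_free(frag);
}
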
4702 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
4714 void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
4719 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM)))
4720 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM);
4722 addr = __get_free_pages(gfp_mask, order);
4732 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
4739 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
4744 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM)))
4745 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM);
4747 p = alloc_pages_node(nid, gfp_mask, order);
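alloc_pages_exact() and alloc_pages_exact_nid() round the request up to whole pages rather than to a power-of-two order, splitting the underlying block and handing the unused tail back to the allocator, which is why __GFP_COMP is rejected above. A small sketch (size and function names are illustrative):

#include <linux/gfp.h>

/* Illustrative: 20 KiB of physically contiguous, zeroed memory
 * (five 4 KiB pages instead of an order-3 block of eight).
 */
static void *alloc_staging_buffer(void)
{
	return alloc_pages_exact(20 * 1024, GFP_KERNEL | __GFP_ZERO);
}

static void free_staging_buffer(void *buf)
{
	free_pages_exact(buf, 20 * 1024);
}
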
6060 .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
6100 if (!(cc->gfp_mask & __GFP_NOWARN) && ret == -EBUSY)
6116 * @gfp_mask: GFP mask to use during compaction
6130 unsigned migratetype, gfp_t gfp_mask)
6143 .gfp_mask = current_gfp_context(gfp_mask),
6169 ret = start_isolate_page_range(start, end, migratetype, 0, gfp_mask);
6256 unsigned long nr_pages, gfp_t gfp_mask)
6261 gfp_mask);
6298 * @gfp_mask: GFP mask to limit search and used during compaction
6316 struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
6324 zonelist = node_zonelist(nid, gfp_mask);
6326 gfp_zone(gfp_mask), nodemask) {
6341 gfp_mask);
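alloc_contig_pages() (CONFIG_CONTIG_ALLOC) scans the zonelist for a PFN range it can isolate and then calls into alloc_contig_range(), which migrates movable pages out of the way; per the check at 6100, __GFP_NOWARN in the caller's mask also silences the migration-failure report. A caller-side sketch; the page count and function names are illustrative:

#include <linux/gfp.h>
#include <linux/mm.h>

#ifdef CONFIG_CONTIG_ALLOC
/* Illustrative: 512 physically contiguous pages (2 MiB with 4 KiB pages) from any node. */
static struct page *grab_contig_run(void)
{
	return alloc_contig_pages(512, GFP_KERNEL | __GFP_NOWARN,
				  NUMA_NO_NODE, NULL);
}

static void release_contig_run(struct page *page)
{
	if (page)
		free_contig_range(page_to_pfn(page), 512);
}
#endif /* CONFIG_CONTIG_ALLOC */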