/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_SUPPORT_KMEM_H__
#define __XFS_SUPPORT_KMEM_H__

#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/*
 * General memory allocation interfaces
 */

typedef unsigned __bitwise xfs_km_flags_t;
#define KM_NOFS		((__force xfs_km_flags_t)0x0004u)
#define KM_MAYFAIL	((__force xfs_km_flags_t)0x0008u)
#define KM_ZERO		((__force xfs_km_flags_t)0x0010u)
#define KM_NOLOCKDEP	((__force xfs_km_flags_t)0x0020u)

/*
 * We use a special process flag to avoid recursive callbacks into
 * the filesystem during transactions.  We will also issue our own
 * warnings, so we explicitly skip any generic ones (silly of us).
 */
static inline gfp_t
kmem_flags_convert(xfs_km_flags_t flags)
{
	gfp_t	lflags;

	BUG_ON(flags & ~(KM_NOFS | KM_MAYFAIL | KM_ZERO | KM_NOLOCKDEP));

	lflags = GFP_KERNEL | __GFP_NOWARN;
	if (flags & KM_NOFS)
		lflags &= ~__GFP_FS;

	/*
	 * The default page/slab allocator behavior is to retry forever
	 * for small allocations.  We can override this behavior by using
	 * __GFP_RETRY_MAYFAIL, which tells the allocator to retry as long
	 * as it is feasible but to fail rather than retry forever, for
	 * all request sizes.
	 */
	if (flags & KM_MAYFAIL)
		lflags |= __GFP_RETRY_MAYFAIL;

	if (flags & KM_ZERO)
		lflags |= __GFP_ZERO;

	if (flags & KM_NOLOCKDEP)
		lflags |= __GFP_NOLOCKDEP;

	return lflags;
}

extern void *kmem_alloc(size_t, xfs_km_flags_t);
extern void *kmem_alloc_io(size_t size, int align_mask, xfs_km_flags_t flags);
extern void *kmem_alloc_large(size_t size, xfs_km_flags_t);
extern void *kmem_realloc(const void *, size_t, xfs_km_flags_t);
static inline void kmem_free(const void *ptr)
{
	kvfree(ptr);
}

static inline void *
kmem_zalloc(size_t size, xfs_km_flags_t flags)
{
	return kmem_alloc(size, flags | KM_ZERO);
}

static inline void *
kmem_zalloc_large(size_t size, xfs_km_flags_t flags)
{
	return kmem_alloc_large(size, flags | KM_ZERO);
}

/*
 * Zone interfaces
 */

#define kmem_zone	kmem_cache
#define kmem_zone_t	struct kmem_cache

extern void *kmem_zone_alloc(kmem_zone_t *, xfs_km_flags_t);

static inline void *
kmem_zone_zalloc(kmem_zone_t *zone, xfs_km_flags_t flags)
{
	return kmem_zone_alloc(zone, flags | KM_ZERO);
}

/* Return the page backing a kernel address, whether vmalloc'd or linear-mapped. */
static inline struct page *
kmem_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}

#endif	/* __XFS_SUPPORT_KMEM_H__ */
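
/*
 * A minimal usage sketch, assuming a hypothetical caller-defined structure
 * "foo" (not part of this header): a zeroed, fallible allocation made while
 * filesystem recursion must be avoided, paired with kmem_free():
 *
 *	struct foo	*fp;
 *
 *	fp = kmem_zalloc(sizeof(*fp), KM_NOFS | KM_MAYFAIL);
 *	if (!fp)
 *		return -ENOMEM;
 *	...
 *	kmem_free(fp);
 *
 * KM_NOFS clears __GFP_FS so the allocation cannot recurse into the
 * filesystem; KM_MAYFAIL adds __GFP_RETRY_MAYFAIL, so the caller must
 * handle a NULL return instead of relying on the allocator retrying
 * forever.
 */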