// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_SUPPORT_KMEM_H__
#define __XFS_SUPPORT_KMEM_H__

#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/*
 * General memory allocation interfaces
 */

typedef unsigned __bitwise xfs_km_flags_t;
#define KM_SLEEP	((__force xfs_km_flags_t)0x0001u)	/* allocation may block */
#define KM_NOSLEEP	((__force xfs_km_flags_t)0x0002u)	/* atomic context, must not block */
#define KM_NOFS		((__force xfs_km_flags_t)0x0004u)	/* no recursion into the fs */
#define KM_MAYFAIL	((__force xfs_km_flags_t)0x0008u)	/* may fail rather than retry forever */
#define KM_ZERO		((__force xfs_km_flags_t)0x0010u)	/* zero the allocation */

/*
 * We use a special process flag to avoid recursive callbacks into
 * the filesystem during transactions.  We will also issue our own
 * warnings, so we explicitly skip any generic ones (silly of us).
 */
static inline gfp_t
kmem_flags_convert(xfs_km_flags_t flags)
{
	gfp_t	lflags;

	BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL|KM_ZERO));

	if (flags & KM_NOSLEEP) {
		lflags = GFP_ATOMIC | __GFP_NOWARN;
	} else {
		lflags = GFP_KERNEL | __GFP_NOWARN;
		if (flags & KM_NOFS)
			lflags &= ~__GFP_FS;
	}

	/*
	 * The default page/slab allocator behavior is to retry forever
	 * for small allocations.  We can override this behavior by using
	 * __GFP_RETRY_MAYFAIL, which tells the allocator to retry as long
	 * as it is feasible, but to fail rather than retry forever, for
	 * all request sizes.
	 */
	if (flags & KM_MAYFAIL)
		lflags |= __GFP_RETRY_MAYFAIL;

	if (flags & KM_ZERO)
		lflags |= __GFP_ZERO;

	return lflags;
}

extern void *kmem_alloc(size_t, xfs_km_flags_t);
extern void *kmem_alloc_large(size_t size, xfs_km_flags_t);
extern void *kmem_realloc(const void *, size_t, xfs_km_flags_t);

/* kvfree() handles both kmalloc'ed and vmalloc'ed (kmem_alloc_large) memory */
static inline void kmem_free(const void *ptr)
{
	kvfree(ptr);
}

static inline void *
kmem_zalloc(size_t size, xfs_km_flags_t flags)
{
	return kmem_alloc(size, flags | KM_ZERO);
}

static inline void *
kmem_zalloc_large(size_t size, xfs_km_flags_t flags)
{
	return kmem_alloc_large(size, flags | KM_ZERO);
}

/*
 * Zone interfaces
 */

#define KM_ZONE_HWALIGN	SLAB_HWCACHE_ALIGN
#define KM_ZONE_RECLAIM	SLAB_RECLAIM_ACCOUNT
#define KM_ZONE_SPREAD	SLAB_MEM_SPREAD
#define KM_ZONE_ACCOUNT	SLAB_ACCOUNT

#define kmem_zone	kmem_cache
#define kmem_zone_t	struct kmem_cache

static inline kmem_zone_t *
kmem_zone_init(int size, char *zone_name)
{
	return kmem_cache_create(zone_name, size, 0, 0, NULL);
}

static inline kmem_zone_t *
kmem_zone_init_flags(int size, char *zone_name, slab_flags_t flags,
		     void (*construct)(void *))
{
	return kmem_cache_create(zone_name, size, 0, flags, construct);
}

static inline void
kmem_zone_free(kmem_zone_t *zone, void *ptr)
{
	kmem_cache_free(zone, ptr);
}

static inline void
kmem_zone_destroy(kmem_zone_t *zone)
{
	kmem_cache_destroy(zone);
}

extern void *kmem_zone_alloc(kmem_zone_t *, xfs_km_flags_t);

static inline void *
kmem_zone_zalloc(kmem_zone_t *zone, xfs_km_flags_t flags)
{
	return kmem_zone_alloc(zone, flags | KM_ZERO);
}

/* Return the page backing a kmalloc'ed or vmalloc'ed address. */
static inline struct page *
kmem_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}

#endif /* __XFS_SUPPORT_KMEM_H__ */
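
/*
 * For quick reference, the conversions kmem_flags_convert() performs for
 * common flag combinations, read straight off the code above:
 *
 *	KM_SLEEP		-> GFP_KERNEL | __GFP_NOWARN
 *	KM_NOSLEEP		-> GFP_ATOMIC | __GFP_NOWARN
 *	KM_NOFS			-> (GFP_KERNEL | __GFP_NOWARN) & ~__GFP_FS
 *	KM_SLEEP|KM_MAYFAIL	-> GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL
 *
 * KM_ZERO simply ORs __GFP_ZERO into any of the above.
 */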
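
/*
 * Usage sketch: the typical lifecycle of a zone built from the wrappers
 * above.  This is an illustration, not code from this header;
 * xfs_foo_zone and struct xfs_foo are hypothetical names chosen for the
 * example.
 *
 *	static kmem_zone_t *xfs_foo_zone;
 *
 *	// Mount/module init: create the backing slab cache.
 *	xfs_foo_zone = kmem_zone_init(sizeof(struct xfs_foo), "xfs_foo");
 *
 *	// Per-object use: a zeroed allocation that must not recurse into
 *	// the filesystem (KM_NOFS) and is allowed to fail (KM_MAYFAIL).
 *	struct xfs_foo *foo = kmem_zone_zalloc(xfs_foo_zone,
 *					       KM_NOFS | KM_MAYFAIL);
 *	if (foo) {
 *		... use foo, then release it back to the zone ...
 *		kmem_zone_free(xfs_foo_zone, foo);
 *	}
 *
 *	// Unmount/module exit: every object must already be freed.
 *	kmem_zone_destroy(xfs_foo_zone);
 */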