/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_SUPPORT_KMEM_H__
#define __XFS_SUPPORT_KMEM_H__

#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/*
 * General memory allocation interfaces
 */

typedef unsigned __bitwise xfs_km_flags_t;
#define KM_NOFS		((__force xfs_km_flags_t)0x0004u)
#define KM_MAYFAIL	((__force xfs_km_flags_t)0x0008u)
#define KM_ZERO		((__force xfs_km_flags_t)0x0010u)
#define KM_NOLOCKDEP	((__force xfs_km_flags_t)0x0020u)
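
/*
 * Usage sketch (illustrative only, not part of the original header): the
 * flags are OR'ed together per call site, e.g. an allocation made inside
 * a transaction that is allowed to fail:
 *
 *	ptr = kmem_alloc(size, KM_NOFS | KM_MAYFAIL);
 *	if (!ptr)
 *		return -ENOMEM;
 */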

/*
 * We use a special process flag to avoid recursive callbacks into
 * the filesystem during transactions.  We will also issue our own
 * warnings, so we explicitly skip any generic ones (silly of us).
 */
static inline gfp_t
kmem_flags_convert(xfs_km_flags_t flags)
{
	gfp_t	lflags;

	BUG_ON(flags & ~(KM_NOFS | KM_MAYFAIL | KM_ZERO | KM_NOLOCKDEP));

	lflags = GFP_KERNEL | __GFP_NOWARN;
	if (flags & KM_NOFS)
		lflags &= ~__GFP_FS;

	/*
	 * The default page/slab allocator behavior is to retry forever
	 * for small allocations. We can override this behavior with
	 * __GFP_RETRY_MAYFAIL, which tells the allocator to retry as
	 * long as it is feasible, but to fail rather than retry forever,
	 * for all request sizes.
	 */
	if (flags & KM_MAYFAIL)
		lflags |= __GFP_RETRY_MAYFAIL;

	if (flags & KM_ZERO)
		lflags |= __GFP_ZERO;

	if (flags & KM_NOLOCKDEP)
		lflags |= __GFP_NOLOCKDEP;

	return lflags;
}
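
/*
 * Worked example (illustrative, not from the original header):
 * kmem_flags_convert(KM_NOFS | KM_ZERO) yields
 * (GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO) & ~__GFP_FS, i.e. a zeroed
 * allocation that must not recurse into filesystem reclaim.
 */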

extern void *kmem_alloc(size_t size, xfs_km_flags_t flags);

static inline void kmem_free(const void *ptr)
{
	kvfree(ptr);
}

static inline void *
kmem_zalloc(size_t size, xfs_km_flags_t flags)
{
	return kmem_alloc(size, flags | KM_ZERO);
}
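
/*
 * Example usage (a sketch; "struct foo" is a hypothetical caller type,
 * not something defined here):
 *
 *	struct foo *fp = kmem_zalloc(sizeof(*fp), KM_MAYFAIL);
 *	if (!fp)
 *		return -ENOMEM;
 *	...
 *	kmem_free(fp);
 *
 * kmem_free() pairs with both kmem_alloc() and kmem_zalloc(): kvfree()
 * handles slab and vmalloc'd memory alike.
 */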

/*
 * Page conversion helper: find the struct page backing an allocated
 * buffer, whether it came from the slab allocator or from vmalloc.
 */
static inline struct page *
kmem_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}
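
/*
 * Example (illustrative only): turning a buffer into a page/offset pair,
 * e.g. when attaching it to a bio:
 *
 *	struct page *page = kmem_to_page(ptr);
 *	unsigned int off = offset_in_page(ptr);
 */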
8272945d86SChristoph Hellwig
83c59d87c4SChristoph Hellwig #endif /* __XFS_SUPPORT_KMEM_H__ */
84