/*
 * Support for RAM backed by mmapped host memory.
 *
 * Copyright (c) 2015 Red Hat, Inc.
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 */

#ifdef CONFIG_LINUX
#include <linux/mman.h>
#else  /* !CONFIG_LINUX */
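/* Defined to 0 so that requesting MAP_SYNC below degrades to a no-op on
 * non-Linux hosts. */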
#define MAP_SYNC              0x0
#define MAP_SHARED_VALIDATE   0x0
#endif /* CONFIG_LINUX */

#include "qemu/osdep.h"
#include "qemu/mmap-alloc.h"
#include "qemu/host-utils.h"
#include "qemu/error-report.h"

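/* Filesystem magic reported for hugetlbfs by fstatfs()/statfs()
 * (see linux/magic.h) */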
#define HUGETLBFS_MAGIC       0x958458f6

#ifdef CONFIG_LINUX
#include <sys/vfs.h>
#endif

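/*
 * Return the page size to use for mappings of @fd: the huge page size
 * (reported in f_bsize) if @fd lives on hugetlbfs, otherwise the host page
 * size. On SPARC Linux the fallback is QEMU_VMALLOC_ALIGN instead, since
 * mappings there need greater alignment than the page size.
 */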
size_t qemu_fd_getpagesize(int fd)
{
#ifdef CONFIG_LINUX
    struct statfs fs;
    int ret;

    if (fd != -1) {
        do {
            ret = fstatfs(fd, &fs);
        } while (ret != 0 && errno == EINTR);

        if (ret == 0 && fs.f_type == HUGETLBFS_MAGIC) {
            return fs.f_bsize;
        }
    }
#ifdef __sparc__
    /* SPARC Linux needs greater alignment than the page size */
    return QEMU_VMALLOC_ALIGN;
#endif
#endif

    return qemu_real_host_page_size;
}

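/*
 * Return the page size of the file system mounted at @mem_path: the huge
 * page size for hugetlbfs, otherwise the host page size (QEMU_VMALLOC_ALIGN
 * on SPARC Linux). Exits if statfs() fails.
 */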
size_t qemu_mempath_getpagesize(const char *mem_path)
{
#ifdef CONFIG_LINUX
    struct statfs fs;
    int ret;

    if (mem_path) {
        do {
            ret = statfs(mem_path, &fs);
        } while (ret != 0 && errno == EINTR);

        if (ret != 0) {
            fprintf(stderr, "Couldn't statfs() memory path: %s\n",
                    strerror(errno));
            exit(1);
        }

        if (fs.f_type == HUGETLBFS_MAGIC) {
            /* It's a hugetlbfs mount, return the huge page size */
            return fs.f_bsize;
        }
    }
#ifdef __sparc__
    /* SPARC Linux needs greater alignment than the page size */
    return QEMU_VMALLOC_ALIGN;
#endif
#endif

    return qemu_real_host_page_size;
}
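
/*
 * Illustrative usage sketch (not part of the original file): a caller
 * backing guest RAM with a file would typically derive the mapping
 * alignment from the backing page size, e.g.:
 *
 *     size_t align = MAX(qemu_mempath_getpagesize(mem_path),
 *                        qemu_real_host_page_size);
 *     void *block = qemu_ram_mmap(fd, size, align, QEMU_MAP_SHARED, 0);
 *
 * where mem_path, fd and size are hypothetical caller-provided values.
 */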

/*
 * Reserve a new memory region of the requested size to be used for mapping
 * from the given fd (if any).
 */
static void *mmap_reserve(size_t size, int fd)
{
    int flags = MAP_PRIVATE;

#if defined(__powerpc64__) && defined(__linux__)
    /*
     * On ppc64, mappings in the same segment (aka slice) must share the same
     * page size. Since we will be re-allocating part of this segment
     * from the supplied fd, we must make sure to use the same page size; to
     * that end we mmap the supplied fd here. In that case, set MAP_NORESERVE
     * to avoid allocating backing store memory.
     * We do this unless we are using the system page size, in which case
     * anonymous memory is OK.
     */
    if (fd == -1 || qemu_fd_getpagesize(fd) == qemu_real_host_page_size) {
        fd = -1;
        flags |= MAP_ANONYMOUS;
    } else {
        flags |= MAP_NORESERVE;
    }
#else
    fd = -1;
    flags |= MAP_ANONYMOUS;
#endif

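    /*
     * PROT_NONE: reserve the address range without making it accessible;
     * mmap_activate() will later map the usable part over it with MAP_FIXED.
     */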
    return mmap(0, size, PROT_NONE, flags, fd, 0);
}

/*
 * Activate memory in a reserved region from the given fd (if any), to make
 * it accessible.
 */
static void *mmap_activate(void *ptr, size_t size, int fd,
                           uint32_t qemu_map_flags, off_t map_offset)
{
    const bool noreserve = qemu_map_flags & QEMU_MAP_NORESERVE;
    const bool readonly = qemu_map_flags & QEMU_MAP_READONLY;
    const bool shared = qemu_map_flags & QEMU_MAP_SHARED;
    const bool sync = qemu_map_flags & QEMU_MAP_SYNC;
    const int prot = PROT_READ | (readonly ? 0 : PROT_WRITE);
    int map_sync_flags = 0;
    int flags = MAP_FIXED;
    void *activated_ptr;

    if (noreserve) {
        error_report("Skipping reservation of swap space is not supported");
        return MAP_FAILED;
    }

    flags |= fd == -1 ? MAP_ANONYMOUS : 0;
    flags |= shared ? MAP_SHARED : MAP_PRIVATE;
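    /*
     * MAP_SYNC on a shared mapping of a DAX-capable file promises that
     * writes reach persistent storage without a further msync();
     * MAP_SHARED_VALIDATE makes the kernel reject the request instead of
     * silently ignoring flags it does not know (see mmap(2)).
     */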
    if (shared && sync) {
        map_sync_flags = MAP_SYNC | MAP_SHARED_VALIDATE;
    }

    activated_ptr = mmap(ptr, size, prot, flags | map_sync_flags, fd,
                         map_offset);
    if (activated_ptr == MAP_FAILED && map_sync_flags) {
        if (errno == ENOTSUP) {
            char *proc_link = g_strdup_printf("/proc/self/fd/%d", fd);
            char *file_name = g_malloc0(PATH_MAX);
            int len = readlink(proc_link, file_name, PATH_MAX - 1);

            if (len < 0) {
                len = 0;
            }
            file_name[len] = '\0';
            fprintf(stderr, "Warning: requesting persistence across crashes "
                    "for backend file %s failed. Proceeding without "
                    "persistence, data might become corrupted in case of host "
                    "crash.\n", file_name);
            g_free(proc_link);
            g_free(file_name);
        }
        /*
         * If mmap failed with MAP_SHARED_VALIDATE | MAP_SYNC, we will try
         * again without these flags to handle backwards compatibility.
         */
        activated_ptr = mmap(ptr, size, prot, flags, fd, map_offset);
    }
    return activated_ptr;
}

static inline size_t mmap_guard_pagesize(int fd)
{
#if defined(__powerpc64__) && defined(__linux__)
    /* Mappings in the same segment must share the same page size */
    return qemu_fd_getpagesize(fd);
#else
    return qemu_real_host_page_size;
#endif
}

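/*
 * Reserve size + align bytes of virtual address space, activate @size bytes
 * from @fd (if any) at the first @align-aligned address within it, and trim
 * the excess, keeping one inaccessible guard page after the RAM block.
 * Returns the aligned pointer, or MAP_FAILED on error.
 */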
void *qemu_ram_mmap(int fd,
                    size_t size,
                    size_t align,
                    uint32_t qemu_map_flags,
                    off_t map_offset)
{
    const size_t guard_pagesize = mmap_guard_pagesize(fd);
    size_t offset, total;
    void *ptr, *guardptr;

    /*
     * Note: this always allocates at least one extra page of virtual address
     * space, even if size is already aligned.
     */
    total = size + align;

    guardptr = mmap_reserve(total, fd);
    if (guardptr == MAP_FAILED) {
        return MAP_FAILED;
    }

    assert(is_power_of_2(align));
    /* Always align to at least the guard page size */
    assert(align >= guard_pagesize);

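    /* Offset from the reservation's start to the first @align boundary */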
    offset = QEMU_ALIGN_UP((uintptr_t)guardptr, align) - (uintptr_t)guardptr;

    ptr = mmap_activate(guardptr + offset, size, fd, qemu_map_flags,
                        map_offset);
    if (ptr == MAP_FAILED) {
        munmap(guardptr, total);
        return MAP_FAILED;
    }

    if (offset > 0) {
        munmap(guardptr, offset);
    }

    /*
     * Leave a single PROT_NONE page allocated after the RAM block, to serve
     * as a guard page against potential buffer overflows.
     */
    total -= offset;
    if (total > size + guard_pagesize) {
        munmap(ptr + size + guard_pagesize, total - size - guard_pagesize);
    }

    return ptr;
}
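
/*
 * Illustrative usage sketch (not part of the original file): map and unmap
 * with the same fd so that both sides compute the same guard page size, e.g.:
 *
 *     void *block = qemu_ram_mmap(fd, size, qemu_fd_getpagesize(fd),
 *                                 QEMU_MAP_SHARED, 0);
 *     if (block != MAP_FAILED) {
 *         ...
 *         qemu_ram_munmap(fd, block, size);
 *     }
 */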

void qemu_ram_munmap(int fd, void *ptr, size_t size)
{
    if (ptr) {
        /* Unmap both the RAM block and the guard page */
        munmap(ptr, size + mmap_guard_pagesize(fd));
    }
}
240