/*
 * Support for RAM backed by mmaped host memory.
 *
 * Copyright (c) 2015 Red Hat, Inc.
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later. See the COPYING file in the top-level directory.
 */

#ifdef CONFIG_LINUX
#include <linux/mman.h>
#else  /* !CONFIG_LINUX */
#define MAP_SYNC              0x0
#define MAP_SHARED_VALIDATE   0x0
#endif /* CONFIG_LINUX */

#include "qemu/osdep.h"
#include "qemu/mmap-alloc.h"
#include "qemu/host-utils.h"

#define HUGETLBFS_MAGIC       0x958458f6

#ifdef CONFIG_LINUX
#include <sys/vfs.h>
#endif

size_t qemu_fd_getpagesize(int fd)
{
#ifdef CONFIG_LINUX
    struct statfs fs;
    int ret;

    if (fd != -1) {
        do {
            ret = fstatfs(fd, &fs);
        } while (ret != 0 && errno == EINTR);

        if (ret == 0 && fs.f_type == HUGETLBFS_MAGIC) {
            return fs.f_bsize;
        }
    }
#ifdef __sparc__
    /* SPARC Linux needs greater alignment than the pagesize */
    return QEMU_VMALLOC_ALIGN;
#endif
#endif

    return qemu_real_host_page_size;
}

size_t qemu_mempath_getpagesize(const char *mem_path)
{
#ifdef CONFIG_LINUX
    struct statfs fs;
    int ret;

    if (mem_path) {
        do {
            ret = statfs(mem_path, &fs);
        } while (ret != 0 && errno == EINTR);

        if (ret != 0) {
            fprintf(stderr, "Couldn't statfs() memory path: %s\n",
                    strerror(errno));
            exit(1);
        }

        if (fs.f_type == HUGETLBFS_MAGIC) {
            /* It's a hugepage; return the huge page size */
            return fs.f_bsize;
        }
    }
#ifdef __sparc__
    /* SPARC Linux needs greater alignment than the pagesize */
    return QEMU_VMALLOC_ALIGN;
#endif
#endif

    return qemu_real_host_page_size;
}

/*
 * Reserve a new memory region of the requested size to be used for mapping
 * from the given fd (if any).
 */
static void *mmap_reserve(size_t size, int fd)
{
    int flags = MAP_PRIVATE;

#if defined(__powerpc64__) && defined(__linux__)
    /*
     * On ppc64 mappings in the same segment (aka slice) must share the same
     * page size. Since we will be re-allocating part of this segment
     * from the supplied fd, we should make sure to use the same page size, to
     * this end we mmap the supplied fd. In this case, set MAP_NORESERVE to
     * avoid allocating backing store memory.
     * We do this unless we are using the system page size, in which case
     * anonymous memory is OK.
     */
    if (fd == -1 || qemu_fd_getpagesize(fd) == qemu_real_host_page_size) {
        fd = -1;
        flags |= MAP_ANONYMOUS;
    } else {
        flags |= MAP_NORESERVE;
    }
#else
    fd = -1;
    flags |= MAP_ANONYMOUS;
#endif

    return mmap(0, size, PROT_NONE, flags, fd, 0);
}

static inline size_t mmap_guard_pagesize(int fd)
{
#if defined(__powerpc64__) && defined(__linux__)
    /* Mappings in the same segment must share the same page size */
    return qemu_fd_getpagesize(fd);
#else
    return qemu_real_host_page_size;
#endif
}

void *qemu_ram_mmap(int fd,
                    size_t size,
                    size_t align,
                    bool readonly,
                    bool shared,
                    bool is_pmem,
                    off_t map_offset)
{
    const size_t guard_pagesize = mmap_guard_pagesize(fd);
    int prot;
    int flags;
    int map_sync_flags = 0;
    size_t offset;
    size_t total;
    void *guardptr;
    void *ptr;

    /*
     * Note: this always allocates at least one extra page of virtual address
     * space, even if size is already aligned.
     */
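    /*
     * Illustrative layout (added for exposition; the example sizes are an
     * assumption, not taken from the original code). With size = 2 pages,
     * align = 4 pages, and a nonzero alignment offset, the reserved region
     * ends up as:
     *
     *   guardptr                                        guardptr + total
     *   |- offset -|------ size ------|- guard page -|---- trimmed ----|
     *              ^ ptr, aligned to align
     *
     * The head before ptr and the tail beyond the guard page are unmapped
     * further below, leaving the RAM block plus one PROT_NONE guard page.
     */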
    total = size + align;

    guardptr = mmap_reserve(total, fd);
    if (guardptr == MAP_FAILED) {
        return MAP_FAILED;
    }

    assert(is_power_of_2(align));
    /* Always align to host page size */
    assert(align >= guard_pagesize);

    flags = MAP_FIXED;
    flags |= fd == -1 ? MAP_ANONYMOUS : 0;
    flags |= shared ? MAP_SHARED : MAP_PRIVATE;
    if (shared && is_pmem) {
        map_sync_flags = MAP_SYNC | MAP_SHARED_VALIDATE;
    }

    offset = QEMU_ALIGN_UP((uintptr_t)guardptr, align) - (uintptr_t)guardptr;

    prot = PROT_READ | (readonly ? 0 : PROT_WRITE);

    ptr = mmap(guardptr + offset, size, prot,
               flags | map_sync_flags, fd, map_offset);

    if (ptr == MAP_FAILED && map_sync_flags) {
        if (errno == ENOTSUP) {
            char *proc_link, *file_name;
            int len;
            proc_link = g_strdup_printf("/proc/self/fd/%d", fd);
            file_name = g_malloc0(PATH_MAX);
            len = readlink(proc_link, file_name, PATH_MAX - 1);
            if (len < 0) {
                len = 0;
            }
            file_name[len] = '\0';
            fprintf(stderr, "Warning: requesting persistence across crashes "
                    "for backend file %s failed. Proceeding without "
                    "persistence, data might become corrupted in case of host "
                    "crash.\n", file_name);
            g_free(proc_link);
            g_free(file_name);
        }
        /*
         * If the mapping failed with MAP_SHARED_VALIDATE | MAP_SYNC, retry
         * without these flags for compatibility with hosts and filesystems
         * that do not support them.
         */
        ptr = mmap(guardptr + offset, size, prot, flags, fd, map_offset);
    }

    if (ptr == MAP_FAILED) {
        munmap(guardptr, total);
        return MAP_FAILED;
    }

    if (offset > 0) {
        munmap(guardptr, offset);
    }

    /*
     * Leave a single PROT_NONE page allocated after the RAM block, to serve
     * as a guard page against potential buffer overflows.
     */
    total -= offset;
    if (total > size + guard_pagesize) {
        munmap(ptr + size + guard_pagesize, total - size - guard_pagesize);
    }

    return ptr;
}

void qemu_ram_munmap(int fd, void *ptr, size_t size)
{
    if (ptr) {
        /* Unmap both the RAM block and the guard page */
        munmap(ptr, size + mmap_guard_pagesize(fd));
    }
}
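/*
 * Usage sketch (not part of the original file; a hypothetical caller shown
 * for illustration only, compiled out via #if 0). It maps an anonymous,
 * page-aligned RAM block and releases it again, assuming the surrounding
 * QEMU build environment provides qemu_real_host_page_size.
 */
#if 0
static void mmap_alloc_example(void)
{
    const size_t size = 16 * qemu_real_host_page_size;
    void *block;

    /*
     * fd == -1 requests anonymous memory. align must be a power of two and
     * at least mmap_guard_pagesize(fd), or the asserts above will fire.
     */
    block = qemu_ram_mmap(-1, size, qemu_real_host_page_size,
                          false /* readonly */, false /* shared */,
                          false /* is_pmem */, 0 /* map_offset */);
    if (block == MAP_FAILED) {
        return; /* reservation or mapping failed; caller would report it */
    }

    /* ... use the block as guest RAM backing ... */

    qemu_ram_munmap(-1, block, size);
}
#endif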