/*
 * Support for RAM backed by mmapped host memory.
 *
 * Copyright (c) 2015 Red Hat, Inc.
 *
 * Authors:
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later. See the COPYING file in the top-level directory.
 */
#include "qemu/mmap-alloc.h"
#include <sys/types.h>
#include <sys/mman.h>
#include <assert.h>

void *qemu_ram_mmap(int fd, size_t size, size_t align, bool shared)
{
    /*
     * Note: this always allocates at least one extra page of virtual address
     * space, even if size is already aligned.
     */
    size_t total = size + align;
    /*
     * Reserve the whole range with PROT_NONE first, then map the real
     * block at an aligned address inside that reservation.
     */
    void *ptr = mmap(0, total, PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
    size_t offset;
    void *ptr1;

    if (ptr == MAP_FAILED) {
        return MAP_FAILED;
    }

    /* Make sure align is a power of 2 */
    assert(!(align & (align - 1)));
    /* Always align to host page size */
    assert(align >= getpagesize());

    /* Distance from the reservation start to the first aligned address */
    offset = QEMU_ALIGN_UP((uintptr_t)ptr, align) - (uintptr_t)ptr;

    ptr1 = mmap(ptr + offset, size, PROT_READ | PROT_WRITE,
                MAP_FIXED |
                (fd == -1 ? MAP_ANONYMOUS : 0) |
                (shared ? MAP_SHARED : MAP_PRIVATE),
                fd, 0);
    if (ptr1 == MAP_FAILED) {
        munmap(ptr, total);
        return MAP_FAILED;
    }

    ptr += offset;
    total -= offset;

    /* Return the unused head of the reservation to the OS */
    if (offset > 0) {
        munmap(ptr - offset, offset);
    }

    /*
     * Leave a single PROT_NONE page allocated after the RAM block, to act
     * as a guard page against buffer overflows; unmap the rest of the tail.
     */
    if (total > size + getpagesize()) {
        munmap(ptr + size + getpagesize(), total - size - getpagesize());
    }

    return ptr;
}

void qemu_ram_munmap(void *ptr, size_t size)
{
    if (ptr) {
        /* Unmap both the RAM block and the guard page */
        munmap(ptr, size + getpagesize());
    }
}
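
/*
 * Illustrative usage (an editorial sketch, not part of QEMU): a minimal,
 * hypothetical self-test for the two helpers above. The MMAP_ALLOC_DEMO
 * macro, the 2 MiB alignment, and the anonymous (fd == -1) mapping are
 * assumptions made for this example, not anything this file requires.
 * Compile the file with -DMMAP_ALLOC_DEMO to build it.
 */
#ifdef MMAP_ALLOC_DEMO
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    const size_t align = 2 * 1024 * 1024; /* 2 MiB: a power of 2, >= page size */
    const size_t size = 4 * align;        /* 8 MiB of RAM */

    /* fd == -1 requests anonymous memory; shared == false keeps it private */
    void *ram = qemu_ram_mmap(-1, size, align, false);
    if (ram == MAP_FAILED) {
        perror("qemu_ram_mmap");
        return 1;
    }

    /* The returned pointer honors the requested alignment... */
    assert(((uintptr_t)ram & (align - 1)) == 0);
    /* ...and the whole block is readable/writable RAM. */
    memset(ram, 0, size);

    /* Touching ram[size] would hit the PROT_NONE guard page and SIGSEGV. */

    printf("mapped and touched %zu bytes at %p\n", size, ram);
    qemu_ram_munmap(ram, size);
    return 0;
}
#endif /* MMAP_ALLOC_DEMO */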