/*
 * Page cache for QEMU
 * The cache is based on a hash of the page address
 *
 * Copyright 2012 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Orit Wasserman <owasserm@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"

#include "qemu-common.h"
#include "qemu/host-utils.h"
#include "migration/page_cache.h"

#ifdef DEBUG_CACHE
#define DPRINTF(fmt, ...) \
    do { fprintf(stdout, "cache: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

/* the page in cache will not be replaced in two cycles */
#define CACHED_PAGE_LIFETIME 2

typedef struct CacheItem CacheItem;

struct CacheItem {
    uint64_t it_addr;
    uint64_t it_age;
    uint8_t *it_data;
};

struct PageCache {
    CacheItem *page_cache;
    unsigned int page_size;
    int64_t max_num_items;
    uint64_t max_item_age;
    int64_t num_items;
};

PageCache *cache_init(int64_t num_pages, unsigned int page_size)
{
    int64_t i;

    PageCache *cache;

    if (num_pages <= 0) {
        DPRINTF("invalid number of pages\n");
        return NULL;
    }

    /* We prefer not to abort if there is no memory */
    cache = g_try_malloc(sizeof(*cache));
    if (!cache) {
        DPRINTF("Failed to allocate cache\n");
        return NULL;
    }
    /* round down to the nearest power of 2 */
    if (!is_power_of_2(num_pages)) {
        num_pages = pow2floor(num_pages);
        DPRINTF("rounding down to %" PRId64 "\n", num_pages);
    }
    cache->page_size = page_size;
    cache->num_items = 0;
    cache->max_item_age = 0;
    cache->max_num_items = num_pages;

    DPRINTF("Setting cache buckets to %" PRId64 "\n", cache->max_num_items);

    /* We prefer not to abort if there is no memory */
    cache->page_cache = g_try_malloc((cache->max_num_items) *
                                     sizeof(*cache->page_cache));
    if (!cache->page_cache) {
        DPRINTF("Failed to allocate cache->page_cache\n");
        g_free(cache);
        return NULL;
    }

    for (i = 0; i < cache->max_num_items; i++) {
        cache->page_cache[i].it_data = NULL;
        cache->page_cache[i].it_age = 0;
        cache->page_cache[i].it_addr = -1;
    }

    return cache;
}

void cache_fini(PageCache *cache)
{
    int64_t i;

    g_assert(cache);
    g_assert(cache->page_cache);

    for (i = 0; i < cache->max_num_items; i++) {
        g_free(cache->page_cache[i].it_data);
    }

    g_free(cache->page_cache);
    cache->page_cache = NULL;
    g_free(cache);
}

/*
 * Direct-mapped lookup: the low bits of the page number select the bucket
 * (max_num_items is always a power of two).
 */
static size_t cache_get_cache_pos(const PageCache *cache,
                                  uint64_t address)
{
    g_assert(cache->max_num_items);
    return (address / cache->page_size) & (cache->max_num_items - 1);
}

static CacheItem *cache_get_by_addr(const PageCache *cache, uint64_t addr)
{
    size_t pos;

    g_assert(cache);
    g_assert(cache->page_cache);

    pos = cache_get_cache_pos(cache, addr);

    return &cache->page_cache[pos];
}

uint8_t *get_cached_data(const PageCache *cache, uint64_t addr)
{
    return cache_get_by_addr(cache, addr)->it_data;
}

bool cache_is_cached(const PageCache *cache, uint64_t addr,
                     uint64_t current_age)
{
    CacheItem *it;

    it = cache_get_by_addr(cache, addr);

    if (it->it_addr == addr) {
        /* refresh the age on a cache hit */
        it->it_age = current_age;
        return true;
    }
    return false;
}

int cache_insert(PageCache *cache, uint64_t addr, const uint8_t *pdata,
                 uint64_t current_age)
{
    CacheItem *it;

    /* actual update of entry */
    it = cache_get_by_addr(cache, addr);

    if (it->it_data && it->it_addr != addr &&
        it->it_age + CACHED_PAGE_LIFETIME > current_age) {
        /* the cached page is still fresh, don't replace it */
        return -1;
    }
    /* allocate page */
    if (!it->it_data) {
        it->it_data = g_try_malloc(cache->page_size);
        if (!it->it_data) {
            DPRINTF("Error allocating page\n");
            return -1;
        }
        cache->num_items++;
    }

    memcpy(it->it_data, pdata, cache->page_size);

    it->it_age = current_age;
    it->it_addr = addr;

    return 0;
}

int64_t cache_resize(PageCache *cache, int64_t new_num_pages)
{
    PageCache *new_cache;
    int64_t i;

    CacheItem *old_it, *new_it;

    g_assert(cache);

    /* cache was not initialized */
    if (cache->page_cache == NULL) {
        return -1;
    }

    /* same size */
    if (pow2floor(new_num_pages) == cache->max_num_items) {
        return cache->max_num_items;
    }

    new_cache = cache_init(new_num_pages, cache->page_size);
    if (!new_cache) {
        DPRINTF("Error creating new cache\n");
        return -1;
    }

    /* move all data from old cache */
    for (i = 0; i < cache->max_num_items; i++) {
        old_it = &cache->page_cache[i];
        if (old_it->it_addr != -1) {
            /* check for collision; if there is one, keep the MRU page */
            new_it = cache_get_by_addr(new_cache, old_it->it_addr);
            if (new_it->it_data && new_it->it_age >= old_it->it_age) {
                /* keep the MRU page */
                g_free(old_it->it_data);
            } else {
                if (!new_it->it_data) {
                    new_cache->num_items++;
                }
                g_free(new_it->it_data);
                new_it->it_data = old_it->it_data;
                new_it->it_age = old_it->it_age;
                new_it->it_addr = old_it->it_addr;
            }
        }
    }

    g_free(cache->page_cache);
    cache->page_cache = new_cache->page_cache;
    cache->max_num_items = new_cache->max_num_items;
    cache->num_items = new_cache->num_items;

    g_free(new_cache);

    return cache->max_num_items;
}
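
/*
 * Usage sketch (illustrative only, not part of QEMU): the function name,
 * the 4 KiB page size, and the address below are made-up assumptions to
 * show the intended calling sequence of this API.  The block is guarded by
 * CACHE_USAGE_EXAMPLE, which is never defined, so it does not affect the
 * build.
 */
#ifdef CACHE_USAGE_EXAMPLE
static int cache_usage_example(void)
{
    PageCache *cache;
    uint8_t page[4096] = { 0 };   /* one page worth of data */
    uint64_t addr = 0x10000;      /* hypothetical page-aligned address */
    uint64_t age = 1;             /* caller-maintained iteration counter */

    /* 100 is not a power of two; cache_init rounds it down to 64 buckets */
    cache = cache_init(100, sizeof(page));
    if (!cache) {
        return -1;
    }

    /* the first insert populates the bucket for this address */
    if (cache_insert(cache, addr, page, age) == 0 &&
        cache_is_cached(cache, addr, age)) {
        /* on a hit, the cached copy can be compared against a new page */
        uint8_t *prev = get_cached_data(cache, addr);
        (void)prev;
    }

    cache_fini(cache);
    return 0;
}
#endif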