/*
 * mm/balloon_compaction.c
 *
 * Common interface for making balloon pages movable by compaction.
 *
 * Copyright (C) 2012, Red Hat, Inc.  Rafael Aquini <aquini@redhat.com>
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/balloon_compaction.h>

/*
 * balloon_page_enqueue - allocates a new page and inserts it into the balloon
 *                        page list.
 * @b_dev_info: balloon device descriptor where we will insert a new page
 *
 * Drivers must call this function to properly allocate a new balloon page and
 * enlist it before definitively removing the page from the guest system.
 * This function returns the address of the newly enqueued page, or NULL if we
 * fail to allocate a new page this turn.
 */
struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info)
{
        unsigned long flags;
        struct page *page = alloc_page(balloon_mapping_gfp_mask() |
                                       __GFP_NOMEMALLOC | __GFP_NORETRY);
        if (!page)
                return NULL;

        /*
         * Block others from accessing the 'page' when we get around to
         * establishing additional references. We should be the only one
         * holding a reference to the 'page' at this point.
         */
        BUG_ON(!trylock_page(page));
        spin_lock_irqsave(&b_dev_info->pages_lock, flags);
        balloon_page_insert(b_dev_info, page);
        __count_vm_event(BALLOON_INFLATE);
        spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
        unlock_page(page);
        return page;
}
EXPORT_SYMBOL_GPL(balloon_page_enqueue);
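
/*
 * Usage sketch (hypothetical driver code, for illustration only): a balloon
 * driver typically inflates by calling balloon_page_enqueue() in a loop until
 * it reaches its target; 'vb' and 'tell_host()' are assumed driver-private
 * names, not part of this interface.
 *
 *      while (num_pages--) {
 *              struct page *page = balloon_page_enqueue(&vb->b_dev_info);
 *
 *              if (!page)
 *                      break;  (allocation failed; the driver retries later)
 *              tell_host(vb, page);
 *      }
 */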

/*
 * balloon_page_dequeue - removes a page from the balloon's page list and
 *                        returns its address to allow the driver to release
 *                        the page.
 * @b_dev_info: balloon device descriptor where we will grab a page from.
 *
 * Drivers must call this function to properly de-allocate a previously
 * enlisted balloon page before definitively releasing it back to the guest
 * system. This function returns the address of the dequeued page, or NULL if
 * the balloon's page list is temporarily empty because compaction has
 * isolated its pages.
 */
struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
{
        struct page *page, *tmp;
        unsigned long flags;
        bool dequeued_page;

        dequeued_page = false;
        list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
                /*
                 * Block others from accessing the 'page' while we get around
                 * to establishing additional references and preparing the
                 * 'page' to be released by the balloon driver.
                 */
                if (trylock_page(page)) {
                        if (!PagePrivate(page)) {
                                /* raced with isolation */
                                unlock_page(page);
                                continue;
                        }
                        spin_lock_irqsave(&b_dev_info->pages_lock, flags);
                        balloon_page_delete(page);
                        __count_vm_event(BALLOON_DEFLATE);
                        spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
                        unlock_page(page);
                        dequeued_page = true;
                        break;
                }
        }

        if (!dequeued_page) {
                /*
                 * If we are unable to dequeue a balloon page because the page
                 * list is empty and there are no isolated pages, then
                 * something went wrong and some balloon pages were lost.
                 * BUG() here, otherwise the balloon driver may get stuck in
                 * an infinite loop while attempting to release all its pages.
                 */
                spin_lock_irqsave(&b_dev_info->pages_lock, flags);
                if (unlikely(list_empty(&b_dev_info->pages) &&
                             !b_dev_info->isolated_pages))
                        BUG();
                spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
                page = NULL;
        }
        return page;
}
EXPORT_SYMBOL_GPL(balloon_page_dequeue);
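
/*
 * Usage sketch (hypothetical driver code, for illustration only): a NULL
 * return does not necessarily mean the balloon is empty; every remaining
 * page may just be temporarily isolated by compaction, so deflate loops are
 * expected to retry. 'vb', 'target' and 'tell_host()' are assumed names.
 *
 *      while (vb->num_pages > target) {
 *              struct page *page = balloon_page_dequeue(&vb->b_dev_info);
 *
 *              if (!page)
 *                      break;  (pages are under migration; retry later)
 *              tell_host(vb, page);
 *              put_page(page); (drop the last reference; frees the page)
 *              vb->num_pages--;
 *      }
 */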

#ifdef CONFIG_BALLOON_COMPACTION

static inline void __isolate_balloon_page(struct page *page)
{
        struct balloon_dev_info *b_dev_info = balloon_page_device(page);
        unsigned long flags;

        spin_lock_irqsave(&b_dev_info->pages_lock, flags);
        ClearPagePrivate(page);
        list_del(&page->lru);
        b_dev_info->isolated_pages++;
        spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
}

static inline void __putback_balloon_page(struct page *page)
{
        struct balloon_dev_info *b_dev_info = balloon_page_device(page);
        unsigned long flags;

        spin_lock_irqsave(&b_dev_info->pages_lock, flags);
        SetPagePrivate(page);
        list_add(&page->lru, &b_dev_info->pages);
        b_dev_info->isolated_pages--;
        spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
}

/* __isolate_lru_page() counterpart for a ballooned page */
bool balloon_page_isolate(struct page *page)
{
        /*
         * Avoid burning cycles with pages that are yet under __free_pages(),
         * or just got freed under us.
         *
         * In case we 'win' a race for a balloon page being freed under us and
         * raise its refcount, preventing __free_pages() from doing its job,
         * the put_page() at the end of this block will take care of releasing
         * this page, thus avoiding a nasty leakage.
         */
        if (likely(get_page_unless_zero(page))) {
                /*
                 * As balloon pages are not isolated from LRU lists, concurrent
                 * compaction threads can race against page migration functions
                 * as well as race against the balloon driver releasing a page.
                 *
                 * In order to avoid having an already isolated balloon page
                 * being (wrongly) re-isolated while it is under migration,
                 * or to avoid attempting to isolate pages being released by
                 * the balloon driver, let's be sure we have the page lock
                 * before proceeding with the balloon page isolation steps.
                 */
                if (likely(trylock_page(page))) {
                        /*
                         * A ballooned page, by default, has PagePrivate set;
                         * isolation clears that flag, which prevents
                         * concurrent compaction threads from re-isolating an
                         * already isolated balloon page.
                         */
                        if (balloon_page_movable(page)) {
                                __isolate_balloon_page(page);
                                unlock_page(page);
                                return true;
                        }
                        unlock_page(page);
                }
                put_page(page);
        }
        return false;
}

/* putback_lru_page() counterpart for a ballooned page */
void balloon_page_putback(struct page *page)
{
        /*
         * 'lock_page()' stabilizes the page and prevents races against
         * concurrent isolation threads attempting to re-isolate it.
         */
        lock_page(page);

        if (__is_movable_balloon_page(page)) {
                __putback_balloon_page(page);
                /* drop the extra ref count taken for page isolation */
                put_page(page);
        } else {
                WARN_ON(1);
                dump_page(page, "not movable balloon page");
        }
        unlock_page(page);
}

/* move_to_new_page() counterpart for a ballooned page */
int balloon_page_migrate(struct page *newpage,
                         struct page *page, enum migrate_mode mode)
{
        struct balloon_dev_info *balloon = balloon_page_device(page);
        int rc = -EAGAIN;

        /*
         * Block others from accessing the 'newpage' when we get around to
         * establishing additional references. We should be the only one
         * holding a reference to the 'newpage' at this point.
         */
        BUG_ON(!trylock_page(newpage));

        if (WARN_ON(!__is_movable_balloon_page(page))) {
                dump_page(page, "not movable balloon page");
                unlock_page(newpage);
                return rc;
        }

        if (balloon && balloon->migratepage)
                rc = balloon->migratepage(balloon, newpage, page, mode);

        unlock_page(newpage);
        return rc;
}
#endif /* CONFIG_BALLOON_COMPACTION */
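
/*
 * Usage sketch (hypothetical driver code, for illustration only): a driver
 * that supports compaction provides the migratepage callback invoked above.
 * The callback inflates the balloon with 'newpage', tells the host about the
 * swap, and deflates 'page' out of the balloon. 'tell_host_inflate()' and
 * 'tell_host_deflate()' are assumed driver-private notification hooks.
 *
 *      static int example_migratepage(struct balloon_dev_info *b_dev_info,
 *                                     struct page *newpage, struct page *page,
 *                                     enum migrate_mode mode)
 *      {
 *              unsigned long flags;
 *
 *              get_page(newpage);      (balloon's reference on the new page)
 *
 *              spin_lock_irqsave(&b_dev_info->pages_lock, flags);
 *              balloon_page_insert(b_dev_info, newpage);
 *              b_dev_info->isolated_pages--;
 *              spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
 *              tell_host_inflate(b_dev_info, newpage);
 *
 *              balloon_page_delete(page);
 *              tell_host_deflate(b_dev_info, page);
 *              put_page(page);         (drop the balloon's reference)
 *
 *              return MIGRATEPAGE_SUCCESS;
 *      }
 */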