// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/balloon_compaction.c
 *
 * Common interface for making balloon pages movable by compaction.
 *
 * Copyright (C) 2012, Red Hat, Inc.  Rafael Aquini <aquini@redhat.com>
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/balloon_compaction.h>

static void balloon_page_enqueue_one(struct balloon_dev_info *b_dev_info,
				     struct page *page)
{
	/*
	 * Block others from accessing the 'page' when we get around to
	 * establishing additional references. We should be the only one
	 * holding a reference to the 'page' at this point. If we are not,
	 * then memory corruption is possible and we should stop execution.
	 */
	BUG_ON(!trylock_page(page));
	balloon_page_insert(b_dev_info, page);
	unlock_page(page);
	__count_vm_event(BALLOON_INFLATE);
}

/**
 * balloon_page_list_enqueue() - inserts a list of pages into the balloon page
 *				 list.
 * @b_dev_info: balloon device descriptor where we will insert the new pages
 * @pages: pages to enqueue - allocated using balloon_page_alloc.
 *
 * Drivers must call this function to properly enqueue balloon pages before
 * definitively removing them from the guest system.
 *
 * Return: number of pages that were enqueued.
 */
size_t balloon_page_list_enqueue(struct balloon_dev_info *b_dev_info,
				 struct list_head *pages)
{
	struct page *page, *tmp;
	unsigned long flags;
	size_t n_pages = 0;

	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	list_for_each_entry_safe(page, tmp, pages, lru) {
		/* Detach from the caller's list before inserting. */
		list_del(&page->lru);
		balloon_page_enqueue_one(b_dev_info, page);
		n_pages++;
	}
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
	return n_pages;
}
EXPORT_SYMBOL_GPL(balloon_page_list_enqueue);
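
/*
 * A minimal, illustrative sketch (not part of this file's interface) of how a
 * hypothetical driver might inflate by a batch of pages using
 * balloon_page_alloc() and balloon_page_list_enqueue(). The function name and
 * the omitted hypervisor notification are assumptions; only the alloc and
 * enqueue calls come from this file.
 *
 *	static size_t example_inflate(struct balloon_dev_info *b_dev_info,
 *				      size_t nr_pages)
 *	{
 *		LIST_HEAD(pages);
 *		size_t i;
 *
 *		for (i = 0; i < nr_pages; i++) {
 *			struct page *page = balloon_page_alloc();
 *
 *			if (!page)
 *				break;
 *			// Collect on a caller-private list; the enqueue below
 *			// takes pages_lock once for the whole batch.
 *			list_add(&page->lru, &pages);
 *		}
 *
 *		// Driver specific: tell the hypervisor about 'pages' here,
 *		// then hand them over to the balloon core.
 *		return balloon_page_list_enqueue(b_dev_info, &pages);
 *	}
 */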

/**
 * balloon_page_list_dequeue() - removes pages from balloon's page list and
 *				 returns a list of the pages.
 * @b_dev_info: balloon device descriptor where we will grab pages from.
 * @pages: pointer to the list of pages that will be returned to the caller.
 * @n_req_pages: number of requested pages.
 *
 * Drivers must call this function to properly de-allocate previously enlisted
 * balloon pages before definitively releasing them back to the guest system.
 * This function tries to remove @n_req_pages from the ballooned pages and
 * return them to the caller in the @pages list.
 *
 * Note that this function may fail to dequeue some pages even if the balloon
 * isn't empty - since the page list can be temporarily empty due to the
 * compaction of isolated pages.
 *
 * Return: number of pages that were added to the @pages list.
 */
size_t balloon_page_list_dequeue(struct balloon_dev_info *b_dev_info,
				 struct list_head *pages, size_t n_req_pages)
{
	struct page *page, *tmp;
	unsigned long flags;
	size_t n_pages = 0;

	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
		if (n_pages == n_req_pages)
			break;

		/*
		 * Block others from accessing the 'page' while we get around
		 * to establishing additional references and preparing the
		 * 'page' to be released by the balloon driver.
		 */
		if (!trylock_page(page))
			continue;

		if (IS_ENABLED(CONFIG_BALLOON_COMPACTION) &&
		    PageIsolated(page)) {
			/* raced with isolation */
			unlock_page(page);
			continue;
		}
		balloon_page_delete(page);
		__count_vm_event(BALLOON_DEFLATE);
		list_add(&page->lru, pages);
		unlock_page(page);
		n_pages++;
	}
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);

	return n_pages;
}
EXPORT_SYMBOL_GPL(balloon_page_list_dequeue);
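
/*
 * A minimal, illustrative sketch (not part of this file's interface) of the
 * matching deflate path for a hypothetical driver: dequeue a batch with
 * balloon_page_list_dequeue() and hand the pages back to the page allocator.
 * The function name and the omitted hypervisor notification are assumptions.
 *
 *	static size_t example_deflate(struct balloon_dev_info *b_dev_info,
 *				      size_t nr_pages)
 *	{
 *		LIST_HEAD(pages);
 *		struct page *page, *tmp;
 *		size_t n;
 *
 *		n = balloon_page_list_dequeue(b_dev_info, &pages, nr_pages);
 *
 *		// Driver specific: tell the hypervisor the dequeued pages are
 *		// being returned to the guest before freeing them.
 *		list_for_each_entry_safe(page, tmp, &pages, lru) {
 *			list_del(&page->lru);
 *			__free_page(page);
 *		}
 *		return n;
 *	}
 */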

/*
 * balloon_page_alloc - allocates a new page for insertion into the balloon
 *			page list.
 *
 * Drivers must call this function to properly allocate a new balloon page.
 * Drivers must call balloon_page_enqueue before definitively removing the
 * page from the guest system.
 *
 * Returns the address of the newly allocated page, or NULL if allocation
 * fails this turn.
 */
struct page *balloon_page_alloc(void)
{
	struct page *page = alloc_page(balloon_mapping_gfp_mask() |
				       __GFP_NOMEMALLOC | __GFP_NORETRY);
	return page;
}
EXPORT_SYMBOL_GPL(balloon_page_alloc);

/*
 * balloon_page_enqueue - inserts a new page into the balloon page list.
 * @b_dev_info: balloon device descriptor where we will insert the new page
 * @page: new page to enqueue - allocated using balloon_page_alloc.
 *
 * Drivers must call this function to properly enqueue a newly allocated
 * balloon page before definitively removing the page from the guest system.
 */
void balloon_page_enqueue(struct balloon_dev_info *b_dev_info,
			  struct page *page)
{
	unsigned long flags;

	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	balloon_page_enqueue_one(b_dev_info, page);
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
}
EXPORT_SYMBOL_GPL(balloon_page_enqueue);

/*
 * balloon_page_dequeue - removes a page from the balloon's page list and
 *			  returns its address to allow the driver to release
 *			  the page.
 * @b_dev_info: balloon device descriptor where we will grab a page from.
 *
 * Drivers must call this function to properly de-allocate a previously
 * enlisted balloon page before definitively releasing it back to the guest
 * system.
 *
 * Returns the address of the dequeued page, or NULL if the balloon's page
 * list is temporarily empty due to compaction-isolated pages.
 */
struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
{
	unsigned long flags;
	LIST_HEAD(pages);
	int n_pages;

	n_pages = balloon_page_list_dequeue(b_dev_info, &pages, 1);

	if (n_pages != 1) {
		/*
		 * If we are unable to dequeue a balloon page because the page
		 * list is empty and there are no isolated pages, then
		 * something went out of track and some balloon pages are
		 * lost. BUG() here, otherwise the balloon driver may get
		 * stuck in an infinite loop while attempting to release all
		 * its pages.
		 */
		spin_lock_irqsave(&b_dev_info->pages_lock, flags);
		if (unlikely(list_empty(&b_dev_info->pages) &&
			     !b_dev_info->isolated_pages))
			BUG();
		spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
		return NULL;
	}
	return list_first_entry(&pages, struct page, lru);
}
EXPORT_SYMBOL_GPL(balloon_page_dequeue);

#ifdef CONFIG_BALLOON_COMPACTION

bool balloon_page_isolate(struct page *page, isolate_mode_t mode)
{
	struct balloon_dev_info *b_dev_info = balloon_page_device(page);
	unsigned long flags;

	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	list_del(&page->lru);
	b_dev_info->isolated_pages++;
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);

	return true;
}

void balloon_page_putback(struct page *page)
{
	struct balloon_dev_info *b_dev_info = balloon_page_device(page);
	unsigned long flags;

	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	list_add(&page->lru, &b_dev_info->pages);
	b_dev_info->isolated_pages--;
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
}

/* move_to_new_page() counterpart for a ballooned page */
int balloon_page_migrate(struct address_space *mapping,
			 struct page *newpage, struct page *page,
			 enum migrate_mode mode)
{
	struct balloon_dev_info *balloon = balloon_page_device(page);

	/*
	 * We cannot easily support the no-copy case here, so ignore it as it
	 * is unlikely to be used with balloon pages. See include/linux/hmm.h
	 * for a user of the MIGRATE_SYNC_NO_COPY mode.
	 */
	if (mode == MIGRATE_SYNC_NO_COPY)
		return -EINVAL;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

	return balloon->migratepage(balloon, newpage, page, mode);
}

const struct address_space_operations balloon_aops = {
	.migratepage = balloon_page_migrate,
	.isolate_page = balloon_page_isolate,
	.putback_page = balloon_page_putback,
};
EXPORT_SYMBOL_GPL(balloon_aops);

#endif /* CONFIG_BALLOON_COMPACTION */
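
/*
 * A minimal, illustrative sketch (not part of this file's interface) of the
 * setup a hypothetical driver might perform so its ballooned pages
 * participate in compaction. The callback body mirrors the insert/delete
 * pattern used by the core above but omits the driver-specific hypervisor
 * notifications; the function names and the 'inode' whose mapping is made
 * movable are assumptions.
 *
 *	static int example_migratepage(struct balloon_dev_info *b_dev_info,
 *				       struct page *newpage, struct page *page,
 *				       enum migrate_mode mode)
 *	{
 *		unsigned long flags;
 *
 *		// Inflate 'newpage' in place of the isolated 'page'.
 *		spin_lock_irqsave(&b_dev_info->pages_lock, flags);
 *		balloon_page_insert(b_dev_info, newpage);
 *		b_dev_info->isolated_pages--;
 *		spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
 *
 *		// Driver specific: tell the hypervisor that 'newpage' now
 *		// backs the ballooned range instead of 'page'.
 *		balloon_page_delete(page);
 *		put_page(page);		// drop the balloon's page reference
 *		return MIGRATEPAGE_SUCCESS;
 *	}
 *
 *	static void example_devinfo_setup(struct balloon_dev_info *b_dev_info,
 *					  struct inode *inode)
 *	{
 *		balloon_devinfo_init(b_dev_info);
 *		b_dev_info->migratepage = example_migratepage;
 *		// Route isolate/migrate/putback through balloon_aops so the
 *		// callbacks in this file are reached during compaction.
 *		inode->i_mapping->a_ops = &balloon_aops;
 *		b_dev_info->inode = inode;
 *	}
 */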