1457c8996SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
218468d93SRafael Aquini /*
318468d93SRafael Aquini * mm/balloon_compaction.c
418468d93SRafael Aquini *
518468d93SRafael Aquini * Common interface for making balloon pages movable by compaction.
618468d93SRafael Aquini *
718468d93SRafael Aquini * Copyright (C) 2012, Red Hat, Inc. Rafael Aquini <aquini@redhat.com>
818468d93SRafael Aquini */
918468d93SRafael Aquini #include <linux/mm.h>
1018468d93SRafael Aquini #include <linux/slab.h>
1118468d93SRafael Aquini #include <linux/export.h>
1218468d93SRafael Aquini #include <linux/balloon_compaction.h>
1318468d93SRafael Aquini
/*
 * balloon_page_enqueue_one - insert a single page into the balloon list.
 * @b_dev_info: balloon device descriptor receiving the page.
 * @page: page to insert; the caller must hold the only reference to it.
 *
 * Caller must hold b_dev_info->pages_lock (all callers in this file do).
 */
static void balloon_page_enqueue_one(struct balloon_dev_info *b_dev_info,
				     struct page *page)
{
	/*
	 * Block others from accessing the 'page' when we get around to
	 * establishing additional references. We should be the only one
	 * holding a reference to the 'page' at this point. If we are not, then
	 * memory corruption is possible and we should stop execution.
	 */
	BUG_ON(!trylock_page(page));
	balloon_page_insert(b_dev_info, page);
	unlock_page(page);
	/* Account the inflation while still under the caller's pages_lock. */
	__count_vm_event(BALLOON_INFLATE);
}
28418a3ab1SNadav Amit
29418a3ab1SNadav Amit /**
30418a3ab1SNadav Amit * balloon_page_list_enqueue() - inserts a list of pages into the balloon page
31418a3ab1SNadav Amit * list.
32418a3ab1SNadav Amit * @b_dev_info: balloon device descriptor where we will insert a new page to
33418a3ab1SNadav Amit * @pages: pages to enqueue - allocated using balloon_page_alloc.
34418a3ab1SNadav Amit *
35cfe61801SMichael S. Tsirkin * Driver must call this function to properly enqueue balloon pages before
36cfe61801SMichael S. Tsirkin * definitively removing them from the guest system.
37418a3ab1SNadav Amit *
38418a3ab1SNadav Amit * Return: number of pages that were enqueued.
39418a3ab1SNadav Amit */
balloon_page_list_enqueue(struct balloon_dev_info * b_dev_info,struct list_head * pages)40418a3ab1SNadav Amit size_t balloon_page_list_enqueue(struct balloon_dev_info *b_dev_info,
41418a3ab1SNadav Amit struct list_head *pages)
42418a3ab1SNadav Amit {
43418a3ab1SNadav Amit struct page *page, *tmp;
44418a3ab1SNadav Amit unsigned long flags;
45418a3ab1SNadav Amit size_t n_pages = 0;
46418a3ab1SNadav Amit
47418a3ab1SNadav Amit spin_lock_irqsave(&b_dev_info->pages_lock, flags);
48418a3ab1SNadav Amit list_for_each_entry_safe(page, tmp, pages, lru) {
49dd422906SWei Wang list_del(&page->lru);
50418a3ab1SNadav Amit balloon_page_enqueue_one(b_dev_info, page);
51418a3ab1SNadav Amit n_pages++;
52418a3ab1SNadav Amit }
53418a3ab1SNadav Amit spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
54418a3ab1SNadav Amit return n_pages;
55418a3ab1SNadav Amit }
56418a3ab1SNadav Amit EXPORT_SYMBOL_GPL(balloon_page_list_enqueue);
57418a3ab1SNadav Amit
/**
 * balloon_page_list_dequeue() - removes pages from balloon's page list and
 *				 returns a list of the pages.
 * @b_dev_info: balloon device descriptor where we will grab a page from.
 * @pages: pointer to the list of pages that would be returned to the caller.
 * @n_req_pages: number of requested pages.
 *
 * Driver must call this function to properly de-allocate a previous enlisted
 * balloon pages before definitively releasing it back to the guest system.
 * This function tries to remove @n_req_pages from the ballooned pages and
 * return them to the caller in the @pages list.
 *
 * Note that this function may fail to dequeue some pages even if the balloon
 * isn't empty - since the page list can be temporarily empty due to compaction
 * of isolated pages.
 *
 * Return: number of pages that were added to the @pages list.
 */
size_t balloon_page_list_dequeue(struct balloon_dev_info *b_dev_info,
				 struct list_head *pages, size_t n_req_pages)
{
	struct page *page, *tmp;
	unsigned long flags;
	size_t n_pages = 0;

	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
	list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
		if (n_pages == n_req_pages)
			break;

		/*
		 * Block others from accessing the 'page' while we get around to
		 * establishing additional references and preparing the 'page'
		 * to be released by the balloon driver.
		 *
		 * A failed trylock means someone else (e.g. compaction) holds
		 * the page; skip it rather than wait under the spinlock.
		 */
		if (!trylock_page(page))
			continue;

		if (IS_ENABLED(CONFIG_BALLOON_COMPACTION) &&
		    PageIsolated(page)) {
			/* raced with isolation */
			unlock_page(page);
			continue;
		}
		balloon_page_delete(page);
		__count_vm_event(BALLOON_DEFLATE);
		/* Hand the page back to the caller via the @pages list. */
		list_add(&page->lru, pages);
		unlock_page(page);
		n_pages++;
	}
	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);

	return n_pages;
}
EXPORT_SYMBOL_GPL(balloon_page_list_dequeue);
113418a3ab1SNadav Amit
11418468d93SRafael Aquini /*
115c7cdff0eSMichael S. Tsirkin * balloon_page_alloc - allocates a new page for insertion into the balloon
116c7cdff0eSMichael S. Tsirkin * page list.
117c7cdff0eSMichael S. Tsirkin *
118cfe61801SMichael S. Tsirkin * Driver must call this function to properly allocate a new balloon page.
119cfe61801SMichael S. Tsirkin * Driver must call balloon_page_enqueue before definitively removing the page
120cfe61801SMichael S. Tsirkin * from the guest system.
121cfe61801SMichael S. Tsirkin *
122cfe61801SMichael S. Tsirkin * Return: struct page for the allocated page or NULL on allocation failure.
123c7cdff0eSMichael S. Tsirkin */
balloon_page_alloc(void)124c7cdff0eSMichael S. Tsirkin struct page *balloon_page_alloc(void)
125c7cdff0eSMichael S. Tsirkin {
126c7cdff0eSMichael S. Tsirkin struct page *page = alloc_page(balloon_mapping_gfp_mask() |
12702fa5d7bSNadav Amit __GFP_NOMEMALLOC | __GFP_NORETRY |
12802fa5d7bSNadav Amit __GFP_NOWARN);
129c7cdff0eSMichael S. Tsirkin return page;
130c7cdff0eSMichael S. Tsirkin }
131c7cdff0eSMichael S. Tsirkin EXPORT_SYMBOL_GPL(balloon_page_alloc);
132c7cdff0eSMichael S. Tsirkin
133c7cdff0eSMichael S. Tsirkin /*
134dd422906SWei Wang * balloon_page_enqueue - inserts a new page into the balloon page list.
135dd422906SWei Wang *
136cfe61801SMichael S. Tsirkin * @b_dev_info: balloon device descriptor where we will insert a new page
137c7cdff0eSMichael S. Tsirkin * @page: new page to enqueue - allocated using balloon_page_alloc.
13818468d93SRafael Aquini *
139cfe61801SMichael S. Tsirkin * Drivers must call this function to properly enqueue a new allocated balloon
140cfe61801SMichael S. Tsirkin * page before definitively removing the page from the guest system.
141dd422906SWei Wang *
142cfe61801SMichael S. Tsirkin * Drivers must not call balloon_page_enqueue on pages that have been pushed to
143cfe61801SMichael S. Tsirkin * a list with balloon_page_push before removing them with balloon_page_pop. To
144cfe61801SMichael S. Tsirkin * enqueue a list of pages, use balloon_page_list_enqueue instead.
14518468d93SRafael Aquini */
balloon_page_enqueue(struct balloon_dev_info * b_dev_info,struct page * page)146c7cdff0eSMichael S. Tsirkin void balloon_page_enqueue(struct balloon_dev_info *b_dev_info,
147c7cdff0eSMichael S. Tsirkin struct page *page)
14818468d93SRafael Aquini {
14918468d93SRafael Aquini unsigned long flags;
15018468d93SRafael Aquini
15118468d93SRafael Aquini spin_lock_irqsave(&b_dev_info->pages_lock, flags);
152418a3ab1SNadav Amit balloon_page_enqueue_one(b_dev_info, page);
15318468d93SRafael Aquini spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
15418468d93SRafael Aquini }
15518468d93SRafael Aquini EXPORT_SYMBOL_GPL(balloon_page_enqueue);
15618468d93SRafael Aquini
15718468d93SRafael Aquini /*
15818468d93SRafael Aquini * balloon_page_dequeue - removes a page from balloon's page list and returns
159cfe61801SMichael S. Tsirkin * its address to allow the driver to release the page.
160f0953a1bSIngo Molnar * @b_dev_info: balloon device descriptor where we will grab a page from.
16118468d93SRafael Aquini *
162cfe61801SMichael S. Tsirkin * Driver must call this function to properly dequeue a previously enqueued page
163cfe61801SMichael S. Tsirkin * before definitively releasing it back to the guest system.
164cfe61801SMichael S. Tsirkin *
165cfe61801SMichael S. Tsirkin * Caller must perform its own accounting to ensure that this
166cfe61801SMichael S. Tsirkin * function is called only if some pages are actually enqueued.
167cfe61801SMichael S. Tsirkin *
168cfe61801SMichael S. Tsirkin * Note that this function may fail to dequeue some pages even if there are
169cfe61801SMichael S. Tsirkin * some enqueued pages - since the page list can be temporarily empty due to
170cfe61801SMichael S. Tsirkin * the compaction of isolated pages.
171cfe61801SMichael S. Tsirkin *
172cfe61801SMichael S. Tsirkin * TODO: remove the caller accounting requirements, and allow caller to wait
173cfe61801SMichael S. Tsirkin * until all pages can be dequeued.
174cfe61801SMichael S. Tsirkin *
175cfe61801SMichael S. Tsirkin * Return: struct page for the dequeued page, or NULL if no page was dequeued.
17618468d93SRafael Aquini */
balloon_page_dequeue(struct balloon_dev_info * b_dev_info)17718468d93SRafael Aquini struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
17818468d93SRafael Aquini {
17918468d93SRafael Aquini unsigned long flags;
180418a3ab1SNadav Amit LIST_HEAD(pages);
181418a3ab1SNadav Amit int n_pages;
18218468d93SRafael Aquini
183418a3ab1SNadav Amit n_pages = balloon_page_list_dequeue(b_dev_info, &pages, 1);
18418468d93SRafael Aquini
185418a3ab1SNadav Amit if (n_pages != 1) {
18618468d93SRafael Aquini /*
18718468d93SRafael Aquini * If we are unable to dequeue a balloon page because the page
188cfe61801SMichael S. Tsirkin * list is empty and there are no isolated pages, then something
18918468d93SRafael Aquini * went out of track and some balloon pages are lost.
190cfe61801SMichael S. Tsirkin * BUG() here, otherwise the balloon driver may get stuck in
19118468d93SRafael Aquini * an infinite loop while attempting to release all its pages.
19218468d93SRafael Aquini */
19318468d93SRafael Aquini spin_lock_irqsave(&b_dev_info->pages_lock, flags);
19418468d93SRafael Aquini if (unlikely(list_empty(&b_dev_info->pages) &&
19518468d93SRafael Aquini !b_dev_info->isolated_pages))
19618468d93SRafael Aquini BUG();
19718468d93SRafael Aquini spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
198418a3ab1SNadav Amit return NULL;
19918468d93SRafael Aquini }
200418a3ab1SNadav Amit return list_first_entry(&pages, struct page, lru);
20118468d93SRafael Aquini }
20218468d93SRafael Aquini EXPORT_SYMBOL_GPL(balloon_page_dequeue);
20318468d93SRafael Aquini
20418468d93SRafael Aquini #ifdef CONFIG_BALLOON_COMPACTION
20518468d93SRafael Aquini
balloon_page_isolate(struct page * page,isolate_mode_t mode)206504c1cabSMiaohe Lin static bool balloon_page_isolate(struct page *page, isolate_mode_t mode)
207b1123ea6SMinchan Kim
20818468d93SRafael Aquini {
2099d1ba805SKonstantin Khlebnikov struct balloon_dev_info *b_dev_info = balloon_page_device(page);
21018468d93SRafael Aquini unsigned long flags;
211d6d86c0aSKonstantin Khlebnikov
21218468d93SRafael Aquini spin_lock_irqsave(&b_dev_info->pages_lock, flags);
21318468d93SRafael Aquini list_del(&page->lru);
21418468d93SRafael Aquini b_dev_info->isolated_pages++;
21518468d93SRafael Aquini spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
216b1123ea6SMinchan Kim
217b1123ea6SMinchan Kim return true;
21818468d93SRafael Aquini }
21918468d93SRafael Aquini
balloon_page_putback(struct page * page)220504c1cabSMiaohe Lin static void balloon_page_putback(struct page *page)
22118468d93SRafael Aquini {
2229d1ba805SKonstantin Khlebnikov struct balloon_dev_info *b_dev_info = balloon_page_device(page);
22318468d93SRafael Aquini unsigned long flags;
224d6d86c0aSKonstantin Khlebnikov
22518468d93SRafael Aquini spin_lock_irqsave(&b_dev_info->pages_lock, flags);
22618468d93SRafael Aquini list_add(&page->lru, &b_dev_info->pages);
22718468d93SRafael Aquini b_dev_info->isolated_pages--;
22818468d93SRafael Aquini spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
22918468d93SRafael Aquini }
23018468d93SRafael Aquini
/* move_to_new_page() counterpart for a ballooned page */
/*
 * balloon_page_migrate - migrate the contents of @page into @newpage.
 * @newpage: destination page, already allocated and locked by migration core.
 * @page: the ballooned source page, also locked.
 * @mode: migration mode requested by the caller.
 *
 * Delegates the actual copy/replace to the balloon driver's ->migratepage
 * callback. Return: 0 on success or a negative errno (driver-defined).
 */
static int balloon_page_migrate(struct page *newpage, struct page *page,
		enum migrate_mode mode)
{
	struct balloon_dev_info *balloon = balloon_page_device(page);

	/*
	 * We can not easily support the no copy case here so ignore it as it
	 * is unlikely to be used with balloon pages. See include/linux/hmm.h
	 * for a user of the MIGRATE_SYNC_NO_COPY mode.
	 */
	if (mode == MIGRATE_SYNC_NO_COPY)
		return -EINVAL;

	/* Migration core hands us both pages locked; sanity-check that. */
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

	return balloon->migratepage(balloon, newpage, page, mode);
}
25018468d93SRafael Aquini
/*
 * Movable-page operations shared by all balloon drivers; referenced via
 * page->mapping so the compaction code can isolate, migrate, and put back
 * balloon pages.
 */
const struct movable_operations balloon_mops = {
	.migrate_page = balloon_page_migrate,
	.isolate_page = balloon_page_isolate,
	.putback_page = balloon_page_putback,
};
EXPORT_SYMBOL_GPL(balloon_mops);
25718468d93SRafael Aquini
25818468d93SRafael Aquini #endif /* CONFIG_BALLOON_COMPACTION */
259