xref: /openbmc/linux/drivers/misc/vmw_balloon.c (revision 9a87ffc99ec8eb8d35eed7c4f816d75f5cc9662e)
1  // SPDX-License-Identifier: GPL-2.0
2  /*
3   * VMware Balloon driver.
4   *
5   * Copyright (C) 2000-2018, VMware, Inc. All Rights Reserved.
6   *
7   * This is the VMware physical memory management driver for Linux. The driver
8   * acts like a "balloon" that can be inflated to reclaim physical pages by
9   * reserving them in the guest and invalidating them in the monitor,
10   * freeing up the underlying machine pages so they can be allocated to
11   * other guests.  The balloon can also be deflated to allow the guest to
12   * use more physical memory. Higher level policies can control the sizes
13   * of balloons in VMs in order to manage physical memory resources.
14   */
15  
16  //#define DEBUG
17  #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18  
19  #include <linux/types.h>
20  #include <linux/io.h>
21  #include <linux/kernel.h>
22  #include <linux/mm.h>
23  #include <linux/vmalloc.h>
24  #include <linux/sched.h>
25  #include <linux/module.h>
26  #include <linux/workqueue.h>
27  #include <linux/debugfs.h>
28  #include <linux/seq_file.h>
29  #include <linux/rwsem.h>
30  #include <linux/slab.h>
31  #include <linux/spinlock.h>
32  #include <linux/balloon_compaction.h>
33  #include <linux/vmw_vmci_defs.h>
34  #include <linux/vmw_vmci_api.h>
35  #include <asm/hypervisor.h>
36  
37  MODULE_AUTHOR("VMware, Inc.");
38  MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
39  MODULE_ALIAS("dmi:*:svnVMware*:*");
40  MODULE_ALIAS("vmware_vmmemctl");
41  MODULE_LICENSE("GPL");
42  
43  static bool __read_mostly vmwballoon_shrinker_enable;
44  module_param(vmwballoon_shrinker_enable, bool, 0444);
45  MODULE_PARM_DESC(vmwballoon_shrinker_enable,
46  	"Enable non-cooperative out-of-memory protection. Disabled by default as it may degrade performance.");
47  
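/*
 * For example (a sketch; "vmw_balloon" is this module's name as derived
 * from the file name, and may differ in out-of-tree builds), the
 * shrinker can be enabled at load time with:
 *
 *	modprobe vmw_balloon vmwballoon_shrinker_enable=1
 */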
48  /* Delay in seconds after shrink before inflation. */
49  #define VMBALLOON_SHRINK_DELAY		(5)
50  
51  /* Maximum number of refused pages we accumulate during inflation cycle */
52  #define VMW_BALLOON_MAX_REFUSED		16
53  
54  /* Magic number for the balloon mount-point */
55  #define BALLOON_VMW_MAGIC		0x0ba11007
56  
57  /*
58   * Hypervisor communication port definitions.
59   */
60  #define VMW_BALLOON_HV_PORT		0x5670
61  #define VMW_BALLOON_HV_MAGIC		0x456c6d6f
62  #define VMW_BALLOON_GUEST_ID		1	/* Linux */
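/*
 * Note: VMW_BALLOON_HV_MAGIC, read as big-endian bytes, is the ASCII
 * string "Elmo" (0x45 'E', 0x6c 'l', 0x6d 'm', 0x6f 'o').
 */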
63  
64  enum vmwballoon_capabilities {
65  	/*
66  	 * Bit 0 is reserved and not associated to any capability.
67  	 */
68  	VMW_BALLOON_BASIC_CMDS			= (1 << 1),
69  	VMW_BALLOON_BATCHED_CMDS		= (1 << 2),
70  	VMW_BALLOON_BATCHED_2M_CMDS		= (1 << 3),
71  	VMW_BALLOON_SIGNALLED_WAKEUP_CMD	= (1 << 4),
72  	VMW_BALLOON_64_BIT_TARGET		= (1 << 5)
73  };
74  
75  #define VMW_BALLOON_CAPABILITIES_COMMON	(VMW_BALLOON_BASIC_CMDS \
76  					| VMW_BALLOON_BATCHED_CMDS \
77  					| VMW_BALLOON_BATCHED_2M_CMDS \
78  					| VMW_BALLOON_SIGNALLED_WAKEUP_CMD)
79  
80  #define VMW_BALLOON_2M_ORDER		(PMD_SHIFT - PAGE_SHIFT)
81  
82  /*
83   * 64-bit targets are only supported in 64-bit
84   */
85  #ifdef CONFIG_64BIT
86  #define VMW_BALLOON_CAPABILITIES	(VMW_BALLOON_CAPABILITIES_COMMON \
87  					| VMW_BALLOON_64_BIT_TARGET)
88  #else
89  #define VMW_BALLOON_CAPABILITIES	VMW_BALLOON_CAPABILITIES_COMMON
90  #endif
91  
92  enum vmballoon_page_size_type {
93  	VMW_BALLOON_4K_PAGE,
94  	VMW_BALLOON_2M_PAGE,
95  	VMW_BALLOON_LAST_SIZE = VMW_BALLOON_2M_PAGE
96  };
97  
98  #define VMW_BALLOON_NUM_PAGE_SIZES	(VMW_BALLOON_LAST_SIZE + 1)
99  
100  static const char * const vmballoon_page_size_names[] = {
101  	[VMW_BALLOON_4K_PAGE]			= "4k",
102  	[VMW_BALLOON_2M_PAGE]			= "2M"
103  };
104  
105  enum vmballoon_op {
106  	VMW_BALLOON_INFLATE,
107  	VMW_BALLOON_DEFLATE
108  };
109  
110  enum vmballoon_op_stat_type {
111  	VMW_BALLOON_OP_STAT,
112  	VMW_BALLOON_OP_FAIL_STAT
113  };
114  
115  #define VMW_BALLOON_OP_STAT_TYPES	(VMW_BALLOON_OP_FAIL_STAT + 1)
116  
117  /**
118   * enum vmballoon_cmd_type - backdoor commands.
119   *
120   * Availability of the commands is as follows:
121   *
122   * %VMW_BALLOON_CMD_START, %VMW_BALLOON_CMD_GET_TARGET and
123   * %VMW_BALLOON_CMD_GUEST_ID are always available.
124   *
125   * If the host reports %VMW_BALLOON_BASIC_CMDS are supported then
126   * %VMW_BALLOON_CMD_LOCK and %VMW_BALLOON_CMD_UNLOCK commands are available.
127   *
128   * If the host reports %VMW_BALLOON_BATCHED_CMDS are supported then
129   * %VMW_BALLOON_CMD_BATCHED_LOCK and %VMW_BALLOON_CMD_BATCHED_UNLOCK commands
130   * are available.
131   *
132   * If the host reports %VMW_BALLOON_BATCHED_2M_CMDS are supported then
133   * %VMW_BALLOON_CMD_BATCHED_2M_LOCK and %VMW_BALLOON_CMD_BATCHED_2M_UNLOCK
134   * are supported.
135   *
136   * If the host reports %VMW_BALLOON_SIGNALLED_WAKEUP_CMD is supported then
137   * the %VMW_BALLOON_CMD_VMCI_DOORBELL_SET command is supported.
138   *
139   * @VMW_BALLOON_CMD_START: Communicating supported version with the hypervisor.
140   * @VMW_BALLOON_CMD_GET_TARGET: Gets the balloon target size.
141   * @VMW_BALLOON_CMD_LOCK: Informs the hypervisor about a ballooned page.
142   * @VMW_BALLOON_CMD_UNLOCK: Informs the hypervisor about a page that is about
143   *			    to be deflated from the balloon.
144   * @VMW_BALLOON_CMD_GUEST_ID: Informs the hypervisor about the type of OS that
145   *			      runs in the VM.
146   * @VMW_BALLOON_CMD_BATCHED_LOCK: Inform the hypervisor about a batch of
147   *				  ballooned pages (up to 512).
148   * @VMW_BALLOON_CMD_BATCHED_UNLOCK: Inform the hypervisor about a batch of
149   *				  pages that are about to be deflated from the
150   *				  balloon (up to 512).
151   * @VMW_BALLOON_CMD_BATCHED_2M_LOCK: Similar to @VMW_BALLOON_CMD_BATCHED_LOCK
152   *				     for 2MB pages.
153   * @VMW_BALLOON_CMD_BATCHED_2M_UNLOCK: Similar to
154   *				       @VMW_BALLOON_CMD_BATCHED_UNLOCK for 2MB
155   *				       pages.
156   * @VMW_BALLOON_CMD_VMCI_DOORBELL_SET: A command to set doorbell notification
157   *				       that would be invoked when the balloon
158   *				       size changes.
159   * @VMW_BALLOON_CMD_LAST: Value of the last command.
160   */
161  enum vmballoon_cmd_type {
162  	VMW_BALLOON_CMD_START,
163  	VMW_BALLOON_CMD_GET_TARGET,
164  	VMW_BALLOON_CMD_LOCK,
165  	VMW_BALLOON_CMD_UNLOCK,
166  	VMW_BALLOON_CMD_GUEST_ID,
167  	/* No command 5 */
168  	VMW_BALLOON_CMD_BATCHED_LOCK = 6,
169  	VMW_BALLOON_CMD_BATCHED_UNLOCK,
170  	VMW_BALLOON_CMD_BATCHED_2M_LOCK,
171  	VMW_BALLOON_CMD_BATCHED_2M_UNLOCK,
172  	VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
173  	VMW_BALLOON_CMD_LAST = VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
174  };
175  
176  #define VMW_BALLOON_CMD_NUM	(VMW_BALLOON_CMD_LAST + 1)
177  
178  enum vmballoon_error_codes {
179  	VMW_BALLOON_SUCCESS,
180  	VMW_BALLOON_ERROR_CMD_INVALID,
181  	VMW_BALLOON_ERROR_PPN_INVALID,
182  	VMW_BALLOON_ERROR_PPN_LOCKED,
183  	VMW_BALLOON_ERROR_PPN_UNLOCKED,
184  	VMW_BALLOON_ERROR_PPN_PINNED,
185  	VMW_BALLOON_ERROR_PPN_NOTNEEDED,
186  	VMW_BALLOON_ERROR_RESET,
187  	VMW_BALLOON_ERROR_BUSY
188  };
189  
190  #define VMW_BALLOON_SUCCESS_WITH_CAPABILITIES	(0x03000000)
191  
192  #define VMW_BALLOON_CMD_WITH_TARGET_MASK			\
193  	((1UL << VMW_BALLOON_CMD_GET_TARGET)		|	\
194  	 (1UL << VMW_BALLOON_CMD_LOCK)			|	\
195  	 (1UL << VMW_BALLOON_CMD_UNLOCK)		|	\
196  	 (1UL << VMW_BALLOON_CMD_BATCHED_LOCK)		|	\
197  	 (1UL << VMW_BALLOON_CMD_BATCHED_UNLOCK)	|	\
198  	 (1UL << VMW_BALLOON_CMD_BATCHED_2M_LOCK)	|	\
199  	 (1UL << VMW_BALLOON_CMD_BATCHED_2M_UNLOCK))
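/*
 * For the commands in this mask, the hypervisor returns the current
 * balloon target in the result register on success, which
 * __vmballoon_cmd() copies into @b->target.
 */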
200  
201  static const char * const vmballoon_cmd_names[] = {
202  	[VMW_BALLOON_CMD_START]			= "start",
203  	[VMW_BALLOON_CMD_GET_TARGET]		= "target",
204  	[VMW_BALLOON_CMD_LOCK]			= "lock",
205  	[VMW_BALLOON_CMD_UNLOCK]		= "unlock",
206  	[VMW_BALLOON_CMD_GUEST_ID]		= "guestType",
207  	[VMW_BALLOON_CMD_BATCHED_LOCK]		= "batchLock",
208  	[VMW_BALLOON_CMD_BATCHED_UNLOCK]	= "batchUnlock",
209  	[VMW_BALLOON_CMD_BATCHED_2M_LOCK]	= "2m-lock",
210  	[VMW_BALLOON_CMD_BATCHED_2M_UNLOCK]	= "2m-unlock",
211  	[VMW_BALLOON_CMD_VMCI_DOORBELL_SET]	= "doorbellSet"
212  };
213  
214  enum vmballoon_stat_page {
215  	VMW_BALLOON_PAGE_STAT_ALLOC,
216  	VMW_BALLOON_PAGE_STAT_ALLOC_FAIL,
217  	VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC,
218  	VMW_BALLOON_PAGE_STAT_REFUSED_FREE,
219  	VMW_BALLOON_PAGE_STAT_FREE,
220  	VMW_BALLOON_PAGE_STAT_LAST = VMW_BALLOON_PAGE_STAT_FREE
221  };
222  
223  #define VMW_BALLOON_PAGE_STAT_NUM	(VMW_BALLOON_PAGE_STAT_LAST + 1)
224  
225  enum vmballoon_stat_general {
226  	VMW_BALLOON_STAT_TIMER,
227  	VMW_BALLOON_STAT_DOORBELL,
228  	VMW_BALLOON_STAT_RESET,
229  	VMW_BALLOON_STAT_SHRINK,
230  	VMW_BALLOON_STAT_SHRINK_FREE,
231  	VMW_BALLOON_STAT_LAST = VMW_BALLOON_STAT_SHRINK_FREE
232  };
233  
234  #define VMW_BALLOON_STAT_NUM		(VMW_BALLOON_STAT_LAST + 1)
235  
236  static DEFINE_STATIC_KEY_TRUE(vmw_balloon_batching);
237  static DEFINE_STATIC_KEY_FALSE(balloon_stat_enabled);
238  
239  struct vmballoon_ctl {
240  	struct list_head pages;
241  	struct list_head refused_pages;
242  	struct list_head prealloc_pages;
243  	unsigned int n_refused_pages;
244  	unsigned int n_pages;
245  	enum vmballoon_page_size_type page_size;
246  	enum vmballoon_op op;
247  };
248  
249  /**
250   * struct vmballoon_batch_entry - a batch entry for lock or unlock.
251   *
252   * @status: the status of the operation, which is written by the hypervisor.
253   * @reserved: reserved for future use. Must be set to zero.
254   * @pfn: the physical frame number of the page to be locked or unlocked.
255   */
256  struct vmballoon_batch_entry {
257  	u64 status : 5;
258  	u64 reserved : PAGE_SHIFT - 5;
259  	u64 pfn : 52;
260  } __packed;
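/*
 * Layout note (a sketch, assuming the common PAGE_SHIFT of 12): each
 * entry is a single 64-bit word in which the status occupies bits 0-4,
 * bits 5-11 are reserved, and the PFN occupies bits 12-63. The encoded
 * value is therefore the page's physical address with the status stored
 * in the otherwise-unused low bits, e.g. (illustration only, not used
 * by the driver):
 *
 *	u64 encoded = ((u64)page_to_pfn(page) << PAGE_SHIFT) | status;
 */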
261  
262  struct vmballoon {
263  	/**
264  	 * @max_page_size: maximum supported page size for ballooning.
265  	 *
266  	 * Protected by @conf_sem
267  	 */
268  	enum vmballoon_page_size_type max_page_size;
269  
270  	/**
271  	 * @size: balloon actual size in basic page size (frames).
272  	 *
273  	 * While we currently do not support sizes bigger than 32-bit, use
274  	 * 64 bits in preparation for future support.
275  	 */
276  	atomic64_t size;
277  
278  	/**
279  	 * @target: balloon target size in basic page size (frames).
280  	 *
281  	 * We do not protect the target under the assumption that setting the
282  	 * value is always done through a single write. If this assumption ever
283  	 * breaks, we would have to use X_ONCE for accesses and suffer less
284  	 * optimized code. Although we may read a stale target value if multiple
285  	 * accesses happen at once, the performance impact should be minor.
286  	 */
287  	unsigned long target;
288  
289  	/**
290  	 * @reset_required: reset flag
291  	 *
292  	 * Setting this flag may introduce races, but the code is expected to
293  	 * handle them gracefully. In the worst case, another operation will
294  	 * fail as reset did not take place. Clearing the flag is done while
295  	 * holding @conf_sem for write.
296  	 */
297  	bool reset_required;
298  
299  	/**
300  	 * @capabilities: hypervisor balloon capabilities.
301  	 *
302  	 * Protected by @conf_sem.
303  	 */
304  	unsigned long capabilities;
305  
306  	/**
307  	 * @batch_page: pointer to communication batch page.
308  	 *
309  	 * When batching is used, batch_page points to a page, which holds up to
310  	 * %VMW_BALLOON_BATCH_MAX_PAGES entries for locking or unlocking.
311  	 */
312  	struct vmballoon_batch_entry *batch_page;
313  
314  	/**
315  	 * @batch_max_pages: maximum pages that can be locked/unlocked.
316  	 *
317  	 * Indicates the number of pages that the hypervisor can lock or unlock
318  	 * at once, according to whether batching is enabled. If batching is
319  	 * disabled, only a single page can be locked/unlocked in each operation.
320  	 *
321  	 * Protected by @conf_sem.
322  	 */
323  	unsigned int batch_max_pages;
324  
325  	/**
326  	 * @page: page to be locked/unlocked by the hypervisor
327  	 *
328  	 * @page is only used when batching is disabled and a single page is
329  	 * reclaimed on each iteration.
330  	 *
331  	 * Protected by @comm_lock.
332  	 */
333  	struct page *page;
334  
335  	/**
336  	 * @shrink_timeout: timeout until the next inflation.
337  	 *
338  	 * After a shrink event, indicates the time in jiffies after which
339  	 * inflation is allowed again. Can be written concurrently with reads,
340  	 * so must use READ_ONCE/WRITE_ONCE when accessing.
341  	 */
342  	unsigned long shrink_timeout;
343  
344  	/* statistics */
345  	struct vmballoon_stats *stats;
346  
347  	/**
348  	 * @b_dev_info: balloon device information descriptor.
349  	 */
350  	struct balloon_dev_info b_dev_info;
351  
352  	struct delayed_work dwork;
353  
354  	/**
355  	 * @huge_pages: list of the inflated 2MB pages.
356  	 *
357  	 * Protected by @b_dev_info.pages_lock .
358  	 */
359  	struct list_head huge_pages;
360  
361  	/**
362  	 * @vmci_doorbell: VMCI doorbell handle.
363  	 *
364  	 * Protected by @conf_sem.
365  	 */
366  	struct vmci_handle vmci_doorbell;
367  
368  	/**
369  	 * @conf_sem: semaphore to protect the configuration and the statistics.
370  	 */
371  	struct rw_semaphore conf_sem;
372  
373  	/**
374  	 * @comm_lock: lock to protect the communication with the host.
375  	 *
376  	 * Lock ordering: @conf_sem -> @comm_lock .
377  	 */
378  	spinlock_t comm_lock;
379  
380  	/**
381  	 * @shrinker: shrinker interface that is used to avoid over-inflation.
382  	 */
383  	struct shrinker shrinker;
384  
385  	/**
386  	 * @shrinker_registered: whether the shrinker was registered.
387  	 *
388  	 * The shrinker interface does not gracefully handle the removal of a
389  	 * shrinker that was never registered. This indication simplifies the
390  	 * unregistration process.
391  	 */
392  	bool shrinker_registered;
393  };
394  
395  static struct vmballoon balloon;
396  
397  struct vmballoon_stats {
398  	/* timer / doorbell operations */
399  	atomic64_t general_stat[VMW_BALLOON_STAT_NUM];
400  
401  	/* allocation statistics for huge and small pages */
402  	atomic64_t
403  	       page_stat[VMW_BALLOON_PAGE_STAT_NUM][VMW_BALLOON_NUM_PAGE_SIZES];
404  
405  	/* Monitor operations: total operations, and failures */
406  	atomic64_t ops[VMW_BALLOON_CMD_NUM][VMW_BALLOON_OP_STAT_TYPES];
407  };
408  
409  static inline bool is_vmballoon_stats_on(void)
410  {
411  	return IS_ENABLED(CONFIG_DEBUG_FS) &&
412  		static_branch_unlikely(&balloon_stat_enabled);
413  }
414  
415  static inline void vmballoon_stats_op_inc(struct vmballoon *b, unsigned int op,
416  					  enum vmballoon_op_stat_type type)
417  {
418  	if (is_vmballoon_stats_on())
419  		atomic64_inc(&b->stats->ops[op][type]);
420  }
421  
422  static inline void vmballoon_stats_gen_inc(struct vmballoon *b,
423  					   enum vmballoon_stat_general stat)
424  {
425  	if (is_vmballoon_stats_on())
426  		atomic64_inc(&b->stats->general_stat[stat]);
427  }
428  
429  static inline void vmballoon_stats_gen_add(struct vmballoon *b,
430  					   enum vmballoon_stat_general stat,
431  					   unsigned int val)
432  {
433  	if (is_vmballoon_stats_on())
434  		atomic64_add(val, &b->stats->general_stat[stat]);
435  }
436  
437  static inline void vmballoon_stats_page_inc(struct vmballoon *b,
438  					    enum vmballoon_stat_page stat,
439  					    enum vmballoon_page_size_type size)
440  {
441  	if (is_vmballoon_stats_on())
442  		atomic64_inc(&b->stats->page_stat[stat][size]);
443  }
444  
445  static inline void vmballoon_stats_page_add(struct vmballoon *b,
446  					    enum vmballoon_stat_page stat,
447  					    enum vmballoon_page_size_type size,
448  					    unsigned int val)
449  {
450  	if (is_vmballoon_stats_on())
451  		atomic64_add(val, &b->stats->page_stat[stat][size]);
452  }
453  
454  static inline unsigned long
455  __vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1,
456  		unsigned long arg2, unsigned long *result)
457  {
458  	unsigned long status, dummy1, dummy2, dummy3, local_result;
459  
460  	vmballoon_stats_op_inc(b, cmd, VMW_BALLOON_OP_STAT);
461  
462  	asm volatile ("inl %%dx" :
463  		"=a"(status),
464  		"=c"(dummy1),
465  		"=d"(dummy2),
466  		"=b"(local_result),
467  		"=S"(dummy3) :
468  		"0"(VMW_BALLOON_HV_MAGIC),
469  		"1"(cmd),
470  		"2"(VMW_BALLOON_HV_PORT),
471  		"3"(arg1),
472  		"4"(arg2) :
473  		"memory");
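	/*
	 * Register usage of the backdoor call above: %eax holds the
	 * magic on entry and the status on return, %ecx holds the
	 * command (and, for VMW_BALLOON_CMD_START, the capabilities on
	 * return), %edx holds the I/O port, %ebx holds arg1 and the
	 * result, and %esi holds arg2.
	 */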
474  
475  	/* update the result if needed */
476  	if (result)
477  		*result = (cmd == VMW_BALLOON_CMD_START) ? dummy1 :
478  							   local_result;
479  
480  	/* update target when applicable */
481  	if (status == VMW_BALLOON_SUCCESS &&
482  	    ((1ul << cmd) & VMW_BALLOON_CMD_WITH_TARGET_MASK))
483  		WRITE_ONCE(b->target, local_result);
484  
485  	if (status != VMW_BALLOON_SUCCESS &&
486  	    status != VMW_BALLOON_SUCCESS_WITH_CAPABILITIES) {
487  		vmballoon_stats_op_inc(b, cmd, VMW_BALLOON_OP_FAIL_STAT);
488  		pr_debug("%s: %s [0x%lx,0x%lx) failed, returned %ld\n",
489  			 __func__, vmballoon_cmd_names[cmd], arg1, arg2,
490  			 status);
491  	}
492  
493  	/* mark reset required accordingly */
494  	if (status == VMW_BALLOON_ERROR_RESET)
495  		b->reset_required = true;
496  
497  	return status;
498  }
499  
500  static __always_inline unsigned long
501  vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1,
502  	      unsigned long arg2)
503  {
504  	unsigned long dummy;
505  
506  	return __vmballoon_cmd(b, cmd, arg1, arg2, &dummy);
507  }
508  
509  /*
510   * Send "start" command to the host, communicating supported version
511   * of the protocol.
512   */
513  static int vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
514  {
515  	unsigned long status, capabilities;
516  
517  	status = __vmballoon_cmd(b, VMW_BALLOON_CMD_START, req_caps, 0,
518  				 &capabilities);
519  
520  	switch (status) {
521  	case VMW_BALLOON_SUCCESS_WITH_CAPABILITIES:
522  		b->capabilities = capabilities;
523  		break;
524  	case VMW_BALLOON_SUCCESS:
525  		b->capabilities = VMW_BALLOON_BASIC_CMDS;
526  		break;
527  	default:
528  		return -EIO;
529  	}
530  
531  	/*
532  	 * 2MB pages are only supported with batching. If batching is for some
533  	 * reason disabled, do not use 2MB pages, since otherwise the legacy
534  	 * mechanism is used with 2MB pages, causing a failure.
535  	 */
536  	b->max_page_size = VMW_BALLOON_4K_PAGE;
537  	if ((b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) &&
538  	    (b->capabilities & VMW_BALLOON_BATCHED_CMDS))
539  		b->max_page_size = VMW_BALLOON_2M_PAGE;
540  
541  
542  	return 0;
543  }
544  
545  /**
546   * vmballoon_send_guest_id - communicate guest type to the host.
547   *
548   * @b: pointer to the balloon.
549   *
550   * Communicate guest type to the host so that it can adjust ballooning
551   * algorithm to the one most appropriate for the guest. This command
552   * is normally issued after sending "start" command and is part of
553   * standard reset sequence.
554   *
555   * Return: zero on success or appropriate error code.
556   */
557  static int vmballoon_send_guest_id(struct vmballoon *b)
558  {
559  	unsigned long status;
560  
561  	status = vmballoon_cmd(b, VMW_BALLOON_CMD_GUEST_ID,
562  			       VMW_BALLOON_GUEST_ID, 0);
563  
564  	return status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
565  }
566  
567  /**
568   * vmballoon_page_order() - return the order of the page
569   * @page_size: the size of the page.
570   *
571   * Return: the allocation order.
572   */
573  static inline
574  unsigned int vmballoon_page_order(enum vmballoon_page_size_type page_size)
575  {
576  	return page_size == VMW_BALLOON_2M_PAGE ? VMW_BALLOON_2M_ORDER : 0;
577  }
578  
579  /**
580   * vmballoon_page_in_frames() - returns the number of frames in a page.
581   * @page_size: the size of the page.
582   *
583   * Return: the number of 4k frames.
584   */
585  static inline unsigned int
586  vmballoon_page_in_frames(enum vmballoon_page_size_type page_size)
587  {
588  	return 1 << vmballoon_page_order(page_size);
589  }
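/*
 * For example, on x86-64 with 4KB base pages, PMD_SHIFT is 21, so
 * VMW_BALLOON_2M_ORDER is 21 - 12 = 9 and a single 2MB page covers
 * 1 << 9 = 512 4KB frames.
 */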
590  
591  /**
592   * vmballoon_mark_page_offline() - mark a page as offline
593   * @page: pointer for the page.
594   * @page_size: the size of the page.
595   */
596  static void
597  vmballoon_mark_page_offline(struct page *page,
598  			    enum vmballoon_page_size_type page_size)
599  {
600  	int i;
601  
602  	for (i = 0; i < vmballoon_page_in_frames(page_size); i++)
603  		__SetPageOffline(page + i);
604  }
605  
606  /**
607   * vmballoon_mark_page_online() - mark a page as online
608   * @page: pointer for the page.
609   * @page_size: the size of the page.
610   */
611  static void
612  vmballoon_mark_page_online(struct page *page,
613  			   enum vmballoon_page_size_type page_size)
614  {
615  	int i;
616  
617  	for (i = 0; i < vmballoon_page_in_frames(page_size); i++)
618  		__ClearPageOffline(page + i);
619  }
620  
621  /**
622   * vmballoon_send_get_target() - Retrieve desired balloon size from the host.
623   *
624   * @b: pointer to the balloon.
625   *
626   * Return: zero on success, -EINVAL if the limit does not fit in 32 bits as
627   * required by the host-guest protocol, and -EIO if an error occurred while
628   * communicating with the host.
629   */
630  static int vmballoon_send_get_target(struct vmballoon *b)
631  {
632  	unsigned long status;
633  	unsigned long limit;
634  
635  	limit = totalram_pages();
636  
637  	/* Ensure limit fits in 32-bits if 64-bit targets are not supported */
638  	if (!(b->capabilities & VMW_BALLOON_64_BIT_TARGET) &&
639  	    limit != (u32)limit)
640  		return -EINVAL;
641  
642  	status = vmballoon_cmd(b, VMW_BALLOON_CMD_GET_TARGET, limit, 0);
643  
644  	return status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
645  }
646  
647  /**
648   * vmballoon_alloc_page_list - allocates a list of pages.
649   *
650   * @b: pointer to the balloon.
651   * @ctl: pointer for the %struct vmballoon_ctl, which defines the operation.
652   * @req_n_pages: the number of requested pages.
653   *
654   * Tries to allocate @req_n_pages, adds them to the list of balloon pages in
655   * @ctl.pages and updates @ctl.n_pages to reflect the number of pages.
656   *
657   * Return: zero on success or error code otherwise.
658   */
659  static int vmballoon_alloc_page_list(struct vmballoon *b,
660  				     struct vmballoon_ctl *ctl,
661  				     unsigned int req_n_pages)
662  {
663  	struct page *page;
664  	unsigned int i;
665  
666  	for (i = 0; i < req_n_pages; i++) {
667  		/*
668  		 * First check if we happen to have pages that were allocated
669  		 * before. This happens when a 2MB page was rejected during
670  		 * inflation by the hypervisor, and then split into 4KB pages.
671  		 */
672  		if (!list_empty(&ctl->prealloc_pages)) {
673  			page = list_first_entry(&ctl->prealloc_pages,
674  						struct page, lru);
675  			list_del(&page->lru);
676  		} else {
677  			if (ctl->page_size == VMW_BALLOON_2M_PAGE)
678  				page = alloc_pages(__GFP_HIGHMEM|__GFP_NOWARN|
679  					__GFP_NOMEMALLOC, VMW_BALLOON_2M_ORDER);
680  			else
681  				page = balloon_page_alloc();
682  
683  			vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_ALLOC,
684  						 ctl->page_size);
685  		}
686  
687  		if (page) {
688  			/* Success. Add the page to the list and continue. */
689  			list_add(&page->lru, &ctl->pages);
690  			continue;
691  		}
692  
693  		/* Allocation failed. Update statistics and stop. */
694  		vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_ALLOC_FAIL,
695  					 ctl->page_size);
696  		break;
697  	}
698  
699  	ctl->n_pages = i;
700  
701  	return req_n_pages == ctl->n_pages ? 0 : -ENOMEM;
702  }
703  
704  /**
705   * vmballoon_handle_one_result - Handle lock/unlock result for a single page.
706   *
707   * @b: pointer for %struct vmballoon.
708   * @page: pointer for the page whose result should be handled.
709   * @page_size: size of the page.
710   * @status: status of the operation as provided by the hypervisor.
711   */
712  static int vmballoon_handle_one_result(struct vmballoon *b, struct page *page,
713  				       enum vmballoon_page_size_type page_size,
714  				       unsigned long status)
715  {
716  	/* On success do nothing. The page is already on the balloon list. */
717  	if (likely(status == VMW_BALLOON_SUCCESS))
718  		return 0;
719  
720  	pr_debug("%s: failed comm pfn %lx status %lu page_size %s\n", __func__,
721  		 page_to_pfn(page), status,
722  		 vmballoon_page_size_names[page_size]);
723  
724  	/* Error occurred */
725  	vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC,
726  				 page_size);
727  
728  	return -EIO;
729  }
730  
731  /**
732   * vmballoon_status_page - returns the status of (un)lock operation
733   *
734   * @b: pointer to the balloon.
735   * @idx: index for the page for which the operation is performed.
736   * @p: pointer to where the page struct is returned.
737   *
738   * Following a lock or unlock operation, returns the status of the operation for
739   * an individual page. Provides the page that the operation was performed on in
740   * the @p argument.
741   *
742   * Returns: The status of a lock or unlock operation for an individual page.
743   */
744  static unsigned long vmballoon_status_page(struct vmballoon *b, int idx,
745  					   struct page **p)
746  {
747  	if (static_branch_likely(&vmw_balloon_batching)) {
748  		/* batching mode */
749  		*p = pfn_to_page(b->batch_page[idx].pfn);
750  		return b->batch_page[idx].status;
751  	}
752  
753  	/* non-batching mode */
754  	*p = b->page;
755  
756  	/*
757  	 * If a failure occurs, the indication will be provided in the status
758  	 * of the entire operation, which is considered before the individual
759  	 * page status. So for non-batching mode, the indication is always of
760  	 * success.
761  	 */
762  	return VMW_BALLOON_SUCCESS;
763  }
764  
765  /**
766   * vmballoon_lock_op - notifies the host about inflated/deflated pages.
767   * @b: pointer to the balloon.
768   * @num_pages: number of inflated/deflated pages.
769   * @page_size: size of the page.
770   * @op: the type of operation (lock or unlock).
771   *
772   * Notify the host about page(s) that were ballooned (or removed from the
773   * balloon) so that the host can use them without fear that the guest will need
774   * them (or stop using them since the VM does). The host may reject some pages;
775   * we need to check the return value and maybe submit a different page. In
776   * non-batching mode, the page that is inflated/deflated is pointed to by @b->page.
777   *
778   * Return: result as provided by the hypervisor.
779   */
780  static unsigned long vmballoon_lock_op(struct vmballoon *b,
781  				       unsigned int num_pages,
782  				       enum vmballoon_page_size_type page_size,
783  				       enum vmballoon_op op)
784  {
785  	unsigned long cmd, pfn;
786  
787  	lockdep_assert_held(&b->comm_lock);
788  
789  	if (static_branch_likely(&vmw_balloon_batching)) {
790  		if (op == VMW_BALLOON_INFLATE)
791  			cmd = page_size == VMW_BALLOON_2M_PAGE ?
792  				VMW_BALLOON_CMD_BATCHED_2M_LOCK :
793  				VMW_BALLOON_CMD_BATCHED_LOCK;
794  		else
795  			cmd = page_size == VMW_BALLOON_2M_PAGE ?
796  				VMW_BALLOON_CMD_BATCHED_2M_UNLOCK :
797  				VMW_BALLOON_CMD_BATCHED_UNLOCK;
798  
799  		pfn = PHYS_PFN(virt_to_phys(b->batch_page));
800  	} else {
801  		cmd = op == VMW_BALLOON_INFLATE ? VMW_BALLOON_CMD_LOCK :
802  						  VMW_BALLOON_CMD_UNLOCK;
803  		pfn = page_to_pfn(b->page);
804  
805  		/* In non-batching mode, PFNs must fit in 32-bit */
806  		if (unlikely(pfn != (u32)pfn))
807  			return VMW_BALLOON_ERROR_PPN_INVALID;
808  	}
809  
810  	return vmballoon_cmd(b, cmd, pfn, num_pages);
811  }
812  
813  /**
814   * vmballoon_add_page - adds a page towards lock/unlock operation.
815   *
816   * @b: pointer to the balloon.
817   * @idx: index of the page to be ballooned in this batch.
818   * @p: pointer to the page that is about to be ballooned.
819   *
820   * Adds the page to be ballooned. Must be called while holding @comm_lock.
821   */
822  static void vmballoon_add_page(struct vmballoon *b, unsigned int idx,
823  			       struct page *p)
824  {
825  	lockdep_assert_held(&b->comm_lock);
826  
827  	if (static_branch_likely(&vmw_balloon_batching))
828  		b->batch_page[idx] = (struct vmballoon_batch_entry)
829  					{ .pfn = page_to_pfn(p) };
830  	else
831  		b->page = p;
832  }
833  
834  /**
835   * vmballoon_lock - lock or unlock a batch of pages.
836   *
837   * @b: pointer to the balloon.
838   * @ctl: pointer for the %struct vmballoon_ctl, which defines the operation.
839   *
840   * Notifies the host about ballooned pages (after inflation or deflation,
841   * according to @ctl). If the host rejects a page, it is put on the
842   * @ctl refused list. These refused pages are then released when moving to the
843   * next page size.
844   *
845   * Note that we neither free the pages here nor put them back on the ballooned
846   * pages list. Instead we queue them for later processing. We do that for several
847   * reasons. First, we do not want to free the page under the lock. Second, it
848   * allows us to unify the handling of lock and unlock. In the inflate case, the
849   * caller will check if there are too many refused pages and release them.
850   * Although it is not identical to the past behavior, it should not affect
851   * performance.
852   */
853  static int vmballoon_lock(struct vmballoon *b, struct vmballoon_ctl *ctl)
854  {
855  	unsigned long batch_status;
856  	struct page *page;
857  	unsigned int i, num_pages;
858  
859  	num_pages = ctl->n_pages;
860  	if (num_pages == 0)
861  		return 0;
862  
863  	/* communication with the host is done under the communication lock */
864  	spin_lock(&b->comm_lock);
865  
866  	i = 0;
867  	list_for_each_entry(page, &ctl->pages, lru)
868  		vmballoon_add_page(b, i++, page);
869  
870  	batch_status = vmballoon_lock_op(b, ctl->n_pages, ctl->page_size,
871  					 ctl->op);
872  
873  	/*
874  	 * Iterate over the pages in the provided list. Since we are changing
875  	 * @ctl->n_pages, we saved the original value in @num_pages and use
876  	 * it to bound the loop.
877  	 */
878  	for (i = 0; i < num_pages; i++) {
879  		unsigned long status;
880  
881  		status = vmballoon_status_page(b, i, &page);
882  
883  		/*
884  		 * Failure of the whole batch overrides the result of a single
885  		 * operation.
886  		 */
887  		if (batch_status != VMW_BALLOON_SUCCESS)
888  			status = batch_status;
889  
890  		/* Continue if no error happened */
891  		if (!vmballoon_handle_one_result(b, page, ctl->page_size,
892  						 status))
893  			continue;
894  
895  		/*
896  		 * An error occurred. Move the page to the refused list and
897  		 * update the page counts.
898  		 */
899  		list_move(&page->lru, &ctl->refused_pages);
900  		ctl->n_pages--;
901  		ctl->n_refused_pages++;
902  	}
903  
904  	spin_unlock(&b->comm_lock);
905  
906  	return batch_status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
907  }
908  
909  /**
910   * vmballoon_release_page_list() - Releases a page list
911   *
912   * @page_list: list of pages to release.
913   * @n_pages: pointer to the number of pages.
914   * @page_size: whether the pages in the list are 2MB (or else 4KB).
915   *
916   * Releases the list of pages and zeros the number of pages.
917   */
918  static void vmballoon_release_page_list(struct list_head *page_list,
919  				       int *n_pages,
920  				       enum vmballoon_page_size_type page_size)
921  {
922  	struct page *page, *tmp;
923  
924  	list_for_each_entry_safe(page, tmp, page_list, lru) {
925  		list_del(&page->lru);
926  		__free_pages(page, vmballoon_page_order(page_size));
927  	}
928  
929  	if (n_pages)
930  		*n_pages = 0;
931  }
932  
933  
934  /*
935   * Release pages that were allocated while attempting to inflate the
936   * balloon but were refused by the host for one reason or another.
937   */
938  static void vmballoon_release_refused_pages(struct vmballoon *b,
939  					    struct vmballoon_ctl *ctl)
940  {
941  	vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_REFUSED_FREE,
942  				 ctl->page_size);
943  
944  	vmballoon_release_page_list(&ctl->refused_pages, &ctl->n_refused_pages,
945  				    ctl->page_size);
946  }
947  
948  /**
949   * vmballoon_change - retrieve the required balloon change
950   *
951   * @b: pointer for the balloon.
952   *
953   * Return: the required change for the balloon size. A positive number
954   * indicates inflation, a negative number indicates a deflation.
955   */
956  static int64_t vmballoon_change(struct vmballoon *b)
957  {
958  	int64_t size, target;
959  
960  	size = atomic64_read(&b->size);
961  	target = READ_ONCE(b->target);
962  
963  	/*
964  	 * We must cast to int64_t first because of the int sizes.
965  	 * Otherwise we might get huge positives instead of negatives.
966  	 */
967  
968  	if (b->reset_required)
969  		return 0;
970  
971  	/* consider a 2MB slack on deflate, unless the balloon is emptied */
972  	if (target < size && target != 0 &&
973  	    size - target < vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE))
974  		return 0;
975  
976  	/* If an out-of-memory recently occurred, inflation is disallowed. */
977  	if (target > size && time_before(jiffies, READ_ONCE(b->shrink_timeout)))
978  		return 0;
979  
980  	return target - size;
981  }
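/*
 * Worked example (hypothetical numbers, assuming 4KB base pages so the
 * 2MB slack is 512 frames, and no recent shrink event): with
 * size = 1000 frames and target = 1200, the function returns +200
 * (inflate). With size = 1000 and target = 900, the 100-frame
 * difference is below the slack, so it returns 0 and no deflation
 * takes place.
 */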
982  
983  /**
984   * vmballoon_enqueue_page_list() - Enqueues list of pages after inflation.
985   *
986   * @b: pointer to balloon.
987   * @pages: list of pages to enqueue.
988   * @n_pages: pointer to number of pages in list. The value is zeroed.
989   * @page_size: whether the pages are 2MB or 4KB pages.
990   *
991   * Enqueues the provided list of pages in the ballooned page list, clears the
992   * list and zeroes the number of pages that was provided.
993   */
994  static void vmballoon_enqueue_page_list(struct vmballoon *b,
995  					struct list_head *pages,
996  					unsigned int *n_pages,
997  					enum vmballoon_page_size_type page_size)
998  {
999  	unsigned long flags;
1000  	struct page *page;
1001  
1002  	if (page_size == VMW_BALLOON_4K_PAGE) {
1003  		balloon_page_list_enqueue(&b->b_dev_info, pages);
1004  	} else {
1005  		/*
1006  		 * Keep the huge pages in a local list which is not available
1007  		 * for the balloon compaction mechanism.
1008  		 */
1009  		spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
1010  
1011  		list_for_each_entry(page, pages, lru) {
1012  			vmballoon_mark_page_offline(page, VMW_BALLOON_2M_PAGE);
1013  		}
1014  
1015  		list_splice_init(pages, &b->huge_pages);
1016  		__count_vm_events(BALLOON_INFLATE, *n_pages *
1017  				  vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE));
1018  		spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);
1019  	}
1020  
1021  	*n_pages = 0;
1022  }
1023  
1024  /**
1025   * vmballoon_dequeue_page_list() - Dequeues page lists for deflation.
1026   *
1027   * @b: pointer to balloon.
1028   * @pages: list into which the dequeued pages are placed.
1029   * @n_pages: pointer to the number of pages. Set to the number of dequeued pages.
1030   * @page_size: whether the pages are 2MB or 4KB pages.
1031   * @n_req_pages: the number of requested pages.
1032   *
1033   * Dequeues the number of requested pages from the balloon for deflation. The
1034   * number of dequeued pages may be lower, if not enough pages in the requested
1035   * size are available.
1036   */
1037  static void vmballoon_dequeue_page_list(struct vmballoon *b,
1038  					struct list_head *pages,
1039  					unsigned int *n_pages,
1040  					enum vmballoon_page_size_type page_size,
1041  					unsigned int n_req_pages)
1042  {
1043  	struct page *page, *tmp;
1044  	unsigned int i = 0;
1045  	unsigned long flags;
1046  
1047  	/* In the case of 4k pages, use the compaction infrastructure */
1048  	if (page_size == VMW_BALLOON_4K_PAGE) {
1049  		*n_pages = balloon_page_list_dequeue(&b->b_dev_info, pages,
1050  						     n_req_pages);
1051  		return;
1052  	}
1053  
1054  	/* 2MB pages */
1055  	spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
1056  	list_for_each_entry_safe(page, tmp, &b->huge_pages, lru) {
1057  		vmballoon_mark_page_online(page, VMW_BALLOON_2M_PAGE);
1058  
1059  		list_move(&page->lru, pages);
1060  		if (++i == n_req_pages)
1061  			break;
1062  	}
1063  
1064  	__count_vm_events(BALLOON_DEFLATE,
1065  			  i * vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE));
1066  	spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);
1067  	*n_pages = i;
1068  }
1069  
1070  /**
1071   * vmballoon_split_refused_pages() - Split the 2MB refused pages to 4k.
1072   *
1073   * If inflation of 2MB pages was denied by the hypervisor, it is likely to be
1074   * due to one or a few 4KB pages. These 2MB pages may keep being allocated and
1075   * then refused. To prevent this case, this function splits the refused
1076   * pages into 4KB pages and adds them to the @prealloc_pages list.
1077   *
1078   * @ctl: pointer for the %struct vmballoon_ctl, which defines the operation.
1079   */
1080  static void vmballoon_split_refused_pages(struct vmballoon_ctl *ctl)
1081  {
1082  	struct page *page, *tmp;
1083  	unsigned int i, order;
1084  
1085  	order = vmballoon_page_order(ctl->page_size);
1086  
1087  	list_for_each_entry_safe(page, tmp, &ctl->refused_pages, lru) {
1088  		list_del(&page->lru);
1089  		split_page(page, order);
1090  		for (i = 0; i < (1 << order); i++)
1091  			list_add(&page[i].lru, &ctl->prealloc_pages);
1092  	}
1093  	ctl->n_refused_pages = 0;
1094  }
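/*
 * For example, a refused 2MB page (order 9 with 4KB base pages) is
 * split into 512 individual 4KB pages, which the next allocation pass
 * consumes from @prealloc_pages before falling back to the allocator.
 */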
1095  
1096  /**
1097   * vmballoon_inflate() - Inflate the balloon towards its target size.
1098   *
1099   * @b: pointer to the balloon.
1100   */
1101  static void vmballoon_inflate(struct vmballoon *b)
1102  {
1103  	int64_t to_inflate_frames;
1104  	struct vmballoon_ctl ctl = {
1105  		.pages = LIST_HEAD_INIT(ctl.pages),
1106  		.refused_pages = LIST_HEAD_INIT(ctl.refused_pages),
1107  		.prealloc_pages = LIST_HEAD_INIT(ctl.prealloc_pages),
1108  		.page_size = b->max_page_size,
1109  		.op = VMW_BALLOON_INFLATE
1110  	};
1111  
1112  	while ((to_inflate_frames = vmballoon_change(b)) > 0) {
1113  		unsigned int to_inflate_pages, page_in_frames;
1114  		int alloc_error, lock_error = 0;
1115  
1116  		VM_BUG_ON(!list_empty(&ctl.pages));
1117  		VM_BUG_ON(ctl.n_pages != 0);
1118  
1119  		page_in_frames = vmballoon_page_in_frames(ctl.page_size);
1120  
1121  		to_inflate_pages = min_t(unsigned long, b->batch_max_pages,
1122  					 DIV_ROUND_UP_ULL(to_inflate_frames,
1123  							  page_in_frames));
1124  
1125  		/* Start by allocating */
1126  		alloc_error = vmballoon_alloc_page_list(b, &ctl,
1127  							to_inflate_pages);
1128  
1129  		/* Actually lock the pages by telling the hypervisor */
1130  		lock_error = vmballoon_lock(b, &ctl);
1131  
1132  		/*
1133  		 * If an error indicates that something serious went wrong,
1134  		 * stop the inflation.
1135  		 */
1136  		if (lock_error)
1137  			break;
1138  
1139  		/* Update the balloon size */
1140  		atomic64_add(ctl.n_pages * page_in_frames, &b->size);
1141  
1142  		vmballoon_enqueue_page_list(b, &ctl.pages, &ctl.n_pages,
1143  					    ctl.page_size);
1144  
1145  		/*
1146  		 * If allocation failed or the number of refused pages exceeds
1147  		 * the maximum allowed, move to the next page size.
1148  		 */
1149  		if (alloc_error ||
1150  		    ctl.n_refused_pages >= VMW_BALLOON_MAX_REFUSED) {
1151  			if (ctl.page_size == VMW_BALLOON_4K_PAGE)
1152  				break;
1153  
1154  			/*
1155  			 * Split the refused pages to 4k. This will also empty
1156  			 * the refused pages list.
1157  			 */
1158  			vmballoon_split_refused_pages(&ctl);
1159  			ctl.page_size--;
1160  		}
1161  
1162  		cond_resched();
1163  	}
1164  
1165  	/*
1166  	 * Release pages that were allocated while attempting to inflate the
1167  	 * balloon but were refused by the host for one reason or another,
1168  	 * and update the statistics.
1169  	 */
1170  	if (ctl.n_refused_pages != 0)
1171  		vmballoon_release_refused_pages(b, &ctl);
1172  
1173  	vmballoon_release_page_list(&ctl.prealloc_pages, NULL, ctl.page_size);
1174  }
1175  
1176  /**
1177   * vmballoon_deflate() - Decrease the size of the balloon.
1178   *
1179   * @b: pointer to the balloon
1180   * @n_frames: the number of frames to deflate. If zero, automatically
1181   * calculated according to the target size.
1182   * @coordinated: whether to coordinate with the host
1183   *
1184   * Decrease the size of the balloon allowing guest to use more memory.
1185   *
1186   * Return: The number of deflated frames (i.e., basic page size units)
1187   */
1188  static unsigned long vmballoon_deflate(struct vmballoon *b, uint64_t n_frames,
1189  				       bool coordinated)
1190  {
1191  	unsigned long deflated_frames = 0;
1192  	unsigned long tried_frames = 0;
1193  	struct vmballoon_ctl ctl = {
1194  		.pages = LIST_HEAD_INIT(ctl.pages),
1195  		.refused_pages = LIST_HEAD_INIT(ctl.refused_pages),
1196  		.page_size = VMW_BALLOON_4K_PAGE,
1197  		.op = VMW_BALLOON_DEFLATE
1198  	};
1199  
1200  	/* free pages to reach target */
1201  	while (true) {
1202  		unsigned int to_deflate_pages, n_unlocked_frames;
1203  		unsigned int page_in_frames;
1204  		int64_t to_deflate_frames;
1205  		bool deflated_all;
1206  
1207  		page_in_frames = vmballoon_page_in_frames(ctl.page_size);
1208  
1209  		VM_BUG_ON(!list_empty(&ctl.pages));
1210  		VM_BUG_ON(ctl.n_pages);
1211  		VM_BUG_ON(!list_empty(&ctl.refused_pages));
1212  		VM_BUG_ON(ctl.n_refused_pages);
1213  
1214  		/*
1215  		 * If a specific number of frames was requested, we try to
1216  		 * deflate that many frames. Otherwise, deflation is
1217  		 * performed according to the target and balloon size.
1218  		 */
1219  		to_deflate_frames = n_frames ? n_frames - tried_frames :
1220  					       -vmballoon_change(b);
1221  
1222  		/* break if no work to do */
1223  		if (to_deflate_frames <= 0)
1224  			break;
1225  
1226  		/*
1227  		 * Calculate the number of frames based on current page size,
1228  		 * but limit the deflated frames to a single chunk
1229  		 */
1230  		to_deflate_pages = min_t(unsigned long, b->batch_max_pages,
1231  					 DIV_ROUND_UP_ULL(to_deflate_frames,
1232  							  page_in_frames));
1233  
1234  		/* First take the pages from the balloon pages. */
1235  		vmballoon_dequeue_page_list(b, &ctl.pages, &ctl.n_pages,
1236  					    ctl.page_size, to_deflate_pages);
1237  
1238  		/*
1239  		 * Before pages are moving to the refused list, count their
1240  		 * frames as frames that we tried to deflate.
1241  		 */
1242  		tried_frames += ctl.n_pages * page_in_frames;
1243  
1244  		/*
1245  		 * Unlock the pages by communicating with the hypervisor if the
1246  		 * communication is coordinated (i.e., not pop). We ignore the
1247  		 * return code. Instead we check whether we managed to unlock
1248  		 * all the pages. If we failed, we will move to the next
1249  		 * page size, and eventually try again later.
1250  		 */
1251  		if (coordinated)
1252  			vmballoon_lock(b, &ctl);
1253  
1254  		/*
1255  		 * Check if we deflated enough. We will move to the next page
1256  		 * size if we did not manage to do so. This calculation takes
1257  		 * place now, as once the pages are released, the number of
1258  		 * pages is zeroed.
1259  		 */
1260  		deflated_all = (ctl.n_pages == to_deflate_pages);
1261  
1262  		/* Update local and global counters */
1263  		n_unlocked_frames = ctl.n_pages * page_in_frames;
1264  		atomic64_sub(n_unlocked_frames, &b->size);
1265  		deflated_frames += n_unlocked_frames;
1266  
1267  		vmballoon_stats_page_add(b, VMW_BALLOON_PAGE_STAT_FREE,
1268  					 ctl.page_size, ctl.n_pages);
1269  
1270  		/* free the ballooned pages */
1271  		vmballoon_release_page_list(&ctl.pages, &ctl.n_pages,
1272  					    ctl.page_size);
1273  
1274  		/* Return the refused pages to the ballooned list. */
1275  		vmballoon_enqueue_page_list(b, &ctl.refused_pages,
1276  					    &ctl.n_refused_pages,
1277  					    ctl.page_size);
1278  
1279  		/* If we failed to unlock all the pages, move to next size. */
1280  		if (!deflated_all) {
1281  			if (ctl.page_size == b->max_page_size)
1282  				break;
1283  			ctl.page_size++;
1284  		}
1285  
1286  		cond_resched();
1287  	}
1288  
1289  	return deflated_frames;
1290  }
1291  
1292  /**
1293   * vmballoon_deinit_batching - disables batching mode.
1294   *
1295   * @b: pointer to &struct vmballoon.
1296   *
1297   * Disables batching, by deallocating the page for communication with the
1298   * hypervisor and disabling the static key to indicate that batching is off.
1299   */
1300  static void vmballoon_deinit_batching(struct vmballoon *b)
1301  {
1302  	free_page((unsigned long)b->batch_page);
1303  	b->batch_page = NULL;
1304  	static_branch_disable(&vmw_balloon_batching);
1305  	b->batch_max_pages = 1;
1306  }
1307  
1308  /**
1309   * vmballoon_init_batching - enable batching mode.
1310   *
1311   * @b: pointer to &struct vmballoon.
1312   *
1313   * Enables batching, by allocating a page for communication with the hypervisor
1314   * and enabling the static_key to use batching.
1315   *
1316   * Return: zero on success or an appropriate error-code.
1317   */
1318  static int vmballoon_init_batching(struct vmballoon *b)
1319  {
1320  	struct page *page;
1321  
1322  	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
1323  	if (!page)
1324  		return -ENOMEM;
1325  
1326  	b->batch_page = page_address(page);
1327  	b->batch_max_pages = PAGE_SIZE / sizeof(struct vmballoon_batch_entry);
1328  
1329  	static_branch_enable(&vmw_balloon_batching);
1330  
1331  	return 0;
1332  }
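/*
 * With a 4KB PAGE_SIZE and the 8-byte struct vmballoon_batch_entry,
 * batch_max_pages is 4096 / 8 = 512, matching the "up to 512" pages
 * per batch noted in the command documentation above.
 */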
1333  
1334  /*
1335   * Receive notification and resize balloon
1336   */
1337  static void vmballoon_doorbell(void *client_data)
1338  {
1339  	struct vmballoon *b = client_data;
1340  
1341  	vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_DOORBELL);
1342  
1343  	mod_delayed_work(system_freezable_wq, &b->dwork, 0);
1344  }
1345  
1346  /*
1347   * Clean up vmci doorbell
1348   */
1349  static void vmballoon_vmci_cleanup(struct vmballoon *b)
1350  {
1351  	vmballoon_cmd(b, VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
1352  		      VMCI_INVALID_ID, VMCI_INVALID_ID);
1353  
1354  	if (!vmci_handle_is_invalid(b->vmci_doorbell)) {
1355  		vmci_doorbell_destroy(b->vmci_doorbell);
1356  		b->vmci_doorbell = VMCI_INVALID_HANDLE;
1357  	}
1358  }
1359  
1360  /**
1361   * vmballoon_vmci_init - Initialize vmci doorbell.
1362   *
1363   * @b: pointer to the balloon.
1364   *
1365   * Return: zero on success or when wakeup command not supported. Error-code
1366   * otherwise.
1367   *
1368   * Initialize vmci doorbell, to get notified as soon as balloon changes.
1369   */
1370  static int vmballoon_vmci_init(struct vmballoon *b)
1371  {
1372  	unsigned long error;
1373  
1374  	if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) == 0)
1375  		return 0;
1376  
1377  	error = vmci_doorbell_create(&b->vmci_doorbell, VMCI_FLAG_DELAYED_CB,
1378  				     VMCI_PRIVILEGE_FLAG_RESTRICTED,
1379  				     vmballoon_doorbell, b);
1380  
1381  	if (error != VMCI_SUCCESS)
1382  		goto fail;
1383  
1384  	error =	__vmballoon_cmd(b, VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
1385  				b->vmci_doorbell.context,
1386  				b->vmci_doorbell.resource, NULL);
1387  
1388  	if (error != VMW_BALLOON_SUCCESS)
1389  		goto fail;
1390  
1391  	return 0;
1392  fail:
1393  	vmballoon_vmci_cleanup(b);
1394  	return -EIO;
1395  }
1396  
1397  /**
1398   * vmballoon_pop - Quickly release all pages allocated for the balloon.
1399   *
1400   * @b: pointer to the balloon.
1401   *
1402   * This function is called when the host decides to "reset" the balloon for one
1403   * reason or another. Unlike a normal "deflate" we do not (and shall not) notify
1404   * the host of the pages being released.
1405   */
1406  static void vmballoon_pop(struct vmballoon *b)
1407  {
1408  	unsigned long size;
1409  
1410  	while ((size = atomic64_read(&b->size)))
1411  		vmballoon_deflate(b, size, false);
1412  }
1413  
1414  /*
1415   * Perform the standard reset sequence by popping the balloon (in case it
1416   * is not empty) and then restarting the protocol. This operation normally
1417   * happens when the host responds with VMW_BALLOON_ERROR_RESET to a command.
1418   */
1419  static void vmballoon_reset(struct vmballoon *b)
1420  {
1421  	int error;
1422  
1423  	down_write(&b->conf_sem);
1424  
1425  	vmballoon_vmci_cleanup(b);
1426  
1427  	/* free all pages, skipping monitor unlock */
1428  	vmballoon_pop(b);
1429  
1430  	if (vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES))
1431  		goto unlock;
1432  
1433  	if ((b->capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) {
1434  		if (vmballoon_init_batching(b)) {
1435  			/*
1436  			 * We failed to initialize batching; inform the monitor
1437  			 * about it by sending a null capability.
1438  			 *
1439  			 * The guest will retry in one second.
1440  			 */
1441  			vmballoon_send_start(b, 0);
1442  			goto unlock;
1443  		}
1444  	} else if ((b->capabilities & VMW_BALLOON_BASIC_CMDS) != 0) {
1445  		vmballoon_deinit_batching(b);
1446  	}
1447  
1448  	vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_RESET);
1449  	b->reset_required = false;
1450  
1451  	error = vmballoon_vmci_init(b);
1452  	if (error)
1453  		pr_err_once("failed to initialize vmci doorbell\n");
1454  
1455  	if (vmballoon_send_guest_id(b))
1456  		pr_err_once("failed to send guest ID to the host\n");
1457  
1458  unlock:
1459  	up_write(&b->conf_sem);
1460  }
1461  
1462  /**
1463   * vmballoon_work - periodic balloon worker for reset, inflation and deflation.
1464   *
1465   * @work: pointer to the &work_struct which is provided by the workqueue.
1466   *
1467   * Resets the protocol if needed, gets the new size and adjusts the balloon as
1468   * needed. Repeats in one second.
1469   */
1470  static void vmballoon_work(struct work_struct *work)
1471  {
1472  	struct delayed_work *dwork = to_delayed_work(work);
1473  	struct vmballoon *b = container_of(dwork, struct vmballoon, dwork);
1474  	int64_t change = 0;
1475  
1476  	if (b->reset_required)
1477  		vmballoon_reset(b);
1478  
1479  	down_read(&b->conf_sem);
1480  
1481  	/*
1482  	 * Update the stats while holding the semaphore to ensure that
1483  	 * @stats_enabled is consistent with whether the stats are actually
1484  	 * enabled.
1485  	 */
1486  	vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_TIMER);
1487  
1488  	if (!vmballoon_send_get_target(b))
1489  		change = vmballoon_change(b);
1490  
1491  	if (change != 0) {
1492  		pr_debug("%s - size: %llu, target %lu\n", __func__,
1493  			 atomic64_read(&b->size), READ_ONCE(b->target));
1494  
1495  		if (change > 0)
1496  			vmballoon_inflate(b);
1497  		else  /* (change < 0) */
1498  			vmballoon_deflate(b, 0, true);
1499  	}
1500  
1501  	up_read(&b->conf_sem);
1502  
1503  	/*
1504  	 * We are using a freezable workqueue so that balloon operations are
1505  	 * stopped while the system transitions to/from sleep/hibernation.
1506  	 */
1507  	queue_delayed_work(system_freezable_wq,
1508  			   dwork, round_jiffies_relative(HZ));
1509  
1510  }
1511  
1512  /**
1513   * vmballoon_shrinker_scan() - deflate the balloon due to memory pressure.
1514   * @shrinker: pointer to the balloon shrinker.
1515   * @sc: page reclaim information.
1516   *
1517   * Returns: number of pages that were freed during deflation.
1518   */
1519  static unsigned long vmballoon_shrinker_scan(struct shrinker *shrinker,
1520  					     struct shrink_control *sc)
1521  {
1522  	struct vmballoon *b = &balloon;
1523  	unsigned long deflated_frames;
1524  
1525  	pr_debug("%s - size: %llu\n", __func__, atomic64_read(&b->size));
1526  
1527  	vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_SHRINK);
1528  
1529  	/*
1530  	 * If the lock is also contended for read, we cannot easily reclaim and
1531  	 * we bail out.
1532  	 */
1533  	if (!down_read_trylock(&b->conf_sem))
1534  		return 0;
1535  
1536  	deflated_frames = vmballoon_deflate(b, sc->nr_to_scan, true);
1537  
1538  	vmballoon_stats_gen_add(b, VMW_BALLOON_STAT_SHRINK_FREE,
1539  				deflated_frames);
1540  
1541  	/*
1542  	 * Delay future inflation for some time to mitigate the situations in
1543  	 * which the balloon continuously grows and shrinks. Use WRITE_ONCE() since
1544  	 * the access is asynchronous.
1545  	 */
1546  	WRITE_ONCE(b->shrink_timeout, jiffies + HZ * VMBALLOON_SHRINK_DELAY);
1547  
1548  	up_read(&b->conf_sem);
1549  
1550  	return deflated_frames;
1551  }
1552  
1553  /**
1554   * vmballoon_shrinker_count() - return the number of ballooned pages.
1555   * @shrinker: pointer to the balloon shrinker.
1556   * @sc: page reclaim information.
1557   *
1558   * Returns: number of 4k pages that are allocated for the balloon and can
1559   *	    therefore be reclaimed under pressure.
1560   */
1561  static unsigned long vmballoon_shrinker_count(struct shrinker *shrinker,
1562  					      struct shrink_control *sc)
1563  {
1564  	struct vmballoon *b = &balloon;
1565  
1566  	return atomic64_read(&b->size);
1567  }
1568  
1569  static void vmballoon_unregister_shrinker(struct vmballoon *b)
1570  {
1571  	if (b->shrinker_registered)
1572  		unregister_shrinker(&b->shrinker);
1573  	b->shrinker_registered = false;
1574  }
1575  
1576  static int vmballoon_register_shrinker(struct vmballoon *b)
1577  {
1578  	int r;
1579  
1580  	/* Do nothing if the shrinker is not enabled */
1581  	if (!vmwballoon_shrinker_enable)
1582  		return 0;
1583  
1584  	b->shrinker.scan_objects = vmballoon_shrinker_scan;
1585  	b->shrinker.count_objects = vmballoon_shrinker_count;
1586  	b->shrinker.seeks = DEFAULT_SEEKS;
1587  
1588  	r = register_shrinker(&b->shrinker, "vmw-balloon");
1589  
1590  	if (r == 0)
1591  		b->shrinker_registered = true;
1592  
1593  	return r;
1594  }
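/*
 * A minimal sketch of the same shrinker registration pattern (hypothetical
 * names, not part of this driver):
 *
 *	static struct shrinker example_shrinker = {
 *		.count_objects	= example_count,
 *		.scan_objects	= example_scan,
 *		.seeks		= DEFAULT_SEEKS,
 *	};
 *
 *	int err = register_shrinker(&example_shrinker, "example");
 *
 * The name passed to register_shrinker() identifies the shrinker in the
 * shrinker debugfs interface when CONFIG_SHRINKER_DEBUG is enabled.
 */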
1595  
1596  /*
1597   * DEBUGFS Interface
1598   */
1599  #ifdef CONFIG_DEBUG_FS
1600  
1601  static const char * const vmballoon_stat_page_names[] = {
1602  	[VMW_BALLOON_PAGE_STAT_ALLOC]		= "alloc",
1603  	[VMW_BALLOON_PAGE_STAT_ALLOC_FAIL]	= "allocFail",
1604  	[VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC]	= "errAlloc",
1605  	[VMW_BALLOON_PAGE_STAT_REFUSED_FREE]	= "errFree",
1606  	[VMW_BALLOON_PAGE_STAT_FREE]		= "free"
1607  };
1608  
1609  static const char * const vmballoon_stat_names[] = {
1610  	[VMW_BALLOON_STAT_TIMER]		= "timer",
1611  	[VMW_BALLOON_STAT_DOORBELL]		= "doorbell",
1612  	[VMW_BALLOON_STAT_RESET]		= "reset",
1613  	[VMW_BALLOON_STAT_SHRINK]		= "shrink",
1614  	[VMW_BALLOON_STAT_SHRINK_FREE]		= "shrinkFree"
1615  };
1616  
1617  static int vmballoon_enable_stats(struct vmballoon *b)
1618  {
1619  	int r = 0;
1620  
1621  	down_write(&b->conf_sem);
1622  
1623  	/* did we somehow race with another reader which enabled stats? */
1624  	if (b->stats)
1625  		goto out;
1626  
1627  	b->stats = kzalloc(sizeof(*b->stats), GFP_KERNEL);
1628  
1629  	if (!b->stats) {
1630  		/* allocation failed */
1631  		r = -ENOMEM;
1632  		goto out;
1633  	}
1634  	static_key_enable(&balloon_stat_enabled.key);
1635  out:
1636  	up_write(&b->conf_sem);
1637  	return r;
1638  }
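/*
 * The statistics are guarded by a static key: while they are disabled, the
 * stat-update helpers cost little more than a patched-out branch on the hot
 * path. static_key_enable() above rewrites those branch sites at runtime
 * once the first debugfs read has allocated the counters.
 */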
1639  
1640  /**
1641   * vmballoon_debug_show - shows statistics of balloon operations.
1642   * @f: pointer to the &struct seq_file.
1643   * @offset: ignored.
1644   *
1645   * Provides the statistics that can be accessed through the vmmemctl file in
1646   * debugfs. To avoid the overhead (mainly that of memory) of collecting the
1647   * statistics, we only collect them after the first time the counters are read.
1648   *
1649   * Return: zero on success or an error code.
1650   */
1651  static int vmballoon_debug_show(struct seq_file *f, void *offset)
1652  {
1653  	struct vmballoon *b = f->private;
1654  	int i, j;
1655  
1656  	/* enables stats if they are disabled */
1657  	if (!b->stats) {
1658  		int r = vmballoon_enable_stats(b);
1659  
1660  		if (r)
1661  			return r;
1662  	}
1663  
1664  	/* format capabilities info */
1665  	seq_printf(f, "%-22s: %#16x\n", "balloon capabilities",
1666  		   VMW_BALLOON_CAPABILITIES);
1667  	seq_printf(f, "%-22s: %#16lx\n", "used capabilities", b->capabilities);
1668  	seq_printf(f, "%-22s: %16s\n", "is resetting",
1669  		   b->reset_required ? "y" : "n");
1670  
1671  	/* format size info */
1672  	seq_printf(f, "%-22s: %16lu\n", "target", READ_ONCE(b->target));
1673  	seq_printf(f, "%-22s: %16llu\n", "current", atomic64_read(&b->size));
1674  
1675  	for (i = 0; i < VMW_BALLOON_CMD_NUM; i++) {
1676  		if (vmballoon_cmd_names[i] == NULL)
1677  			continue;
1678  
1679  		seq_printf(f, "%-22s: %16llu (%llu failed)\n",
1680  			   vmballoon_cmd_names[i],
1681  			   atomic64_read(&b->stats->ops[i][VMW_BALLOON_OP_STAT]),
1682  			   atomic64_read(&b->stats->ops[i][VMW_BALLOON_OP_FAIL_STAT]));
1683  	}
1684  
1685  	for (i = 0; i < VMW_BALLOON_STAT_NUM; i++)
1686  		seq_printf(f, "%-22s: %16llu\n",
1687  			   vmballoon_stat_names[i],
1688  			   atomic64_read(&b->stats->general_stat[i]));
1689  
1690  	for (i = 0; i < VMW_BALLOON_PAGE_STAT_NUM; i++) {
1691  		for (j = 0; j < VMW_BALLOON_NUM_PAGE_SIZES; j++)
1692  			seq_printf(f, "%-18s(%s): %16llu\n",
1693  				   vmballoon_stat_page_names[i],
1694  				   vmballoon_page_size_names[j],
1695  				   atomic64_read(&b->stats->page_stat[i][j]));
1696  	}
1697  
1698  	return 0;
1699  }
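/*
 * Illustrative output as produced by the format strings above (the values
 * below are invented for the example; field widths follow the formats):
 *
 *	balloon capabilities  :             0x3e
 *	used capabilities     :              0x6
 *	is resetting          :                n
 *	target                :            65536
 *	current               :            65536
 */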
1700  
1701  DEFINE_SHOW_ATTRIBUTE(vmballoon_debug);
1702  
1703  static void __init vmballoon_debugfs_init(struct vmballoon *b)
1704  {
1705  	debugfs_create_file("vmmemctl", 0444, NULL, b,
1706  			    &vmballoon_debug_fops);
1707  }
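/*
 * With a NULL parent, debugfs_create_file() places the file in the debugfs
 * root, so the statistics are typically read with (assuming debugfs is
 * mounted at the usual location):
 *
 *	# cat /sys/kernel/debug/vmmemctl
 */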
1708  
1709  static void __exit vmballoon_debugfs_exit(struct vmballoon *b)
1710  {
1711  	static_key_disable(&balloon_stat_enabled.key);
1712  	debugfs_lookup_and_remove("vmmemctl", NULL);
1713  	kfree(b->stats);
1714  	b->stats = NULL;
1715  }
1716  
1717  #else
1718  
1719  static inline void vmballoon_debugfs_init(struct vmballoon *b)
1720  {
1721  }
1722  
1723  static inline void vmballoon_debugfs_exit(struct vmballoon *b)
1724  {
1725  }
1726  
1727  #endif	/* CONFIG_DEBUG_FS */
1728  
1729  
1730  #ifdef CONFIG_BALLOON_COMPACTION
1731  /**
1732   * vmballoon_migratepage() - migrates a balloon page.
1733   * @b_dev_info: balloon device information descriptor.
1734   * @newpage: the page to which @page should be migrated.
1735   * @page: a ballooned page that should be migrated.
1736   * @mode: migration mode, ignored.
1737   *
1738   * This function is largely open-coded, but that is dictated by the interface
1739   * that balloon_compaction provides.
1740   *
1741   * Return: zero on success, -EAGAIN when migration cannot be performed
1742   *	   momentarily, and -EBUSY if migration failed and should be retried
1743   *	   with that specific page.
1744   */
1745  static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
1746  				 struct page *newpage, struct page *page,
1747  				 enum migrate_mode mode)
1748  {
1749  	unsigned long status, flags;
1750  	struct vmballoon *b;
1751  	int ret;
1752  
1753  	b = container_of(b_dev_info, struct vmballoon, b_dev_info);
1754  
1755  	/*
1756  	 * If the semaphore is taken, there is an ongoing configuration change
1757  	 * (i.e., a balloon reset), so try again.
1758  	 */
1759  	if (!down_read_trylock(&b->conf_sem))
1760  		return -EAGAIN;
1761  
1762  	spin_lock(&b->comm_lock);
1763  	/*
1764  	 * We must start by deflating and not inflating, as otherwise the
1765  	 * hypervisor may tell us that it has enough memory and the new page is
1766  	 * not needed. Since the old page is isolated, we cannot use the list
1767  	 * interface to unlock it, as the LRU field is used for isolation.
1768  	 * Instead, we use the native interface directly.
1769  	 */
1770  	vmballoon_add_page(b, 0, page);
1771  	status = vmballoon_lock_op(b, 1, VMW_BALLOON_4K_PAGE,
1772  				   VMW_BALLOON_DEFLATE);
1773  
1774  	if (status == VMW_BALLOON_SUCCESS)
1775  		status = vmballoon_status_page(b, 0, &page);
1776  
1777  	/*
1778  	 * If a failure happened, let the migration mechanism know that it
1779  	 * should not retry.
1780  	 */
1781  	if (status != VMW_BALLOON_SUCCESS) {
1782  		spin_unlock(&b->comm_lock);
1783  		ret = -EBUSY;
1784  		goto out_unlock;
1785  	}
1786  
1787  	/*
1788  	 * The page is isolated, so it is safe to delete it without holding
1789  	 * @pages_lock. We keep holding @comm_lock since we will need it in a
1790  	 * second.
1791  	 */
1792  	balloon_page_delete(page);
1793  
1794  	put_page(page);
1795  
1796  	/* Inflate */
1797  	vmballoon_add_page(b, 0, newpage);
1798  	status = vmballoon_lock_op(b, 1, VMW_BALLOON_4K_PAGE,
1799  				   VMW_BALLOON_INFLATE);
1800  
1801  	if (status == VMW_BALLOON_SUCCESS)
1802  		status = vmballoon_status_page(b, 0, &newpage);
1803  
1804  	spin_unlock(&b->comm_lock);
1805  
1806  	if (status != VMW_BALLOON_SUCCESS) {
1807  		/*
1808  		 * A failure happened. While we can deflate the page we just
1809  		 * inflated, this deflation can also encounter an error. Instead
1810  		 * we will decrease the size of the balloon to reflect the
1811  		 * change and report failure.
1812  		 */
1813  		atomic64_dec(&b->size);
1814  		ret = -EBUSY;
1815  	} else {
1816  		/*
1817  		 * Success. Take a reference for the page, and we will add it to
1818  		 * the list after acquiring the lock.
1819  		 */
1820  		get_page(newpage);
1821  		ret = MIGRATEPAGE_SUCCESS;
1822  	}
1823  
1824  	/* Update the balloon list under the @pages_lock */
1825  	spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
1826  
1827  	/*
1828  	 * On inflation success, we already took a reference for the @newpage.
1829  	 * If we succeeded, just insert it into the list and update the statistics
1830  	 * under the lock.
1831  	 */
1832  	if (ret == MIGRATEPAGE_SUCCESS) {
1833  		balloon_page_insert(&b->b_dev_info, newpage);
1834  		__count_vm_event(BALLOON_MIGRATE);
1835  	}
1836  
1837  	/*
1838  	 * We deflated successfully, so regardless of the inflation's success, we
1839  	 * need to reduce the number of isolated_pages.
1840  	 */
1841  	b->b_dev_info.isolated_pages--;
1842  	spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);
1843  
1844  out_unlock:
1845  	up_read(&b->conf_sem);
1846  	return ret;
1847  }
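/*
 * For reference, the synchronization in vmballoon_migratepage() above:
 * @conf_sem is read-held across the whole operation to exclude resets,
 * @comm_lock protects the hypervisor communication (deflate the old page,
 * inflate the new one), and @pages_lock is taken last, only to update the
 * balloon page list and the isolated-pages count.
 */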
1848  
1849  /**
1850   * vmballoon_compaction_init() - initializes compaction for the balloon.
1851   *
1852   * @b: pointer to the balloon.
1853   *
1854   * Sets the migratepage callback in the balloon's device-info descriptor.
1855   * The function performs no allocations and cannot fail, so the caller
1856   * does not need to perform any cleanup.
1857   */
1860  static __init void vmballoon_compaction_init(struct vmballoon *b)
1861  {
1862  	b->b_dev_info.migratepage = vmballoon_migratepage;
1863  }
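/*
 * balloon_devinfo_init() resets the descriptor, including setting
 * ->migratepage to NULL, which is why the assignment above must come after
 * the balloon_devinfo_init() call in vmballoon_init() below.
 */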
1864  
1865  #else /* CONFIG_BALLOON_COMPACTION */
1866  static inline void vmballoon_compaction_init(struct vmballoon *b)
1867  {
1868  }
1869  #endif /* CONFIG_BALLOON_COMPACTION */
1870  
1871  static int __init vmballoon_init(void)
1872  {
1873  	int error;
1874  
1875  	/*
1876  	 * Check if we are running on VMware's hypervisor and bail out
1877  	 * if we are not.
1878  	 */
1879  	if (x86_hyper_type != X86_HYPER_VMWARE)
1880  		return -ENODEV;
1881  
1882  	INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work);
1883  
1884  	error = vmballoon_register_shrinker(&balloon);
1885  	if (error)
1886  		goto fail;
1887  
1888  	/*
1889  	 * Initialization of compaction must be done after the call to
1890  	 * balloon_devinfo_init().
1891  	 */
1892  	balloon_devinfo_init(&balloon.b_dev_info);
1893  	vmballoon_compaction_init(&balloon);
1894  
1895  	INIT_LIST_HEAD(&balloon.huge_pages);
1896  	spin_lock_init(&balloon.comm_lock);
1897  	init_rwsem(&balloon.conf_sem);
1898  	balloon.vmci_doorbell = VMCI_INVALID_HANDLE;
1899  	balloon.batch_page = NULL;
1900  	balloon.page = NULL;
1901  	balloon.reset_required = true;
1902  
1903  	queue_delayed_work(system_freezable_wq, &balloon.dwork, 0);
1904  
1905  	vmballoon_debugfs_init(&balloon);
1906  
1907  	return 0;
1908  fail:
1909  	vmballoon_unregister_shrinker(&balloon);
1910  	return error;
1911  }
1912  
1913  /*
1914   * Using late_initcall() instead of module_init() allows the balloon to use the
1915   * VMCI doorbell even when the balloon is built into the kernel. Otherwise the
1916   * VMCI is probed only after the balloon is initialized. If the balloon is used
1917   * as a module, late_initcall() is equivalent to module_init().
1918   */
1919  late_initcall(vmballoon_init);
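/*
 * For reference: late_initcall() registers at initcall level 7, one level
 * after the device initcalls (level 6) that module_init() maps to in
 * built-in code, which is what lets VMCI probe before the balloon starts.
 */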
1920  
1921  static void __exit vmballoon_exit(void)
1922  {
1923  	vmballoon_unregister_shrinker(&balloon);
1924  	vmballoon_vmci_cleanup(&balloon);
1925  	cancel_delayed_work_sync(&balloon.dwork);
1926  
1927  	vmballoon_debugfs_exit(&balloon);
1928  
1929  	/*
1930  	 * Deallocate all reserved memory, and reset connection with monitor.
1931  	 * Reset connection before deallocating memory to avoid potential for
1932  	 * additional spurious resets from guest touching deallocated pages.
1933  	 */
1934  	vmballoon_send_start(&balloon, 0);
1935  	vmballoon_pop(&balloon);
1936  }
1937  module_exit(vmballoon_exit);
1938