xref: /openbmc/linux/drivers/hv/hv_balloon.c (revision e639c869)
1 /*
2  * Copyright (c) 2012, Microsoft Corporation.
3  *
4  * Author:
5  *   K. Y. Srinivasan <kys@microsoft.com>
6  *
7  * This program is free software; you can redistribute it and/or modify it
8  * under the terms of the GNU General Public License version 2 as published
9  * by the Free Software Foundation.
10  *
11  * This program is distributed in the hope that it will be useful, but
12  * WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
14  * NON INFRINGEMENT.  See the GNU General Public License for more
15  * details.
16  *
17  */
18 
19 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 
21 #include <linux/kernel.h>
22 #include <linux/jiffies.h>
23 #include <linux/mman.h>
24 #include <linux/delay.h>
25 #include <linux/init.h>
26 #include <linux/module.h>
27 #include <linux/slab.h>
28 #include <linux/kthread.h>
29 #include <linux/completion.h>
30 #include <linux/memory_hotplug.h>
31 #include <linux/memory.h>
32 #include <linux/notifier.h>
33 #include <linux/percpu_counter.h>
34 
35 #include <linux/hyperv.h>
36 
37 /*
38  * We begin with definitions supporting the Dynamic Memory protocol
39  * with the host.
40  *
41  * Begin protocol definitions.
42  */
43 
44 
45 
46 /*
47  * Protocol versions. The low word is the minor version, the high word the major
48  * version.
49  *
50  * History:
51  * Initial version 1.0
52  * Changed to 0.1 on 2009/03/25
53  * Changed to 0.2 on 2009/05/14
54  * Changed to 0.3 on 2009/12/03
55  * Changed to 1.0 on 2011/04/05
56  */
57 
58 #define DYNMEM_MAKE_VERSION(Major, Minor) ((__u32)(((Major) << 16) | (Minor)))
59 #define DYNMEM_MAJOR_VERSION(Version) ((__u32)(Version) >> 16)
60 #define DYNMEM_MINOR_VERSION(Version) ((__u32)(Version) & 0xff)
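/*
 * For example, DYNMEM_MAKE_VERSION(2, 0) encodes to 0x00020000, for which
 * DYNMEM_MAJOR_VERSION() returns 2 and DYNMEM_MINOR_VERSION() returns 0.
 */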
61 
62 enum {
63 	DYNMEM_PROTOCOL_VERSION_1 = DYNMEM_MAKE_VERSION(0, 3),
64 	DYNMEM_PROTOCOL_VERSION_2 = DYNMEM_MAKE_VERSION(1, 0),
65 	DYNMEM_PROTOCOL_VERSION_3 = DYNMEM_MAKE_VERSION(2, 0),
66 
67 	DYNMEM_PROTOCOL_VERSION_WIN7 = DYNMEM_PROTOCOL_VERSION_1,
68 	DYNMEM_PROTOCOL_VERSION_WIN8 = DYNMEM_PROTOCOL_VERSION_2,
69 	DYNMEM_PROTOCOL_VERSION_WIN10 = DYNMEM_PROTOCOL_VERSION_3,
70 
71 	DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN10
72 };
73 
74 
75 
76 /*
77  * Message Types
78  */
79 
80 enum dm_message_type {
81 	/*
82 	 * Version 0.3
83 	 */
84 	DM_ERROR			= 0,
85 	DM_VERSION_REQUEST		= 1,
86 	DM_VERSION_RESPONSE		= 2,
87 	DM_CAPABILITIES_REPORT		= 3,
88 	DM_CAPABILITIES_RESPONSE	= 4,
89 	DM_STATUS_REPORT		= 5,
90 	DM_BALLOON_REQUEST		= 6,
91 	DM_BALLOON_RESPONSE		= 7,
92 	DM_UNBALLOON_REQUEST		= 8,
93 	DM_UNBALLOON_RESPONSE		= 9,
94 	DM_MEM_HOT_ADD_REQUEST		= 10,
95 	DM_MEM_HOT_ADD_RESPONSE		= 11,
96 	DM_VERSION_03_MAX		= 11,
97 	/*
98 	 * Version 1.0.
99 	 */
100 	DM_INFO_MESSAGE			= 12,
101 	DM_VERSION_1_MAX		= 12
102 };
103 
104 
105 /*
106  * Structures defining the dynamic memory management
107  * protocol.
108  */
109 
110 union dm_version {
111 	struct {
112 		__u16 minor_version;
113 		__u16 major_version;
114 	};
115 	__u32 version;
116 } __packed;
117 
118 
119 union dm_caps {
120 	struct {
121 		__u64 balloon:1;
122 		__u64 hot_add:1;
123 		/*
124 		 * To support guests that may have alignment
125 		 * limitations on hot-add, the guest can specify
126 		 * its alignment requirements; a value of n
127 		 * represents an alignment of 2^n in megabytes.
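		 * For example, this driver reports 7 in balloon_probe(),
		 * i.e. a 2^7 == 128 MB hot-add alignment.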
128 		 */
129 		__u64 hot_add_alignment:4;
130 		__u64 reservedz:58;
131 	} cap_bits;
132 	__u64 caps;
133 } __packed;
134 
135 union dm_mem_page_range {
136 	struct  {
137 		/*
138 		 * The PFN of the first page in the range.
139 		 * 40 bits is the architectural limit of a PFN
140 		 * for AMD64.
141 		 */
142 		__u64 start_page:40;
143 		/*
144 		 * The number of pages in the range.
145 		 */
146 		__u64 page_cnt:24;
147 	} finfo;
148 	__u64  page_range;
149 } __packed;
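/*
 * With the low-to-high bitfield layout used on the little-endian
 * architectures this driver runs on, a range starting at PFN 0x100000
 * with 512 pages is encoded as page_range == (512ULL << 40) | 0x100000.
 */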
150 
151 
152 
153 /*
154  * The header for all dynamic memory messages:
155  *
156  * type: Type of the message.
157  * size: Size of the message in bytes, including the header.
158  * trans_id: The guest is responsible for manufacturing this ID.
159  */
160 
161 struct dm_header {
162 	__u16 type;
163 	__u16 size;
164 	__u32 trans_id;
165 } __packed;
166 
167 /*
168  * A generic message format for dynamic memory.
169  * Specific message formats are defined later in the file.
170  */
171 
172 struct dm_message {
173 	struct dm_header hdr;
174 	__u8 data[]; /* enclosed message */
175 } __packed;
176 
177 
178 /*
179  * Specific message types supporting the dynamic memory protocol.
180  */
181 
182 /*
183  * Version negotiation message. Sent from the guest to the host.
184  * The guest is free to try different versions until the host
185  * accepts the version.
186  *
187  * dm_version: The protocol version requested.
188  * is_last_attempt: If TRUE, this is the last version guest will request.
189  * reservedz: Reserved field, set to zero.
190  */
191 
192 struct dm_version_request {
193 	struct dm_header hdr;
194 	union dm_version version;
195 	__u32 is_last_attempt:1;
196 	__u32 reservedz:31;
197 } __packed;
198 
199 /*
200  * Version response message; sent from the host to the guest and indicates
201  * whether the host has accepted the version sent by the guest.
202  *
203  * is_accepted: If TRUE, host has accepted the version and the guest
204  * should proceed to the next stage of the protocol. FALSE indicates that
205  * the guest should retry with a different version.
206  *
207  * reservedz: Reserved field, set to zero.
208  */
209 
210 struct dm_version_response {
211 	struct dm_header hdr;
212 	__u64 is_accepted:1;
213 	__u64 reservedz:63;
214 } __packed;
215 
216 /*
217  * Message reporting capabilities. This is sent from the guest to the
218  * host.
219  */
220 
221 struct dm_capabilities {
222 	struct dm_header hdr;
223 	union dm_caps caps;
224 	__u64 min_page_cnt;
225 	__u64 max_page_number;
226 } __packed;
227 
228 /*
229  * Response to the capabilities message. This is sent from the host to the
230  * guest. This message indicates whether the host has accepted the guest's
231  * capabilities. If the host has not accepted, the guest must shut down
232  * the service.
233  *
234  * is_accepted: Indicates if the host has accepted guest's capabilities.
235  * reservedz: Must be 0.
236  */
237 
238 struct dm_capabilities_resp_msg {
239 	struct dm_header hdr;
240 	__u64 is_accepted:1;
241 	__u64 reservedz:63;
242 } __packed;
243 
244 /*
245  * This message is used to report memory pressure from the guest.
246  * This message is not part of any transaction and there is no
247  * response to this message.
248  *
249  * num_avail: Available memory in pages.
250  * num_committed: Committed memory in pages.
251  * page_file_size: The accumulated size of all page files
252  *		   in the system in pages.
253  * zero_free: The number of zero and free pages.
254  * page_file_writes: The writes to the page file in pages.
255  * io_diff: An indicator of file cache efficiency or page file activity,
256  *	    calculated as File Cache Page Fault Count - Page Read Count.
257  *	    This value is in pages.
258  *
259  * Some of these metrics are Windows specific and fortunately
260  * the algorithm on the host side that computes the guest memory
261  * pressure only uses the num_committed value.
262  */
263 
264 struct dm_status {
265 	struct dm_header hdr;
266 	__u64 num_avail;
267 	__u64 num_committed;
268 	__u64 page_file_size;
269 	__u64 zero_free;
270 	__u32 page_file_writes;
271 	__u32 io_diff;
272 } __packed;
273 
274 
275 /*
276  * Message to ask the guest to allocate memory - balloon up message.
277  * This message is sent from the host to the guest. The guest may not be
278  * able to allocate as much memory as requested.
279  *
280  * num_pages: number of pages to allocate.
281  */
282 
283 struct dm_balloon {
284 	struct dm_header hdr;
285 	__u32 num_pages;
286 	__u32 reservedz;
287 } __packed;
288 
289 
290 /*
291  * Balloon response message; this message is sent from the guest
292  * to the host in response to the balloon message.
293  *
294  * reservedz: Reserved; must be set to zero.
295  * more_pages: If FALSE, this is the last message of the transaction.
296  * If TRUE, there will be at least one more message from the guest.
297  *
298  * range_count: The number of ranges in the range array.
299  *
300  * range_array: An array of page ranges returned to the host.
301  *
302  */
303 
304 struct dm_balloon_response {
305 	struct dm_header hdr;
306 	__u32 reservedz;
307 	__u32 more_pages:1;
308 	__u32 range_count:31;
309 	union dm_mem_page_range range_array[];
310 } __packed;
311 
312 /*
313  * Un-balloon message; this message is sent from the host
314  * to the guest to give the guest more memory.
315  *
316  * more_pages: If FALSE, this is the last message of the transaction.
317  * If TRUE, there will be at least one more message from the host.
318  *
319  * reservedz: Reserved; must be set to zero.
320  *
321  * range_count: The number of ranges in the range array.
322  *
323  * range_array: An array of page ranges returned to the host.
324  *
325  */
326 
327 struct dm_unballoon_request {
328 	struct dm_header hdr;
329 	__u32 more_pages:1;
330 	__u32 reservedz:31;
331 	__u32 range_count;
332 	union dm_mem_page_range range_array[];
333 } __packed;
334 
335 /*
336  * Un-balloon response message; this message is sent from the guest
337  * to the host in response to an unballoon request.
338  *
339  */
340 
341 struct dm_unballoon_response {
342 	struct dm_header hdr;
343 } __packed;
344 
345 
346 /*
347  * Hot add request message. Message sent from the host to the guest.
348  *
349  * mem_range: Memory range to hot add.
350  *
351  * On Linux we currently don't support this since we cannot hot add
352  * memory at arbitrary granularity.
353  */
354 
355 struct dm_hot_add {
356 	struct dm_header hdr;
357 	union dm_mem_page_range range;
358 } __packed;
359 
360 /*
361  * Hot add response message.
362  * This message is sent by the guest to report the status of a hot add request.
363  * If page_count is less than the requested page count, then the host should
364  * assume all further hot add requests will fail, since this indicates that
365  * the guest has hit an upper physical memory barrier.
366  *
367  * Hot adds may also fail due to low resources; in this case, the guest must
368  * not complete this message until the hot add can succeed, and the host must
369  * not send a new hot add request until the response is sent.
370  * If VSC fails to hot add memory DYNMEM_NUMBER_OF_UNSUCCESSFUL_HOTADD_ATTEMPTS
371  * times, it fails the request.
372  *
373  *
374  * page_count: number of pages that were successfully hot added.
375  *
376  * result: result of the operation 1: success, 0: failure.
377  *
378  */
379 
380 struct dm_hot_add_response {
381 	struct dm_header hdr;
382 	__u32 page_count;
383 	__u32 result;
384 } __packed;
385 
386 /*
387  * Types of information sent from host to the guest.
388  */
389 
390 enum dm_info_type {
391 	INFO_TYPE_MAX_PAGE_CNT = 0,
392 	MAX_INFO_TYPE
393 };
394 
395 
396 /*
397  * Header for the information message.
398  */
399 
400 struct dm_info_header {
401 	enum dm_info_type type;
402 	__u32 data_size;
403 } __packed;
404 
405 /*
406  * This message is sent from the host to the guest to pass
407  * some relevant information (win8 addition).
408  *
409  * reserved: not used.
410  * info_size: size of the information blob.
411  * info: information blob.
412  */
413 
414 struct dm_info_msg {
415 	struct dm_header hdr;
416 	__u32 reserved;
417 	__u32 info_size;
418 	__u8  info[];
419 };
420 
421 /*
422  * End protocol definitions.
423  */
424 
425 /*
426  * State to manage hot adding memory into the guest.
427  * The range start_pfn : end_pfn specifies the range
428  * that the host has asked us to hot add. The range
429  * start_pfn : ha_end_pfn specifies the range that we have
430  * currently hot added. We hot add in multiples of 128M
431  * chunks; it is possible that we may not be able to bring
432  * online all the pages in the region. The range
433  * covered_start_pfn:covered_end_pfn defines the pages that can
434  * be brought online.
435  */
436 
437 struct hv_hotadd_state {
438 	struct list_head list;
439 	unsigned long start_pfn;
440 	unsigned long covered_start_pfn;
441 	unsigned long covered_end_pfn;
442 	unsigned long ha_end_pfn;
443 	unsigned long end_pfn;
444 	/*
445 	 * A list of gaps.
446 	 */
447 	struct list_head gap_list;
448 };
449 
450 struct hv_hotadd_gap {
451 	struct list_head list;
452 	unsigned long start_pfn;
453 	unsigned long end_pfn;
454 };
455 
456 struct balloon_state {
457 	__u32 num_pages;
458 	struct work_struct wrk;
459 };
460 
461 struct hot_add_wrk {
462 	union dm_mem_page_range ha_page_range;
463 	union dm_mem_page_range ha_region_range;
464 	struct work_struct wrk;
465 };
466 
467 static bool hot_add = true;
468 static bool do_hot_add;
469 /*
470  * Delay reporting memory pressure by
471  * the specified number of seconds.
472  */
473 static uint pressure_report_delay = 45;
474 
475 /*
476  * The last time we posted a pressure report to host.
477  */
478 static unsigned long last_post_time;
479 
480 module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
481 MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
482 
483 module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
484 MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
485 static atomic_t trans_id = ATOMIC_INIT(0);
486 
487 static int dm_ring_size = (5 * PAGE_SIZE);
488 
489 /*
490  * Driver specific state.
491  */
492 
493 enum hv_dm_state {
494 	DM_INITIALIZING = 0,
495 	DM_INITIALIZED,
496 	DM_BALLOON_UP,
497 	DM_BALLOON_DOWN,
498 	DM_HOT_ADD,
499 	DM_INIT_ERROR
500 };
501 
502 
503 static __u8 recv_buffer[PAGE_SIZE];
504 static __u8 *send_buffer;
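/*
 * Both constants below are in pages: with 4K pages, PAGES_IN_2M covers one
 * 2 MB allocation unit and HA_CHUNK covers the 128 MB hot-add granularity
 * advertised to the host.
 */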
505 #define PAGES_IN_2M	512
506 #define HA_CHUNK (32 * 1024)
507 
508 struct hv_dynmem_device {
509 	struct hv_device *dev;
510 	enum hv_dm_state state;
511 	struct completion host_event;
512 	struct completion config_event;
513 
514 	/*
515 	 * Number of pages we have currently ballooned out.
516 	 */
517 	unsigned int num_pages_ballooned;
518 	unsigned int num_pages_onlined;
519 	unsigned int num_pages_added;
520 
521 	/*
522 	 * State to manage the ballooning (up) operation.
523 	 */
524 	struct balloon_state balloon_wrk;
525 
526 	/*
527 	 * State to execute the "hot-add" operation.
528 	 */
529 	struct hot_add_wrk ha_wrk;
530 
531 	/*
532 	 * This state tracks if the host has specified a hot-add
533 	 * region.
534 	 */
535 	bool host_specified_ha_region;
536 
537 	/*
538 	 * State to synchronize hot-add.
539 	 */
540 	struct completion  ol_waitevent;
541 	bool ha_waiting;
542 	/*
543 	 * This thread handles hot-add
544 	 * requests from the host as well as notifying
545 	 * the host with regards to memory pressure in
546 	 * the guest.
547 	 */
548 	struct task_struct *thread;
549 
550 	/*
551 	 * Protects ha_region_list, num_pages_onlined counter and individual
552 	 * regions from ha_region_list.
553 	 */
554 	spinlock_t ha_lock;
555 
556 	/*
557 	 * A list of hot-add regions.
558 	 */
559 	struct list_head ha_region_list;
560 
561 	/*
562 	 * We start with the highest version we can support
563 	 * and downgrade based on the host; we save here the
564 	 * next version to try.
565 	 */
566 	__u32 next_version;
567 
568 	/*
569 	 * The negotiated version agreed by host.
570 	 */
571 	__u32 version;
572 };
573 
574 static struct hv_dynmem_device dm_device;
575 
576 static void post_status(struct hv_dynmem_device *dm);
577 
578 #ifdef CONFIG_MEMORY_HOTPLUG
579 static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
580 			      void *v)
581 {
582 	struct memory_notify *mem = (struct memory_notify *)v;
583 	unsigned long flags;
584 
585 	switch (val) {
586 	case MEM_ONLINE:
587 	case MEM_CANCEL_ONLINE:
588 		if (dm_device.ha_waiting) {
589 			dm_device.ha_waiting = false;
590 			complete(&dm_device.ol_waitevent);
591 		}
592 		break;
593 
594 	case MEM_OFFLINE:
595 		spin_lock_irqsave(&dm_device.ha_lock, flags);
596 		dm_device.num_pages_onlined -= mem->nr_pages;
597 		spin_unlock_irqrestore(&dm_device.ha_lock, flags);
598 		break;
599 	case MEM_GOING_ONLINE:
600 	case MEM_GOING_OFFLINE:
601 	case MEM_CANCEL_OFFLINE:
602 		break;
603 	}
604 	return NOTIFY_OK;
605 }
606 
607 static struct notifier_block hv_memory_nb = {
608 	.notifier_call = hv_memory_notifier,
609 	.priority = 0
610 };
611 
612 /* Check if the particular page is backed and can be onlined, and if so online it. */
613 static void hv_page_online_one(struct hv_hotadd_state *has, struct page *pg)
614 {
615 	unsigned long cur_start_pgp;
616 	unsigned long cur_end_pgp;
617 	struct hv_hotadd_gap *gap;
618 
619 	cur_start_pgp = (unsigned long)pfn_to_page(has->covered_start_pfn);
620 	cur_end_pgp = (unsigned long)pfn_to_page(has->covered_end_pfn);
621 
622 	/* The page is not backed. */
623 	if (((unsigned long)pg < cur_start_pgp) ||
624 	    ((unsigned long)pg >= cur_end_pgp))
625 		return;
626 
627 	/* Check for gaps. */
628 	list_for_each_entry(gap, &has->gap_list, list) {
629 		cur_start_pgp = (unsigned long)
630 			pfn_to_page(gap->start_pfn);
631 		cur_end_pgp = (unsigned long)
632 			pfn_to_page(gap->end_pfn);
633 		if (((unsigned long)pg >= cur_start_pgp) &&
634 		    ((unsigned long)pg < cur_end_pgp)) {
635 			return;
636 		}
637 	}
638 
639 	/* This frame is currently backed; online the page. */
640 	__online_page_set_limits(pg);
641 	__online_page_increment_counters(pg);
642 	__online_page_free(pg);
643 
644 	WARN_ON_ONCE(!spin_is_locked(&dm_device.ha_lock));
645 	dm_device.num_pages_onlined++;
646 }
647 
648 static void hv_bring_pgs_online(struct hv_hotadd_state *has,
649 				unsigned long start_pfn, unsigned long size)
650 {
651 	int i;
652 
653 	pr_debug("Online %lu pages starting at pfn 0x%lx\n", size, start_pfn);
654 	for (i = 0; i < size; i++)
655 		hv_page_online_one(has, pfn_to_page(start_pfn + i));
656 }
657 
658 static void hv_mem_hot_add(unsigned long start, unsigned long size,
659 				unsigned long pfn_count,
660 				struct hv_hotadd_state *has)
661 {
662 	int ret = 0;
663 	int i, nid;
664 	unsigned long start_pfn;
665 	unsigned long processed_pfn;
666 	unsigned long total_pfn = pfn_count;
667 	unsigned long flags;
668 
669 	for (i = 0; i < (size/HA_CHUNK); i++) {
670 		start_pfn = start + (i * HA_CHUNK);
671 
672 		spin_lock_irqsave(&dm_device.ha_lock, flags);
673 		has->ha_end_pfn +=  HA_CHUNK;
674 
675 		if (total_pfn > HA_CHUNK) {
676 			processed_pfn = HA_CHUNK;
677 			total_pfn -= HA_CHUNK;
678 		} else {
679 			processed_pfn = total_pfn;
680 			total_pfn = 0;
681 		}
682 
683 		has->covered_end_pfn +=  processed_pfn;
684 		spin_unlock_irqrestore(&dm_device.ha_lock, flags);
685 
686 		init_completion(&dm_device.ol_waitevent);
687 		dm_device.ha_waiting = !memhp_auto_online;
688 
689 		nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn));
690 		ret = add_memory(nid, PFN_PHYS((start_pfn)),
691 				(HA_CHUNK << PAGE_SHIFT));
692 
693 		if (ret) {
694 			pr_warn("hot_add memory failed error is %d\n", ret);
695 			if (ret == -EEXIST) {
696 				/*
697 				 * This error indicates that the failure
698 				 * is not transient. This is the
699 				 * case where the guest's physical address map
700 				 * precludes hot adding memory. Stop all further
701 				 * memory hot-add.
702 				 */
703 				do_hot_add = false;
704 			}
705 			spin_lock_irqsave(&dm_device.ha_lock, flags);
706 			has->ha_end_pfn -= HA_CHUNK;
707 			has->covered_end_pfn -=  processed_pfn;
708 			spin_unlock_irqrestore(&dm_device.ha_lock, flags);
709 			break;
710 		}
711 
712 		/*
713 		 * Wait for the memory block to be onlined when memory onlining
714 		 * is done outside of kernel (memhp_auto_online). Since the hot
715 		 * add has succeeded, it is ok to proceed even if the pages in
716 		 * the hot added region have not been "onlined" within the
717 		 * allowed time.
718 		 */
719 		if (dm_device.ha_waiting)
720 			wait_for_completion_timeout(&dm_device.ol_waitevent,
721 						    5*HZ);
722 		post_status(&dm_device);
723 	}
724 }
725 
726 static void hv_online_page(struct page *pg)
727 {
728 	struct hv_hotadd_state *has;
729 	unsigned long cur_start_pgp;
730 	unsigned long cur_end_pgp;
731 	unsigned long flags;
732 
733 	spin_lock_irqsave(&dm_device.ha_lock, flags);
734 	list_for_each_entry(has, &dm_device.ha_region_list, list) {
735 		cur_start_pgp = (unsigned long)
736 			pfn_to_page(has->start_pfn);
737 		cur_end_pgp = (unsigned long)pfn_to_page(has->end_pfn);
738 
739 		/* The page belongs to a different HAS. */
740 		if (((unsigned long)pg < cur_start_pgp) ||
741 		    ((unsigned long)pg >= cur_end_pgp))
742 			continue;
743 
744 		hv_page_online_one(has, pg);
745 		break;
746 	}
747 	spin_unlock_irqrestore(&dm_device.ha_lock, flags);
748 }
749 
750 static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
751 {
752 	struct hv_hotadd_state *has;
753 	struct hv_hotadd_gap *gap;
754 	unsigned long residual, new_inc;
755 	int ret = 0;
756 	unsigned long flags;
757 
758 	spin_lock_irqsave(&dm_device.ha_lock, flags);
759 	list_for_each_entry(has, &dm_device.ha_region_list, list) {
760 		/*
761 		 * If the pfn range we are dealing with is not in the current
762 		 * "hot add block", move on.
763 		 */
764 		if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
765 			continue;
766 
767 		/*
768 		 * If the current start pfn is not where the covered_end
769 		 * is, create a gap and update covered_end_pfn.
770 		 */
771 		if (has->covered_end_pfn != start_pfn) {
772 			gap = kzalloc(sizeof(struct hv_hotadd_gap), GFP_ATOMIC);
773 			if (!gap) {
774 				ret = -ENOMEM;
775 				break;
776 			}
777 
778 			INIT_LIST_HEAD(&gap->list);
779 			gap->start_pfn = has->covered_end_pfn;
780 			gap->end_pfn = start_pfn;
781 			list_add_tail(&gap->list, &has->gap_list);
782 
783 			has->covered_end_pfn = start_pfn;
784 		}
785 
786 		/*
787 		 * If the current hot-add request extends beyond
788 		 * our current limit, extend it.
789 		 */
790 		if ((start_pfn + pfn_cnt) > has->end_pfn) {
791 			residual = (start_pfn + pfn_cnt - has->end_pfn);
792 			/*
793 			 * Extend the region by multiples of HA_CHUNK.
794 			 */
795 			new_inc = (residual / HA_CHUNK) * HA_CHUNK;
796 			if (residual % HA_CHUNK)
797 				new_inc += HA_CHUNK;
798 
799 			has->end_pfn += new_inc;
800 		}
801 
802 		ret = 1;
803 		break;
804 	}
805 	spin_unlock_irqrestore(&dm_device.ha_lock, flags);
806 
807 	return ret;
808 }
809 
810 static unsigned long handle_pg_range(unsigned long pg_start,
811 					unsigned long pg_count)
812 {
813 	unsigned long start_pfn = pg_start;
814 	unsigned long pfn_cnt = pg_count;
815 	unsigned long size;
816 	struct hv_hotadd_state *has;
817 	unsigned long pgs_ol = 0;
818 	unsigned long old_covered_state;
819 	unsigned long res = 0, flags;
820 
821 	pr_debug("Hot adding %lu pages starting at pfn 0x%lx.\n", pg_count,
822 		pg_start);
823 
824 	spin_lock_irqsave(&dm_device.ha_lock, flags);
825 	list_for_each_entry(has, &dm_device.ha_region_list, list) {
826 		/*
827 		 * If the pfn range we are dealing with is not in the current
828 		 * "hot add block", move on.
829 		 */
830 		if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
831 			continue;
832 
833 		old_covered_state = has->covered_end_pfn;
834 
835 		if (start_pfn < has->ha_end_pfn) {
836 			/*
837 			 * This is the case where we are backing pages
838 			 * in an already hot added region. Bring
839 			 * these pages online first.
840 			 */
841 			pgs_ol = has->ha_end_pfn - start_pfn;
842 			if (pgs_ol > pfn_cnt)
843 				pgs_ol = pfn_cnt;
844 
845 			has->covered_end_pfn +=  pgs_ol;
846 			pfn_cnt -= pgs_ol;
847 			/*
848 			 * Check if the corresponding memory block is already
849 			 * online by checking its last previously backed page.
850 			 * In case it is, we need to bring the rest (which was not
851 			 * backed previously) online too.
852 			 */
853 			if (start_pfn > has->start_pfn &&
854 			    !PageReserved(pfn_to_page(start_pfn - 1)))
855 				hv_bring_pgs_online(has, start_pfn, pgs_ol);
856 
857 		}
858 
859 		if ((has->ha_end_pfn < has->end_pfn) && (pfn_cnt > 0)) {
860 			/*
861 			 * We have some residual hot add range
862 			 * that needs to be hot added; hot add
863 			 * it now. Hot add a multiple of
864 			 * HA_CHUNK that fully covers the pages
865 			 * we have.
866 			 */
867 			size = (has->end_pfn - has->ha_end_pfn);
868 			if (pfn_cnt <= size) {
869 				size = ((pfn_cnt / HA_CHUNK) * HA_CHUNK);
870 				if (pfn_cnt % HA_CHUNK)
871 					size += HA_CHUNK;
872 			} else {
873 				pfn_cnt = size;
874 			}
875 			spin_unlock_irqrestore(&dm_device.ha_lock, flags);
876 			hv_mem_hot_add(has->ha_end_pfn, size, pfn_cnt, has);
877 			spin_lock_irqsave(&dm_device.ha_lock, flags);
878 		}
879 		/*
880 		 * If we managed to online any pages that were given to us,
881 		 * we declare success.
882 		 */
883 		res = has->covered_end_pfn - old_covered_state;
884 		break;
885 	}
886 	spin_unlock_irqrestore(&dm_device.ha_lock, flags);
887 
888 	return res;
889 }
890 
891 static unsigned long process_hot_add(unsigned long pg_start,
892 					unsigned long pfn_cnt,
893 					unsigned long rg_start,
894 					unsigned long rg_size)
895 {
896 	struct hv_hotadd_state *ha_region = NULL;
897 	int covered;
898 	unsigned long flags;
899 
900 	if (pfn_cnt == 0)
901 		return 0;
902 
903 	if (!dm_device.host_specified_ha_region) {
904 		covered = pfn_covered(pg_start, pfn_cnt);
905 		if (covered < 0)
906 			return 0;
907 
908 		if (covered)
909 			goto do_pg_range;
910 	}
911 
912 	/*
913 	 * If the host has specified a hot-add range, deal with it first.
914 	 */
915 
916 	if (rg_size != 0) {
917 		ha_region = kzalloc(sizeof(struct hv_hotadd_state), GFP_KERNEL);
918 		if (!ha_region)
919 			return 0;
920 
921 		INIT_LIST_HEAD(&ha_region->list);
922 		INIT_LIST_HEAD(&ha_region->gap_list);
923 
924 		ha_region->start_pfn = rg_start;
925 		ha_region->ha_end_pfn = rg_start;
926 		ha_region->covered_start_pfn = pg_start;
927 		ha_region->covered_end_pfn = pg_start;
928 		ha_region->end_pfn = rg_start + rg_size;
929 
930 		spin_lock_irqsave(&dm_device.ha_lock, flags);
931 		list_add_tail(&ha_region->list, &dm_device.ha_region_list);
932 		spin_unlock_irqrestore(&dm_device.ha_lock, flags);
933 	}
934 
935 do_pg_range:
936 	/*
937 	 * Process the specified page range, bringing the pages
938 	 * online if possible.
939 	 */
940 	return handle_pg_range(pg_start, pfn_cnt);
941 }
942 
943 #endif
944 
945 static void hot_add_req(struct work_struct *dummy)
946 {
947 	struct dm_hot_add_response resp;
948 #ifdef CONFIG_MEMORY_HOTPLUG
949 	unsigned long pg_start, pfn_cnt;
950 	unsigned long rg_start, rg_sz;
951 #endif
952 	struct hv_dynmem_device *dm = &dm_device;
953 
954 	memset(&resp, 0, sizeof(struct dm_hot_add_response));
955 	resp.hdr.type = DM_MEM_HOT_ADD_RESPONSE;
956 	resp.hdr.size = sizeof(struct dm_hot_add_response);
957 
958 #ifdef CONFIG_MEMORY_HOTPLUG
959 	pg_start = dm->ha_wrk.ha_page_range.finfo.start_page;
960 	pfn_cnt = dm->ha_wrk.ha_page_range.finfo.page_cnt;
961 
962 	rg_start = dm->ha_wrk.ha_region_range.finfo.start_page;
963 	rg_sz = dm->ha_wrk.ha_region_range.finfo.page_cnt;
964 
965 	if ((rg_start == 0) && (!dm->host_specified_ha_region)) {
966 		unsigned long region_size;
967 		unsigned long region_start;
968 
969 		/*
970 		 * The host has not specified the hot-add region.
971 		 * Based on the hot-add page range being specified,
972 		 * compute a hot-add region that can cover the pages
973 		 * that need to be hot-added while ensuring the alignment
974 		 * and size requirements of Linux as they relate to hot-add.
975 		 */
976 		region_start = pg_start;
977 		region_size = (pfn_cnt / HA_CHUNK) * HA_CHUNK;
978 		if (pfn_cnt % HA_CHUNK)
979 			region_size += HA_CHUNK;
980 
981 		region_start = (pg_start / HA_CHUNK) * HA_CHUNK;
982 
983 		rg_start = region_start;
984 		rg_sz = region_size;
985 	}
986 
987 	if (do_hot_add)
988 		resp.page_count = process_hot_add(pg_start, pfn_cnt,
989 						rg_start, rg_sz);
990 
991 	dm->num_pages_added += resp.page_count;
992 #endif
993 	/*
994 	 * The result field of the response structure has the
995 	 * following semantics:
996 	 *
997 	 * 1. If all or some pages hot-added: Guest should return success.
998 	 *
999 	 * 2. If no pages could be hot-added:
1000 	 *
1001 	 * If the guest returns success, then the host
1002 	 * will not attempt any further hot-add operations. This
1003 	 * signifies a permanent failure.
1004 	 *
1005 	 * If the guest returns failure, then this failure will be
1006 	 * treated as a transient failure and the host may retry the
1007 	 * hot-add operation after some delay.
1008 	 */
1009 	if (resp.page_count > 0)
1010 		resp.result = 1;
1011 	else if (!do_hot_add)
1012 		resp.result = 1;
1013 	else
1014 		resp.result = 0;
1015 
1016 	if (!do_hot_add || (resp.page_count == 0))
1017 		pr_info("Memory hot add failed\n");
1018 
1019 	dm->state = DM_INITIALIZED;
1020 	resp.hdr.trans_id = atomic_inc_return(&trans_id);
1021 	vmbus_sendpacket(dm->dev->channel, &resp,
1022 			sizeof(struct dm_hot_add_response),
1023 			(unsigned long)NULL,
1024 			VM_PKT_DATA_INBAND, 0);
1025 }
1026 
1027 static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
1028 {
1029 	struct dm_info_header *info_hdr;
1030 
1031 	info_hdr = (struct dm_info_header *)msg->info;
1032 
1033 	switch (info_hdr->type) {
1034 	case INFO_TYPE_MAX_PAGE_CNT:
1035 		if (info_hdr->data_size == sizeof(__u64)) {
1036 			__u64 *max_page_count = (__u64 *)&info_hdr[1];
1037 
1038 			pr_info("Max. dynamic memory size: %llu MB\n",
1039 				(*max_page_count) >> (20 - PAGE_SHIFT));
1040 		}
1041 
1042 		break;
1043 	default:
1044 		pr_info("Received Unknown type: %d\n", info_hdr->type);
1045 	}
1046 }
1047 
1048 static unsigned long compute_balloon_floor(void)
1049 {
1050 	unsigned long min_pages;
1051 #define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
1052 	/* Simple continuous piecewise linear function:
1053 	 *  max MiB -> min MiB  gradient
1054 	 *       0         0
1055 	 *      16        16
1056 	 *      32        24
1057 	 *     128        72    (1/2)
1058 	 *     512       168    (1/4)
1059 	 *    2048       360    (1/8)
1060 	 *    8192       744    (1/16)
1061 	 *   32768      1512	(1/32)
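	 *  e.g. a 4096 MiB guest gets a floor of 232 + 4096/16 = 488 MiB.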
1062 	 */
1063 	if (totalram_pages < MB2PAGES(128))
1064 		min_pages = MB2PAGES(8) + (totalram_pages >> 1);
1065 	else if (totalram_pages < MB2PAGES(512))
1066 		min_pages = MB2PAGES(40) + (totalram_pages >> 2);
1067 	else if (totalram_pages < MB2PAGES(2048))
1068 		min_pages = MB2PAGES(104) + (totalram_pages >> 3);
1069 	else if (totalram_pages < MB2PAGES(8192))
1070 		min_pages = MB2PAGES(232) + (totalram_pages >> 4);
1071 	else
1072 		min_pages = MB2PAGES(488) + (totalram_pages >> 5);
1073 #undef MB2PAGES
1074 	return min_pages;
1075 }
1076 
1077 /*
1078  * Post our memory pressure status to the
1079  * host. The host expects the guest to post this status
1080  * periodically at 1 second intervals.
1081  *
1082  * The metrics specified in this protocol are very Windows
1083  * specific and so we cook up numbers here to convey our memory
1084  * pressure.
1085  */
1086 
1087 static void post_status(struct hv_dynmem_device *dm)
1088 {
1089 	struct dm_status status;
1090 	unsigned long now = jiffies;
1091 	unsigned long last_post = last_post_time;
1092 
1093 	if (pressure_report_delay > 0) {
1094 		--pressure_report_delay;
1095 		return;
1096 	}
1097 
1098 	if (!time_after(now, (last_post_time + HZ)))
1099 		return;
1100 
1101 	memset(&status, 0, sizeof(struct dm_status));
1102 	status.hdr.type = DM_STATUS_REPORT;
1103 	status.hdr.size = sizeof(struct dm_status);
1104 	status.hdr.trans_id = atomic_inc_return(&trans_id);
1105 
1106 	/*
1107 	 * The host expects the guest to report free and committed memory.
1108 	 * Furthermore, the host expects the pressure information to include
1109 	 * the ballooned out pages. For a given amount of memory that we are
1110 	 * managing we need to compute a floor below which we should not
1111 	 * balloon. Compute this and add it to the pressure report.
1112 	 * We also need to report all offline pages (num_pages_added -
1113 	 * num_pages_onlined) as committed to the host, otherwise it can try
1114 	 * asking us to balloon them out.
1115 	 */
1116 	status.num_avail = si_mem_available();
1117 	status.num_committed = vm_memory_committed() +
1118 		dm->num_pages_ballooned +
1119 		(dm->num_pages_added > dm->num_pages_onlined ?
1120 		 dm->num_pages_added - dm->num_pages_onlined : 0) +
1121 		compute_balloon_floor();
1122 
1123 	/*
1124 	 * If our transaction ID is no longer current, just don't
1125 	 * send the status. This can happen if we were interrupted
1126 	 * after we picked our transaction ID.
1127 	 */
1128 	if (status.hdr.trans_id != atomic_read(&trans_id))
1129 		return;
1130 
1131 	/*
1132 	 * If the last post time that we sampled has changed,
1133 	 * we have raced, don't post the status.
1134 	 */
1135 	if (last_post != last_post_time)
1136 		return;
1137 
1138 	last_post_time = jiffies;
1139 	vmbus_sendpacket(dm->dev->channel, &status,
1140 				sizeof(struct dm_status),
1141 				(unsigned long)NULL,
1142 				VM_PKT_DATA_INBAND, 0);
1143 
1144 }
1145 
1146 static void free_balloon_pages(struct hv_dynmem_device *dm,
1147 			 union dm_mem_page_range *range_array)
1148 {
1149 	int num_pages = range_array->finfo.page_cnt;
1150 	__u64 start_frame = range_array->finfo.start_page;
1151 	struct page *pg;
1152 	int i;
1153 
1154 	for (i = 0; i < num_pages; i++) {
1155 		pg = pfn_to_page(i + start_frame);
1156 		__free_page(pg);
1157 		dm->num_pages_ballooned--;
1158 	}
1159 }
1160 
1161 
1162 
1163 static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm,
1164 					unsigned int num_pages,
1165 					struct dm_balloon_response *bl_resp,
1166 					int alloc_unit)
1167 {
1168 	unsigned int i = 0;
1169 	struct page *pg;
1170 
1171 	if (num_pages < alloc_unit)
1172 		return 0;
1173 
1174 	for (i = 0; (i * alloc_unit) < num_pages; i++) {
1175 		if (bl_resp->hdr.size + sizeof(union dm_mem_page_range) >
1176 			PAGE_SIZE)
1177 			return i * alloc_unit;
1178 
1179 		/*
1180 		 * We execute this code in a thread context. Furthermore,
1181 		 * we don't want the kernel to try too hard.
1182 		 */
1183 		pg = alloc_pages(GFP_HIGHUSER | __GFP_NORETRY |
1184 				__GFP_NOMEMALLOC | __GFP_NOWARN,
1185 				get_order(alloc_unit << PAGE_SHIFT));
1186 
1187 		if (!pg)
1188 			return i * alloc_unit;
1189 
1190 		dm->num_pages_ballooned += alloc_unit;
1191 
1192 		/*
1193 		 * If we allocated 2M pages, split them so we
1194 		 * can free them in any order we get.
1195 		 */
1196 
1197 		if (alloc_unit != 1)
1198 			split_page(pg, get_order(alloc_unit << PAGE_SHIFT));
1199 
1200 		bl_resp->range_count++;
1201 		bl_resp->range_array[i].finfo.start_page =
1202 			page_to_pfn(pg);
1203 		bl_resp->range_array[i].finfo.page_cnt = alloc_unit;
1204 		bl_resp->hdr.size += sizeof(union dm_mem_page_range);
1205 
1206 	}
1207 
1208 	return num_pages;
1209 }
1210 
1211 static void balloon_up(struct work_struct *dummy)
1212 {
1213 	unsigned int num_pages = dm_device.balloon_wrk.num_pages;
1214 	unsigned int num_ballooned = 0;
1215 	struct dm_balloon_response *bl_resp;
1216 	int alloc_unit;
1217 	int ret;
1218 	bool done = false;
1219 	int i;
1220 	long avail_pages;
1221 	unsigned long floor;
1222 
1223 	/* The host balloons pages in 2M granularity. */
1224 	WARN_ON_ONCE(num_pages % PAGES_IN_2M != 0);
1225 
1226 	/*
1227 	 * We will attempt 2M allocations. However, if we fail to
1228 	 * allocate 2M chunks, we will go back to 4k allocations.
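	 * (alloc_unit is in 4K pages, so 512 pages == 2M.)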
1229 	 */
1230 	alloc_unit = 512;
1231 
1232 	avail_pages = si_mem_available();
1233 	floor = compute_balloon_floor();
1234 
1235 	/* Refuse to balloon below the floor, keep the 2M granularity. */
1236 	if (avail_pages < num_pages || avail_pages - num_pages < floor) {
1237 		pr_warn("Balloon request will be partially fulfilled. %s\n",
1238 			avail_pages < num_pages ? "Not enough memory." :
1239 			"Balloon floor reached.");
1240 
1241 		num_pages = avail_pages > floor ? (avail_pages - floor) : 0;
1242 		num_pages -= num_pages % PAGES_IN_2M;
1243 	}
1244 
1245 	while (!done) {
1246 		bl_resp = (struct dm_balloon_response *)send_buffer;
1247 		memset(send_buffer, 0, PAGE_SIZE);
1248 		bl_resp->hdr.type = DM_BALLOON_RESPONSE;
1249 		bl_resp->hdr.size = sizeof(struct dm_balloon_response);
1250 		bl_resp->more_pages = 1;
1251 
1252 		num_pages -= num_ballooned;
1253 		num_ballooned = alloc_balloon_pages(&dm_device, num_pages,
1254 						    bl_resp, alloc_unit);
1255 
1256 		if (alloc_unit != 1 && num_ballooned == 0) {
1257 			alloc_unit = 1;
1258 			continue;
1259 		}
1260 
1261 		if (num_ballooned == 0 || num_ballooned == num_pages) {
1262 			pr_debug("Ballooned %u out of %u requested pages.\n",
1263 				num_pages, dm_device.balloon_wrk.num_pages);
1264 
1265 			bl_resp->more_pages = 0;
1266 			done = true;
1267 			dm_device.state = DM_INITIALIZED;
1268 		}
1269 
1270 		/*
1271 		 * We are pushing a lot of data through the channel;
1272 		 * deal with transient failures caused by the
1273 		 * lack of space in the ring buffer.
1274 		 */
1275 
1276 		do {
1277 			bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
1278 			ret = vmbus_sendpacket(dm_device.dev->channel,
1279 						bl_resp,
1280 						bl_resp->hdr.size,
1281 						(unsigned long)NULL,
1282 						VM_PKT_DATA_INBAND, 0);
1283 
1284 			if (ret == -EAGAIN)
1285 				msleep(20);
1286 			post_status(&dm_device);
1287 		} while (ret == -EAGAIN);
1288 
1289 		if (ret) {
1290 			/*
1291 			 * Free up the memory we allocated.
1292 			 */
1293 			pr_info("Balloon response failed\n");
1294 
1295 			for (i = 0; i < bl_resp->range_count; i++)
1296 				free_balloon_pages(&dm_device,
1297 						 &bl_resp->range_array[i]);
1298 
1299 			done = true;
1300 		}
1301 	}
1302 
1303 }
1304 
1305 static void balloon_down(struct hv_dynmem_device *dm,
1306 			struct dm_unballoon_request *req)
1307 {
1308 	union dm_mem_page_range *range_array = req->range_array;
1309 	int range_count = req->range_count;
1310 	struct dm_unballoon_response resp;
1311 	int i;
1312 	unsigned int prev_pages_ballooned = dm->num_pages_ballooned;
1313 
1314 	for (i = 0; i < range_count; i++) {
1315 		free_balloon_pages(dm, &range_array[i]);
1316 		complete(&dm_device.config_event);
1317 	}
1318 
1319 	pr_debug("Freed %u ballooned pages.\n",
1320 		prev_pages_ballooned - dm->num_pages_ballooned);
1321 
1322 	if (req->more_pages == 1)
1323 		return;
1324 
1325 	memset(&resp, 0, sizeof(struct dm_unballoon_response));
1326 	resp.hdr.type = DM_UNBALLOON_RESPONSE;
1327 	resp.hdr.trans_id = atomic_inc_return(&trans_id);
1328 	resp.hdr.size = sizeof(struct dm_unballoon_response);
1329 
1330 	vmbus_sendpacket(dm_device.dev->channel, &resp,
1331 				sizeof(struct dm_unballoon_response),
1332 				(unsigned long)NULL,
1333 				VM_PKT_DATA_INBAND, 0);
1334 
1335 	dm->state = DM_INITIALIZED;
1336 }
1337 
1338 static void balloon_onchannelcallback(void *context);
1339 
1340 static int dm_thread_func(void *dm_dev)
1341 {
1342 	struct hv_dynmem_device *dm = dm_dev;
1343 
1344 	while (!kthread_should_stop()) {
1345 		wait_for_completion_interruptible_timeout(
1346 						&dm_device.config_event, 1*HZ);
1347 		/*
1348 		 * The host expects us to post information on the memory
1349 		 * pressure every second.
1350 		 */
1351 		reinit_completion(&dm_device.config_event);
1352 		post_status(dm);
1353 	}
1354 
1355 	return 0;
1356 }
1357 
1358 
1359 static void version_resp(struct hv_dynmem_device *dm,
1360 			struct dm_version_response *vresp)
1361 {
1362 	struct dm_version_request version_req;
1363 	int ret;
1364 
1365 	if (vresp->is_accepted) {
1366 		/*
1367 		 * We are done; wake up the
1368 		 * context waiting for version
1369 		 * negotiation.
1370 		 */
1371 		complete(&dm->host_event);
1372 		return;
1373 	}
1374 	/*
1375 	 * If there are more versions to try, continue
1376 	 * with negotiations; if not,
1377 	 * shut down the service since we are not able
1378 	 * to negotiate a suitable version number
1379 	 * with the host.
1380 	 */
1381 	if (dm->next_version == 0)
1382 		goto version_error;
1383 
1384 	memset(&version_req, 0, sizeof(struct dm_version_request));
1385 	version_req.hdr.type = DM_VERSION_REQUEST;
1386 	version_req.hdr.size = sizeof(struct dm_version_request);
1387 	version_req.hdr.trans_id = atomic_inc_return(&trans_id);
1388 	version_req.version.version = dm->next_version;
1389 	dm->version = version_req.version.version;
1390 
1391 	/*
1392 	 * Set the next version to try in case the current version fails.
1393 	 * Win7 protocol ought to be the last one to try.
1394 	 */
1395 	switch (version_req.version.version) {
1396 	case DYNMEM_PROTOCOL_VERSION_WIN8:
1397 		dm->next_version = DYNMEM_PROTOCOL_VERSION_WIN7;
1398 		version_req.is_last_attempt = 0;
1399 		break;
1400 	default:
1401 		dm->next_version = 0;
1402 		version_req.is_last_attempt = 1;
1403 	}
1404 
1405 	ret = vmbus_sendpacket(dm->dev->channel, &version_req,
1406 				sizeof(struct dm_version_request),
1407 				(unsigned long)NULL,
1408 				VM_PKT_DATA_INBAND, 0);
1409 
1410 	if (ret)
1411 		goto version_error;
1412 
1413 	return;
1414 
1415 version_error:
1416 	dm->state = DM_INIT_ERROR;
1417 	complete(&dm->host_event);
1418 }
1419 
1420 static void cap_resp(struct hv_dynmem_device *dm,
1421 			struct dm_capabilities_resp_msg *cap_resp)
1422 {
1423 	if (!cap_resp->is_accepted) {
1424 		pr_info("Capabilities not accepted by host\n");
1425 		dm->state = DM_INIT_ERROR;
1426 	}
1427 	complete(&dm->host_event);
1428 }
1429 
1430 static void balloon_onchannelcallback(void *context)
1431 {
1432 	struct hv_device *dev = context;
1433 	u32 recvlen;
1434 	u64 requestid;
1435 	struct dm_message *dm_msg;
1436 	struct dm_header *dm_hdr;
1437 	struct hv_dynmem_device *dm = hv_get_drvdata(dev);
1438 	struct dm_balloon *bal_msg;
1439 	struct dm_hot_add *ha_msg;
1440 	union dm_mem_page_range *ha_pg_range;
1441 	union dm_mem_page_range *ha_region;
1442 
1443 	memset(recv_buffer, 0, sizeof(recv_buffer));
1444 	vmbus_recvpacket(dev->channel, recv_buffer,
1445 			 PAGE_SIZE, &recvlen, &requestid);
1446 
1447 	if (recvlen > 0) {
1448 		dm_msg = (struct dm_message *)recv_buffer;
1449 		dm_hdr = &dm_msg->hdr;
1450 
1451 		switch (dm_hdr->type) {
1452 		case DM_VERSION_RESPONSE:
1453 			version_resp(dm,
1454 				 (struct dm_version_response *)dm_msg);
1455 			break;
1456 
1457 		case DM_CAPABILITIES_RESPONSE:
1458 			cap_resp(dm,
1459 				 (struct dm_capabilities_resp_msg *)dm_msg);
1460 			break;
1461 
1462 		case DM_BALLOON_REQUEST:
1463 			if (dm->state == DM_BALLOON_UP)
1464 				pr_warn("Currently ballooning\n");
1465 			bal_msg = (struct dm_balloon *)recv_buffer;
1466 			dm->state = DM_BALLOON_UP;
1467 			dm_device.balloon_wrk.num_pages = bal_msg->num_pages;
1468 			schedule_work(&dm_device.balloon_wrk.wrk);
1469 			break;
1470 
1471 		case DM_UNBALLOON_REQUEST:
1472 			dm->state = DM_BALLOON_DOWN;
1473 			balloon_down(dm,
1474 				 (struct dm_unballoon_request *)recv_buffer);
1475 			break;
1476 
1477 		case DM_MEM_HOT_ADD_REQUEST:
1478 			if (dm->state == DM_HOT_ADD)
1479 				pr_warn("Currently hot-adding\n");
1480 			dm->state = DM_HOT_ADD;
1481 			ha_msg = (struct dm_hot_add *)recv_buffer;
1482 			if (ha_msg->hdr.size == sizeof(struct dm_hot_add)) {
1483 				/*
1484 				 * This is a normal hot-add request specifying
1485 				 * hot-add memory.
1486 				 */
1487 				dm->host_specified_ha_region = false;
1488 				ha_pg_range = &ha_msg->range;
1489 				dm->ha_wrk.ha_page_range = *ha_pg_range;
1490 				dm->ha_wrk.ha_region_range.page_range = 0;
1491 			} else {
1492 				/*
1493 				 * Host is specifying that we first hot-add
1494 				 * a region and then partially populate this
1495 				 * region.
1496 				 */
1497 				dm->host_specified_ha_region = true;
1498 				ha_pg_range = &ha_msg->range;
1499 				ha_region = &ha_pg_range[1];
1500 				dm->ha_wrk.ha_page_range = *ha_pg_range;
1501 				dm->ha_wrk.ha_region_range = *ha_region;
1502 			}
1503 			schedule_work(&dm_device.ha_wrk.wrk);
1504 			break;
1505 
1506 		case DM_INFO_MESSAGE:
1507 			process_info(dm, (struct dm_info_msg *)dm_msg);
1508 			break;
1509 
1510 		default:
1511 			pr_err("Unhandled message: type: %d\n", dm_hdr->type);
1512 
1513 		}
1514 	}
1515 
1516 }
1517 
1518 static int balloon_probe(struct hv_device *dev,
1519 			const struct hv_vmbus_device_id *dev_id)
1520 {
1521 	int ret;
1522 	unsigned long t;
1523 	struct dm_version_request version_req;
1524 	struct dm_capabilities cap_msg;
1525 
1526 #ifdef CONFIG_MEMORY_HOTPLUG
1527 	do_hot_add = hot_add;
1528 #else
1529 	do_hot_add = false;
1530 #endif
1531 
1532 	/*
1533 	 * First allocate a send buffer.
1534 	 */
1535 
1536 	send_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
1537 	if (!send_buffer)
1538 		return -ENOMEM;
1539 
1540 	ret = vmbus_open(dev->channel, dm_ring_size, dm_ring_size, NULL, 0,
1541 			balloon_onchannelcallback, dev);
1542 
1543 	if (ret)
1544 		goto probe_error0;
1545 
1546 	dm_device.dev = dev;
1547 	dm_device.state = DM_INITIALIZING;
1548 	dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN8;
1549 	init_completion(&dm_device.host_event);
1550 	init_completion(&dm_device.config_event);
1551 	INIT_LIST_HEAD(&dm_device.ha_region_list);
1552 	spin_lock_init(&dm_device.ha_lock);
1553 	INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up);
1554 	INIT_WORK(&dm_device.ha_wrk.wrk, hot_add_req);
1555 	dm_device.host_specified_ha_region = false;
1556 
1557 	dm_device.thread =
1558 		 kthread_run(dm_thread_func, &dm_device, "hv_balloon");
1559 	if (IS_ERR(dm_device.thread)) {
1560 		ret = PTR_ERR(dm_device.thread);
1561 		goto probe_error1;
1562 	}
1563 
1564 #ifdef CONFIG_MEMORY_HOTPLUG
1565 	set_online_page_callback(&hv_online_page);
1566 	register_memory_notifier(&hv_memory_nb);
1567 #endif
1568 
1569 	hv_set_drvdata(dev, &dm_device);
1570 	/*
1571 	 * Initiate the handshake with the host and negotiate
1572 	 * a version that the host can support. We start with the
1573 	 * highest version number and go down if the host cannot
1574 	 * support it.
1575 	 */
1576 	memset(&version_req, 0, sizeof(struct dm_version_request));
1577 	version_req.hdr.type = DM_VERSION_REQUEST;
1578 	version_req.hdr.size = sizeof(struct dm_version_request);
1579 	version_req.hdr.trans_id = atomic_inc_return(&trans_id);
1580 	version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN10;
1581 	version_req.is_last_attempt = 0;
1582 	dm_device.version = version_req.version.version;
1583 
1584 	ret = vmbus_sendpacket(dev->channel, &version_req,
1585 				sizeof(struct dm_version_request),
1586 				(unsigned long)NULL,
1587 				VM_PKT_DATA_INBAND, 0);
1588 	if (ret)
1589 		goto probe_error2;
1590 
1591 	t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
1592 	if (t == 0) {
1593 		ret = -ETIMEDOUT;
1594 		goto probe_error2;
1595 	}
1596 
1597 	/*
1598 	 * If we could not negotiate a compatible version with the host,
1599 	 * fail the probe function.
1600 	 */
1601 	if (dm_device.state == DM_INIT_ERROR) {
1602 		ret = -ETIMEDOUT;
1603 		goto probe_error2;
1604 	}
1605 
1606 	pr_info("Using Dynamic Memory protocol version %u.%u\n",
1607 		DYNMEM_MAJOR_VERSION(dm_device.version),
1608 		DYNMEM_MINOR_VERSION(dm_device.version));
1609 
1610 	/*
1611 	 * Now submit our capabilities to the host.
1612 	 */
1613 	memset(&cap_msg, 0, sizeof(struct dm_capabilities));
1614 	cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
1615 	cap_msg.hdr.size = sizeof(struct dm_capabilities);
1616 	cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
1617 
1618 	cap_msg.caps.cap_bits.balloon = 1;
1619 	cap_msg.caps.cap_bits.hot_add = 1;
1620 
1621 	/*
1622 	 * Specify our alignment requirements as they relate to
1623 	 * memory hot-add. Specify 128MB alignment.
1624 	 */
1625 	cap_msg.caps.cap_bits.hot_add_alignment = 7;
1626 
1627 	/*
1628 	 * Currently the host does not use these
1629 	 * values and we set them to what is done in the
1630 	 * Windows driver.
1631 	 */
1632 	cap_msg.min_page_cnt = 0;
1633 	cap_msg.max_page_number = -1;
1634 
1635 	ret = vmbus_sendpacket(dev->channel, &cap_msg,
1636 				sizeof(struct dm_capabilities),
1637 				(unsigned long)NULL,
1638 				VM_PKT_DATA_INBAND, 0);
1639 	if (ret)
1640 		goto probe_error2;
1641 
1642 	t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
1643 	if (t == 0) {
1644 		ret = -ETIMEDOUT;
1645 		goto probe_error2;
1646 	}
1647 
1648 	/*
1649 	 * If the host does not like our capabilities,
1650 	 * fail the probe function.
1651 	 */
1652 	if (dm_device.state == DM_INIT_ERROR) {
1653 		ret = -ETIMEDOUT;
1654 		goto probe_error2;
1655 	}
1656 
1657 	dm_device.state = DM_INITIALIZED;
1658 	last_post_time = jiffies;
1659 
1660 	return 0;
1661 
1662 probe_error2:
1663 #ifdef CONFIG_MEMORY_HOTPLUG
1664 	restore_online_page_callback(&hv_online_page);
1665 #endif
1666 	kthread_stop(dm_device.thread);
1667 
1668 probe_error1:
1669 	vmbus_close(dev->channel);
1670 probe_error0:
1671 	kfree(send_buffer);
1672 	return ret;
1673 }
1674 
1675 static int balloon_remove(struct hv_device *dev)
1676 {
1677 	struct hv_dynmem_device *dm = hv_get_drvdata(dev);
1678 	struct hv_hotadd_state *has, *tmp;
1679 	struct hv_hotadd_gap *gap, *tmp_gap;
1680 	unsigned long flags;
1681 
1682 	if (dm->num_pages_ballooned != 0)
1683 		pr_warn("Ballooned pages: %d\n", dm->num_pages_ballooned);
1684 
1685 	cancel_work_sync(&dm->balloon_wrk.wrk);
1686 	cancel_work_sync(&dm->ha_wrk.wrk);
1687 
1688 	vmbus_close(dev->channel);
1689 	kthread_stop(dm->thread);
1690 	kfree(send_buffer);
1691 #ifdef CONFIG_MEMORY_HOTPLUG
1692 	restore_online_page_callback(&hv_online_page);
1693 	unregister_memory_notifier(&hv_memory_nb);
1694 #endif
1695 	spin_lock_irqsave(&dm_device.ha_lock, flags);
1696 	list_for_each_entry_safe(has, tmp, &dm->ha_region_list, list) {
1697 		list_for_each_entry_safe(gap, tmp_gap, &has->gap_list, list) {
1698 			list_del(&gap->list);
1699 			kfree(gap);
1700 		}
1701 		list_del(&has->list);
1702 		kfree(has);
1703 	}
1704 	spin_unlock_irqrestore(&dm_device.ha_lock, flags);
1705 
1706 	return 0;
1707 }
1708 
1709 static const struct hv_vmbus_device_id id_table[] = {
1710 	/* Dynamic Memory Class ID */
1711 	/* 525074DC-8985-46e2-8057-A307DC18A502 */
1712 	{ HV_DM_GUID, },
1713 	{ },
1714 };
1715 
1716 MODULE_DEVICE_TABLE(vmbus, id_table);
1717 
1718 static  struct hv_driver balloon_drv = {
1719 	.name = "hv_balloon",
1720 	.id_table = id_table,
1721 	.probe =  balloon_probe,
1722 	.remove =  balloon_remove,
1723 };
1724 
1725 static int __init init_balloon_drv(void)
1726 {
1727 
1728 	return vmbus_driver_register(&balloon_drv);
1729 }
1730 
1731 module_init(init_balloon_drv);
1732 
1733 MODULE_DESCRIPTION("Hyper-V Balloon");
1734 MODULE_LICENSE("GPL");
1735