// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio balloon implementation, inspired by Dor Laor and Marcelo
 * Tosatti's implementations.
 *
 * Copyright 2008 Rusty Russell IBM Corporation
 */

#include <linux/virtio.h>
#include <linux/virtio_balloon.h>
#include <linux/swap.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/balloon_compaction.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/magic.h>
#include <linux/pseudo_fs.h>

/*
 * Balloon device works in 4K page units.  So each page is pointed to by
 * multiple balloon pages.  All memory counters in this driver are in balloon
 * page units.
 */
#define VIRTIO_BALLOON_PAGES_PER_PAGE (unsigned)(PAGE_SIZE >> VIRTIO_BALLOON_PFN_SHIFT)
#define VIRTIO_BALLOON_ARRAY_PFNS_MAX 256
#define VIRTBALLOON_OOM_NOTIFY_PRIORITY 80

#define VIRTIO_BALLOON_FREE_PAGE_ALLOC_FLAG (__GFP_NORETRY | __GFP_NOWARN | \
					     __GFP_NOMEMALLOC)
/* The order of free page blocks to report to host */
#define VIRTIO_BALLOON_FREE_PAGE_ORDER (MAX_ORDER - 1)
/* The size of a free page block in bytes */
#define VIRTIO_BALLOON_FREE_PAGE_SIZE \
	(1 << (VIRTIO_BALLOON_FREE_PAGE_ORDER + PAGE_SHIFT))
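
/*
 * A worked example of the units above (illustrative values): balloon PFNs
 * are always 4KB (VIRTIO_BALLOON_PFN_SHIFT is 12), so on a 4KB-page kernel
 * VIRTIO_BALLOON_PAGES_PER_PAGE is 1, while a 64KB-page kernel describes
 * each Linux page with 16 consecutive balloon PFNs.  Assuming a typical
 * x86 configuration where MAX_ORDER is 11, VIRTIO_BALLOON_FREE_PAGE_ORDER
 * is 10 and each free page block reported to the host spans 1024 pages,
 * i.e. 4MB.
 */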

#ifdef CONFIG_BALLOON_COMPACTION
static struct vfsmount *balloon_mnt;
#endif

enum virtio_balloon_vq {
	VIRTIO_BALLOON_VQ_INFLATE,
	VIRTIO_BALLOON_VQ_DEFLATE,
	VIRTIO_BALLOON_VQ_STATS,
	VIRTIO_BALLOON_VQ_FREE_PAGE,
	VIRTIO_BALLOON_VQ_MAX
};

enum virtio_balloon_config_read {
	VIRTIO_BALLOON_CONFIG_READ_CMD_ID = 0,
};

struct virtio_balloon {
	struct virtio_device *vdev;
	struct virtqueue *inflate_vq, *deflate_vq, *stats_vq, *free_page_vq;

	/* Balloon's own wq for cpu-intensive work items */
	struct workqueue_struct *balloon_wq;
	/* The free page reporting work item submitted to the balloon wq */
	struct work_struct report_free_page_work;

	/* The balloon servicing is delegated to a freezable workqueue. */
	struct work_struct update_balloon_stats_work;
	struct work_struct update_balloon_size_work;

	/* Prevent updating balloon when it is being canceled. */
	spinlock_t stop_update_lock;
	bool stop_update;
	/* Bitmap to indicate if reading the related config fields is needed */
	unsigned long config_read_bitmap;

	/* The list of allocated free pages, waiting to be given back to mm */
	struct list_head free_page_list;
	spinlock_t free_page_list_lock;
	/* The number of free page blocks on the above list */
	unsigned long num_free_page_blocks;
	/*
	 * The cmd id received from host.
	 * Read it via virtio_balloon_cmd_id_received to get the latest value
	 * sent from host.
	 */
	u32 cmd_id_received_cache;
	/* The cmd id that is actively in use */
	__virtio32 cmd_id_active;
	/* Buffer to store the stop sign */
	__virtio32 cmd_id_stop;

	/* Waiting for host to ack the pages we released. */
	wait_queue_head_t acked;

	/* Number of balloon pages we've told the Host we're not using. */
	unsigned int num_pages;
	/*
	 * The pages we've told the Host we're not using are enqueued
	 * at vb_dev_info->pages list.
	 * Each page on this list adds VIRTIO_BALLOON_PAGES_PER_PAGE
	 * to num_pages above.
	 */
	struct balloon_dev_info vb_dev_info;

	/* Synchronize access/update to this struct virtio_balloon elements */
	struct mutex balloon_lock;

	/* The array of pfns we tell the Host about. */
	unsigned int num_pfns;
	__virtio32 pfns[VIRTIO_BALLOON_ARRAY_PFNS_MAX];

	/* Memory statistics */
	struct virtio_balloon_stat stats[VIRTIO_BALLOON_S_NR];

	/* To register a shrinker to shrink memory upon memory pressure */
	struct shrinker shrinker;
};

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_BALLOON, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static u32 page_to_balloon_pfn(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);

	BUILD_BUG_ON(PAGE_SHIFT < VIRTIO_BALLOON_PFN_SHIFT);
	/* Convert pfn from Linux page size to balloon page size. */
	return pfn * VIRTIO_BALLOON_PAGES_PER_PAGE;
}

static void balloon_ack(struct virtqueue *vq)
{
	struct virtio_balloon *vb = vq->vdev->priv;

	wake_up(&vb->acked);
}

static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
{
	struct scatterlist sg;
	unsigned int len;

	sg_init_one(&sg, vb->pfns, sizeof(vb->pfns[0]) * vb->num_pfns);

	/* We should always be able to add one buffer to an empty queue. */
	virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL);
	virtqueue_kick(vq);

	/* When host has read buffer, this completes via balloon_ack */
	wait_event(vb->acked, virtqueue_get_buf(vq, &len));
}

static void set_page_pfns(struct virtio_balloon *vb,
			  __virtio32 pfns[], struct page *page)
{
	unsigned int i;

	/*
	 * Set balloon pfns pointing at this page.
	 * Note that the first pfn points at start of the page.
	 */
	for (i = 0; i < VIRTIO_BALLOON_PAGES_PER_PAGE; i++)
		pfns[i] = cpu_to_virtio32(vb->vdev,
					  page_to_balloon_pfn(page) + i);
}

static unsigned fill_balloon(struct virtio_balloon *vb, size_t num)
{
	unsigned num_allocated_pages;
	unsigned num_pfns;
	struct page *page;
	LIST_HEAD(pages);

	/* We can only do one array worth at a time. */
	num = min(num, ARRAY_SIZE(vb->pfns));

	for (num_pfns = 0; num_pfns < num;
	     num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) {
		struct page *page = balloon_page_alloc();

		if (!page) {
			dev_info_ratelimited(&vb->vdev->dev,
					     "Out of puff! Can't get %u pages\n",
					     VIRTIO_BALLOON_PAGES_PER_PAGE);
			/* Sleep for at least 1/5 of a second before retry. */
			msleep(200);
			break;
		}

		balloon_page_push(&pages, page);
	}

	mutex_lock(&vb->balloon_lock);

	vb->num_pfns = 0;

	while ((page = balloon_page_pop(&pages))) {
		balloon_page_enqueue(&vb->vb_dev_info, page);

		set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
		vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE;
		if (!virtio_has_feature(vb->vdev,
					VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
			adjust_managed_page_count(page, -1);
		vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE;
	}

	num_allocated_pages = vb->num_pfns;
	/* Did we get any? */
	if (vb->num_pfns != 0)
		tell_host(vb, vb->inflate_vq);
	mutex_unlock(&vb->balloon_lock);

	return num_allocated_pages;
}
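
/*
 * Design note (a sketch of the batching math, assuming 4KB pages): the
 * pfns array holds VIRTIO_BALLOON_ARRAY_PFNS_MAX (256) entries, so one
 * fill_balloon() pass inflates at most 1MB per tell_host() round trip.
 * Pages are allocated onto a local list before balloon_lock is taken, so
 * the lock is never held while the allocator may sleep or reclaim.
 */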

static void release_pages_balloon(struct virtio_balloon *vb,
				  struct list_head *pages)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, pages, lru) {
		if (!virtio_has_feature(vb->vdev,
					VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
			adjust_managed_page_count(page, 1);
		list_del(&page->lru);
		put_page(page); /* balloon reference */
	}
}

static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
{
	unsigned num_freed_pages;
	struct page *page;
	struct balloon_dev_info *vb_dev_info = &vb->vb_dev_info;
	LIST_HEAD(pages);

	/* We can only do one array worth at a time. */
	num = min(num, ARRAY_SIZE(vb->pfns));

	mutex_lock(&vb->balloon_lock);
	/* We can't release more pages than taken */
	num = min(num, (size_t)vb->num_pages);
	for (vb->num_pfns = 0; vb->num_pfns < num;
	     vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) {
		page = balloon_page_dequeue(vb_dev_info);
		if (!page)
			break;
		set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
		list_add(&page->lru, &pages);
		vb->num_pages -= VIRTIO_BALLOON_PAGES_PER_PAGE;
	}

	num_freed_pages = vb->num_pfns;
	/*
	 * Note that if VIRTIO_BALLOON_F_MUST_TELL_HOST is negotiated, we
	 * *have* to tell the host before releasing the pages, so keep this
	 * order.
	 */
	if (vb->num_pfns != 0)
		tell_host(vb, vb->deflate_vq);
	release_pages_balloon(vb, &pages);
	mutex_unlock(&vb->balloon_lock);
	return num_freed_pages;
}

static inline void update_stat(struct virtio_balloon *vb, int idx,
			       u16 tag, u64 val)
{
	BUG_ON(idx >= VIRTIO_BALLOON_S_NR);
	vb->stats[idx].tag = cpu_to_virtio16(vb->vdev, tag);
	vb->stats[idx].val = cpu_to_virtio64(vb->vdev, val);
}

#define pages_to_bytes(x) ((u64)(x) << PAGE_SHIFT)

static unsigned int update_balloon_stats(struct virtio_balloon *vb)
{
	unsigned long events[NR_VM_EVENT_ITEMS];
	struct sysinfo i;
	unsigned int idx = 0;
	long available;
	unsigned long caches;

	all_vm_events(events);
	si_meminfo(&i);

	available = si_mem_available();
	caches = global_node_page_state(NR_FILE_PAGES);

#ifdef CONFIG_VM_EVENT_COUNTERS
	update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN,
		    pages_to_bytes(events[PSWPIN]));
	update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_OUT,
		    pages_to_bytes(events[PSWPOUT]));
	update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]);
	update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]);
#ifdef CONFIG_HUGETLB_PAGE
	update_stat(vb, idx++, VIRTIO_BALLOON_S_HTLB_PGALLOC,
		    events[HTLB_BUDDY_PGALLOC]);
	update_stat(vb, idx++, VIRTIO_BALLOON_S_HTLB_PGFAIL,
		    events[HTLB_BUDDY_PGALLOC_FAIL]);
#endif
#endif
	update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMFREE,
		    pages_to_bytes(i.freeram));
	update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMTOT,
		    pages_to_bytes(i.totalram));
	update_stat(vb, idx++, VIRTIO_BALLOON_S_AVAIL,
		    pages_to_bytes(available));
	update_stat(vb, idx++, VIRTIO_BALLOON_S_CACHES,
		    pages_to_bytes(caches));

	return idx;
}
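
/*
 * Note on units: the memory stats (MEMFREE, MEMTOT, AVAIL, CACHES and the
 * swap traffic) are reported in bytes via pages_to_bytes(), while the
 * fault and hugetlb stats are raw event counts.  The returned idx is the
 * number of valid entries, and it is what sizes the scatterlist handed to
 * the host in stats_handle_request() below.
 */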

/*
 * While most virtqueues communicate guest-initiated requests to the hypervisor,
 * the stats queue operates in reverse.  The driver initializes the virtqueue
 * with a single buffer.  From that point forward, all conversations consist of
 * a hypervisor request (a call to this function) which directs us to refill
 * the virtqueue with a fresh stats buffer.  Since stats collection can sleep,
 * we delegate the job to a freezable workqueue that will do the actual work via
 * stats_handle_request().
 */
static void stats_request(struct virtqueue *vq)
{
	struct virtio_balloon *vb = vq->vdev->priv;

	spin_lock(&vb->stop_update_lock);
	if (!vb->stop_update)
		queue_work(system_freezable_wq, &vb->update_balloon_stats_work);
	spin_unlock(&vb->stop_update_lock);
}

static void stats_handle_request(struct virtio_balloon *vb)
{
	struct virtqueue *vq;
	struct scatterlist sg;
	unsigned int len, num_stats;

	num_stats = update_balloon_stats(vb);

	vq = vb->stats_vq;
	if (!virtqueue_get_buf(vq, &len))
		return;
	sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats);
	virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL);
	virtqueue_kick(vq);
}

static inline s64 towards_target(struct virtio_balloon *vb)
{
	s64 target;
	u32 num_pages;

	virtio_cread(vb->vdev, struct virtio_balloon_config, num_pages,
		     &num_pages);

	/* Legacy balloon config space is LE, unlike all other devices. */
	if (!virtio_has_feature(vb->vdev, VIRTIO_F_VERSION_1))
		num_pages = le32_to_cpu((__force __le32)num_pages);

	target = num_pages;
	return target - vb->num_pages;
}
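
/*
 * Sign convention, with illustrative numbers: if the host writes
 * num_pages = 262144 (1GB in 4KB balloon pages) while vb->num_pages is
 * 131072, towards_target() returns +131072 and the driver inflates; a
 * negative return means the balloon is above target and must deflate.
 */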

/* Gives back @num_to_return blocks of free pages to mm. */
static unsigned long return_free_pages_to_mm(struct virtio_balloon *vb,
					     unsigned long num_to_return)
{
	struct page *page;
	unsigned long num_returned;

	spin_lock_irq(&vb->free_page_list_lock);
	for (num_returned = 0; num_returned < num_to_return; num_returned++) {
		page = balloon_page_pop(&vb->free_page_list);
		if (!page)
			break;
		free_pages((unsigned long)page_address(page),
			   VIRTIO_BALLOON_FREE_PAGE_ORDER);
	}
	vb->num_free_page_blocks -= num_returned;
	spin_unlock_irq(&vb->free_page_list_lock);

	return num_returned;
}

static void virtio_balloon_queue_free_page_work(struct virtio_balloon *vb)
{
	if (!virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
		return;

	/* No need to queue the work if the bit was already set. */
	if (test_and_set_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID,
			     &vb->config_read_bitmap))
		return;

	queue_work(vb->balloon_wq, &vb->report_free_page_work);
}

static void virtballoon_changed(struct virtio_device *vdev)
{
	struct virtio_balloon *vb = vdev->priv;
	unsigned long flags;

	spin_lock_irqsave(&vb->stop_update_lock, flags);
	if (!vb->stop_update) {
		queue_work(system_freezable_wq,
			   &vb->update_balloon_size_work);
		virtio_balloon_queue_free_page_work(vb);
	}
	spin_unlock_irqrestore(&vb->stop_update_lock, flags);
}

static void update_balloon_size(struct virtio_balloon *vb)
{
	u32 actual = vb->num_pages;

	/* Legacy balloon config space is LE, unlike all other devices. */
	if (!virtio_has_feature(vb->vdev, VIRTIO_F_VERSION_1))
		actual = (__force u32)cpu_to_le32(actual);

	virtio_cwrite(vb->vdev, struct virtio_balloon_config, actual,
		      &actual);
}

static void update_balloon_stats_func(struct work_struct *work)
{
	struct virtio_balloon *vb;

	vb = container_of(work, struct virtio_balloon,
			  update_balloon_stats_work);
	stats_handle_request(vb);
}

static void update_balloon_size_func(struct work_struct *work)
{
	struct virtio_balloon *vb;
	s64 diff;

	vb = container_of(work, struct virtio_balloon,
			  update_balloon_size_work);
	diff = towards_target(vb);

	if (!diff)
		return;

	if (diff > 0)
		diff -= fill_balloon(vb, diff);
	else
		diff += leak_balloon(vb, -diff);
	update_balloon_size(vb);

	if (diff)
		queue_work(system_freezable_wq, work);
}
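
/*
 * Convergence note: fill_balloon() and leak_balloon() each handle at most
 * one pfns array (256 balloon pages, i.e. 1MB with 4KB pages) per call,
 * so update_balloon_size_func() re-queues itself until the remaining diff
 * reaches zero, walking toward the host's target in 1MB steps and
 * reporting the new "actual" size to the host after every step.
 */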

static int init_vqs(struct virtio_balloon *vb)
{
	struct virtqueue *vqs[VIRTIO_BALLOON_VQ_MAX];
	vq_callback_t *callbacks[VIRTIO_BALLOON_VQ_MAX];
	const char *names[VIRTIO_BALLOON_VQ_MAX];
	int err;

	/*
	 * Inflateq and deflateq are used unconditionally.  The names[]
	 * will be NULL if the related feature is not enabled, which will
	 * cause no allocation for the corresponding virtqueue in find_vqs.
	 */
	callbacks[VIRTIO_BALLOON_VQ_INFLATE] = balloon_ack;
	names[VIRTIO_BALLOON_VQ_INFLATE] = "inflate";
	callbacks[VIRTIO_BALLOON_VQ_DEFLATE] = balloon_ack;
	names[VIRTIO_BALLOON_VQ_DEFLATE] = "deflate";
	names[VIRTIO_BALLOON_VQ_STATS] = NULL;
	names[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL;

	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
		names[VIRTIO_BALLOON_VQ_STATS] = "stats";
		callbacks[VIRTIO_BALLOON_VQ_STATS] = stats_request;
	}

	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) {
		names[VIRTIO_BALLOON_VQ_FREE_PAGE] = "free_page_vq";
		callbacks[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL;
	}

	err = vb->vdev->config->find_vqs(vb->vdev, VIRTIO_BALLOON_VQ_MAX,
					 vqs, callbacks, names, NULL, NULL);
	if (err)
		return err;

	vb->inflate_vq = vqs[VIRTIO_BALLOON_VQ_INFLATE];
	vb->deflate_vq = vqs[VIRTIO_BALLOON_VQ_DEFLATE];
	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
		struct scatterlist sg;
		unsigned int num_stats;

		vb->stats_vq = vqs[VIRTIO_BALLOON_VQ_STATS];

		/*
		 * Prime this virtqueue with one buffer so the hypervisor can
		 * use it to signal us later (it can't be broken yet!).
		 */
		num_stats = update_balloon_stats(vb);

		sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats);
		err = virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb,
					   GFP_KERNEL);
		if (err) {
			dev_warn(&vb->vdev->dev, "%s: add stat_vq failed\n",
				 __func__);
			return err;
		}
		virtqueue_kick(vb->stats_vq);
	}

	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
		vb->free_page_vq = vqs[VIRTIO_BALLOON_VQ_FREE_PAGE];

	return 0;
}

static u32 virtio_balloon_cmd_id_received(struct virtio_balloon *vb)
{
	if (test_and_clear_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID,
			       &vb->config_read_bitmap))
		virtio_cread(vb->vdev, struct virtio_balloon_config,
			     free_page_report_cmd_id,
			     &vb->cmd_id_received_cache);

	return vb->cmd_id_received_cache;
}

static int send_cmd_id_start(struct virtio_balloon *vb)
{
	struct scatterlist sg;
	struct virtqueue *vq = vb->free_page_vq;
	int err, unused;

	/* Detach all the used buffers from the vq */
	while (virtqueue_get_buf(vq, &unused))
		;

	/* cmd_id_active is stored in device endianness. */
	vb->cmd_id_active = cpu_to_virtio32(vb->vdev,
					    virtio_balloon_cmd_id_received(vb));
	sg_init_one(&sg, &vb->cmd_id_active, sizeof(vb->cmd_id_active));
	err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_active, GFP_KERNEL);
	if (!err)
		virtqueue_kick(vq);
	return err;
}

static int send_cmd_id_stop(struct virtio_balloon *vb)
{
	struct scatterlist sg;
	struct virtqueue *vq = vb->free_page_vq;
	int err, unused;

	/* Detach all the used buffers from the vq */
	while (virtqueue_get_buf(vq, &unused))
		;

	sg_init_one(&sg, &vb->cmd_id_stop, sizeof(vb->cmd_id_stop));
	err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_stop, GFP_KERNEL);
	if (!err)
		virtqueue_kick(vq);
	return err;
}

static int get_free_page_and_send(struct virtio_balloon *vb)
{
	struct virtqueue *vq = vb->free_page_vq;
	struct page *page;
	struct scatterlist sg;
	int err, unused;
	void *p;

	/* Detach all the used buffers from the vq */
	while (virtqueue_get_buf(vq, &unused))
		;

	page = alloc_pages(VIRTIO_BALLOON_FREE_PAGE_ALLOC_FLAG,
			   VIRTIO_BALLOON_FREE_PAGE_ORDER);
	/*
	 * When the allocation returns NULL, it indicates that we have got all
	 * the possible free pages, so return -EINTR to stop.
	 */
	if (!page)
		return -EINTR;

	p = page_address(page);
	sg_init_one(&sg, p, VIRTIO_BALLOON_FREE_PAGE_SIZE);
	/* There is always 1 entry reserved for the cmd id to use. */
	if (vq->num_free > 1) {
		err = virtqueue_add_inbuf(vq, &sg, 1, p, GFP_KERNEL);
		if (unlikely(err)) {
			free_pages((unsigned long)p,
				   VIRTIO_BALLOON_FREE_PAGE_ORDER);
			return err;
		}
		virtqueue_kick(vq);
		spin_lock_irq(&vb->free_page_list_lock);
		balloon_page_push(&vb->free_page_list, page);
		vb->num_free_page_blocks++;
		spin_unlock_irq(&vb->free_page_list_lock);
	} else {
		/*
		 * The vq has no available entry to add this page block, so
		 * just free it.
		 */
		free_pages((unsigned long)p, VIRTIO_BALLOON_FREE_PAGE_ORDER);
	}

	return 0;
}
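
/*
 * How the hinting loop drains free memory: get_free_page_and_send() grabs
 * MAX_ORDER - 1 blocks with __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN,
 * so the allocation simply returns NULL (mapped to -EINTR above) once the
 * buddy allocator runs out of free blocks, instead of triggering reclaim.
 * Every block handed to the host is parked on free_page_list and is only
 * given back to the mm via return_free_pages_to_mm(), either when the host
 * signals VIRTIO_BALLOON_CMD_ID_DONE or when the shrinker needs memory.
 */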

static int send_free_pages(struct virtio_balloon *vb)
{
	int err;
	u32 cmd_id_active;

	while (1) {
		/*
		 * If a stop id or a new cmd id was just received from host,
		 * stop the reporting.
		 */
		cmd_id_active = virtio32_to_cpu(vb->vdev, vb->cmd_id_active);
		if (unlikely(cmd_id_active !=
			     virtio_balloon_cmd_id_received(vb)))
			break;

		/*
		 * The free page blocks are allocated and sent to host one by
		 * one.
		 */
		err = get_free_page_and_send(vb);
		if (err == -EINTR)
			break;
		else if (unlikely(err))
			return err;
	}

	return 0;
}

static void virtio_balloon_report_free_page(struct virtio_balloon *vb)
{
	int err;
	struct device *dev = &vb->vdev->dev;

	/* Start by sending the received cmd id to host with an outbuf. */
	err = send_cmd_id_start(vb);
	if (unlikely(err))
		dev_err(dev, "Failed to send a start id, err = %d\n", err);

	err = send_free_pages(vb);
	if (unlikely(err))
		dev_err(dev, "Failed to send a free page, err = %d\n", err);

	/* End by sending a stop id to host with an outbuf. */
	err = send_cmd_id_stop(vb);
	if (unlikely(err))
		dev_err(dev, "Failed to send a stop id, err = %d\n", err);
}

static void report_free_page_func(struct work_struct *work)
{
	struct virtio_balloon *vb = container_of(work, struct virtio_balloon,
						 report_free_page_work);
	u32 cmd_id_received;

	cmd_id_received = virtio_balloon_cmd_id_received(vb);
	if (cmd_id_received == VIRTIO_BALLOON_CMD_ID_DONE) {
		/* Pass ULONG_MAX to give back all the free pages */
		return_free_pages_to_mm(vb, ULONG_MAX);
	} else if (cmd_id_received != VIRTIO_BALLOON_CMD_ID_STOP &&
		   cmd_id_received !=
		   virtio32_to_cpu(vb->vdev, vb->cmd_id_active)) {
		virtio_balloon_report_free_page(vb);
	}
}
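
/*
 * Free page hinting protocol, end to end: the host bumps
 * free_page_report_cmd_id in the config space, which raises a config
 * interrupt and queues report_free_page_work.  The driver echoes the new
 * cmd id on the free page vq (send_cmd_id_start), streams free page
 * blocks as inbufs, and finishes with cmd_id_stop.  A host write of
 * VIRTIO_BALLOON_CMD_ID_STOP aborts an in-flight report, and
 * VIRTIO_BALLOON_CMD_ID_DONE tells the driver the hinted pages are no
 * longer needed and can be returned to the mm.
 */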

#ifdef CONFIG_BALLOON_COMPACTION
/*
 * virtballoon_migratepage - perform the balloon page migration on behalf of
 *			     a compaction thread.       (called under page lock)
 * @vb_dev_info: the balloon device
 * @newpage: page that will replace the isolated page after migration finishes.
 * @page   : the isolated (old) page that is about to be migrated to newpage.
 * @mode   : compaction mode -- not used for balloon page migration.
 *
 * After a ballooned page gets isolated by compaction procedures, this is the
 * function that performs the page migration on behalf of a compaction thread.
 * The page migration for virtio balloon is done in a simple swap fashion which
 * follows these two macro steps:
 *  1) insert newpage into vb->pages list and update the host about it;
 *  2) update the host about the old page removed from vb->pages list;
 *
 * This function performs the balloon page migration task.
 * Called through balloon_mapping->a_ops->migratepage
 */
static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info,
		struct page *newpage, struct page *page, enum migrate_mode mode)
{
	struct virtio_balloon *vb = container_of(vb_dev_info,
			struct virtio_balloon, vb_dev_info);
	unsigned long flags;

	/*
	 * In order to avoid lock contention while migrating pages concurrently
	 * to leak_balloon() or fill_balloon() we just give up the balloon_lock
	 * this turn, as it is easier to retry the page migration later.
	 * This also prevents fill_balloon() getting stuck into a mutex
	 * recursion in the case it ends up triggering memory compaction
	 * while it is attempting to inflate the balloon.
	 */
	if (!mutex_trylock(&vb->balloon_lock))
		return -EAGAIN;

	get_page(newpage); /* balloon reference */

	/* balloon's page migration 1st step  -- inflate "newpage" */
	spin_lock_irqsave(&vb_dev_info->pages_lock, flags);
	balloon_page_insert(vb_dev_info, newpage);
	vb_dev_info->isolated_pages--;
	__count_vm_event(BALLOON_MIGRATE);
	spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags);
	vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
	set_page_pfns(vb, vb->pfns, newpage);
	tell_host(vb, vb->inflate_vq);

	/* balloon's page migration 2nd step -- deflate "page" */
	spin_lock_irqsave(&vb_dev_info->pages_lock, flags);
	balloon_page_delete(page);
	spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags);
	vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
	set_page_pfns(vb, vb->pfns, page);
	tell_host(vb, vb->deflate_vq);

	mutex_unlock(&vb->balloon_lock);

	put_page(page); /* balloon reference */

	return MIGRATEPAGE_SUCCESS;
}

static int balloon_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, BALLOON_KVM_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type balloon_fs = {
	.name		= "balloon-kvm",
	.init_fs_context = balloon_init_fs_context,
	.kill_sb	= kill_anon_super,
};

#endif /* CONFIG_BALLOON_COMPACTION */
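
/*
 * Migration cost note: each migrated balloon page costs two synchronous
 * tell_host() round trips (inflate "newpage", then deflate the old page),
 * done under balloon_lock.  The trylock above lets compaction back off
 * with -EAGAIN and retry later rather than block against a concurrent
 * inflate/deflate pass.
 */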

static unsigned long shrink_free_pages(struct virtio_balloon *vb,
				       unsigned long pages_to_free)
{
	unsigned long blocks_to_free, blocks_freed;

	pages_to_free = round_up(pages_to_free,
				 1 << VIRTIO_BALLOON_FREE_PAGE_ORDER);
	blocks_to_free = pages_to_free >> VIRTIO_BALLOON_FREE_PAGE_ORDER;
	blocks_freed = return_free_pages_to_mm(vb, blocks_to_free);

	return blocks_freed << VIRTIO_BALLOON_FREE_PAGE_ORDER;
}

static unsigned long shrink_balloon_pages(struct virtio_balloon *vb,
					  unsigned long pages_to_free)
{
	unsigned long pages_freed = 0;

	/*
	 * One invocation of leak_balloon can deflate at most
	 * VIRTIO_BALLOON_ARRAY_PFNS_MAX balloon pages, so we call it
	 * multiple times to deflate pages till reaching pages_to_free.
	 */
	while (vb->num_pages && pages_freed < pages_to_free) {
		unsigned long freed;

		freed = leak_balloon(vb, pages_to_free - pages_freed) /
					VIRTIO_BALLOON_PAGES_PER_PAGE;
		if (!freed)
			break;
		pages_freed += freed;
	}
	update_balloon_size(vb);

	return pages_freed;
}

static unsigned long virtio_balloon_shrinker_scan(struct shrinker *shrinker,
						  struct shrink_control *sc)
{
	unsigned long pages_to_free, pages_freed = 0;
	struct virtio_balloon *vb = container_of(shrinker,
					struct virtio_balloon, shrinker);

	pages_to_free = sc->nr_to_scan * VIRTIO_BALLOON_PAGES_PER_PAGE;

	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
		pages_freed = shrink_free_pages(vb, pages_to_free);

	if (pages_freed >= pages_to_free)
		return pages_freed;

	pages_freed += shrink_balloon_pages(vb, pages_to_free - pages_freed);

	return pages_freed;
}

static unsigned long virtio_balloon_shrinker_count(struct shrinker *shrinker,
						   struct shrink_control *sc)
{
	struct virtio_balloon *vb = container_of(shrinker,
					struct virtio_balloon, shrinker);
	unsigned long count;

	count = vb->num_pages / VIRTIO_BALLOON_PAGES_PER_PAGE;
	/* Each free page block holds 2^ORDER pages, so shift left. */
	count += vb->num_free_page_blocks << VIRTIO_BALLOON_FREE_PAGE_ORDER;

	return count;
}

static void virtio_balloon_unregister_shrinker(struct virtio_balloon *vb)
{
	unregister_shrinker(&vb->shrinker);
}

static int virtio_balloon_register_shrinker(struct virtio_balloon *vb)
{
	vb->shrinker.scan_objects = virtio_balloon_shrinker_scan;
	vb->shrinker.count_objects = virtio_balloon_shrinker_count;
	vb->shrinker.seeks = DEFAULT_SEEKS;

	return register_shrinker(&vb->shrinker);
}
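
/*
 * Shrinker units, with an example (4KB pages assumed): count_objects
 * reports freeable memory in Linux-page units, so one hinted free page
 * block of order 10 counts as 1024 pages, and scan_objects converts
 * sc->nr_to_scan into balloon page units before deflating.  Hinted blocks
 * are reclaimed first since giving them back is cheap (no host round
 * trip); only then is the balloon itself deflated.
 */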

static int virtballoon_probe(struct virtio_device *vdev)
{
	struct virtio_balloon *vb;
	__u32 poison_val;
	int err;

	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	vdev->priv = vb = kzalloc(sizeof(*vb), GFP_KERNEL);
	if (!vb) {
		err = -ENOMEM;
		goto out;
	}

	INIT_WORK(&vb->update_balloon_stats_work, update_balloon_stats_func);
	INIT_WORK(&vb->update_balloon_size_work, update_balloon_size_func);
	spin_lock_init(&vb->stop_update_lock);
	mutex_init(&vb->balloon_lock);
	init_waitqueue_head(&vb->acked);
	vb->vdev = vdev;

	balloon_devinfo_init(&vb->vb_dev_info);

	err = init_vqs(vb);
	if (err)
		goto out_free_vb;

#ifdef CONFIG_BALLOON_COMPACTION
	balloon_mnt = kern_mount(&balloon_fs);
	if (IS_ERR(balloon_mnt)) {
		err = PTR_ERR(balloon_mnt);
		goto out_del_vqs;
	}

	vb->vb_dev_info.migratepage = virtballoon_migratepage;
	vb->vb_dev_info.inode = alloc_anon_inode(balloon_mnt->mnt_sb);
	if (IS_ERR(vb->vb_dev_info.inode)) {
		err = PTR_ERR(vb->vb_dev_info.inode);
		kern_unmount(balloon_mnt);
		goto out_del_vqs;
	}
	vb->vb_dev_info.inode->i_mapping->a_ops = &balloon_aops;
#endif
	if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) {
		/*
		 * There is always one entry reserved for cmd id, so the ring
		 * size needs to be at least two to report free page hints.
		 */
		if (virtqueue_get_vring_size(vb->free_page_vq) < 2) {
			err = -ENOSPC;
			goto out_del_vqs;
		}
		vb->balloon_wq = alloc_workqueue("balloon-wq",
					WQ_FREEZABLE | WQ_CPU_INTENSIVE, 0);
		if (!vb->balloon_wq) {
			err = -ENOMEM;
			goto out_del_vqs;
		}
		INIT_WORK(&vb->report_free_page_work, report_free_page_func);
		vb->cmd_id_received_cache = VIRTIO_BALLOON_CMD_ID_STOP;
		vb->cmd_id_active = cpu_to_virtio32(vb->vdev,
						  VIRTIO_BALLOON_CMD_ID_STOP);
		vb->cmd_id_stop = cpu_to_virtio32(vb->vdev,
						  VIRTIO_BALLOON_CMD_ID_STOP);
		spin_lock_init(&vb->free_page_list_lock);
		INIT_LIST_HEAD(&vb->free_page_list);
		if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_PAGE_POISON)) {
			memset(&poison_val, PAGE_POISON, sizeof(poison_val));
			virtio_cwrite(vb->vdev, struct virtio_balloon_config,
				      poison_val, &poison_val);
		}
	}
	/*
	 * We continue to use VIRTIO_BALLOON_F_DEFLATE_ON_OOM to decide if a
	 * shrinker needs to be registered to relieve memory pressure.
	 */
	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM)) {
		err = virtio_balloon_register_shrinker(vb);
		if (err)
			goto out_del_balloon_wq;
	}
	virtio_device_ready(vdev);

	if (towards_target(vb))
		virtballoon_changed(vdev);
	return 0;

out_del_balloon_wq:
	if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
		destroy_workqueue(vb->balloon_wq);
out_del_vqs:
	vdev->config->del_vqs(vdev);
out_free_vb:
	kfree(vb);
out:
	return err;
}

static void remove_common(struct virtio_balloon *vb)
{
	/* There might be pages left in the balloon: free them. */
	while (vb->num_pages)
		leak_balloon(vb, vb->num_pages);
	update_balloon_size(vb);

	/* Now we reset the device so we can clean up the queues. */
	vb->vdev->config->reset(vb->vdev);

	vb->vdev->config->del_vqs(vb->vdev);
}

static void virtballoon_remove(struct virtio_device *vdev)
{
	struct virtio_balloon *vb = vdev->priv;

	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
		virtio_balloon_unregister_shrinker(vb);
	spin_lock_irq(&vb->stop_update_lock);
	vb->stop_update = true;
	spin_unlock_irq(&vb->stop_update_lock);
	cancel_work_sync(&vb->update_balloon_size_work);
	cancel_work_sync(&vb->update_balloon_stats_work);

	if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) {
		cancel_work_sync(&vb->report_free_page_work);
		destroy_workqueue(vb->balloon_wq);
	}

	remove_common(vb);
#ifdef CONFIG_BALLOON_COMPACTION
	if (vb->vb_dev_info.inode)
		iput(vb->vb_dev_info.inode);

	kern_unmount(balloon_mnt);
#endif
	kfree(vb);
}
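
/*
 * Teardown ordering in virtballoon_remove() above matters: stop_update is
 * raised under stop_update_lock first, so the config-changed and stats
 * callbacks can no longer queue new work; the already-queued work items
 * are then flushed with cancel_work_sync() before the balloon is drained
 * and the device is reset.  Reversing these steps could let a late
 * callback touch virtqueues that have already been deleted.
 */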

#ifdef CONFIG_PM_SLEEP
static int virtballoon_freeze(struct virtio_device *vdev)
{
	struct virtio_balloon *vb = vdev->priv;

	/*
	 * The workqueue is already frozen by the PM core before this
	 * function is called.
	 */
	remove_common(vb);
	return 0;
}

static int virtballoon_restore(struct virtio_device *vdev)
{
	struct virtio_balloon *vb = vdev->priv;
	int ret;

	ret = init_vqs(vdev->priv);
	if (ret)
		return ret;

	virtio_device_ready(vdev);

	if (towards_target(vb))
		virtballoon_changed(vdev);
	update_balloon_size(vb);
	return 0;
}
#endif

static int virtballoon_validate(struct virtio_device *vdev)
{
	if (!page_poisoning_enabled())
		__virtio_clear_bit(vdev, VIRTIO_BALLOON_F_PAGE_POISON);

	__virtio_clear_bit(vdev, VIRTIO_F_IOMMU_PLATFORM);
	return 0;
}

static unsigned int features[] = {
	VIRTIO_BALLOON_F_MUST_TELL_HOST,
	VIRTIO_BALLOON_F_STATS_VQ,
	VIRTIO_BALLOON_F_DEFLATE_ON_OOM,
	VIRTIO_BALLOON_F_FREE_PAGE_HINT,
	VIRTIO_BALLOON_F_PAGE_POISON,
};

static struct virtio_driver virtio_balloon_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.validate = virtballoon_validate,
	.probe = virtballoon_probe,
	.remove = virtballoon_remove,
	.config_changed = virtballoon_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtballoon_freeze,
	.restore = virtballoon_restore,
#endif
};

module_virtio_driver(virtio_balloon_driver);
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio balloon driver");
MODULE_LICENSE("GPL");