// SPDX-License-Identifier: GPL-2.0-only
/*
 * z3fold.c
 *
 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
 * Copyright (C) 2016, Sony Mobile Communications Inc.
 *
 * This implementation is based on zbud written by Seth Jennings.
 *
 * z3fold is a special purpose allocator for storing compressed pages. It
 * can store up to three compressed pages per page which improves the
 * compression ratio of zbud while retaining its main concepts (e.g. always
 * storing an integral number of objects per page) and simplicity.
 * It still has simple and deterministic reclaim properties that make it
 * preferable to a higher density approach (with no requirement on integral
 * number of objects per page) when reclaim is used.
 *
 * As in zbud, pages are divided into "chunks". The size of the chunks is
 * fixed at compile time and is determined by NCHUNKS_ORDER below.
 *
 * z3fold doesn't export any API and is meant to be used via zpool API.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/page-flags.h>
#include <linux/migrate.h>
#include <linux/node.h>
#include <linux/compaction.h>
#include <linux/percpu.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/fs.h>
#include <linux/preempt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zpool.h>
#include <linux/magic.h>

/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation. It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
 * at the beginning of an allocated page are occupied by the z3fold header, so
 * NCHUNKS will be calculated to 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y),
 * which is the maximum number of free chunks in a z3fold page; there will
 * also be 63 (or 62, respectively) freelists per pool.
 */
#define NCHUNKS_ORDER	6

#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
#define ZHDR_CHUNKS	(ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
#define TOTAL_CHUNKS	(PAGE_SIZE >> CHUNK_SHIFT)
#define NCHUNKS		((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)

#define BUDDY_MASK	(0x3)
#define BUDDY_SHIFT	2
#define SLOTS_ALIGN	(0x40)

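/*
 * Worked example of the geometry above (a sketch, assuming PAGE_SIZE == 4096
 * and NCHUNKS_ORDER == 6; other configurations scale accordingly):
 *
 *   CHUNK_SHIFT  = 12 - 6 = 6, so CHUNK_SIZE = 64 bytes
 *   TOTAL_CHUNKS = 4096 >> 6 = 64 chunks per page
 *
 * If struct z3fold_header rounds up to one 64-byte chunk, ZHDR_CHUNKS == 1
 * and NCHUNKS == 63; a bigger header (e.g. with CONFIG_DEBUG_SPINLOCK=y)
 * takes two chunks, leaving NCHUNKS == 62.
 */
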
/*****************
 * Structures
 *****************/
struct z3fold_pool;
struct z3fold_ops {
	int (*evict)(struct z3fold_pool *pool, unsigned long handle);
};

enum buddy {
	HEADLESS = 0,
	FIRST,
	MIDDLE,
	LAST,
	BUDDIES_MAX = LAST
};

struct z3fold_buddy_slots {
	/*
	 * we are using BUDDY_MASK in handle_to_buddy etc. so there should
	 * be enough slots to hold all possible variants
	 */
	unsigned long slot[BUDDY_MASK + 1];
	unsigned long pool; /* back link + flags */
};
#define HANDLE_FLAG_MASK	(0x03)

/*
 * struct z3fold_header - z3fold page metadata occupying first chunks of each
 *			z3fold page, except for HEADLESS pages
 * @buddy:		links the z3fold page into the relevant list in the
 *			pool
 * @page_lock:		per-page lock
 * @refcount:		reference count for the z3fold page
 * @work:		work_struct for page layout optimization
 * @slots:		pointer to the structure holding buddy slots
 * @pool:		pointer to the containing pool
 * @cpu:		CPU which this page "belongs" to
 * @first_chunks:	the size of the first buddy in chunks, 0 if free
 * @middle_chunks:	the size of the middle buddy in chunks, 0 if free
 * @last_chunks:	the size of the last buddy in chunks, 0 if free
 * @start_middle:	the starting chunk of the middle buddy
 * @first_num:		the starting number (for the first handle)
 * @mapped_count:	the number of objects currently mapped
 */
struct z3fold_header {
	struct list_head buddy;
	spinlock_t page_lock;
	struct kref refcount;
	struct work_struct work;
	struct z3fold_buddy_slots *slots;
	struct z3fold_pool *pool;
	short cpu;
	unsigned short first_chunks;
	unsigned short middle_chunks;
	unsigned short last_chunks;
	unsigned short start_middle;
	unsigned short first_num:2;
	unsigned short mapped_count:2;
};

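/*
 * Illustrative layout of a non-HEADLESS z3fold page (a sketch; the middle
 * buddy "floats" and is placed or compacted at runtime):
 *
 *   +--------+-------+------+--------+------+------+
 *   | header | first | free | middle | free | last |
 *   +--------+-------+------+--------+------+------+
 *   chunk 0           ^ start_middle          page end
 *
 * The first buddy grows from just after the header, the last buddy is packed
 * against the end of the page, and a HEADLESS page carries a single object
 * spanning the whole page with no z3fold_header at all.
 */
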
/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @name:	pool name
 * @lock:	protects pool unbuddied/lru lists
 * @stale_lock:	protects pool stale page list
 * @unbuddied:	per-cpu array of lists tracking z3fold pages that contain 2-
 *		buddies; the list each z3fold page is added to depends on
 *		the size of its free region.
 * @lru:	list tracking the z3fold pages in LRU order by most recently
 *		added buddy.
 * @stale:	list of pages marked for freeing
 * @pages_nr:	number of z3fold pages in the pool.
 * @c_handle:	cache for z3fold_buddy_slots allocation
 * @ops:	pointer to a structure of user defined operations specified at
 *		pool creation time.
 * @zpool:	zpool driver back-reference
 * @zpool_ops:	zpool operations structure with an evict callback
 * @compact_wq:	workqueue for page layout background optimization
 * @release_wq:	workqueue for safe page release
 * @work:	work_struct for safe page release
 * @inode:	inode for z3fold pseudo filesystem
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
struct z3fold_pool {
	const char *name;
	spinlock_t lock;
	spinlock_t stale_lock;
	struct list_head *unbuddied;
	struct list_head lru;
	struct list_head stale;
	atomic64_t pages_nr;
	struct kmem_cache *c_handle;
	const struct z3fold_ops *ops;
	struct zpool *zpool;
	const struct zpool_ops *zpool_ops;
	struct workqueue_struct *compact_wq;
	struct workqueue_struct *release_wq;
	struct work_struct work;
	struct inode *inode;
};

/*
 * Internal z3fold page flags
 */
enum z3fold_page_flags {
	PAGE_HEADLESS = 0,
	MIDDLE_CHUNK_MAPPED,
	NEEDS_COMPACTING,
	PAGE_STALE,
	PAGE_CLAIMED, /* by either reclaim or free */
};

/*****************
 * Helpers
 *****************/

/* Converts an allocation size in bytes to size in z3fold chunks */
static int size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}

#define for_each_unbuddied_list(_iter, _begin) \
	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)

static void compact_page_work(struct work_struct *w);

static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
							gfp_t gfp)
{
	struct z3fold_buddy_slots *slots;

	slots = kmem_cache_alloc(pool->c_handle,
				 (gfp & ~(__GFP_HIGHMEM | __GFP_MOVABLE)));

	if (slots) {
		memset(slots->slot, 0, sizeof(slots->slot));
		slots->pool = (unsigned long)pool;
	}

	return slots;
}

static inline struct z3fold_pool *slots_to_pool(struct z3fold_buddy_slots *s)
{
	return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK);
}

static inline struct z3fold_buddy_slots *handle_to_slots(unsigned long handle)
{
	return (struct z3fold_buddy_slots *)(handle & ~(SLOTS_ALIGN - 1));
}

static inline void free_handle(unsigned long handle)
{
	struct z3fold_buddy_slots *slots;
	int i;
	bool is_free;

	if (handle & (1 << PAGE_HEADLESS))
		return;

	WARN_ON(*(unsigned long *)handle == 0);
	*(unsigned long *)handle = 0;
	slots = handle_to_slots(handle);
	is_free = true;
	for (i = 0; i <= BUDDY_MASK; i++) {
		if (slots->slot[i]) {
			is_free = false;
			break;
		}
	}

	if (is_free) {
		struct z3fold_pool *pool = slots_to_pool(slots);

		kmem_cache_free(pool->c_handle, slots);
	}
}

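/*
 * Worked example for the slot indirection above (made-up address, assuming
 * 64-bit pointers): slots are allocated SLOTS_ALIGN (0x40) aligned, so for a
 * z3fold_buddy_slots at 0xffff888012345640, a handle pointing at slot[2] has
 * the value 0xffff888012345650, and handle_to_slots() recovers the container
 * by masking: 0xffff888012345650 & ~0x3f == 0xffff888012345640.
 */
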
static int z3fold_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, Z3FOLD_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type z3fold_fs = {
	.name		= "z3fold",
	.init_fs_context = z3fold_init_fs_context,
	.kill_sb	= kill_anon_super,
};

static struct vfsmount *z3fold_mnt;
static int z3fold_mount(void)
{
	int ret = 0;

	z3fold_mnt = kern_mount(&z3fold_fs);
	if (IS_ERR(z3fold_mnt))
		ret = PTR_ERR(z3fold_mnt);

	return ret;
}

static void z3fold_unmount(void)
{
	kern_unmount(z3fold_mnt);
}

static const struct address_space_operations z3fold_aops;
static int z3fold_register_migration(struct z3fold_pool *pool)
{
	pool->inode = alloc_anon_inode(z3fold_mnt->mnt_sb);
	if (IS_ERR(pool->inode)) {
		pool->inode = NULL;
		return 1;
	}

	pool->inode->i_mapping->private_data = pool;
	pool->inode->i_mapping->a_ops = &z3fold_aops;
	return 0;
}

static void z3fold_unregister_migration(struct z3fold_pool *pool)
{
	if (pool->inode)
		iput(pool->inode);
}

/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page,
					struct z3fold_pool *pool, gfp_t gfp)
{
	struct z3fold_header *zhdr = page_address(page);
	struct z3fold_buddy_slots *slots = alloc_slots(pool, gfp);

	if (!slots)
		return NULL;

	INIT_LIST_HEAD(&page->lru);
	clear_bit(PAGE_HEADLESS, &page->private);
	clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	clear_bit(PAGE_STALE, &page->private);
	clear_bit(PAGE_CLAIMED, &page->private);

	spin_lock_init(&zhdr->page_lock);
	kref_init(&zhdr->refcount);
	zhdr->first_chunks = 0;
	zhdr->middle_chunks = 0;
	zhdr->last_chunks = 0;
	zhdr->first_num = 0;
	zhdr->start_middle = 0;
	zhdr->cpu = -1;
	zhdr->slots = slots;
	zhdr->pool = pool;
	INIT_LIST_HEAD(&zhdr->buddy);
	INIT_WORK(&zhdr->work, compact_page_work);
	return zhdr;
}

/* Resets the struct page fields and frees the page */
static void free_z3fold_page(struct page *page, bool headless)
{
	if (!headless) {
		lock_page(page);
		__ClearPageMovable(page);
		unlock_page(page);
	}
	ClearPagePrivate(page);
	__free_page(page);
}

/* Lock a z3fold page */
static inline void z3fold_page_lock(struct z3fold_header *zhdr)
{
	spin_lock(&zhdr->page_lock);
}

/* Try to lock a z3fold page */
static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
{
	return spin_trylock(&zhdr->page_lock);
}

/* Unlock a z3fold page */
static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
{
	spin_unlock(&zhdr->page_lock);
}

/* Helper function to build the index */
static inline int __idx(struct z3fold_header *zhdr, enum buddy bud)
{
	return (bud + zhdr->first_num) & BUDDY_MASK;
}

/*
 * Encodes the handle of a particular buddy within a z3fold page
 * Pool lock should be held as this function accesses first_num
 */
static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
{
	struct z3fold_buddy_slots *slots;
	unsigned long h = (unsigned long)zhdr;
	int idx = 0;

	/*
	 * For a headless page, its handle is its pointer with the extra
	 * PAGE_HEADLESS bit set
	 */
	if (bud == HEADLESS)
		return h | (1 << PAGE_HEADLESS);

	/* otherwise, return pointer to encoded handle */
	idx = __idx(zhdr, bud);
	h += idx;
	if (bud == LAST)
		h |= (zhdr->last_chunks << BUDDY_SHIFT);

	slots = zhdr->slots;
	slots->slot[idx] = h;
	return (unsigned long)&slots->slot[idx];
}

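/*
 * Encoding sketch (assumed values, for illustration only): for a LAST buddy
 * of 10 chunks in a page whose header is at 0xffff888012340000 and whose
 * first_num is 1, __idx() yields (LAST + 1) & BUDDY_MASK == 0, so slot[0]
 * stores 0xffff888012340000 | (10 << BUDDY_SHIFT) and the returned handle is
 * the address of slot[0] itself. The sub-page bits of the stored value thus
 * carry the buddy index and, for LAST buddies, the object size in chunks.
 */
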
/* Returns the z3fold page where a given handle is stored */
static inline struct z3fold_header *handle_to_z3fold_header(unsigned long h)
{
	unsigned long addr = h;

	if (!(addr & (1 << PAGE_HEADLESS)))
		addr = *(unsigned long *)h;

	return (struct z3fold_header *)(addr & PAGE_MASK);
}

/* only for LAST bud, returns zero otherwise */
static unsigned short handle_to_chunks(unsigned long handle)
{
	unsigned long addr = *(unsigned long *)handle;

	return (addr & ~PAGE_MASK) >> BUDDY_SHIFT;
}

/*
 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle
 * but that doesn't matter because the masking will result in the
 * correct buddy number.
 */
static enum buddy handle_to_buddy(unsigned long handle)
{
	struct z3fold_header *zhdr;
	unsigned long addr;

	WARN_ON(handle & (1 << PAGE_HEADLESS));
	addr = *(unsigned long *)handle;
	zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
	return (addr - zhdr->first_num) & BUDDY_MASK;
}

static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
{
	return zhdr->pool;
}

static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
{
	struct page *page = virt_to_page(zhdr);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);

	WARN_ON(!list_empty(&zhdr->buddy));
	set_bit(PAGE_STALE, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	spin_lock(&pool->lock);
	if (!list_empty(&page->lru))
		list_del_init(&page->lru);
	spin_unlock(&pool->lock);
	if (locked)
		z3fold_page_unlock(zhdr);
	spin_lock(&pool->stale_lock);
	list_add(&zhdr->buddy, &pool->stale);
	queue_work(pool->release_wq, &pool->work);
	spin_unlock(&pool->stale_lock);
}

static void __attribute__((__unused__))
			release_z3fold_page(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	__release_z3fold_page(zhdr, false);
}

static void release_z3fold_page_locked(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void release_z3fold_page_locked_list(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void free_pages_work(struct work_struct *w)
{
	struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);

	spin_lock(&pool->stale_lock);
	while (!list_empty(&pool->stale)) {
		struct z3fold_header *zhdr = list_first_entry(&pool->stale,
						struct z3fold_header, buddy);
		struct page *page = virt_to_page(zhdr);

		list_del(&zhdr->buddy);
		if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
			continue;
		spin_unlock(&pool->stale_lock);
		cancel_work_sync(&zhdr->work);
		free_z3fold_page(page, false);
		cond_resched();
		spin_lock(&pool->stale_lock);
	}
	spin_unlock(&pool->stale_lock);
}

/*
 * Returns the number of free chunks in a z3fold page.
 * NB: can't be used with HEADLESS pages.
 */
static int num_free_chunks(struct z3fold_header *zhdr)
{
	int nfree;
	/*
	 * If there is a middle object, pick up the bigger free space
	 * either before or after it. Otherwise just subtract the number
	 * of chunks occupied by the first and the last objects.
	 */
	if (zhdr->middle_chunks != 0) {
		int nfree_before = zhdr->first_chunks ?
			0 : zhdr->start_middle - ZHDR_CHUNKS;
		int nfree_after = zhdr->last_chunks ?
			0 : TOTAL_CHUNKS -
				(zhdr->start_middle + zhdr->middle_chunks);
		nfree = max(nfree_before, nfree_after);
	} else
		nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
	return nfree;
}

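/*
 * Example of the calculation above (assumed sizes: TOTAL_CHUNKS == 64,
 * ZHDR_CHUNKS == 1): for a middle object of 10 chunks starting at chunk 20
 * with no first and no last object, nfree_before == 20 - 1 == 19 and
 * nfree_after == 64 - (20 + 10) == 34, so 34 is reported: only the larger
 * gap is usable, since an incoming object must be contiguous.
 */
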
/* Add to the appropriate unbuddied list */
static inline void add_to_unbuddied(struct z3fold_pool *pool,
				struct z3fold_header *zhdr)
{
	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
			zhdr->middle_chunks == 0) {
		struct list_head *unbuddied = get_cpu_ptr(pool->unbuddied);

		int freechunks = num_free_chunks(zhdr);
		spin_lock(&pool->lock);
		list_add(&zhdr->buddy, &unbuddied[freechunks]);
		spin_unlock(&pool->lock);
		zhdr->cpu = smp_processor_id();
		put_cpu_ptr(pool->unbuddied);
	}
}

static inline void *mchunk_memmove(struct z3fold_header *zhdr,
				unsigned short dst_chunk)
{
	void *beg = zhdr;
	return memmove(beg + (dst_chunk << CHUNK_SHIFT),
		       beg + (zhdr->start_middle << CHUNK_SHIFT),
		       zhdr->middle_chunks << CHUNK_SHIFT);
}

#define BIG_CHUNK_GAP	3
/* Has to be called with lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
		return 0; /* can't move middle chunk, it's used */

	if (unlikely(PageIsolated(page)))
		return 0;

	if (zhdr->middle_chunks == 0)
		return 0; /* nothing to compact */

	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
		/* move to the beginning */
		mchunk_memmove(zhdr, ZHDR_CHUNKS);
		zhdr->first_chunks = zhdr->middle_chunks;
		zhdr->middle_chunks = 0;
		zhdr->start_middle = 0;
		zhdr->first_num++;
		return 1;
	}

	/*
	 * moving data is expensive, so let's only do that if
	 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
	 */
	if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
	    zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
			BIG_CHUNK_GAP) {
		mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
		return 1;
	} else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
		   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
					+ zhdr->middle_chunks) >=
			BIG_CHUNK_GAP) {
		unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
			zhdr->middle_chunks;
		mchunk_memmove(zhdr, new_start);
		zhdr->start_middle = new_start;
		return 1;
	}

	return 0;
}

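/*
 * Compaction sketch (assumed numbers, ZHDR_CHUNKS == 1): with a first object
 * of 5 chunks and a middle object starting at chunk 12, the gap is
 * 12 - (5 + 1) == 6 >= BIG_CHUNK_GAP, so the middle object is moved down to
 * chunk 6, merging both free regions after it; a gap of only 2 chunks would
 * be left alone, as the memmove() would not pay off.
 */
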
static void do_compact_page(struct z3fold_header *zhdr, bool locked)
{
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	struct page *page;

	page = virt_to_page(zhdr);
	if (locked)
		WARN_ON(z3fold_page_trylock(zhdr));
	else
		z3fold_page_lock(zhdr);
	if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
		z3fold_page_unlock(zhdr);
		return;
	}
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}

	if (unlikely(PageIsolated(page) ||
		     test_bit(PAGE_STALE, &page->private))) {
		z3fold_page_unlock(zhdr);
		return;
	}

	z3fold_compact_page(zhdr);
	add_to_unbuddied(pool, zhdr);
	z3fold_page_unlock(zhdr);
}

static void compact_page_work(struct work_struct *w)
{
	struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
						work);

	do_compact_page(zhdr, false);
}

/* returns _locked_ z3fold page header or NULL */
static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
						size_t size, bool can_sleep)
{
	struct z3fold_header *zhdr = NULL;
	struct page *page;
	struct list_head *unbuddied;
	int chunks = size_to_chunks(size), i;

lookup:
	/* First, try to find an unbuddied z3fold page. */
	unbuddied = get_cpu_ptr(pool->unbuddied);
	for_each_unbuddied_list(i, chunks) {
		struct list_head *l = &unbuddied[i];

		zhdr = list_first_entry_or_null(READ_ONCE(l),
				struct z3fold_header, buddy);

		if (!zhdr)
			continue;

		/* Re-check under lock. */
		spin_lock(&pool->lock);
		l = &unbuddied[i];
		if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
				struct z3fold_header, buddy)) ||
		    !z3fold_page_trylock(zhdr)) {
			spin_unlock(&pool->lock);
			zhdr = NULL;
			put_cpu_ptr(pool->unbuddied);
			if (can_sleep)
				cond_resched();
			goto lookup;
		}
		list_del_init(&zhdr->buddy);
		zhdr->cpu = -1;
		spin_unlock(&pool->lock);

		page = virt_to_page(zhdr);
		if (test_bit(NEEDS_COMPACTING, &page->private)) {
			z3fold_page_unlock(zhdr);
			zhdr = NULL;
			put_cpu_ptr(pool->unbuddied);
			if (can_sleep)
				cond_resched();
			goto lookup;
		}

		/*
		 * this page could not be removed from its unbuddied
		 * list while pool lock was held, and then we've taken
		 * page lock so kref_put could not be called before
		 * we got here, so it's safe to just call kref_get()
		 */
		kref_get(&zhdr->refcount);
		break;
	}
	put_cpu_ptr(pool->unbuddied);

	if (!zhdr) {
		int cpu;

		/* look for _exact_ match on other cpus' lists */
		for_each_online_cpu(cpu) {
			struct list_head *l;

			unbuddied = per_cpu_ptr(pool->unbuddied, cpu);
			spin_lock(&pool->lock);
			l = &unbuddied[chunks];

			zhdr = list_first_entry_or_null(READ_ONCE(l),
					struct z3fold_header, buddy);

			if (!zhdr || !z3fold_page_trylock(zhdr)) {
				spin_unlock(&pool->lock);
				zhdr = NULL;
				continue;
			}
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			spin_unlock(&pool->lock);

			page = virt_to_page(zhdr);
			if (test_bit(NEEDS_COMPACTING, &page->private)) {
				z3fold_page_unlock(zhdr);
				zhdr = NULL;
				if (can_sleep)
					cond_resched();
				continue;
			}
			kref_get(&zhdr->refcount);
			break;
		}
	}

	return zhdr;
}

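/*
 * To summarize the lookup above: first a best-fit scan of this CPU's
 * unbuddied lists starting at the requested size, with the lock-free peek
 * re-checked under the pool lock; failing that, an exact-fit probe of the
 * other CPUs' lists. Only if both fail does z3fold_alloc() below fall back
 * to a stale or freshly allocated page.
 */
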
/*
 * API Functions
 */

/**
 * z3fold_create_pool() - create a new z3fold pool
 * @name:	pool name
 * @gfp:	gfp flags when allocating the z3fold pool structure
 * @ops:	user-defined operations for the z3fold pool
 *
 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 * failed.
 */
static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
		const struct z3fold_ops *ops)
{
	struct z3fold_pool *pool = NULL;
	int i, cpu;

	pool = kzalloc(sizeof(struct z3fold_pool), gfp);
	if (!pool)
		goto out;
	pool->c_handle = kmem_cache_create("z3fold_handle",
				sizeof(struct z3fold_buddy_slots),
				SLOTS_ALIGN, 0, NULL);
	if (!pool->c_handle)
		goto out_c;
	spin_lock_init(&pool->lock);
	spin_lock_init(&pool->stale_lock);
	pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS, 2);
	if (!pool->unbuddied)
		goto out_pool;
	for_each_possible_cpu(cpu) {
		struct list_head *unbuddied =
				per_cpu_ptr(pool->unbuddied, cpu);
		for_each_unbuddied_list(i, 0)
			INIT_LIST_HEAD(&unbuddied[i]);
	}
	INIT_LIST_HEAD(&pool->lru);
	INIT_LIST_HEAD(&pool->stale);
	atomic64_set(&pool->pages_nr, 0);
	pool->name = name;
	pool->compact_wq = create_singlethread_workqueue(pool->name);
	if (!pool->compact_wq)
		goto out_unbuddied;
	pool->release_wq = create_singlethread_workqueue(pool->name);
	if (!pool->release_wq)
		goto out_wq;
	if (z3fold_register_migration(pool))
		goto out_rwq;
	INIT_WORK(&pool->work, free_pages_work);
	pool->ops = ops;
	return pool;

out_rwq:
	destroy_workqueue(pool->release_wq);
out_wq:
	destroy_workqueue(pool->compact_wq);
out_unbuddied:
	free_percpu(pool->unbuddied);
out_pool:
	kmem_cache_destroy(pool->c_handle);
out_c:
	kfree(pool);
out:
	return NULL;
}

/**
 * z3fold_destroy_pool() - destroys an existing z3fold pool
 * @pool:	the z3fold pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
	kmem_cache_destroy(pool->c_handle);

	/*
	 * We need to destroy pool->compact_wq before pool->release_wq,
	 * as any pending work on pool->compact_wq will call
	 * queue_work(pool->release_wq, &pool->work).
	 *
	 * There are still outstanding pages until both workqueues are drained,
	 * so we cannot unregister migration until then.
	 */

	destroy_workqueue(pool->compact_wq);
	destroy_workqueue(pool->release_wq);
	z3fold_unregister_migration(pool);
	kfree(pool);
}

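/*
 * Typical pool life cycle, as a sketch (hypothetical caller with error
 * handling elided; in-tree users normally go through the zpool API at the
 * bottom of this file instead of calling these functions directly):
 *
 *	pool = z3fold_create_pool("test", GFP_KERNEL, &my_evict_ops);
 *	ret  = z3fold_alloc(pool, size, GFP_KERNEL, &handle);
 *	obj  = z3fold_map(pool, handle);
 *	...
 *	z3fold_unmap(pool, handle);
 *	z3fold_free(pool, handle);
 *	z3fold_destroy_pool(pool);
 */
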
/**
 * z3fold_alloc() - allocates a region of a given size
 * @pool:	z3fold pool from which to allocate
 * @size:	size in bytes of the desired allocation
 * @gfp:	gfp flags used if the pool needs to grow
 * @handle:	handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough to
 * satisfy the allocation request. A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
 * as z3fold pool pages.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
 * a new page.
 */
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	int chunks = size_to_chunks(size);
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	enum buddy bud;
	bool can_sleep = gfpflags_allow_blocking(gfp);

	if (!size)
		return -EINVAL;

	if (size > PAGE_SIZE)
		return -ENOSPC;

	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
		bud = HEADLESS;
	else {
retry:
		zhdr = __z3fold_alloc(pool, size, can_sleep);
		if (zhdr) {
			if (zhdr->first_chunks == 0) {
				if (zhdr->middle_chunks != 0 &&
				    chunks >= zhdr->start_middle)
					bud = LAST;
				else
					bud = FIRST;
			} else if (zhdr->last_chunks == 0)
				bud = LAST;
			else if (zhdr->middle_chunks == 0)
				bud = MIDDLE;
			else {
				if (kref_put(&zhdr->refcount,
					     release_z3fold_page_locked))
					atomic64_dec(&pool->pages_nr);
				else
					z3fold_page_unlock(zhdr);
				pr_err("No free chunks in unbuddied\n");
				WARN_ON(1);
				goto retry;
			}
			page = virt_to_page(zhdr);
			goto found;
		}
		bud = FIRST;
	}

	page = NULL;
	if (can_sleep) {
		spin_lock(&pool->stale_lock);
		zhdr = list_first_entry_or_null(&pool->stale,
						struct z3fold_header, buddy);
		/*
		 * Before allocating a page, let's see if we can take one from
		 * the stale pages list. cancel_work_sync() can sleep so we
		 * limit this case to the contexts where we can sleep
		 */
		if (zhdr) {
			list_del(&zhdr->buddy);
			spin_unlock(&pool->stale_lock);
			cancel_work_sync(&zhdr->work);
			page = virt_to_page(zhdr);
		} else {
			spin_unlock(&pool->stale_lock);
		}
	}
	if (!page)
		page = alloc_page(gfp);

	if (!page)
		return -ENOMEM;

	zhdr = init_z3fold_page(page, pool, gfp);
	if (!zhdr) {
		__free_page(page);
		return -ENOMEM;
	}
	atomic64_inc(&pool->pages_nr);

	if (bud == HEADLESS) {
		set_bit(PAGE_HEADLESS, &page->private);
		goto headless;
	}
	if (can_sleep) {
		lock_page(page);
		__SetPageMovable(page, pool->inode->i_mapping);
		unlock_page(page);
	} else {
		if (trylock_page(page)) {
			__SetPageMovable(page, pool->inode->i_mapping);
			unlock_page(page);
		}
	}
	z3fold_page_lock(zhdr);

found:
	if (bud == FIRST)
		zhdr->first_chunks = chunks;
	else if (bud == LAST)
		zhdr->last_chunks = chunks;
	else {
		zhdr->middle_chunks = chunks;
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
	}
	add_to_unbuddied(pool, zhdr);

headless:
	spin_lock(&pool->lock);
	/* Add/move z3fold page to beginning of LRU */
	if (!list_empty(&page->lru))
		list_del(&page->lru);

	list_add(&page->lru, &pool->lru);

	*handle = encode_handle(zhdr, bud);
	spin_unlock(&pool->lock);
	if (bud != HEADLESS)
		z3fold_page_unlock(zhdr);

	return 0;
}

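/*
 * The size thresholds above, worked through (assuming 4K pages and a
 * one-chunk header): anything bigger than PAGE_SIZE - ZHDR_SIZE_ALIGNED -
 * CHUNK_SIZE == 4096 - 64 - 64 == 3968 bytes cannot share a page and is
 * stored HEADLESS; e.g. a 3000-byte object (47 chunks) still becomes a
 * regular buddy, while a 4000-byte one gets a page of its own.
 */
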
/**
 * z3fold_free() - frees the allocation associated with the given handle
 * @pool:	pool in which the allocation resided
 * @handle:	handle associated with the allocation returned by z3fold_alloc()
 *
 * In the case that the z3fold page in which the allocation resides is under
 * reclaim, as indicated by the PAGE_CLAIMED flag being set, this function
 * only sets the first|last_chunks to 0. The page is actually freed
 * once both buddies are evicted (see z3fold_reclaim_page() below).
 */
static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy bud;

	zhdr = handle_to_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private)) {
		/* if a headless page is under reclaim, just leave.
		 * NB: we use test_and_set_bit for a reason: if the bit
		 * has not been set before, we release this page
		 * immediately so we don't care about its value any more.
		 */
		if (!test_and_set_bit(PAGE_CLAIMED, &page->private)) {
			spin_lock(&pool->lock);
			list_del(&page->lru);
			spin_unlock(&pool->lock);
			free_z3fold_page(page, true);
			atomic64_dec(&pool->pages_nr);
		}
		return;
	}

	/* Non-headless case */
	z3fold_page_lock(zhdr);
	bud = handle_to_buddy(handle);

	switch (bud) {
	case FIRST:
		zhdr->first_chunks = 0;
		break;
	case MIDDLE:
		zhdr->middle_chunks = 0;
		break;
	case LAST:
		zhdr->last_chunks = 0;
		break;
	default:
		pr_err("%s: unknown bud %d\n", __func__, bud);
		WARN_ON(1);
		z3fold_page_unlock(zhdr);
		return;
	}

	free_handle(handle);
	if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}
	if (test_bit(PAGE_CLAIMED, &page->private)) {
		z3fold_page_unlock(zhdr);
		return;
	}
	if (unlikely(PageIsolated(page)) ||
	    test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
		z3fold_page_unlock(zhdr);
		return;
	}
	if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
		spin_lock(&pool->lock);
		list_del_init(&zhdr->buddy);
		spin_unlock(&pool->lock);
		zhdr->cpu = -1;
		kref_get(&zhdr->refcount);
		do_compact_page(zhdr, true);
		return;
	}
	kref_get(&zhdr->refcount);
	queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
	z3fold_page_unlock(zhdr);
}

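/*
 * To summarize the tail of z3fold_free() above: a freed buddy either drops
 * the last reference and releases the page, backs off because reclaim owns
 * the page (PAGE_CLAIMED) or a compaction is already queued
 * (NEEDS_COMPACTING), or schedules do_compact_page() on the page's home CPU.
 */
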
/**
 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
 * @pool:	pool from which a page will attempt to be evicted
 * @retries:	number of pages on the LRU list for which eviction will
 *		be attempted before failing
 *
 * z3fold reclaim is different from normal system reclaim in that it is done
 * from the bottom, up. This is because only the bottom layer, z3fold, has
 * information on how the allocations are organized within each z3fold page.
 * This has the potential to create interesting locking situations between
 * z3fold and the user, however.
 *
 * To avoid these, this is how z3fold_reclaim_page() should be called:
 *
 * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
 * call the user-defined eviction handler with the pool and handle as
 * arguments.
 *
 * If the handle cannot be evicted, the eviction handler should return
 * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
 * appropriate list and try the next z3fold page on the LRU up to
 * a user defined number of retries.
 *
 * If the handle is successfully evicted, the eviction handler should
 * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
 * contains logic to delay freeing the page if the page is under reclaim,
 * as indicated by the PAGE_CLAIMED flag being set on the underlying page.
 *
 * If all buddies in the z3fold page are successfully evicted, then the
 * z3fold page can be freed.
 *
 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
 * no pages to evict or an eviction handler is not registered, -EAGAIN if
 * the retry limit was hit.
 */
static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
{
	int i, ret = 0;
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	struct list_head *pos;
	unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;

	spin_lock(&pool->lock);
	if (!pool->ops || !pool->ops->evict || retries == 0) {
		spin_unlock(&pool->lock);
		return -EINVAL;
	}
	for (i = 0; i < retries; i++) {
		if (list_empty(&pool->lru)) {
			spin_unlock(&pool->lock);
			return -EINVAL;
		}
		list_for_each_prev(pos, &pool->lru) {
			page = list_entry(pos, struct page, lru);

			/* this bit could have been set by free, in which case
			 * we pass over to the next page in the pool.
			 */
			if (test_and_set_bit(PAGE_CLAIMED, &page->private))
				continue;

			if (unlikely(PageIsolated(page)))
				continue;
			/*
			 * set zhdr before the HEADLESS check so that the
			 * "if (!zhdr) break" below doesn't abort reclaim
			 * of headless pages
			 */
			zhdr = page_address(page);
			if (test_bit(PAGE_HEADLESS, &page->private))
				break;

			if (!z3fold_page_trylock(zhdr)) {
				zhdr = NULL;
				continue; /* can't evict at this point */
			}
			kref_get(&zhdr->refcount);
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			break;
		}

		if (!zhdr)
			break;

		list_del_init(&page->lru);
		spin_unlock(&pool->lock);

		if (!test_bit(PAGE_HEADLESS, &page->private)) {
			/*
			 * We need to encode the handles before unlocking,
			 * since we can race with free that will set
			 * (first|last)_chunks to 0
			 */
			first_handle = 0;
			last_handle = 0;
			middle_handle = 0;
			if (zhdr->first_chunks)
				first_handle = encode_handle(zhdr, FIRST);
			if (zhdr->middle_chunks)
				middle_handle = encode_handle(zhdr, MIDDLE);
			if (zhdr->last_chunks)
				last_handle = encode_handle(zhdr, LAST);
			/*
			 * it's safe to unlock here because we hold a
			 * reference to this page
			 */
			z3fold_page_unlock(zhdr);
		} else {
			first_handle = encode_handle(zhdr, HEADLESS);
			last_handle = middle_handle = 0;
		}

		/* Issue the eviction callback(s) */
		if (middle_handle) {
			ret = pool->ops->evict(pool, middle_handle);
			if (ret)
				goto next;
		}
		if (first_handle) {
			ret = pool->ops->evict(pool, first_handle);
			if (ret)
				goto next;
		}
		if (last_handle) {
			ret = pool->ops->evict(pool, last_handle);
			if (ret)
				goto next;
		}
next:
		if (test_bit(PAGE_HEADLESS, &page->private)) {
			if (ret == 0) {
				free_z3fold_page(page, true);
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
		} else {
			z3fold_page_lock(zhdr);
			clear_bit(PAGE_CLAIMED, &page->private);
			if (kref_put(&zhdr->refcount,
					release_z3fold_page_locked)) {
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
			/*
			 * if we are here, the page is still not completely
			 * free. Take the global pool lock then to be able
			 * to add it back to the lru list
			 */
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
			z3fold_page_unlock(zhdr);
		}

		/* We started off locked so we need to lock the pool back */
		spin_lock(&pool->lock);
	}
	spin_unlock(&pool->lock);
	return -EAGAIN;
}

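/*
 * Sketch of an eviction handler honoring the contract above (hypothetical,
 * with a made-up my_writeback() helper; the only in-tree user is the zpool
 * shim at the end of this file, which forwards to the zpool user's evict
 * callback):
 *
 *	static int my_evict(struct z3fold_pool *pool, unsigned long handle)
 *	{
 *		void *obj = z3fold_map(pool, handle);
 *		int err = my_writeback(obj);
 *
 *		z3fold_unmap(pool, handle);
 *		if (err)
 *			return err;	(the page goes back on the LRU)
 *		z3fold_free(pool, handle);
 *		return 0;
 *	}
 */
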
/**
 * z3fold_map() - maps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be mapped
 *
 * Extracts the buddy number from handle and constructs the pointer to the
 * correct starting chunk within the page.
 *
 * Returns: a pointer to the mapped allocation
 */
static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	void *addr;
	enum buddy buddy;

	zhdr = handle_to_z3fold_header(handle);
	addr = zhdr;
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		goto out;

	z3fold_page_lock(zhdr);
	buddy = handle_to_buddy(handle);
	switch (buddy) {
	case FIRST:
		addr += ZHDR_SIZE_ALIGNED;
		break;
	case MIDDLE:
		addr += zhdr->start_middle << CHUNK_SHIFT;
		set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
		break;
	case LAST:
		addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT);
		break;
	default:
		pr_err("unknown buddy id %d\n", buddy);
		WARN_ON(1);
		addr = NULL;
		break;
	}

	if (addr)
		zhdr->mapped_count++;
	z3fold_page_unlock(zhdr);
out:
	return addr;
}

/**
 * z3fold_unmap() - unmaps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be unmapped
 */
static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy buddy;

	zhdr = handle_to_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return;

	z3fold_page_lock(zhdr);
	buddy = handle_to_buddy(handle);
	if (buddy == MIDDLE)
		clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	zhdr->mapped_count--;
	z3fold_page_unlock(zhdr);
}

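/*
 * Address arithmetic above, by example (assumed values: 4K pages, 64-byte
 * chunks): a FIRST object always starts at zhdr + ZHDR_SIZE_ALIGNED; a
 * MIDDLE object starts at zhdr + (start_middle << 6); a LAST object of 10
 * chunks starts at zhdr + 4096 - (10 << 6), i.e. it is packed against the
 * end of the page, which is why its size travels in the handle.
 */
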
/**
 * z3fold_get_pool_size() - gets the z3fold pool size in pages
 * @pool:	pool whose size is being queried
 *
 * Returns: size in pages of the given pool.
 */
static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
{
	return atomic64_read(&pool->pages_nr);
}

static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
{
	struct z3fold_header *zhdr;
	struct z3fold_pool *pool;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(PageIsolated(page), page);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return false;

	zhdr = page_address(page);
	z3fold_page_lock(zhdr);
	if (test_bit(NEEDS_COMPACTING, &page->private) ||
	    test_bit(PAGE_STALE, &page->private))
		goto out;

	pool = zhdr_to_pool(zhdr);

	if (zhdr->mapped_count == 0) {
		kref_get(&zhdr->refcount);
		if (!list_empty(&zhdr->buddy))
			list_del_init(&zhdr->buddy);
		spin_lock(&pool->lock);
		if (!list_empty(&page->lru))
			list_del(&page->lru);
		spin_unlock(&pool->lock);
		z3fold_page_unlock(zhdr);
		return true;
	}
out:
	z3fold_page_unlock(zhdr);
	return false;
}

static int z3fold_page_migrate(struct address_space *mapping,
			       struct page *newpage, struct page *page,
			       enum migrate_mode mode)
{
	struct z3fold_header *zhdr, *new_zhdr;
	struct z3fold_pool *pool;
	struct address_space *new_mapping;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(!PageIsolated(page), page);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

	zhdr = page_address(page);
	pool = zhdr_to_pool(zhdr);

	if (!z3fold_page_trylock(zhdr))
		return -EAGAIN;
	if (zhdr->mapped_count != 0) {
		z3fold_page_unlock(zhdr);
		return -EBUSY;
	}
	if (work_pending(&zhdr->work)) {
		z3fold_page_unlock(zhdr);
		return -EAGAIN;
	}
	new_zhdr = page_address(newpage);
	memcpy(new_zhdr, zhdr, PAGE_SIZE);
	newpage->private = page->private;
	page->private = 0;
	z3fold_page_unlock(zhdr);
	spin_lock_init(&new_zhdr->page_lock);
	INIT_WORK(&new_zhdr->work, compact_page_work);
	/*
	 * z3fold_page_isolate() ensures that new_zhdr->buddy is empty,
	 * so we only have to reinitialize it.
	 */
	INIT_LIST_HEAD(&new_zhdr->buddy);
	new_mapping = page_mapping(page);
	__ClearPageMovable(page);
	ClearPagePrivate(page);

	get_page(newpage);
	z3fold_page_lock(new_zhdr);
	if (new_zhdr->first_chunks)
		encode_handle(new_zhdr, FIRST);
	if (new_zhdr->last_chunks)
		encode_handle(new_zhdr, LAST);
	if (new_zhdr->middle_chunks)
		encode_handle(new_zhdr, MIDDLE);
	set_bit(NEEDS_COMPACTING, &newpage->private);
	new_zhdr->cpu = smp_processor_id();
	spin_lock(&pool->lock);
	list_add(&newpage->lru, &pool->lru);
	spin_unlock(&pool->lock);
	__SetPageMovable(newpage, new_mapping);
	z3fold_page_unlock(new_zhdr);

	queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);

	page_mapcount_reset(page);
	put_page(page);
	return 0;
}

static void z3fold_page_putback(struct page *page)
{
	struct z3fold_header *zhdr;
	struct z3fold_pool *pool;

	zhdr = page_address(page);
	pool = zhdr_to_pool(zhdr);

	z3fold_page_lock(zhdr);
	if (!list_empty(&zhdr->buddy))
		list_del_init(&zhdr->buddy);
	INIT_LIST_HEAD(&page->lru);
	if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}
	spin_lock(&pool->lock);
	list_add(&page->lru, &pool->lru);
	spin_unlock(&pool->lock);
	z3fold_page_unlock(zhdr);
}

static const struct address_space_operations z3fold_aops = {
	.isolate_page = z3fold_page_isolate,
	.migratepage = z3fold_page_migrate,
	.putback_page = z3fold_page_putback,
};

/*****************
 * zpool
 ****************/

static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)
{
	if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
		return pool->zpool_ops->evict(pool->zpool, handle);
	else
		return -ENOENT;
}

static const struct z3fold_ops z3fold_zpool_ops = {
	.evict =	z3fold_zpool_evict
};

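/*
 * Example of reaching this allocator through zpool (a sketch; zswap is the
 * typical caller, and "my_zpool_ops" is a hypothetical zpool_ops providing
 * an .evict callback):
 *
 *	struct zpool *zp = zpool_create_pool("z3fold", "test", GFP_KERNEL,
 *					     &my_zpool_ops);
 *	unsigned long handle;
 *
 *	zpool_malloc(zp, size, GFP_KERNEL, &handle);
 */
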
static void *z3fold_zpool_create(const char *name, gfp_t gfp,
			       const struct zpool_ops *zpool_ops,
			       struct zpool *zpool)
{
	struct z3fold_pool *pool;

	pool = z3fold_create_pool(name, gfp,
				zpool_ops ? &z3fold_zpool_ops : NULL);
	if (pool) {
		pool->zpool = zpool;
		pool->zpool_ops = zpool_ops;
	}
	return pool;
}

static void z3fold_zpool_destroy(void *pool)
{
	z3fold_destroy_pool(pool);
}

static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	return z3fold_alloc(pool, size, gfp, handle);
}
static void z3fold_zpool_free(void *pool, unsigned long handle)
{
	z3fold_free(pool, handle);
}

static int z3fold_zpool_shrink(void *pool, unsigned int pages,
			unsigned int *reclaimed)
{
	unsigned int total = 0;
	int ret = -EINVAL;

	while (total < pages) {
		ret = z3fold_reclaim_page(pool, 8);
		if (ret < 0)
			break;
		total++;
	}

	if (reclaimed)
		*reclaimed = total;

	return ret;
}

static void *z3fold_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	return z3fold_map(pool, handle);
}
static void z3fold_zpool_unmap(void *pool, unsigned long handle)
{
	z3fold_unmap(pool, handle);
}

static u64 z3fold_zpool_total_size(void *pool)
{
	return z3fold_get_pool_size(pool) * PAGE_SIZE;
}

static struct zpool_driver z3fold_zpool_driver = {
	.type =		"z3fold",
	.owner =	THIS_MODULE,
	.create =	z3fold_zpool_create,
	.destroy =	z3fold_zpool_destroy,
	.malloc =	z3fold_zpool_malloc,
	.free =		z3fold_zpool_free,
	.shrink =	z3fold_zpool_shrink,
	.map =		z3fold_zpool_map,
	.unmap =	z3fold_zpool_unmap,
	.total_size =	z3fold_zpool_total_size,
};

MODULE_ALIAS("zpool-z3fold");

static int __init init_z3fold(void)
{
	int ret;

	/* Make sure the z3fold header is not larger than the page size */
	BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE);
	ret = z3fold_mount();
	if (ret)
		return ret;

	zpool_register_driver(&z3fold_zpool_driver);

	return 0;
}

static void __exit exit_z3fold(void)
{
	z3fold_unmount();
	zpool_unregister_driver(&z3fold_zpool_driver);
}

module_init(init_z3fold);
module_exit(exit_z3fold);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");