// SPDX-License-Identifier: GPL-2.0-only
/*
 * z3fold.c
 *
 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
 * Copyright (C) 2016, Sony Mobile Communications Inc.
 *
 * This implementation is based on zbud written by Seth Jennings.
 *
 * z3fold is a special purpose allocator for storing compressed pages. It
 * can store up to three compressed pages per page, which improves the
 * compression ratio of zbud while retaining its main concepts (e.g. always
 * storing an integral number of objects per page) and simplicity.
 * It still has simple and deterministic reclaim properties that make it
 * preferable to a higher density approach (with no requirement on an
 * integral number of objects per page) when reclaim is used.
 *
 * As in zbud, pages are divided into "chunks". The size of the chunks is
 * fixed at compile time and is determined by NCHUNKS_ORDER below.
 *
 * z3fold doesn't export any API and is meant to be used via the zpool API.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/page-flags.h>
#include <linux/migrate.h>
#include <linux/node.h>
#include <linux/compaction.h>
#include <linux/percpu.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/fs.h>
#include <linux/preempt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zpool.h>
#include <linux/magic.h>
#include <linux/kmemleak.h>

/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation. It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
 * at the beginning of an allocated page are occupied by the z3fold header, so
 * NCHUNKS will be calculated to 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y),
 * which is the maximum number of free chunks in a z3fold page; there will
 * likewise be 63 (or 62, respectively) freelists per pool.
 */
#define NCHUNKS_ORDER	6

#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
#define ZHDR_CHUNKS	(ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
#define TOTAL_CHUNKS	(PAGE_SIZE >> CHUNK_SHIFT)
#define NCHUNKS		(TOTAL_CHUNKS - ZHDR_CHUNKS)

#define BUDDY_MASK	(0x3)
#define BUDDY_SHIFT	2
#define SLOTS_ALIGN	(0x40)
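
/*
 * A worked example of the chunk arithmetic above, assuming 4 KiB pages
 * (PAGE_SHIFT == 12): CHUNK_SHIFT is 12 - 6 == 6, so CHUNK_SIZE is 64 bytes
 * and TOTAL_CHUNKS is 64. With a z3fold header that fits in one chunk,
 * ZHDR_CHUNKS is 1 and NCHUNKS is 63, i.e. 63 chunks per page remain
 * available for objects and each pool keeps 63 per-CPU freelists.
 */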

/*****************
 * Structures
 *****************/
struct z3fold_pool;
struct z3fold_ops {
	int (*evict)(struct z3fold_pool *pool, unsigned long handle);
};

enum buddy {
	HEADLESS = 0,
	FIRST,
	MIDDLE,
	LAST,
	BUDDIES_MAX = LAST
};

struct z3fold_buddy_slots {
	/*
	 * we are using BUDDY_MASK in handle_to_buddy etc. so there should
	 * be enough slots to hold all possible variants
	 */
	unsigned long slot[BUDDY_MASK + 1];
	unsigned long pool; /* back link */
	rwlock_t lock;
};
#define HANDLE_FLAG_MASK	(0x03)

/*
 * struct z3fold_header - z3fold page metadata occupying first chunks of each
 *			z3fold page, except for HEADLESS pages
 * @buddy:		links the z3fold page into the relevant list in the
 *			pool
 * @page_lock:		per-page lock
 * @refcount:		reference count for the z3fold page
 * @work:		work_struct for page layout optimization
 * @slots:		pointer to the structure holding buddy slots
 * @pool:		pointer to the containing pool
 * @cpu:		CPU which this page "belongs" to
 * @first_chunks:	the size of the first buddy in chunks, 0 if free
 * @middle_chunks:	the size of the middle buddy in chunks, 0 if free
 * @last_chunks:	the size of the last buddy in chunks, 0 if free
 * @start_middle:	index of the first chunk occupied by the middle buddy
 * @first_num:		the starting number (for the first handle)
 * @mapped_count:	the number of objects currently mapped
 * @foreign_handles:	number of handles in this page that live in another
 *			page's slots
 */
struct z3fold_header {
	struct list_head buddy;
	spinlock_t page_lock;
	struct kref refcount;
	struct work_struct work;
	struct z3fold_buddy_slots *slots;
	struct z3fold_pool *pool;
	short cpu;
	unsigned short first_chunks;
	unsigned short middle_chunks;
	unsigned short last_chunks;
	unsigned short start_middle;
	unsigned short first_num:2;
	unsigned short mapped_count:2;
	unsigned short foreign_handles:2;
};
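
/*
 * In-page layout of a non-HEADLESS z3fold page (a sketch): the header takes
 * the first ZHDR_CHUNKS chunks, the FIRST buddy sits right after it, the
 * LAST buddy is placed against the end of the page, and the MIDDLE buddy
 * floats in between at @start_middle (it is the only one that compaction
 * may move):
 *
 *	| zhdr | FIRST | ..free.. | MIDDLE | ..free.. | LAST |
 *	0     ZHDR_CHUNKS       start_middle            TOTAL_CHUNKS
 */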

/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @name:	pool name
 * @lock:	protects pool unbuddied/lru lists
 * @stale_lock:	protects pool stale page list
 * @unbuddied:	per-cpu array of lists tracking z3fold pages that contain 2-
 *		buddies; the list each z3fold page is added to depends on
 *		the size of its free region.
 * @lru:	list tracking the z3fold pages in LRU order by most recently
 *		added buddy.
 * @stale:	list of pages marked for freeing
 * @pages_nr:	number of z3fold pages in the pool.
 * @c_handle:	cache for z3fold_buddy_slots allocation
 * @ops:	pointer to a structure of user defined operations specified at
 *		pool creation time.
 * @zpool:	zpool driver
 * @zpool_ops:	zpool operations structure with an evict callback
 * @compact_wq:	workqueue for page layout background optimization
 * @release_wq:	workqueue for safe page release
 * @work:	work_struct for safe page release
 * @inode:	inode for z3fold pseudo filesystem
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
struct z3fold_pool {
	const char *name;
	spinlock_t lock;
	spinlock_t stale_lock;
	struct list_head *unbuddied;
	struct list_head lru;
	struct list_head stale;
	atomic64_t pages_nr;
	struct kmem_cache *c_handle;
	const struct z3fold_ops *ops;
	struct zpool *zpool;
	const struct zpool_ops *zpool_ops;
	struct workqueue_struct *compact_wq;
	struct workqueue_struct *release_wq;
	struct work_struct work;
	struct inode *inode;
};

/*
 * Internal z3fold page flags
 */
enum z3fold_page_flags {
	PAGE_HEADLESS = 0,
	MIDDLE_CHUNK_MAPPED,
	NEEDS_COMPACTING,
	PAGE_STALE,
	PAGE_CLAIMED, /* by either reclaim or free */
};

/*
 * handle flags, go under HANDLE_FLAG_MASK
 */
enum z3fold_handle_flags {
	HANDLES_NOFREE = 0,
};

/*
 * Forward declarations
 */
static struct z3fold_header *__z3fold_alloc(struct z3fold_pool *, size_t, bool);
static void compact_page_work(struct work_struct *w);

/*****************
 * Helpers
 *****************/

/* Converts an allocation size in bytes to size in z3fold chunks */
static int size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}

#define for_each_unbuddied_list(_iter, _begin) \
	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)

static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
							gfp_t gfp)
{
	struct z3fold_buddy_slots *slots = kmem_cache_zalloc(pool->c_handle,
							     gfp);

	if (slots) {
		/* It will be freed separately in free_handle(). */
		kmemleak_not_leak(slots);
		slots->pool = (unsigned long)pool;
		rwlock_init(&slots->lock);
	}

	return slots;
}

static inline struct z3fold_pool *slots_to_pool(struct z3fold_buddy_slots *s)
{
	return (struct z3fold_pool *)(s->pool & ~HANDLE_FLAG_MASK);
}

static inline struct z3fold_buddy_slots *handle_to_slots(unsigned long handle)
{
	return (struct z3fold_buddy_slots *)(handle & ~(SLOTS_ALIGN - 1));
}

/* Lock a z3fold page */
static inline void z3fold_page_lock(struct z3fold_header *zhdr)
{
	spin_lock(&zhdr->page_lock);
}

/* Try to lock a z3fold page */
static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
{
	return spin_trylock(&zhdr->page_lock);
}

/* Unlock a z3fold page */
static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
{
	spin_unlock(&zhdr->page_lock);
}

/* return locked z3fold page if it's not headless */
static inline struct z3fold_header *get_z3fold_header(unsigned long handle)
{
	struct z3fold_buddy_slots *slots;
	struct z3fold_header *zhdr;
	int locked = 0;

	if (!(handle & (1 << PAGE_HEADLESS))) {
		slots = handle_to_slots(handle);
		do {
			unsigned long addr;

			read_lock(&slots->lock);
			addr = *(unsigned long *)handle;
			zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
			locked = z3fold_page_trylock(zhdr);
			read_unlock(&slots->lock);
			if (locked)
				break;
			cpu_relax();
		} while (true);
	} else {
		zhdr = (struct z3fold_header *)(handle & PAGE_MASK);
	}

	return zhdr;
}

static inline void put_z3fold_header(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (!test_bit(PAGE_HEADLESS, &page->private))
		z3fold_page_unlock(zhdr);
}
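
/*
 * A note on the handle locking discipline above: get_z3fold_header()
 * dereferences the handle with slots->lock held for reading, so the slot
 * cannot be rewritten between reading the page address and trylocking the
 * page; if the trylock fails, the loop re-reads the slot, picking up the
 * object's new location in case it has moved in the meantime.
 */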

static inline void free_handle(unsigned long handle, struct z3fold_header *zhdr)
{
	struct z3fold_buddy_slots *slots;
	int i;
	bool is_free;

	if (WARN_ON(*(unsigned long *)handle == 0))
		return;

	slots = handle_to_slots(handle);
	write_lock(&slots->lock);
	*(unsigned long *)handle = 0;

	if (test_bit(HANDLES_NOFREE, &slots->pool)) {
		write_unlock(&slots->lock);
		return; /* simple case, nothing else to do */
	}

	if (zhdr->slots != slots)
		zhdr->foreign_handles--;

	is_free = true;
	for (i = 0; i <= BUDDY_MASK; i++) {
		if (slots->slot[i]) {
			is_free = false;
			break;
		}
	}
	write_unlock(&slots->lock);

	if (is_free) {
		struct z3fold_pool *pool = slots_to_pool(slots);

		if (zhdr->slots == slots)
			zhdr->slots = NULL;
		kmem_cache_free(pool->c_handle, slots);
	}
}

static int z3fold_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, Z3FOLD_MAGIC) ? 0 : -ENOMEM;
}

static struct file_system_type z3fold_fs = {
	.name		= "z3fold",
	.init_fs_context = z3fold_init_fs_context,
	.kill_sb	= kill_anon_super,
};

static struct vfsmount *z3fold_mnt;
static int __init z3fold_mount(void)
{
	int ret = 0;

	z3fold_mnt = kern_mount(&z3fold_fs);
	if (IS_ERR(z3fold_mnt))
		ret = PTR_ERR(z3fold_mnt);

	return ret;
}

static void z3fold_unmount(void)
{
	kern_unmount(z3fold_mnt);
}

static const struct address_space_operations z3fold_aops;
static int z3fold_register_migration(struct z3fold_pool *pool)
{
	pool->inode = alloc_anon_inode(z3fold_mnt->mnt_sb);
	if (IS_ERR(pool->inode)) {
		pool->inode = NULL;
		return 1;
	}

	pool->inode->i_mapping->private_data = pool;
	pool->inode->i_mapping->a_ops = &z3fold_aops;
	return 0;
}

static void z3fold_unregister_migration(struct z3fold_pool *pool)
{
	if (pool->inode)
		iput(pool->inode);
}

/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page, bool headless,
					struct z3fold_pool *pool, gfp_t gfp)
{
	struct z3fold_header *zhdr = page_address(page);
	struct z3fold_buddy_slots *slots;

	INIT_LIST_HEAD(&page->lru);
	clear_bit(PAGE_HEADLESS, &page->private);
	clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	clear_bit(PAGE_STALE, &page->private);
	clear_bit(PAGE_CLAIMED, &page->private);
	if (headless)
		return zhdr;

	slots = alloc_slots(pool, gfp);
	if (!slots)
		return NULL;

	memset(zhdr, 0, sizeof(*zhdr));
	spin_lock_init(&zhdr->page_lock);
	kref_init(&zhdr->refcount);
	zhdr->cpu = -1;
	zhdr->slots = slots;
	zhdr->pool = pool;
	INIT_LIST_HEAD(&zhdr->buddy);
	INIT_WORK(&zhdr->work, compact_page_work);
	return zhdr;
}

/* Resets the struct page fields and frees the page */
static void free_z3fold_page(struct page *page, bool headless)
{
	if (!headless) {
		lock_page(page);
		__ClearPageMovable(page);
		unlock_page(page);
	}
	__free_page(page);
}

/* Helper function to build the index */
static inline int __idx(struct z3fold_header *zhdr, enum buddy bud)
{
	return (bud + zhdr->first_num) & BUDDY_MASK;
}
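
/*
 * Handle layout (a sketch, taken from the encoding below): a non-headless
 * handle is the address of one of the four slots->slot[] words; the word
 * itself holds the page address of the z3fold header with the slot index in
 * the low bits and, for a LAST buddy, the object size in chunks shifted by
 * BUDDY_SHIFT so that handle_to_chunks() can recover it. A headless handle
 * is simply the page address with the PAGE_HEADLESS bit set.
 */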

/*
 * Encodes the handle of a particular buddy within a z3fold page.
 * Pool lock should be held as this function accesses first_num.
 */
static unsigned long __encode_handle(struct z3fold_header *zhdr,
				struct z3fold_buddy_slots *slots,
				enum buddy bud)
{
	unsigned long h = (unsigned long)zhdr;
	int idx = 0;

	/*
	 * For a headless page, its handle is its pointer with the extra
	 * PAGE_HEADLESS bit set
	 */
	if (bud == HEADLESS)
		return h | (1 << PAGE_HEADLESS);

	/* otherwise, return pointer to encoded handle */
	idx = __idx(zhdr, bud);
	h += idx;
	if (bud == LAST)
		h |= (zhdr->last_chunks << BUDDY_SHIFT);

	write_lock(&slots->lock);
	slots->slot[idx] = h;
	write_unlock(&slots->lock);
	return (unsigned long)&slots->slot[idx];
}

static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
{
	return __encode_handle(zhdr, zhdr->slots, bud);
}

/* only for LAST bud, returns zero otherwise */
static unsigned short handle_to_chunks(unsigned long handle)
{
	struct z3fold_buddy_slots *slots = handle_to_slots(handle);
	unsigned long addr;

	read_lock(&slots->lock);
	addr = *(unsigned long *)handle;
	read_unlock(&slots->lock);
	return (addr & ~PAGE_MASK) >> BUDDY_SHIFT;
}

/*
 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle,
 * but that doesn't matter, because the masking will result in the
 * correct buddy number.
 */
static enum buddy handle_to_buddy(unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct z3fold_buddy_slots *slots = handle_to_slots(handle);
	unsigned long addr;

	read_lock(&slots->lock);
	WARN_ON(handle & (1 << PAGE_HEADLESS));
	addr = *(unsigned long *)handle;
	read_unlock(&slots->lock);
	zhdr = (struct z3fold_header *)(addr & PAGE_MASK);
	return (addr - zhdr->first_num) & BUDDY_MASK;
}

static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
{
	return zhdr->pool;
}

static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
{
	struct page *page = virt_to_page(zhdr);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);

	WARN_ON(!list_empty(&zhdr->buddy));
	set_bit(PAGE_STALE, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	spin_lock(&pool->lock);
	if (!list_empty(&page->lru))
		list_del_init(&page->lru);
	spin_unlock(&pool->lock);

	if (locked)
		z3fold_page_unlock(zhdr);

	spin_lock(&pool->stale_lock);
	list_add(&zhdr->buddy, &pool->stale);
	queue_work(pool->release_wq, &pool->work);
	spin_unlock(&pool->stale_lock);

	atomic64_dec(&pool->pages_nr);
}

static void release_z3fold_page(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	__release_z3fold_page(zhdr, false);
}

static void release_z3fold_page_locked(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void release_z3fold_page_locked_list(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
					       refcount);
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);

	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}
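
/*
 * The three kref release callbacks above differ only in the caller's state:
 * release_z3fold_page() is for an unlocked page,
 * release_z3fold_page_locked() expects the page lock to be held (and drops
 * it), and release_z3fold_page_locked_list() additionally removes the header
 * from its unbuddied list first. In all three cases the actual freeing is
 * deferred to free_pages_work() via the stale list.
 */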

static void free_pages_work(struct work_struct *w)
{
	struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);

	spin_lock(&pool->stale_lock);
	while (!list_empty(&pool->stale)) {
		struct z3fold_header *zhdr = list_first_entry(&pool->stale,
						struct z3fold_header, buddy);
		struct page *page = virt_to_page(zhdr);

		list_del(&zhdr->buddy);
		if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
			continue;
		spin_unlock(&pool->stale_lock);
		cancel_work_sync(&zhdr->work);
		free_z3fold_page(page, false);
		cond_resched();
		spin_lock(&pool->stale_lock);
	}
	spin_unlock(&pool->stale_lock);
}

/*
 * Returns the number of free chunks in a z3fold page.
 * NB: can't be used with HEADLESS pages.
 */
static int num_free_chunks(struct z3fold_header *zhdr)
{
	int nfree;
	/*
	 * If there is a middle object, pick up the bigger free space
	 * either before or after it. Otherwise just subtract the number
	 * of chunks occupied by the first and the last objects.
	 */
	if (zhdr->middle_chunks != 0) {
		int nfree_before = zhdr->first_chunks ?
			0 : zhdr->start_middle - ZHDR_CHUNKS;
		int nfree_after = zhdr->last_chunks ?
			0 : TOTAL_CHUNKS -
				(zhdr->start_middle + zhdr->middle_chunks);
		nfree = max(nfree_before, nfree_after);
	} else
		nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
	return nfree;
}

/* Add to the appropriate unbuddied list */
static inline void add_to_unbuddied(struct z3fold_pool *pool,
				struct z3fold_header *zhdr)
{
	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
			zhdr->middle_chunks == 0) {
		struct list_head *unbuddied;
		int freechunks = num_free_chunks(zhdr);

		migrate_disable();
		unbuddied = this_cpu_ptr(pool->unbuddied);
		spin_lock(&pool->lock);
		list_add(&zhdr->buddy, &unbuddied[freechunks]);
		spin_unlock(&pool->lock);
		zhdr->cpu = smp_processor_id();
		migrate_enable();
	}
}

static inline enum buddy get_free_buddy(struct z3fold_header *zhdr, int chunks)
{
	enum buddy bud = HEADLESS;

	if (zhdr->middle_chunks) {
		if (!zhdr->first_chunks &&
		    chunks <= zhdr->start_middle - ZHDR_CHUNKS)
			bud = FIRST;
		else if (!zhdr->last_chunks)
			bud = LAST;
	} else {
		if (!zhdr->first_chunks)
			bud = FIRST;
		else if (!zhdr->last_chunks)
			bud = LAST;
		else
			bud = MIDDLE;
	}

	return bud;
}

static inline void *mchunk_memmove(struct z3fold_header *zhdr,
				unsigned short dst_chunk)
{
	void *beg = zhdr;
	return memmove(beg + (dst_chunk << CHUNK_SHIFT),
		       beg + (zhdr->start_middle << CHUNK_SHIFT),
		       zhdr->middle_chunks << CHUNK_SHIFT);
}

static inline bool buddy_single(struct z3fold_header *zhdr)
{
	return !((zhdr->first_chunks && zhdr->middle_chunks) ||
			(zhdr->first_chunks && zhdr->last_chunks) ||
			(zhdr->middle_chunks && zhdr->last_chunks));
}
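
/*
 * Inter-page compaction: if a page holds a single buddy, try to move that
 * object into a free slot of another, partially filled page (found via
 * __z3fold_alloc()) and redirect its handle there, so that the now-empty
 * source page can be released. Returns the new page's header on success.
 */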

static struct z3fold_header *compact_single_buddy(struct z3fold_header *zhdr)
{
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	void *p = zhdr;
	unsigned long old_handle = 0;
	size_t sz = 0;
	struct z3fold_header *new_zhdr = NULL;
	int first_idx = __idx(zhdr, FIRST);
	int middle_idx = __idx(zhdr, MIDDLE);
	int last_idx = __idx(zhdr, LAST);
	unsigned short *moved_chunks = NULL;

	/*
	 * No need to protect slots here -- all the slots are "local" and
	 * the page lock is already taken
	 */
	if (zhdr->first_chunks && zhdr->slots->slot[first_idx]) {
		p += ZHDR_SIZE_ALIGNED;
		sz = zhdr->first_chunks << CHUNK_SHIFT;
		old_handle = (unsigned long)&zhdr->slots->slot[first_idx];
		moved_chunks = &zhdr->first_chunks;
	} else if (zhdr->middle_chunks && zhdr->slots->slot[middle_idx]) {
		p += zhdr->start_middle << CHUNK_SHIFT;
		sz = zhdr->middle_chunks << CHUNK_SHIFT;
		old_handle = (unsigned long)&zhdr->slots->slot[middle_idx];
		moved_chunks = &zhdr->middle_chunks;
	} else if (zhdr->last_chunks && zhdr->slots->slot[last_idx]) {
		p += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
		sz = zhdr->last_chunks << CHUNK_SHIFT;
		old_handle = (unsigned long)&zhdr->slots->slot[last_idx];
		moved_chunks = &zhdr->last_chunks;
	}

	if (sz > 0) {
		enum buddy new_bud = HEADLESS;
		short chunks = size_to_chunks(sz);
		void *q;

		new_zhdr = __z3fold_alloc(pool, sz, false);
		if (!new_zhdr)
			return NULL;

		if (WARN_ON(new_zhdr == zhdr))
			goto out_fail;

		new_bud = get_free_buddy(new_zhdr, chunks);
		q = new_zhdr;
		switch (new_bud) {
		case FIRST:
			new_zhdr->first_chunks = chunks;
			q += ZHDR_SIZE_ALIGNED;
			break;
		case MIDDLE:
			new_zhdr->middle_chunks = chunks;
			new_zhdr->start_middle =
				new_zhdr->first_chunks + ZHDR_CHUNKS;
			q += new_zhdr->start_middle << CHUNK_SHIFT;
			break;
		case LAST:
			new_zhdr->last_chunks = chunks;
			q += PAGE_SIZE - (new_zhdr->last_chunks << CHUNK_SHIFT);
			break;
		default:
			goto out_fail;
		}
		new_zhdr->foreign_handles++;
		memcpy(q, p, sz);
		write_lock(&zhdr->slots->lock);
		*(unsigned long *)old_handle = (unsigned long)new_zhdr +
			__idx(new_zhdr, new_bud);
		if (new_bud == LAST)
			*(unsigned long *)old_handle |=
					(new_zhdr->last_chunks << BUDDY_SHIFT);
		write_unlock(&zhdr->slots->lock);
		add_to_unbuddied(pool, new_zhdr);
		z3fold_page_unlock(new_zhdr);

		*moved_chunks = 0;
	}

	return new_zhdr;

out_fail:
	if (new_zhdr && !kref_put(&new_zhdr->refcount, release_z3fold_page_locked)) {
		add_to_unbuddied(pool, new_zhdr);
		z3fold_page_unlock(new_zhdr);
	}
	return NULL;
}
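
/*
 * Intra-page compaction below moves the middle buddy towards the beginning
 * (or the end) of the page to merge the free space on both of its sides.
 * A worked example, assuming ZHDR_CHUNKS == 1: with first_chunks == 4 and
 * start_middle == 10, the gap is 10 - (4 + 1) == 5 chunks, which is at
 * least BIG_CHUNK_GAP, so the middle buddy is moved down to chunk 5.
 */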

#define BIG_CHUNK_GAP	3
/* Has to be called with lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
		return 0; /* can't move middle chunk, it's used */

	if (unlikely(PageIsolated(page)))
		return 0;

	if (zhdr->middle_chunks == 0)
		return 0; /* nothing to compact */

	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
		/* move to the beginning */
		mchunk_memmove(zhdr, ZHDR_CHUNKS);
		zhdr->first_chunks = zhdr->middle_chunks;
		zhdr->middle_chunks = 0;
		zhdr->start_middle = 0;
		zhdr->first_num++;
		return 1;
	}

	/*
	 * moving data is expensive, so let's only do that if
	 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
	 */
	if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
	    zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
			BIG_CHUNK_GAP) {
		mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
		return 1;
	} else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
		   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
					+ zhdr->middle_chunks) >=
			BIG_CHUNK_GAP) {
		unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
			zhdr->middle_chunks;
		mchunk_memmove(zhdr, new_start);
		zhdr->start_middle = new_start;
		return 1;
	}

	return 0;
}

static void do_compact_page(struct z3fold_header *zhdr, bool locked)
{
	struct z3fold_pool *pool = zhdr_to_pool(zhdr);
	struct page *page;

	page = virt_to_page(zhdr);
	if (locked)
		WARN_ON(z3fold_page_trylock(zhdr));
	else
		z3fold_page_lock(zhdr);
	if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
		z3fold_page_unlock(zhdr);
		return;
	}
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	if (kref_put(&zhdr->refcount, release_z3fold_page_locked))
		return;

	if (test_bit(PAGE_STALE, &page->private) ||
	    test_and_set_bit(PAGE_CLAIMED, &page->private)) {
		z3fold_page_unlock(zhdr);
		return;
	}

	if (!zhdr->foreign_handles && buddy_single(zhdr) &&
	    zhdr->mapped_count == 0 && compact_single_buddy(zhdr)) {
		if (!kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
			clear_bit(PAGE_CLAIMED, &page->private);
			z3fold_page_unlock(zhdr);
		}
		return;
	}

	z3fold_compact_page(zhdr);
	add_to_unbuddied(pool, zhdr);
	clear_bit(PAGE_CLAIMED, &page->private);
	z3fold_page_unlock(zhdr);
}

static void compact_page_work(struct work_struct *w)
{
	struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
						work);

	do_compact_page(zhdr, false);
}
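
/*
 * Allocation below first scans this CPU's unbuddied lists, starting from the
 * size class that matches the request and moving towards bigger free
 * regions; if nothing is found, it falls back to looking for an exact-size
 * match on the other CPUs' lists. The re-check under pool->lock guards
 * against the page having been grabbed or relocated after the lockless
 * list peek.
 */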

/* returns _locked_ z3fold page header or NULL */
static inline struct z3fold_header *__z3fold_alloc(struct z3fold_pool *pool,
						size_t size, bool can_sleep)
{
	struct z3fold_header *zhdr = NULL;
	struct page *page;
	struct list_head *unbuddied;
	int chunks = size_to_chunks(size), i;

lookup:
	migrate_disable();
	/* First, try to find an unbuddied z3fold page. */
	unbuddied = this_cpu_ptr(pool->unbuddied);
	for_each_unbuddied_list(i, chunks) {
		struct list_head *l = &unbuddied[i];

		zhdr = list_first_entry_or_null(READ_ONCE(l),
					struct z3fold_header, buddy);

		if (!zhdr)
			continue;

		/* Re-check under lock. */
		spin_lock(&pool->lock);
		if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
						struct z3fold_header, buddy)) ||
		    !z3fold_page_trylock(zhdr)) {
			spin_unlock(&pool->lock);
			zhdr = NULL;
			migrate_enable();
			if (can_sleep)
				cond_resched();
			goto lookup;
		}
		list_del_init(&zhdr->buddy);
		zhdr->cpu = -1;
		spin_unlock(&pool->lock);

		page = virt_to_page(zhdr);
		if (test_bit(NEEDS_COMPACTING, &page->private) ||
		    test_bit(PAGE_CLAIMED, &page->private)) {
			z3fold_page_unlock(zhdr);
			zhdr = NULL;
			migrate_enable();
			if (can_sleep)
				cond_resched();
			goto lookup;
		}

		/*
		 * this page could not be removed from its unbuddied
		 * list while pool lock was held, and then we've taken
		 * page lock so kref_put could not be called before
		 * we got here, so it's safe to just call kref_get()
		 */
		kref_get(&zhdr->refcount);
		break;
	}
	migrate_enable();

	if (!zhdr) {
		int cpu;

		/* look for _exact_ match on other cpus' lists */
		for_each_online_cpu(cpu) {
			struct list_head *l;

			unbuddied = per_cpu_ptr(pool->unbuddied, cpu);
			spin_lock(&pool->lock);
			l = &unbuddied[chunks];

			zhdr = list_first_entry_or_null(READ_ONCE(l),
						struct z3fold_header, buddy);

			if (!zhdr || !z3fold_page_trylock(zhdr)) {
				spin_unlock(&pool->lock);
				zhdr = NULL;
				continue;
			}
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			spin_unlock(&pool->lock);

			page = virt_to_page(zhdr);
			if (test_bit(NEEDS_COMPACTING, &page->private) ||
			    test_bit(PAGE_CLAIMED, &page->private)) {
				z3fold_page_unlock(zhdr);
				zhdr = NULL;
				if (can_sleep)
					cond_resched();
				continue;
			}
			kref_get(&zhdr->refcount);
			break;
		}
	}

	if (zhdr && !zhdr->slots) {
		zhdr->slots = alloc_slots(pool, GFP_ATOMIC);
		if (!zhdr->slots)
			goto out_fail;
	}
	return zhdr;

out_fail:
	if (!kref_put(&zhdr->refcount, release_z3fold_page_locked)) {
		add_to_unbuddied(pool, zhdr);
		z3fold_page_unlock(zhdr);
	}
	return NULL;
}

/*
 * API Functions
 */

/**
 * z3fold_create_pool() - create a new z3fold pool
 * @name:	pool name
 * @gfp:	gfp flags when allocating the z3fold pool structure
 * @ops:	user-defined operations for the z3fold pool
 *
 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 * failed.
 */
static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
		const struct z3fold_ops *ops)
{
	struct z3fold_pool *pool = NULL;
	int i, cpu;

	pool = kzalloc(sizeof(struct z3fold_pool), gfp);
	if (!pool)
		goto out;
	pool->c_handle = kmem_cache_create("z3fold_handle",
				sizeof(struct z3fold_buddy_slots),
				SLOTS_ALIGN, 0, NULL);
	if (!pool->c_handle)
		goto out_c;
	spin_lock_init(&pool->lock);
	spin_lock_init(&pool->stale_lock);
	pool->unbuddied = __alloc_percpu(sizeof(struct list_head) * NCHUNKS,
					 __alignof__(struct list_head));
	if (!pool->unbuddied)
		goto out_pool;
	for_each_possible_cpu(cpu) {
		struct list_head *unbuddied =
				per_cpu_ptr(pool->unbuddied, cpu);
		for_each_unbuddied_list(i, 0)
			INIT_LIST_HEAD(&unbuddied[i]);
	}
	INIT_LIST_HEAD(&pool->lru);
	INIT_LIST_HEAD(&pool->stale);
	atomic64_set(&pool->pages_nr, 0);
	pool->name = name;
	pool->compact_wq = create_singlethread_workqueue(pool->name);
	if (!pool->compact_wq)
		goto out_unbuddied;
	pool->release_wq = create_singlethread_workqueue(pool->name);
	if (!pool->release_wq)
		goto out_wq;
	if (z3fold_register_migration(pool))
		goto out_rwq;
	INIT_WORK(&pool->work, free_pages_work);
	pool->ops = ops;
	return pool;

out_rwq:
	destroy_workqueue(pool->release_wq);
out_wq:
	destroy_workqueue(pool->compact_wq);
out_unbuddied:
	free_percpu(pool->unbuddied);
out_pool:
	kmem_cache_destroy(pool->c_handle);
out_c:
	kfree(pool);
out:
	return NULL;
}

/**
 * z3fold_destroy_pool() - destroys an existing z3fold pool
 * @pool:	the z3fold pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
	kmem_cache_destroy(pool->c_handle);

	/*
	 * We need to destroy pool->compact_wq before pool->release_wq,
	 * as any pending work on pool->compact_wq will call
	 * queue_work(pool->release_wq, &pool->work).
	 *
	 * There are still outstanding pages until both workqueues are drained,
	 * so we cannot unregister migration until then.
	 */

	destroy_workqueue(pool->compact_wq);
	destroy_workqueue(pool->release_wq);
	z3fold_unregister_migration(pool);
	free_percpu(pool->unbuddied);
	kfree(pool);
}
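
/*
 * Size classes handled by z3fold_alloc() below (a worked example, assuming
 * 4 KiB pages and ZHDR_SIZE_ALIGNED == 64): requests larger than
 * PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE == 4096 - 64 - 64 == 3968 bytes
 * cannot share a page with anything else, so they get a whole "headless"
 * page with no z3fold header; anything smaller is folded into the FIRST,
 * MIDDLE or LAST buddy of a regular z3fold page.
 */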

/**
 * z3fold_alloc() - allocates a region of a given size
 * @pool:	z3fold pool from which to allocate
 * @size:	size in bytes of the desired allocation
 * @gfp:	gfp flags used if the pool needs to grow
 * @handle:	handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough to
 * satisfy the allocation request. A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid, -ENOSPC if the size exceeds PAGE_SIZE, or
 * -ENOMEM if the pool was unable to allocate a new page.
 */
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	int chunks = size_to_chunks(size);
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	enum buddy bud;
	bool can_sleep = gfpflags_allow_blocking(gfp);

	if (!size || (gfp & __GFP_HIGHMEM))
		return -EINVAL;

	if (size > PAGE_SIZE)
		return -ENOSPC;

	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
		bud = HEADLESS;
	else {
retry:
		zhdr = __z3fold_alloc(pool, size, can_sleep);
		if (zhdr) {
			bud = get_free_buddy(zhdr, chunks);
			if (bud == HEADLESS) {
				if (!kref_put(&zhdr->refcount,
					     release_z3fold_page_locked))
					z3fold_page_unlock(zhdr);
				pr_err("No free chunks in unbuddied\n");
				WARN_ON(1);
				goto retry;
			}
			page = virt_to_page(zhdr);
			goto found;
		}
		bud = FIRST;
	}

	page = alloc_page(gfp);
	if (!page)
		return -ENOMEM;

	zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp);
	if (!zhdr) {
		__free_page(page);
		return -ENOMEM;
	}
	atomic64_inc(&pool->pages_nr);

	if (bud == HEADLESS) {
		set_bit(PAGE_HEADLESS, &page->private);
		goto headless;
	}
	if (can_sleep) {
		lock_page(page);
		__SetPageMovable(page, pool->inode->i_mapping);
		unlock_page(page);
	} else {
		WARN_ON(!trylock_page(page));
		__SetPageMovable(page, pool->inode->i_mapping);
		unlock_page(page);
	}
	z3fold_page_lock(zhdr);

found:
	if (bud == FIRST)
		zhdr->first_chunks = chunks;
	else if (bud == LAST)
		zhdr->last_chunks = chunks;
	else {
		zhdr->middle_chunks = chunks;
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
	}
	add_to_unbuddied(pool, zhdr);

headless:
	spin_lock(&pool->lock);
	/* Add/move z3fold page to beginning of LRU */
	if (!list_empty(&page->lru))
		list_del(&page->lru);

	list_add(&page->lru, &pool->lru);

	*handle = encode_handle(zhdr, bud);
	spin_unlock(&pool->lock);
	if (bud != HEADLESS)
		z3fold_page_unlock(zhdr);

	return 0;
}

/**
 * z3fold_free() - frees the allocation associated with the given handle
 * @pool:	pool in which the allocation resided
 * @handle:	handle associated with the allocation returned by z3fold_alloc()
 *
 * In the case that the z3fold page in which the allocation resides is under
 * reclaim, as indicated by the PAGE_CLAIMED flag being set, this function
 * only sets the first|middle|last_chunks to 0. The page is actually freed
 * once all buddies are evicted (see z3fold_reclaim_page() below).
 */
static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy bud;
	bool page_claimed;

	zhdr = get_z3fold_header(handle);
	page = virt_to_page(zhdr);
	page_claimed = test_and_set_bit(PAGE_CLAIMED, &page->private);

	if (test_bit(PAGE_HEADLESS, &page->private)) {
		/* if a headless page is under reclaim, just leave.
		 * NB: we use test_and_set_bit for a reason: if the bit
		 * has not been set before, we release this page
		 * immediately so we don't care about its value any more.
		 */
		if (!page_claimed) {
			spin_lock(&pool->lock);
			list_del(&page->lru);
			spin_unlock(&pool->lock);
			put_z3fold_header(zhdr);
			free_z3fold_page(page, true);
			atomic64_dec(&pool->pages_nr);
		}
		return;
	}

	/* Non-headless case */
	bud = handle_to_buddy(handle);

	switch (bud) {
	case FIRST:
		zhdr->first_chunks = 0;
		break;
	case MIDDLE:
		zhdr->middle_chunks = 0;
		break;
	case LAST:
		zhdr->last_chunks = 0;
		break;
	default:
		pr_err("%s: unknown bud %d\n", __func__, bud);
		WARN_ON(1);
		put_z3fold_header(zhdr);
		return;
	}

	if (!page_claimed)
		free_handle(handle, zhdr);
	if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list))
		return;
	if (page_claimed) {
		/* the page has not been claimed by us */
		put_z3fold_header(zhdr);
		return;
	}
	if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
		clear_bit(PAGE_CLAIMED, &page->private);
		put_z3fold_header(zhdr);
		return;
	}
	if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
		zhdr->cpu = -1;
		kref_get(&zhdr->refcount);
		clear_bit(PAGE_CLAIMED, &page->private);
		do_compact_page(zhdr, true);
		return;
	}
	kref_get(&zhdr->refcount);
	clear_bit(PAGE_CLAIMED, &page->private);
	queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
	put_z3fold_header(zhdr);
}
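
/*
 * A minimal sketch of an eviction handler matching the contract described
 * below (hypothetical; writeback_object() is an assumed helper, the real
 * caller of this hook is the zpool user, e.g. zswap):
 *
 *	static int my_evict(struct z3fold_pool *pool, unsigned long handle)
 *	{
 *		if (!writeback_object(handle))
 *			return -EAGAIN;		// can't evict, try next page
 *		z3fold_free(pool, handle);	// required on success
 *		return 0;
 *	}
 */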

/**
 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
 * @pool:	pool from which a page will attempt to be evicted
 * @retries:	number of pages on the LRU list for which eviction will
 *		be attempted before failing
 *
 * z3fold reclaim is different from normal system reclaim in that it is done
 * from the bottom, up. This is because only the bottom layer, z3fold, has
 * information on how the allocations are organized within each z3fold page.
 * This has the potential to create interesting locking situations between
 * z3fold and the user, however.
 *
 * To avoid these, this is how z3fold_reclaim_page() should be called:
 *
 * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
 * call the user-defined eviction handler with the pool and handle as
 * arguments.
 *
 * If the handle can not be evicted, the eviction handler should return
 * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
 * appropriate list and try the next z3fold page on the LRU up to
 * a user-defined number of retries.
 *
 * If the handle is successfully evicted, the eviction handler should
 * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
 * contains logic to delay freeing the page if the page is under reclaim,
 * as indicated by the PAGE_CLAIMED flag being set on the underlying page.
 *
 * If all buddies in the z3fold page are successfully evicted, then the
 * z3fold page can be freed.
 *
 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
 * no pages to evict or an eviction handler is not registered, -EAGAIN if
 * the retry limit was hit.
 */
static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
{
	int i, ret = -1;
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	struct list_head *pos;
	unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;
	struct z3fold_buddy_slots slots __attribute__((aligned(SLOTS_ALIGN)));

	rwlock_init(&slots.lock);
	slots.pool = (unsigned long)pool | (1 << HANDLES_NOFREE);

	spin_lock(&pool->lock);
	if (!pool->ops || !pool->ops->evict || retries == 0) {
		spin_unlock(&pool->lock);
		return -EINVAL;
	}
	for (i = 0; i < retries; i++) {
		if (list_empty(&pool->lru)) {
			spin_unlock(&pool->lock);
			return -EINVAL;
		}
		list_for_each_prev(pos, &pool->lru) {
			page = list_entry(pos, struct page, lru);

			zhdr = page_address(page);
			if (test_bit(PAGE_HEADLESS, &page->private)) {
				/*
				 * For non-headless pages, we wait to do this
				 * until we have the page lock to avoid racing
				 * with __z3fold_alloc(). Headless pages don't
				 * have a lock (and __z3fold_alloc() will never
				 * see them), but we still need to test and set
				 * PAGE_CLAIMED to avoid racing with
				 * z3fold_free(), so just do it now before
				 * leaving the loop.
				 */
				if (test_and_set_bit(PAGE_CLAIMED, &page->private))
					continue;

				break;
			}

			if (kref_get_unless_zero(&zhdr->refcount) == 0) {
				zhdr = NULL;
				break;
			}
			if (!z3fold_page_trylock(zhdr)) {
				kref_put(&zhdr->refcount, release_z3fold_page);
				zhdr = NULL;
				continue; /* can't evict at this point */
			}

			/*
			 * test_and_set_bit is of course atomic, but we still
			 * need to do it under page lock, otherwise checking
			 * that bit in __z3fold_alloc wouldn't make sense
			 */
			if (zhdr->foreign_handles ||
			    test_and_set_bit(PAGE_CLAIMED, &page->private)) {
				if (!kref_put(&zhdr->refcount,
						release_z3fold_page_locked))
					z3fold_page_unlock(zhdr);
				zhdr = NULL;
				continue; /* can't evict such page */
			}
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			break;
		}

		if (!zhdr)
			break;

		list_del_init(&page->lru);
		spin_unlock(&pool->lock);

		if (!test_bit(PAGE_HEADLESS, &page->private)) {
			/*
			 * We need to encode the handles before unlocking, and
			 * use our local slots structure because z3fold_free
			 * can zero out zhdr->slots and we can't do much
			 * about that
			 */
			first_handle = 0;
			last_handle = 0;
			middle_handle = 0;
			memset(slots.slot, 0, sizeof(slots.slot));
			if (zhdr->first_chunks)
				first_handle = __encode_handle(zhdr, &slots,
								FIRST);
			if (zhdr->middle_chunks)
				middle_handle = __encode_handle(zhdr, &slots,
								MIDDLE);
			if (zhdr->last_chunks)
				last_handle = __encode_handle(zhdr, &slots,
								LAST);
			/*
			 * it's safe to unlock here because we hold a
			 * reference to this page
			 */
			z3fold_page_unlock(zhdr);
		} else {
			first_handle = encode_handle(zhdr, HEADLESS);
			last_handle = middle_handle = 0;
		}
		/* Issue the eviction callback(s) */
		if (middle_handle) {
			ret = pool->ops->evict(pool, middle_handle);
			if (ret)
				goto next;
		}
		if (first_handle) {
			ret = pool->ops->evict(pool, first_handle);
			if (ret)
				goto next;
		}
		if (last_handle) {
			ret = pool->ops->evict(pool, last_handle);
			if (ret)
				goto next;
		}
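
	/*
	 * Common exit: if eviction emptied the page, free it and return 0;
	 * otherwise put the page back on the LRU (and the unbuddied list,
	 * where applicable) so it can be retried later.
	 */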
next:
		if (test_bit(PAGE_HEADLESS, &page->private)) {
			if (ret == 0) {
				free_z3fold_page(page, true);
				atomic64_dec(&pool->pages_nr);
				return 0;
			}
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
			clear_bit(PAGE_CLAIMED, &page->private);
		} else {
			struct z3fold_buddy_slots *slots = zhdr->slots;
			z3fold_page_lock(zhdr);
			if (kref_put(&zhdr->refcount,
					release_z3fold_page_locked)) {
				kmem_cache_free(pool->c_handle, slots);
				return 0;
			}
			/*
			 * if we are here, the page is still not completely
			 * free. Take the global pool lock then to be able
			 * to add it back to the lru list
			 */
			spin_lock(&pool->lock);
			list_add(&page->lru, &pool->lru);
			spin_unlock(&pool->lock);
			if (list_empty(&zhdr->buddy))
				add_to_unbuddied(pool, zhdr);
			clear_bit(PAGE_CLAIMED, &page->private);
			z3fold_page_unlock(zhdr);
		}

		/* We started off locked, so we need to take the pool lock back */
		spin_lock(&pool->lock);
	}
	spin_unlock(&pool->lock);
	return -EAGAIN;
}

/**
 * z3fold_map() - maps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be mapped
 *
 * Extracts the buddy number from the handle and constructs the pointer to the
 * correct starting chunk within the page.
 *
 * Returns: a pointer to the mapped allocation
 */
static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	void *addr;
	enum buddy buddy;

	zhdr = get_z3fold_header(handle);
	addr = zhdr;
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		goto out;

	buddy = handle_to_buddy(handle);
	switch (buddy) {
	case FIRST:
		addr += ZHDR_SIZE_ALIGNED;
		break;
	case MIDDLE:
		addr += zhdr->start_middle << CHUNK_SHIFT;
		set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
		break;
	case LAST:
		addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT);
		break;
	default:
		pr_err("unknown buddy id %d\n", buddy);
		WARN_ON(1);
		addr = NULL;
		break;
	}

	if (addr)
		zhdr->mapped_count++;
out:
	put_z3fold_header(zhdr);
	return addr;
}

/**
 * z3fold_unmap() - unmaps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be unmapped
 */
static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy buddy;

	zhdr = get_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return;

	buddy = handle_to_buddy(handle);
	if (buddy == MIDDLE)
		clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	zhdr->mapped_count--;
	put_z3fold_header(zhdr);
}
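
/*
 * Note that z3fold_map() returns a direct pointer into the page rather than
 * a temporary mapping: objects never cross a page boundary. Mapping a MIDDLE
 * buddy sets MIDDLE_CHUNK_MAPPED, which z3fold_compact_page() checks so that
 * compaction never memmove()s an object that is currently mapped.
 */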

/**
 * z3fold_get_pool_size() - gets the z3fold pool size in pages
 * @pool:	pool whose size is being queried
 *
 * Returns: size in pages of the given pool.
 */
static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
{
	return atomic64_read(&pool->pages_nr);
}

static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
{
	struct z3fold_header *zhdr;
	struct z3fold_pool *pool;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(PageIsolated(page), page);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return false;

	zhdr = page_address(page);
	z3fold_page_lock(zhdr);
	if (test_bit(NEEDS_COMPACTING, &page->private) ||
	    test_bit(PAGE_STALE, &page->private))
		goto out;

	if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0)
		goto out;

	if (test_and_set_bit(PAGE_CLAIMED, &page->private))
		goto out;
	pool = zhdr_to_pool(zhdr);
	spin_lock(&pool->lock);
	if (!list_empty(&zhdr->buddy))
		list_del_init(&zhdr->buddy);
	if (!list_empty(&page->lru))
		list_del_init(&page->lru);
	spin_unlock(&pool->lock);

	kref_get(&zhdr->refcount);
	z3fold_page_unlock(zhdr);
	return true;

out:
	z3fold_page_unlock(zhdr);
	return false;
}
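
/*
 * Page migration protocol (a summary of the callbacks below): isolation
 * claims the page (PAGE_CLAIMED plus an extra kref) and takes it off the
 * pool lists; migration copies the whole page, re-encodes the handles so
 * they point at the new page, and schedules compaction on it; putback
 * returns a page whose migration failed to the LRU and unbuddied lists.
 */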

static int z3fold_page_migrate(struct address_space *mapping, struct page *newpage,
			       struct page *page, enum migrate_mode mode)
{
	struct z3fold_header *zhdr, *new_zhdr;
	struct z3fold_pool *pool;
	struct address_space *new_mapping;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(!PageIsolated(page), page);
	VM_BUG_ON_PAGE(!test_bit(PAGE_CLAIMED, &page->private), page);
	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);

	zhdr = page_address(page);
	pool = zhdr_to_pool(zhdr);

	if (!z3fold_page_trylock(zhdr))
		return -EAGAIN;
	if (zhdr->mapped_count != 0 || zhdr->foreign_handles != 0) {
		clear_bit(PAGE_CLAIMED, &page->private);
		z3fold_page_unlock(zhdr);
		return -EBUSY;
	}
	if (work_pending(&zhdr->work)) {
		z3fold_page_unlock(zhdr);
		return -EAGAIN;
	}
	new_zhdr = page_address(newpage);
	memcpy(new_zhdr, zhdr, PAGE_SIZE);
	newpage->private = page->private;
	page->private = 0;
	z3fold_page_unlock(zhdr);
	spin_lock_init(&new_zhdr->page_lock);
	INIT_WORK(&new_zhdr->work, compact_page_work);
	/*
	 * z3fold_page_isolate() ensures that new_zhdr->buddy is empty,
	 * so we only have to reinitialize it.
	 */
	INIT_LIST_HEAD(&new_zhdr->buddy);
	new_mapping = page_mapping(page);
	__ClearPageMovable(page);

	get_page(newpage);
	z3fold_page_lock(new_zhdr);
	if (new_zhdr->first_chunks)
		encode_handle(new_zhdr, FIRST);
	if (new_zhdr->last_chunks)
		encode_handle(new_zhdr, LAST);
	if (new_zhdr->middle_chunks)
		encode_handle(new_zhdr, MIDDLE);
	set_bit(NEEDS_COMPACTING, &newpage->private);
	new_zhdr->cpu = smp_processor_id();
	spin_lock(&pool->lock);
	list_add(&newpage->lru, &pool->lru);
	spin_unlock(&pool->lock);
	__SetPageMovable(newpage, new_mapping);
	z3fold_page_unlock(new_zhdr);

	queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);

	clear_bit(PAGE_CLAIMED, &page->private);
	put_page(page);
	return 0;
}

static void z3fold_page_putback(struct page *page)
{
	struct z3fold_header *zhdr;
	struct z3fold_pool *pool;

	zhdr = page_address(page);
	pool = zhdr_to_pool(zhdr);

	z3fold_page_lock(zhdr);
	if (!list_empty(&zhdr->buddy))
		list_del_init(&zhdr->buddy);
	INIT_LIST_HEAD(&page->lru);
	if (kref_put(&zhdr->refcount, release_z3fold_page_locked))
		return;
	spin_lock(&pool->lock);
	list_add(&page->lru, &pool->lru);
	spin_unlock(&pool->lock);
	if (list_empty(&zhdr->buddy))
		add_to_unbuddied(pool, zhdr);
	clear_bit(PAGE_CLAIMED, &page->private);
	z3fold_page_unlock(zhdr);
}

static const struct address_space_operations z3fold_aops = {
	.isolate_page = z3fold_page_isolate,
	.migratepage = z3fold_page_migrate,
	.putback_page = z3fold_page_putback,
};
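
/*
 * A minimal usage sketch via the zpool API (a hedged illustration;
 * my_zpool_ops is an assumed caller-provided structure, see
 * include/linux/zpool.h for the authoritative interface):
 *
 *	unsigned long handle;
 *	struct zpool *zp = zpool_create_pool("z3fold", "mypool",
 *					     GFP_KERNEL, &my_zpool_ops);
 *	zpool_malloc(zp, size, GFP_KERNEL, &handle);
 *	memcpy(zpool_map_handle(zp, handle, ZPOOL_MM_WO), buf, size);
 *	zpool_unmap_handle(zp, handle);
 *	...
 *	zpool_free(zp, handle);
 *	zpool_destroy_pool(zp);
 */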

/*****************
 * zpool
 ****************/

static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)
{
	if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
		return pool->zpool_ops->evict(pool->zpool, handle);
	else
		return -ENOENT;
}

static const struct z3fold_ops z3fold_zpool_ops = {
	.evict =	z3fold_zpool_evict
};

static void *z3fold_zpool_create(const char *name, gfp_t gfp,
			       const struct zpool_ops *zpool_ops,
			       struct zpool *zpool)
{
	struct z3fold_pool *pool;

	pool = z3fold_create_pool(name, gfp,
				  zpool_ops ? &z3fold_zpool_ops : NULL);
	if (pool) {
		pool->zpool = zpool;
		pool->zpool_ops = zpool_ops;
	}
	return pool;
}

static void z3fold_zpool_destroy(void *pool)
{
	z3fold_destroy_pool(pool);
}

static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	return z3fold_alloc(pool, size, gfp, handle);
}
static void z3fold_zpool_free(void *pool, unsigned long handle)
{
	z3fold_free(pool, handle);
}

static int z3fold_zpool_shrink(void *pool, unsigned int pages,
			unsigned int *reclaimed)
{
	unsigned int total = 0;
	int ret = -EINVAL;

	while (total < pages) {
		ret = z3fold_reclaim_page(pool, 8);
		if (ret < 0)
			break;
		total++;
	}

	if (reclaimed)
		*reclaimed = total;

	return ret;
}

static void *z3fold_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	return z3fold_map(pool, handle);
}
static void z3fold_zpool_unmap(void *pool, unsigned long handle)
{
	z3fold_unmap(pool, handle);
}

static u64 z3fold_zpool_total_size(void *pool)
{
	return z3fold_get_pool_size(pool) * PAGE_SIZE;
}

static struct zpool_driver z3fold_zpool_driver = {
	.type =		"z3fold",
	.sleep_mapped = true,
	.owner =	THIS_MODULE,
	.create =	z3fold_zpool_create,
	.destroy =	z3fold_zpool_destroy,
	.malloc =	z3fold_zpool_malloc,
	.free =		z3fold_zpool_free,
	.shrink =	z3fold_zpool_shrink,
	.map =		z3fold_zpool_map,
	.unmap =	z3fold_zpool_unmap,
	.total_size =	z3fold_zpool_total_size,
};

MODULE_ALIAS("zpool-z3fold");

static int __init init_z3fold(void)
{
	int ret;

	/*
	 * Make sure the z3fold header is not larger than the page size and
	 * that there is space remaining for at least one buddy.
	 */
	BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE - CHUNK_SIZE);
	ret = z3fold_mount();
	if (ret)
		return ret;

	zpool_register_driver(&z3fold_zpool_driver);

	return 0;
}

static void __exit exit_z3fold(void)
{
	z3fold_unmount();
	zpool_unregister_driver(&z3fold_zpool_driver);
}

module_init(init_z3fold);
module_exit(exit_z3fold);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");