/*
 * z3fold.c
 *
 * Author: Vitaly Wool <vitaly.wool@konsulko.com>
 * Copyright (C) 2016, Sony Mobile Communications Inc.
 *
 * This implementation is based on zbud written by Seth Jennings.
 *
 * z3fold is a special purpose allocator for storing compressed pages. It
 * can store up to three compressed pages per page, which improves the
 * compression ratio of zbud while retaining its main concepts (e.g. always
 * storing an integral number of objects per page) and simplicity.
 * It still has simple and deterministic reclaim properties that make it
 * preferable to a higher density approach (with no requirement on an
 * integral number of objects per page) when reclaim is used.
 *
 * As in zbud, pages are divided into "chunks". The size of the chunks is
 * fixed at compile time and is determined by NCHUNKS_ORDER below.
 *
 * z3fold doesn't export any API and is meant to be used via the zpool API.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/atomic.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/zpool.h>

/*****************
 * Structures
 *****************/
struct z3fold_pool;
struct z3fold_ops {
	int (*evict)(struct z3fold_pool *pool, unsigned long handle);
};

enum buddy {
	HEADLESS = 0,
	FIRST,
	MIDDLE,
	LAST,
	BUDDIES_MAX
};

/*
 * struct z3fold_header - z3fold page metadata occupying the first chunks of
 *			each z3fold page, except for HEADLESS pages
 * @buddy:		links the z3fold page into the relevant list in the
 *			pool
 * @page_lock:		per-page lock
 * @refcount:		reference count for the z3fold page
 * @work:		work_struct for page layout optimization
 * @pool:		pointer to the pool which this page belongs to
 * @cpu:		CPU which this page "belongs" to
 * @first_chunks:	the size of the first buddy in chunks, 0 if free
 * @middle_chunks:	the size of the middle buddy in chunks, 0 if free
 * @last_chunks:	the size of the last buddy in chunks, 0 if free
 * @start_middle:	the starting chunk of the middle buddy
 * @first_num:		the starting number (for the first handle)
 */
struct z3fold_header {
	struct list_head buddy;
	spinlock_t page_lock;
	struct kref refcount;
	struct work_struct work;
	struct z3fold_pool *pool;
	short cpu;
	unsigned short first_chunks;
	unsigned short middle_chunks;
	unsigned short last_chunks;
	unsigned short start_middle;
	unsigned short first_num:2;
};

/*
 * NCHUNKS_ORDER determines the internal allocation granularity, effectively
 * adjusting internal fragmentation. It also determines the number of
 * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
 * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
 * at the beginning of an allocated page are occupied by the z3fold header,
 * so NCHUNKS works out to 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y), which
 * is the maximum number of free chunks in a z3fold page; there will likewise
 * be 63 (or 62, respectively) freelists per pool.
 */
#define NCHUNKS_ORDER	6

#define CHUNK_SHIFT	(PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE	(1 << CHUNK_SHIFT)
#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
#define ZHDR_CHUNKS	(ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
#define TOTAL_CHUNKS	(PAGE_SIZE >> CHUNK_SHIFT)
#define NCHUNKS		((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)

#define BUDDY_MASK	(0x3)
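
/*
 * Worked example for the macros above (illustrative, assuming the
 * common PAGE_SHIFT of 12, i.e. 4K pages):
 *
 *	CHUNK_SHIFT  = 12 - 6 = 6, so CHUNK_SIZE = 64 bytes
 *	TOTAL_CHUNKS = 4096 >> 6 = 64 chunks per page
 *	ZHDR_CHUNKS  = chunks taken up by the chunk-aligned header
 *	NCHUNKS      = TOTAL_CHUNKS - ZHDR_CHUNKS, i.e. the chunks left
 *		       for objects in a non-HEADLESS page
 *
 * size_to_chunks() below rounds a request up to whole chunks, so e.g.
 * a 100-byte object occupies (100 + 63) >> 6 = 2 chunks.
 */
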
/**
 * struct z3fold_pool - stores metadata for each z3fold pool
 * @name:	pool name
 * @lock:	protects pool unbuddied/lru lists
 * @stale_lock:	protects pool stale page list
 * @unbuddied:	per-cpu array of lists tracking z3fold pages that contain 2
 *		buddies; the list each z3fold page is added to depends on
 *		the size of its free region.
 * @lru:	list tracking the z3fold pages in LRU order by most recently
 *		added buddy.
 * @stale:	list of pages marked for freeing
 * @pages_nr:	number of z3fold pages in the pool.
 * @ops:	pointer to a structure of user defined operations specified at
 *		pool creation time.
 * @zpool:	zpool driver
 * @zpool_ops:	zpool operations structure with an evict callback
 * @compact_wq:	workqueue for page layout background optimization
 * @release_wq:	workqueue for safe page release
 * @work:	work_struct for safe page release
 *
 * This structure is allocated at pool creation time and maintains metadata
 * pertaining to a particular z3fold pool.
 */
struct z3fold_pool {
	const char *name;
	spinlock_t lock;
	spinlock_t stale_lock;
	struct list_head *unbuddied;
	struct list_head lru;
	struct list_head stale;
	atomic64_t pages_nr;
	const struct z3fold_ops *ops;
	struct zpool *zpool;
	const struct zpool_ops *zpool_ops;
	struct workqueue_struct *compact_wq;
	struct workqueue_struct *release_wq;
	struct work_struct work;
};

/*
 * Internal z3fold page flags
 */
enum z3fold_page_flags {
	PAGE_HEADLESS = 0,
	MIDDLE_CHUNK_MAPPED,
	NEEDS_COMPACTING,
	PAGE_STALE
};

/*****************
 * Helpers
 *****************/

/* Converts an allocation size in bytes to size in z3fold chunks */
static int size_to_chunks(size_t size)
{
	return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}

#define for_each_unbuddied_list(_iter, _begin) \
	for ((_iter) = (_begin); (_iter) < NCHUNKS; (_iter)++)

static void compact_page_work(struct work_struct *w);

/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page,
					struct z3fold_pool *pool)
{
	struct z3fold_header *zhdr = page_address(page);

	INIT_LIST_HEAD(&page->lru);
	clear_bit(PAGE_HEADLESS, &page->private);
	clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	clear_bit(PAGE_STALE, &page->private);

	spin_lock_init(&zhdr->page_lock);
	kref_init(&zhdr->refcount);
	zhdr->first_chunks = 0;
	zhdr->middle_chunks = 0;
	zhdr->last_chunks = 0;
	zhdr->first_num = 0;
	zhdr->start_middle = 0;
	zhdr->cpu = -1;
	zhdr->pool = pool;
	INIT_LIST_HEAD(&zhdr->buddy);
	INIT_WORK(&zhdr->work, compact_page_work);
	return zhdr;
}

/* Resets the struct page fields and frees the page */
static void free_z3fold_page(struct page *page)
{
	__free_page(page);
}

/* Lock a z3fold page */
static inline void z3fold_page_lock(struct z3fold_header *zhdr)
{
	spin_lock(&zhdr->page_lock);
}

/* Try to lock a z3fold page */
static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
{
	return spin_trylock(&zhdr->page_lock);
}

/* Unlock a z3fold page */
static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
{
	spin_unlock(&zhdr->page_lock);
}

/*
 * Encodes the handle of a particular buddy within a z3fold page
 * Pool lock should be held as this function accesses first_num
 */
static unsigned long encode_handle(struct z3fold_header *zhdr, enum buddy bud)
{
	unsigned long handle;

	handle = (unsigned long)zhdr;
	if (bud != HEADLESS)
		handle += (bud + zhdr->first_num) & BUDDY_MASK;
	return handle;
}

/* Returns the z3fold page where a given handle is stored */
static struct z3fold_header *handle_to_z3fold_header(unsigned long handle)
{
	return (struct z3fold_header *)(handle & PAGE_MASK);
}

/*
 * (handle & BUDDY_MASK) < zhdr->first_num is possible in encode_handle
 * but that doesn't matter, because the masking will result in the
 * correct buddy number.
 */
static enum buddy handle_to_buddy(unsigned long handle)
{
	struct z3fold_header *zhdr = handle_to_z3fold_header(handle);
	return (handle - zhdr->first_num) & BUDDY_MASK;
}
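
/*
 * Handle encoding example (illustrative): a z3fold page is page
 * aligned, so the low PAGE_SHIFT bits of its zhdr address are zero
 * and bits 0-1 can carry the buddy index.  For a MIDDLE object in a
 * page with first_num == 0, encode_handle() above yields
 *
 *	handle = (unsigned long)zhdr + ((MIDDLE + 0) & BUDDY_MASK)
 *	       = (unsigned long)zhdr + 0x2
 *
 * If z3fold_compact_page() later moves that object into the FIRST
 * slot, it also increments first_num, so the old handle still decodes
 * correctly in handle_to_buddy():
 *
 *	(handle - first_num) & BUDDY_MASK = (0x2 - 1) & 0x3 = FIRST
 */
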
static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
{
	struct page *page = virt_to_page(zhdr);
	struct z3fold_pool *pool = zhdr->pool;

	WARN_ON(!list_empty(&zhdr->buddy));
	set_bit(PAGE_STALE, &page->private);
	clear_bit(NEEDS_COMPACTING, &page->private);
	spin_lock(&pool->lock);
	if (!list_empty(&page->lru))
		list_del(&page->lru);
	spin_unlock(&pool->lock);
	if (locked)
		z3fold_page_unlock(zhdr);
	spin_lock(&pool->stale_lock);
	list_add(&zhdr->buddy, &pool->stale);
	queue_work(pool->release_wq, &pool->work);
	spin_unlock(&pool->stale_lock);
}

static void __attribute__((__unused__))
			release_z3fold_page(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	__release_z3fold_page(zhdr, false);
}

static void release_z3fold_page_locked(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void release_z3fold_page_locked_list(struct kref *ref)
{
	struct z3fold_header *zhdr = container_of(ref, struct z3fold_header,
						refcount);
	spin_lock(&zhdr->pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&zhdr->pool->lock);

	WARN_ON(z3fold_page_trylock(zhdr));
	__release_z3fold_page(zhdr, true);
}

static void free_pages_work(struct work_struct *w)
{
	struct z3fold_pool *pool = container_of(w, struct z3fold_pool, work);

	spin_lock(&pool->stale_lock);
	while (!list_empty(&pool->stale)) {
		struct z3fold_header *zhdr = list_first_entry(&pool->stale,
						struct z3fold_header, buddy);
		struct page *page = virt_to_page(zhdr);

		list_del(&zhdr->buddy);
		if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
			continue;
		spin_unlock(&pool->stale_lock);
		cancel_work_sync(&zhdr->work);
		free_z3fold_page(page);
		cond_resched();
		spin_lock(&pool->stale_lock);
	}
	spin_unlock(&pool->stale_lock);
}

/*
 * Returns the number of free chunks in a z3fold page.
 * NB: can't be used with HEADLESS pages.
 */
static int num_free_chunks(struct z3fold_header *zhdr)
{
	int nfree;
	/*
	 * If there is a middle object, pick up the bigger free space
	 * either before or after it. Otherwise just subtract the number
	 * of chunks occupied by the first and the last objects.
	 */
	if (zhdr->middle_chunks != 0) {
		int nfree_before = zhdr->first_chunks ?
			0 : zhdr->start_middle - ZHDR_CHUNKS;
		int nfree_after = zhdr->last_chunks ?
			0 : TOTAL_CHUNKS -
				(zhdr->start_middle + zhdr->middle_chunks);
		nfree = max(nfree_before, nfree_after);
	} else
		nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
	return nfree;
}
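
/*
 * Free-space example for num_free_chunks() (illustrative):
 *
 *	| hdr | first |  gap A  | middle |  gap B  |
 *
 * With a middle object present, a new buddy must fit entirely into
 * gap A or gap B, so only the bigger gap is reported; a gap next to
 * an already allocated first/last buddy reports as 0 since that slot
 * is taken.  Without a middle object, the first and last objects grow
 * towards each other and the whole remainder,
 * NCHUNKS - first_chunks - last_chunks, is usable.
 */
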
static inline void *mchunk_memmove(struct z3fold_header *zhdr,
				unsigned short dst_chunk)
{
	void *beg = zhdr;
	return memmove(beg + (dst_chunk << CHUNK_SHIFT),
		       beg + (zhdr->start_middle << CHUNK_SHIFT),
		       zhdr->middle_chunks << CHUNK_SHIFT);
}

#define BIG_CHUNK_GAP	3
/* Has to be called with lock held */
static int z3fold_compact_page(struct z3fold_header *zhdr)
{
	struct page *page = virt_to_page(zhdr);

	if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
		return 0; /* can't move middle chunk, it's used */

	if (zhdr->middle_chunks == 0)
		return 0; /* nothing to compact */

	if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) {
		/* move to the beginning */
		mchunk_memmove(zhdr, ZHDR_CHUNKS);
		zhdr->first_chunks = zhdr->middle_chunks;
		zhdr->middle_chunks = 0;
		zhdr->start_middle = 0;
		zhdr->first_num++;
		return 1;
	}

	/*
	 * moving data is expensive, so let's only do that if
	 * there's substantial gain (at least BIG_CHUNK_GAP chunks)
	 */
	if (zhdr->first_chunks != 0 && zhdr->last_chunks == 0 &&
	    zhdr->start_middle - (zhdr->first_chunks + ZHDR_CHUNKS) >=
			BIG_CHUNK_GAP) {
		mchunk_memmove(zhdr, zhdr->first_chunks + ZHDR_CHUNKS);
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
		return 1;
	} else if (zhdr->last_chunks != 0 && zhdr->first_chunks == 0 &&
		   TOTAL_CHUNKS - (zhdr->last_chunks + zhdr->start_middle
					+ zhdr->middle_chunks) >=
			BIG_CHUNK_GAP) {
		unsigned short new_start = TOTAL_CHUNKS - zhdr->last_chunks -
			zhdr->middle_chunks;
		mchunk_memmove(zhdr, new_start);
		zhdr->start_middle = new_start;
		return 1;
	}

	return 0;
}
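
/*
 * Worker counterpart of z3fold_compact_page() above: takes the page
 * lock (unless the caller already holds it), bails out early on stale
 * pages, removes the page from its unbuddied list, compacts it, and
 * re-inserts it into the current CPU's unbuddied list keyed by the
 * new free-chunk count, unless the page has become completely free or
 * fully buddied in the meantime.
 */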
static void do_compact_page(struct z3fold_header *zhdr, bool locked)
{
	struct z3fold_pool *pool = zhdr->pool;
	struct page *page;
	struct list_head *unbuddied;
	int fchunks;

	page = virt_to_page(zhdr);
	if (locked)
		WARN_ON(z3fold_page_trylock(zhdr));
	else
		z3fold_page_lock(zhdr);
	if (test_bit(PAGE_STALE, &page->private) ||
	    !test_and_clear_bit(NEEDS_COMPACTING, &page->private)) {
		z3fold_page_unlock(zhdr);
		return;
	}
	spin_lock(&pool->lock);
	list_del_init(&zhdr->buddy);
	spin_unlock(&pool->lock);

	z3fold_compact_page(zhdr);
	unbuddied = get_cpu_ptr(pool->unbuddied);
	fchunks = num_free_chunks(zhdr);
	if (fchunks < NCHUNKS &&
	    (!zhdr->first_chunks || !zhdr->middle_chunks ||
			!zhdr->last_chunks)) {
		/* the page's not completely free and it's unbuddied */
		spin_lock(&pool->lock);
		list_add(&zhdr->buddy, &unbuddied[fchunks]);
		spin_unlock(&pool->lock);
		zhdr->cpu = smp_processor_id();
	}
	put_cpu_ptr(pool->unbuddied);
	z3fold_page_unlock(zhdr);
}

static void compact_page_work(struct work_struct *w)
{
	struct z3fold_header *zhdr = container_of(w, struct z3fold_header,
						work);

	do_compact_page(zhdr, false);
}


/*
 * API Functions
 */

/**
 * z3fold_create_pool() - create a new z3fold pool
 * @name:	pool name
 * @gfp:	gfp flags when allocating the z3fold pool structure
 * @ops:	user-defined operations for the z3fold pool
 *
 * Return: pointer to the new z3fold pool or NULL if the metadata allocation
 * failed.
 */
static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp,
		const struct z3fold_ops *ops)
{
	struct z3fold_pool *pool = NULL;
	int i, cpu;

	pool = kzalloc(sizeof(struct z3fold_pool), gfp);
	if (!pool)
		goto out;
	spin_lock_init(&pool->lock);
	spin_lock_init(&pool->stale_lock);
	pool->unbuddied = __alloc_percpu(sizeof(struct list_head)*NCHUNKS,
					 __alignof__(struct list_head));
	if (!pool->unbuddied)
		goto out_pool;
	for_each_possible_cpu(cpu) {
		struct list_head *unbuddied =
				per_cpu_ptr(pool->unbuddied, cpu);
		for_each_unbuddied_list(i, 0)
			INIT_LIST_HEAD(&unbuddied[i]);
	}
	INIT_LIST_HEAD(&pool->lru);
	INIT_LIST_HEAD(&pool->stale);
	atomic64_set(&pool->pages_nr, 0);
	pool->name = name;
	pool->compact_wq = create_singlethread_workqueue(pool->name);
	if (!pool->compact_wq)
		goto out_unbuddied;
	pool->release_wq = create_singlethread_workqueue(pool->name);
	if (!pool->release_wq)
		goto out_wq;
	INIT_WORK(&pool->work, free_pages_work);
	pool->ops = ops;
	return pool;

out_wq:
	destroy_workqueue(pool->compact_wq);
out_unbuddied:
	free_percpu(pool->unbuddied);
out_pool:
	kfree(pool);
out:
	return NULL;
}

/**
 * z3fold_destroy_pool() - destroys an existing z3fold pool
 * @pool:	the z3fold pool to be destroyed
 *
 * The pool should be emptied before this function is called.
 */
static void z3fold_destroy_pool(struct z3fold_pool *pool)
{
	destroy_workqueue(pool->release_wq);
	destroy_workqueue(pool->compact_wq);
	free_percpu(pool->unbuddied);
	kfree(pool);
}
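
/*
 * Minimal lifecycle sketch for the two functions above (illustrative
 * only; the identifiers are hypothetical and in-tree users reach this
 * allocator through the zpool glue at the bottom of this file):
 *
 *	struct z3fold_pool *pool;
 *
 *	pool = z3fold_create_pool("example", GFP_KERNEL, &example_ops);
 *	if (!pool)
 *		return -ENOMEM;
 *	... allocate, map and free objects ...
 *	z3fold_destroy_pool(pool);	(the pool must be empty by now)
 */
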
/**
 * z3fold_alloc() - allocates a region of a given size
 * @pool:	z3fold pool from which to allocate
 * @size:	size in bytes of the desired allocation
 * @gfp:	gfp flags used if the pool needs to grow
 * @handle:	handle of the new allocation
 *
 * This function will attempt to find a free region in the pool large enough
 * to satisfy the allocation request. A search of the unbuddied lists is
 * performed first. If no suitable free region is found, then a new page is
 * allocated and added to the pool to satisfy the request.
 *
 * gfp should not set __GFP_HIGHMEM as highmem pages cannot be used
 * as z3fold pool pages.
 *
 * Return: 0 if success and handle is set, otherwise -EINVAL if the size or
 * gfp arguments are invalid or -ENOMEM if the pool was unable to allocate
 * a new page.
 */
static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	int chunks = 0, i, freechunks;
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	enum buddy bud;
	bool can_sleep = (gfp & __GFP_RECLAIM) == __GFP_RECLAIM;

	if (!size || (gfp & __GFP_HIGHMEM))
		return -EINVAL;

	if (size > PAGE_SIZE)
		return -ENOSPC;

	if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
		bud = HEADLESS;
	else {
		struct list_head *unbuddied;
		chunks = size_to_chunks(size);

lookup:
		/* First, try to find an unbuddied z3fold page. */
		unbuddied = get_cpu_ptr(pool->unbuddied);
		for_each_unbuddied_list(i, chunks) {
			struct list_head *l = &unbuddied[i];

			zhdr = list_first_entry_or_null(READ_ONCE(l),
						struct z3fold_header, buddy);

			if (!zhdr)
				continue;

			/* Re-check under lock. */
			spin_lock(&pool->lock);
			l = &unbuddied[i];
			if (unlikely(zhdr != list_first_entry(READ_ONCE(l),
					struct z3fold_header, buddy)) ||
			    !z3fold_page_trylock(zhdr)) {
				spin_unlock(&pool->lock);
				put_cpu_ptr(pool->unbuddied);
				goto lookup;
			}
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			spin_unlock(&pool->lock);

			page = virt_to_page(zhdr);
			if (test_bit(NEEDS_COMPACTING, &page->private)) {
				z3fold_page_unlock(zhdr);
				zhdr = NULL;
				put_cpu_ptr(pool->unbuddied);
				if (can_sleep)
					cond_resched();
				goto lookup;
			}

			/*
			 * this page could not be removed from its unbuddied
			 * list while pool lock was held, and then we've taken
			 * page lock so kref_put could not be called before
			 * we got here, so it's safe to just call kref_get()
			 */
			kref_get(&zhdr->refcount);
			break;
		}
		put_cpu_ptr(pool->unbuddied);

		if (zhdr) {
			if (zhdr->first_chunks == 0) {
				if (zhdr->middle_chunks != 0 &&
				    chunks >= zhdr->start_middle)
					bud = LAST;
				else
					bud = FIRST;
			} else if (zhdr->last_chunks == 0)
				bud = LAST;
			else if (zhdr->middle_chunks == 0)
				bud = MIDDLE;
			else {
				if (kref_put(&zhdr->refcount,
					     release_z3fold_page_locked))
					atomic64_dec(&pool->pages_nr);
				else
					z3fold_page_unlock(zhdr);
				pr_err("No free chunks in unbuddied\n");
				WARN_ON(1);
				goto lookup;
			}
			goto found;
		}
		bud = FIRST;
	}

	spin_lock(&pool->stale_lock);
	zhdr = list_first_entry_or_null(&pool->stale,
					struct z3fold_header, buddy);
	/*
	 * Before allocating a page, let's see if we can take one from the
	 * stale pages list. cancel_work_sync() can sleep so we must make
	 * sure it won't be called in case we're in atomic context.
	 */
	if (zhdr && (can_sleep || !work_pending(&zhdr->work))) {
		list_del(&zhdr->buddy);
		spin_unlock(&pool->stale_lock);
		if (can_sleep)
			cancel_work_sync(&zhdr->work);
		page = virt_to_page(zhdr);
	} else {
		spin_unlock(&pool->stale_lock);
		page = alloc_page(gfp);
	}

	if (!page)
		return -ENOMEM;

	atomic64_inc(&pool->pages_nr);
	zhdr = init_z3fold_page(page, pool);

	if (bud == HEADLESS) {
		set_bit(PAGE_HEADLESS, &page->private);
		goto headless;
	}
	z3fold_page_lock(zhdr);

found:
	if (bud == FIRST)
		zhdr->first_chunks = chunks;
	else if (bud == LAST)
		zhdr->last_chunks = chunks;
	else {
		zhdr->middle_chunks = chunks;
		zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS;
	}

	if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 ||
	    zhdr->middle_chunks == 0) {
		struct list_head *unbuddied = get_cpu_ptr(pool->unbuddied);

		/* Add to unbuddied list */
		freechunks = num_free_chunks(zhdr);
		spin_lock(&pool->lock);
		list_add(&zhdr->buddy, &unbuddied[freechunks]);
		spin_unlock(&pool->lock);
		zhdr->cpu = smp_processor_id();
		put_cpu_ptr(pool->unbuddied);
	}

headless:
	spin_lock(&pool->lock);
	/* Add/move z3fold page to beginning of LRU */
	if (!list_empty(&page->lru))
		list_del(&page->lru);

	list_add(&page->lru, &pool->lru);

	*handle = encode_handle(zhdr, bud);
	spin_unlock(&pool->lock);
	if (bud != HEADLESS)
		z3fold_page_unlock(zhdr);

	return 0;
}
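
/*
 * Placement example for z3fold_alloc() (illustrative, 4K pages): a
 * request bigger than PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE gets
 * a page of its own and is stored HEADLESS, with no z3fold_header.
 * A 1000-byte request is rounded up to size_to_chunks(1000) = 16
 * chunks and placed in a free slot: FIRST in a fresh page, LAST when
 * FIRST is taken or when the object would not fit in front of an
 * existing middle buddy, and MIDDLE only when both ends are occupied.
 */
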
/**
 * z3fold_free() - frees the allocation associated with the given handle
 * @pool:	pool in which the allocation resided
 * @handle:	handle associated with the allocation returned by z3fold_alloc()
 *
 * In the case that the z3fold page in which the allocation resides is under
 * reclaim, this function only sets the corresponding chunk count to 0; the
 * page itself is freed once the last reference to it is dropped (see
 * z3fold_reclaim_page() below).
 */
static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy bud;

	zhdr = handle_to_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private)) {
		/* HEADLESS page stored */
		bud = HEADLESS;
	} else {
		z3fold_page_lock(zhdr);
		bud = handle_to_buddy(handle);

		switch (bud) {
		case FIRST:
			zhdr->first_chunks = 0;
			break;
		case MIDDLE:
			zhdr->middle_chunks = 0;
			zhdr->start_middle = 0;
			break;
		case LAST:
			zhdr->last_chunks = 0;
			break;
		default:
			pr_err("%s: unknown bud %d\n", __func__, bud);
			WARN_ON(1);
			z3fold_page_unlock(zhdr);
			return;
		}
	}

	if (bud == HEADLESS) {
		spin_lock(&pool->lock);
		list_del(&page->lru);
		spin_unlock(&pool->lock);
		free_z3fold_page(page);
		atomic64_dec(&pool->pages_nr);
		return;
	}

	if (kref_put(&zhdr->refcount, release_z3fold_page_locked_list)) {
		atomic64_dec(&pool->pages_nr);
		return;
	}
	if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
		z3fold_page_unlock(zhdr);
		return;
	}
	if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
		spin_lock(&pool->lock);
		list_del_init(&zhdr->buddy);
		spin_unlock(&pool->lock);
		zhdr->cpu = -1;
		do_compact_page(zhdr, true);
		return;
	}
	queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
	z3fold_page_unlock(zhdr);
}
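
/*
 * Note on the free path above: freeing a buddy never moves data
 * synchronously from this context unless the page has no home CPU or
 * that CPU is offline.  It zeroes the buddy's chunk count, drops a
 * reference, and normally just queues the page for compaction on the
 * CPU it last belonged to.
 */
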
/**
 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
 * @pool:	pool from which a page will attempt to be evicted
 * @retries:	number of pages on the LRU list for which eviction will
 *		be attempted before failing
 *
 * z3fold reclaim is different from normal system reclaim in that it is done
 * from the bottom, up. This is because only the bottom layer, z3fold, has
 * information on how the allocations are organized within each z3fold page.
 * This has the potential to create interesting locking situations between
 * z3fold and the user, however.
 *
 * To avoid these, this is how z3fold_reclaim_page() should be called:
 *
 * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
 * call the user-defined eviction handler with the pool and handle as
 * arguments.
 *
 * If the handle can not be evicted, the eviction handler should return
 * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
 * appropriate list and try the next z3fold page on the LRU up to
 * a user defined number of retries.
 *
 * If the handle is successfully evicted, the eviction handler should
 * return 0 _and_ should have called z3fold_free() on the handle. z3fold_free()
 * contains logic to delay freeing the page if the page is under reclaim,
 * as indicated by the reference the reclaim path still holds on it.
 *
 * If all buddies in the z3fold page are successfully evicted, then the
 * z3fold page can be freed.
 *
 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
 * no pages to evict or an eviction handler is not registered, -EAGAIN if
 * the retry limit was hit.
 */
static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
{
	int i, ret = 0;
	struct z3fold_header *zhdr = NULL;
	struct page *page = NULL;
	struct list_head *pos;
	unsigned long first_handle = 0, middle_handle = 0, last_handle = 0;

	spin_lock(&pool->lock);
	if (!pool->ops || !pool->ops->evict || retries == 0) {
		spin_unlock(&pool->lock);
		return -EINVAL;
	}
	for (i = 0; i < retries; i++) {
		if (list_empty(&pool->lru)) {
			spin_unlock(&pool->lock);
			return -EINVAL;
		}
		list_for_each_prev(pos, &pool->lru) {
			page = list_entry(pos, struct page, lru);
			if (test_bit(PAGE_HEADLESS, &page->private))
				/* candidate found */
				break;

			zhdr = page_address(page);
			if (!z3fold_page_trylock(zhdr))
				continue; /* can't evict at this point */
			kref_get(&zhdr->refcount);
			list_del_init(&zhdr->buddy);
			zhdr->cpu = -1;
			break;
		}

		list_del_init(&page->lru);
		spin_unlock(&pool->lock);

		if (!test_bit(PAGE_HEADLESS, &page->private)) {
			/*
			 * We need to encode the handles before unlocking,
			 * since we can race with free that will set
			 * (first|last)_chunks to 0
			 */
			first_handle = 0;
			last_handle = 0;
			middle_handle = 0;
			if (zhdr->first_chunks)
				first_handle = encode_handle(zhdr, FIRST);
			if (zhdr->middle_chunks)
				middle_handle = encode_handle(zhdr, MIDDLE);
			if (zhdr->last_chunks)
				last_handle = encode_handle(zhdr, LAST);
			/*
			 * it's safe to unlock here because we hold a
			 * reference to this page
			 */
			z3fold_page_unlock(zhdr);
		} else {
			first_handle = encode_handle(page_address(page),
						     HEADLESS);
			last_handle = middle_handle = 0;
		}

		/* Issue the eviction callback(s) */
		if (middle_handle) {
			ret = pool->ops->evict(pool, middle_handle);
			if (ret)
				goto next;
		}
		if (first_handle) {
			ret = pool->ops->evict(pool, first_handle);
			if (ret)
				goto next;
		}
		if (last_handle) {
			ret = pool->ops->evict(pool, last_handle);
			if (ret)
				goto next;
		}
next:
		spin_lock(&pool->lock);
		if (test_bit(PAGE_HEADLESS, &page->private)) {
			if (ret == 0) {
				spin_unlock(&pool->lock);
				free_z3fold_page(page);
				return 0;
			}
		} else if (kref_put(&zhdr->refcount, release_z3fold_page)) {
			atomic64_dec(&pool->pages_nr);
			spin_unlock(&pool->lock);
			return 0;
		}

		/*
		 * Add to the beginning of LRU.
		 * Pool lock has to be kept here to ensure the page has
		 * not already been released
		 */
		list_add(&page->lru, &pool->lru);
	}
	spin_unlock(&pool->lock);
	return -EAGAIN;
}
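
/*
 * Sketch of a conforming eviction handler (illustrative; the helper
 * name is hypothetical and the in-tree user is the zpool glue below):
 *
 *	static int example_evict(struct z3fold_pool *pool,
 *				 unsigned long handle)
 *	{
 *		if (!write_back_object(handle))	(hypothetical helper)
 *			return -EAGAIN;	(page goes back onto the LRU)
 *		z3fold_free(pool, handle);	(required on success)
 *		return 0;
 *	}
 */
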
/**
 * z3fold_map() - maps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be mapped
 *
 * Extracts the buddy number from handle and constructs the pointer to the
 * correct starting chunk within the page.
 *
 * Returns: a pointer to the mapped allocation
 */
static void *z3fold_map(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	void *addr;
	enum buddy buddy;

	zhdr = handle_to_z3fold_header(handle);
	addr = zhdr;
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		goto out;

	z3fold_page_lock(zhdr);
	buddy = handle_to_buddy(handle);
	switch (buddy) {
	case FIRST:
		addr += ZHDR_SIZE_ALIGNED;
		break;
	case MIDDLE:
		addr += zhdr->start_middle << CHUNK_SHIFT;
		set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
		break;
	case LAST:
		addr += PAGE_SIZE - (zhdr->last_chunks << CHUNK_SHIFT);
		break;
	default:
		pr_err("unknown buddy id %d\n", buddy);
		WARN_ON(1);
		addr = NULL;
		break;
	}

	z3fold_page_unlock(zhdr);
out:
	return addr;
}

/**
 * z3fold_unmap() - unmaps the allocation associated with the given handle
 * @pool:	pool in which the allocation resides
 * @handle:	handle associated with the allocation to be unmapped
 */
static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle)
{
	struct z3fold_header *zhdr;
	struct page *page;
	enum buddy buddy;

	zhdr = handle_to_z3fold_header(handle);
	page = virt_to_page(zhdr);

	if (test_bit(PAGE_HEADLESS, &page->private))
		return;

	z3fold_page_lock(zhdr);
	buddy = handle_to_buddy(handle);
	if (buddy == MIDDLE)
		clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
	z3fold_page_unlock(zhdr);
}

/**
 * z3fold_get_pool_size() - gets the z3fold pool size in pages
 * @pool:	pool whose size is being queried
 *
 * Returns: size in pages of the given pool.
 */
static u64 z3fold_get_pool_size(struct z3fold_pool *pool)
{
	return atomic64_read(&pool->pages_nr);
}
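
/*
 * Example of reaching this allocator through the zpool facade defined
 * below (illustrative; error handling elided):
 *
 *	struct zpool *zp = zpool_create_pool("z3fold", "example",
 *					     GFP_KERNEL, &my_zpool_ops);
 *	unsigned long handle;
 *	void *mem;
 *
 *	zpool_malloc(zp, size, GFP_KERNEL, &handle);
 *	mem = zpool_map_handle(zp, handle, ZPOOL_MM_RW);
 *	... write the compressed object to mem ...
 *	zpool_unmap_handle(zp, handle);
 *	zpool_free(zp, handle);
 *	zpool_destroy_pool(zp);
 */
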
/*****************
 * zpool
 ****************/

static int z3fold_zpool_evict(struct z3fold_pool *pool, unsigned long handle)
{
	if (pool->zpool && pool->zpool_ops && pool->zpool_ops->evict)
		return pool->zpool_ops->evict(pool->zpool, handle);
	else
		return -ENOENT;
}

static const struct z3fold_ops z3fold_zpool_ops = {
	.evict =	z3fold_zpool_evict
};

static void *z3fold_zpool_create(const char *name, gfp_t gfp,
			       const struct zpool_ops *zpool_ops,
			       struct zpool *zpool)
{
	struct z3fold_pool *pool;

	pool = z3fold_create_pool(name, gfp,
				zpool_ops ? &z3fold_zpool_ops : NULL);
	if (pool) {
		pool->zpool = zpool;
		pool->zpool_ops = zpool_ops;
	}
	return pool;
}

static void z3fold_zpool_destroy(void *pool)
{
	z3fold_destroy_pool(pool);
}

static int z3fold_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
	return z3fold_alloc(pool, size, gfp, handle);
}
static void z3fold_zpool_free(void *pool, unsigned long handle)
{
	z3fold_free(pool, handle);
}

static int z3fold_zpool_shrink(void *pool, unsigned int pages,
			unsigned int *reclaimed)
{
	unsigned int total = 0;
	int ret = -EINVAL;

	while (total < pages) {
		ret = z3fold_reclaim_page(pool, 8);
		if (ret < 0)
			break;
		total++;
	}

	if (reclaimed)
		*reclaimed = total;

	return ret;
}

static void *z3fold_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	return z3fold_map(pool, handle);
}
static void z3fold_zpool_unmap(void *pool, unsigned long handle)
{
	z3fold_unmap(pool, handle);
}

static u64 z3fold_zpool_total_size(void *pool)
{
	return z3fold_get_pool_size(pool) * PAGE_SIZE;
}

static struct zpool_driver z3fold_zpool_driver = {
	.type =		"z3fold",
	.owner =	THIS_MODULE,
	.create =	z3fold_zpool_create,
	.destroy =	z3fold_zpool_destroy,
	.malloc =	z3fold_zpool_malloc,
	.free =		z3fold_zpool_free,
	.shrink =	z3fold_zpool_shrink,
	.map =		z3fold_zpool_map,
	.unmap =	z3fold_zpool_unmap,
	.total_size =	z3fold_zpool_total_size,
};

MODULE_ALIAS("zpool-z3fold");

static int __init init_z3fold(void)
{
	/* Make sure the z3fold header is not larger than the page size */
	BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE);
	zpool_register_driver(&z3fold_zpool_driver);

	return 0;
}

static void __exit exit_z3fold(void)
{
	zpool_unregister_driver(&z3fold_zpool_driver);
}

module_init(init_z3fold);
module_exit(exit_z3fold);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool <vitalywool@gmail.com>");
MODULE_DESCRIPTION("3-Fold Allocator for Compressed Pages");