/*
   drbd_bitmap.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2004-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 2004-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2004-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/drbd.h>
#include <linux/slab.h>
#include <asm/kmap_types.h>
#include "drbd_int.h"

/* OPAQUE outside this file!
 * interface defined in drbd_int.h

 * convention:
 * function name drbd_bm_... => used elsewhere, "public".
 * function name      bm_... => internal to implementation, "private".

 * Note that since find_first_bit returns int, at the current granularity of
 * the bitmap (4KB per bit), this implementation "only" supports up to
 * 1<<(32+12) == 16 TB...
 */

/*
 * NOTE
 *  Access to the *bm_pages is protected by bm_lock.
 *  It is safe to read the other members within the lock.
 *
 *  drbd_bm_set_bits is called from bio_endio callbacks,
 *  We may be called with irq already disabled,
 *  so we need spin_lock_irqsave().
 *  And we need the kmap_atomic.
 */
struct drbd_bitmap {
        struct page **bm_pages;
        spinlock_t bm_lock;
        /* WARNING unsigned long bm_*:
         * a 32bit bit number offset is just enough for a 512 MB bitmap.
         * it will blow up if we make the bitmap bigger...
         * not that it makes much sense to have a bitmap that large,
         * rather change the granularity to 16k or 64k or something.
         * (that implies other problems, however...)
         */
        unsigned long bm_set;       /* nr of set bits; THINK maybe atomic_t? */
        unsigned long bm_bits;
        size_t   bm_words;
        size_t   bm_number_of_pages;
        sector_t bm_dev_capacity;
        struct mutex bm_change; /* serializes resize operations */

        atomic_t bm_async_io;
        wait_queue_head_t bm_io_wait;

        unsigned long  bm_flags;

        /* debugging aid, in case we are still racy somewhere */
        char          *bm_why;
        struct task_struct *bm_task;
};

/* definition of bits in bm_flags */
#define BM_LOCKED       0
#define BM_MD_IO_ERROR  1
#define BM_P_VMALLOCED  2

static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
                               unsigned long e, int val, const enum km_type km);

static int bm_is_locked(struct drbd_bitmap *b)
{
        return test_bit(BM_LOCKED, &b->bm_flags);
}

#define bm_print_lock_info(m) __bm_print_lock_info(m, __func__)
static void __bm_print_lock_info(struct drbd_conf *mdev, const char *func)
{
        struct drbd_bitmap *b = mdev->bitmap;
        if (!__ratelimit(&drbd_ratelimit_state))
                return;
        dev_err(DEV, "FIXME %s in %s, bitmap locked for '%s' by %s\n",
            current == mdev->receiver.task ? "receiver" :
            current == mdev->asender.task  ? "asender"  :
            current == mdev->worker.task   ? "worker"   : current->comm,
            func, b->bm_why ?: "?",
            b->bm_task == mdev->receiver.task ? "receiver" :
            b->bm_task == mdev->asender.task  ? "asender"  :
            b->bm_task == mdev->worker.task   ? "worker"   : "?");
}

void drbd_bm_lock(struct drbd_conf *mdev, char *why)
{
        struct drbd_bitmap *b = mdev->bitmap;
        int trylock_failed;

        if (!b) {
                dev_err(DEV, "FIXME no bitmap in drbd_bm_lock!?\n");
                return;
        }

        trylock_failed = !mutex_trylock(&b->bm_change);

        if (trylock_failed) {
                dev_warn(DEV, "%s going to '%s' but bitmap already locked for '%s' by %s\n",
                    current == mdev->receiver.task ? "receiver" :
                    current == mdev->asender.task  ? "asender"  :
                    current == mdev->worker.task   ? "worker"   : current->comm,
                    why, b->bm_why ?: "?",
                    b->bm_task == mdev->receiver.task ? "receiver" :
                    b->bm_task == mdev->asender.task  ? "asender"  :
                    b->bm_task == mdev->worker.task   ? "worker"   : "?");
                mutex_lock(&b->bm_change);
        }
        if (__test_and_set_bit(BM_LOCKED, &b->bm_flags))
                dev_err(DEV, "FIXME bitmap already locked in bm_lock\n");

        b->bm_why  = why;
        b->bm_task = current;
}

void drbd_bm_unlock(struct drbd_conf *mdev)
{
        struct drbd_bitmap *b = mdev->bitmap;
        if (!b) {
                dev_err(DEV, "FIXME no bitmap in drbd_bm_unlock!?\n");
                return;
        }

        if (!__test_and_clear_bit(BM_LOCKED, &mdev->bitmap->bm_flags))
                dev_err(DEV, "FIXME bitmap not locked in bm_unlock\n");

        b->bm_why  = NULL;
        b->bm_task = NULL;
        mutex_unlock(&b->bm_change);
}

/* word offset to long pointer */
static unsigned long *__bm_map_paddr(struct drbd_bitmap *b, unsigned long offset, const enum km_type km)
{
        struct page *page;
        unsigned long page_nr;

        /* page_nr = (word*sizeof(long)) >> PAGE_SHIFT; */
        page_nr = offset >> (PAGE_SHIFT - LN2_BPL + 3);
        BUG_ON(page_nr >= b->bm_number_of_pages);
        page = b->bm_pages[page_nr];

        return (unsigned long *) kmap_atomic(page, km);
}

static unsigned long *bm_map_paddr(struct drbd_bitmap *b, unsigned long offset)
{
        return __bm_map_paddr(b, offset, KM_IRQ1);
}

static void __bm_unmap(unsigned long *p_addr, const enum km_type km)
{
        kunmap_atomic(p_addr, km);
}

static void bm_unmap(unsigned long *p_addr)
{
        __bm_unmap(p_addr, KM_IRQ1);
}

/* long word offset of _bitmap_ sector */
#define S2W(s)  ((s)<<(BM_EXT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL))
/* word offset from start of bitmap to word number _in_page_
 * modulo longs per page
#define MLPP(X) ((X) % (PAGE_SIZE/sizeof(long)))
 hm, well, Philipp thinks gcc might not optimize the % into & (... - 1)
 so do it explicitly:
 */
#define MLPP(X) ((X) & ((PAGE_SIZE/sizeof(long))-1))

/* Long words per page */
#define LWPP (PAGE_SIZE/sizeof(long))

/*
 * actually most functions herein should take a struct drbd_bitmap*, not a
 * struct drbd_conf*, but for the debug macros I like to have the mdev around
 * to be able to report device specific problems.
 */
static void bm_free_pages(struct page **pages, unsigned long number)
{
        unsigned long i;
        if (!pages)
                return;

        for (i = 0; i < number; i++) {
                if (!pages[i]) {
                        printk(KERN_ALERT "drbd: bm_free_pages tried to free "
                               "a NULL pointer; i=%lu n=%lu\n",
                               i, number);
                        continue;
                }
                __free_page(pages[i]);
                pages[i] = NULL;
        }
}

static void bm_vk_free(void *ptr, int v)
{
        if (v)
                vfree(ptr);
        else
                kfree(ptr);
}

/*
 * "have" and "want" are NUMBER OF PAGES.
 */
static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
{
        struct page **old_pages = b->bm_pages;
        struct page **new_pages, *page;
        unsigned int i, bytes, vmalloced = 0;
        unsigned long have = b->bm_number_of_pages;

        BUG_ON(have == 0 && old_pages != NULL);
        BUG_ON(have != 0 && old_pages == NULL);

        if (have == want)
                return old_pages;

        /* Trying kmalloc first, falling back to vmalloc.
         * GFP_KERNEL is ok, as this is done when a lower level disk is
         * "attached" to the drbd.  Context is receiver thread or cqueue
         * thread.  As we have no disk yet, we are not in the IO path,
         * not even the IO path of the peer. */
        bytes = sizeof(struct page *)*want;
        new_pages = kmalloc(bytes, GFP_KERNEL);
        if (!new_pages) {
                new_pages = vmalloc(bytes);
                if (!new_pages)
                        return NULL;
                vmalloced = 1;
        }

        memset(new_pages, 0, bytes);
        if (want >= have) {
                for (i = 0; i < have; i++)
                        new_pages[i] = old_pages[i];
                for (; i < want; i++) {
                        page = alloc_page(GFP_HIGHUSER);
                        if (!page) {
                                bm_free_pages(new_pages + have, i - have);
                                bm_vk_free(new_pages, vmalloced);
                                return NULL;
                        }
                        new_pages[i] = page;
                }
        } else {
                for (i = 0; i < want; i++)
                        new_pages[i] = old_pages[i];
                /* NOT HERE, we are outside the spinlock!
                bm_free_pages(old_pages + want, have - want);
                */
        }

        if (vmalloced)
                set_bit(BM_P_VMALLOCED, &b->bm_flags);
        else
                clear_bit(BM_P_VMALLOCED, &b->bm_flags);

        return new_pages;
}

/*
 * called on driver init only. TODO call when a device is created.
 * allocates the drbd_bitmap, and stores it in mdev->bitmap.
 */
int drbd_bm_init(struct drbd_conf *mdev)
{
        struct drbd_bitmap *b = mdev->bitmap;
        WARN_ON(b != NULL);
        b = kzalloc(sizeof(struct drbd_bitmap), GFP_KERNEL);
        if (!b)
                return -ENOMEM;
        spin_lock_init(&b->bm_lock);
        mutex_init(&b->bm_change);
        init_waitqueue_head(&b->bm_io_wait);

        mdev->bitmap = b;

        return 0;
}

sector_t drbd_bm_capacity(struct drbd_conf *mdev)
{
        ERR_IF(!mdev->bitmap) return 0;
        return mdev->bitmap->bm_dev_capacity;
}

/* called on driver unload. TODO: call when a device is destroyed.
 */
void drbd_bm_cleanup(struct drbd_conf *mdev)
{
        ERR_IF (!mdev->bitmap) return;
        bm_free_pages(mdev->bitmap->bm_pages, mdev->bitmap->bm_number_of_pages);
        bm_vk_free(mdev->bitmap->bm_pages, test_bit(BM_P_VMALLOCED, &mdev->bitmap->bm_flags));
        kfree(mdev->bitmap);
        mdev->bitmap = NULL;
}
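/*
 * Worked example for the __bm_map_paddr()/MLPP() arithmetic above, as used
 * by bm_clear_surplus() and friends below.  Numbers assume 64-bit longs
 * (LN2_BPL == 6) and 4K pages (PAGE_SHIFT == 12); illustrative only:
 *
 *   bit 70000 -> word offset w = 70000 >> 6 = 1093
 *             -> page_nr       = 1093 >> (12 - 6 + 3) = 1093 >> 9 = 2
 *             -> word in page  = MLPP(1093) = 1093 & 511 = 69
 *             -> bit in word   = 70000 & 63 = 48
 *
 * Each page thus holds LWPP == 512 longs == 32768 bits, and bit 70000
 * indeed lives in the third page: 2*32768 == 65536 <= 70000 < 98304.
 */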
/*
 * since (b->bm_bits % BITS_PER_LONG) != 0,
 * this masks out the remaining bits.
 * Returns the number of bits cleared.
 */
static int bm_clear_surplus(struct drbd_bitmap *b)
{
        const unsigned long mask = (1UL << (b->bm_bits & (BITS_PER_LONG-1))) - 1;
        size_t w = b->bm_bits >> LN2_BPL;
        int cleared = 0;
        unsigned long *p_addr, *bm;

        p_addr = bm_map_paddr(b, w);
        bm = p_addr + MLPP(w);
        if (w < b->bm_words) {
                cleared = hweight_long(*bm & ~mask);
                *bm &= mask;
                w++; bm++;
        }

        if (w < b->bm_words) {
                cleared += hweight_long(*bm);
                *bm = 0;
        }
        bm_unmap(p_addr);
        return cleared;
}

static void bm_set_surplus(struct drbd_bitmap *b)
{
        const unsigned long mask = (1UL << (b->bm_bits & (BITS_PER_LONG-1))) - 1;
        size_t w = b->bm_bits >> LN2_BPL;
        unsigned long *p_addr, *bm;

        p_addr = bm_map_paddr(b, w);
        bm = p_addr + MLPP(w);
        if (w < b->bm_words) {
                *bm |= ~mask;
                bm++; w++;
        }

        if (w < b->bm_words) {
                *bm = ~(0UL);
        }
        bm_unmap(p_addr);
}
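/*
 * Mask example for bm_clear_surplus()/bm_set_surplus() above, assuming
 * BITS_PER_LONG == 64 (illustrative only): for bm_bits = 70,
 *
 *   w    = 70 >> 6 = 1                      (last partially used word)
 *   mask = (1UL << (70 & 63)) - 1 = 0x3f    (bits 64..69 are valid)
 *
 * bm_clear_surplus() thus zeroes bits 70..127 in word 1 (and, if
 * bm_words extends one long further, all of the following word), while
 * bm_set_surplus() sets exactly those same surplus bits.
 */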
static unsigned long __bm_count_bits(struct drbd_bitmap *b, const int swap_endian)
{
        unsigned long *p_addr, *bm, offset = 0;
        unsigned long bits = 0;
        unsigned long i, do_now;

        while (offset < b->bm_words) {
                i = do_now = min_t(size_t, b->bm_words-offset, LWPP);
                p_addr = __bm_map_paddr(b, offset, KM_USER0);
                bm = p_addr + MLPP(offset);
                while (i--) {
#ifndef __LITTLE_ENDIAN
                        if (swap_endian)
                                *bm = lel_to_cpu(*bm);
#endif
                        bits += hweight_long(*bm++);
                }
                __bm_unmap(p_addr, KM_USER0);
                offset += do_now;
                cond_resched();
        }

        return bits;
}

static unsigned long bm_count_bits(struct drbd_bitmap *b)
{
        return __bm_count_bits(b, 0);
}

static unsigned long bm_count_bits_swap_endian(struct drbd_bitmap *b)
{
        return __bm_count_bits(b, 1);
}

/* offset and len in long words.*/
static void bm_memset(struct drbd_bitmap *b, size_t offset, int c, size_t len)
{
        unsigned long *p_addr, *bm;
        size_t do_now, end;

#define BM_SECTORS_PER_BIT (BM_BLOCK_SIZE/512)

        end = offset + len;

        if (end > b->bm_words) {
                printk(KERN_ALERT "drbd: bm_memset end > bm_words\n");
                return;
        }

        while (offset < end) {
                do_now = min_t(size_t, ALIGN(offset + 1, LWPP), end) - offset;
                p_addr = bm_map_paddr(b, offset);
                bm = p_addr + MLPP(offset);
                if (bm + do_now > p_addr + LWPP) {
                        printk(KERN_ALERT "drbd: BUG BUG BUG! p_addr:%p bm:%p do_now:%d\n",
                               p_addr, bm, (int)do_now);
                        bm_unmap(p_addr); /* do not leak the atomic kmap */
                        break; /* never touch memory outside the mapped page */
                }
                memset(bm, c, do_now * sizeof(long));
                bm_unmap(p_addr);
                offset += do_now;
        }
}

/*
 * make sure the bitmap has enough room for the attached storage,
 * if necessary, resize.
 * called whenever we may have changed the device size.
 * returns -ENOMEM if we could not allocate enough memory, 0 on success.
 * In case this is actually a resize, we copy the old bitmap into the new one.
 * Otherwise, the bitmap is initialized to all bits set.
 */
int drbd_bm_resize(struct drbd_conf *mdev, sector_t capacity, int set_new_bits)
{
        struct drbd_bitmap *b = mdev->bitmap;
        unsigned long bits, words, owords, obits, *p_addr, *bm;
        unsigned long want, have, onpages; /* number of pages */
        struct page **npages, **opages = NULL;
        int err = 0, growing;
        int opages_vmalloced;

        ERR_IF(!b) return -ENOMEM;

        drbd_bm_lock(mdev, "resize");

        dev_info(DEV, "drbd_bm_resize called with capacity == %llu\n",
                        (unsigned long long)capacity);

        if (capacity == b->bm_dev_capacity)
                goto out;

        opages_vmalloced = test_bit(BM_P_VMALLOCED, &b->bm_flags);

        if (capacity == 0) {
                spin_lock_irq(&b->bm_lock);
                opages = b->bm_pages;
                onpages = b->bm_number_of_pages;
                owords = b->bm_words;
                b->bm_pages = NULL;
                b->bm_number_of_pages =
                b->bm_set =
                b->bm_bits =
                b->bm_words =
                b->bm_dev_capacity = 0;
                spin_unlock_irq(&b->bm_lock);
                bm_free_pages(opages, onpages);
                bm_vk_free(opages, opages_vmalloced);
                goto out;
        }
        bits = BM_SECT_TO_BIT(ALIGN(capacity, BM_SECT_PER_BIT));

        /* if we would use
           words = ALIGN(bits,BITS_PER_LONG) >> LN2_BPL;
           a 32bit host could present the wrong number of words
           to a 64bit host.
        */
        words = ALIGN(bits, 64) >> LN2_BPL;

        if (get_ldev(mdev)) {
                D_ASSERT((u64)bits <= (((u64)mdev->ldev->md.md_size_sect-MD_BM_OFFSET) << 12));
                put_ldev(mdev);
        }

        /* one extra long to catch off by one errors */
        want = ALIGN((words+1)*sizeof(long), PAGE_SIZE) >> PAGE_SHIFT;
        have = b->bm_number_of_pages;
        if (want == have) {
                D_ASSERT(b->bm_pages != NULL);
                npages = b->bm_pages;
        } else {
                if (FAULT_ACTIVE(mdev, DRBD_FAULT_BM_ALLOC))
                        npages = NULL;
                else
                        npages = bm_realloc_pages(b, want);
        }

        if (!npages) {
                err = -ENOMEM;
                goto out;
        }

        spin_lock_irq(&b->bm_lock);
        opages = b->bm_pages;
        owords = b->bm_words;
        obits  = b->bm_bits;

        growing = bits > obits;
        if (opages && growing && set_new_bits)
                bm_set_surplus(b);

        b->bm_pages = npages;
        b->bm_number_of_pages = want;
        b->bm_bits  = bits;
        b->bm_words = words;
        b->bm_dev_capacity = capacity;

        if (growing) {
                if (set_new_bits) {
                        bm_memset(b, owords, 0xff, words-owords);
                        b->bm_set += bits - obits;
                } else
                        bm_memset(b, owords, 0x00, words-owords);

        }

        if (want < have) {
                /* implicit: (opages != NULL) && (opages != npages) */
                bm_free_pages(opages + want, have - want);
        }

        p_addr = bm_map_paddr(b, words);
        bm = p_addr + MLPP(words);
        *bm = DRBD_MAGIC;
        bm_unmap(p_addr);

        (void)bm_clear_surplus(b);

        spin_unlock_irq(&b->bm_lock);
        if (opages != npages)
                bm_vk_free(opages, opages_vmalloced);
        if (!growing)
                b->bm_set = bm_count_bits(b);
        dev_info(DEV, "resync bitmap: bits=%lu words=%lu\n", bits, words);

 out:
        drbd_bm_unlock(mdev);
        return err;
}
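/*
 * Sizing example for drbd_bm_resize() above (illustrative only; assumes
 * BM_SECT_PER_BIT == 8, i.e. 4K BM_BLOCK_SIZE, 64-bit longs, 4K pages):
 *
 *   capacity = 1000000 sectors (~488 MB)
 *   bits     = BM_SECT_TO_BIT(ALIGN(1000000, 8)) = 125000
 *   words    = ALIGN(125000, 64) >> 6            = 1954
 *   want     = ALIGN((1954 + 1) * 8, 4096) >> 12 = 4 pages
 *
 * The "+1" is the extra long word that receives DRBD_MAGIC as a canary
 * against off by one errors.
 */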
/* inherently racy:
 * if not protected by other means, return value may be out of date when
 * leaving this function...
 * we still need to lock it, since it is important that this returns
 * bm_set == 0 precisely.
 *
 * maybe bm_set should be atomic_t ?
 */
unsigned long _drbd_bm_total_weight(struct drbd_conf *mdev)
{
        struct drbd_bitmap *b = mdev->bitmap;
        unsigned long s;
        unsigned long flags;

        ERR_IF(!b) return 0;
        ERR_IF(!b->bm_pages) return 0;

        spin_lock_irqsave(&b->bm_lock, flags);
        s = b->bm_set;
        spin_unlock_irqrestore(&b->bm_lock, flags);

        return s;
}

unsigned long drbd_bm_total_weight(struct drbd_conf *mdev)
{
        unsigned long s;
        /* if I don't have a disk, I don't know about out-of-sync status */
        if (!get_ldev_if_state(mdev, D_NEGOTIATING))
                return 0;
        s = _drbd_bm_total_weight(mdev);
        put_ldev(mdev);
        return s;
}

size_t drbd_bm_words(struct drbd_conf *mdev)
{
        struct drbd_bitmap *b = mdev->bitmap;
        ERR_IF(!b) return 0;
        ERR_IF(!b->bm_pages) return 0;

        return b->bm_words;
}

unsigned long drbd_bm_bits(struct drbd_conf *mdev)
{
        struct drbd_bitmap *b = mdev->bitmap;
        ERR_IF(!b) return 0;

        return b->bm_bits;
}

/* merge number words from buffer into the bitmap starting at offset.
 * buffer[i] is expected to be little endian unsigned long.
 * bitmap must be locked by drbd_bm_lock.
 * currently only used from receive_bitmap.
 */
void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset, size_t number,
                       unsigned long *buffer)
{
        struct drbd_bitmap *b = mdev->bitmap;
        unsigned long *p_addr, *bm;
        unsigned long word, bits;
        size_t end, do_now;

        end = offset + number;

        ERR_IF(!b) return;
        ERR_IF(!b->bm_pages) return;
        if (number == 0)
                return;
        WARN_ON(offset >= b->bm_words);
        WARN_ON(end    >  b->bm_words);

        spin_lock_irq(&b->bm_lock);
        while (offset < end) {
                do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset;
                p_addr = bm_map_paddr(b, offset);
                bm = p_addr + MLPP(offset);
                offset += do_now;
                while (do_now--) {
                        bits = hweight_long(*bm);
                        word = *bm | lel_to_cpu(*buffer++);
                        *bm++ = word;
                        b->bm_set += hweight_long(word) - bits;
                }
                bm_unmap(p_addr);
        }
        /* with 32bit <-> 64bit cross-platform connect
         * this is only correct for current usage,
         * where we _know_ that we are 64 bit aligned,
         * and know that this function is used in this way, too...
         */
        if (end == b->bm_words)
                b->bm_set -= bm_clear_surplus(b);

        spin_unlock_irq(&b->bm_lock);
}
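/*
 * The bm_set accounting in drbd_bm_merge_lel() above relies on
 * hweight_long(old | new) - hweight_long(old) being the number of newly
 * set bits per word.  A minimal sketch of that bookkeeping, with a
 * hypothetical helper name (not part of the driver):
 */
#if 0
static void merge_one_word(unsigned long *dst, unsigned long src,
                           unsigned long *bm_set)
{
        unsigned long before = hweight_long(*dst);

        *dst |= src;                            /* merge; bits can only appear */
        *bm_set += hweight_long(*dst) - before; /* account only the difference */
}
#endif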
/* copy number words from the bitmap starting at offset into the buffer.
 * buffer[i] will be little endian unsigned long.
 */
void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset, size_t number,
                     unsigned long *buffer)
{
        struct drbd_bitmap *b = mdev->bitmap;
        unsigned long *p_addr, *bm;
        size_t end, do_now;

        end = offset + number;

        ERR_IF(!b) return;
        ERR_IF(!b->bm_pages) return;

        spin_lock_irq(&b->bm_lock);
        if ((offset >= b->bm_words) ||
            (end    >  b->bm_words) ||
            (number <= 0))
                dev_err(DEV, "offset=%lu number=%lu bm_words=%lu\n",
                        (unsigned long) offset,
                        (unsigned long) number,
                        (unsigned long) b->bm_words);
        else {
                while (offset < end) {
                        do_now = min_t(size_t, ALIGN(offset+1, LWPP), end) - offset;
                        p_addr = bm_map_paddr(b, offset);
                        bm = p_addr + MLPP(offset);
                        offset += do_now;
                        while (do_now--)
                                *buffer++ = cpu_to_lel(*bm++);
                        bm_unmap(p_addr);
                }
        }
        spin_unlock_irq(&b->bm_lock);
}

/* set all bits in the bitmap */
void drbd_bm_set_all(struct drbd_conf *mdev)
{
        struct drbd_bitmap *b = mdev->bitmap;
        ERR_IF(!b) return;
        ERR_IF(!b->bm_pages) return;

        spin_lock_irq(&b->bm_lock);
        bm_memset(b, 0, 0xff, b->bm_words);
        (void)bm_clear_surplus(b);
        b->bm_set = b->bm_bits;
        spin_unlock_irq(&b->bm_lock);
}

/* clear all bits in the bitmap */
void drbd_bm_clear_all(struct drbd_conf *mdev)
{
        struct drbd_bitmap *b = mdev->bitmap;
        ERR_IF(!b) return;
        ERR_IF(!b->bm_pages) return;

        spin_lock_irq(&b->bm_lock);
        bm_memset(b, 0, 0, b->bm_words);
        b->bm_set = 0;
        spin_unlock_irq(&b->bm_lock);
}

static void bm_async_io_complete(struct bio *bio, int error)
{
        struct drbd_bitmap *b = bio->bi_private;
        int uptodate = bio_flagged(bio, BIO_UPTODATE);

        /* strange behavior of some lower level drivers...
         * fail the request by clearing the uptodate flag,
         * but do not return any error?!
         * do we want to WARN() on this? */
        if (!error && !uptodate)
                error = -EIO;

        if (error) {
                /* doh. what now?
                 * for now, set all bits, and flag MD_IO_ERROR */
                __set_bit(BM_MD_IO_ERROR, &b->bm_flags);
        }
        if (atomic_dec_and_test(&b->bm_async_io))
                wake_up(&b->bm_io_wait);

        bio_put(bio);
}

static void bm_page_io_async(struct drbd_conf *mdev, struct drbd_bitmap *b, int page_nr, int rw) __must_hold(local)
{
        /* we are process context. we always get a bio */
        struct bio *bio = bio_alloc(GFP_KERNEL, 1);
        unsigned int len;
        sector_t on_disk_sector =
                mdev->ldev->md.md_offset + mdev->ldev->md.bm_offset;
        on_disk_sector += ((sector_t)page_nr) << (PAGE_SHIFT-9);

        /* this might happen with very small
         * flexible external meta data device */
        len = min_t(unsigned int, PAGE_SIZE,
                (drbd_md_last_sector(mdev->ldev) - on_disk_sector + 1)<<9);

        bio->bi_bdev = mdev->ldev->md_bdev;
        bio->bi_sector = on_disk_sector;
        bio_add_page(bio, b->bm_pages[page_nr], len, 0);
        bio->bi_private = b;
        bio->bi_end_io = bm_async_io_complete;

        if (FAULT_ACTIVE(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
                bio->bi_rw |= rw;
                bio_endio(bio, -EIO);
        } else {
                submit_bio(rw, bio);
        }
}
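/*
 * On-disk placement used by bm_page_io_async() above, with 4K pages
 * (illustrative only): bitmap page N is submitted at
 *
 *   md_offset + bm_offset + N * (PAGE_SIZE >> 9)
 *
 * i.e. 8 sectors of 512 bytes per page.  If the bitmap area of some
 * meta data layout started at sector 4104, page 3 would go to
 * sector 4104 + 24 = 4128.
 */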
# if defined(__LITTLE_ENDIAN)
/* nothing to do, on disk == in memory */
# define bm_cpu_to_lel(x) ((void)0)
# else
static void bm_cpu_to_lel(struct drbd_bitmap *b)
{
        /* need to cpu_to_lel all the pages ...
         * this may be optimized by using
         * cpu_to_lel(-1) == -1 and cpu_to_lel(0) == 0;
         * the following is still not optimal, but better than nothing */
        unsigned int i;
        unsigned long *p_addr, *bm;
        if (b->bm_set == 0) {
                /* no page at all; avoid swap if all is 0 */
                i = b->bm_number_of_pages;
        } else if (b->bm_set == b->bm_bits) {
                /* only the last page */
                i = b->bm_number_of_pages - 1;
        } else {
                /* all pages */
                i = 0;
        }
        for (; i < b->bm_number_of_pages; i++) {
                p_addr = kmap_atomic(b->bm_pages[i], KM_USER0);
                for (bm = p_addr; bm < p_addr + PAGE_SIZE/sizeof(long); bm++)
                        *bm = cpu_to_lel(*bm);
                kunmap_atomic(p_addr, KM_USER0);
        }
}
# endif
/* lel_to_cpu == cpu_to_lel */
# define bm_lel_to_cpu(x) bm_cpu_to_lel(x)

/*
 * bm_rw: read/write the whole bitmap from/to its on disk location.
 */
static int bm_rw(struct drbd_conf *mdev, int rw) __must_hold(local)
{
        struct drbd_bitmap *b = mdev->bitmap;
        /* sector_t sector; */
        int bm_words, num_pages, i;
        unsigned long now;
        char ppb[10];
        int err = 0;

        WARN_ON(!bm_is_locked(b));

        /* no spinlock here, the drbd_bm_lock should be enough! */

        bm_words  = drbd_bm_words(mdev);
        num_pages = (bm_words*sizeof(long) + PAGE_SIZE-1) >> PAGE_SHIFT;

        /* on disk bitmap is little endian */
        if (rw == WRITE)
                bm_cpu_to_lel(b);

        now = jiffies;
        atomic_set(&b->bm_async_io, num_pages);
        __clear_bit(BM_MD_IO_ERROR, &b->bm_flags);

        /* let the layers below us try to merge these bios... */
        for (i = 0; i < num_pages; i++)
                bm_page_io_async(mdev, b, i, rw);

        drbd_blk_run_queue(bdev_get_queue(mdev->ldev->md_bdev));
        wait_event(b->bm_io_wait, atomic_read(&b->bm_async_io) == 0);

        if (test_bit(BM_MD_IO_ERROR, &b->bm_flags)) {
                dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n");
                drbd_chk_io_error(mdev, 1, TRUE);
                err = -EIO;
        }

        now = jiffies;
        if (rw == WRITE) {
                /* swap back endianness */
                bm_lel_to_cpu(b);
                /* flush bitmap to stable storage */
                drbd_md_flush(mdev);
        } else /* rw == READ */ {
                /* just read, if necessary adjust endianness */
                b->bm_set = bm_count_bits_swap_endian(b);
                dev_info(DEV, "recounting of set bits took additional %lu jiffies\n",
                     jiffies - now);
        }
        now = b->bm_set;

        dev_info(DEV, "%s (%lu bits) marked out-of-sync by on disk bit-map.\n",
             ppsize(ppb, now << (BM_BLOCK_SHIFT-10)), now);

        return err;
}

/**
 * drbd_bm_read() - Read the whole bitmap from its on disk location.
 * @mdev:	DRBD device.
 */
int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local)
{
        return bm_rw(mdev, READ);
}

/**
 * drbd_bm_write() - Write the whole bitmap to its on disk location.
 * @mdev:	DRBD device.
 */
int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local)
{
        return bm_rw(mdev, WRITE);
}
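/*
 * Size relations behind drbd_bm_write_sect() below (illustrative only,
 * assuming 4K BM_BLOCK_SIZE and 64-bit longs): one 512-byte bitmap
 * sector holds 4096 bits, each bit covers 4K of storage, so one bitmap
 * sector describes 16 MB of device data (== BM_EXT_SIZE), and
 * S2W(1) == 512/sizeof(long) == 64 long words.
 */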
/**
 * drbd_bm_write_sect() - Write a 512 (MD_SECTOR_SIZE) byte piece of the bitmap
 * @mdev:	DRBD device.
 * @enr:	Extent number in the resync lru (happens to be sector offset)
 *
 * The BM_EXT_SIZE is on purpose exactly the amount of the bitmap covered
 * by a single sector write.  Therefore enr == sector offset from the
 * start of the bitmap.
 */
int drbd_bm_write_sect(struct drbd_conf *mdev, unsigned long enr) __must_hold(local)
{
        sector_t on_disk_sector = enr + mdev->ldev->md.md_offset
                                      + mdev->ldev->md.bm_offset;
        int bm_words, num_words, offset;
        int err = 0;

        mutex_lock(&mdev->md_io_mutex);
        bm_words  = drbd_bm_words(mdev);
        offset    = S2W(enr);	/* word offset into bitmap */
        num_words = min(S2W(1), bm_words - offset);
        if (num_words < S2W(1))
                memset(page_address(mdev->md_io_page), 0, MD_SECTOR_SIZE);
        drbd_bm_get_lel(mdev, offset, num_words,
                        page_address(mdev->md_io_page));
        if (!drbd_md_sync_page_io(mdev, mdev->ldev, on_disk_sector, WRITE)) {
                int i;
                err = -EIO;
                dev_err(DEV, "IO ERROR writing bitmap sector %lu "
                    "(meta-disk sector %llus)\n",
                    enr, (unsigned long long)on_disk_sector);
                drbd_chk_io_error(mdev, 1, TRUE);
                for (i = 0; i < AL_EXT_PER_BM_SECT; i++)
                        drbd_bm_ALe_set_all(mdev, enr*AL_EXT_PER_BM_SECT+i);
        }
        mdev->bm_writ_cnt++;
        mutex_unlock(&mdev->md_io_mutex);
        return err;
}

/* NOTE
 * find_first_bit returns int, we return unsigned long.
 * should not make much difference anyways, but ...
 *
 * this returns a bit number, NOT a sector!
 */
#define BPP_MASK ((1UL << (PAGE_SHIFT+3)) - 1)
static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo,
        const int find_zero_bit, const enum km_type km)
{
        struct drbd_bitmap *b = mdev->bitmap;
        unsigned long i = -1UL;
        unsigned long *p_addr;
        unsigned long bit_offset; /* bit offset of the mapped page. */

        if (bm_fo > b->bm_bits) {
                dev_err(DEV, "bm_fo=%lu bm_bits=%lu\n", bm_fo, b->bm_bits);
        } else {
                while (bm_fo < b->bm_bits) {
                        unsigned long offset;
                        bit_offset = bm_fo & ~BPP_MASK; /* bit offset of the page */
                        offset = bit_offset >> LN2_BPL; /* word offset of the page */
                        p_addr = __bm_map_paddr(b, offset, km);

                        if (find_zero_bit)
                                i = find_next_zero_bit(p_addr, PAGE_SIZE*8, bm_fo & BPP_MASK);
                        else
                                i = find_next_bit(p_addr, PAGE_SIZE*8, bm_fo & BPP_MASK);

                        __bm_unmap(p_addr, km);
                        if (i < PAGE_SIZE*8) {
                                i = bit_offset + i;
                                if (i >= b->bm_bits)
                                        break;
                                goto found;
                        }
                        bm_fo = bit_offset + PAGE_SIZE*8;
                }
                i = -1UL;
        }
 found:
        return i;
}

static unsigned long bm_find_next(struct drbd_conf *mdev,
        unsigned long bm_fo, const int find_zero_bit)
{
        struct drbd_bitmap *b = mdev->bitmap;
        unsigned long i = -1UL;

        ERR_IF(!b) return i;
        ERR_IF(!b->bm_pages) return i;

        spin_lock_irq(&b->bm_lock);
        if (bm_is_locked(b))
                bm_print_lock_info(mdev);

        i = __bm_find_next(mdev, bm_fo, find_zero_bit, KM_IRQ1);

        spin_unlock_irq(&b->bm_lock);
        return i;
}

unsigned long drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo)
{
        return bm_find_next(mdev, bm_fo, 0);
}

#if 0
/* not yet needed for anything. */
unsigned long drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo)
{
        return bm_find_next(mdev, bm_fo, 1);
}
#endif
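/*
 * A typical scan loop, as a caller of drbd_bm_find_next() might write it
 * (hypothetical sketch, not part of this file); bit numbers are returned
 * until -1UL signals "no further set bit":
 */
#if 0
        unsigned long bit;

        for (bit = drbd_bm_find_next(mdev, 0);
             bit != -1UL;
             bit = drbd_bm_find_next(mdev, bit + 1)) {
                /* ... schedule resync of the 4K block covered by "bit" ... */
        }
#endif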
/* does not spin_lock_irqsave.
 * you must take drbd_bm_lock() first */
unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo)
{
        /* WARN_ON(!bm_is_locked(mdev)); */
        return __bm_find_next(mdev, bm_fo, 0, KM_USER1);
}

unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo)
{
        /* WARN_ON(!bm_is_locked(mdev)); */
        return __bm_find_next(mdev, bm_fo, 1, KM_USER1);
}

/* returns number of bits actually changed.
 * for val != 0, we change 0 -> 1, return code positive
 * for val == 0, we change 1 -> 0, return code negative
 * wants bitnr, not sector.
 * expected to be called for only a few bits (e - s about BITS_PER_LONG).
 * Must hold bitmap lock already. */
static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
        unsigned long e, int val, const enum km_type km)
{
        struct drbd_bitmap *b = mdev->bitmap;
        unsigned long *p_addr = NULL;
        unsigned long bitnr;
        unsigned long last_page_nr = -1UL;
        int c = 0;

        if (e >= b->bm_bits) {
                dev_err(DEV, "ASSERT FAILED: bit_s=%lu bit_e=%lu bm_bits=%lu\n",
                                s, e, b->bm_bits);
                e = b->bm_bits ? b->bm_bits - 1 : 0;
        }
        for (bitnr = s; bitnr <= e; bitnr++) {
                unsigned long offset = bitnr>>LN2_BPL;
                unsigned long page_nr = offset >> (PAGE_SHIFT - LN2_BPL + 3);
                if (page_nr != last_page_nr) {
                        if (p_addr)
                                __bm_unmap(p_addr, km);
                        p_addr = __bm_map_paddr(b, offset, km);
                        last_page_nr = page_nr;
                }
                if (val)
                        c += (0 == __test_and_set_bit(bitnr & BPP_MASK, p_addr));
                else
                        c -= (0 != __test_and_clear_bit(bitnr & BPP_MASK, p_addr));
        }
        if (p_addr)
                __bm_unmap(p_addr, km);
        b->bm_set += c;
        return c;
}
/* returns number of bits actually changed.
 * for val != 0, we change 0 -> 1, return code positive
 * for val == 0, we change 1 -> 0, return code negative
 * wants bitnr, not sector */
static int bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
        const unsigned long e, int val)
{
        unsigned long flags;
        struct drbd_bitmap *b = mdev->bitmap;
        int c = 0;

        ERR_IF(!b) return 1;
        ERR_IF(!b->bm_pages) return 0;

        spin_lock_irqsave(&b->bm_lock, flags);
        if (bm_is_locked(b))
                bm_print_lock_info(mdev);

        c = __bm_change_bits_to(mdev, s, e, val, KM_IRQ1);

        spin_unlock_irqrestore(&b->bm_lock, flags);
        return c;
}

/* returns number of bits changed 0 -> 1 */
int drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
{
        return bm_change_bits_to(mdev, s, e, 1);
}

/* returns number of bits changed 1 -> 0 */
int drbd_bm_clear_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
{
        return -bm_change_bits_to(mdev, s, e, 0);
}

/* sets all bits in full words,
 * from first_word up to, but not including, last_word */
static inline void bm_set_full_words_within_one_page(struct drbd_bitmap *b,
        int page_nr, int first_word, int last_word)
{
        int i;
        int bits;
        unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr], KM_USER0);
        for (i = first_word; i < last_word; i++) {
                bits = hweight_long(paddr[i]);
                paddr[i] = ~0UL;
                b->bm_set += BITS_PER_LONG - bits;
        }
        kunmap_atomic(paddr, KM_USER0);
}

/* Same thing as drbd_bm_set_bits, but without taking the spin_lock_irqsave.
 * You must first drbd_bm_lock().
 * Can be called to set the whole bitmap in one go.
 * Sets bits from s to e _inclusive_. */
void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
{
        /* First set_bit from the first bit (s)
         * up to the next long boundary (sl),
         * then assign full words up to the last long boundary (el),
         * then set_bit up to and including the last bit (e).
         *
         * Do not use memset, because we must account for changes,
         * so we need to loop over the words with hweight() anyways.
         */
        unsigned long sl = ALIGN(s, BITS_PER_LONG);
        unsigned long el = (e+1) & ~((unsigned long)BITS_PER_LONG-1);
        int first_page;
        int last_page;
        int page_nr;
        int first_word;
        int last_word;

        if (e - s <= 3*BITS_PER_LONG) {
                /* don't bother; el and sl may even be wrong. */
                __bm_change_bits_to(mdev, s, e, 1, KM_USER0);
                return;
        }

        /* difference is large enough that we can trust sl and el */

        /* bits filling the current long */
        if (sl)
                __bm_change_bits_to(mdev, s, sl-1, 1, KM_USER0);

        first_page = sl >> (3 + PAGE_SHIFT);
        last_page = el >> (3 + PAGE_SHIFT);

        /* MLPP: modulo longs per page */
        /* LWPP: long words per page */
        first_word = MLPP(sl >> LN2_BPL);
        last_word = LWPP;

        /* first and full pages, unless first page == last page */
        for (page_nr = first_page; page_nr < last_page; page_nr++) {
                bm_set_full_words_within_one_page(mdev->bitmap, page_nr, first_word, last_word);
                cond_resched();
                first_word = 0;
        }

        /* last page (respectively only page, for first page == last page) */
        last_word = MLPP(el >> LN2_BPL);
        bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word);

        /* possibly trailing bits.
         * example: (e & 63) == 63, el will be e+1.
         * if that was the very last bit,
         * it would trigger an assert in __bm_change_bits_to()
         */
        if (el <= e)
                __bm_change_bits_to(mdev, el, e, 1, KM_USER0);
}
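/*
 * Boundary example for _drbd_bm_set_bits() above (illustrative only,
 * BITS_PER_LONG == 64): s = 70, e = 300 (e - s > 3*64, so the fast path
 * is taken) gives
 *
 *   sl = ALIGN(70, 64)     = 128   (first long boundary at/above s)
 *   el = (300 + 1) & ~63UL = 256   (last long boundary at/below e+1)
 *
 * Bits 70..127 and 256..300 go bit by bit through __bm_change_bits_to(),
 * bits 128..255 are assigned as two full long words.
 */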
/* returns bit state
 * wants bitnr, NOT sector.
 * inherently racy... area needs to be locked by means of {al,rs}_lru
 *  1 ... bit set
 *  0 ... bit not set
 * -1 ... first out of bounds access, stop testing for bits!
 */
int drbd_bm_test_bit(struct drbd_conf *mdev, const unsigned long bitnr)
{
        unsigned long flags;
        struct drbd_bitmap *b = mdev->bitmap;
        unsigned long *p_addr;
        int i;

        ERR_IF(!b) return 0;
        ERR_IF(!b->bm_pages) return 0;

        spin_lock_irqsave(&b->bm_lock, flags);
        if (bm_is_locked(b))
                bm_print_lock_info(mdev);
        if (bitnr < b->bm_bits) {
                unsigned long offset = bitnr>>LN2_BPL;
                p_addr = bm_map_paddr(b, offset);
                i = test_bit(bitnr & BPP_MASK, p_addr) ? 1 : 0;
                bm_unmap(p_addr);
        } else if (bitnr == b->bm_bits) {
                i = -1;
        } else { /* (bitnr > b->bm_bits) */
                dev_err(DEV, "bitnr=%lu > bm_bits=%lu\n", bitnr, b->bm_bits);
                i = 0;
        }

        spin_unlock_irqrestore(&b->bm_lock, flags);
        return i;
}
/* returns number of bits set in the range [s, e] */
int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e)
{
        unsigned long flags;
        struct drbd_bitmap *b = mdev->bitmap;
        unsigned long *p_addr = NULL, page_nr = -1;
        unsigned long bitnr;
        int c = 0;
        size_t w;

        /* If this is called without a bitmap, that is a bug.  But just to be
         * robust in case we screwed up elsewhere, in that case pretend there
         * was one dirty bit in the requested area, so we won't try to do a
         * local read there (no bitmap probably implies no disk) */
        ERR_IF(!b) return 1;
        ERR_IF(!b->bm_pages) return 1;

        spin_lock_irqsave(&b->bm_lock, flags);
        if (bm_is_locked(b))
                bm_print_lock_info(mdev);
        for (bitnr = s; bitnr <= e; bitnr++) {
                w = bitnr >> LN2_BPL;
                if (page_nr != w >> (PAGE_SHIFT - LN2_BPL + 3)) {
                        page_nr = w >> (PAGE_SHIFT - LN2_BPL + 3);
                        if (p_addr)
                                bm_unmap(p_addr);
                        p_addr = bm_map_paddr(b, w);
                }
                ERR_IF (bitnr >= b->bm_bits) {
                        dev_err(DEV, "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits);
                } else {
                        c += (0 != test_bit(bitnr - (page_nr << (PAGE_SHIFT+3)), p_addr));
                }
        }
        if (p_addr)
                bm_unmap(p_addr);
        spin_unlock_irqrestore(&b->bm_lock, flags);
        return c;
}

/* inherently racy...
 * return value may be already out-of-date when this function returns.
 * but the general usage is that this is only used during a cstate when bits
 * are only cleared, not set, and we typically only care for the case when
 * the return value is zero, or we already "locked" this "bitmap extent" by
 * other means.
 *
 * enr is bm-extent number, since we chose to name one sector (512 bytes)
 * worth of the bitmap a "bitmap extent".
 *
 * TODO
 * I think since we use it like a reference count, we should use the real
 * reference count of some bitmap extent element from some lru instead...
 */
int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr)
{
        struct drbd_bitmap *b = mdev->bitmap;
        int count, s, e;
        unsigned long flags;
        unsigned long *p_addr, *bm;

        ERR_IF(!b) return 0;
        ERR_IF(!b->bm_pages) return 0;

        spin_lock_irqsave(&b->bm_lock, flags);
        if (bm_is_locked(b))
                bm_print_lock_info(mdev);

        s = S2W(enr);
        e = min((size_t)S2W(enr+1), b->bm_words);
        count = 0;
        if (s < b->bm_words) {
                int n = e-s;
                p_addr = bm_map_paddr(b, s);
                bm = p_addr + MLPP(s);
                while (n--)
                        count += hweight_long(*bm++);
                bm_unmap(p_addr);
        } else {
                dev_err(DEV, "start offset (%d) too large in drbd_bm_e_weight\n", s);
        }
        spin_unlock_irqrestore(&b->bm_lock, flags);
        return count;
}
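/*
 * Extent geometry for drbd_bm_e_weight() above and drbd_bm_ALe_set_all()
 * below (illustrative only, assuming 4K BM_BLOCK_SIZE, 4 MB AL extents
 * and 64-bit longs): a bm-extent is one 512-byte bitmap sector, i.e.
 * S2W(1) == 64 words == 4096 bits == 16 MB of storage.  An AL extent
 * covers 4 MB == 1024 bits == BM_WORDS_PER_AL_EXT == 16 words, which is
 * why the s..e word range below always fits within one bitmap page.
 */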
/* set all bits covered by the AL-extent al_enr */
unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, unsigned long al_enr)
{
        struct drbd_bitmap *b = mdev->bitmap;
        unsigned long *p_addr, *bm;
        unsigned long weight;
        int count, s, e, i, do_now;
        ERR_IF(!b) return 0;
        ERR_IF(!b->bm_pages) return 0;

        spin_lock_irq(&b->bm_lock);
        if (bm_is_locked(b))
                bm_print_lock_info(mdev);
        weight = b->bm_set;

        s = al_enr * BM_WORDS_PER_AL_EXT;
        e = min_t(size_t, s + BM_WORDS_PER_AL_EXT, b->bm_words);
        /* assert that s and e are on the same page */
        D_ASSERT((e-1) >> (PAGE_SHIFT - LN2_BPL + 3)
              ==  s    >> (PAGE_SHIFT - LN2_BPL + 3));
        count = 0;
        if (s < b->bm_words) {
                i = do_now = e-s;
                p_addr = bm_map_paddr(b, s);
                bm = p_addr + MLPP(s);
                while (i--) {
                        count += hweight_long(*bm);
                        *bm = -1UL;
                        bm++;
                }
                bm_unmap(p_addr);
                b->bm_set += do_now*BITS_PER_LONG - count;
                if (e == b->bm_words)
                        b->bm_set -= bm_clear_surplus(b);
        } else {
                dev_err(DEV, "start offset (%d) too large in drbd_bm_ALe_set_all\n", s);
        }
        weight = b->bm_set - weight;
        spin_unlock_irq(&b->bm_lock);
        return weight;
}