// SPDX-License-Identifier: GPL-2.0-only
/*
   drbd_actlog.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2003-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 2003-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2003-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

 */

#include <linux/slab.h>
#include <linux/crc32c.h>
#include <linux/drbd.h>
#include <linux/drbd_limits.h>
#include "drbd_int.h"


enum al_transaction_types {
	AL_TR_UPDATE = 0,
	AL_TR_INITIALIZED = 0xffff
};
/* all fields on disk in big endian */
struct __packed al_transaction_on_disk {
	/* don't we all like magic */
	__be32	magic;

	/* to identify the most recent transaction block
	 * in the on disk ring buffer */
	__be32	tr_number;

	/* checksum on the full 4k block, with this field set to 0. */
	__be32	crc32c;

	/* type of transaction, special transaction types like:
	 * purge-all, set-all-idle, set-all-active, ... to-be-defined
	 * see also enum al_transaction_types */
	__be16	transaction_type;

	/* we currently allow only a few thousand extents,
	 * so 16bit will be enough for the slot number. */

	/* how many updates in this transaction */
	__be16	n_updates;

	/* maximum slot number, "al-extents" in drbd.conf speak.
	 * Having this in each transaction should make reconfiguration
	 * of that parameter easier. */
	__be16	context_size;

	/* slot number the context starts with */
	__be16	context_start_slot_nr;

	/* Some reserved bytes. Expected usage is a 64bit counter of
	 * sectors-written since device creation, and other data generation tag
	 * supporting usage */
	__be32	__reserved[4];

	/* --- 36 bytes used --- */

	/* Reserve space for up to AL_UPDATES_PER_TRANSACTION changes
	 * in one transaction, then use the remaining bytes in the 4k block for
	 * context information.  "Flexible" number of updates per transaction
	 * does not help, as we have to account for the case when all update
	 * slots are used anyways, so it would only complicate code without
	 * additional benefit.
	 */
	__be16	update_slot_nr[AL_UPDATES_PER_TRANSACTION];

	/* but the extent number is 32bit, which at an extent size of 4 MiB
	 * allows covering device sizes of up to 2**54 bytes (16 PiB) */
	__be32	update_extent_nr[AL_UPDATES_PER_TRANSACTION];

	/* --- 420 bytes used (36 + 64*6) --- */

	/* 4096 - 420 = 3676 = 919 * 4 */
	__be32	context[AL_CONTEXT_PER_TRANSACTION];
};
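/* A quick worked check of the layout above, assuming
 * AL_UPDATES_PER_TRANSACTION is 64 and AL_CONTEXT_PER_TRANSACTION is 919,
 * as the byte counts in the comments above imply:
 *   header:  4+4+4 + 2+2+2+2 + 4*4  =   36 bytes
 *   updates: 64 * (2 + 4)           =  384 bytes
 *   context: 919 * 4                = 3676 bytes
 *   total:   36 + 384 + 3676        = 4096 bytes,
 * i.e. exactly one aligned 4k meta data block. */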
"Flexible" number of updates per transaction 667ad651b5SLars Ellenberg * does not help, as we have to account for the case when all update 677ad651b5SLars Ellenberg * slots are used anyways, so it would only complicate code without 687ad651b5SLars Ellenberg * additional benefit. 69b411b363SPhilipp Reisner */ 707ad651b5SLars Ellenberg __be16 update_slot_nr[AL_UPDATES_PER_TRANSACTION]; 717ad651b5SLars Ellenberg 727ad651b5SLars Ellenberg /* but the extent number is 32bit, which at an extent size of 4 MiB 737ad651b5SLars Ellenberg * allows to cover device sizes of up to 2**54 Byte (16 PiB) */ 747ad651b5SLars Ellenberg __be32 update_extent_nr[AL_UPDATES_PER_TRANSACTION]; 757ad651b5SLars Ellenberg 767ad651b5SLars Ellenberg /* --- 420 bytes used (36 + 64*6) --- */ 777ad651b5SLars Ellenberg 787ad651b5SLars Ellenberg /* 4096 - 420 = 3676 = 919 * 4 */ 797ad651b5SLars Ellenberg __be32 context[AL_CONTEXT_PER_TRANSACTION]; 80b411b363SPhilipp Reisner }; 81b411b363SPhilipp Reisner 82e37d2438SLars Ellenberg void *drbd_md_get_buffer(struct drbd_device *device, const char *intent) 83cdfda633SPhilipp Reisner { 84cdfda633SPhilipp Reisner int r; 85cdfda633SPhilipp Reisner 86b30ab791SAndreas Gruenbacher wait_event(device->misc_wait, 87e37d2438SLars Ellenberg (r = atomic_cmpxchg(&device->md_io.in_use, 0, 1)) == 0 || 88b30ab791SAndreas Gruenbacher device->state.disk <= D_FAILED); 89cdfda633SPhilipp Reisner 90e37d2438SLars Ellenberg if (r) 91e37d2438SLars Ellenberg return NULL; 92e37d2438SLars Ellenberg 93e37d2438SLars Ellenberg device->md_io.current_use = intent; 94e37d2438SLars Ellenberg device->md_io.start_jif = jiffies; 95e37d2438SLars Ellenberg device->md_io.submit_jif = device->md_io.start_jif - 1; 96e37d2438SLars Ellenberg return page_address(device->md_io.page); 97cdfda633SPhilipp Reisner } 98cdfda633SPhilipp Reisner 99b30ab791SAndreas Gruenbacher void drbd_md_put_buffer(struct drbd_device *device) 100cdfda633SPhilipp Reisner { 101e37d2438SLars Ellenberg if (atomic_dec_and_test(&device->md_io.in_use)) 102b30ab791SAndreas Gruenbacher wake_up(&device->misc_wait); 103cdfda633SPhilipp Reisner } 104cdfda633SPhilipp Reisner 105b30ab791SAndreas Gruenbacher void wait_until_done_or_force_detached(struct drbd_device *device, struct drbd_backing_dev *bdev, 10632db80f6SPhilipp Reisner unsigned int *done) 107cdfda633SPhilipp Reisner { 10832db80f6SPhilipp Reisner long dt; 10932db80f6SPhilipp Reisner 11032db80f6SPhilipp Reisner rcu_read_lock(); 11132db80f6SPhilipp Reisner dt = rcu_dereference(bdev->disk_conf)->disk_timeout; 11232db80f6SPhilipp Reisner rcu_read_unlock(); 11332db80f6SPhilipp Reisner dt = dt * HZ / 10; 11432db80f6SPhilipp Reisner if (dt == 0) 11532db80f6SPhilipp Reisner dt = MAX_SCHEDULE_TIMEOUT; 11632db80f6SPhilipp Reisner 117b30ab791SAndreas Gruenbacher dt = wait_event_timeout(device->misc_wait, 118b30ab791SAndreas Gruenbacher *done || test_bit(FORCE_DETACH, &device->flags), dt); 119e34b677dSLars Ellenberg if (dt == 0) { 120d0180171SAndreas Gruenbacher drbd_err(device, "meta-data IO operation timed out\n"); 121b30ab791SAndreas Gruenbacher drbd_chk_io_error(device, 1, DRBD_FORCE_DETACH); 122e34b677dSLars Ellenberg } 123cdfda633SPhilipp Reisner } 124cdfda633SPhilipp Reisner 125b30ab791SAndreas Gruenbacher static int _drbd_md_sync_page_io(struct drbd_device *device, 126b411b363SPhilipp Reisner struct drbd_backing_dev *bdev, 1279945172aSBart Van Assche sector_t sector, enum req_op op) 128b411b363SPhilipp Reisner { 129b411b363SPhilipp Reisner struct bio *bio; 130193cb00cSLars Ellenberg /* we do all our meta 
static int _drbd_md_sync_page_io(struct drbd_device *device,
				 struct drbd_backing_dev *bdev,
				 sector_t sector, enum req_op op)
{
	struct bio *bio;
	/* we do all our meta data IO in aligned 4k blocks. */
	const int size = 4096;
	int err;
	blk_opf_t op_flags = 0;

	device->md_io.done = 0;
	device->md_io.error = -ENODEV;

	if ((op == REQ_OP_WRITE) && !test_bit(MD_NO_FUA, &device->flags))
		op_flags |= REQ_FUA | REQ_PREFLUSH;
	op_flags |= REQ_SYNC;

	bio = bio_alloc_bioset(bdev->md_bdev, 1, op | op_flags, GFP_NOIO,
			       &drbd_md_io_bio_set);
	bio->bi_iter.bi_sector = sector;
	err = -EIO;
	if (bio_add_page(bio, device->md_io.page, size, 0) != size)
		goto out;
	bio->bi_private = device;
	bio->bi_end_io = drbd_md_endio;

	if (op != REQ_OP_WRITE && device->state.disk == D_DISKLESS && device->ldev == NULL)
		/* special case, drbd_md_read() during drbd_adm_attach(): no get_ldev */
		;
	else if (!get_ldev_if_state(device, D_ATTACHING)) {
		/* Corresponding put_ldev in drbd_md_endio() */
		drbd_err(device, "ASSERT FAILED: get_ldev_if_state() == 1 in _drbd_md_sync_page_io()\n");
		err = -ENODEV;
		goto out;
	}

	bio_get(bio); /* one bio_put() is in the completion handler */
	atomic_inc(&device->md_io.in_use); /* drbd_md_put_buffer() is in the completion handler */
	device->md_io.submit_jif = jiffies;
	if (drbd_insert_fault(device, (op == REQ_OP_WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
		bio_io_error(bio);
	else
		submit_bio(bio);
	wait_until_done_or_force_detached(device, bdev, &device->md_io.done);
	if (!bio->bi_status)
		err = device->md_io.error;

 out:
	bio_put(bio);
	return err;
}

int drbd_md_sync_page_io(struct drbd_device *device, struct drbd_backing_dev *bdev,
			 sector_t sector, enum req_op op)
{
	int err;
	D_ASSERT(device, atomic_read(&device->md_io.in_use) == 1);

	BUG_ON(!bdev->md_bdev);

	dynamic_drbd_dbg(device, "meta_data io: %s [%d]:%s(,%llus,%s) %pS\n",
	     current->comm, current->pid, __func__,
	     (unsigned long long)sector, (op == REQ_OP_WRITE) ? "WRITE" : "READ",
	     (void*)_RET_IP_ );

	if (sector < drbd_md_first_sector(bdev) ||
	    sector + 7 > drbd_md_last_sector(bdev))
		drbd_alert(device, "%s [%d]:%s(,%llus,%s) out of range md access!\n",
		     current->comm, current->pid, __func__,
		     (unsigned long long)sector,
		     (op == REQ_OP_WRITE) ? "WRITE" : "READ");

	err = _drbd_md_sync_page_io(device, bdev, sector, op);
	if (err) {
		drbd_err(device, "drbd_md_sync_page_io(,%llus,%s) failed with error %d\n",
		    (unsigned long long)sector,
		    (op == REQ_OP_WRITE) ? "WRITE" : "READ", err);
	}
	return err;
}

static struct bm_extent *find_active_resync_extent(struct drbd_device *device, unsigned int enr)
{
	struct lc_element *tmp;
	tmp = lc_find(device->resync, enr/AL_EXT_PER_BM_SECT);
	if (unlikely(tmp != NULL)) {
		struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
		if (test_bit(BME_NO_WRITES, &bm_ext->flags))
			return bm_ext;
	}
	return NULL;
}

static struct lc_element *_al_get(struct drbd_device *device, unsigned int enr, bool nonblock)
{
	struct lc_element *al_ext;
	struct bm_extent *bm_ext;
	int wake;

	spin_lock_irq(&device->al_lock);
	bm_ext = find_active_resync_extent(device, enr);
	if (bm_ext) {
		wake = !test_and_set_bit(BME_PRIORITY, &bm_ext->flags);
		spin_unlock_irq(&device->al_lock);
		if (wake)
			wake_up(&device->al_wait);
		return NULL;
	}
	if (nonblock)
		al_ext = lc_try_get(device->act_log, enr);
	else
		al_ext = lc_get(device->act_log, enr);
	spin_unlock_irq(&device->al_lock);
	return al_ext;
}

bool drbd_al_begin_io_fastpath(struct drbd_device *device, struct drbd_interval *i)
{
	/* for bios crossing activity log extent boundaries,
	 * we may need to activate two extents in one go */
	unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
	unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);

	D_ASSERT(device, first <= last);
	D_ASSERT(device, atomic_read(&device->local_cnt) > 0);

	/* FIXME figure out a fast path for bios crossing AL extent boundaries */
	if (first != last)
		return false;

	return _al_get(device, first, true);
}
bool drbd_al_begin_io_prepare(struct drbd_device *device, struct drbd_interval *i)
{
	/* for bios crossing activity log extent boundaries,
	 * we may need to activate two extents in one go */
	unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
	unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
	unsigned enr;
	bool need_transaction = false;

	D_ASSERT(device, first <= last);
	D_ASSERT(device, atomic_read(&device->local_cnt) > 0);

	for (enr = first; enr <= last; enr++) {
		struct lc_element *al_ext;
		wait_event(device->al_wait,
				(al_ext = _al_get(device, enr, false)) != NULL);
		if (al_ext->lc_number != enr)
			need_transaction = true;
	}
	return need_transaction;
}

#if (PAGE_SHIFT + 3) < (AL_EXTENT_SHIFT - BM_BLOCK_SHIFT)
/* Currently BM_BLOCK_SHIFT, BM_EXT_SHIFT and AL_EXTENT_SHIFT
 * are still coupled, or assume too much about their relation.
 * Code below will not work if this is violated.
 * Will be cleaned up with some followup patch.
 */
# error FIXME
#endif

static unsigned int al_extent_to_bm_page(unsigned int al_enr)
{
	return al_enr >>
		/* bit to page */
		((PAGE_SHIFT + 3) -
		/* al extent number to bit */
		 (AL_EXTENT_SHIFT - BM_BLOCK_SHIFT));
}
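/* Worked example for al_extent_to_bm_page(), assuming 4 KiB pages
 * (PAGE_SHIFT == 12), AL_EXTENT_SHIFT == 22 and BM_BLOCK_SHIFT == 12
 * (one bitmap bit per 4 KiB of storage): one page holds
 * 2^(12+3) = 32768 bits, one AL extent covers 2^(22-12) = 1024 bits,
 * so the shift is 15 - 10 = 5, i.e. 32 AL extents map to one bitmap page. */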
static sector_t al_tr_number_to_on_disk_sector(struct drbd_device *device)
{
	const unsigned int stripes = device->ldev->md.al_stripes;
	const unsigned int stripe_size_4kB = device->ldev->md.al_stripe_size_4k;

	/* transaction number, modulo on-disk ring buffer wrap around */
	unsigned int t = device->al_tr_number % (device->ldev->md.al_size_4k);

	/* ... to aligned 4k on disk block */
	t = ((t % stripes) * stripe_size_4kB) + t/stripes;

	/* ... to 512 byte sector in activity log */
	t *= 8;

	/* ... plus offset to the on disk position */
	return device->ldev->md.md_offset + device->ldev->md.al_offset + t;
}
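/* Worked example for the striping arithmetic above, with hypothetical
 * values al_stripes == 4 and al_stripe_size_4k == 8 (32 transaction
 * slots total): transaction number t == 5 maps to
 * ((5 % 4) * 8) + 5/4 = 8 + 1 = 9, i.e. the tenth 4k block of the AL
 * area, which is then scaled by 8 to a 512 byte sector offset of 72. */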
static int __al_write_transaction(struct drbd_device *device, struct al_transaction_on_disk *buffer)
{
	struct lc_element *e;
	sector_t sector;
	int i, mx;
	unsigned extent_nr;
	unsigned crc = 0;
	int err = 0;

	memset(buffer, 0, sizeof(*buffer));
	buffer->magic = cpu_to_be32(DRBD_AL_MAGIC);
	buffer->tr_number = cpu_to_be32(device->al_tr_number);

	i = 0;

	drbd_bm_reset_al_hints(device);

	/* Even though no one can start to change this list
	 * once we set the LC_LOCKED -- from drbd_al_begin_io(),
	 * lc_try_lock_for_transaction() --, someone may still
	 * be in the process of changing it. */
	spin_lock_irq(&device->al_lock);
	list_for_each_entry(e, &device->act_log->to_be_changed, list) {
		if (i == AL_UPDATES_PER_TRANSACTION) {
			i++;
			break;
		}
		buffer->update_slot_nr[i] = cpu_to_be16(e->lc_index);
		buffer->update_extent_nr[i] = cpu_to_be32(e->lc_new_number);
		if (e->lc_number != LC_FREE)
			drbd_bm_mark_for_writeout(device,
					al_extent_to_bm_page(e->lc_number));
		i++;
	}
	spin_unlock_irq(&device->al_lock);
	BUG_ON(i > AL_UPDATES_PER_TRANSACTION);

	buffer->n_updates = cpu_to_be16(i);
	for ( ; i < AL_UPDATES_PER_TRANSACTION; i++) {
		buffer->update_slot_nr[i] = cpu_to_be16(-1);
		buffer->update_extent_nr[i] = cpu_to_be32(LC_FREE);
	}

	buffer->context_size = cpu_to_be16(device->act_log->nr_elements);
	buffer->context_start_slot_nr = cpu_to_be16(device->al_tr_cycle);

	mx = min_t(int, AL_CONTEXT_PER_TRANSACTION,
		   device->act_log->nr_elements - device->al_tr_cycle);
	for (i = 0; i < mx; i++) {
		unsigned idx = device->al_tr_cycle + i;
		extent_nr = lc_element_by_index(device->act_log, idx)->lc_number;
		buffer->context[i] = cpu_to_be32(extent_nr);
	}
	for (; i < AL_CONTEXT_PER_TRANSACTION; i++)
		buffer->context[i] = cpu_to_be32(LC_FREE);

	device->al_tr_cycle += AL_CONTEXT_PER_TRANSACTION;
	if (device->al_tr_cycle >= device->act_log->nr_elements)
		device->al_tr_cycle = 0;

	sector = al_tr_number_to_on_disk_sector(device);

	crc = crc32c(0, buffer, 4096);
	buffer->crc32c = cpu_to_be32(crc);

	if (drbd_bm_write_hinted(device))
		err = -EIO;
	else {
		bool write_al_updates;
		rcu_read_lock();
		write_al_updates = rcu_dereference(device->ldev->disk_conf)->al_updates;
		rcu_read_unlock();
		if (write_al_updates) {
			if (drbd_md_sync_page_io(device, device->ldev, sector, REQ_OP_WRITE)) {
				err = -EIO;
				drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
			} else {
				device->al_tr_number++;
				device->al_writ_cnt++;
			}
		}
	}

	return err;
}
static int al_write_transaction(struct drbd_device *device)
{
	struct al_transaction_on_disk *buffer;
	int err;

	if (!get_ldev(device)) {
		drbd_err(device, "disk is %s, cannot start al transaction\n",
			drbd_disk_str(device->state.disk));
		return -EIO;
	}

	/* The bitmap write may have failed, causing a state change. */
	if (device->state.disk < D_INCONSISTENT) {
		drbd_err(device,
			"disk is %s, cannot write al transaction\n",
			drbd_disk_str(device->state.disk));
		put_ldev(device);
		return -EIO;
	}

	/* protects md_io_buffer, al_tr_cycle, ... */
	buffer = drbd_md_get_buffer(device, __func__);
	if (!buffer) {
		drbd_err(device, "disk failed while waiting for md_io buffer\n");
		put_ldev(device);
		return -ENODEV;
	}

	err = __al_write_transaction(device, buffer);

	drbd_md_put_buffer(device);
	put_ldev(device);

	return err;
}


void drbd_al_begin_io_commit(struct drbd_device *device)
{
	bool locked = false;

	/* Serialize multiple transactions.
	 * This uses test_and_set_bit, memory barrier is implicit.
	 */
	wait_event(device->al_wait,
			device->act_log->pending_changes == 0 ||
			(locked = lc_try_lock_for_transaction(device->act_log)));

	if (locked) {
		/* Double check: it may have been committed by someone else,
		 * while we have been waiting for the lock. */
		if (device->act_log->pending_changes) {
			bool write_al_updates;

			rcu_read_lock();
			write_al_updates = rcu_dereference(device->ldev->disk_conf)->al_updates;
			rcu_read_unlock();

			if (write_al_updates)
				al_write_transaction(device);
			spin_lock_irq(&device->al_lock);
			/* FIXME
			if (err)
				we need an "lc_cancel" here;
			*/
			lc_committed(device->act_log);
			spin_unlock_irq(&device->al_lock);
		}
		lc_unlock(device->act_log);
		wake_up(&device->al_wait);
	}
}

void drbd_al_begin_io(struct drbd_device *device, struct drbd_interval *i)
{
	if (drbd_al_begin_io_prepare(device, i))
		drbd_al_begin_io_commit(device);
}
int drbd_al_begin_io_nonblock(struct drbd_device *device, struct drbd_interval *i)
{
	struct lru_cache *al = device->act_log;
	/* for bios crossing activity log extent boundaries,
	 * we may need to activate two extents in one go */
	unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
	unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
	unsigned nr_al_extents;
	unsigned available_update_slots;
	unsigned enr;

	D_ASSERT(device, first <= last);

	nr_al_extents = 1 + last - first; /* worst case: all touched extents are cold. */
	available_update_slots = min(al->nr_elements - al->used,
				al->max_pending_changes - al->pending_changes);

	/* We want all necessary updates for a given request within the same transaction
	 * We could first check how many updates are *actually* needed,
	 * and use that instead of the worst-case nr_al_extents */
	if (available_update_slots < nr_al_extents) {
		/* Too many activity log extents are currently "hot".
		 *
		 * If we have accumulated pending changes already,
		 * we made progress.
		 *
		 * If we cannot get even a single pending change through,
		 * stop the fast path until we made some progress,
		 * or requests to "cold" extents could be starved. */
		if (!al->pending_changes)
			__set_bit(__LC_STARVING, &device->act_log->flags);
		return -ENOBUFS;
	}

	/* Is resync active in this area? */
	for (enr = first; enr <= last; enr++) {
		struct lc_element *tmp;
		tmp = lc_find(device->resync, enr/AL_EXT_PER_BM_SECT);
		if (unlikely(tmp != NULL)) {
			struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
			if (test_bit(BME_NO_WRITES, &bm_ext->flags)) {
				if (!test_and_set_bit(BME_PRIORITY, &bm_ext->flags))
					return -EBUSY;
				return -EWOULDBLOCK;
			}
		}
	}

	/* Checkout the refcounts.
	 * Given that we checked for available elements and update slots above,
	 * this has to be successful. */
	for (enr = first; enr <= last; enr++) {
		struct lc_element *al_ext;
		al_ext = lc_get_cumulative(device->act_log, enr);
		if (!al_ext)
			drbd_info(device, "LOGIC BUG for enr=%u\n", enr);
	}
	return 0;
}

void drbd_al_complete_io(struct drbd_device *device, struct drbd_interval *i)
{
	/* for bios crossing activity log extent boundaries,
	 * we may need to activate two extents in one go */
	unsigned first = i->sector >> (AL_EXTENT_SHIFT-9);
	unsigned last = i->size == 0 ? first : (i->sector + (i->size >> 9) - 1) >> (AL_EXTENT_SHIFT-9);
	unsigned enr;
	struct lc_element *extent;
	unsigned long flags;

	D_ASSERT(device, first <= last);
	spin_lock_irqsave(&device->al_lock, flags);

	for (enr = first; enr <= last; enr++) {
		extent = lc_find(device->act_log, enr);
		if (!extent) {
			drbd_err(device, "al_complete_io() called on inactive extent %u\n", enr);
			continue;
		}
		lc_put(device->act_log, extent);
	}
	spin_unlock_irqrestore(&device->al_lock, flags);
	wake_up(&device->al_wait);
}
static int _try_lc_del(struct drbd_device *device, struct lc_element *al_ext)
{
	int rv;

	spin_lock_irq(&device->al_lock);
	rv = (al_ext->refcnt == 0);
	if (likely(rv))
		lc_del(device->act_log, al_ext);
	spin_unlock_irq(&device->al_lock);

	return rv;
}

/**
 * drbd_al_shrink() - Removes all active extents from the activity log
 * @device:	DRBD device.
 *
 * Removes all active extents from the activity log, waiting until
 * the reference count of each entry dropped to 0 first, of course.
 *
 * You need to lock device->act_log with lc_try_lock() / lc_unlock()
 */
void drbd_al_shrink(struct drbd_device *device)
{
	struct lc_element *al_ext;
	int i;

	D_ASSERT(device, test_bit(__LC_LOCKED, &device->act_log->flags));

	for (i = 0; i < device->act_log->nr_elements; i++) {
		al_ext = lc_element_by_index(device->act_log, i);
		if (al_ext->lc_number == LC_FREE)
			continue;
		wait_event(device->al_wait, _try_lc_del(device, al_ext));
	}

	wake_up(&device->al_wait);
}
int drbd_al_initialize(struct drbd_device *device, void *buffer)
{
	struct al_transaction_on_disk *al = buffer;
	struct drbd_md *md = &device->ldev->md;
	int al_size_4k = md->al_stripes * md->al_stripe_size_4k;
	int i;

	__al_write_transaction(device, al);
	/* There may or may not have been a pending transaction. */
	spin_lock_irq(&device->al_lock);
	lc_committed(device->act_log);
	spin_unlock_irq(&device->al_lock);

	/* The rest of the transactions will have an empty "updates" list, and
	 * are written out only to provide the context, and to initialize the
	 * on-disk ring buffer. */
	for (i = 1; i < al_size_4k; i++) {
		int err = __al_write_transaction(device, al);
		if (err)
			return err;
	}
	return 0;
}

static const char *drbd_change_sync_fname[] = {
	[RECORD_RS_FAILED] = "drbd_rs_failed_io",
	[SET_IN_SYNC] = "drbd_set_in_sync",
	[SET_OUT_OF_SYNC] = "drbd_set_out_of_sync"
};
/* ATTENTION. The AL's extents are 4MB each, while the extents in the
 * resync LRU-cache are 16MB each.
 * The caller of this function has to hold a get_ldev() reference.
 *
 * Adjusts the caching members ->rs_left (success) or ->rs_failed (!success),
 * potentially pulling in (and recounting the corresponding bits)
 * this resync extent into the resync extent lru cache.
 *
 * Returns whether all bits have been cleared for this resync extent,
 * precisely: (rs_left <= rs_failed)
 *
 * TODO will be obsoleted once we have a caching lru of the on disk bitmap
 */
static bool update_rs_extent(struct drbd_device *device,
		unsigned int enr, int count,
		enum update_sync_bits_mode mode)
{
	struct lc_element *e;

	D_ASSERT(device, atomic_read(&device->local_cnt));

	/* When setting out-of-sync bits,
	 * we don't need it cached (lc_find).
	 * But if it is present in the cache,
	 * we should update the cached bit count.
	 * Otherwise, that extent should be in the resync extent lru cache
	 * already -- or we want to pull it in if necessary -- (lc_get),
	 * then update and check rs_left and rs_failed. */
	if (mode == SET_OUT_OF_SYNC)
		e = lc_find(device->resync, enr);
	else
		e = lc_get(device->resync, enr);
	if (e) {
		struct bm_extent *ext = lc_entry(e, struct bm_extent, lce);
		if (ext->lce.lc_number == enr) {
			if (mode == SET_IN_SYNC)
				ext->rs_left -= count;
			else if (mode == SET_OUT_OF_SYNC)
				ext->rs_left += count;
			else
				ext->rs_failed += count;
			if (ext->rs_left < ext->rs_failed) {
				drbd_warn(device, "BAD! enr=%u rs_left=%d "
				    "rs_failed=%d count=%d cstate=%s\n",
				     ext->lce.lc_number, ext->rs_left,
				     ext->rs_failed, count,
				     drbd_conn_str(device->state.conn));

				/* We don't expect to be able to clear more bits
				 * than have been set when we originally counted
				 * the set bits to cache that value in ext->rs_left.
				 * Whatever the reason (disconnect during resync,
				 * delayed local completion of an application write),
				 * try to fix it up by recounting here. */
				ext->rs_left = drbd_bm_e_weight(device, enr);
			}
		} else {
			/* Normally this element should be in the cache,
			 * since drbd_rs_begin_io() pulled it already in.
			 *
			 * But maybe an application write finished, and we set
			 * something outside the resync lru_cache in sync.
			 */
			int rs_left = drbd_bm_e_weight(device, enr);
			if (ext->flags != 0) {
				drbd_warn(device, "changing resync lce: %d[%u;%02lx]"
				     " -> %d[%u;00]\n",
				     ext->lce.lc_number, ext->rs_left,
				     ext->flags, enr, rs_left);
				ext->flags = 0;
			}
			if (ext->rs_failed) {
				drbd_warn(device, "Kicking resync_lru element enr=%u "
				     "out with rs_failed=%d\n",
				     ext->lce.lc_number, ext->rs_failed);
			}
			ext->rs_left = rs_left;
			ext->rs_failed = (mode == RECORD_RS_FAILED) ? count : 0;
			/* we don't keep a persistent log of the resync lru,
			 * we can commit any change right away. */
			lc_committed(device->resync);
		}
		if (mode != SET_OUT_OF_SYNC)
			lc_put(device->resync, &ext->lce);
		/* no race, we are within the al_lock! */

		if (ext->rs_left <= ext->rs_failed) {
			ext->rs_failed = 0;
			return true;
		}
	} else if (mode != SET_OUT_OF_SYNC) {
		/* be quiet if lc_find() did not find it. */
		drbd_err(device, "lc_get() failed! locked=%d/%d flags=%lu\n",
		    device->resync_locked,
		    device->resync->nr_elements,
		    device->resync->flags);
	}
	return false;
}

void drbd_advance_rs_marks(struct drbd_device *device, unsigned long still_to_go)
{
	unsigned long now = jiffies;
	unsigned long last = device->rs_mark_time[device->rs_last_mark];
	int next = (device->rs_last_mark + 1) % DRBD_SYNC_MARKS;
	if (time_after_eq(now, last + DRBD_SYNC_MARK_STEP)) {
		if (device->rs_mark_left[device->rs_last_mark] != still_to_go &&
		    device->state.conn != C_PAUSED_SYNC_T &&
		    device->state.conn != C_PAUSED_SYNC_S) {
			device->rs_mark_time[next] = now;
			device->rs_mark_left[next] = still_to_go;
			device->rs_last_mark = next;
		}
	}
}
/* It is called lazy update, so don't do write-out too often. */
static bool lazy_bitmap_update_due(struct drbd_device *device)
{
	return time_after(jiffies, device->rs_last_bcast + 2*HZ);
}

static void maybe_schedule_on_disk_bitmap_update(struct drbd_device *device, bool rs_done)
{
	if (rs_done) {
		struct drbd_connection *connection = first_peer_device(device)->connection;
		if (connection->agreed_pro_version <= 95 ||
		    is_sync_target_state(device->state.conn))
			set_bit(RS_DONE, &device->flags);
			/* and also set RS_PROGRESS below */

		/* Else: rather wait for explicit notification via receive_state,
		 * to avoid uuids-rotated-too-fast causing full resync
		 * in next handshake, in case the replication link breaks
		 * at the most unfortunate time... */
	} else if (!lazy_bitmap_update_due(device))
		return;

	drbd_device_post_work(device, RS_PROGRESS);
}

static int update_sync_bits(struct drbd_device *device,
		unsigned long sbnr, unsigned long ebnr,
		enum update_sync_bits_mode mode)
{
	/*
	 * We keep a count of set bits per resync-extent in the ->rs_left
	 * caching member, so we need to loop and work within the resync extent
	 * alignment. Typically this loop will execute exactly once.
	 */
	unsigned long flags;
	unsigned long count = 0;
	unsigned int cleared = 0;
	while (sbnr <= ebnr) {
		/* set temporary boundary bit number to last bit number within
		 * the resync extent of the current start bit number,
		 * but cap at provided end bit number */
		unsigned long tbnr = min(ebnr, sbnr | BM_BLOCKS_PER_BM_EXT_MASK);
		unsigned long c;

		if (mode == RECORD_RS_FAILED)
			/* Only called from drbd_rs_failed_io(), bits
			 * supposedly still set.  Recount, maybe some
			 * of the bits have been successfully cleared
			 * by application IO meanwhile.
			 */
			c = drbd_bm_count_bits(device, sbnr, tbnr);
		else if (mode == SET_IN_SYNC)
			c = drbd_bm_clear_bits(device, sbnr, tbnr);
		else /* if (mode == SET_OUT_OF_SYNC) */
			c = drbd_bm_set_bits(device, sbnr, tbnr);

		if (c) {
			spin_lock_irqsave(&device->al_lock, flags);
			cleared += update_rs_extent(device, BM_BIT_TO_EXT(sbnr), c, mode);
			spin_unlock_irqrestore(&device->al_lock, flags);
			count += c;
		}
		sbnr = tbnr + 1;
	}
	if (count) {
		if (mode == SET_IN_SYNC) {
			unsigned long still_to_go = drbd_bm_total_weight(device);
			bool rs_is_done = (still_to_go <= device->rs_failed);
			drbd_advance_rs_marks(device, still_to_go);
			if (cleared || rs_is_done)
				maybe_schedule_on_disk_bitmap_update(device, rs_is_done);
		} else if (mode == RECORD_RS_FAILED)
			device->rs_failed += count;
		wake_up(&device->al_wait);
	}
	return count;
}
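/* Worked example for the per-extent loop in update_sync_bits(), assuming
 * 16 MiB resync extents and one bitmap bit per 4 KiB (see the ATTENTION
 * comment above): one resync extent covers 16 MiB / 4 KiB = 4096 bits,
 * so BM_BLOCKS_PER_BM_EXT_MASK would be 0xfff, and a range sbnr=4000,
 * ebnr=4200 is processed in two rounds: [4000..4095] and [4096..4200]. */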
static bool plausible_request_size(int size)
{
	return size > 0
		&& size <= DRBD_MAX_BATCH_BIO_SIZE
		&& IS_ALIGNED(size, 512);
}

/* clear the bit corresponding to the piece of storage in question:
 * size bytes of data starting from sector.  Only clear bits of the affected
 * one or more _aligned_ BM_BLOCK_SIZE blocks.
 *
 * called by worker on C_SYNC_TARGET and receiver on SyncSource.
 *
 */
int __drbd_change_sync(struct drbd_device *device, sector_t sector, int size,
		enum update_sync_bits_mode mode)
{
	/* Is called from worker and receiver context _only_ */
	unsigned long sbnr, ebnr, lbnr;
	unsigned long count = 0;
	sector_t esector, nr_sectors;

	/* This would be an empty REQ_PREFLUSH, be silent. */
	if ((mode == SET_OUT_OF_SYNC) && size == 0)
		return 0;

	if (!plausible_request_size(size)) {
		drbd_err(device, "%s: sector=%llus size=%d nonsense!\n",
				drbd_change_sync_fname[mode],
				(unsigned long long)sector, size);
		return 0;
	}

	if (!get_ldev(device))
		return 0; /* no disk, no metadata, no bitmap to manipulate bits in */

	nr_sectors = get_capacity(device->vdisk);
	esector = sector + (size >> 9) - 1;

	if (!expect(sector < nr_sectors))
		goto out;
	if (!expect(esector < nr_sectors))
		esector = nr_sectors - 1;

	lbnr = BM_SECT_TO_BIT(nr_sectors-1);

	if (mode == SET_IN_SYNC) {
		/* Round up start sector, round down end sector.  We make sure
		 * we only clear full, aligned, BM_BLOCK_SIZE blocks. */
		if (unlikely(esector < BM_SECT_PER_BIT-1))
			goto out;
		if (unlikely(esector == (nr_sectors-1)))
			ebnr = lbnr;
		else
			ebnr = BM_SECT_TO_BIT(esector - (BM_SECT_PER_BIT-1));
		sbnr = BM_SECT_TO_BIT(sector + BM_SECT_PER_BIT-1);
	} else {
		/* We set it out of sync, or record resync failure.
		 * Should not round anything here. */
		sbnr = BM_SECT_TO_BIT(sector);
		ebnr = BM_SECT_TO_BIT(esector);
	}

	count = update_sync_bits(device, sbnr, ebnr, mode);
out:
	put_ldev(device);
	return count;
}
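/* Worked example for the SET_IN_SYNC rounding above, assuming one bitmap
 * bit per 4 KiB block, i.e. BM_SECT_PER_BIT == 8: a request covering
 * sectors 4..19 may only clear bits of fully covered 4 KiB blocks, so
 * sbnr = BM_SECT_TO_BIT(4 + 7) = 1 and ebnr = BM_SECT_TO_BIT(19 - 7) = 1:
 * only bit 1 (sectors 8..15) is cleared; the partially covered blocks at
 * either end stay out of sync. */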
static
struct bm_extent *_bme_get(struct drbd_device *device, unsigned int enr)
{
	struct lc_element *e;
	struct bm_extent *bm_ext;
	int wakeup = 0;
	unsigned long rs_flags;

	spin_lock_irq(&device->al_lock);
	if (device->resync_locked > device->resync->nr_elements/2) {
		spin_unlock_irq(&device->al_lock);
		return NULL;
	}
	e = lc_get(device->resync, enr);
	bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
	if (bm_ext) {
		if (bm_ext->lce.lc_number != enr) {
			bm_ext->rs_left = drbd_bm_e_weight(device, enr);
			bm_ext->rs_failed = 0;
			lc_committed(device->resync);
			wakeup = 1;
		}
		if (bm_ext->lce.refcnt == 1)
			device->resync_locked++;
		set_bit(BME_NO_WRITES, &bm_ext->flags);
	}
	rs_flags = device->resync->flags;
	spin_unlock_irq(&device->al_lock);
	if (wakeup)
		wake_up(&device->al_wait);

	if (!bm_ext) {
		if (rs_flags & LC_STARVING)
			drbd_warn(device, "Have to wait for element"
			     " (resync LRU too small?)\n");
		BUG_ON(rs_flags & LC_LOCKED);
	}

	return bm_ext;
}

static int _is_in_al(struct drbd_device *device, unsigned int enr)
{
	int rv;

	spin_lock_irq(&device->al_lock);
	rv = lc_is_used(device->act_log, enr);
	spin_unlock_irq(&device->al_lock);

	return rv;
}

/**
 * drbd_rs_begin_io() - Gets an extent in the resync LRU cache and sets it to BME_LOCKED
 * @device:	DRBD device.
 * @sector:	The sector number.
 *
 * This function sleeps on al_wait. Returns 0 on success, -EINTR if interrupted.
 */
int drbd_rs_begin_io(struct drbd_device *device, sector_t sector)
{
	unsigned int enr = BM_SECT_TO_EXT(sector);
	struct bm_extent *bm_ext;
	int i, sig;
	bool sa;

retry:
	sig = wait_event_interruptible(device->al_wait,
			(bm_ext = _bme_get(device, enr)));
	if (sig)
		return -EINTR;

	if (test_bit(BME_LOCKED, &bm_ext->flags))
		return 0;

	/* step aside only while we are above c-min-rate; unless disabled. */
	sa = drbd_rs_c_min_rate_throttle(device);

	for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
		sig = wait_event_interruptible(device->al_wait,
					       !_is_in_al(device, enr * AL_EXT_PER_BM_SECT + i) ||
					       (sa && test_bit(BME_PRIORITY, &bm_ext->flags)));

		if (sig || (sa && test_bit(BME_PRIORITY, &bm_ext->flags))) {
			spin_lock_irq(&device->al_lock);
			if (lc_put(device->resync, &bm_ext->lce) == 0) {
				bm_ext->flags = 0; /* clears BME_NO_WRITES and eventually BME_PRIORITY */
				device->resync_locked--;
				wake_up(&device->al_wait);
			}
			spin_unlock_irq(&device->al_lock);
			if (sig)
				return -EINTR;
			if (schedule_timeout_interruptible(HZ/10))
				return -EINTR;
			goto retry;
		}
	}
	set_bit(BME_LOCKED, &bm_ext->flags);
	return 0;
}
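/* Note on the AL_EXT_PER_BM_SECT loop in drbd_rs_begin_io() above:
 * with 4 MiB AL extents and 16 MiB resync extents (see the ATTENTION
 * comment further up), one resync extent overlaps 16 MiB / 4 MiB = 4
 * AL extents, so locking resync extent enr means checking AL slots
 * enr*4 .. enr*4+3 for ongoing application IO. */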
void drbd_rs_complete_io(struct drbd_device *device, sector_t sector)
{
	unsigned int enr = BM_SECT_TO_EXT(sector);
	struct lc_element *e;
	struct bm_extent *bm_ext;
	unsigned long flags;

	spin_lock_irqsave(&device->al_lock, flags);
	e = lc_find(device->resync, enr);
	bm_ext = e ? lc_entry(e, struct bm_extent, lce) : NULL;
	if (!bm_ext) {
		spin_unlock_irqrestore(&device->al_lock, flags);
		if (__ratelimit(&drbd_ratelimit_state))
			drbd_err(device, "drbd_rs_complete_io() called, but extent not found\n");
		return;
	}

	if (bm_ext->lce.refcnt == 0) {
		spin_unlock_irqrestore(&device->al_lock, flags);
		drbd_err(device, "drbd_rs_complete_io(,%llu [=%u]) called, "
			 "but refcnt is 0!?\n",
			 (unsigned long long)sector, enr);
		return;
	}

	if (lc_put(device->resync, &bm_ext->lce) == 0) {
		bm_ext->flags = 0; /* clear BME_LOCKED, BME_NO_WRITES and BME_PRIORITY */
		device->resync_locked--;
		wake_up(&device->al_wait);
	}

	spin_unlock_irqrestore(&device->al_lock, flags);
}
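
/*
 * Illustrative completion-path sketch (not part of this file; the helper
 * name is hypothetical): each successful drbd_rs_begin_io() or
 * drbd_try_rs_begin_io() must eventually be matched by exactly one
 * drbd_rs_complete_io() for the same sector, otherwise the reference
 * taken on the bm_extent is leaked and drbd_rs_del_all() below can
 * never succeed:
 *
 *	static void my_resync_request_done(struct drbd_device *device,
 *					   sector_t sector)
 *	{
 *		drbd_rs_complete_io(device, sector);	// drops BME_* flags once refcnt hits 0
 *		// ... further per-request bookkeeping ...
 *	}
 */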
/**
 * drbd_rs_cancel_all() - Removes all extents from the resync LRU (even BME_LOCKED)
 * @device:	DRBD device.
 */
void drbd_rs_cancel_all(struct drbd_device *device)
{
	spin_lock_irq(&device->al_lock);

	if (get_ldev_if_state(device, D_FAILED)) { /* Makes sure ->resync is there. */
		lc_reset(device->resync);
		put_ldev(device);
	}
	device->resync_locked = 0;
	device->resync_wenr = LC_FREE;
	spin_unlock_irq(&device->al_lock);
	wake_up(&device->al_wait);
}
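
/*
 * Illustrative distinction (not part of this file): drbd_rs_cancel_all()
 * above unconditionally forgets all resync extent state, even BME_LOCKED
 * entries, so it is the "hard" cleanup when a resync is aborted:
 *
 *	drbd_rs_cancel_all(device);	// e.g. after the resync has been aborted
 *
 * A graceful teardown instead retries drbd_rs_del_all() below until no
 * extent is referenced anymore (see the sketch after that function).
 */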
/**
 * drbd_rs_del_all() - Gracefully remove all extents from the resync LRU
 * @device:	DRBD device.
 *
 * Returns 0 upon success, -EAGAIN if at least one reference count was
 * not zero.
 */
int drbd_rs_del_all(struct drbd_device *device)
{
	struct lc_element *e;
	struct bm_extent *bm_ext;
	int i;

	spin_lock_irq(&device->al_lock);

	if (get_ldev_if_state(device, D_FAILED)) {
		/* ok, ->resync is there. */
		for (i = 0; i < device->resync->nr_elements; i++) {
			e = lc_element_by_index(device->resync, i);
			bm_ext = lc_entry(e, struct bm_extent, lce);
			if (bm_ext->lce.lc_number == LC_FREE)
				continue;
			if (bm_ext->lce.lc_number == device->resync_wenr) {
				drbd_info(device, "dropping %u in drbd_rs_del_all, apparently"
					  " got 'synced' by application io\n",
					  device->resync_wenr);
				D_ASSERT(device, !test_bit(BME_LOCKED, &bm_ext->flags));
				D_ASSERT(device, test_bit(BME_NO_WRITES, &bm_ext->flags));
				clear_bit(BME_NO_WRITES, &bm_ext->flags);
				device->resync_wenr = LC_FREE;
				lc_put(device->resync, &bm_ext->lce);
			}
			if (bm_ext->lce.refcnt != 0) {
				drbd_info(device, "Retrying drbd_rs_del_all() later. "
					  "refcnt=%d\n", bm_ext->lce.refcnt);
				put_ldev(device);
				spin_unlock_irq(&device->al_lock);
				return -EAGAIN;
			}
			D_ASSERT(device, !test_bit(BME_LOCKED, &bm_ext->flags));
			D_ASSERT(device, !test_bit(BME_NO_WRITES, &bm_ext->flags));
			lc_del(device->resync, &bm_ext->lce);
		}
		D_ASSERT(device, device->resync->used == 0);
		put_ldev(device);
	}
	spin_unlock_irq(&device->al_lock);
	wake_up(&device->al_wait);

	return 0;
}
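
/*
 * Illustrative retry sketch (not part of this file): since
 * drbd_rs_del_all() returns -EAGAIN while any bm_extent is still
 * referenced, a graceful teardown would typically back off and retry:
 *
 *	while (drbd_rs_del_all(device) == -EAGAIN)
 *		schedule_timeout_interruptible(HZ/10);	// back off, then retry
 */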