/*
   drbd_receiver.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */


#include <linux/module.h>

#include <asm/uaccess.h>
#include <net/sock.h>

#include <linux/drbd.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include "drbd_int.h"
#include "drbd_req.h"

#include "drbd_vli.h"

enum finish_epoch {
	FE_STILL_LIVE,
	FE_DESTROYED,
	FE_RECYCLED,
};

static int drbd_do_handshake(struct drbd_conf *mdev);
static int drbd_do_auth(struct drbd_conf *mdev);

static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_conf *, struct drbd_work *, int);


#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)
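
/* Note (descriptive): GFP_TRY deliberately lacks __GFP_WAIT, so these
 * allocations may be served from highmem and may fail silently, but they
 * never enter direct reclaim; see the "criss-cross" comment in
 * drbd_pp_first_pages_or_try_alloc() below for why that matters.
 */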

/*
 * some helper functions to deal with single linked page lists,
 * page->private being our "next" pointer.
 */

/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller.
 */
static struct page *page_chain_del(struct page **head, int n)
{
	struct page *page;
	struct page *tmp;

	BUG_ON(!n);
	BUG_ON(!head);

	page = *head;

	if (!page)
		return NULL;

	while (page) {
		tmp = page_chain_next(page);
		if (--n == 0)
			break; /* found sufficient pages */
		if (tmp == NULL)
			/* insufficient pages, don't use any of them. */
			return NULL;
		page = tmp;
	}

	/* add end of list marker for the returned list */
	set_page_private(page, 0);
	/* actual return value, and adjustment of head */
	page = *head;
	*head = tmp;
	return page;
}
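
/* Illustration (not normative): a chain of three pages looks like
 *
 *	*head --> pageA --> pageB --> pageC --> 0
 *
 * where each arrow is the next struct page * stored in page->private,
 * and private == 0 terminates the chain.  Walking a chain is simply
 *
 *	for (tmp = chain; tmp; tmp = page_chain_next(tmp))
 *		;
 */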

/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock.
 */
static struct page *page_chain_tail(struct page *page, int *len)
{
	struct page *tmp;
	int i = 1;
	while ((tmp = page_chain_next(page)))
		++i, page = tmp;
	if (len)
		*len = i;
	return page;
}

static int page_chain_free(struct page *page)
{
	struct page *tmp;
	int i = 0;
	page_chain_for_each_safe(page, tmp) {
		put_page(page);
		++i;
	}
	return i;
}

static void page_chain_add(struct page **head,
		struct page *chain_first, struct page *chain_last)
{
#if 1
	struct page *tmp;
	tmp = page_chain_tail(chain_first, NULL);
	BUG_ON(tmp != chain_last);
#endif

	/* add chain to head */
	set_page_private(chain_last, (unsigned long)*head);
	*head = chain_first;
}
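
/* Allocation strategy of the function below (summary): first try to take
 * @number pages off the global drbd_pp_pool under drbd_pp_lock; otherwise
 * fall back to alloc_page(GFP_TRY).  A partial kernel allocation is linked
 * back into the pool and NULL is returned, so callers always see
 * all-or-nothing semantics.
 */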

static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int number)
{
	struct page *page = NULL;
	struct page *tmp = NULL;
	int i = 0;

	/* Yes, testing drbd_pp_vacant outside the lock is racy.
	 * So what. It saves a spin_lock. */
	if (drbd_pp_vacant >= number) {
		spin_lock(&drbd_pp_lock);
		page = page_chain_del(&drbd_pp_pool, number);
		if (page)
			drbd_pp_vacant -= number;
		spin_unlock(&drbd_pp_lock);
		if (page)
			return page;
	}

	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	for (i = 0; i < number; i++) {
		tmp = alloc_page(GFP_TRY);
		if (!tmp)
			break;
		set_page_private(tmp, (unsigned long)page);
		page = tmp;
	}

	if (i == number)
		return page;

	/* Not enough pages immediately available this time.
	 * No need to jump around here, drbd_pp_alloc will retry this
	 * function "soon". */
	if (page) {
		tmp = page_chain_tail(page, NULL);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	return NULL;
}

static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
{
	struct drbd_epoch_entry *e;
	struct list_head *le, *tle;

	/* The EEs are always appended to the end of the list. Since
	   they are sent in order over the wire, they have to finish
	   in order. As soon as we see the first one that has not
	   finished, we can stop examining the list... */

	list_for_each_safe(le, tle, &mdev->net_ee) {
		e = list_entry(le, struct drbd_epoch_entry, w.list);
		if (drbd_ee_has_active_page(e))
			break;
		list_move(le, to_be_freed);
	}
}

static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
{
	LIST_HEAD(reclaimed);
	struct drbd_epoch_entry *e, *t;

	spin_lock_irq(&mdev->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, e);
}
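
/* Accounting note (descriptive): pp_in_use counts pages handed out to this
 * device.  drbd_pp_alloc() below only admits a new allocation while
 * pp_in_use is under net_conf->max_buffers, and may overshoot by up to
 * @number pages, as the comment inside the function concedes.
 */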

/**
 * drbd_pp_alloc() - Returns @number pages, retries forever (or until signalled)
 * @mdev:	DRBD device.
 * @number:	number of pages requested
 * @retry:	whether to retry, if not enough pages are available right now
 *
 * Tries to allocate number pages, first from our own page pool, then from
 * the kernel, unless this allocation would exceed the max_buffers setting.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * Returns a page chain linked via page->private.
 */
static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool retry)
{
	struct page *page = NULL;
	DEFINE_WAIT(wait);

	/* Yes, we may run up to @number over max_buffers. If we
	 * follow it strictly, the admin will get it wrong anyways. */
	if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers)
		page = drbd_pp_first_pages_or_try_alloc(mdev, number);

	while (page == NULL) {
		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

		drbd_kick_lo_and_reclaim_net(mdev);

		if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
			page = drbd_pp_first_pages_or_try_alloc(mdev, number);
			if (page)
				break;
		}

		if (!retry)
			break;

		if (signal_pending(current)) {
			dev_warn(DEV, "drbd_pp_alloc interrupted!\n");
			break;
		}

		schedule();
	}
	finish_wait(&drbd_pp_wait, &wait);

	if (page)
		atomic_add(number, &mdev->pp_in_use);
	return page;
}
"pp_in_use_by_net" : "pp_in_use", i); 297b411b363SPhilipp Reisner wake_up(&drbd_pp_wait); 298b411b363SPhilipp Reisner } 299b411b363SPhilipp Reisner 300b411b363SPhilipp Reisner /* 301b411b363SPhilipp Reisner You need to hold the req_lock: 302b411b363SPhilipp Reisner _drbd_wait_ee_list_empty() 303b411b363SPhilipp Reisner 304b411b363SPhilipp Reisner You must not have the req_lock: 305b411b363SPhilipp Reisner drbd_free_ee() 306b411b363SPhilipp Reisner drbd_alloc_ee() 307b411b363SPhilipp Reisner drbd_init_ee() 308b411b363SPhilipp Reisner drbd_release_ee() 309b411b363SPhilipp Reisner drbd_ee_fix_bhs() 310b411b363SPhilipp Reisner drbd_process_done_ee() 311b411b363SPhilipp Reisner drbd_clear_done_ee() 312b411b363SPhilipp Reisner drbd_wait_ee_list_empty() 313b411b363SPhilipp Reisner */ 314b411b363SPhilipp Reisner 315b411b363SPhilipp Reisner struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev, 316b411b363SPhilipp Reisner u64 id, 317b411b363SPhilipp Reisner sector_t sector, 318b411b363SPhilipp Reisner unsigned int data_size, 319b411b363SPhilipp Reisner gfp_t gfp_mask) __must_hold(local) 320b411b363SPhilipp Reisner { 321b411b363SPhilipp Reisner struct drbd_epoch_entry *e; 322a73ff323SLars Ellenberg struct page *page = NULL; 32345bb912bSLars Ellenberg unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT; 324b411b363SPhilipp Reisner 3250cf9d27eSAndreas Gruenbacher if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE)) 326b411b363SPhilipp Reisner return NULL; 327b411b363SPhilipp Reisner 328b411b363SPhilipp Reisner e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM); 329b411b363SPhilipp Reisner if (!e) { 330b411b363SPhilipp Reisner if (!(gfp_mask & __GFP_NOWARN)) 331b411b363SPhilipp Reisner dev_err(DEV, "alloc_ee: Allocation of an EE failed\n"); 332b411b363SPhilipp Reisner return NULL; 333b411b363SPhilipp Reisner } 334b411b363SPhilipp Reisner 335a73ff323SLars Ellenberg if (data_size) { 33645bb912bSLars Ellenberg page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT)); 33745bb912bSLars Ellenberg if (!page) 33845bb912bSLars Ellenberg goto fail; 339a73ff323SLars Ellenberg } 340b411b363SPhilipp Reisner 34124c4830cSBart Van Assche INIT_HLIST_NODE(&e->collision); 342b411b363SPhilipp Reisner e->epoch = NULL; 34345bb912bSLars Ellenberg e->mdev = mdev; 34445bb912bSLars Ellenberg e->pages = page; 34545bb912bSLars Ellenberg atomic_set(&e->pending_bios, 0); 34645bb912bSLars Ellenberg e->size = data_size; 347b411b363SPhilipp Reisner e->flags = 0; 34845bb912bSLars Ellenberg e->sector = sector; 34945bb912bSLars Ellenberg e->block_id = id; 350b411b363SPhilipp Reisner 351b411b363SPhilipp Reisner return e; 352b411b363SPhilipp Reisner 35345bb912bSLars Ellenberg fail: 354b411b363SPhilipp Reisner mempool_free(e, drbd_ee_mempool); 355b411b363SPhilipp Reisner return NULL; 356b411b363SPhilipp Reisner } 357b411b363SPhilipp Reisner 358435f0740SLars Ellenberg void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, int is_net) 359b411b363SPhilipp Reisner { 360c36c3cedSLars Ellenberg if (e->flags & EE_HAS_DIGEST) 361c36c3cedSLars Ellenberg kfree(e->digest); 362435f0740SLars Ellenberg drbd_pp_free(mdev, e->pages, is_net); 36345bb912bSLars Ellenberg D_ASSERT(atomic_read(&e->pending_bios) == 0); 36424c4830cSBart Van Assche D_ASSERT(hlist_unhashed(&e->collision)); 365b411b363SPhilipp Reisner mempool_free(e, drbd_ee_mempool); 366b411b363SPhilipp Reisner } 367b411b363SPhilipp Reisner 368b411b363SPhilipp Reisner int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list) 

int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
{
	LIST_HEAD(work_list);
	struct drbd_epoch_entry *e, *t;
	int count = 0;
	int is_net = list == &mdev->net_ee;

	spin_lock_irq(&mdev->req_lock);
	list_splice_init(list, &work_list);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &work_list, w.list) {
		drbd_free_some_ee(mdev, e, is_net);
		count++;
	}
	return count;
}
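
/* Rough epoch-entry lifecycle, for orientation only (see drbd_int.h for
 * the authoritative semantics): entries live on active_ee while local IO
 * is in flight, move to done_ee once the disk completes, and linger on
 * net_ee while their pages may still be referenced by the network stack.
 */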

/*
 * This function is called from _asender only_
 * but see also comments in _req_mod(,barrier_acked)
 * and receive_Barrier.
 *
 * Move entries from net_ee to done_ee, if ready.
 * Grab done_ee, call all callbacks, free the entries.
 * The callbacks typically send out ACKs.
 */
static int drbd_process_done_ee(struct drbd_conf *mdev)
{
	LIST_HEAD(work_list);
	LIST_HEAD(reclaimed);
	struct drbd_epoch_entry *e, *t;
	int ok = (mdev->state.conn >= C_WF_REPORT_PARAMS);

	spin_lock_irq(&mdev->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	list_splice_init(&mdev->done_ee, &work_list);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, e);

	/* possible callbacks here:
	 * e_end_block, and e_end_resync_block, e_send_discard_ack.
	 * all ignore the last argument.
	 */
	list_for_each_entry_safe(e, t, &work_list, w.list) {
		/* list_del not necessary, next/prev members not touched */
		ok = e->w.cb(mdev, &e->w, !ok) && ok;
		drbd_free_ee(mdev, e);
	}
	wake_up(&mdev->ee_wait);

	return ok;
}

void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
	DEFINE_WAIT(wait);

	/* avoids spin_lock/unlock
	 * and calling prepare_to_wait in the fast path */
	while (!list_empty(head)) {
		prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mdev->req_lock);
		io_schedule();
		finish_wait(&mdev->ee_wait, &wait);
		spin_lock_irq(&mdev->req_lock);
	}
}

void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
	spin_lock_irq(&mdev->req_lock);
	_drbd_wait_ee_list_empty(mdev, head);
	spin_unlock_irq(&mdev->req_lock);
}
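
/* Note on the wait loop above: prepare_to_wait() runs before req_lock is
 * dropped, so a wake_up(&mdev->ee_wait) racing with the unlock cannot be
 * lost; the worst case is one spurious pass through io_schedule().
 */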

/* see also kernel_accept, which is only present since 2.6.18.
 * also we want to log which part of it failed, exactly */
static int drbd_accept(struct drbd_conf *mdev, const char **what,
		struct socket *sock, struct socket **newsock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	*what = "listen";
	err = sock->ops->listen(sock, 5);
	if (err < 0)
		goto out;

	*what = "sock_create_lite";
	err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
			       newsock);
	if (err < 0)
		goto out;

	*what = "accept";
	err = sock->ops->accept(sock, *newsock, 0);
	if (err < 0) {
		sock_release(*newsock);
		*newsock = NULL;
		goto out;
	}
	(*newsock)->ops = sock->ops;
	__module_get((*newsock)->ops->owner);

out:
	return err;
}
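
/* Receive helpers below (summary): drbd_recv_short() issues a single
 * sock_recvmsg() and returns whatever arrived; drbd_recv() loops until
 * the full @size was received or an error/shutdown occurred, and forces
 * the connection to C_BROKEN_PIPE on a short result.
 */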

static int drbd_recv_short(struct drbd_conf *mdev, struct socket *sock,
		    void *buf, size_t size, int flags)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
	set_fs(oldfs);

	return rv;
}

static int drbd_recv(struct drbd_conf *mdev, void *buf, size_t size)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = MSG_WAITALL | MSG_NOSIGNAL
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);

	for (;;) {
		rv = sock_recvmsg(mdev->data.socket, &msg, size, msg.msg_flags);
		if (rv == size)
			break;

		/* Note:
		 * ECONNRESET	other side closed the connection
		 * ERESTARTSYS	(on sock) we got a signal
		 */

		if (rv < 0) {
			if (rv == -ECONNRESET)
				dev_info(DEV, "sock was reset by peer\n");
			else if (rv != -ERESTARTSYS)
				dev_err(DEV, "sock_recvmsg returned %d\n", rv);
			break;
		} else if (rv == 0) {
			dev_info(DEV, "sock was shut down by peer\n");
			break;
		} else {
			/* signal came in, or peer/link went down,
			 * after we read a partial message
			 */
			/* D_ASSERT(signal_pending(current)); */
			break;
		}
	}

	set_fs(oldfs);

	if (rv != size)
		drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));

	return rv;
}

/* quoting tcp(7):
 *   On individual connections, the socket buffer size must be set prior to the
 *   listen(2) or connect(2) calls in order to have it take effect.
 * This is our wrapper to do so.
 */
static void drbd_setbufsize(struct socket *sock, unsigned int snd,
		unsigned int rcv)
{
	/* open coded SO_SNDBUF, SO_RCVBUF */
	if (snd) {
		sock->sk->sk_sndbuf = snd;
		sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
	}
	if (rcv) {
		sock->sk->sk_rcvbuf = rcv;
		sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
	}
}
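
/* Active connection attempt (summary of the function below): create a TCP
 * socket, bind it to the locally configured address with port 0 so the
 * kernel picks an ephemeral source port, then connect() to the peer.
 * "Expected" failures (timeout, ECONNREFUSED, ...) keep us in
 * C_WF_CONNECTION; anything else forces C_DISCONNECTING.
 */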

static struct socket *drbd_try_connect(struct drbd_conf *mdev)
{
	const char *what;
	struct socket *sock;
	struct sockaddr_in6 src_in6;
	int err;
	int disconnect_on_error = 1;

	if (!get_net_conf(mdev))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
		SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err < 0) {
		sock = NULL;
		goto out;
	}

	sock->sk->sk_rcvtimeo =
	sock->sk->sk_sndtimeo = mdev->net_conf->try_connect_int*HZ;
	drbd_setbufsize(sock, mdev->net_conf->sndbuf_size,
			mdev->net_conf->rcvbuf_size);

	/* explicitly bind to the configured IP as source IP
	 * for the outgoing connections.
	 * This is needed for multihomed hosts and to be
	 * able to use lo: interfaces for drbd.
	 * Make sure to use 0 as port number, so linux selects
	 * a free one dynamically.
	 */
	memcpy(&src_in6, mdev->net_conf->my_addr,
	       min_t(int, mdev->net_conf->my_addr_len, sizeof(src_in6)));
	if (((struct sockaddr *)mdev->net_conf->my_addr)->sa_family == AF_INET6)
		src_in6.sin6_port = 0;
	else
		((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */

	what = "bind before connect";
	err = sock->ops->bind(sock,
			      (struct sockaddr *) &src_in6,
			      mdev->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	/* connect may fail, peer not yet available.
	 * stay C_WF_CONNECTION, don't go Disconnecting! */
	disconnect_on_error = 0;
	what = "connect";
	err = sock->ops->connect(sock,
				 (struct sockaddr *)mdev->net_conf->peer_addr,
				 mdev->net_conf->peer_addr_len, 0);

out:
	if (err < 0) {
		if (sock) {
			sock_release(sock);
			sock = NULL;
		}
		switch (-err) {
			/* timeout, busy, signal pending */
		case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
		case EINTR: case ERESTARTSYS:
			/* peer not (yet) available, network problem */
		case ECONNREFUSED: case ENETUNREACH:
		case EHOSTDOWN:    case EHOSTUNREACH:
			disconnect_on_error = 0;
			break;
		default:
			dev_err(DEV, "%s failed, err = %d\n", what, err);
		}
		if (disconnect_on_error)
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
	}
	put_net_conf(mdev);
	return sock;
}
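
/* Passive side (summary of the function below): bind and listen on the
 * configured address and accept a single connection.  The accept timeout
 * is try_connect_int, randomly jittered up or down by a seventh (the
 * "28.5%" total spread noted in the code), so two nodes that start
 * connecting simultaneously do not time out in lockstep forever.
 */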

static struct socket *drbd_wait_for_connect(struct drbd_conf *mdev)
{
	int timeo, err;
	struct socket *s_estab = NULL, *s_listen;
	const char *what;

	if (!get_net_conf(mdev))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
		SOCK_STREAM, IPPROTO_TCP, &s_listen);
	if (err) {
		s_listen = NULL;
		goto out;
	}

	timeo = mdev->net_conf->try_connect_int * HZ;
	timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */

	s_listen->sk->sk_reuse    = SK_CAN_REUSE; /* SO_REUSEADDR */
	s_listen->sk->sk_rcvtimeo = timeo;
	s_listen->sk->sk_sndtimeo = timeo;
	drbd_setbufsize(s_listen, mdev->net_conf->sndbuf_size,
			mdev->net_conf->rcvbuf_size);

	what = "bind before listen";
	err = s_listen->ops->bind(s_listen,
			      (struct sockaddr *) mdev->net_conf->my_addr,
			      mdev->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	err = drbd_accept(mdev, &what, s_listen, &s_estab);

out:
	if (s_listen)
		sock_release(s_listen);
	if (err < 0) {
		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
			dev_err(DEV, "%s failed, err = %d\n", what, err);
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		}
	}
	put_net_conf(mdev);

	return s_estab;
}

static int drbd_send_fp(struct drbd_conf *mdev,
	struct socket *sock, enum drbd_packets cmd)
{
	struct p_header80 *h = &mdev->data.sbuf.header.h80;

	return _drbd_send_cmd(mdev, sock, cmd, h, sizeof(*h), 0);
}

static enum drbd_packets drbd_recv_fp(struct drbd_conf *mdev, struct socket *sock)
{
	struct p_header80 *h = &mdev->data.rbuf.header.h80;
	int rr;

	rr = drbd_recv_short(mdev, sock, h, sizeof(*h), 0);

	if (rr == sizeof(*h) && h->magic == BE_DRBD_MAGIC)
		return be16_to_cpu(h->command);

	return 0xffff;
}
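
/* The "fp" (first packet) helpers above exchange the very first packet on
 * a freshly connected socket: P_HAND_SHAKE_S marks it as the data socket,
 * P_HAND_SHAKE_M as the meta socket (see drbd_connect() below).
 * drbd_recv_fp() returns 0xffff for anything unrecognizable.
 */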

/**
 * drbd_socket_okay() - Free the socket if its connection is not okay
 * @mdev:	DRBD device.
 * @sock:	pointer to the pointer to the socket.
 */
static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
{
	int rr;
	char tb[4];

	if (!*sock)
		return false;

	rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);

	if (rr > 0 || rr == -EAGAIN) {
		return true;
	} else {
		sock_release(*sock);
		*sock = NULL;
		return false;
	}
}
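
/* Connection establishment overview (descriptive): DRBD uses two TCP
 * connections per peer, a "data" socket for bulk replication traffic and
 * a "meta" socket for ACKs and pings.  Both nodes connect actively and
 * accept passively at the same time; crossed attempts are resolved via
 * the first-packet types and, on garbage, a coin flip (random32()).
 */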

/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 *  -2 We do not have a network config...
 */
static int drbd_connect(struct drbd_conf *mdev)
{
	struct socket *s, *sock, *msock;
	int try, h, ok;
	enum drbd_state_rv rv;

	D_ASSERT(!mdev->data.socket);

	if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)
		return -2;

	clear_bit(DISCARD_CONCURRENT, &mdev->flags);

	sock  = NULL;
	msock = NULL;

	do {
		for (try = 0;;) {
			/* 3 tries, this should take less than a second! */
			s = drbd_try_connect(mdev);
			if (s || ++try >= 3)
				break;
			/* give the other side time to call bind() & listen() */
			schedule_timeout_interruptible(HZ / 10);
		}

		if (s) {
			if (!sock) {
				drbd_send_fp(mdev, s, P_HAND_SHAKE_S);
				sock = s;
				s = NULL;
			} else if (!msock) {
				drbd_send_fp(mdev, s, P_HAND_SHAKE_M);
				msock = s;
				s = NULL;
			} else {
				dev_err(DEV, "Logic error in drbd_connect()\n");
				goto out_release_sockets;
			}
		}

		if (sock && msock) {
			schedule_timeout_interruptible(mdev->net_conf->ping_timeo*HZ/10);
			ok = drbd_socket_okay(mdev, &sock);
			ok = drbd_socket_okay(mdev, &msock) && ok;
			if (ok)
				break;
		}

retry:
		s = drbd_wait_for_connect(mdev);
		if (s) {
			try = drbd_recv_fp(mdev, s);
			drbd_socket_okay(mdev, &sock);
			drbd_socket_okay(mdev, &msock);
			switch (try) {
			case P_HAND_SHAKE_S:
				if (sock) {
					dev_warn(DEV, "initial packet S crossed\n");
					sock_release(sock);
				}
				sock = s;
				break;
			case P_HAND_SHAKE_M:
				if (msock) {
					dev_warn(DEV, "initial packet M crossed\n");
					sock_release(msock);
				}
				msock = s;
				set_bit(DISCARD_CONCURRENT, &mdev->flags);
				break;
			default:
				dev_warn(DEV, "Error receiving initial packet\n");
				sock_release(s);
				if (random32() & 1)
					goto retry;
			}
		}

		if (mdev->state.conn <= C_DISCONNECTING)
			goto out_release_sockets;
		if (signal_pending(current)) {
			flush_signals(current);
			smp_rmb();
			if (get_t_state(&mdev->receiver) == Exiting)
				goto out_release_sockets;
		}

		if (sock && msock) {
			ok = drbd_socket_okay(mdev, &sock);
			ok = drbd_socket_okay(mdev, &msock) && ok;
			if (ok)
				break;
		}
	} while (1);

	msock->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
	sock->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */

	sock->sk->sk_allocation = GFP_NOIO;
	msock->sk->sk_allocation = GFP_NOIO;

	sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
	msock->sk->sk_priority = TC_PRIO_INTERACTIVE;

	/* NOT YET ...
	 * sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	 * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	 * first set it to the P_HAND_SHAKE timeout,
	 * which we set to 4x the configured ping_timeout. */
	sock->sk->sk_sndtimeo =
	sock->sk->sk_rcvtimeo = mdev->net_conf->ping_timeo*4*HZ/10;

	msock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	msock->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;

	/* we don't want delays.
	 * we use TCP_CORK where appropriate, though */
	drbd_tcp_nodelay(sock);
	drbd_tcp_nodelay(msock);

	mdev->data.socket = sock;
	mdev->meta.socket = msock;
	mdev->last_received = jiffies;

	D_ASSERT(mdev->asender.task == NULL);

	h = drbd_do_handshake(mdev);
	if (h <= 0)
		return h;

	if (mdev->cram_hmac_tfm) {
		/* drbd_request_state(mdev, NS(conn, WFAuth)); */
		switch (drbd_do_auth(mdev)) {
		case -1:
			dev_err(DEV, "Authentication of peer failed\n");
			return -1;
		case 0:
			dev_err(DEV, "Authentication of peer failed, trying again.\n");
			return 0;
		}
	}

	sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;

	atomic_set(&mdev->packet_seq, 0);
	mdev->peer_seq = 0;

	if (drbd_send_protocol(mdev) == -1)
		return -1;
	set_bit(STATE_SENT, &mdev->flags);
	drbd_send_sync_param(mdev, &mdev->sync_conf);
	drbd_send_sizes(mdev, 0, 0);
	drbd_send_uuids(mdev);
	drbd_send_current_state(mdev);
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	clear_bit(RESIZE_PENDING, &mdev->flags);

	spin_lock_irq(&mdev->req_lock);
	rv = _drbd_set_state(_NS(mdev, conn, C_WF_REPORT_PARAMS), CS_VERBOSE, NULL);
	if (mdev->state.conn != C_WF_REPORT_PARAMS)
		clear_bit(STATE_SENT, &mdev->flags);
	spin_unlock_irq(&mdev->req_lock);

	if (rv < SS_SUCCESS)
		return 0;

	drbd_thread_start(&mdev->asender);
	mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */

	return 1;

out_release_sockets:
	if (sock)
		sock_release(sock);
	if (msock)
		sock_release(msock);
	return -1;
}

static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsigned int *packet_size)
{
	union p_header *h = &mdev->data.rbuf.header;
	int r;

	r = drbd_recv(mdev, h, sizeof(*h));
	if (unlikely(r != sizeof(*h))) {
		if (!signal_pending(current))
			dev_warn(DEV, "short read expecting header on sock: r=%d\n", r);
		return false;
	}

	if (likely(h->h80.magic == BE_DRBD_MAGIC)) {
		*cmd = be16_to_cpu(h->h80.command);
		*packet_size = be16_to_cpu(h->h80.length);
	} else if (h->h95.magic == BE_DRBD_MAGIC_BIG) {
		*cmd = be16_to_cpu(h->h95.command);
		*packet_size = be32_to_cpu(h->h95.length);
	} else {
		dev_err(DEV, "magic?? on data m: 0x%08x c: %d l: %d\n",
		    be32_to_cpu(h->h80.magic),
		    be16_to_cpu(h->h80.command),
		    be16_to_cpu(h->h80.length));
		return false;
	}
	mdev->last_received = jiffies;

	return true;
}
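
/* On-the-wire headers handled above (descriptive): the classic h80 header
 * (BE_DRBD_MAGIC) carries a 16-bit length, while the h95 header
 * (BE_DRBD_MAGIC_BIG) extends the length field to 32 bits for larger
 * packets; the two are distinguished purely by their magic value.
 */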

static void drbd_flush(struct drbd_conf *mdev)
{
	int rv;

	if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
		rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
					NULL);
		if (rv) {
			dev_info(DEV, "local disk flush failed with status %d\n", rv);
			/* would rather check on EOPNOTSUPP, but that is not reliable.
			 * don't try again for ANY return value != 0
			 * if (rv == -EOPNOTSUPP) */
			drbd_bump_write_ordering(mdev, WO_drain_io);
		}
		put_ldev(mdev);
	}
}
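
/* Write-ordering methods, strongest first (see drbd_bump_write_ordering()
 * below): WO_bdev_flush issues a real disk flush per epoch, WO_drain_io
 * merely waits for all requests of an epoch to complete, WO_none does
 * neither.  A failed flush demotes the method; it is never promoted back.
 */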

/**
 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
 * @mdev:	DRBD device.
 * @epoch:	Epoch object.
 * @ev:		Epoch event.
 */
static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
					       struct drbd_epoch *epoch,
					       enum epoch_event ev)
{
	int epoch_size;
	struct drbd_epoch *next_epoch;
	enum finish_epoch rv = FE_STILL_LIVE;

	spin_lock(&mdev->epoch_lock);
	do {
		next_epoch = NULL;

		epoch_size = atomic_read(&epoch->epoch_size);

		switch (ev & ~EV_CLEANUP) {
		case EV_PUT:
			atomic_dec(&epoch->active);
			break;
		case EV_GOT_BARRIER_NR:
			set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
			break;
		case EV_BECAME_LAST:
			/* nothing to do*/
			break;
		}

		if (epoch_size != 0 &&
		    atomic_read(&epoch->active) == 0 &&
		    (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
			if (!(ev & EV_CLEANUP)) {
				spin_unlock(&mdev->epoch_lock);
				drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
				spin_lock(&mdev->epoch_lock);
			}
			if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))
				dec_unacked(mdev);

			if (mdev->current_epoch != epoch) {
				next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
				list_del(&epoch->list);
				ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
				mdev->epochs--;
				kfree(epoch);

				if (rv == FE_STILL_LIVE)
					rv = FE_DESTROYED;
			} else {
				epoch->flags = 0;
				atomic_set(&epoch->epoch_size, 0);
				/* atomic_set(&epoch->active, 0); is already zero */
				if (rv == FE_STILL_LIVE)
					rv = FE_RECYCLED;
				wake_up(&mdev->ee_wait);
			}
		}

		if (!next_epoch)
			break;

		epoch = next_epoch;
	} while (1);

	spin_unlock(&mdev->epoch_lock);

	return rv;
}

/**
 * drbd_bump_write_ordering() - Fall back to another write ordering method
 * @mdev:	DRBD device.
 * @wo:		Write ordering method to try.
 */
void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) __must_hold(local)
{
	enum write_ordering_e pwo;
	static char *write_ordering_str[] = {
		[WO_none] = "none",
		[WO_drain_io] = "drain",
		[WO_bdev_flush] = "flush",
	};

	pwo = mdev->write_ordering;
	wo = min(pwo, wo);
	if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
		wo = WO_drain_io;
	if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
		wo = WO_none;
	mdev->write_ordering = wo;
	if (pwo != mdev->write_ordering || wo == WO_bdev_flush)
		dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
}

/**
 * drbd_submit_ee()
 * @mdev:	DRBD device.
 * @e:		epoch entry
 * @rw:		flag field, see bio->bi_rw
 *
 * May spread the pages to multiple bios,
 * depending on bio_add_page restrictions.
 *
 * Returns 0 if all bios have been submitted,
 * -ENOMEM if we could not allocate enough bios,
 * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a
 *  single page to an empty bio (which should never happen and likely indicates
 *  that the lower level IO stack is in some way broken). This has been observed
 *  on certain Xen deployments.
 */
/* TODO allocate from our own bio_set. */
int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
		const unsigned rw, const int fault_type)
{
	struct bio *bios = NULL;
	struct bio *bio;
	struct page *page = e->pages;
	sector_t sector = e->sector;
	unsigned ds = e->size;
	unsigned n_bios = 0;
	unsigned nr_pages = (ds + PAGE_SIZE - 1) >> PAGE_SHIFT;
	int err = -ENOMEM;

	/* In most cases, we will only need one bio. But in case the lower
	 * level restrictions happen to be different at this offset on this
	 * side than those of the sending peer, we may need to submit the
	 * request in more than one bio.
	 *
	 * Plain bio_alloc is good enough here, this is no DRBD internally
	 * generated bio, but a bio allocated on behalf of the peer.
	 */
next_bio:
	bio = bio_alloc(GFP_NOIO, nr_pages);
	if (!bio) {
		dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
		goto fail;
	}
	/* > e->sector, unless this is the first bio */
	bio->bi_sector = sector;
	bio->bi_bdev = mdev->ldev->backing_bdev;
	bio->bi_rw = rw;
	bio->bi_private = e;
	bio->bi_end_io = drbd_endio_sec;

	bio->bi_next = bios;
	bios = bio;
	++n_bios;

	page_chain_for_each(page) {
		unsigned len = min_t(unsigned, ds, PAGE_SIZE);
		if (!bio_add_page(bio, page, len, 0)) {
			/* A single page must always be possible!
			 * But in case it fails anyways,
			 * we deal with it, and complain (below). */
*/ 114210f6d992SLars Ellenberg if (bio->bi_vcnt == 0) { 114310f6d992SLars Ellenberg dev_err(DEV, 114410f6d992SLars Ellenberg "bio_add_page failed for len=%u, " 114510f6d992SLars Ellenberg "bi_vcnt=0 (bi_sector=%llu)\n", 114610f6d992SLars Ellenberg len, (unsigned long long)bio->bi_sector); 114710f6d992SLars Ellenberg err = -ENOSPC; 114810f6d992SLars Ellenberg goto fail; 114910f6d992SLars Ellenberg } 115045bb912bSLars Ellenberg goto next_bio; 115145bb912bSLars Ellenberg } 115245bb912bSLars Ellenberg ds -= len; 115345bb912bSLars Ellenberg sector += len >> 9; 115445bb912bSLars Ellenberg --nr_pages; 115545bb912bSLars Ellenberg } 115645bb912bSLars Ellenberg D_ASSERT(page == NULL); 115745bb912bSLars Ellenberg D_ASSERT(ds == 0); 115845bb912bSLars Ellenberg 115945bb912bSLars Ellenberg atomic_set(&e->pending_bios, n_bios); 116045bb912bSLars Ellenberg do { 116145bb912bSLars Ellenberg bio = bios; 116245bb912bSLars Ellenberg bios = bios->bi_next; 116345bb912bSLars Ellenberg bio->bi_next = NULL; 116445bb912bSLars Ellenberg 116545bb912bSLars Ellenberg drbd_generic_make_request(mdev, fault_type, bio); 116645bb912bSLars Ellenberg } while (bios); 116745bb912bSLars Ellenberg return 0; 116845bb912bSLars Ellenberg 116945bb912bSLars Ellenberg fail: 117045bb912bSLars Ellenberg while (bios) { 117145bb912bSLars Ellenberg bio = bios; 117245bb912bSLars Ellenberg bios = bios->bi_next; 117345bb912bSLars Ellenberg bio_put(bio); 117445bb912bSLars Ellenberg } 117510f6d992SLars Ellenberg return err; 117645bb912bSLars Ellenberg } 117745bb912bSLars Ellenberg 117802918be2SPhilipp Reisner static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) 1179b411b363SPhilipp Reisner { 11802451fc3bSPhilipp Reisner int rv; 118102918be2SPhilipp Reisner struct p_barrier *p = &mdev->data.rbuf.barrier; 1182b411b363SPhilipp Reisner struct drbd_epoch *epoch; 1183b411b363SPhilipp Reisner 1184b411b363SPhilipp Reisner inc_unacked(mdev); 1185b411b363SPhilipp Reisner 1186b411b363SPhilipp Reisner mdev->current_epoch->barrier_nr = p->barrier; 1187b411b363SPhilipp Reisner rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR); 1188b411b363SPhilipp Reisner 1189b411b363SPhilipp Reisner /* P_BARRIER_ACK may imply that the corresponding extent is dropped from 1190b411b363SPhilipp Reisner * the activity log, which means it would not be resynced in case the 1191b411b363SPhilipp Reisner * R_PRIMARY crashes now. 1192b411b363SPhilipp Reisner * Therefore we must send the barrier_ack after the barrier request was 1193b411b363SPhilipp Reisner * completed. */ 1194b411b363SPhilipp Reisner switch (mdev->write_ordering) { 1195b411b363SPhilipp Reisner case WO_none: 1196b411b363SPhilipp Reisner if (rv == FE_RECYCLED) 119781e84650SAndreas Gruenbacher return true; 1198b411b363SPhilipp Reisner 1199b411b363SPhilipp Reisner /* receiver context, in the writeout path of the other node. 
1200b411b363SPhilipp Reisner * avoid potential distributed deadlock */ 1201b411b363SPhilipp Reisner epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO); 12022451fc3bSPhilipp Reisner if (epoch) 12032451fc3bSPhilipp Reisner break; 12042451fc3bSPhilipp Reisner else 1205b411b363SPhilipp Reisner dev_warn(DEV, "Allocation of an epoch failed, slowing down\n"); 12062451fc3bSPhilipp Reisner /* Fall through */ 12072451fc3bSPhilipp Reisner 12082451fc3bSPhilipp Reisner case WO_bdev_flush: 12092451fc3bSPhilipp Reisner case WO_drain_io: 1210b411b363SPhilipp Reisner drbd_wait_ee_list_empty(mdev, &mdev->active_ee); 12112451fc3bSPhilipp Reisner drbd_flush(mdev); 12122451fc3bSPhilipp Reisner 12132451fc3bSPhilipp Reisner if (atomic_read(&mdev->current_epoch->epoch_size)) { 12142451fc3bSPhilipp Reisner epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO); 12152451fc3bSPhilipp Reisner if (epoch) 12162451fc3bSPhilipp Reisner break; 1217b411b363SPhilipp Reisner } 1218b411b363SPhilipp Reisner 12192451fc3bSPhilipp Reisner epoch = mdev->current_epoch; 12202451fc3bSPhilipp Reisner wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0); 12212451fc3bSPhilipp Reisner 12222451fc3bSPhilipp Reisner D_ASSERT(atomic_read(&epoch->active) == 0); 12232451fc3bSPhilipp Reisner D_ASSERT(epoch->flags == 0); 1224b411b363SPhilipp Reisner 122581e84650SAndreas Gruenbacher return true; 12262451fc3bSPhilipp Reisner default: 12272451fc3bSPhilipp Reisner dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering); 122881e84650SAndreas Gruenbacher return false; 1229b411b363SPhilipp Reisner } 1230b411b363SPhilipp Reisner 1231b411b363SPhilipp Reisner epoch->flags = 0; 1232b411b363SPhilipp Reisner atomic_set(&epoch->epoch_size, 0); 1233b411b363SPhilipp Reisner atomic_set(&epoch->active, 0); 1234b411b363SPhilipp Reisner 1235b411b363SPhilipp Reisner spin_lock(&mdev->epoch_lock); 1236b411b363SPhilipp Reisner if (atomic_read(&mdev->current_epoch->epoch_size)) { 1237b411b363SPhilipp Reisner list_add(&epoch->list, &mdev->current_epoch->list); 1238b411b363SPhilipp Reisner mdev->current_epoch = epoch; 1239b411b363SPhilipp Reisner mdev->epochs++; 1240b411b363SPhilipp Reisner } else { 1241b411b363SPhilipp Reisner /* The current_epoch got recycled while we allocated this one... */ 1242b411b363SPhilipp Reisner kfree(epoch); 1243b411b363SPhilipp Reisner } 1244b411b363SPhilipp Reisner spin_unlock(&mdev->epoch_lock); 1245b411b363SPhilipp Reisner 124681e84650SAndreas Gruenbacher return true; 1247b411b363SPhilipp Reisner } 1248b411b363SPhilipp Reisner 1249b411b363SPhilipp Reisner /* used from receive_RSDataReply (recv_resync_read) 1250b411b363SPhilipp Reisner * and from receive_Data */ 1251b411b363SPhilipp Reisner static struct drbd_epoch_entry * 1252b411b363SPhilipp Reisner read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __must_hold(local) 1253b411b363SPhilipp Reisner { 12546666032aSLars Ellenberg const sector_t capacity = drbd_get_capacity(mdev->this_bdev); 1255b411b363SPhilipp Reisner struct drbd_epoch_entry *e; 1256b411b363SPhilipp Reisner struct page *page; 125745bb912bSLars Ellenberg int dgs, ds, rr; 1258b411b363SPhilipp Reisner void *dig_in = mdev->int_dig_in; 1259b411b363SPhilipp Reisner void *dig_vv = mdev->int_dig_vv; 12606b4388acSPhilipp Reisner unsigned long *data; 1261b411b363SPhilipp Reisner 1262b411b363SPhilipp Reisner dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ? 
1263b411b363SPhilipp Reisner crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
1264b411b363SPhilipp Reisner 
1265b411b363SPhilipp Reisner if (dgs) {
1266b411b363SPhilipp Reisner rr = drbd_recv(mdev, dig_in, dgs);
1267b411b363SPhilipp Reisner if (rr != dgs) {
12680ddc5549SLars Ellenberg if (!signal_pending(current))
12690ddc5549SLars Ellenberg dev_warn(DEV,
12700ddc5549SLars Ellenberg "short read receiving data digest: read %d expected %d\n",
1271b411b363SPhilipp Reisner rr, dgs);
1272b411b363SPhilipp Reisner return NULL;
1273b411b363SPhilipp Reisner }
1274b411b363SPhilipp Reisner }
1275b411b363SPhilipp Reisner 
1276b411b363SPhilipp Reisner data_size -= dgs;
1277b411b363SPhilipp Reisner 
1278b411b363SPhilipp Reisner ERR_IF(data_size & 0x1ff) return NULL;
12791816a2b4SLars Ellenberg ERR_IF(data_size > DRBD_MAX_BIO_SIZE) return NULL;
1280b411b363SPhilipp Reisner 
12816666032aSLars Ellenberg /* even though we trust our peer,
12826666032aSLars Ellenberg * we sometimes have to double check. */
12836666032aSLars Ellenberg if (sector + (data_size>>9) > capacity) {
1284fdda6544SLars Ellenberg dev_err(DEV, "request from peer beyond end of local disk: "
1285fdda6544SLars Ellenberg "capacity: %llus < sector: %llus + size: %u\n",
12866666032aSLars Ellenberg (unsigned long long)capacity,
12876666032aSLars Ellenberg (unsigned long long)sector, data_size);
12886666032aSLars Ellenberg return NULL;
12896666032aSLars Ellenberg }
12906666032aSLars Ellenberg 
1291b411b363SPhilipp Reisner /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
1292b411b363SPhilipp Reisner * "criss-cross" setup, that might cause write-out on some other DRBD,
1293b411b363SPhilipp Reisner * which in turn might block on the other node at this very place. */
1294b411b363SPhilipp Reisner e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);
1295b411b363SPhilipp Reisner if (!e)
1296b411b363SPhilipp Reisner return NULL;
129745bb912bSLars Ellenberg 
1298a73ff323SLars Ellenberg if (!data_size)
1299a73ff323SLars Ellenberg return e;
1300a73ff323SLars Ellenberg 
1301b411b363SPhilipp Reisner ds = data_size;
130245bb912bSLars Ellenberg page = e->pages;
130345bb912bSLars Ellenberg page_chain_for_each(page) {
130445bb912bSLars Ellenberg unsigned len = min_t(int, ds, PAGE_SIZE);
13056b4388acSPhilipp Reisner data = kmap(page);
130645bb912bSLars Ellenberg rr = drbd_recv(mdev, data, len);
13070cf9d27eSAndreas Gruenbacher if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) {
13086b4388acSPhilipp Reisner dev_err(DEV, "Fault injection: Corrupting data on receive\n");
13096b4388acSPhilipp Reisner data[0] = data[0] ^ (unsigned long)-1;
13106b4388acSPhilipp Reisner }
1311b411b363SPhilipp Reisner kunmap(page);
131245bb912bSLars Ellenberg if (rr != len) {
1313b411b363SPhilipp Reisner drbd_free_ee(mdev, e);
13140ddc5549SLars Ellenberg if (!signal_pending(current))
1315b411b363SPhilipp Reisner dev_warn(DEV, "short read receiving data: read %d expected %d\n",
131645bb912bSLars Ellenberg rr, len);
1317b411b363SPhilipp Reisner return NULL;
1318b411b363SPhilipp Reisner }
1319b411b363SPhilipp Reisner ds -= rr;
1320b411b363SPhilipp Reisner }
1321b411b363SPhilipp Reisner 
1322b411b363SPhilipp Reisner if (dgs) {
132345bb912bSLars Ellenberg drbd_csum_ee(mdev, mdev->integrity_r_tfm, e, dig_vv);
1324b411b363SPhilipp Reisner if (memcmp(dig_in, dig_vv, dgs)) {
1325470be44aSLars Ellenberg dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
1326470be44aSLars Ellenberg (unsigned long long)sector, data_size);
1327b411b363SPhilipp Reisner drbd_bcast_ee(mdev,
"digest failed", 1328b411b363SPhilipp Reisner dgs, dig_in, dig_vv, e); 1329b411b363SPhilipp Reisner drbd_free_ee(mdev, e); 1330b411b363SPhilipp Reisner return NULL; 1331b411b363SPhilipp Reisner } 1332b411b363SPhilipp Reisner } 1333b411b363SPhilipp Reisner mdev->recv_cnt += data_size>>9; 1334b411b363SPhilipp Reisner return e; 1335b411b363SPhilipp Reisner } 1336b411b363SPhilipp Reisner 1337b411b363SPhilipp Reisner /* drbd_drain_block() just takes a data block 1338b411b363SPhilipp Reisner * out of the socket input buffer, and discards it. 1339b411b363SPhilipp Reisner */ 1340b411b363SPhilipp Reisner static int drbd_drain_block(struct drbd_conf *mdev, int data_size) 1341b411b363SPhilipp Reisner { 1342b411b363SPhilipp Reisner struct page *page; 1343b411b363SPhilipp Reisner int rr, rv = 1; 1344b411b363SPhilipp Reisner void *data; 1345b411b363SPhilipp Reisner 1346c3470cdeSLars Ellenberg if (!data_size) 134781e84650SAndreas Gruenbacher return true; 1348c3470cdeSLars Ellenberg 134945bb912bSLars Ellenberg page = drbd_pp_alloc(mdev, 1, 1); 1350b411b363SPhilipp Reisner 1351b411b363SPhilipp Reisner data = kmap(page); 1352b411b363SPhilipp Reisner while (data_size) { 1353b411b363SPhilipp Reisner rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE)); 1354b411b363SPhilipp Reisner if (rr != min_t(int, data_size, PAGE_SIZE)) { 1355b411b363SPhilipp Reisner rv = 0; 13560ddc5549SLars Ellenberg if (!signal_pending(current)) 13570ddc5549SLars Ellenberg dev_warn(DEV, 13580ddc5549SLars Ellenberg "short read receiving data: read %d expected %d\n", 1359b411b363SPhilipp Reisner rr, min_t(int, data_size, PAGE_SIZE)); 1360b411b363SPhilipp Reisner break; 1361b411b363SPhilipp Reisner } 1362b411b363SPhilipp Reisner data_size -= rr; 1363b411b363SPhilipp Reisner } 1364b411b363SPhilipp Reisner kunmap(page); 1365435f0740SLars Ellenberg drbd_pp_free(mdev, page, 0); 1366b411b363SPhilipp Reisner return rv; 1367b411b363SPhilipp Reisner } 1368b411b363SPhilipp Reisner 1369b411b363SPhilipp Reisner static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req, 1370b411b363SPhilipp Reisner sector_t sector, int data_size) 1371b411b363SPhilipp Reisner { 1372b411b363SPhilipp Reisner struct bio_vec *bvec; 1373b411b363SPhilipp Reisner struct bio *bio; 1374b411b363SPhilipp Reisner int dgs, rr, i, expect; 1375b411b363SPhilipp Reisner void *dig_in = mdev->int_dig_in; 1376b411b363SPhilipp Reisner void *dig_vv = mdev->int_dig_vv; 1377b411b363SPhilipp Reisner 1378b411b363SPhilipp Reisner dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ? 1379b411b363SPhilipp Reisner crypto_hash_digestsize(mdev->integrity_r_tfm) : 0; 1380b411b363SPhilipp Reisner 1381b411b363SPhilipp Reisner if (dgs) { 1382b411b363SPhilipp Reisner rr = drbd_recv(mdev, dig_in, dgs); 1383b411b363SPhilipp Reisner if (rr != dgs) { 13840ddc5549SLars Ellenberg if (!signal_pending(current)) 13850ddc5549SLars Ellenberg dev_warn(DEV, 13860ddc5549SLars Ellenberg "short read receiving data reply digest: read %d expected %d\n", 1387b411b363SPhilipp Reisner rr, dgs); 1388b411b363SPhilipp Reisner return 0; 1389b411b363SPhilipp Reisner } 1390b411b363SPhilipp Reisner } 1391b411b363SPhilipp Reisner 1392b411b363SPhilipp Reisner data_size -= dgs; 1393b411b363SPhilipp Reisner 1394b411b363SPhilipp Reisner /* optimistically update recv_cnt. if receiving fails below, 1395b411b363SPhilipp Reisner * we disconnect anyways, and counters will be reset. 
*/ 1396b411b363SPhilipp Reisner mdev->recv_cnt += data_size>>9; 1397b411b363SPhilipp Reisner 1398b411b363SPhilipp Reisner bio = req->master_bio; 1399b411b363SPhilipp Reisner D_ASSERT(sector == bio->bi_sector); 1400b411b363SPhilipp Reisner 1401b411b363SPhilipp Reisner bio_for_each_segment(bvec, bio, i) { 1402b411b363SPhilipp Reisner expect = min_t(int, data_size, bvec->bv_len); 1403b411b363SPhilipp Reisner rr = drbd_recv(mdev, 1404b411b363SPhilipp Reisner kmap(bvec->bv_page)+bvec->bv_offset, 1405b411b363SPhilipp Reisner expect); 1406b411b363SPhilipp Reisner kunmap(bvec->bv_page); 1407b411b363SPhilipp Reisner if (rr != expect) { 14080ddc5549SLars Ellenberg if (!signal_pending(current)) 1409b411b363SPhilipp Reisner dev_warn(DEV, "short read receiving data reply: " 1410b411b363SPhilipp Reisner "read %d expected %d\n", 1411b411b363SPhilipp Reisner rr, expect); 1412b411b363SPhilipp Reisner return 0; 1413b411b363SPhilipp Reisner } 1414b411b363SPhilipp Reisner data_size -= rr; 1415b411b363SPhilipp Reisner } 1416b411b363SPhilipp Reisner 1417b411b363SPhilipp Reisner if (dgs) { 141845bb912bSLars Ellenberg drbd_csum_bio(mdev, mdev->integrity_r_tfm, bio, dig_vv); 1419b411b363SPhilipp Reisner if (memcmp(dig_in, dig_vv, dgs)) { 1420b411b363SPhilipp Reisner dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n"); 1421b411b363SPhilipp Reisner return 0; 1422b411b363SPhilipp Reisner } 1423b411b363SPhilipp Reisner } 1424b411b363SPhilipp Reisner 1425b411b363SPhilipp Reisner D_ASSERT(data_size == 0); 1426b411b363SPhilipp Reisner return 1; 1427b411b363SPhilipp Reisner } 1428b411b363SPhilipp Reisner 1429b411b363SPhilipp Reisner /* e_end_resync_block() is called via 1430b411b363SPhilipp Reisner * drbd_process_done_ee() by asender only */ 1431b411b363SPhilipp Reisner static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int unused) 1432b411b363SPhilipp Reisner { 1433b411b363SPhilipp Reisner struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w; 1434b411b363SPhilipp Reisner sector_t sector = e->sector; 1435b411b363SPhilipp Reisner int ok; 1436b411b363SPhilipp Reisner 143724c4830cSBart Van Assche D_ASSERT(hlist_unhashed(&e->collision)); 1438b411b363SPhilipp Reisner 143945bb912bSLars Ellenberg if (likely((e->flags & EE_WAS_ERROR) == 0)) { 1440b411b363SPhilipp Reisner drbd_set_in_sync(mdev, sector, e->size); 1441b411b363SPhilipp Reisner ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, e); 1442b411b363SPhilipp Reisner } else { 1443b411b363SPhilipp Reisner /* Record failure to sync */ 1444b411b363SPhilipp Reisner drbd_rs_failed_io(mdev, sector, e->size); 1445b411b363SPhilipp Reisner 1446b411b363SPhilipp Reisner ok = drbd_send_ack(mdev, P_NEG_ACK, e); 1447b411b363SPhilipp Reisner } 1448b411b363SPhilipp Reisner dec_unacked(mdev); 1449b411b363SPhilipp Reisner 1450b411b363SPhilipp Reisner return ok; 1451b411b363SPhilipp Reisner } 1452b411b363SPhilipp Reisner 1453b411b363SPhilipp Reisner static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local) 1454b411b363SPhilipp Reisner { 1455b411b363SPhilipp Reisner struct drbd_epoch_entry *e; 1456b411b363SPhilipp Reisner 1457b411b363SPhilipp Reisner e = read_in_block(mdev, ID_SYNCER, sector, data_size); 145845bb912bSLars Ellenberg if (!e) 145945bb912bSLars Ellenberg goto fail; 1460b411b363SPhilipp Reisner 1461b411b363SPhilipp Reisner dec_rs_pending(mdev); 1462b411b363SPhilipp Reisner 1463b411b363SPhilipp Reisner inc_unacked(mdev); 1464b411b363SPhilipp Reisner /* corresponding dec_unacked() in e_end_resync_block() 
1465b411b363SPhilipp Reisner * respective _drbd_clear_done_ee */ 1466b411b363SPhilipp Reisner 146745bb912bSLars Ellenberg e->w.cb = e_end_resync_block; 146845bb912bSLars Ellenberg 1469b411b363SPhilipp Reisner spin_lock_irq(&mdev->req_lock); 1470b411b363SPhilipp Reisner list_add(&e->w.list, &mdev->sync_ee); 1471b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 1472b411b363SPhilipp Reisner 14730f0601f4SLars Ellenberg atomic_add(data_size >> 9, &mdev->rs_sect_ev); 147445bb912bSLars Ellenberg if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0) 147581e84650SAndreas Gruenbacher return true; 147645bb912bSLars Ellenberg 147710f6d992SLars Ellenberg /* don't care for the reason here */ 147810f6d992SLars Ellenberg dev_err(DEV, "submit failed, triggering re-connect\n"); 147922cc37a9SLars Ellenberg spin_lock_irq(&mdev->req_lock); 148022cc37a9SLars Ellenberg list_del(&e->w.list); 148122cc37a9SLars Ellenberg spin_unlock_irq(&mdev->req_lock); 148222cc37a9SLars Ellenberg 148345bb912bSLars Ellenberg drbd_free_ee(mdev, e); 148445bb912bSLars Ellenberg fail: 148545bb912bSLars Ellenberg put_ldev(mdev); 148681e84650SAndreas Gruenbacher return false; 1487b411b363SPhilipp Reisner } 1488b411b363SPhilipp Reisner 148902918be2SPhilipp Reisner static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) 1490b411b363SPhilipp Reisner { 1491b411b363SPhilipp Reisner struct drbd_request *req; 1492b411b363SPhilipp Reisner sector_t sector; 1493b411b363SPhilipp Reisner int ok; 149402918be2SPhilipp Reisner struct p_data *p = &mdev->data.rbuf.data; 1495b411b363SPhilipp Reisner 1496b411b363SPhilipp Reisner sector = be64_to_cpu(p->sector); 1497b411b363SPhilipp Reisner 1498b411b363SPhilipp Reisner spin_lock_irq(&mdev->req_lock); 1499b411b363SPhilipp Reisner req = _ar_id_to_req(mdev, p->block_id, sector); 1500b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 1501b411b363SPhilipp Reisner if (unlikely(!req)) { 1502b411b363SPhilipp Reisner dev_err(DEV, "Got a corrupt block_id/sector pair(1).\n"); 150381e84650SAndreas Gruenbacher return false; 1504b411b363SPhilipp Reisner } 1505b411b363SPhilipp Reisner 150624c4830cSBart Van Assche /* hlist_del(&req->collision) is done in _req_may_be_done, to avoid 1507b411b363SPhilipp Reisner * special casing it there for the various failure cases. 1508b411b363SPhilipp Reisner * still no race with drbd_fail_pending_reads */ 1509b411b363SPhilipp Reisner ok = recv_dless_read(mdev, req, sector, data_size); 1510b411b363SPhilipp Reisner 1511b411b363SPhilipp Reisner if (ok) 1512b411b363SPhilipp Reisner req_mod(req, data_received); 1513b411b363SPhilipp Reisner /* else: nothing. handled from drbd_disconnect... 
1514b411b363SPhilipp Reisner * I don't think we may complete this just yet 1515b411b363SPhilipp Reisner * in case we are "on-disconnect: freeze" */ 1516b411b363SPhilipp Reisner 1517b411b363SPhilipp Reisner return ok; 1518b411b363SPhilipp Reisner } 1519b411b363SPhilipp Reisner 152002918be2SPhilipp Reisner static int receive_RSDataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) 1521b411b363SPhilipp Reisner { 1522b411b363SPhilipp Reisner sector_t sector; 1523b411b363SPhilipp Reisner int ok; 152402918be2SPhilipp Reisner struct p_data *p = &mdev->data.rbuf.data; 1525b411b363SPhilipp Reisner 1526b411b363SPhilipp Reisner sector = be64_to_cpu(p->sector); 1527b411b363SPhilipp Reisner D_ASSERT(p->block_id == ID_SYNCER); 1528b411b363SPhilipp Reisner 1529b411b363SPhilipp Reisner if (get_ldev(mdev)) { 1530b411b363SPhilipp Reisner /* data is submitted to disk within recv_resync_read. 1531b411b363SPhilipp Reisner * corresponding put_ldev done below on error, 1532b411b363SPhilipp Reisner * or in drbd_endio_write_sec. */ 1533b411b363SPhilipp Reisner ok = recv_resync_read(mdev, sector, data_size); 1534b411b363SPhilipp Reisner } else { 1535b411b363SPhilipp Reisner if (__ratelimit(&drbd_ratelimit_state)) 1536b411b363SPhilipp Reisner dev_err(DEV, "Can not write resync data to local disk.\n"); 1537b411b363SPhilipp Reisner 1538b411b363SPhilipp Reisner ok = drbd_drain_block(mdev, data_size); 1539b411b363SPhilipp Reisner 15402b2bf214SLars Ellenberg drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size); 1541b411b363SPhilipp Reisner } 1542b411b363SPhilipp Reisner 1543778f271dSPhilipp Reisner atomic_add(data_size >> 9, &mdev->rs_sect_in); 1544778f271dSPhilipp Reisner 1545b411b363SPhilipp Reisner return ok; 1546b411b363SPhilipp Reisner } 1547b411b363SPhilipp Reisner 1548b411b363SPhilipp Reisner /* e_end_block() is called via drbd_process_done_ee(). 1549b411b363SPhilipp Reisner * this means this function only runs in the asender thread 1550b411b363SPhilipp Reisner */ 1551b411b363SPhilipp Reisner static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel) 1552b411b363SPhilipp Reisner { 1553b411b363SPhilipp Reisner struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w; 1554b411b363SPhilipp Reisner sector_t sector = e->sector; 1555b411b363SPhilipp Reisner int ok = 1, pcmd; 1556b411b363SPhilipp Reisner 1557b411b363SPhilipp Reisner if (mdev->net_conf->wire_protocol == DRBD_PROT_C) { 155845bb912bSLars Ellenberg if (likely((e->flags & EE_WAS_ERROR) == 0)) { 1559b411b363SPhilipp Reisner pcmd = (mdev->state.conn >= C_SYNC_SOURCE && 1560b411b363SPhilipp Reisner mdev->state.conn <= C_PAUSED_SYNC_T && 1561b411b363SPhilipp Reisner e->flags & EE_MAY_SET_IN_SYNC) ? 1562b411b363SPhilipp Reisner P_RS_WRITE_ACK : P_WRITE_ACK; 1563b411b363SPhilipp Reisner ok &= drbd_send_ack(mdev, pcmd, e); 1564b411b363SPhilipp Reisner if (pcmd == P_RS_WRITE_ACK) 1565b411b363SPhilipp Reisner drbd_set_in_sync(mdev, sector, e->size); 1566b411b363SPhilipp Reisner } else { 1567b411b363SPhilipp Reisner ok = drbd_send_ack(mdev, P_NEG_ACK, e); 1568b411b363SPhilipp Reisner /* we expect it to be marked out of sync anyways... 1569b411b363SPhilipp Reisner * maybe assert this? */ 1570b411b363SPhilipp Reisner } 1571b411b363SPhilipp Reisner dec_unacked(mdev); 1572b411b363SPhilipp Reisner } 1573b411b363SPhilipp Reisner /* we delete from the conflict detection hash _after_ we sent out the 1574b411b363SPhilipp Reisner * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. 
*/ 1575b411b363SPhilipp Reisner if (mdev->net_conf->two_primaries) { 1576b411b363SPhilipp Reisner spin_lock_irq(&mdev->req_lock); 157724c4830cSBart Van Assche D_ASSERT(!hlist_unhashed(&e->collision)); 157824c4830cSBart Van Assche hlist_del_init(&e->collision); 1579b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 1580b411b363SPhilipp Reisner } else { 158124c4830cSBart Van Assche D_ASSERT(hlist_unhashed(&e->collision)); 1582b411b363SPhilipp Reisner } 1583b411b363SPhilipp Reisner 1584b411b363SPhilipp Reisner drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0)); 1585b411b363SPhilipp Reisner 1586b411b363SPhilipp Reisner return ok; 1587b411b363SPhilipp Reisner } 1588b411b363SPhilipp Reisner 1589b411b363SPhilipp Reisner static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int unused) 1590b411b363SPhilipp Reisner { 1591b411b363SPhilipp Reisner struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w; 1592b411b363SPhilipp Reisner int ok = 1; 1593b411b363SPhilipp Reisner 1594b411b363SPhilipp Reisner D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C); 1595b411b363SPhilipp Reisner ok = drbd_send_ack(mdev, P_DISCARD_ACK, e); 1596b411b363SPhilipp Reisner 1597b411b363SPhilipp Reisner spin_lock_irq(&mdev->req_lock); 159824c4830cSBart Van Assche D_ASSERT(!hlist_unhashed(&e->collision)); 159924c4830cSBart Van Assche hlist_del_init(&e->collision); 1600b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 1601b411b363SPhilipp Reisner 1602b411b363SPhilipp Reisner dec_unacked(mdev); 1603b411b363SPhilipp Reisner 1604b411b363SPhilipp Reisner return ok; 1605b411b363SPhilipp Reisner } 1606b411b363SPhilipp Reisner 1607b6a370baSPhilipp Reisner static bool overlapping_resync_write(struct drbd_conf *mdev, struct drbd_epoch_entry *data_e) 1608b6a370baSPhilipp Reisner { 1609b6a370baSPhilipp Reisner 1610b6a370baSPhilipp Reisner struct drbd_epoch_entry *rs_e; 1611b6a370baSPhilipp Reisner bool rv = 0; 1612b6a370baSPhilipp Reisner 1613b6a370baSPhilipp Reisner spin_lock_irq(&mdev->req_lock); 1614b6a370baSPhilipp Reisner list_for_each_entry(rs_e, &mdev->sync_ee, w.list) { 1615b6a370baSPhilipp Reisner if (overlaps(data_e->sector, data_e->size, rs_e->sector, rs_e->size)) { 1616b6a370baSPhilipp Reisner rv = 1; 1617b6a370baSPhilipp Reisner break; 1618b6a370baSPhilipp Reisner } 1619b6a370baSPhilipp Reisner } 1620b6a370baSPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 1621b6a370baSPhilipp Reisner 1622b6a370baSPhilipp Reisner return rv; 1623b6a370baSPhilipp Reisner } 1624b6a370baSPhilipp Reisner 1625b411b363SPhilipp Reisner /* Called from receive_Data. 1626b411b363SPhilipp Reisner * Synchronize packets on sock with packets on msock. 1627b411b363SPhilipp Reisner * 1628b411b363SPhilipp Reisner * This is here so even when a P_DATA packet traveling via sock overtook an Ack 1629b411b363SPhilipp Reisner * packet traveling on msock, they are still processed in the order they have 1630b411b363SPhilipp Reisner * been sent. 1631b411b363SPhilipp Reisner * 1632b411b363SPhilipp Reisner * Note: we don't care for Ack packets overtaking P_DATA packets. 1633b411b363SPhilipp Reisner * 1634b411b363SPhilipp Reisner * In case packet_seq is larger than mdev->peer_seq number, there are 1635b411b363SPhilipp Reisner * outstanding packets on the msock. We wait for them to arrive. 1636b411b363SPhilipp Reisner * In case we are the logically next packet, we update mdev->peer_seq 1637b411b363SPhilipp Reisner * ourselves. Correctly handles 32bit wrap around. 
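 *
 * Worked example of the wrap-around handling (assuming seq_le() compares
 * sequence numbers via a signed 32bit difference): with
 * mdev->peer_seq == 0xffffffff, the logically next packet_seq is 0;
 * seq_le(0, 0xffffffff + 1) == seq_le(0, 0) holds, so we do not block,
 * and the mdev->peer_seq++ below wraps peer_seq to 0 as well.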
1638b411b363SPhilipp Reisner *
1639b411b363SPhilipp Reisner * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
1640b411b363SPhilipp Reisner * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
1641b411b363SPhilipp Reisner * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
1642b411b363SPhilipp Reisner * 1<<11 == 2048 seconds aka ages for the 32bit wrap around...
1643b411b363SPhilipp Reisner *
1644b411b363SPhilipp Reisner * returns 0 if we may process the packet,
1645b411b363SPhilipp Reisner * -ERESTARTSYS if we were interrupted (by disconnect signal). */
1646b411b363SPhilipp Reisner static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq)
1647b411b363SPhilipp Reisner {
1648b411b363SPhilipp Reisner DEFINE_WAIT(wait);
1649b411b363SPhilipp Reisner unsigned int p_seq;
1650b411b363SPhilipp Reisner long timeout;
1651b411b363SPhilipp Reisner int ret = 0;
1652b411b363SPhilipp Reisner spin_lock(&mdev->peer_seq_lock);
1653b411b363SPhilipp Reisner for (;;) {
1654b411b363SPhilipp Reisner prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
1655b411b363SPhilipp Reisner if (seq_le(packet_seq, mdev->peer_seq+1))
1656b411b363SPhilipp Reisner break;
1657b411b363SPhilipp Reisner if (signal_pending(current)) {
1658b411b363SPhilipp Reisner ret = -ERESTARTSYS;
1659b411b363SPhilipp Reisner break;
1660b411b363SPhilipp Reisner }
1661b411b363SPhilipp Reisner p_seq = mdev->peer_seq;
1662b411b363SPhilipp Reisner spin_unlock(&mdev->peer_seq_lock);
1663b411b363SPhilipp Reisner timeout = schedule_timeout(30*HZ);
1664b411b363SPhilipp Reisner spin_lock(&mdev->peer_seq_lock);
1665b411b363SPhilipp Reisner if (timeout == 0 && p_seq == mdev->peer_seq) {
1666b411b363SPhilipp Reisner ret = -ETIMEDOUT;
1667b411b363SPhilipp Reisner dev_err(DEV, "ASSERT FAILED waited 30 seconds for sequence update, forcing reconnect\n");
1668b411b363SPhilipp Reisner break;
1669b411b363SPhilipp Reisner }
1670b411b363SPhilipp Reisner }
1671b411b363SPhilipp Reisner finish_wait(&mdev->seq_wait, &wait);
1672b411b363SPhilipp Reisner if (mdev->peer_seq+1 == packet_seq)
1673b411b363SPhilipp Reisner mdev->peer_seq++;
1674b411b363SPhilipp Reisner spin_unlock(&mdev->peer_seq_lock);
1675b411b363SPhilipp Reisner return ret;
1676b411b363SPhilipp Reisner }
1677b411b363SPhilipp Reisner 
1678688593c5SLars Ellenberg /* see also bio_flags_to_wire()
1679688593c5SLars Ellenberg * DRBD_REQ_*, because we need to semantically map the flags to data packet
1680688593c5SLars Ellenberg * flags and back. We may replicate to other kernel versions. */
1681688593c5SLars Ellenberg static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
168276d2e7ecSPhilipp Reisner {
168376d2e7ecSPhilipp Reisner return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
168476d2e7ecSPhilipp Reisner (dpf & DP_FUA ? REQ_FUA : 0) |
1685688593c5SLars Ellenberg (dpf & DP_FLUSH ? REQ_FLUSH : 0) |
168676d2e7ecSPhilipp Reisner (dpf & DP_DISCARD ?
REQ_DISCARD : 0); 168776d2e7ecSPhilipp Reisner } 168876d2e7ecSPhilipp Reisner 1689b411b363SPhilipp Reisner /* mirrored write */ 169002918be2SPhilipp Reisner static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) 1691b411b363SPhilipp Reisner { 1692b411b363SPhilipp Reisner sector_t sector; 1693b411b363SPhilipp Reisner struct drbd_epoch_entry *e; 169402918be2SPhilipp Reisner struct p_data *p = &mdev->data.rbuf.data; 1695b411b363SPhilipp Reisner int rw = WRITE; 1696b411b363SPhilipp Reisner u32 dp_flags; 1697b411b363SPhilipp Reisner 1698b411b363SPhilipp Reisner if (!get_ldev(mdev)) { 1699b411b363SPhilipp Reisner spin_lock(&mdev->peer_seq_lock); 1700b411b363SPhilipp Reisner if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num)) 1701b411b363SPhilipp Reisner mdev->peer_seq++; 1702b411b363SPhilipp Reisner spin_unlock(&mdev->peer_seq_lock); 1703b411b363SPhilipp Reisner 17042b2bf214SLars Ellenberg drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size); 1705b411b363SPhilipp Reisner atomic_inc(&mdev->current_epoch->epoch_size); 1706b411b363SPhilipp Reisner return drbd_drain_block(mdev, data_size); 1707b411b363SPhilipp Reisner } 1708b411b363SPhilipp Reisner 1709b411b363SPhilipp Reisner /* get_ldev(mdev) successful. 1710b411b363SPhilipp Reisner * Corresponding put_ldev done either below (on various errors), 1711b411b363SPhilipp Reisner * or in drbd_endio_write_sec, if we successfully submit the data at 1712b411b363SPhilipp Reisner * the end of this function. */ 1713b411b363SPhilipp Reisner 1714b411b363SPhilipp Reisner sector = be64_to_cpu(p->sector); 1715b411b363SPhilipp Reisner e = read_in_block(mdev, p->block_id, sector, data_size); 1716b411b363SPhilipp Reisner if (!e) { 1717b411b363SPhilipp Reisner put_ldev(mdev); 171881e84650SAndreas Gruenbacher return false; 1719b411b363SPhilipp Reisner } 1720b411b363SPhilipp Reisner 1721b411b363SPhilipp Reisner e->w.cb = e_end_block; 1722b411b363SPhilipp Reisner 1723688593c5SLars Ellenberg dp_flags = be32_to_cpu(p->dp_flags); 1724688593c5SLars Ellenberg rw |= wire_flags_to_bio(mdev, dp_flags); 1725a73ff323SLars Ellenberg if (e->pages == NULL) { 1726a73ff323SLars Ellenberg D_ASSERT(e->size == 0); 1727a73ff323SLars Ellenberg D_ASSERT(dp_flags & DP_FLUSH); 1728a73ff323SLars Ellenberg } 1729688593c5SLars Ellenberg 1730688593c5SLars Ellenberg if (dp_flags & DP_MAY_SET_IN_SYNC) 1731688593c5SLars Ellenberg e->flags |= EE_MAY_SET_IN_SYNC; 1732688593c5SLars Ellenberg 1733b411b363SPhilipp Reisner spin_lock(&mdev->epoch_lock); 1734b411b363SPhilipp Reisner e->epoch = mdev->current_epoch; 1735b411b363SPhilipp Reisner atomic_inc(&e->epoch->epoch_size); 1736b411b363SPhilipp Reisner atomic_inc(&e->epoch->active); 1737b411b363SPhilipp Reisner spin_unlock(&mdev->epoch_lock); 1738b411b363SPhilipp Reisner 1739b411b363SPhilipp Reisner /* I'm the receiver, I do hold a net_cnt reference. 
*/
1740b411b363SPhilipp Reisner if (!mdev->net_conf->two_primaries) {
1741b411b363SPhilipp Reisner spin_lock_irq(&mdev->req_lock);
1742b411b363SPhilipp Reisner } else {
1743b411b363SPhilipp Reisner /* don't get the req_lock yet,
1744b411b363SPhilipp Reisner * we may sleep in drbd_wait_peer_seq */
1745b411b363SPhilipp Reisner const int size = e->size;
1746b411b363SPhilipp Reisner const int discard = test_bit(DISCARD_CONCURRENT, &mdev->flags);
1747b411b363SPhilipp Reisner DEFINE_WAIT(wait);
1748b411b363SPhilipp Reisner struct drbd_request *i;
1749b411b363SPhilipp Reisner struct hlist_node *n;
1750b411b363SPhilipp Reisner struct hlist_head *slot;
1751b411b363SPhilipp Reisner int first;
1752b411b363SPhilipp Reisner 
1753b411b363SPhilipp Reisner D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
1754b411b363SPhilipp Reisner BUG_ON(mdev->ee_hash == NULL);
1755b411b363SPhilipp Reisner BUG_ON(mdev->tl_hash == NULL);
1756b411b363SPhilipp Reisner 
1757b411b363SPhilipp Reisner /* conflict detection and handling:
1758b411b363SPhilipp Reisner * 1. wait on the sequence number,
1759b411b363SPhilipp Reisner * in case this data packet overtook ACK packets.
1760b411b363SPhilipp Reisner * 2. check our hash tables for conflicting requests.
1761b411b363SPhilipp Reisner * we only need to walk the tl_hash, since an ee cannot
1762b411b363SPhilipp Reisner * have a conflict with another ee: on the submitting
1763b411b363SPhilipp Reisner * node, the corresponding req had already been conflicting,
1764b411b363SPhilipp Reisner * and a conflicting req is never sent.
1765b411b363SPhilipp Reisner *
1766b411b363SPhilipp Reisner * Note: for two_primaries, we are protocol C,
1767b411b363SPhilipp Reisner * so there cannot be any request that is DONE
1768b411b363SPhilipp Reisner * but still on the transfer log.
1769b411b363SPhilipp Reisner *
1770b411b363SPhilipp Reisner * unconditionally add to the ee_hash.
1771b411b363SPhilipp Reisner *
1772b411b363SPhilipp Reisner * if no conflicting request is found:
1773b411b363SPhilipp Reisner * submit.
1774b411b363SPhilipp Reisner *
1775b411b363SPhilipp Reisner * if any conflicting request is found
1776b411b363SPhilipp Reisner * that has not yet been acked,
1777b411b363SPhilipp Reisner * AND I have the "discard concurrent writes" flag:
1778b411b363SPhilipp Reisner * queue (via done_ee) the P_DISCARD_ACK; OUT.
1779b411b363SPhilipp Reisner *
1780b411b363SPhilipp Reisner * if any conflicting request is found:
1781b411b363SPhilipp Reisner * block the receiver, waiting on misc_wait
1782b411b363SPhilipp Reisner * until no more conflicting requests are there,
1783b411b363SPhilipp Reisner * or we get interrupted (disconnect).
1784b411b363SPhilipp Reisner *
1785b411b363SPhilipp Reisner * we do not just write after local io completion of those
1786b411b363SPhilipp Reisner * requests, but only after req is done completely, i.e.
1787b411b363SPhilipp Reisner * we wait for the P_DISCARD_ACK to arrive!
1788b411b363SPhilipp Reisner *
1789b411b363SPhilipp Reisner * then proceed normally, i.e. submit.
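 *
 * The same rules as a compact decision table (informal recap of the
 * text above, not authoritative):
 *
 *	conflict found | unacked conflict | discard flag | action
 *	---------------+------------------+--------------+---------------------
 *	      no       |        -         |      -       | submit
 *	     yes       |       yes        |     set      | queue P_DISCARD_ACK
 *	     yes       |    otherwise     |      -       | wait on misc_wait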
1790b411b363SPhilipp Reisner */ 1791b411b363SPhilipp Reisner if (drbd_wait_peer_seq(mdev, be32_to_cpu(p->seq_num))) 1792b411b363SPhilipp Reisner goto out_interrupted; 1793b411b363SPhilipp Reisner 1794b411b363SPhilipp Reisner spin_lock_irq(&mdev->req_lock); 1795b411b363SPhilipp Reisner 179624c4830cSBart Van Assche hlist_add_head(&e->collision, ee_hash_slot(mdev, sector)); 1797b411b363SPhilipp Reisner 1798b411b363SPhilipp Reisner #define OVERLAPS overlaps(i->sector, i->size, sector, size) 1799b411b363SPhilipp Reisner slot = tl_hash_slot(mdev, sector); 1800b411b363SPhilipp Reisner first = 1; 1801b411b363SPhilipp Reisner for (;;) { 1802b411b363SPhilipp Reisner int have_unacked = 0; 1803b411b363SPhilipp Reisner int have_conflict = 0; 1804b411b363SPhilipp Reisner prepare_to_wait(&mdev->misc_wait, &wait, 1805b411b363SPhilipp Reisner TASK_INTERRUPTIBLE); 180624c4830cSBart Van Assche hlist_for_each_entry(i, n, slot, collision) { 1807b411b363SPhilipp Reisner if (OVERLAPS) { 1808b411b363SPhilipp Reisner /* only ALERT on first iteration, 1809b411b363SPhilipp Reisner * we may be woken up early... */ 1810b411b363SPhilipp Reisner if (first) 1811b411b363SPhilipp Reisner dev_alert(DEV, "%s[%u] Concurrent local write detected!" 1812b411b363SPhilipp Reisner " new: %llus +%u; pending: %llus +%u\n", 1813b411b363SPhilipp Reisner current->comm, current->pid, 1814b411b363SPhilipp Reisner (unsigned long long)sector, size, 1815b411b363SPhilipp Reisner (unsigned long long)i->sector, i->size); 1816b411b363SPhilipp Reisner if (i->rq_state & RQ_NET_PENDING) 1817b411b363SPhilipp Reisner ++have_unacked; 1818b411b363SPhilipp Reisner ++have_conflict; 1819b411b363SPhilipp Reisner } 1820b411b363SPhilipp Reisner } 1821b411b363SPhilipp Reisner #undef OVERLAPS 1822b411b363SPhilipp Reisner if (!have_conflict) 1823b411b363SPhilipp Reisner break; 1824b411b363SPhilipp Reisner 1825b411b363SPhilipp Reisner /* Discard Ack only for the _first_ iteration */ 1826b411b363SPhilipp Reisner if (first && discard && have_unacked) { 1827b411b363SPhilipp Reisner dev_alert(DEV, "Concurrent write! [DISCARD BY FLAG] sec=%llus\n", 1828b411b363SPhilipp Reisner (unsigned long long)sector); 1829b411b363SPhilipp Reisner inc_unacked(mdev); 1830b411b363SPhilipp Reisner e->w.cb = e_send_discard_ack; 1831b411b363SPhilipp Reisner list_add_tail(&e->w.list, &mdev->done_ee); 1832b411b363SPhilipp Reisner 1833b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 1834b411b363SPhilipp Reisner 1835b411b363SPhilipp Reisner /* we could probably send that P_DISCARD_ACK ourselves, 1836b411b363SPhilipp Reisner * but I don't like the receiver using the msock */ 1837b411b363SPhilipp Reisner 1838b411b363SPhilipp Reisner put_ldev(mdev); 1839b411b363SPhilipp Reisner wake_asender(mdev); 1840b411b363SPhilipp Reisner finish_wait(&mdev->misc_wait, &wait); 184181e84650SAndreas Gruenbacher return true; 1842b411b363SPhilipp Reisner } 1843b411b363SPhilipp Reisner 1844b411b363SPhilipp Reisner if (signal_pending(current)) { 184524c4830cSBart Van Assche hlist_del_init(&e->collision); 1846b411b363SPhilipp Reisner 1847b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 1848b411b363SPhilipp Reisner 1849b411b363SPhilipp Reisner finish_wait(&mdev->misc_wait, &wait); 1850b411b363SPhilipp Reisner goto out_interrupted; 1851b411b363SPhilipp Reisner } 1852b411b363SPhilipp Reisner 1853b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 1854b411b363SPhilipp Reisner if (first) { 1855b411b363SPhilipp Reisner first = 0; 1856b411b363SPhilipp Reisner dev_alert(DEV, "Concurrent write! 
[W AFTERWARDS] " 1857b411b363SPhilipp Reisner "sec=%llus\n", (unsigned long long)sector); 1858b411b363SPhilipp Reisner } else if (discard) { 1859b411b363SPhilipp Reisner /* we had none on the first iteration. 1860b411b363SPhilipp Reisner * there must be none now. */ 1861b411b363SPhilipp Reisner D_ASSERT(have_unacked == 0); 1862b411b363SPhilipp Reisner } 1863b411b363SPhilipp Reisner schedule(); 1864b411b363SPhilipp Reisner spin_lock_irq(&mdev->req_lock); 1865b411b363SPhilipp Reisner } 1866b411b363SPhilipp Reisner finish_wait(&mdev->misc_wait, &wait); 1867b411b363SPhilipp Reisner } 1868b411b363SPhilipp Reisner 1869b411b363SPhilipp Reisner list_add(&e->w.list, &mdev->active_ee); 1870b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 1871b411b363SPhilipp Reisner 1872b6a370baSPhilipp Reisner if (mdev->state.conn == C_SYNC_TARGET) 1873b6a370baSPhilipp Reisner wait_event(mdev->ee_wait, !overlapping_resync_write(mdev, e)); 1874b6a370baSPhilipp Reisner 1875b411b363SPhilipp Reisner switch (mdev->net_conf->wire_protocol) { 1876b411b363SPhilipp Reisner case DRBD_PROT_C: 1877b411b363SPhilipp Reisner inc_unacked(mdev); 1878b411b363SPhilipp Reisner /* corresponding dec_unacked() in e_end_block() 1879b411b363SPhilipp Reisner * respective _drbd_clear_done_ee */ 1880b411b363SPhilipp Reisner break; 1881b411b363SPhilipp Reisner case DRBD_PROT_B: 1882b411b363SPhilipp Reisner /* I really don't like it that the receiver thread 1883b411b363SPhilipp Reisner * sends on the msock, but anyways */ 1884b411b363SPhilipp Reisner drbd_send_ack(mdev, P_RECV_ACK, e); 1885b411b363SPhilipp Reisner break; 1886b411b363SPhilipp Reisner case DRBD_PROT_A: 1887b411b363SPhilipp Reisner /* nothing to do */ 1888b411b363SPhilipp Reisner break; 1889b411b363SPhilipp Reisner } 1890b411b363SPhilipp Reisner 18916719fb03SLars Ellenberg if (mdev->state.pdsk < D_INCONSISTENT) { 1892b411b363SPhilipp Reisner /* In case we have the only disk of the cluster, */ 1893b411b363SPhilipp Reisner drbd_set_out_of_sync(mdev, e->sector, e->size); 1894b411b363SPhilipp Reisner e->flags |= EE_CALL_AL_COMPLETE_IO; 18956719fb03SLars Ellenberg e->flags &= ~EE_MAY_SET_IN_SYNC; 1896b411b363SPhilipp Reisner drbd_al_begin_io(mdev, e->sector); 1897b411b363SPhilipp Reisner } 1898b411b363SPhilipp Reisner 189945bb912bSLars Ellenberg if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0) 190081e84650SAndreas Gruenbacher return true; 1901b411b363SPhilipp Reisner 190210f6d992SLars Ellenberg /* don't care for the reason here */ 190310f6d992SLars Ellenberg dev_err(DEV, "submit failed, triggering re-connect\n"); 190422cc37a9SLars Ellenberg spin_lock_irq(&mdev->req_lock); 190522cc37a9SLars Ellenberg list_del(&e->w.list); 190624c4830cSBart Van Assche hlist_del_init(&e->collision); 190722cc37a9SLars Ellenberg spin_unlock_irq(&mdev->req_lock); 190822cc37a9SLars Ellenberg if (e->flags & EE_CALL_AL_COMPLETE_IO) 190922cc37a9SLars Ellenberg drbd_al_complete_io(mdev, e->sector); 191022cc37a9SLars Ellenberg 1911b411b363SPhilipp Reisner out_interrupted: 191210f6d992SLars Ellenberg drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + EV_CLEANUP); 1913b411b363SPhilipp Reisner put_ldev(mdev); 1914b411b363SPhilipp Reisner drbd_free_ee(mdev, e); 191581e84650SAndreas Gruenbacher return false; 1916b411b363SPhilipp Reisner } 1917b411b363SPhilipp Reisner 19180f0601f4SLars Ellenberg /* We may throttle resync, if the lower device seems to be busy, 19190f0601f4SLars Ellenberg * and current sync rate is above c_min_rate. 
19200f0601f4SLars Ellenberg *
19210f0601f4SLars Ellenberg * To decide whether or not the lower device is busy, we use a scheme similar
19220f0601f4SLars Ellenberg * to MD RAID is_mddev_idle(): if the partition stats reveal a "significant"
19230f0601f4SLars Ellenberg * amount (more than 64 sectors) of activity we cannot account for with our own
19240f0601f4SLars Ellenberg * resync activity, it obviously is "busy".
19250f0601f4SLars Ellenberg *
19260f0601f4SLars Ellenberg * The current sync rate used here is based on only the most recent two step
19270f0601f4SLars Ellenberg * marks, to have a short time average so we can react faster.
19280f0601f4SLars Ellenberg */
1929e3555d85SPhilipp Reisner int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
19300f0601f4SLars Ellenberg {
19310f0601f4SLars Ellenberg struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
19320f0601f4SLars Ellenberg unsigned long db, dt, dbdt;
1933e3555d85SPhilipp Reisner struct lc_element *tmp;
19340f0601f4SLars Ellenberg int curr_events;
19350f0601f4SLars Ellenberg int throttle = 0;
19360f0601f4SLars Ellenberg 
19370f0601f4SLars Ellenberg /* feature disabled? */
19380f0601f4SLars Ellenberg if (mdev->sync_conf.c_min_rate == 0)
19390f0601f4SLars Ellenberg return 0;
19400f0601f4SLars Ellenberg 
1941e3555d85SPhilipp Reisner spin_lock_irq(&mdev->al_lock);
1942e3555d85SPhilipp Reisner tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector));
1943e3555d85SPhilipp Reisner if (tmp) {
1944e3555d85SPhilipp Reisner struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
1945e3555d85SPhilipp Reisner if (test_bit(BME_PRIORITY, &bm_ext->flags)) {
1946e3555d85SPhilipp Reisner spin_unlock_irq(&mdev->al_lock);
1947e3555d85SPhilipp Reisner return 0;
1948e3555d85SPhilipp Reisner }
1949e3555d85SPhilipp Reisner /* Do not slow down if app IO is already waiting for this extent */
1950e3555d85SPhilipp Reisner }
1951e3555d85SPhilipp Reisner spin_unlock_irq(&mdev->al_lock);
1952e3555d85SPhilipp Reisner 
19530f0601f4SLars Ellenberg curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
19540f0601f4SLars Ellenberg (int)part_stat_read(&disk->part0, sectors[1]) -
19550f0601f4SLars Ellenberg atomic_read(&mdev->rs_sect_ev);
1956e3555d85SPhilipp Reisner 
19570f0601f4SLars Ellenberg if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
19580f0601f4SLars Ellenberg unsigned long rs_left;
19590f0601f4SLars Ellenberg int i;
19600f0601f4SLars Ellenberg 
19610f0601f4SLars Ellenberg mdev->rs_last_events = curr_events;
19620f0601f4SLars Ellenberg 
19630f0601f4SLars Ellenberg /* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
19640f0601f4SLars Ellenberg * approx.
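 *
 * Worked example with made-up numbers, assuming the usual 4 KiB of
 * storage per bitmap bit: if the selected step mark is dt = 6 seconds
 * old and db = rs_mark_left[i] - rs_left == 12288 bits, then
 * dbdt = Bit2KB(12288 / 6) = Bit2KB(2048) = 8192 KiB/s, which throttles
 * whenever c_min_rate is configured below 8192.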
*/ 19652649f080SLars Ellenberg i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS; 19662649f080SLars Ellenberg 19672649f080SLars Ellenberg if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T) 19682649f080SLars Ellenberg rs_left = mdev->ov_left; 19692649f080SLars Ellenberg else 19700f0601f4SLars Ellenberg rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed; 19710f0601f4SLars Ellenberg 19720f0601f4SLars Ellenberg dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ; 19730f0601f4SLars Ellenberg if (!dt) 19740f0601f4SLars Ellenberg dt++; 19750f0601f4SLars Ellenberg db = mdev->rs_mark_left[i] - rs_left; 19760f0601f4SLars Ellenberg dbdt = Bit2KB(db/dt); 19770f0601f4SLars Ellenberg 19780f0601f4SLars Ellenberg if (dbdt > mdev->sync_conf.c_min_rate) 19790f0601f4SLars Ellenberg throttle = 1; 19800f0601f4SLars Ellenberg } 19810f0601f4SLars Ellenberg return throttle; 19820f0601f4SLars Ellenberg } 19830f0601f4SLars Ellenberg 19840f0601f4SLars Ellenberg 198502918be2SPhilipp Reisner static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int digest_size) 1986b411b363SPhilipp Reisner { 1987b411b363SPhilipp Reisner sector_t sector; 1988b411b363SPhilipp Reisner const sector_t capacity = drbd_get_capacity(mdev->this_bdev); 1989b411b363SPhilipp Reisner struct drbd_epoch_entry *e; 1990b411b363SPhilipp Reisner struct digest_info *di = NULL; 1991b18b37beSPhilipp Reisner int size, verb; 1992b411b363SPhilipp Reisner unsigned int fault_type; 199302918be2SPhilipp Reisner struct p_block_req *p = &mdev->data.rbuf.block_req; 1994b411b363SPhilipp Reisner 1995b411b363SPhilipp Reisner sector = be64_to_cpu(p->sector); 1996b411b363SPhilipp Reisner size = be32_to_cpu(p->blksize); 1997b411b363SPhilipp Reisner 19981816a2b4SLars Ellenberg if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) { 1999b411b363SPhilipp Reisner dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__, 2000b411b363SPhilipp Reisner (unsigned long long)sector, size); 200181e84650SAndreas Gruenbacher return false; 2002b411b363SPhilipp Reisner } 2003b411b363SPhilipp Reisner if (sector + (size>>9) > capacity) { 2004b411b363SPhilipp Reisner dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__, 2005b411b363SPhilipp Reisner (unsigned long long)sector, size); 200681e84650SAndreas Gruenbacher return false; 2007b411b363SPhilipp Reisner } 2008b411b363SPhilipp Reisner 2009b411b363SPhilipp Reisner if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) { 2010b18b37beSPhilipp Reisner verb = 1; 2011b18b37beSPhilipp Reisner switch (cmd) { 2012b18b37beSPhilipp Reisner case P_DATA_REQUEST: 2013b18b37beSPhilipp Reisner drbd_send_ack_rp(mdev, P_NEG_DREPLY, p); 2014b18b37beSPhilipp Reisner break; 2015b18b37beSPhilipp Reisner case P_RS_DATA_REQUEST: 2016b18b37beSPhilipp Reisner case P_CSUM_RS_REQUEST: 2017b18b37beSPhilipp Reisner case P_OV_REQUEST: 2018b18b37beSPhilipp Reisner drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY , p); 2019b18b37beSPhilipp Reisner break; 2020b18b37beSPhilipp Reisner case P_OV_REPLY: 2021b18b37beSPhilipp Reisner verb = 0; 2022b18b37beSPhilipp Reisner dec_rs_pending(mdev); 2023b18b37beSPhilipp Reisner drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC); 2024b18b37beSPhilipp Reisner break; 2025b18b37beSPhilipp Reisner default: 2026b18b37beSPhilipp Reisner dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n", 2027b18b37beSPhilipp Reisner cmdname(cmd)); 2028b18b37beSPhilipp Reisner } 2029b18b37beSPhilipp Reisner if (verb && 
__ratelimit(&drbd_ratelimit_state))
2030b411b363SPhilipp Reisner dev_err(DEV, "Cannot satisfy peer's read request, "
2031b411b363SPhilipp Reisner "no local data.\n");
2032b18b37beSPhilipp Reisner 
2033a821cc4aSLars Ellenberg /* drain possible payload */
2034a821cc4aSLars Ellenberg return drbd_drain_block(mdev, digest_size);
2035b411b363SPhilipp Reisner }
2036b411b363SPhilipp Reisner 
2037b411b363SPhilipp Reisner /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
2038b411b363SPhilipp Reisner * "criss-cross" setup, that might cause write-out on some other DRBD,
2039b411b363SPhilipp Reisner * which in turn might block on the other node at this very place. */
2040b411b363SPhilipp Reisner e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO);
2041b411b363SPhilipp Reisner if (!e) {
2042b411b363SPhilipp Reisner put_ldev(mdev);
204381e84650SAndreas Gruenbacher return false;
2044b411b363SPhilipp Reisner }
2045b411b363SPhilipp Reisner 
204602918be2SPhilipp Reisner switch (cmd) {
2047b411b363SPhilipp Reisner case P_DATA_REQUEST:
2048b411b363SPhilipp Reisner e->w.cb = w_e_end_data_req;
2049b411b363SPhilipp Reisner fault_type = DRBD_FAULT_DT_RD;
205080a40e43SLars Ellenberg /* application IO, don't drbd_rs_begin_io */
205180a40e43SLars Ellenberg goto submit;
205280a40e43SLars Ellenberg 
2053b411b363SPhilipp Reisner case P_RS_DATA_REQUEST:
2054b411b363SPhilipp Reisner e->w.cb = w_e_end_rsdata_req;
2055b411b363SPhilipp Reisner fault_type = DRBD_FAULT_RS_RD;
20565f9915bbSLars Ellenberg /* used in the sector offset progress display */
20575f9915bbSLars Ellenberg mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
2058b411b363SPhilipp Reisner break;
2059b411b363SPhilipp Reisner 
2060b411b363SPhilipp Reisner case P_OV_REPLY:
2061b411b363SPhilipp Reisner case P_CSUM_RS_REQUEST:
2062b411b363SPhilipp Reisner fault_type = DRBD_FAULT_RS_RD;
2063b411b363SPhilipp Reisner di = kmalloc(sizeof(*di) + digest_size, GFP_NOIO);
2064b411b363SPhilipp Reisner if (!di)
2065b411b363SPhilipp Reisner goto out_free_e;
2066b411b363SPhilipp Reisner 
2067b411b363SPhilipp Reisner di->digest_size = digest_size;
2068b411b363SPhilipp Reisner di->digest = (((char *)di)+sizeof(struct digest_info));
2069b411b363SPhilipp Reisner 
2070c36c3cedSLars Ellenberg e->digest = di;
2071c36c3cedSLars Ellenberg e->flags |= EE_HAS_DIGEST;
2072c36c3cedSLars Ellenberg 
2073b411b363SPhilipp Reisner if (drbd_recv(mdev, di->digest, digest_size) != digest_size)
2074b411b363SPhilipp Reisner goto out_free_e;
2075b411b363SPhilipp Reisner 
207602918be2SPhilipp Reisner if (cmd == P_CSUM_RS_REQUEST) {
2077b411b363SPhilipp Reisner D_ASSERT(mdev->agreed_pro_version >= 89);
2078b411b363SPhilipp Reisner e->w.cb = w_e_end_csum_rs_req;
20795f9915bbSLars Ellenberg /* used in the sector offset progress display */
20805f9915bbSLars Ellenberg mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
208102918be2SPhilipp Reisner } else if (cmd == P_OV_REPLY) {
20822649f080SLars Ellenberg /* track progress, we may need to throttle */
20832649f080SLars Ellenberg atomic_add(size >> 9, &mdev->rs_sect_in);
2084b411b363SPhilipp Reisner e->w.cb = w_e_end_ov_reply;
2085b411b363SPhilipp Reisner dec_rs_pending(mdev);
20860f0601f4SLars Ellenberg /* drbd_rs_begin_io done when we sent this request,
20870f0601f4SLars Ellenberg * but accounting still needs to be done.
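 * ("accounting" here means the rs_sect_ev bump at the submit_for_resync
 * label below; that counter feeds the disk-busy heuristic in
 * drbd_rs_should_slow_down() above.)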
*/
20880f0601f4SLars Ellenberg goto submit_for_resync;
2089b411b363SPhilipp Reisner }
2090b411b363SPhilipp Reisner break;
2091b411b363SPhilipp Reisner 
2092b411b363SPhilipp Reisner case P_OV_REQUEST:
2093b411b363SPhilipp Reisner if (mdev->ov_start_sector == ~(sector_t)0 &&
2094b411b363SPhilipp Reisner mdev->agreed_pro_version >= 90) {
2095de228bbaSLars Ellenberg unsigned long now = jiffies;
2096de228bbaSLars Ellenberg int i;
2097b411b363SPhilipp Reisner mdev->ov_start_sector = sector;
2098b411b363SPhilipp Reisner mdev->ov_position = sector;
209930b743a2SLars Ellenberg mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector);
210030b743a2SLars Ellenberg mdev->rs_total = mdev->ov_left;
2101de228bbaSLars Ellenberg for (i = 0; i < DRBD_SYNC_MARKS; i++) {
2102de228bbaSLars Ellenberg mdev->rs_mark_left[i] = mdev->ov_left;
2103de228bbaSLars Ellenberg mdev->rs_mark_time[i] = now;
2104de228bbaSLars Ellenberg }
2105b411b363SPhilipp Reisner dev_info(DEV, "Online Verify start sector: %llu\n",
2106b411b363SPhilipp Reisner (unsigned long long)sector);
2107b411b363SPhilipp Reisner }
2108b411b363SPhilipp Reisner e->w.cb = w_e_end_ov_req;
2109b411b363SPhilipp Reisner fault_type = DRBD_FAULT_RS_RD;
2110b411b363SPhilipp Reisner break;
2111b411b363SPhilipp Reisner 
2112b411b363SPhilipp Reisner default:
2113b411b363SPhilipp Reisner dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
211402918be2SPhilipp Reisner cmdname(cmd));
2115b411b363SPhilipp Reisner fault_type = DRBD_FAULT_MAX;
211680a40e43SLars Ellenberg goto out_free_e;
2117b411b363SPhilipp Reisner }
2118b411b363SPhilipp Reisner 
21190f0601f4SLars Ellenberg /* Throttle, drbd_rs_begin_io and submit should become asynchronous
21200f0601f4SLars Ellenberg * wrt the receiver, but it is not as straightforward as it may seem.
21210f0601f4SLars Ellenberg * Various places in the resync start and stop logic assume resync
21220f0601f4SLars Ellenberg * requests are processed in order; requeuing this on the worker thread
21230f0601f4SLars Ellenberg * would introduce a bunch of new code for synchronization between threads.
21240f0601f4SLars Ellenberg *
21250f0601f4SLars Ellenberg * Unlimited throttling before drbd_rs_begin_io may stall the resync
21260f0601f4SLars Ellenberg * "forever", throttling after drbd_rs_begin_io will lock that extent
21270f0601f4SLars Ellenberg * for application writes for the same time. For now, just throttle
21280f0601f4SLars Ellenberg * here, where the rest of the code expects the receiver to sleep for
21290f0601f4SLars Ellenberg * a while anyway.
21300f0601f4SLars Ellenberg */
2131b411b363SPhilipp Reisner 
21320f0601f4SLars Ellenberg /* Throttle before drbd_rs_begin_io, as that locks out application IO;
21330f0601f4SLars Ellenberg * this defers syncer requests for some time, before letting at least
21340f0601f4SLars Ellenberg * one request through. The resync controller on the receiving side
21350f0601f4SLars Ellenberg * will adapt to the incoming rate accordingly.
21360f0601f4SLars Ellenberg *
21370f0601f4SLars Ellenberg * We cannot throttle here if remote is Primary/SyncTarget:
21380f0601f4SLars Ellenberg * we would also throttle its application reads.
21390f0601f4SLars Ellenberg * In that case, throttling is done on the SyncTarget only.
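 *
 * The throttle below is a fixed schedule_timeout_uninterruptible(HZ/10),
 * i.e. roughly 100ms per throttled request regardless of HZ; for example,
 * a continuously throttled receiver is capped at about ten resync
 * requests per second.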
21400f0601f4SLars Ellenberg */ 2141e3555d85SPhilipp Reisner if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector)) 2142e3555d85SPhilipp Reisner schedule_timeout_uninterruptible(HZ/10); 2143e3555d85SPhilipp Reisner if (drbd_rs_begin_io(mdev, sector)) 214480a40e43SLars Ellenberg goto out_free_e; 2145b411b363SPhilipp Reisner 21460f0601f4SLars Ellenberg submit_for_resync: 21470f0601f4SLars Ellenberg atomic_add(size >> 9, &mdev->rs_sect_ev); 21480f0601f4SLars Ellenberg 214980a40e43SLars Ellenberg submit: 2150b411b363SPhilipp Reisner inc_unacked(mdev); 215180a40e43SLars Ellenberg spin_lock_irq(&mdev->req_lock); 215280a40e43SLars Ellenberg list_add_tail(&e->w.list, &mdev->read_ee); 215380a40e43SLars Ellenberg spin_unlock_irq(&mdev->req_lock); 2154b411b363SPhilipp Reisner 215545bb912bSLars Ellenberg if (drbd_submit_ee(mdev, e, READ, fault_type) == 0) 215681e84650SAndreas Gruenbacher return true; 2157b411b363SPhilipp Reisner 215810f6d992SLars Ellenberg /* don't care for the reason here */ 215910f6d992SLars Ellenberg dev_err(DEV, "submit failed, triggering re-connect\n"); 216022cc37a9SLars Ellenberg spin_lock_irq(&mdev->req_lock); 216122cc37a9SLars Ellenberg list_del(&e->w.list); 216222cc37a9SLars Ellenberg spin_unlock_irq(&mdev->req_lock); 216322cc37a9SLars Ellenberg /* no drbd_rs_complete_io(), we are dropping the connection anyways */ 216422cc37a9SLars Ellenberg 2165b411b363SPhilipp Reisner out_free_e: 2166b411b363SPhilipp Reisner put_ldev(mdev); 2167b411b363SPhilipp Reisner drbd_free_ee(mdev, e); 216881e84650SAndreas Gruenbacher return false; 2169b411b363SPhilipp Reisner } 2170b411b363SPhilipp Reisner 2171b411b363SPhilipp Reisner static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local) 2172b411b363SPhilipp Reisner { 2173b411b363SPhilipp Reisner int self, peer, rv = -100; 2174b411b363SPhilipp Reisner unsigned long ch_self, ch_peer; 2175b411b363SPhilipp Reisner 2176b411b363SPhilipp Reisner self = mdev->ldev->md.uuid[UI_BITMAP] & 1; 2177b411b363SPhilipp Reisner peer = mdev->p_uuid[UI_BITMAP] & 1; 2178b411b363SPhilipp Reisner 2179b411b363SPhilipp Reisner ch_peer = mdev->p_uuid[UI_SIZE]; 2180b411b363SPhilipp Reisner ch_self = mdev->comm_bm_set; 2181b411b363SPhilipp Reisner 2182b411b363SPhilipp Reisner switch (mdev->net_conf->after_sb_0p) { 2183b411b363SPhilipp Reisner case ASB_CONSENSUS: 2184b411b363SPhilipp Reisner case ASB_DISCARD_SECONDARY: 2185b411b363SPhilipp Reisner case ASB_CALL_HELPER: 2186b411b363SPhilipp Reisner dev_err(DEV, "Configuration error.\n"); 2187b411b363SPhilipp Reisner break; 2188b411b363SPhilipp Reisner case ASB_DISCONNECT: 2189b411b363SPhilipp Reisner break; 2190b411b363SPhilipp Reisner case ASB_DISCARD_YOUNGER_PRI: 2191b411b363SPhilipp Reisner if (self == 0 && peer == 1) { 2192b411b363SPhilipp Reisner rv = -1; 2193b411b363SPhilipp Reisner break; 2194b411b363SPhilipp Reisner } 2195b411b363SPhilipp Reisner if (self == 1 && peer == 0) { 2196b411b363SPhilipp Reisner rv = 1; 2197b411b363SPhilipp Reisner break; 2198b411b363SPhilipp Reisner } 2199b411b363SPhilipp Reisner /* Else fall through to one of the other strategies... 
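 * (Throughout this function, rv = -1 selects the local node's data for
 * discard, rv = 1 the peer's, and the initial rv = -100 means "no
 * decision"; compare the ASB_DISCARD_LOCAL and ASB_DISCARD_REMOTE cases
 * below.)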
*/ 2200b411b363SPhilipp Reisner case ASB_DISCARD_OLDER_PRI: 2201b411b363SPhilipp Reisner if (self == 0 && peer == 1) { 2202b411b363SPhilipp Reisner rv = 1; 2203b411b363SPhilipp Reisner break; 2204b411b363SPhilipp Reisner } 2205b411b363SPhilipp Reisner if (self == 1 && peer == 0) { 2206b411b363SPhilipp Reisner rv = -1; 2207b411b363SPhilipp Reisner break; 2208b411b363SPhilipp Reisner } 2209b411b363SPhilipp Reisner /* Else fall through to one of the other strategies... */ 2210ad19bf6eSLars Ellenberg dev_warn(DEV, "Discard younger/older primary did not find a decision\n" 2211b411b363SPhilipp Reisner "Using discard-least-changes instead\n"); 2212b411b363SPhilipp Reisner case ASB_DISCARD_ZERO_CHG: 2213b411b363SPhilipp Reisner if (ch_peer == 0 && ch_self == 0) { 2214b411b363SPhilipp Reisner rv = test_bit(DISCARD_CONCURRENT, &mdev->flags) 2215b411b363SPhilipp Reisner ? -1 : 1; 2216b411b363SPhilipp Reisner break; 2217b411b363SPhilipp Reisner } else { 2218b411b363SPhilipp Reisner if (ch_peer == 0) { rv = 1; break; } 2219b411b363SPhilipp Reisner if (ch_self == 0) { rv = -1; break; } 2220b411b363SPhilipp Reisner } 2221b411b363SPhilipp Reisner if (mdev->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG) 2222b411b363SPhilipp Reisner break; 2223b411b363SPhilipp Reisner case ASB_DISCARD_LEAST_CHG: 2224b411b363SPhilipp Reisner if (ch_self < ch_peer) 2225b411b363SPhilipp Reisner rv = -1; 2226b411b363SPhilipp Reisner else if (ch_self > ch_peer) 2227b411b363SPhilipp Reisner rv = 1; 2228b411b363SPhilipp Reisner else /* ( ch_self == ch_peer ) */ 2229b411b363SPhilipp Reisner /* Well, then use something else. */ 2230b411b363SPhilipp Reisner rv = test_bit(DISCARD_CONCURRENT, &mdev->flags) 2231b411b363SPhilipp Reisner ? -1 : 1; 2232b411b363SPhilipp Reisner break; 2233b411b363SPhilipp Reisner case ASB_DISCARD_LOCAL: 2234b411b363SPhilipp Reisner rv = -1; 2235b411b363SPhilipp Reisner break; 2236b411b363SPhilipp Reisner case ASB_DISCARD_REMOTE: 2237b411b363SPhilipp Reisner rv = 1; 2238b411b363SPhilipp Reisner } 2239b411b363SPhilipp Reisner 2240b411b363SPhilipp Reisner return rv; 2241b411b363SPhilipp Reisner } 2242b411b363SPhilipp Reisner 2243b411b363SPhilipp Reisner static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local) 2244b411b363SPhilipp Reisner { 22456184ea21SAndreas Gruenbacher int hg, rv = -100; 2246b411b363SPhilipp Reisner 2247b411b363SPhilipp Reisner switch (mdev->net_conf->after_sb_1p) { 2248b411b363SPhilipp Reisner case ASB_DISCARD_YOUNGER_PRI: 2249b411b363SPhilipp Reisner case ASB_DISCARD_OLDER_PRI: 2250b411b363SPhilipp Reisner case ASB_DISCARD_LEAST_CHG: 2251b411b363SPhilipp Reisner case ASB_DISCARD_LOCAL: 2252b411b363SPhilipp Reisner case ASB_DISCARD_REMOTE: 2253b411b363SPhilipp Reisner dev_err(DEV, "Configuration error.\n"); 2254b411b363SPhilipp Reisner break; 2255b411b363SPhilipp Reisner case ASB_DISCONNECT: 2256b411b363SPhilipp Reisner break; 2257b411b363SPhilipp Reisner case ASB_CONSENSUS: 2258b411b363SPhilipp Reisner hg = drbd_asb_recover_0p(mdev); 2259b411b363SPhilipp Reisner if (hg == -1 && mdev->state.role == R_SECONDARY) 2260b411b363SPhilipp Reisner rv = hg; 2261b411b363SPhilipp Reisner if (hg == 1 && mdev->state.role == R_PRIMARY) 2262b411b363SPhilipp Reisner rv = hg; 2263b411b363SPhilipp Reisner break; 2264b411b363SPhilipp Reisner case ASB_VIOLENTLY: 2265b411b363SPhilipp Reisner rv = drbd_asb_recover_0p(mdev); 2266b411b363SPhilipp Reisner break; 2267b411b363SPhilipp Reisner case ASB_DISCARD_SECONDARY: 2268b411b363SPhilipp Reisner return mdev->state.role == R_PRIMARY ? 
1 : -1; 2269b411b363SPhilipp Reisner case ASB_CALL_HELPER: 2270b411b363SPhilipp Reisner hg = drbd_asb_recover_0p(mdev); 2271b411b363SPhilipp Reisner if (hg == -1 && mdev->state.role == R_PRIMARY) { 2272bb437946SAndreas Gruenbacher enum drbd_state_rv rv2; 2273bb437946SAndreas Gruenbacher 2274bb437946SAndreas Gruenbacher drbd_set_role(mdev, R_SECONDARY, 0); 2275b411b363SPhilipp Reisner /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE, 2276b411b363SPhilipp Reisner * we might be here in C_WF_REPORT_PARAMS which is transient. 2277b411b363SPhilipp Reisner * we do not need to wait for the after state change work either. */ 2278bb437946SAndreas Gruenbacher rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY)); 2279bb437946SAndreas Gruenbacher if (rv2 != SS_SUCCESS) { 2280b411b363SPhilipp Reisner drbd_khelper(mdev, "pri-lost-after-sb"); 2281b411b363SPhilipp Reisner } else { 2282b411b363SPhilipp Reisner dev_warn(DEV, "Successfully gave up primary role.\n"); 2283b411b363SPhilipp Reisner rv = hg; 2284b411b363SPhilipp Reisner } 2285b411b363SPhilipp Reisner } else 2286b411b363SPhilipp Reisner rv = hg; 2287b411b363SPhilipp Reisner } 2288b411b363SPhilipp Reisner 2289b411b363SPhilipp Reisner return rv; 2290b411b363SPhilipp Reisner } 2291b411b363SPhilipp Reisner 2292b411b363SPhilipp Reisner static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local) 2293b411b363SPhilipp Reisner { 22946184ea21SAndreas Gruenbacher int hg, rv = -100; 2295b411b363SPhilipp Reisner 2296b411b363SPhilipp Reisner switch (mdev->net_conf->after_sb_2p) { 2297b411b363SPhilipp Reisner case ASB_DISCARD_YOUNGER_PRI: 2298b411b363SPhilipp Reisner case ASB_DISCARD_OLDER_PRI: 2299b411b363SPhilipp Reisner case ASB_DISCARD_LEAST_CHG: 2300b411b363SPhilipp Reisner case ASB_DISCARD_LOCAL: 2301b411b363SPhilipp Reisner case ASB_DISCARD_REMOTE: 2302b411b363SPhilipp Reisner case ASB_CONSENSUS: 2303b411b363SPhilipp Reisner case ASB_DISCARD_SECONDARY: 2304b411b363SPhilipp Reisner dev_err(DEV, "Configuration error.\n"); 2305b411b363SPhilipp Reisner break; 2306b411b363SPhilipp Reisner case ASB_VIOLENTLY: 2307b411b363SPhilipp Reisner rv = drbd_asb_recover_0p(mdev); 2308b411b363SPhilipp Reisner break; 2309b411b363SPhilipp Reisner case ASB_DISCONNECT: 2310b411b363SPhilipp Reisner break; 2311b411b363SPhilipp Reisner case ASB_CALL_HELPER: 2312b411b363SPhilipp Reisner hg = drbd_asb_recover_0p(mdev); 2313b411b363SPhilipp Reisner if (hg == -1) { 2314bb437946SAndreas Gruenbacher enum drbd_state_rv rv2; 2315bb437946SAndreas Gruenbacher 2316b411b363SPhilipp Reisner /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE, 2317b411b363SPhilipp Reisner * we might be here in C_WF_REPORT_PARAMS which is transient. 2318b411b363SPhilipp Reisner * we do not need to wait for the after state change work either. 
*/ 2319bb437946SAndreas Gruenbacher rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY)); 2320bb437946SAndreas Gruenbacher if (rv2 != SS_SUCCESS) { 2321b411b363SPhilipp Reisner drbd_khelper(mdev, "pri-lost-after-sb"); 2322b411b363SPhilipp Reisner } else { 2323b411b363SPhilipp Reisner dev_warn(DEV, "Successfully gave up primary role.\n"); 2324b411b363SPhilipp Reisner rv = hg; 2325b411b363SPhilipp Reisner } 2326b411b363SPhilipp Reisner } else 2327b411b363SPhilipp Reisner rv = hg; 2328b411b363SPhilipp Reisner } 2329b411b363SPhilipp Reisner 2330b411b363SPhilipp Reisner return rv; 2331b411b363SPhilipp Reisner } 2332b411b363SPhilipp Reisner 2333b411b363SPhilipp Reisner static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid, 2334b411b363SPhilipp Reisner u64 bits, u64 flags) 2335b411b363SPhilipp Reisner { 2336b411b363SPhilipp Reisner if (!uuid) { 2337b411b363SPhilipp Reisner dev_info(DEV, "%s uuid info vanished while I was looking!\n", text); 2338b411b363SPhilipp Reisner return; 2339b411b363SPhilipp Reisner } 2340b411b363SPhilipp Reisner dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n", 2341b411b363SPhilipp Reisner text, 2342b411b363SPhilipp Reisner (unsigned long long)uuid[UI_CURRENT], 2343b411b363SPhilipp Reisner (unsigned long long)uuid[UI_BITMAP], 2344b411b363SPhilipp Reisner (unsigned long long)uuid[UI_HISTORY_START], 2345b411b363SPhilipp Reisner (unsigned long long)uuid[UI_HISTORY_END], 2346b411b363SPhilipp Reisner (unsigned long long)bits, 2347b411b363SPhilipp Reisner (unsigned long long)flags); 2348b411b363SPhilipp Reisner } 2349b411b363SPhilipp Reisner 2350b411b363SPhilipp Reisner /* 2351b411b363SPhilipp Reisner 100 after split brain try auto recover 2352b411b363SPhilipp Reisner 2 C_SYNC_SOURCE set BitMap 2353b411b363SPhilipp Reisner 1 C_SYNC_SOURCE use BitMap 2354b411b363SPhilipp Reisner 0 no Sync 2355b411b363SPhilipp Reisner -1 C_SYNC_TARGET use BitMap 2356b411b363SPhilipp Reisner -2 C_SYNC_TARGET set BitMap 2357b411b363SPhilipp Reisner -100 after split brain, disconnect 2358b411b363SPhilipp Reisner -1000 unrelated data 23594a23f264SPhilipp Reisner -1091 requires proto 91 23604a23f264SPhilipp Reisner -1096 requires proto 96 2361b411b363SPhilipp Reisner */ 2362b411b363SPhilipp Reisner static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local) 2363b411b363SPhilipp Reisner { 2364b411b363SPhilipp Reisner u64 self, peer; 2365b411b363SPhilipp Reisner int i, j; 2366b411b363SPhilipp Reisner 2367b411b363SPhilipp Reisner self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1); 2368b411b363SPhilipp Reisner peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1); 2369b411b363SPhilipp Reisner 2370b411b363SPhilipp Reisner *rule_nr = 10; 2371b411b363SPhilipp Reisner if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED) 2372b411b363SPhilipp Reisner return 0; 2373b411b363SPhilipp Reisner 2374b411b363SPhilipp Reisner *rule_nr = 20; 2375b411b363SPhilipp Reisner if ((self == UUID_JUST_CREATED || self == (u64)0) && 2376b411b363SPhilipp Reisner peer != UUID_JUST_CREATED) 2377b411b363SPhilipp Reisner return -2; 2378b411b363SPhilipp Reisner 2379b411b363SPhilipp Reisner *rule_nr = 30; 2380b411b363SPhilipp Reisner if (self != UUID_JUST_CREATED && 2381b411b363SPhilipp Reisner (peer == UUID_JUST_CREATED || peer == (u64)0)) 2382b411b363SPhilipp Reisner return 2; 2383b411b363SPhilipp Reisner 2384b411b363SPhilipp Reisner if (self == peer) { 2385b411b363SPhilipp Reisner int rct, dc; /* roles at crash time */ 2386b411b363SPhilipp Reisner 
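/* Editor's sketch, not part of the original source: within this
 * self == peer branch the current UUIDs already match, so the question
 * is whether one side missed the "resync finished" event.  All UUID
 * comparisons below mask off the lowest bit, which is used as a flag:
 *
 *	u64 self_bm   = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
 *	u64 peer_hist = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
 *
 * If self_bm equals what the peer has already rotated into its
 * history, we were sync source and missed the event (rules 34/36);
 * the mirrored comparison catches the peer as the stale side
 * (rules 35/37). */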
2387b411b363SPhilipp Reisner if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) { 2388b411b363SPhilipp Reisner 2389b411b363SPhilipp Reisner if (mdev->agreed_pro_version < 91) 23904a23f264SPhilipp Reisner return -1091; 2391b411b363SPhilipp Reisner 2392b411b363SPhilipp Reisner if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) && 2393b411b363SPhilipp Reisner (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) { 2394b411b363SPhilipp Reisner dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n"); 23959f2247bbSPhilipp Reisner drbd_uuid_move_history(mdev); 23969f2247bbSPhilipp Reisner mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP]; 23979f2247bbSPhilipp Reisner mdev->ldev->md.uuid[UI_BITMAP] = 0; 2398b411b363SPhilipp Reisner 2399b411b363SPhilipp Reisner drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, 2400b411b363SPhilipp Reisner mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0); 2401b411b363SPhilipp Reisner *rule_nr = 34; 2402b411b363SPhilipp Reisner } else { 2403b411b363SPhilipp Reisner dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n"); 2404b411b363SPhilipp Reisner *rule_nr = 36; 2405b411b363SPhilipp Reisner } 2406b411b363SPhilipp Reisner 2407b411b363SPhilipp Reisner return 1; 2408b411b363SPhilipp Reisner } 2409b411b363SPhilipp Reisner 2410b411b363SPhilipp Reisner if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) { 2411b411b363SPhilipp Reisner 2412b411b363SPhilipp Reisner if (mdev->agreed_pro_version < 91) 24134a23f264SPhilipp Reisner return -1091; 2414b411b363SPhilipp Reisner 2415b411b363SPhilipp Reisner if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) && 2416b411b363SPhilipp Reisner (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) { 2417b411b363SPhilipp Reisner dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n"); 2418b411b363SPhilipp Reisner 2419b411b363SPhilipp Reisner mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START]; 2420b411b363SPhilipp Reisner mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP]; 2421b411b363SPhilipp Reisner mdev->p_uuid[UI_BITMAP] = 0UL; 2422b411b363SPhilipp Reisner 2423b411b363SPhilipp Reisner drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]); 2424b411b363SPhilipp Reisner *rule_nr = 35; 2425b411b363SPhilipp Reisner } else { 2426b411b363SPhilipp Reisner dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n"); 2427b411b363SPhilipp Reisner *rule_nr = 37; 2428b411b363SPhilipp Reisner } 2429b411b363SPhilipp Reisner 2430b411b363SPhilipp Reisner return -1; 2431b411b363SPhilipp Reisner } 2432b411b363SPhilipp Reisner 2433b411b363SPhilipp Reisner /* Common power [off|failure] */ 2434b411b363SPhilipp Reisner rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 
1 : 0) + 2435b411b363SPhilipp Reisner (mdev->p_uuid[UI_FLAGS] & 2); 2436b411b363SPhilipp Reisner /* lowest bit is set when we were primary, 2437b411b363SPhilipp Reisner * next bit (weight 2) is set when peer was primary */ 2438b411b363SPhilipp Reisner *rule_nr = 40; 2439b411b363SPhilipp Reisner 2440b411b363SPhilipp Reisner switch (rct) { 2441b411b363SPhilipp Reisner case 0: /* !self_pri && !peer_pri */ return 0; 2442b411b363SPhilipp Reisner case 1: /* self_pri && !peer_pri */ return 1; 2443b411b363SPhilipp Reisner case 2: /* !self_pri && peer_pri */ return -1; 2444b411b363SPhilipp Reisner case 3: /* self_pri && peer_pri */ 2445b411b363SPhilipp Reisner dc = test_bit(DISCARD_CONCURRENT, &mdev->flags); 2446b411b363SPhilipp Reisner return dc ? -1 : 1; 2447b411b363SPhilipp Reisner } 2448b411b363SPhilipp Reisner } 2449b411b363SPhilipp Reisner 2450b411b363SPhilipp Reisner *rule_nr = 50; 2451b411b363SPhilipp Reisner peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1); 2452b411b363SPhilipp Reisner if (self == peer) 2453b411b363SPhilipp Reisner return -1; 2454b411b363SPhilipp Reisner 2455b411b363SPhilipp Reisner *rule_nr = 51; 2456b411b363SPhilipp Reisner peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1); 2457b411b363SPhilipp Reisner if (self == peer) { 24584a23f264SPhilipp Reisner if (mdev->agreed_pro_version < 96 ? 24594a23f264SPhilipp Reisner (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == 24604a23f264SPhilipp Reisner (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) : 24614a23f264SPhilipp Reisner peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) { 2462b411b363SPhilipp Reisner /* The last P_SYNC_UUID did not get through. Undo the modifications of 2463b411b363SPhilipp Reisner the peer's UUIDs made at the last start of resync as sync source. */ 2464b411b363SPhilipp Reisner 2465b411b363SPhilipp Reisner if (mdev->agreed_pro_version < 91) 24664a23f264SPhilipp Reisner return -1091; 2467b411b363SPhilipp Reisner 2468b411b363SPhilipp Reisner mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START]; 2469b411b363SPhilipp Reisner mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1]; 24704a23f264SPhilipp Reisner 247192b4ca29SLars Ellenberg dev_info(DEV, "Lost last syncUUID packet, corrected:\n"); 24724a23f264SPhilipp Reisner drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]); 24734a23f264SPhilipp Reisner 2474b411b363SPhilipp Reisner return -1; 2475b411b363SPhilipp Reisner } 2476b411b363SPhilipp Reisner } 2477b411b363SPhilipp Reisner 2478b411b363SPhilipp Reisner *rule_nr = 60; 2479b411b363SPhilipp Reisner self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1); 2480b411b363SPhilipp Reisner for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) { 2481b411b363SPhilipp Reisner peer = mdev->p_uuid[i] & ~((u64)1); 2482b411b363SPhilipp Reisner if (self == peer) 2483b411b363SPhilipp Reisner return -2; 2484b411b363SPhilipp Reisner } 2485b411b363SPhilipp Reisner 2486b411b363SPhilipp Reisner *rule_nr = 70; 2487b411b363SPhilipp Reisner self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1); 2488b411b363SPhilipp Reisner peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1); 2489b411b363SPhilipp Reisner if (self == peer) 2490b411b363SPhilipp Reisner return 1; 2491b411b363SPhilipp Reisner 2492b411b363SPhilipp Reisner *rule_nr = 71; 2493b411b363SPhilipp Reisner self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1); 2494b411b363SPhilipp Reisner if (self == peer) { 24954a23f264SPhilipp Reisner if (mdev->agreed_pro_version < 96 ? 
24964a23f264SPhilipp Reisner (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == 24974a23f264SPhilipp Reisner (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) : 24984a23f264SPhilipp Reisner self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) { 2499b411b363SPhilipp Reisner /* The last P_SYNC_UUID did not get through. Undo the modifications 2500b411b363SPhilipp Reisner made to our own UUIDs at the last start of resync as sync source. */ 2501b411b363SPhilipp Reisner 2502b411b363SPhilipp Reisner if (mdev->agreed_pro_version < 91) 25034a23f264SPhilipp Reisner return -1091; 2504b411b363SPhilipp Reisner 25059f2247bbSPhilipp Reisner __drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]); 25069f2247bbSPhilipp Reisner __drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]); 2507b411b363SPhilipp Reisner 25084a23f264SPhilipp Reisner dev_info(DEV, "Last syncUUID did not get through, corrected:\n"); 2509b411b363SPhilipp Reisner drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, 2510b411b363SPhilipp Reisner mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0); 2511b411b363SPhilipp Reisner 2512b411b363SPhilipp Reisner return 1; 2513b411b363SPhilipp Reisner } 2514b411b363SPhilipp Reisner } 2515b411b363SPhilipp Reisner 2516b411b363SPhilipp Reisner 2517b411b363SPhilipp Reisner *rule_nr = 80; 2518d8c2a36bSPhilipp Reisner peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1); 2519b411b363SPhilipp Reisner for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) { 2520b411b363SPhilipp Reisner self = mdev->ldev->md.uuid[i] & ~((u64)1); 2521b411b363SPhilipp Reisner if (self == peer) 2522b411b363SPhilipp Reisner return 2; 2523b411b363SPhilipp Reisner } 2524b411b363SPhilipp Reisner 2525b411b363SPhilipp Reisner *rule_nr = 90; 2526b411b363SPhilipp Reisner self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1); 2527b411b363SPhilipp Reisner peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1); 2528b411b363SPhilipp Reisner if (self == peer && self != ((u64)0)) 2529b411b363SPhilipp Reisner return 100; 2530b411b363SPhilipp Reisner 2531b411b363SPhilipp Reisner *rule_nr = 100; 2532b411b363SPhilipp Reisner for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) { 2533b411b363SPhilipp Reisner self = mdev->ldev->md.uuid[i] & ~((u64)1); 2534b411b363SPhilipp Reisner for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) { 2535b411b363SPhilipp Reisner peer = mdev->p_uuid[j] & ~((u64)1); 2536b411b363SPhilipp Reisner if (self == peer) 2537b411b363SPhilipp Reisner return -100; 2538b411b363SPhilipp Reisner } 2539b411b363SPhilipp Reisner } 2540b411b363SPhilipp Reisner 2541b411b363SPhilipp Reisner return -1000; 2542b411b363SPhilipp Reisner } 2543b411b363SPhilipp Reisner 2544b411b363SPhilipp Reisner /* drbd_sync_handshake() returns the new conn state on success, or 2545b411b363SPhilipp Reisner C_MASK on failure. 
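   (Editor's gloss, not in the original: the handshake value hg
   computed below maps onto this result as follows: hg > 0 yields
   C_WF_BITMAP_S and this node becomes sync source, hg < 0 yields
   C_WF_BITMAP_T, hg == 0 yields C_CONNECTED, and abs(hg) >= 2
   additionally forces a full sync by first setting all bits in the
   bitmap.)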
2546b411b363SPhilipp Reisner */ 2547b411b363SPhilipp Reisner static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role, 2548b411b363SPhilipp Reisner enum drbd_disk_state peer_disk) __must_hold(local) 2549b411b363SPhilipp Reisner { 2550b411b363SPhilipp Reisner int hg, rule_nr; 2551b411b363SPhilipp Reisner enum drbd_conns rv = C_MASK; 2552b411b363SPhilipp Reisner enum drbd_disk_state mydisk; 2553b411b363SPhilipp Reisner 2554b411b363SPhilipp Reisner mydisk = mdev->state.disk; 2555b411b363SPhilipp Reisner if (mydisk == D_NEGOTIATING) 2556b411b363SPhilipp Reisner mydisk = mdev->new_state_tmp.disk; 2557b411b363SPhilipp Reisner 2558b411b363SPhilipp Reisner dev_info(DEV, "drbd_sync_handshake:\n"); 25599f2247bbSPhilipp Reisner 25609f2247bbSPhilipp Reisner spin_lock_irq(&mdev->ldev->md.uuid_lock); 2561b411b363SPhilipp Reisner drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0); 2562b411b363SPhilipp Reisner drbd_uuid_dump(mdev, "peer", mdev->p_uuid, 2563b411b363SPhilipp Reisner mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]); 2564b411b363SPhilipp Reisner 2565b411b363SPhilipp Reisner hg = drbd_uuid_compare(mdev, &rule_nr); 25669f2247bbSPhilipp Reisner spin_unlock_irq(&mdev->ldev->md.uuid_lock); 2567b411b363SPhilipp Reisner 2568b411b363SPhilipp Reisner dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr); 2569b411b363SPhilipp Reisner 2570b411b363SPhilipp Reisner if (hg == -1000) { 2571b411b363SPhilipp Reisner dev_alert(DEV, "Unrelated data, aborting!\n"); 2572b411b363SPhilipp Reisner return C_MASK; 2573b411b363SPhilipp Reisner } 25744a23f264SPhilipp Reisner if (hg < -1000) { 25754a23f264SPhilipp Reisner dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000); 2576b411b363SPhilipp Reisner return C_MASK; 2577b411b363SPhilipp Reisner } 2578b411b363SPhilipp Reisner 2579b411b363SPhilipp Reisner if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) || 2580b411b363SPhilipp Reisner (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) { 2581b411b363SPhilipp Reisner int f = (hg == -100) || abs(hg) == 2; 2582b411b363SPhilipp Reisner hg = mydisk > D_INCONSISTENT ? 1 : -1; 2583b411b363SPhilipp Reisner if (f) 2584b411b363SPhilipp Reisner hg = hg*2; 2585b411b363SPhilipp Reisner dev_info(DEV, "Becoming sync %s due to disk states.\n", 2586b411b363SPhilipp Reisner hg > 0 ? 
"source" : "target"); 2587b411b363SPhilipp Reisner } 2588b411b363SPhilipp Reisner 25893a11a487SAdam Gandelman if (abs(hg) == 100) 25903a11a487SAdam Gandelman drbd_khelper(mdev, "initial-split-brain"); 25913a11a487SAdam Gandelman 2592b411b363SPhilipp Reisner if (hg == 100 || (hg == -100 && mdev->net_conf->always_asbp)) { 2593b411b363SPhilipp Reisner int pcount = (mdev->state.role == R_PRIMARY) 2594b411b363SPhilipp Reisner + (peer_role == R_PRIMARY); 2595b411b363SPhilipp Reisner int forced = (hg == -100); 2596b411b363SPhilipp Reisner 2597b411b363SPhilipp Reisner switch (pcount) { 2598b411b363SPhilipp Reisner case 0: 2599b411b363SPhilipp Reisner hg = drbd_asb_recover_0p(mdev); 2600b411b363SPhilipp Reisner break; 2601b411b363SPhilipp Reisner case 1: 2602b411b363SPhilipp Reisner hg = drbd_asb_recover_1p(mdev); 2603b411b363SPhilipp Reisner break; 2604b411b363SPhilipp Reisner case 2: 2605b411b363SPhilipp Reisner hg = drbd_asb_recover_2p(mdev); 2606b411b363SPhilipp Reisner break; 2607b411b363SPhilipp Reisner } 2608b411b363SPhilipp Reisner if (abs(hg) < 100) { 2609b411b363SPhilipp Reisner dev_warn(DEV, "Split-Brain detected, %d primaries, " 2610b411b363SPhilipp Reisner "automatically solved. Sync from %s node\n", 2611b411b363SPhilipp Reisner pcount, (hg < 0) ? "peer" : "this"); 2612b411b363SPhilipp Reisner if (forced) { 2613b411b363SPhilipp Reisner dev_warn(DEV, "Doing a full sync, since" 2614b411b363SPhilipp Reisner " UUIDs where ambiguous.\n"); 2615b411b363SPhilipp Reisner hg = hg*2; 2616b411b363SPhilipp Reisner } 2617b411b363SPhilipp Reisner } 2618b411b363SPhilipp Reisner } 2619b411b363SPhilipp Reisner 2620b411b363SPhilipp Reisner if (hg == -100) { 2621b411b363SPhilipp Reisner if (mdev->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1)) 2622b411b363SPhilipp Reisner hg = -1; 2623b411b363SPhilipp Reisner if (!mdev->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1)) 2624b411b363SPhilipp Reisner hg = 1; 2625b411b363SPhilipp Reisner 2626b411b363SPhilipp Reisner if (abs(hg) < 100) 2627b411b363SPhilipp Reisner dev_warn(DEV, "Split-Brain detected, manually solved. " 2628b411b363SPhilipp Reisner "Sync from %s node\n", 2629b411b363SPhilipp Reisner (hg < 0) ? "peer" : "this"); 2630b411b363SPhilipp Reisner } 2631b411b363SPhilipp Reisner 2632b411b363SPhilipp Reisner if (hg == -100) { 2633580b9767SLars Ellenberg /* FIXME this log message is not correct if we end up here 2634580b9767SLars Ellenberg * after an attempted attach on a diskless node. 2635580b9767SLars Ellenberg * We just refuse to attach -- well, we drop the "connection" 2636580b9767SLars Ellenberg * to that disk, in a way... */ 26373a11a487SAdam Gandelman dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n"); 2638b411b363SPhilipp Reisner drbd_khelper(mdev, "split-brain"); 2639b411b363SPhilipp Reisner return C_MASK; 2640b411b363SPhilipp Reisner } 2641b411b363SPhilipp Reisner 2642b411b363SPhilipp Reisner if (hg > 0 && mydisk <= D_INCONSISTENT) { 2643b411b363SPhilipp Reisner dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n"); 2644b411b363SPhilipp Reisner return C_MASK; 2645b411b363SPhilipp Reisner } 2646b411b363SPhilipp Reisner 2647b411b363SPhilipp Reisner if (hg < 0 && /* by intention we do not use mydisk here. 
*/ 2648b411b363SPhilipp Reisner mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) { 2649b411b363SPhilipp Reisner switch (mdev->net_conf->rr_conflict) { 2650b411b363SPhilipp Reisner case ASB_CALL_HELPER: 2651b411b363SPhilipp Reisner drbd_khelper(mdev, "pri-lost"); 2652b411b363SPhilipp Reisner /* fall through */ 2653b411b363SPhilipp Reisner case ASB_DISCONNECT: 2654b411b363SPhilipp Reisner dev_err(DEV, "I shall become SyncTarget, but I am primary!\n"); 2655b411b363SPhilipp Reisner return C_MASK; 2656b411b363SPhilipp Reisner case ASB_VIOLENTLY: 2657b411b363SPhilipp Reisner dev_warn(DEV, "Becoming SyncTarget, violating the stable-data " 2658b411b363SPhilipp Reisner "assumption\n"); 2659b411b363SPhilipp Reisner } 2660b411b363SPhilipp Reisner } 2661b411b363SPhilipp Reisner 2662cf14c2e9SPhilipp Reisner if (mdev->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) { 2663cf14c2e9SPhilipp Reisner if (hg == 0) 2664cf14c2e9SPhilipp Reisner dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n"); 2665cf14c2e9SPhilipp Reisner else 2666cf14c2e9SPhilipp Reisner dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.", 2667cf14c2e9SPhilipp Reisner drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET), 2668cf14c2e9SPhilipp Reisner abs(hg) >= 2 ? "full" : "bit-map based"); 2669cf14c2e9SPhilipp Reisner return C_MASK; 2670cf14c2e9SPhilipp Reisner } 2671cf14c2e9SPhilipp Reisner 2672b411b363SPhilipp Reisner if (abs(hg) >= 2) { 2673b411b363SPhilipp Reisner dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n"); 267420ceb2b2SLars Ellenberg if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake", 267520ceb2b2SLars Ellenberg BM_LOCKED_SET_ALLOWED)) 2676b411b363SPhilipp Reisner return C_MASK; 2677b411b363SPhilipp Reisner } 2678b411b363SPhilipp Reisner 2679b411b363SPhilipp Reisner if (hg > 0) { /* become sync source. 
*/ 2680b411b363SPhilipp Reisner rv = C_WF_BITMAP_S; 2681b411b363SPhilipp Reisner } else if (hg < 0) { /* become sync target */ 2682b411b363SPhilipp Reisner rv = C_WF_BITMAP_T; 2683b411b363SPhilipp Reisner } else { 2684b411b363SPhilipp Reisner rv = C_CONNECTED; 2685b411b363SPhilipp Reisner if (drbd_bm_total_weight(mdev)) { 2686b411b363SPhilipp Reisner dev_info(DEV, "No resync, but %lu bits in bitmap!\n", 2687b411b363SPhilipp Reisner drbd_bm_total_weight(mdev)); 2688b411b363SPhilipp Reisner } 2689b411b363SPhilipp Reisner } 2690b411b363SPhilipp Reisner 2691b411b363SPhilipp Reisner return rv; 2692b411b363SPhilipp Reisner } 2693b411b363SPhilipp Reisner 2694b411b363SPhilipp Reisner /* returns 1 if invalid */ 2695b411b363SPhilipp Reisner static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self) 2696b411b363SPhilipp Reisner { 2697b411b363SPhilipp Reisner /* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */ 2698b411b363SPhilipp Reisner if ((peer == ASB_DISCARD_REMOTE && self == ASB_DISCARD_LOCAL) || 2699b411b363SPhilipp Reisner (self == ASB_DISCARD_REMOTE && peer == ASB_DISCARD_LOCAL)) 2700b411b363SPhilipp Reisner return 0; 2701b411b363SPhilipp Reisner 2702b411b363SPhilipp Reisner /* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */ 2703b411b363SPhilipp Reisner if (peer == ASB_DISCARD_REMOTE || peer == ASB_DISCARD_LOCAL || 2704b411b363SPhilipp Reisner self == ASB_DISCARD_REMOTE || self == ASB_DISCARD_LOCAL) 2705b411b363SPhilipp Reisner return 1; 2706b411b363SPhilipp Reisner 2707b411b363SPhilipp Reisner /* everything else is valid if they are equal on both sides. */ 2708b411b363SPhilipp Reisner if (peer == self) 2709b411b363SPhilipp Reisner return 0; 2710b411b363SPhilipp Reisner 2711b411b363SPhilipp Reisner /* everything else is invalid. 
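   (editor's summary: the only asymmetric pair cmp_after_sb() accepts
   is discard-local on one node combined with discard-remote on the
   other; every other policy must be configured identically on both)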
*/ 2712b411b363SPhilipp Reisner return 1; 2713b411b363SPhilipp Reisner } 2714b411b363SPhilipp Reisner 271502918be2SPhilipp Reisner static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) 2716b411b363SPhilipp Reisner { 271702918be2SPhilipp Reisner struct p_protocol *p = &mdev->data.rbuf.protocol; 2718b411b363SPhilipp Reisner int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p; 2719cf14c2e9SPhilipp Reisner int p_want_lose, p_two_primaries, cf; 2720b411b363SPhilipp Reisner char p_integrity_alg[SHARED_SECRET_MAX] = ""; 2721b411b363SPhilipp Reisner 2722b411b363SPhilipp Reisner p_proto = be32_to_cpu(p->protocol); 2723b411b363SPhilipp Reisner p_after_sb_0p = be32_to_cpu(p->after_sb_0p); 2724b411b363SPhilipp Reisner p_after_sb_1p = be32_to_cpu(p->after_sb_1p); 2725b411b363SPhilipp Reisner p_after_sb_2p = be32_to_cpu(p->after_sb_2p); 2726b411b363SPhilipp Reisner p_two_primaries = be32_to_cpu(p->two_primaries); 2727cf14c2e9SPhilipp Reisner cf = be32_to_cpu(p->conn_flags); 2728cf14c2e9SPhilipp Reisner p_want_lose = cf & CF_WANT_LOSE; 2729cf14c2e9SPhilipp Reisner 2730cf14c2e9SPhilipp Reisner clear_bit(CONN_DRY_RUN, &mdev->flags); 2731cf14c2e9SPhilipp Reisner 2732cf14c2e9SPhilipp Reisner if (cf & CF_DRY_RUN) 2733cf14c2e9SPhilipp Reisner set_bit(CONN_DRY_RUN, &mdev->flags); 2734b411b363SPhilipp Reisner 2735b411b363SPhilipp Reisner if (p_proto != mdev->net_conf->wire_protocol) { 2736b411b363SPhilipp Reisner dev_err(DEV, "incompatible communication protocols\n"); 2737b411b363SPhilipp Reisner goto disconnect; 2738b411b363SPhilipp Reisner } 2739b411b363SPhilipp Reisner 2740b411b363SPhilipp Reisner if (cmp_after_sb(p_after_sb_0p, mdev->net_conf->after_sb_0p)) { 2741b411b363SPhilipp Reisner dev_err(DEV, "incompatible after-sb-0pri settings\n"); 2742b411b363SPhilipp Reisner goto disconnect; 2743b411b363SPhilipp Reisner } 2744b411b363SPhilipp Reisner 2745b411b363SPhilipp Reisner if (cmp_after_sb(p_after_sb_1p, mdev->net_conf->after_sb_1p)) { 2746b411b363SPhilipp Reisner dev_err(DEV, "incompatible after-sb-1pri settings\n"); 2747b411b363SPhilipp Reisner goto disconnect; 2748b411b363SPhilipp Reisner } 2749b411b363SPhilipp Reisner 2750b411b363SPhilipp Reisner if (cmp_after_sb(p_after_sb_2p, mdev->net_conf->after_sb_2p)) { 2751b411b363SPhilipp Reisner dev_err(DEV, "incompatible after-sb-2pri settings\n"); 2752b411b363SPhilipp Reisner goto disconnect; 2753b411b363SPhilipp Reisner } 2754b411b363SPhilipp Reisner 2755b411b363SPhilipp Reisner if (p_want_lose && mdev->net_conf->want_lose) { 2756b411b363SPhilipp Reisner dev_err(DEV, "both sides have the 'want_lose' flag set\n"); 2757b411b363SPhilipp Reisner goto disconnect; 2758b411b363SPhilipp Reisner } 2759b411b363SPhilipp Reisner 2760b411b363SPhilipp Reisner if (p_two_primaries != mdev->net_conf->two_primaries) { 2761b411b363SPhilipp Reisner dev_err(DEV, "incompatible setting of the two-primaries options\n"); 2762b411b363SPhilipp Reisner goto disconnect; 2763b411b363SPhilipp Reisner } 2764b411b363SPhilipp Reisner 2765b411b363SPhilipp Reisner if (mdev->agreed_pro_version >= 87) { 2766b411b363SPhilipp Reisner unsigned char *my_alg = mdev->net_conf->integrity_alg; 2767b411b363SPhilipp Reisner 2768b411b363SPhilipp Reisner if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size) 276981e84650SAndreas Gruenbacher return false; 2770b411b363SPhilipp Reisner 2771b411b363SPhilipp Reisner p_integrity_alg[SHARED_SECRET_MAX-1] = 0; 2772b411b363SPhilipp Reisner if (strcmp(p_integrity_alg, my_alg)) { 2773b411b363SPhilipp Reisner 
dev_err(DEV, "incompatible setting of the data-integrity-alg\n"); 2774b411b363SPhilipp Reisner goto disconnect; 2775b411b363SPhilipp Reisner } 2776b411b363SPhilipp Reisner dev_info(DEV, "data-integrity-alg: %s\n", 2777b411b363SPhilipp Reisner my_alg[0] ? my_alg : (unsigned char *)"<not-used>"); 2778b411b363SPhilipp Reisner } 2779b411b363SPhilipp Reisner 278081e84650SAndreas Gruenbacher return true; 2781b411b363SPhilipp Reisner 2782b411b363SPhilipp Reisner disconnect: 2783b411b363SPhilipp Reisner drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); 278481e84650SAndreas Gruenbacher return false; 2785b411b363SPhilipp Reisner } 2786b411b363SPhilipp Reisner 2787b411b363SPhilipp Reisner /* helper function 2788b411b363SPhilipp Reisner * input: alg name, feature name 2789b411b363SPhilipp Reisner * return: NULL (alg name was "") 2790b411b363SPhilipp Reisner * ERR_PTR(error) if something goes wrong 2791b411b363SPhilipp Reisner * or the crypto hash ptr, if it worked out ok. */ 2792b411b363SPhilipp Reisner struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev, 2793b411b363SPhilipp Reisner const char *alg, const char *name) 2794b411b363SPhilipp Reisner { 2795b411b363SPhilipp Reisner struct crypto_hash *tfm; 2796b411b363SPhilipp Reisner 2797b411b363SPhilipp Reisner if (!alg[0]) 2798b411b363SPhilipp Reisner return NULL; 2799b411b363SPhilipp Reisner 2800b411b363SPhilipp Reisner tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC); 2801b411b363SPhilipp Reisner if (IS_ERR(tfm)) { 2802b411b363SPhilipp Reisner dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n", 2803b411b363SPhilipp Reisner alg, name, PTR_ERR(tfm)); 2804b411b363SPhilipp Reisner return tfm; 2805b411b363SPhilipp Reisner } 2806b411b363SPhilipp Reisner if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) { 2807b411b363SPhilipp Reisner crypto_free_hash(tfm); 2808b411b363SPhilipp Reisner dev_err(DEV, "\"%s\" is not a digest (%s)\n", alg, name); 2809b411b363SPhilipp Reisner return ERR_PTR(-EINVAL); 2810b411b363SPhilipp Reisner } 2811b411b363SPhilipp Reisner return tfm; 2812b411b363SPhilipp Reisner } 2813b411b363SPhilipp Reisner 281402918be2SPhilipp Reisner static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int packet_size) 2815b411b363SPhilipp Reisner { 281681e84650SAndreas Gruenbacher int ok = true; 281702918be2SPhilipp Reisner struct p_rs_param_95 *p = &mdev->data.rbuf.rs_param_95; 2818b411b363SPhilipp Reisner unsigned int header_size, data_size, exp_max_sz; 2819b411b363SPhilipp Reisner struct crypto_hash *verify_tfm = NULL; 2820b411b363SPhilipp Reisner struct crypto_hash *csums_tfm = NULL; 2821b411b363SPhilipp Reisner const int apv = mdev->agreed_pro_version; 2822778f271dSPhilipp Reisner int *rs_plan_s = NULL; 2823778f271dSPhilipp Reisner int fifo_size = 0; 2824b411b363SPhilipp Reisner 2825b411b363SPhilipp Reisner exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param) 2826b411b363SPhilipp Reisner : apv == 88 ? sizeof(struct p_rs_param) 2827b411b363SPhilipp Reisner + SHARED_SECRET_MAX 28288e26f9ccSPhilipp Reisner : apv <= 94 ? 
sizeof(struct p_rs_param_89) 28298e26f9ccSPhilipp Reisner : /* apv >= 95 */ sizeof(struct p_rs_param_95); 2830b411b363SPhilipp Reisner 283102918be2SPhilipp Reisner if (packet_size > exp_max_sz) { 2832b411b363SPhilipp Reisner dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n", 283302918be2SPhilipp Reisner packet_size, exp_max_sz); 283481e84650SAndreas Gruenbacher return false; 2835b411b363SPhilipp Reisner } 2836b411b363SPhilipp Reisner 2837b411b363SPhilipp Reisner if (apv <= 88) { 283802918be2SPhilipp Reisner header_size = sizeof(struct p_rs_param) - sizeof(struct p_header80); 283902918be2SPhilipp Reisner data_size = packet_size - header_size; 28408e26f9ccSPhilipp Reisner } else if (apv <= 94) { 284102918be2SPhilipp Reisner header_size = sizeof(struct p_rs_param_89) - sizeof(struct p_header80); 284202918be2SPhilipp Reisner data_size = packet_size - header_size; 2843b411b363SPhilipp Reisner D_ASSERT(data_size == 0); 28448e26f9ccSPhilipp Reisner } else { 284502918be2SPhilipp Reisner header_size = sizeof(struct p_rs_param_95) - sizeof(struct p_header80); 284602918be2SPhilipp Reisner data_size = packet_size - header_size; 2847b411b363SPhilipp Reisner D_ASSERT(data_size == 0); 2848b411b363SPhilipp Reisner } 2849b411b363SPhilipp Reisner 2850b411b363SPhilipp Reisner /* initialize verify_alg and csums_alg */ 2851b411b363SPhilipp Reisner memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX); 2852b411b363SPhilipp Reisner 285302918be2SPhilipp Reisner if (drbd_recv(mdev, &p->head.payload, header_size) != header_size) 285481e84650SAndreas Gruenbacher return false; 2855b411b363SPhilipp Reisner 2856b411b363SPhilipp Reisner mdev->sync_conf.rate = be32_to_cpu(p->rate); 2857b411b363SPhilipp Reisner 2858b411b363SPhilipp Reisner if (apv >= 88) { 2859b411b363SPhilipp Reisner if (apv == 88) { 28605de73827SPhilipp Reisner if (data_size > SHARED_SECRET_MAX || data_size == 0) { 28615de73827SPhilipp Reisner dev_err(DEV, "verify-alg of wrong size, " 28625de73827SPhilipp Reisner "peer wants %u, accepting only up to %u byte\n", 2863b411b363SPhilipp Reisner data_size, SHARED_SECRET_MAX); 286481e84650SAndreas Gruenbacher return false; 2865b411b363SPhilipp Reisner } 2866b411b363SPhilipp Reisner 2867b411b363SPhilipp Reisner if (drbd_recv(mdev, p->verify_alg, data_size) != data_size) 286881e84650SAndreas Gruenbacher return false; 2869b411b363SPhilipp Reisner 2870b411b363SPhilipp Reisner /* we expect NUL terminated string */ 2871b411b363SPhilipp Reisner /* but just in case someone tries to be evil */ 2872b411b363SPhilipp Reisner D_ASSERT(p->verify_alg[data_size-1] == 0); 2873b411b363SPhilipp Reisner p->verify_alg[data_size-1] = 0; 2874b411b363SPhilipp Reisner 2875b411b363SPhilipp Reisner } else /* apv >= 89 */ { 2876b411b363SPhilipp Reisner /* we still expect NUL terminated strings */ 2877b411b363SPhilipp Reisner /* but just in case someone tries to be evil */ 2878b411b363SPhilipp Reisner D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0); 2879b411b363SPhilipp Reisner D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0); 2880b411b363SPhilipp Reisner p->verify_alg[SHARED_SECRET_MAX-1] = 0; 2881b411b363SPhilipp Reisner p->csums_alg[SHARED_SECRET_MAX-1] = 0; 2882b411b363SPhilipp Reisner } 2883b411b363SPhilipp Reisner 2884b411b363SPhilipp Reisner if (strcmp(mdev->sync_conf.verify_alg, p->verify_alg)) { 2885b411b363SPhilipp Reisner if (mdev->state.conn == C_WF_REPORT_PARAMS) { 2886b411b363SPhilipp Reisner dev_err(DEV, "Different verify-alg settings. 
me=\"%s\" peer=\"%s\"\n", 2887b411b363SPhilipp Reisner mdev->sync_conf.verify_alg, p->verify_alg); 2888b411b363SPhilipp Reisner goto disconnect; 2889b411b363SPhilipp Reisner } 2890b411b363SPhilipp Reisner verify_tfm = drbd_crypto_alloc_digest_safe(mdev, 2891b411b363SPhilipp Reisner p->verify_alg, "verify-alg"); 2892b411b363SPhilipp Reisner if (IS_ERR(verify_tfm)) { 2893b411b363SPhilipp Reisner verify_tfm = NULL; 2894b411b363SPhilipp Reisner goto disconnect; 2895b411b363SPhilipp Reisner } 2896b411b363SPhilipp Reisner } 2897b411b363SPhilipp Reisner 2898b411b363SPhilipp Reisner if (apv >= 89 && strcmp(mdev->sync_conf.csums_alg, p->csums_alg)) { 2899b411b363SPhilipp Reisner if (mdev->state.conn == C_WF_REPORT_PARAMS) { 2900b411b363SPhilipp Reisner dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n", 2901b411b363SPhilipp Reisner mdev->sync_conf.csums_alg, p->csums_alg); 2902b411b363SPhilipp Reisner goto disconnect; 2903b411b363SPhilipp Reisner } 2904b411b363SPhilipp Reisner csums_tfm = drbd_crypto_alloc_digest_safe(mdev, 2905b411b363SPhilipp Reisner p->csums_alg, "csums-alg"); 2906b411b363SPhilipp Reisner if (IS_ERR(csums_tfm)) { 2907b411b363SPhilipp Reisner csums_tfm = NULL; 2908b411b363SPhilipp Reisner goto disconnect; 2909b411b363SPhilipp Reisner } 2910b411b363SPhilipp Reisner } 2911b411b363SPhilipp Reisner 29128e26f9ccSPhilipp Reisner if (apv > 94) { 29138e26f9ccSPhilipp Reisner mdev->sync_conf.rate = be32_to_cpu(p->rate); 29148e26f9ccSPhilipp Reisner mdev->sync_conf.c_plan_ahead = be32_to_cpu(p->c_plan_ahead); 29158e26f9ccSPhilipp Reisner mdev->sync_conf.c_delay_target = be32_to_cpu(p->c_delay_target); 29168e26f9ccSPhilipp Reisner mdev->sync_conf.c_fill_target = be32_to_cpu(p->c_fill_target); 29178e26f9ccSPhilipp Reisner mdev->sync_conf.c_max_rate = be32_to_cpu(p->c_max_rate); 2918778f271dSPhilipp Reisner 2919778f271dSPhilipp Reisner fifo_size = (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ; 2920778f271dSPhilipp Reisner if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) { 2921778f271dSPhilipp Reisner rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL); 2922778f271dSPhilipp Reisner if (!rs_plan_s) { 2923778f271dSPhilipp Reisner dev_err(DEV, "kmalloc of fifo_buffer failed"); 2924778f271dSPhilipp Reisner goto disconnect; 2925778f271dSPhilipp Reisner } 2926778f271dSPhilipp Reisner } 29278e26f9ccSPhilipp Reisner } 2928b411b363SPhilipp Reisner 2929b411b363SPhilipp Reisner spin_lock(&mdev->peer_seq_lock); 2930b411b363SPhilipp Reisner /* lock against drbd_nl_syncer_conf() */ 2931b411b363SPhilipp Reisner if (verify_tfm) { 2932b411b363SPhilipp Reisner strcpy(mdev->sync_conf.verify_alg, p->verify_alg); 2933b411b363SPhilipp Reisner mdev->sync_conf.verify_alg_len = strlen(p->verify_alg) + 1; 2934b411b363SPhilipp Reisner crypto_free_hash(mdev->verify_tfm); 2935b411b363SPhilipp Reisner mdev->verify_tfm = verify_tfm; 2936b411b363SPhilipp Reisner dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg); 2937b411b363SPhilipp Reisner } 2938b411b363SPhilipp Reisner if (csums_tfm) { 2939b411b363SPhilipp Reisner strcpy(mdev->sync_conf.csums_alg, p->csums_alg); 2940b411b363SPhilipp Reisner mdev->sync_conf.csums_alg_len = strlen(p->csums_alg) + 1; 2941b411b363SPhilipp Reisner crypto_free_hash(mdev->csums_tfm); 2942b411b363SPhilipp Reisner mdev->csums_tfm = csums_tfm; 2943b411b363SPhilipp Reisner dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg); 2944b411b363SPhilipp Reisner } 2945778f271dSPhilipp Reisner if (fifo_size != mdev->rs_plan_s.size) { 2946778f271dSPhilipp 
Reisner kfree(mdev->rs_plan_s.values); 2947778f271dSPhilipp Reisner mdev->rs_plan_s.values = rs_plan_s; 2948778f271dSPhilipp Reisner mdev->rs_plan_s.size = fifo_size; 2949778f271dSPhilipp Reisner mdev->rs_planed = 0; 2950778f271dSPhilipp Reisner } 2951b411b363SPhilipp Reisner spin_unlock(&mdev->peer_seq_lock); 2952b411b363SPhilipp Reisner } 2953b411b363SPhilipp Reisner 2954b411b363SPhilipp Reisner return ok; 2955b411b363SPhilipp Reisner disconnect: 2956b411b363SPhilipp Reisner /* just for completeness: actually not needed, 2957b411b363SPhilipp Reisner * as this is not reached if csums_tfm was ok. */ 2958b411b363SPhilipp Reisner crypto_free_hash(csums_tfm); 2959b411b363SPhilipp Reisner /* but free the verify_tfm again, if csums_tfm did not work out */ 2960b411b363SPhilipp Reisner crypto_free_hash(verify_tfm); 2961b411b363SPhilipp Reisner drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); 296281e84650SAndreas Gruenbacher return false; 2963b411b363SPhilipp Reisner } 2964b411b363SPhilipp Reisner 2965b411b363SPhilipp Reisner /* warn if the arguments differ by more than 12.5% */ 2966b411b363SPhilipp Reisner static void warn_if_differ_considerably(struct drbd_conf *mdev, 2967b411b363SPhilipp Reisner const char *s, sector_t a, sector_t b) 2968b411b363SPhilipp Reisner { 2969b411b363SPhilipp Reisner sector_t d; 2970b411b363SPhilipp Reisner if (a == 0 || b == 0) 2971b411b363SPhilipp Reisner return; 2972b411b363SPhilipp Reisner d = (a > b) ? (a - b) : (b - a); 2973b411b363SPhilipp Reisner if (d > (a>>3) || d > (b>>3)) 2974b411b363SPhilipp Reisner dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s, 2975b411b363SPhilipp Reisner (unsigned long long)a, (unsigned long long)b); 2976b411b363SPhilipp Reisner } 2977b411b363SPhilipp Reisner 297802918be2SPhilipp Reisner static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) 2979b411b363SPhilipp Reisner { 298002918be2SPhilipp Reisner struct p_sizes *p = &mdev->data.rbuf.sizes; 2981b411b363SPhilipp Reisner enum determine_dev_size dd = unchanged; 2982b411b363SPhilipp Reisner sector_t p_size, p_usize, my_usize; 2983b411b363SPhilipp Reisner int ldsc = 0; /* local disk size changed */ 2984e89b591cSPhilipp Reisner enum dds_flags ddsf; 2985b411b363SPhilipp Reisner 2986b411b363SPhilipp Reisner p_size = be64_to_cpu(p->d_size); 2987b411b363SPhilipp Reisner p_usize = be64_to_cpu(p->u_size); 2988b411b363SPhilipp Reisner 2989b411b363SPhilipp Reisner if (p_size == 0 && mdev->state.disk == D_DISKLESS) { 2990b411b363SPhilipp Reisner dev_err(DEV, "some backing storage is needed\n"); 2991b411b363SPhilipp Reisner drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); 299281e84650SAndreas Gruenbacher return false; 2993b411b363SPhilipp Reisner } 2994b411b363SPhilipp Reisner 2995b411b363SPhilipp Reisner /* just store the peer's disk size for now. 2996b411b363SPhilipp Reisner * we still need to figure out whether we accept that. 
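   (editor's gloss: acceptance happens below, where
   drbd_determine_dev_size() recomputes the capacity and a peer size
   that would shrink a device holding usable data forces
   C_DISCONNECTING instead)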
*/ 2997b411b363SPhilipp Reisner mdev->p_size = p_size; 2998b411b363SPhilipp Reisner 2999b411b363SPhilipp Reisner if (get_ldev(mdev)) { 3000b411b363SPhilipp Reisner warn_if_differ_considerably(mdev, "lower level device sizes", 3001b411b363SPhilipp Reisner p_size, drbd_get_max_capacity(mdev->ldev)); 3002b411b363SPhilipp Reisner warn_if_differ_considerably(mdev, "user requested size", 3003b411b363SPhilipp Reisner p_usize, mdev->ldev->dc.disk_size); 3004b411b363SPhilipp Reisner 3005b411b363SPhilipp Reisner /* if this is the first connect, or an otherwise expected 3006b411b363SPhilipp Reisner * param exchange, choose the minimum */ 3007b411b363SPhilipp Reisner if (mdev->state.conn == C_WF_REPORT_PARAMS) 3008b411b363SPhilipp Reisner p_usize = min_not_zero((sector_t)mdev->ldev->dc.disk_size, 3009b411b363SPhilipp Reisner p_usize); 3010b411b363SPhilipp Reisner 3011b411b363SPhilipp Reisner my_usize = mdev->ldev->dc.disk_size; 3012b411b363SPhilipp Reisner 3013b411b363SPhilipp Reisner if (mdev->ldev->dc.disk_size != p_usize) { 3014b411b363SPhilipp Reisner mdev->ldev->dc.disk_size = p_usize; 3015b411b363SPhilipp Reisner dev_info(DEV, "Peer sets u_size to %lu sectors\n", 3016b411b363SPhilipp Reisner (unsigned long)mdev->ldev->dc.disk_size); 3017b411b363SPhilipp Reisner } 3018b411b363SPhilipp Reisner 3019b411b363SPhilipp Reisner /* Never shrink a device with usable data during connect. 3020b411b363SPhilipp Reisner But allow online shrinking if we are connected. */ 3021a393db6fSPhilipp Reisner if (drbd_new_dev_size(mdev, mdev->ldev, 0) < 3022b411b363SPhilipp Reisner drbd_get_capacity(mdev->this_bdev) && 3023b411b363SPhilipp Reisner mdev->state.disk >= D_OUTDATED && 3024b411b363SPhilipp Reisner mdev->state.conn < C_CONNECTED) { 3025b411b363SPhilipp Reisner dev_err(DEV, "The peer's disk size is too small!\n"); 3026b411b363SPhilipp Reisner drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); 3027b411b363SPhilipp Reisner mdev->ldev->dc.disk_size = my_usize; 3028b411b363SPhilipp Reisner put_ldev(mdev); 302981e84650SAndreas Gruenbacher return false; 3030b411b363SPhilipp Reisner } 3031b411b363SPhilipp Reisner put_ldev(mdev); 3032b411b363SPhilipp Reisner } 3033b411b363SPhilipp Reisner 3034e89b591cSPhilipp Reisner ddsf = be16_to_cpu(p->dds_flags); 3035b411b363SPhilipp Reisner if (get_ldev(mdev)) { 303624c4830cSBart Van Assche dd = drbd_determine_dev_size(mdev, ddsf); 3037b411b363SPhilipp Reisner put_ldev(mdev); 3038b411b363SPhilipp Reisner if (dd == dev_size_error) 303981e84650SAndreas Gruenbacher return false; 3040b411b363SPhilipp Reisner drbd_md_sync(mdev); 3041b411b363SPhilipp Reisner } else { 3042b411b363SPhilipp Reisner /* I am diskless, need to accept the peer's size. 
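   (editor's gloss: with no local backing device there is no size of
   our own to negotiate, so drbd_set_my_capacity() below simply adopts
   the peer's p_size)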
*/ 3043b411b363SPhilipp Reisner drbd_set_my_capacity(mdev, p_size); 3044b411b363SPhilipp Reisner } 3045b411b363SPhilipp Reisner 304699432fccSPhilipp Reisner mdev->peer_max_bio_size = be32_to_cpu(p->max_bio_size); 304799432fccSPhilipp Reisner drbd_reconsider_max_bio_size(mdev); 304899432fccSPhilipp Reisner 3049b411b363SPhilipp Reisner if (get_ldev(mdev)) { 3050b411b363SPhilipp Reisner if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) { 3051b411b363SPhilipp Reisner mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev); 3052b411b363SPhilipp Reisner ldsc = 1; 3053b411b363SPhilipp Reisner } 3054b411b363SPhilipp Reisner 3055b411b363SPhilipp Reisner put_ldev(mdev); 3056b411b363SPhilipp Reisner } 3057b411b363SPhilipp Reisner 3058b411b363SPhilipp Reisner if (mdev->state.conn > C_WF_REPORT_PARAMS) { 3059b411b363SPhilipp Reisner if (be64_to_cpu(p->c_size) != 3060b411b363SPhilipp Reisner drbd_get_capacity(mdev->this_bdev) || ldsc) { 3061b411b363SPhilipp Reisner /* we have different sizes, probably peer 3062b411b363SPhilipp Reisner * needs to know my new size... */ 3063e89b591cSPhilipp Reisner drbd_send_sizes(mdev, 0, ddsf); 3064b411b363SPhilipp Reisner } 3065b411b363SPhilipp Reisner if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) || 3066b411b363SPhilipp Reisner (dd == grew && mdev->state.conn == C_CONNECTED)) { 3067b411b363SPhilipp Reisner if (mdev->state.pdsk >= D_INCONSISTENT && 3068e89b591cSPhilipp Reisner mdev->state.disk >= D_INCONSISTENT) { 3069e89b591cSPhilipp Reisner if (ddsf & DDSF_NO_RESYNC) 3070e89b591cSPhilipp Reisner dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n"); 3071b411b363SPhilipp Reisner else 3072e89b591cSPhilipp Reisner resync_after_online_grow(mdev); 3073e89b591cSPhilipp Reisner } else 3074b411b363SPhilipp Reisner set_bit(RESYNC_AFTER_NEG, &mdev->flags); 3075b411b363SPhilipp Reisner } 3076b411b363SPhilipp Reisner } 3077b411b363SPhilipp Reisner 307881e84650SAndreas Gruenbacher return true; 3079b411b363SPhilipp Reisner } 3080b411b363SPhilipp Reisner 308102918be2SPhilipp Reisner static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) 3082b411b363SPhilipp Reisner { 308302918be2SPhilipp Reisner struct p_uuids *p = &mdev->data.rbuf.uuids; 3084b411b363SPhilipp Reisner u64 *p_uuid; 308562b0da3aSLars Ellenberg int i, updated_uuids = 0; 3086b411b363SPhilipp Reisner 3087b411b363SPhilipp Reisner p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO); 3088b411b363SPhilipp Reisner 3089b411b363SPhilipp Reisner for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++) 3090b411b363SPhilipp Reisner p_uuid[i] = be64_to_cpu(p->uuid[i]); 3091b411b363SPhilipp Reisner 3092b411b363SPhilipp Reisner kfree(mdev->p_uuid); 3093b411b363SPhilipp Reisner mdev->p_uuid = p_uuid; 3094b411b363SPhilipp Reisner 3095b411b363SPhilipp Reisner if (mdev->state.conn < C_CONNECTED && 3096b411b363SPhilipp Reisner mdev->state.disk < D_INCONSISTENT && 3097b411b363SPhilipp Reisner mdev->state.role == R_PRIMARY && 3098b411b363SPhilipp Reisner (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) { 3099b411b363SPhilipp Reisner dev_err(DEV, "Can only connect to data with current UUID=%016llX\n", 3100b411b363SPhilipp Reisner (unsigned long long)mdev->ed_uuid); 3101b411b363SPhilipp Reisner drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); 310281e84650SAndreas Gruenbacher return false; 3103b411b363SPhilipp Reisner } 3104b411b363SPhilipp Reisner 3105b411b363SPhilipp Reisner if (get_ldev(mdev)) { 3106b411b363SPhilipp Reisner 
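/* Editor's note, added for orientation: the condition below detects
 * the "skip initial sync" handshake; both nodes still carry the
 * freshly created current UUID and the peer announces the skip via
 * flag 8 in its UI_FLAGS, so the bitmap is cleared instead of running
 * a full initial resync. */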
int skip_initial_sync = 3107b411b363SPhilipp Reisner mdev->state.conn == C_CONNECTED && 3108b411b363SPhilipp Reisner mdev->agreed_pro_version >= 90 && 3109b411b363SPhilipp Reisner mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && 3110b411b363SPhilipp Reisner (p_uuid[UI_FLAGS] & 8); 3111b411b363SPhilipp Reisner if (skip_initial_sync) { 3112b411b363SPhilipp Reisner dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n"); 3113b411b363SPhilipp Reisner drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write, 311420ceb2b2SLars Ellenberg "clear_n_write from receive_uuids", 311520ceb2b2SLars Ellenberg BM_LOCKED_TEST_ALLOWED); 3116b411b363SPhilipp Reisner _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]); 3117b411b363SPhilipp Reisner _drbd_uuid_set(mdev, UI_BITMAP, 0); 3118b411b363SPhilipp Reisner _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE), 3119b411b363SPhilipp Reisner CS_VERBOSE, NULL); 3120b411b363SPhilipp Reisner drbd_md_sync(mdev); 312162b0da3aSLars Ellenberg updated_uuids = 1; 3122b411b363SPhilipp Reisner } 3123b411b363SPhilipp Reisner put_ldev(mdev); 312418a50fa2SPhilipp Reisner } else if (mdev->state.disk < D_INCONSISTENT && 312518a50fa2SPhilipp Reisner mdev->state.role == R_PRIMARY) { 312618a50fa2SPhilipp Reisner /* I am a diskless primary, the peer just created a new current UUID 312718a50fa2SPhilipp Reisner for me. */ 312862b0da3aSLars Ellenberg updated_uuids = drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]); 3129b411b363SPhilipp Reisner } 3130b411b363SPhilipp Reisner 3131b411b363SPhilipp Reisner /* Before we test for the disk state, we should wait until any possibly 3132b411b363SPhilipp Reisner ongoing cluster-wide state change has finished. That is important if 3133b411b363SPhilipp Reisner we are primary and are detaching from our disk. We need to see the 3134b411b363SPhilipp Reisner new disk state... */ 3135b411b363SPhilipp Reisner wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags)); 3136b411b363SPhilipp Reisner if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT) 313762b0da3aSLars Ellenberg updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]); 313862b0da3aSLars Ellenberg 313962b0da3aSLars Ellenberg if (updated_uuids) 314062b0da3aSLars Ellenberg drbd_print_uuids(mdev, "receiver updated UUIDs to"); 3141b411b363SPhilipp Reisner 314281e84650SAndreas Gruenbacher return true; 3143b411b363SPhilipp Reisner } 3144b411b363SPhilipp Reisner 3145b411b363SPhilipp Reisner /** 3146b411b363SPhilipp Reisner * convert_state() - Converts the peer's view of the cluster state to our point of view 3147b411b363SPhilipp Reisner * @ps: The state as seen by the peer. 
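 *
 * (Editor's illustration, not in the original: the conversion mirrors
 * the perspective, e.g. ms.role = ps.peer and ms.pdsk = ps.disk, and
 * the c_tab[] table swaps direction-dependent connection states such
 * as C_STARTING_SYNC_S <-> C_STARTING_SYNC_T.)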
3148b411b363SPhilipp Reisner */ 3149b411b363SPhilipp Reisner static union drbd_state convert_state(union drbd_state ps) 3150b411b363SPhilipp Reisner { 3151b411b363SPhilipp Reisner union drbd_state ms; 3152b411b363SPhilipp Reisner 3153b411b363SPhilipp Reisner static enum drbd_conns c_tab[] = { 3154b411b363SPhilipp Reisner [C_CONNECTED] = C_CONNECTED, 3155b411b363SPhilipp Reisner 3156b411b363SPhilipp Reisner [C_STARTING_SYNC_S] = C_STARTING_SYNC_T, 3157b411b363SPhilipp Reisner [C_STARTING_SYNC_T] = C_STARTING_SYNC_S, 3158b411b363SPhilipp Reisner [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */ 3159b411b363SPhilipp Reisner [C_VERIFY_S] = C_VERIFY_T, 3160b411b363SPhilipp Reisner [C_MASK] = C_MASK, 3161b411b363SPhilipp Reisner }; 3162b411b363SPhilipp Reisner 3163b411b363SPhilipp Reisner ms.i = ps.i; 3164b411b363SPhilipp Reisner 3165b411b363SPhilipp Reisner ms.conn = c_tab[ps.conn]; 3166b411b363SPhilipp Reisner ms.peer = ps.role; 3167b411b363SPhilipp Reisner ms.role = ps.peer; 3168b411b363SPhilipp Reisner ms.pdsk = ps.disk; 3169b411b363SPhilipp Reisner ms.disk = ps.pdsk; 3170b411b363SPhilipp Reisner ms.peer_isp = (ps.aftr_isp | ps.user_isp); 3171b411b363SPhilipp Reisner 3172b411b363SPhilipp Reisner return ms; 3173b411b363SPhilipp Reisner } 3174b411b363SPhilipp Reisner 317502918be2SPhilipp Reisner static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) 3176b411b363SPhilipp Reisner { 317702918be2SPhilipp Reisner struct p_req_state *p = &mdev->data.rbuf.req_state; 3178b411b363SPhilipp Reisner union drbd_state mask, val; 3179bf885f8aSAndreas Gruenbacher enum drbd_state_rv rv; 3180b411b363SPhilipp Reisner 3181b411b363SPhilipp Reisner mask.i = be32_to_cpu(p->mask); 3182b411b363SPhilipp Reisner val.i = be32_to_cpu(p->val); 3183b411b363SPhilipp Reisner 3184b411b363SPhilipp Reisner if (test_bit(DISCARD_CONCURRENT, &mdev->flags) && 3185b411b363SPhilipp Reisner test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) { 3186b411b363SPhilipp Reisner drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG); 318781e84650SAndreas Gruenbacher return true; 3188b411b363SPhilipp Reisner } 3189b411b363SPhilipp Reisner 3190b411b363SPhilipp Reisner mask = convert_state(mask); 3191b411b363SPhilipp Reisner val = convert_state(val); 3192b411b363SPhilipp Reisner 3193b411b363SPhilipp Reisner rv = drbd_change_state(mdev, CS_VERBOSE, mask, val); 3194b411b363SPhilipp Reisner 3195b411b363SPhilipp Reisner drbd_send_sr_reply(mdev, rv); 3196b411b363SPhilipp Reisner drbd_md_sync(mdev); 3197b411b363SPhilipp Reisner 319881e84650SAndreas Gruenbacher return true; 3199b411b363SPhilipp Reisner } 3200b411b363SPhilipp Reisner 320102918be2SPhilipp Reisner static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) 3202b411b363SPhilipp Reisner { 320302918be2SPhilipp Reisner struct p_state *p = &mdev->data.rbuf.state; 32044ac4aadaSLars Ellenberg union drbd_state os, ns, peer_state; 3205b411b363SPhilipp Reisner enum drbd_disk_state real_peer_disk; 320665d922c3SPhilipp Reisner enum chg_state_flags cs_flags; 3207b411b363SPhilipp Reisner int rv; 3208b411b363SPhilipp Reisner 3209b411b363SPhilipp Reisner peer_state.i = be32_to_cpu(p->state); 3210b411b363SPhilipp Reisner 3211b411b363SPhilipp Reisner real_peer_disk = peer_state.disk; 3212b411b363SPhilipp Reisner if (peer_state.disk == D_NEGOTIATING) { 3213b411b363SPhilipp Reisner real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? 
D_INCONSISTENT : D_CONSISTENT; 3214b411b363SPhilipp Reisner dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk)); 3215b411b363SPhilipp Reisner } 3216b411b363SPhilipp Reisner 3217b411b363SPhilipp Reisner spin_lock_irq(&mdev->req_lock); 3218b411b363SPhilipp Reisner retry: 32194ac4aadaSLars Ellenberg os = ns = mdev->state; 3220b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 3221b411b363SPhilipp Reisner 3222545752d5SLars Ellenberg /* If some other part of the code (asender thread, timeout) 3223545752d5SLars Ellenberg * already decided to close the connection again, 3224545752d5SLars Ellenberg * we must not "re-establish" it here. */ 3225545752d5SLars Ellenberg if (os.conn <= C_TEAR_DOWN) 3226545752d5SLars Ellenberg return false; 3227545752d5SLars Ellenberg 322840424e4aSLars Ellenberg /* If this is the "end of sync" confirmation, usually the peer disk 322940424e4aSLars Ellenberg * transitions from D_INCONSISTENT to D_UP_TO_DATE. For empty (0 bits 323040424e4aSLars Ellenberg * set) resync started in PausedSyncT, or if the timing of pause-/ 323140424e4aSLars Ellenberg * unpause-sync events has been "just right", the peer disk may 323240424e4aSLars Ellenberg * transition from D_CONSISTENT to D_UP_TO_DATE as well. 323340424e4aSLars Ellenberg */ 323440424e4aSLars Ellenberg if ((os.pdsk == D_INCONSISTENT || os.pdsk == D_CONSISTENT) && 323540424e4aSLars Ellenberg real_peer_disk == D_UP_TO_DATE && 3236e9ef7bb6SLars Ellenberg os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) { 3237e9ef7bb6SLars Ellenberg /* If we are (becoming) SyncSource, but peer is still in sync 3238e9ef7bb6SLars Ellenberg * preparation, ignore its uptodate-ness to avoid flapping, it 3239e9ef7bb6SLars Ellenberg * will change to inconsistent once the peer reaches active 3240e9ef7bb6SLars Ellenberg * syncing states. 3241e9ef7bb6SLars Ellenberg * It may have changed syncer-paused flags, however, so we 3242e9ef7bb6SLars Ellenberg * cannot ignore this completely. */ 3243e9ef7bb6SLars Ellenberg if (peer_state.conn > C_CONNECTED && 3244e9ef7bb6SLars Ellenberg peer_state.conn < C_SYNC_SOURCE) 3245e9ef7bb6SLars Ellenberg real_peer_disk = D_INCONSISTENT; 3246e9ef7bb6SLars Ellenberg 3247e9ef7bb6SLars Ellenberg /* if peer_state changes to connected at the same time, 3248e9ef7bb6SLars Ellenberg * it explicitly notifies us that it finished resync. 3249e9ef7bb6SLars Ellenberg * Maybe we should finish it up, too? */ 3250e9ef7bb6SLars Ellenberg else if (os.conn >= C_SYNC_SOURCE && 3251e9ef7bb6SLars Ellenberg peer_state.conn == C_CONNECTED) { 3252e9ef7bb6SLars Ellenberg if (drbd_bm_total_weight(mdev) <= mdev->rs_failed) 3253e9ef7bb6SLars Ellenberg drbd_resync_finished(mdev); 325481e84650SAndreas Gruenbacher return true; 3255e9ef7bb6SLars Ellenberg } 3256e9ef7bb6SLars Ellenberg } 3257e9ef7bb6SLars Ellenberg 325802b91b55SLars Ellenberg /* explicit verify finished notification, stop sector reached. 
*/ 325902b91b55SLars Ellenberg if (os.conn == C_VERIFY_T && os.disk == D_UP_TO_DATE && 326002b91b55SLars Ellenberg peer_state.conn == C_CONNECTED && real_peer_disk == D_UP_TO_DATE) { 326102b91b55SLars Ellenberg ov_oos_print(mdev); 326202b91b55SLars Ellenberg drbd_resync_finished(mdev); 326302b91b55SLars Ellenberg return true; 326402b91b55SLars Ellenberg } 326502b91b55SLars Ellenberg 3266e9ef7bb6SLars Ellenberg /* peer says his disk is inconsistent, while we think it is uptodate, 3267e9ef7bb6SLars Ellenberg * and this happens while the peer still thinks we have a sync going on, 3268e9ef7bb6SLars Ellenberg * but we think we are already done with the sync. 3269e9ef7bb6SLars Ellenberg * We ignore this to avoid flapping pdsk. 3270e9ef7bb6SLars Ellenberg * This should not happen, if the peer is a recent version of drbd. */ 3271e9ef7bb6SLars Ellenberg if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT && 3272e9ef7bb6SLars Ellenberg os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE) 3273e9ef7bb6SLars Ellenberg real_peer_disk = D_UP_TO_DATE; 3274e9ef7bb6SLars Ellenberg 32754ac4aadaSLars Ellenberg if (ns.conn == C_WF_REPORT_PARAMS) 32764ac4aadaSLars Ellenberg ns.conn = C_CONNECTED; 3277b411b363SPhilipp Reisner 327867531718SPhilipp Reisner if (peer_state.conn == C_AHEAD) 327967531718SPhilipp Reisner ns.conn = C_BEHIND; 328067531718SPhilipp Reisner 3281b411b363SPhilipp Reisner if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING && 3282b411b363SPhilipp Reisner get_ldev_if_state(mdev, D_NEGOTIATING)) { 3283b411b363SPhilipp Reisner int cr; /* consider resync */ 3284b411b363SPhilipp Reisner 3285b411b363SPhilipp Reisner /* if we established a new connection */ 32864ac4aadaSLars Ellenberg cr = (os.conn < C_CONNECTED); 3287b411b363SPhilipp Reisner /* if we had an established connection 3288b411b363SPhilipp Reisner * and one of the nodes newly attaches a disk */ 32894ac4aadaSLars Ellenberg cr |= (os.conn == C_CONNECTED && 3290b411b363SPhilipp Reisner (peer_state.disk == D_NEGOTIATING || 32914ac4aadaSLars Ellenberg os.disk == D_NEGOTIATING)); 3292b411b363SPhilipp Reisner /* if we have both been inconsistent, and the peer has been 3293b411b363SPhilipp Reisner * forced to be UpToDate with --overwrite-data */ 3294b411b363SPhilipp Reisner cr |= test_bit(CONSIDER_RESYNC, &mdev->flags); 3295b411b363SPhilipp Reisner /* if we had been plain connected, and the admin requested to 3296b411b363SPhilipp Reisner * start a sync by "invalidate" or "invalidate-remote" */ 32974ac4aadaSLars Ellenberg cr |= (os.conn == C_CONNECTED && 3298b411b363SPhilipp Reisner (peer_state.conn >= C_STARTING_SYNC_S && 3299b411b363SPhilipp Reisner peer_state.conn <= C_WF_BITMAP_T)); 3300b411b363SPhilipp Reisner 3301b411b363SPhilipp Reisner if (cr) 33024ac4aadaSLars Ellenberg ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk); 3303b411b363SPhilipp Reisner 3304b411b363SPhilipp Reisner put_ldev(mdev); 33054ac4aadaSLars Ellenberg if (ns.conn == C_MASK) { 33064ac4aadaSLars Ellenberg ns.conn = C_CONNECTED; 3307b411b363SPhilipp Reisner if (mdev->state.disk == D_NEGOTIATING) { 330882f59cc6SLars Ellenberg drbd_force_state(mdev, NS(disk, D_FAILED)); 3309b411b363SPhilipp Reisner } else if (peer_state.disk == D_NEGOTIATING) { 3310b411b363SPhilipp Reisner dev_err(DEV, "Disk attach process on the peer node was aborted.\n"); 3311b411b363SPhilipp Reisner peer_state.disk = D_DISKLESS; 3312580b9767SLars Ellenberg real_peer_disk = D_DISKLESS; 3313b411b363SPhilipp Reisner } else { 3314cf14c2e9SPhilipp Reisner if 
(test_and_clear_bit(CONN_DRY_RUN, &mdev->flags))
331581e84650SAndreas Gruenbacher return false;
33164ac4aadaSLars Ellenberg D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
3317b411b363SPhilipp Reisner drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
331881e84650SAndreas Gruenbacher return false;
3319b411b363SPhilipp Reisner }
3320b411b363SPhilipp Reisner }
3321b411b363SPhilipp Reisner }
3322b411b363SPhilipp Reisner 
3323b411b363SPhilipp Reisner spin_lock_irq(&mdev->req_lock);
33244ac4aadaSLars Ellenberg if (mdev->state.i != os.i)
3325b411b363SPhilipp Reisner goto retry;
3326b411b363SPhilipp Reisner clear_bit(CONSIDER_RESYNC, &mdev->flags);
3327b411b363SPhilipp Reisner ns.peer = peer_state.role;
3328b411b363SPhilipp Reisner ns.pdsk = real_peer_disk;
3329b411b363SPhilipp Reisner ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
33304ac4aadaSLars Ellenberg if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
3331b411b363SPhilipp Reisner ns.disk = mdev->new_state_tmp.disk;
33324ac4aadaSLars Ellenberg cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
33334ac4aadaSLars Ellenberg if (ns.pdsk == D_CONSISTENT && is_susp(ns) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
3334481c6f50SPhilipp Reisner test_bit(NEW_CUR_UUID, &mdev->flags)) {
3335481c6f50SPhilipp Reisner /* Do not allow tl_restart(resend) for a rebooted peer. We can only allow this
3336481c6f50SPhilipp Reisner for temporary network outages! */
3337481c6f50SPhilipp Reisner spin_unlock_irq(&mdev->req_lock);
3338481c6f50SPhilipp Reisner dev_err(DEV, "Aborting Connect, cannot thaw IO with a peer that is only Consistent\n");
3339481c6f50SPhilipp Reisner tl_clear(mdev);
3340481c6f50SPhilipp Reisner drbd_uuid_new_current(mdev);
3341481c6f50SPhilipp Reisner clear_bit(NEW_CUR_UUID, &mdev->flags);
3342481c6f50SPhilipp Reisner drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0));
334381e84650SAndreas Gruenbacher return false;
3344481c6f50SPhilipp Reisner }
334565d922c3SPhilipp Reisner rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
3346b411b363SPhilipp Reisner ns = mdev->state;
3347b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock);
3348b411b363SPhilipp Reisner 
3349b411b363SPhilipp Reisner if (rv < SS_SUCCESS) {
3350b411b363SPhilipp Reisner drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
335181e84650SAndreas Gruenbacher return false;
3352b411b363SPhilipp Reisner }
3353b411b363SPhilipp Reisner 
33544ac4aadaSLars Ellenberg if (os.conn > C_WF_REPORT_PARAMS) {
33554ac4aadaSLars Ellenberg if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
3356b411b363SPhilipp Reisner peer_state.disk != D_NEGOTIATING) {
3357b411b363SPhilipp Reisner /* we want resync, peer has not yet decided to sync... */
3358b411b363SPhilipp Reisner /* Nowadays only used when forcing a node into primary role and
3359b411b363SPhilipp Reisner setting its disk to UpToDate with that */
3360b411b363SPhilipp Reisner drbd_send_uuids(mdev);
3361f479ea06SLars Ellenberg drbd_send_current_state(mdev);
3362b411b363SPhilipp Reisner }
3363b411b363SPhilipp Reisner }
3364b411b363SPhilipp Reisner 
3365b411b363SPhilipp Reisner mdev->net_conf->want_lose = 0;
3366b411b363SPhilipp Reisner 
3367b411b363SPhilipp Reisner drbd_md_sync(mdev); /* update connected indicator, la_size, ...
*/ 3368b411b363SPhilipp Reisner 336981e84650SAndreas Gruenbacher return true; 3370b411b363SPhilipp Reisner } 3371b411b363SPhilipp Reisner 337202918be2SPhilipp Reisner static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) 3373b411b363SPhilipp Reisner { 337402918be2SPhilipp Reisner struct p_rs_uuid *p = &mdev->data.rbuf.rs_uuid; 3375b411b363SPhilipp Reisner 3376b411b363SPhilipp Reisner wait_event(mdev->misc_wait, 3377b411b363SPhilipp Reisner mdev->state.conn == C_WF_SYNC_UUID || 3378c4752ef1SPhilipp Reisner mdev->state.conn == C_BEHIND || 3379b411b363SPhilipp Reisner mdev->state.conn < C_CONNECTED || 3380b411b363SPhilipp Reisner mdev->state.disk < D_NEGOTIATING); 3381b411b363SPhilipp Reisner 3382b411b363SPhilipp Reisner /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */ 3383b411b363SPhilipp Reisner 3384b411b363SPhilipp Reisner /* Here the _drbd_uuid_ functions are right, current should 3385b411b363SPhilipp Reisner _not_ be rotated into the history */ 3386b411b363SPhilipp Reisner if (get_ldev_if_state(mdev, D_NEGOTIATING)) { 3387b411b363SPhilipp Reisner _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid)); 3388b411b363SPhilipp Reisner _drbd_uuid_set(mdev, UI_BITMAP, 0UL); 3389b411b363SPhilipp Reisner 339062b0da3aSLars Ellenberg drbd_print_uuids(mdev, "updated sync uuid"); 3391b411b363SPhilipp Reisner drbd_start_resync(mdev, C_SYNC_TARGET); 3392b411b363SPhilipp Reisner 3393b411b363SPhilipp Reisner put_ldev(mdev); 3394b411b363SPhilipp Reisner } else 3395b411b363SPhilipp Reisner dev_err(DEV, "Ignoring SyncUUID packet!\n"); 3396b411b363SPhilipp Reisner 339781e84650SAndreas Gruenbacher return true; 3398b411b363SPhilipp Reisner } 3399b411b363SPhilipp Reisner 34002c46407dSAndreas Gruenbacher /** 34012c46407dSAndreas Gruenbacher * receive_bitmap_plain 34022c46407dSAndreas Gruenbacher * 34032c46407dSAndreas Gruenbacher * Return 0 when done, 1 when another iteration is needed, and a negative error 34042c46407dSAndreas Gruenbacher * code upon failure. 
34052c46407dSAndreas Gruenbacher */ 34062c46407dSAndreas Gruenbacher static int 340702918be2SPhilipp Reisner receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size, 3408b411b363SPhilipp Reisner unsigned long *buffer, struct bm_xfer_ctx *c) 3409b411b363SPhilipp Reisner { 3410b411b363SPhilipp Reisner unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset); 3411b411b363SPhilipp Reisner unsigned want = num_words * sizeof(long); 34122c46407dSAndreas Gruenbacher int err; 3413b411b363SPhilipp Reisner 341402918be2SPhilipp Reisner if (want != data_size) { 341502918be2SPhilipp Reisner dev_err(DEV, "%s:want (%u) != data_size (%u)\n", __func__, want, data_size); 34162c46407dSAndreas Gruenbacher return -EIO; 3417b411b363SPhilipp Reisner } 3418b411b363SPhilipp Reisner if (want == 0) 34192c46407dSAndreas Gruenbacher return 0; 34202c46407dSAndreas Gruenbacher err = drbd_recv(mdev, buffer, want); 34212c46407dSAndreas Gruenbacher if (err != want) { 34222c46407dSAndreas Gruenbacher if (err >= 0) 34232c46407dSAndreas Gruenbacher err = -EIO; 34242c46407dSAndreas Gruenbacher return err; 34252c46407dSAndreas Gruenbacher } 3426b411b363SPhilipp Reisner 3427b411b363SPhilipp Reisner drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer); 3428b411b363SPhilipp Reisner 3429b411b363SPhilipp Reisner c->word_offset += num_words; 3430b411b363SPhilipp Reisner c->bit_offset = c->word_offset * BITS_PER_LONG; 3431b411b363SPhilipp Reisner if (c->bit_offset > c->bm_bits) 3432b411b363SPhilipp Reisner c->bit_offset = c->bm_bits; 3433b411b363SPhilipp Reisner 34342c46407dSAndreas Gruenbacher return 1; 3435b411b363SPhilipp Reisner } 3436b411b363SPhilipp Reisner 34372c46407dSAndreas Gruenbacher /** 34382c46407dSAndreas Gruenbacher * recv_bm_rle_bits 34392c46407dSAndreas Gruenbacher * 34402c46407dSAndreas Gruenbacher * Return 0 when done, 1 when another iteration is needed, and a negative error 34412c46407dSAndreas Gruenbacher * code upon failure. 
34422c46407dSAndreas Gruenbacher */ 34432c46407dSAndreas Gruenbacher static int 3444b411b363SPhilipp Reisner recv_bm_rle_bits(struct drbd_conf *mdev, 3445b411b363SPhilipp Reisner struct p_compressed_bm *p, 3446b411b363SPhilipp Reisner struct bm_xfer_ctx *c) 3447b411b363SPhilipp Reisner { 3448b411b363SPhilipp Reisner struct bitstream bs; 3449b411b363SPhilipp Reisner u64 look_ahead; 3450b411b363SPhilipp Reisner u64 rl; 3451b411b363SPhilipp Reisner u64 tmp; 3452b411b363SPhilipp Reisner unsigned long s = c->bit_offset; 3453b411b363SPhilipp Reisner unsigned long e; 3454004352faSLars Ellenberg int len = be16_to_cpu(p->head.length) - (sizeof(*p) - sizeof(p->head)); 3455b411b363SPhilipp Reisner int toggle = DCBP_get_start(p); 3456b411b363SPhilipp Reisner int have; 3457b411b363SPhilipp Reisner int bits; 3458b411b363SPhilipp Reisner 3459b411b363SPhilipp Reisner bitstream_init(&bs, p->code, len, DCBP_get_pad_bits(p)); 3460b411b363SPhilipp Reisner 3461b411b363SPhilipp Reisner bits = bitstream_get_bits(&bs, &look_ahead, 64); 3462b411b363SPhilipp Reisner if (bits < 0) 34632c46407dSAndreas Gruenbacher return -EIO; 3464b411b363SPhilipp Reisner 3465b411b363SPhilipp Reisner for (have = bits; have > 0; s += rl, toggle = !toggle) { 3466b411b363SPhilipp Reisner bits = vli_decode_bits(&rl, look_ahead); 3467b411b363SPhilipp Reisner if (bits <= 0) 34682c46407dSAndreas Gruenbacher return -EIO; 3469b411b363SPhilipp Reisner 3470b411b363SPhilipp Reisner if (toggle) { 3471b411b363SPhilipp Reisner e = s + rl -1; 3472b411b363SPhilipp Reisner if (e >= c->bm_bits) { 3473b411b363SPhilipp Reisner dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e); 34742c46407dSAndreas Gruenbacher return -EIO; 3475b411b363SPhilipp Reisner } 3476b411b363SPhilipp Reisner _drbd_bm_set_bits(mdev, s, e); 3477b411b363SPhilipp Reisner } 3478b411b363SPhilipp Reisner 3479b411b363SPhilipp Reisner if (have < bits) { 3480b411b363SPhilipp Reisner dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n", 3481b411b363SPhilipp Reisner have, bits, look_ahead, 3482b411b363SPhilipp Reisner (unsigned int)(bs.cur.b - p->code), 3483b411b363SPhilipp Reisner (unsigned int)bs.buf_len); 34842c46407dSAndreas Gruenbacher return -EIO; 3485b411b363SPhilipp Reisner } 3486b411b363SPhilipp Reisner look_ahead >>= bits; 3487b411b363SPhilipp Reisner have -= bits; 3488b411b363SPhilipp Reisner 3489b411b363SPhilipp Reisner bits = bitstream_get_bits(&bs, &tmp, 64 - have); 3490b411b363SPhilipp Reisner if (bits < 0) 34912c46407dSAndreas Gruenbacher return -EIO; 3492b411b363SPhilipp Reisner look_ahead |= tmp << have; 3493b411b363SPhilipp Reisner have += bits; 3494b411b363SPhilipp Reisner } 3495b411b363SPhilipp Reisner 3496b411b363SPhilipp Reisner c->bit_offset = s; 3497b411b363SPhilipp Reisner bm_xfer_ctx_bit_to_word_offset(c); 3498b411b363SPhilipp Reisner 34992c46407dSAndreas Gruenbacher return (s != c->bm_bits); 3500b411b363SPhilipp Reisner } 3501b411b363SPhilipp Reisner 35022c46407dSAndreas Gruenbacher /** 35032c46407dSAndreas Gruenbacher * decode_bitmap_c 35042c46407dSAndreas Gruenbacher * 35052c46407dSAndreas Gruenbacher * Return 0 when done, 1 when another iteration is needed, and a negative error 35062c46407dSAndreas Gruenbacher * code upon failure. 
35072c46407dSAndreas Gruenbacher */
35082c46407dSAndreas Gruenbacher static int
3509b411b363SPhilipp Reisner decode_bitmap_c(struct drbd_conf *mdev,
3510b411b363SPhilipp Reisner struct p_compressed_bm *p,
3511b411b363SPhilipp Reisner struct bm_xfer_ctx *c)
3512b411b363SPhilipp Reisner {
3513b411b363SPhilipp Reisner if (DCBP_get_code(p) == RLE_VLI_Bits)
3514b411b363SPhilipp Reisner return recv_bm_rle_bits(mdev, p, c);
3515b411b363SPhilipp Reisner 
3516b411b363SPhilipp Reisner /* other variants had been implemented for evaluation,
3517b411b363SPhilipp Reisner * but have been dropped as this one turned out to be "best"
3518b411b363SPhilipp Reisner * during all our tests. */
3519b411b363SPhilipp Reisner 
3520b411b363SPhilipp Reisner dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
3521b411b363SPhilipp Reisner drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
35222c46407dSAndreas Gruenbacher return -EIO;
3523b411b363SPhilipp Reisner }
3524b411b363SPhilipp Reisner 
3525b411b363SPhilipp Reisner void INFO_bm_xfer_stats(struct drbd_conf *mdev,
3526b411b363SPhilipp Reisner const char *direction, struct bm_xfer_ctx *c)
3527b411b363SPhilipp Reisner {
3528b411b363SPhilipp Reisner /* what would it take to transfer it "plaintext" */
35290b70a13dSPhilipp Reisner unsigned plain = sizeof(struct p_header80) *
3530b411b363SPhilipp Reisner ((c->bm_words+BM_PACKET_WORDS-1)/BM_PACKET_WORDS+1)
3531b411b363SPhilipp Reisner + c->bm_words * sizeof(long);
3532b411b363SPhilipp Reisner unsigned total = c->bytes[0] + c->bytes[1];
3533b411b363SPhilipp Reisner unsigned r;
3534b411b363SPhilipp Reisner 
3535b411b363SPhilipp Reisner /* total cannot be zero, but just in case: */
3536b411b363SPhilipp Reisner if (total == 0)
3537b411b363SPhilipp Reisner return;
3538b411b363SPhilipp Reisner 
3539b411b363SPhilipp Reisner /* don't report if not compressed */
3540b411b363SPhilipp Reisner if (total >= plain)
3541b411b363SPhilipp Reisner return;
3542b411b363SPhilipp Reisner 
3543b411b363SPhilipp Reisner /* total < plain. check for overflow, still */
3544b411b363SPhilipp Reisner r = (total > UINT_MAX/1000) ? (total / (plain/1000))
3545b411b363SPhilipp Reisner : (1000 * total / plain);
3546b411b363SPhilipp Reisner 
3547b411b363SPhilipp Reisner if (r > 1000)
3548b411b363SPhilipp Reisner r = 1000;
3549b411b363SPhilipp Reisner 
3550b411b363SPhilipp Reisner r = 1000 - r;
3551b411b363SPhilipp Reisner dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
3552b411b363SPhilipp Reisner "total %u; compression: %u.%u%%\n",
3553b411b363SPhilipp Reisner direction,
3554b411b363SPhilipp Reisner c->bytes[1], c->packets[1],
3555b411b363SPhilipp Reisner c->bytes[0], c->packets[0],
3556b411b363SPhilipp Reisner total, r/10, r % 10);
3557b411b363SPhilipp Reisner }
3558b411b363SPhilipp Reisner 
3559b411b363SPhilipp Reisner /* Since we are processing the bitfield from lower addresses to higher,
3560b411b363SPhilipp Reisner it does not matter whether we process it in 32-bit or 64-bit chunks,
3561b411b363SPhilipp Reisner as long as it is little endian. (Understand it as a byte stream,
3562b411b363SPhilipp Reisner beginning with the lowest byte...) If we used big endian,
3563b411b363SPhilipp Reisner we would need to process it from the highest address to the lowest
3564b411b363SPhilipp Reisner in order to stay agnostic to the 32 vs 64 bit issue.
3565b411b363SPhilipp Reisner 
3566b411b363SPhilipp Reisner returns 0 on failure, 1 if we successfully received it.
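
   (Worked example of the endianness argument above, added for illustration
   only: consider the 16 payload bytes
   01 00 00 00 00 00 00 00 80 00 00 00 00 00 00 00.
   Merged as two little-endian 64-bit words they read 0x0000000000000001
   and 0x0000000000000080, setting bitmap bits 0 and 64+7 = 71. Merged as
   four little-endian 32-bit words they read 0x00000001, 0x00000000,
   0x00000080, 0x00000000, which again sets bits 0 and 71. The byte
   position alone decides which bit a byte contributes, so the chunk size
   drops out.)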
*/ 356702918be2SPhilipp Reisner static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) 3568b411b363SPhilipp Reisner { 3569b411b363SPhilipp Reisner struct bm_xfer_ctx c; 3570b411b363SPhilipp Reisner void *buffer; 35712c46407dSAndreas Gruenbacher int err; 357281e84650SAndreas Gruenbacher int ok = false; 357302918be2SPhilipp Reisner struct p_header80 *h = &mdev->data.rbuf.header.h80; 3574b411b363SPhilipp Reisner 357520ceb2b2SLars Ellenberg drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED); 357620ceb2b2SLars Ellenberg /* you are supposed to send additional out-of-sync information 357720ceb2b2SLars Ellenberg * if you actually set bits during this phase */ 3578b411b363SPhilipp Reisner 3579b411b363SPhilipp Reisner /* maybe we should use some per thread scratch page, 3580b411b363SPhilipp Reisner * and allocate that during initial device creation? */ 3581b411b363SPhilipp Reisner buffer = (unsigned long *) __get_free_page(GFP_NOIO); 3582b411b363SPhilipp Reisner if (!buffer) { 3583b411b363SPhilipp Reisner dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__); 3584b411b363SPhilipp Reisner goto out; 3585b411b363SPhilipp Reisner } 3586b411b363SPhilipp Reisner 3587b411b363SPhilipp Reisner c = (struct bm_xfer_ctx) { 3588b411b363SPhilipp Reisner .bm_bits = drbd_bm_bits(mdev), 3589b411b363SPhilipp Reisner .bm_words = drbd_bm_words(mdev), 3590b411b363SPhilipp Reisner }; 3591b411b363SPhilipp Reisner 35922c46407dSAndreas Gruenbacher for(;;) { 359302918be2SPhilipp Reisner if (cmd == P_BITMAP) { 35942c46407dSAndreas Gruenbacher err = receive_bitmap_plain(mdev, data_size, buffer, &c); 359502918be2SPhilipp Reisner } else if (cmd == P_COMPRESSED_BITMAP) { 3596b411b363SPhilipp Reisner /* MAYBE: sanity check that we speak proto >= 90, 3597b411b363SPhilipp Reisner * and the feature is enabled! 
*/ 3598b411b363SPhilipp Reisner struct p_compressed_bm *p; 3599b411b363SPhilipp Reisner 360002918be2SPhilipp Reisner if (data_size > BM_PACKET_PAYLOAD_BYTES) { 3601b411b363SPhilipp Reisner dev_err(DEV, "ReportCBitmap packet too large\n"); 3602b411b363SPhilipp Reisner goto out; 3603b411b363SPhilipp Reisner } 3604b411b363SPhilipp Reisner /* use the page buff */ 3605b411b363SPhilipp Reisner p = buffer; 3606b411b363SPhilipp Reisner memcpy(p, h, sizeof(*h)); 360702918be2SPhilipp Reisner if (drbd_recv(mdev, p->head.payload, data_size) != data_size) 3608b411b363SPhilipp Reisner goto out; 3609004352faSLars Ellenberg if (data_size <= (sizeof(*p) - sizeof(p->head))) { 3610004352faSLars Ellenberg dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", data_size); 361178fcbdaeSAndreas Gruenbacher goto out; 3612b411b363SPhilipp Reisner } 36132c46407dSAndreas Gruenbacher err = decode_bitmap_c(mdev, p, &c); 3614b411b363SPhilipp Reisner } else { 361502918be2SPhilipp Reisner dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", cmd); 3616b411b363SPhilipp Reisner goto out; 3617b411b363SPhilipp Reisner } 3618b411b363SPhilipp Reisner 361902918be2SPhilipp Reisner c.packets[cmd == P_BITMAP]++; 362002918be2SPhilipp Reisner c.bytes[cmd == P_BITMAP] += sizeof(struct p_header80) + data_size; 3621b411b363SPhilipp Reisner 36222c46407dSAndreas Gruenbacher if (err <= 0) { 36232c46407dSAndreas Gruenbacher if (err < 0) 36242c46407dSAndreas Gruenbacher goto out; 3625b411b363SPhilipp Reisner break; 36262c46407dSAndreas Gruenbacher } 362702918be2SPhilipp Reisner if (!drbd_recv_header(mdev, &cmd, &data_size)) 3628b411b363SPhilipp Reisner goto out; 36292c46407dSAndreas Gruenbacher } 3630b411b363SPhilipp Reisner 3631b411b363SPhilipp Reisner INFO_bm_xfer_stats(mdev, "receive", &c); 3632b411b363SPhilipp Reisner 3633b411b363SPhilipp Reisner if (mdev->state.conn == C_WF_BITMAP_T) { 3634de1f8e4aSAndreas Gruenbacher enum drbd_state_rv rv; 3635de1f8e4aSAndreas Gruenbacher 3636b411b363SPhilipp Reisner ok = !drbd_send_bitmap(mdev); 3637b411b363SPhilipp Reisner if (!ok) 3638b411b363SPhilipp Reisner goto out; 3639b411b363SPhilipp Reisner /* Omit CS_ORDERED with this state transition to avoid deadlocks. 
*/ 3640de1f8e4aSAndreas Gruenbacher rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE); 3641de1f8e4aSAndreas Gruenbacher D_ASSERT(rv == SS_SUCCESS); 3642b411b363SPhilipp Reisner } else if (mdev->state.conn != C_WF_BITMAP_S) { 3643b411b363SPhilipp Reisner /* admin may have requested C_DISCONNECTING, 3644b411b363SPhilipp Reisner * other threads may have noticed network errors */ 3645b411b363SPhilipp Reisner dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n", 3646b411b363SPhilipp Reisner drbd_conn_str(mdev->state.conn)); 3647b411b363SPhilipp Reisner } 3648b411b363SPhilipp Reisner 364981e84650SAndreas Gruenbacher ok = true; 3650b411b363SPhilipp Reisner out: 365120ceb2b2SLars Ellenberg drbd_bm_unlock(mdev); 3652b411b363SPhilipp Reisner if (ok && mdev->state.conn == C_WF_BITMAP_S) 3653b411b363SPhilipp Reisner drbd_start_resync(mdev, C_SYNC_SOURCE); 3654b411b363SPhilipp Reisner free_page((unsigned long) buffer); 3655b411b363SPhilipp Reisner return ok; 3656b411b363SPhilipp Reisner } 3657b411b363SPhilipp Reisner 365802918be2SPhilipp Reisner static int receive_skip(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) 3659b411b363SPhilipp Reisner { 3660b411b363SPhilipp Reisner /* TODO zero copy sink :) */ 3661b411b363SPhilipp Reisner static char sink[128]; 3662b411b363SPhilipp Reisner int size, want, r; 3663b411b363SPhilipp Reisner 3664b411b363SPhilipp Reisner dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n", 366502918be2SPhilipp Reisner cmd, data_size); 3666b411b363SPhilipp Reisner 366702918be2SPhilipp Reisner size = data_size; 3668b411b363SPhilipp Reisner while (size > 0) { 3669b411b363SPhilipp Reisner want = min_t(int, size, sizeof(sink)); 3670b411b363SPhilipp Reisner r = drbd_recv(mdev, sink, want); 3671b411b363SPhilipp Reisner ERR_IF(r <= 0) break; 3672b411b363SPhilipp Reisner size -= r; 3673b411b363SPhilipp Reisner } 3674b411b363SPhilipp Reisner return size == 0; 3675b411b363SPhilipp Reisner } 3676b411b363SPhilipp Reisner 367702918be2SPhilipp Reisner static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) 3678b411b363SPhilipp Reisner { 3679b411b363SPhilipp Reisner /* Make sure we've acked all the TCP data associated 3680b411b363SPhilipp Reisner * with the data requests being unplugged */ 3681b411b363SPhilipp Reisner drbd_tcp_quickack(mdev->data.socket); 3682b411b363SPhilipp Reisner 368381e84650SAndreas Gruenbacher return true; 3684b411b363SPhilipp Reisner } 3685b411b363SPhilipp Reisner 368673a01a18SPhilipp Reisner static int receive_out_of_sync(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) 368773a01a18SPhilipp Reisner { 368873a01a18SPhilipp Reisner struct p_block_desc *p = &mdev->data.rbuf.block_desc; 368973a01a18SPhilipp Reisner 3690f735e363SLars Ellenberg switch (mdev->state.conn) { 3691f735e363SLars Ellenberg case C_WF_SYNC_UUID: 3692f735e363SLars Ellenberg case C_WF_BITMAP_T: 3693f735e363SLars Ellenberg case C_BEHIND: 3694f735e363SLars Ellenberg break; 3695f735e363SLars Ellenberg default: 3696f735e363SLars Ellenberg dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n", 3697f735e363SLars Ellenberg drbd_conn_str(mdev->state.conn)); 3698f735e363SLars Ellenberg } 3699f735e363SLars Ellenberg 370073a01a18SPhilipp Reisner drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize)); 370173a01a18SPhilipp Reisner 370281e84650SAndreas Gruenbacher return true; 370373a01a18SPhilipp Reisner } 370473a01a18SPhilipp Reisner 
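/* Editorial note on the dispatch scheme that follows; this is a sketch of
 * how drbdd() below uses the table, with names matching that code:
 *
 *	drbd_recv_header(mdev, &cmd, &packet_size);
 *	if (cmd >= P_MAX_CMD || !drbd_cmd_handler[cmd].function)
 *		goto err_out;
 *	shs = drbd_cmd_handler[cmd].pkt_size - sizeof(union p_header);
 *	drbd_recv(mdev, &header->h80.payload, shs);
 *	drbd_cmd_handler[cmd].function(mdev, cmd, packet_size - shs);
 *
 * pkt_size is the fixed part of the packet, including the header itself;
 * expect_payload says whether a variable-length payload may follow.
 */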
370502918be2SPhilipp Reisner typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, enum drbd_packets cmd, unsigned int to_receive);
3706b411b363SPhilipp Reisner 
370702918be2SPhilipp Reisner struct data_cmd {
370802918be2SPhilipp Reisner int expect_payload;
370902918be2SPhilipp Reisner size_t pkt_size;
371002918be2SPhilipp Reisner drbd_cmd_handler_f function;
3711b411b363SPhilipp Reisner };
3712b411b363SPhilipp Reisner 
371302918be2SPhilipp Reisner static struct data_cmd drbd_cmd_handler[] = {
371402918be2SPhilipp Reisner [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
371502918be2SPhilipp Reisner [P_DATA_REPLY] = { 1, sizeof(struct p_data), receive_DataReply },
371602918be2SPhilipp Reisner [P_RS_DATA_REPLY] = { 1, sizeof(struct p_data), receive_RSDataReply },
371702918be2SPhilipp Reisner [P_BARRIER] = { 0, sizeof(struct p_barrier), receive_Barrier },
371802918be2SPhilipp Reisner [P_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap },
371902918be2SPhilipp Reisner [P_COMPRESSED_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap },
372002918be2SPhilipp Reisner [P_UNPLUG_REMOTE] = { 0, sizeof(struct p_header80), receive_UnplugRemote },
372102918be2SPhilipp Reisner [P_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
372202918be2SPhilipp Reisner [P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
372302918be2SPhilipp Reisner [P_SYNC_PARAM] = { 1, sizeof(struct p_header80), receive_SyncParam },
372402918be2SPhilipp Reisner [P_SYNC_PARAM89] = { 1, sizeof(struct p_header80), receive_SyncParam },
372502918be2SPhilipp Reisner [P_PROTOCOL] = { 1, sizeof(struct p_protocol), receive_protocol },
372602918be2SPhilipp Reisner [P_UUIDS] = { 0, sizeof(struct p_uuids), receive_uuids },
372702918be2SPhilipp Reisner [P_SIZES] = { 0, sizeof(struct p_sizes), receive_sizes },
372802918be2SPhilipp Reisner [P_STATE] = { 0, sizeof(struct p_state), receive_state },
372902918be2SPhilipp Reisner [P_STATE_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_state },
373002918be2SPhilipp Reisner [P_SYNC_UUID] = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
373102918be2SPhilipp Reisner [P_OV_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
373202918be2SPhilipp Reisner [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest },
373302918be2SPhilipp Reisner [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
373402918be2SPhilipp Reisner [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip },
373573a01a18SPhilipp Reisner [P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
373602918be2SPhilipp Reisner /* anything missing from this table is in
373702918be2SPhilipp Reisner * the asender_tbl, see get_asender_cmd */
373802918be2SPhilipp Reisner [P_MAX_CMD] = { 0, 0, NULL },
373902918be2SPhilipp Reisner };
374002918be2SPhilipp Reisner 
374102918be2SPhilipp Reisner /* All handler functions that expect a sub-header get that sub-header in
374202918be2SPhilipp Reisner mdev->data.rbuf.header.head.payload.
374302918be2SPhilipp Reisner 
374402918be2SPhilipp Reisner Usually the callback can find the usual p_header in mdev->data.rbuf.header.head,
374502918be2SPhilipp Reisner but it may not rely on that layout, since there is also p_header95!
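
   (Editorial clarification, from memory of the struct definitions: the
   header exists in more than one on-the-wire layout; p_header80 carries a
   16-bit length field while p_header95 uses a wider one, so the payload
   offset is the only thing handlers may safely assume.)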
374602918be2SPhilipp Reisner */ 3747b411b363SPhilipp Reisner 3748b411b363SPhilipp Reisner static void drbdd(struct drbd_conf *mdev) 3749b411b363SPhilipp Reisner { 375002918be2SPhilipp Reisner union p_header *header = &mdev->data.rbuf.header; 375102918be2SPhilipp Reisner unsigned int packet_size; 375202918be2SPhilipp Reisner enum drbd_packets cmd; 375302918be2SPhilipp Reisner size_t shs; /* sub header size */ 375402918be2SPhilipp Reisner int rv; 3755b411b363SPhilipp Reisner 3756b411b363SPhilipp Reisner while (get_t_state(&mdev->receiver) == Running) { 3757b411b363SPhilipp Reisner drbd_thread_current_set_cpu(mdev); 375802918be2SPhilipp Reisner if (!drbd_recv_header(mdev, &cmd, &packet_size)) 375902918be2SPhilipp Reisner goto err_out; 376002918be2SPhilipp Reisner 376102918be2SPhilipp Reisner if (unlikely(cmd >= P_MAX_CMD || !drbd_cmd_handler[cmd].function)) { 376202918be2SPhilipp Reisner dev_err(DEV, "unknown packet type %d, l: %d!\n", cmd, packet_size); 376302918be2SPhilipp Reisner goto err_out; 37640b33a916SLars Ellenberg } 3765b411b363SPhilipp Reisner 376602918be2SPhilipp Reisner shs = drbd_cmd_handler[cmd].pkt_size - sizeof(union p_header); 3767c13f7e1aSLars Ellenberg if (packet_size - shs > 0 && !drbd_cmd_handler[cmd].expect_payload) { 3768c13f7e1aSLars Ellenberg dev_err(DEV, "No payload expected %s l:%d\n", cmdname(cmd), packet_size); 3769c13f7e1aSLars Ellenberg goto err_out; 3770c13f7e1aSLars Ellenberg } 3771c13f7e1aSLars Ellenberg 3772c13f7e1aSLars Ellenberg if (shs) { 377302918be2SPhilipp Reisner rv = drbd_recv(mdev, &header->h80.payload, shs); 377402918be2SPhilipp Reisner if (unlikely(rv != shs)) { 37750ddc5549SLars Ellenberg if (!signal_pending(current)) 37760ddc5549SLars Ellenberg dev_warn(DEV, "short read while reading sub header: rv=%d\n", rv); 377702918be2SPhilipp Reisner goto err_out; 3778b411b363SPhilipp Reisner } 377902918be2SPhilipp Reisner } 378002918be2SPhilipp Reisner 378102918be2SPhilipp Reisner rv = drbd_cmd_handler[cmd].function(mdev, cmd, packet_size - shs); 378202918be2SPhilipp Reisner 378302918be2SPhilipp Reisner if (unlikely(!rv)) { 3784b411b363SPhilipp Reisner dev_err(DEV, "error receiving %s, l: %d!\n", 378502918be2SPhilipp Reisner cmdname(cmd), packet_size); 378602918be2SPhilipp Reisner goto err_out; 3787b411b363SPhilipp Reisner } 3788b411b363SPhilipp Reisner } 378902918be2SPhilipp Reisner 379002918be2SPhilipp Reisner if (0) { 379102918be2SPhilipp Reisner err_out: 3792b411b363SPhilipp Reisner drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR)); 3793b411b363SPhilipp Reisner } 3794856c50c7SLars Ellenberg /* If we leave here, we probably want to update at least the 3795856c50c7SLars Ellenberg * "Connected" indicator on stable storage. Do so explicitly here. 
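 * (Presumably cheap to do unconditionally: drbd_md_sync is expected to
 * return early when the meta-data has not been marked dirty.)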
*/ 3796856c50c7SLars Ellenberg drbd_md_sync(mdev); 3797b411b363SPhilipp Reisner } 3798b411b363SPhilipp Reisner 3799b411b363SPhilipp Reisner void drbd_flush_workqueue(struct drbd_conf *mdev) 3800b411b363SPhilipp Reisner { 3801b411b363SPhilipp Reisner struct drbd_wq_barrier barr; 3802b411b363SPhilipp Reisner 3803b411b363SPhilipp Reisner barr.w.cb = w_prev_work_done; 3804b411b363SPhilipp Reisner init_completion(&barr.done); 3805b411b363SPhilipp Reisner drbd_queue_work(&mdev->data.work, &barr.w); 3806b411b363SPhilipp Reisner wait_for_completion(&barr.done); 3807b411b363SPhilipp Reisner } 3808b411b363SPhilipp Reisner 3809f70b3511SPhilipp Reisner void drbd_free_tl_hash(struct drbd_conf *mdev) 3810f70b3511SPhilipp Reisner { 3811f70b3511SPhilipp Reisner struct hlist_head *h; 3812f70b3511SPhilipp Reisner 3813f70b3511SPhilipp Reisner spin_lock_irq(&mdev->req_lock); 3814f70b3511SPhilipp Reisner 3815f70b3511SPhilipp Reisner if (!mdev->tl_hash || mdev->state.conn != C_STANDALONE) { 3816f70b3511SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 3817f70b3511SPhilipp Reisner return; 3818f70b3511SPhilipp Reisner } 3819f70b3511SPhilipp Reisner /* paranoia code */ 3820f70b3511SPhilipp Reisner for (h = mdev->ee_hash; h < mdev->ee_hash + mdev->ee_hash_s; h++) 3821f70b3511SPhilipp Reisner if (h->first) 3822f70b3511SPhilipp Reisner dev_err(DEV, "ASSERT FAILED ee_hash[%u].first == %p, expected NULL\n", 3823f70b3511SPhilipp Reisner (int)(h - mdev->ee_hash), h->first); 3824f70b3511SPhilipp Reisner kfree(mdev->ee_hash); 3825f70b3511SPhilipp Reisner mdev->ee_hash = NULL; 3826f70b3511SPhilipp Reisner mdev->ee_hash_s = 0; 3827f70b3511SPhilipp Reisner 3828c12e9c89SLars Ellenberg /* We may not have had the chance to wait for all locally pending 3829c12e9c89SLars Ellenberg * application requests. The hlist_add_fake() prevents access after 3830c12e9c89SLars Ellenberg * free on master bio completion. */ 3831c12e9c89SLars Ellenberg for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++) { 3832c12e9c89SLars Ellenberg struct drbd_request *req; 3833c12e9c89SLars Ellenberg struct hlist_node *pos, *n; 3834c12e9c89SLars Ellenberg hlist_for_each_entry_safe(req, pos, n, h, collision) { 3835c12e9c89SLars Ellenberg hlist_del_init(&req->collision); 3836c12e9c89SLars Ellenberg hlist_add_fake(&req->collision); 3837c12e9c89SLars Ellenberg } 3838c12e9c89SLars Ellenberg } 3839c12e9c89SLars Ellenberg 3840f70b3511SPhilipp Reisner kfree(mdev->tl_hash); 3841f70b3511SPhilipp Reisner mdev->tl_hash = NULL; 3842f70b3511SPhilipp Reisner mdev->tl_hash_s = 0; 3843f70b3511SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 3844f70b3511SPhilipp Reisner } 3845f70b3511SPhilipp Reisner 3846b411b363SPhilipp Reisner static void drbd_disconnect(struct drbd_conf *mdev) 3847b411b363SPhilipp Reisner { 3848b411b363SPhilipp Reisner enum drbd_fencing_p fp; 3849b411b363SPhilipp Reisner union drbd_state os, ns; 3850b411b363SPhilipp Reisner int rv = SS_UNKNOWN_ERROR; 3851b411b363SPhilipp Reisner unsigned int i; 3852b411b363SPhilipp Reisner 3853b411b363SPhilipp Reisner if (mdev->state.conn == C_STANDALONE) 3854b411b363SPhilipp Reisner return; 3855b411b363SPhilipp Reisner 3856545752d5SLars Ellenberg /* We are about to start the cleanup after connection loss. 3857545752d5SLars Ellenberg * Make sure drbd_make_request knows about that. 3858545752d5SLars Ellenberg * Usually we should be in some network failure state already, 3859545752d5SLars Ellenberg * but just in case we are not, we fix it up here. 
3860545752d5SLars Ellenberg */ 3861545752d5SLars Ellenberg drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE)); 3862545752d5SLars Ellenberg 3863b411b363SPhilipp Reisner /* asender does not clean up anything. it must not interfere, either */ 3864b411b363SPhilipp Reisner drbd_thread_stop(&mdev->asender); 3865b411b363SPhilipp Reisner drbd_free_sock(mdev); 3866b411b363SPhilipp Reisner 386785719573SPhilipp Reisner /* wait for current activity to cease. */ 3868b411b363SPhilipp Reisner spin_lock_irq(&mdev->req_lock); 3869b411b363SPhilipp Reisner _drbd_wait_ee_list_empty(mdev, &mdev->active_ee); 3870b411b363SPhilipp Reisner _drbd_wait_ee_list_empty(mdev, &mdev->sync_ee); 3871b411b363SPhilipp Reisner _drbd_wait_ee_list_empty(mdev, &mdev->read_ee); 3872b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 3873b411b363SPhilipp Reisner 3874b411b363SPhilipp Reisner /* We do not have data structures that would allow us to 3875b411b363SPhilipp Reisner * get the rs_pending_cnt down to 0 again. 3876b411b363SPhilipp Reisner * * On C_SYNC_TARGET we do not have any data structures describing 3877b411b363SPhilipp Reisner * the pending RSDataRequest's we have sent. 3878b411b363SPhilipp Reisner * * On C_SYNC_SOURCE there is no data structure that tracks 3879b411b363SPhilipp Reisner * the P_RS_DATA_REPLY blocks that we sent to the SyncTarget. 3880b411b363SPhilipp Reisner * And no, it is not the sum of the reference counts in the 3881b411b363SPhilipp Reisner * resync_LRU. The resync_LRU tracks the whole operation including 3882b411b363SPhilipp Reisner * the disk-IO, while the rs_pending_cnt only tracks the blocks 3883b411b363SPhilipp Reisner * on the fly. */ 3884b411b363SPhilipp Reisner drbd_rs_cancel_all(mdev); 3885b411b363SPhilipp Reisner mdev->rs_total = 0; 3886b411b363SPhilipp Reisner mdev->rs_failed = 0; 3887b411b363SPhilipp Reisner atomic_set(&mdev->rs_pending_cnt, 0); 3888b411b363SPhilipp Reisner wake_up(&mdev->misc_wait); 3889b411b363SPhilipp Reisner 3890b411b363SPhilipp Reisner /* make sure syncer is stopped and w_resume_next_sg queued */ 3891b411b363SPhilipp Reisner del_timer_sync(&mdev->resync_timer); 3892b411b363SPhilipp Reisner resync_timer_fn((unsigned long)mdev); 3893b411b363SPhilipp Reisner 3894b411b363SPhilipp Reisner /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier, 3895b411b363SPhilipp Reisner * w_make_resync_request etc. which may still be on the worker queue 3896b411b363SPhilipp Reisner * to be "canceled" */ 3897b411b363SPhilipp Reisner drbd_flush_workqueue(mdev); 3898b411b363SPhilipp Reisner 3899b411b363SPhilipp Reisner /* This also does reclaim_net_ee(). 
If we do this too early, we might 3900b411b363SPhilipp Reisner * miss some resync ee and pages.*/ 3901b411b363SPhilipp Reisner drbd_process_done_ee(mdev); 3902b411b363SPhilipp Reisner 3903b411b363SPhilipp Reisner kfree(mdev->p_uuid); 3904b411b363SPhilipp Reisner mdev->p_uuid = NULL; 3905b411b363SPhilipp Reisner 3906fb22c402SPhilipp Reisner if (!is_susp(mdev->state)) 3907b411b363SPhilipp Reisner tl_clear(mdev); 3908b411b363SPhilipp Reisner 3909b411b363SPhilipp Reisner dev_info(DEV, "Connection closed\n"); 3910b411b363SPhilipp Reisner 3911b411b363SPhilipp Reisner drbd_md_sync(mdev); 3912b411b363SPhilipp Reisner 3913b411b363SPhilipp Reisner fp = FP_DONT_CARE; 3914b411b363SPhilipp Reisner if (get_ldev(mdev)) { 3915b411b363SPhilipp Reisner fp = mdev->ldev->dc.fencing; 3916b411b363SPhilipp Reisner put_ldev(mdev); 3917b411b363SPhilipp Reisner } 3918b411b363SPhilipp Reisner 391987f7be4cSPhilipp Reisner if (mdev->state.role == R_PRIMARY && fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN) 392087f7be4cSPhilipp Reisner drbd_try_outdate_peer_async(mdev); 3921b411b363SPhilipp Reisner 3922b411b363SPhilipp Reisner spin_lock_irq(&mdev->req_lock); 3923b411b363SPhilipp Reisner os = mdev->state; 3924b411b363SPhilipp Reisner if (os.conn >= C_UNCONNECTED) { 3925b411b363SPhilipp Reisner /* Do not restart in case we are C_DISCONNECTING */ 3926b411b363SPhilipp Reisner ns = os; 3927b411b363SPhilipp Reisner ns.conn = C_UNCONNECTED; 3928b411b363SPhilipp Reisner rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL); 3929b411b363SPhilipp Reisner } 3930b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 3931b411b363SPhilipp Reisner 3932b411b363SPhilipp Reisner if (os.conn == C_DISCONNECTING) { 393384dfb9f5SPhilipp Reisner wait_event(mdev->net_cnt_wait, atomic_read(&mdev->net_cnt) == 0); 3934b411b363SPhilipp Reisner 3935b411b363SPhilipp Reisner crypto_free_hash(mdev->cram_hmac_tfm); 3936b411b363SPhilipp Reisner mdev->cram_hmac_tfm = NULL; 3937b411b363SPhilipp Reisner 3938b411b363SPhilipp Reisner kfree(mdev->net_conf); 3939b411b363SPhilipp Reisner mdev->net_conf = NULL; 3940b411b363SPhilipp Reisner drbd_request_state(mdev, NS(conn, C_STANDALONE)); 3941b411b363SPhilipp Reisner } 3942b411b363SPhilipp Reisner 394320ceb2b2SLars Ellenberg /* serialize with bitmap writeout triggered by the state change, 394420ceb2b2SLars Ellenberg * if any. */ 394520ceb2b2SLars Ellenberg wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags)); 394620ceb2b2SLars Ellenberg 3947b411b363SPhilipp Reisner /* tcp_close and release of sendpage pages can be deferred. I don't 3948b411b363SPhilipp Reisner * want to use SO_LINGER, because apparently it can be deferred for 3949b411b363SPhilipp Reisner * more than 20 seconds (longest time I checked). 3950b411b363SPhilipp Reisner * 3951b411b363SPhilipp Reisner * Actually we don't care for exactly when the network stack does its 3952b411b363SPhilipp Reisner * put_page(), but release our reference on these pages right here. 
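 * (These are the pages on the net_ee list, accounted in pp_in_use_by_net
 * below. The TCP stack took its own page references in sendpage, so
 * dropping ours here is safe even while transmission is still in flight.)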
3953b411b363SPhilipp Reisner */ 3954b411b363SPhilipp Reisner i = drbd_release_ee(mdev, &mdev->net_ee); 3955b411b363SPhilipp Reisner if (i) 3956b411b363SPhilipp Reisner dev_info(DEV, "net_ee not empty, killed %u entries\n", i); 3957435f0740SLars Ellenberg i = atomic_read(&mdev->pp_in_use_by_net); 3958435f0740SLars Ellenberg if (i) 3959435f0740SLars Ellenberg dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i); 3960b411b363SPhilipp Reisner i = atomic_read(&mdev->pp_in_use); 3961b411b363SPhilipp Reisner if (i) 396245bb912bSLars Ellenberg dev_info(DEV, "pp_in_use = %d, expected 0\n", i); 3963b411b363SPhilipp Reisner 3964b411b363SPhilipp Reisner D_ASSERT(list_empty(&mdev->read_ee)); 3965b411b363SPhilipp Reisner D_ASSERT(list_empty(&mdev->active_ee)); 3966b411b363SPhilipp Reisner D_ASSERT(list_empty(&mdev->sync_ee)); 3967b411b363SPhilipp Reisner D_ASSERT(list_empty(&mdev->done_ee)); 3968b411b363SPhilipp Reisner 3969b411b363SPhilipp Reisner /* ok, no more ee's on the fly, it is safe to reset the epoch_size */ 3970b411b363SPhilipp Reisner atomic_set(&mdev->current_epoch->epoch_size, 0); 3971b411b363SPhilipp Reisner D_ASSERT(list_empty(&mdev->current_epoch->list)); 3972b411b363SPhilipp Reisner } 3973b411b363SPhilipp Reisner 3974b411b363SPhilipp Reisner /* 3975b411b363SPhilipp Reisner * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version 3976b411b363SPhilipp Reisner * we can agree on is stored in agreed_pro_version. 3977b411b363SPhilipp Reisner * 3978b411b363SPhilipp Reisner * feature flags and the reserved array should be enough room for future 3979b411b363SPhilipp Reisner * enhancements of the handshake protocol, and possible plugins... 3980b411b363SPhilipp Reisner * 3981b411b363SPhilipp Reisner * for now, they are expected to be zero, but ignored. 3982b411b363SPhilipp Reisner */ 3983b411b363SPhilipp Reisner static int drbd_send_handshake(struct drbd_conf *mdev) 3984b411b363SPhilipp Reisner { 3985b411b363SPhilipp Reisner /* ASSERT current == mdev->receiver ... */ 3986b411b363SPhilipp Reisner struct p_handshake *p = &mdev->data.sbuf.handshake; 3987b411b363SPhilipp Reisner int ok; 3988b411b363SPhilipp Reisner 3989b411b363SPhilipp Reisner if (mutex_lock_interruptible(&mdev->data.mutex)) { 3990b411b363SPhilipp Reisner dev_err(DEV, "interrupted during initial handshake\n"); 3991b411b363SPhilipp Reisner return 0; /* interrupted. not ok. 
*/ 3992b411b363SPhilipp Reisner } 3993b411b363SPhilipp Reisner 3994b411b363SPhilipp Reisner if (mdev->data.socket == NULL) { 3995b411b363SPhilipp Reisner mutex_unlock(&mdev->data.mutex); 3996b411b363SPhilipp Reisner return 0; 3997b411b363SPhilipp Reisner } 3998b411b363SPhilipp Reisner 3999b411b363SPhilipp Reisner memset(p, 0, sizeof(*p)); 4000b411b363SPhilipp Reisner p->protocol_min = cpu_to_be32(PRO_VERSION_MIN); 4001b411b363SPhilipp Reisner p->protocol_max = cpu_to_be32(PRO_VERSION_MAX); 4002b411b363SPhilipp Reisner ok = _drbd_send_cmd( mdev, mdev->data.socket, P_HAND_SHAKE, 40030b70a13dSPhilipp Reisner (struct p_header80 *)p, sizeof(*p), 0 ); 4004b411b363SPhilipp Reisner mutex_unlock(&mdev->data.mutex); 4005b411b363SPhilipp Reisner return ok; 4006b411b363SPhilipp Reisner } 4007b411b363SPhilipp Reisner 4008b411b363SPhilipp Reisner /* 4009b411b363SPhilipp Reisner * return values: 4010b411b363SPhilipp Reisner * 1 yes, we have a valid connection 4011b411b363SPhilipp Reisner * 0 oops, did not work out, please try again 4012b411b363SPhilipp Reisner * -1 peer talks different language, 4013b411b363SPhilipp Reisner * no point in trying again, please go standalone. 4014b411b363SPhilipp Reisner */ 4015b411b363SPhilipp Reisner static int drbd_do_handshake(struct drbd_conf *mdev) 4016b411b363SPhilipp Reisner { 4017b411b363SPhilipp Reisner /* ASSERT current == mdev->receiver ... */ 4018b411b363SPhilipp Reisner struct p_handshake *p = &mdev->data.rbuf.handshake; 401902918be2SPhilipp Reisner const int expect = sizeof(struct p_handshake) - sizeof(struct p_header80); 402002918be2SPhilipp Reisner unsigned int length; 402102918be2SPhilipp Reisner enum drbd_packets cmd; 4022b411b363SPhilipp Reisner int rv; 4023b411b363SPhilipp Reisner 4024b411b363SPhilipp Reisner rv = drbd_send_handshake(mdev); 4025b411b363SPhilipp Reisner if (!rv) 4026b411b363SPhilipp Reisner return 0; 4027b411b363SPhilipp Reisner 402802918be2SPhilipp Reisner rv = drbd_recv_header(mdev, &cmd, &length); 4029b411b363SPhilipp Reisner if (!rv) 4030b411b363SPhilipp Reisner return 0; 4031b411b363SPhilipp Reisner 403202918be2SPhilipp Reisner if (cmd != P_HAND_SHAKE) { 4033b411b363SPhilipp Reisner dev_err(DEV, "expected HandShake packet, received: %s (0x%04x)\n", 403402918be2SPhilipp Reisner cmdname(cmd), cmd); 4035b411b363SPhilipp Reisner return -1; 4036b411b363SPhilipp Reisner } 4037b411b363SPhilipp Reisner 403802918be2SPhilipp Reisner if (length != expect) { 4039b411b363SPhilipp Reisner dev_err(DEV, "expected HandShake length: %u, received: %u\n", 404002918be2SPhilipp Reisner expect, length); 4041b411b363SPhilipp Reisner return -1; 4042b411b363SPhilipp Reisner } 4043b411b363SPhilipp Reisner 4044b411b363SPhilipp Reisner rv = drbd_recv(mdev, &p->head.payload, expect); 4045b411b363SPhilipp Reisner 4046b411b363SPhilipp Reisner if (rv != expect) { 40470ddc5549SLars Ellenberg if (!signal_pending(current)) 40480ddc5549SLars Ellenberg dev_warn(DEV, "short read receiving handshake packet: l=%u\n", rv); 4049b411b363SPhilipp Reisner return 0; 4050b411b363SPhilipp Reisner } 4051b411b363SPhilipp Reisner 4052b411b363SPhilipp Reisner p->protocol_min = be32_to_cpu(p->protocol_min); 4053b411b363SPhilipp Reisner p->protocol_max = be32_to_cpu(p->protocol_max); 4054b411b363SPhilipp Reisner if (p->protocol_max == 0) 4055b411b363SPhilipp Reisner p->protocol_max = p->protocol_min; 4056b411b363SPhilipp Reisner 4057b411b363SPhilipp Reisner if (PRO_VERSION_MAX < p->protocol_min || 4058b411b363SPhilipp Reisner PRO_VERSION_MIN > p->protocol_max) 4059b411b363SPhilipp 
Reisner goto incompat;
4060b411b363SPhilipp Reisner 
4061b411b363SPhilipp Reisner mdev->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
4062b411b363SPhilipp Reisner 
4063b411b363SPhilipp Reisner dev_info(DEV, "Handshake successful: "
4064b411b363SPhilipp Reisner "Agreed network protocol version %d\n", mdev->agreed_pro_version);
4065b411b363SPhilipp Reisner 
4066b411b363SPhilipp Reisner return 1;
4067b411b363SPhilipp Reisner 
4068b411b363SPhilipp Reisner incompat:
4069b411b363SPhilipp Reisner dev_err(DEV, "incompatible DRBD dialects: "
4070b411b363SPhilipp Reisner "I support %d-%d, peer supports %d-%d\n",
4071b411b363SPhilipp Reisner PRO_VERSION_MIN, PRO_VERSION_MAX,
4072b411b363SPhilipp Reisner p->protocol_min, p->protocol_max);
4073b411b363SPhilipp Reisner return -1;
4074b411b363SPhilipp Reisner }
4075b411b363SPhilipp Reisner 
4076b411b363SPhilipp Reisner #if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
4077b411b363SPhilipp Reisner static int drbd_do_auth(struct drbd_conf *mdev)
4078b411b363SPhilipp Reisner {
4079b411b363SPhilipp Reisner dev_err(DEV, "This kernel was built without CONFIG_CRYPTO_HMAC.\n");
4080b411b363SPhilipp Reisner dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
4081b10d96cbSJohannes Thoma return -1;
4082b411b363SPhilipp Reisner }
4083b411b363SPhilipp Reisner #else
4084b411b363SPhilipp Reisner #define CHALLENGE_LEN 64
4085b10d96cbSJohannes Thoma 
4086b10d96cbSJohannes Thoma /* Return value:
4087b10d96cbSJohannes Thoma 1 - auth succeeded,
4088b10d96cbSJohannes Thoma 0 - failed, try again (network error),
4089b10d96cbSJohannes Thoma -1 - auth failed, don't try again.
4090b10d96cbSJohannes Thoma */
4091b10d96cbSJohannes Thoma 
4092b411b363SPhilipp Reisner static int drbd_do_auth(struct drbd_conf *mdev)
4093b411b363SPhilipp Reisner {
4094b411b363SPhilipp Reisner char my_challenge[CHALLENGE_LEN]; /* 64 Bytes...
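
   The exchange implemented below is a symmetric challenge-response: each
   node sends a random challenge, answers the peer's challenge with
   HMAC(shared_secret, peer_challenge), and verifies the answer it receives
   against its own computation over my_challenge.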
*/ 4095b411b363SPhilipp Reisner struct scatterlist sg; 4096b411b363SPhilipp Reisner char *response = NULL; 4097b411b363SPhilipp Reisner char *right_response = NULL; 4098b411b363SPhilipp Reisner char *peers_ch = NULL; 4099b411b363SPhilipp Reisner unsigned int key_len = strlen(mdev->net_conf->shared_secret); 4100b411b363SPhilipp Reisner unsigned int resp_size; 4101b411b363SPhilipp Reisner struct hash_desc desc; 410202918be2SPhilipp Reisner enum drbd_packets cmd; 410302918be2SPhilipp Reisner unsigned int length; 4104b411b363SPhilipp Reisner int rv; 4105b411b363SPhilipp Reisner 4106b411b363SPhilipp Reisner desc.tfm = mdev->cram_hmac_tfm; 4107b411b363SPhilipp Reisner desc.flags = 0; 4108b411b363SPhilipp Reisner 4109b411b363SPhilipp Reisner rv = crypto_hash_setkey(mdev->cram_hmac_tfm, 4110b411b363SPhilipp Reisner (u8 *)mdev->net_conf->shared_secret, key_len); 4111b411b363SPhilipp Reisner if (rv) { 4112b411b363SPhilipp Reisner dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv); 4113b10d96cbSJohannes Thoma rv = -1; 4114b411b363SPhilipp Reisner goto fail; 4115b411b363SPhilipp Reisner } 4116b411b363SPhilipp Reisner 4117b411b363SPhilipp Reisner get_random_bytes(my_challenge, CHALLENGE_LEN); 4118b411b363SPhilipp Reisner 4119b411b363SPhilipp Reisner rv = drbd_send_cmd2(mdev, P_AUTH_CHALLENGE, my_challenge, CHALLENGE_LEN); 4120b411b363SPhilipp Reisner if (!rv) 4121b411b363SPhilipp Reisner goto fail; 4122b411b363SPhilipp Reisner 412302918be2SPhilipp Reisner rv = drbd_recv_header(mdev, &cmd, &length); 4124b411b363SPhilipp Reisner if (!rv) 4125b411b363SPhilipp Reisner goto fail; 4126b411b363SPhilipp Reisner 412702918be2SPhilipp Reisner if (cmd != P_AUTH_CHALLENGE) { 4128b411b363SPhilipp Reisner dev_err(DEV, "expected AuthChallenge packet, received: %s (0x%04x)\n", 412902918be2SPhilipp Reisner cmdname(cmd), cmd); 4130b411b363SPhilipp Reisner rv = 0; 4131b411b363SPhilipp Reisner goto fail; 4132b411b363SPhilipp Reisner } 4133b411b363SPhilipp Reisner 413402918be2SPhilipp Reisner if (length > CHALLENGE_LEN * 2) { 4135b411b363SPhilipp Reisner dev_err(DEV, "expected AuthChallenge payload too big.\n"); 4136b10d96cbSJohannes Thoma rv = -1; 4137b411b363SPhilipp Reisner goto fail; 4138b411b363SPhilipp Reisner } 4139b411b363SPhilipp Reisner 414002918be2SPhilipp Reisner peers_ch = kmalloc(length, GFP_NOIO); 4141b411b363SPhilipp Reisner if (peers_ch == NULL) { 4142b411b363SPhilipp Reisner dev_err(DEV, "kmalloc of peers_ch failed\n"); 4143b10d96cbSJohannes Thoma rv = -1; 4144b411b363SPhilipp Reisner goto fail; 4145b411b363SPhilipp Reisner } 4146b411b363SPhilipp Reisner 414702918be2SPhilipp Reisner rv = drbd_recv(mdev, peers_ch, length); 4148b411b363SPhilipp Reisner 414902918be2SPhilipp Reisner if (rv != length) { 41500ddc5549SLars Ellenberg if (!signal_pending(current)) 41510ddc5549SLars Ellenberg dev_warn(DEV, "short read AuthChallenge: l=%u\n", rv); 4152b411b363SPhilipp Reisner rv = 0; 4153b411b363SPhilipp Reisner goto fail; 4154b411b363SPhilipp Reisner } 4155b411b363SPhilipp Reisner 4156b411b363SPhilipp Reisner resp_size = crypto_hash_digestsize(mdev->cram_hmac_tfm); 4157b411b363SPhilipp Reisner response = kmalloc(resp_size, GFP_NOIO); 4158b411b363SPhilipp Reisner if (response == NULL) { 4159b411b363SPhilipp Reisner dev_err(DEV, "kmalloc of response failed\n"); 4160b10d96cbSJohannes Thoma rv = -1; 4161b411b363SPhilipp Reisner goto fail; 4162b411b363SPhilipp Reisner } 4163b411b363SPhilipp Reisner 4164b411b363SPhilipp Reisner sg_init_table(&sg, 1); 416502918be2SPhilipp Reisner sg_set_buf(&sg, peers_ch, length); 
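/* response := HMAC over the peer's challenge; the tfm was keyed with the
 * shared secret via crypto_hash_setkey above, and the peer will verify
 * this value against its own computation. */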
4166b411b363SPhilipp Reisner 4167b411b363SPhilipp Reisner rv = crypto_hash_digest(&desc, &sg, sg.length, response); 4168b411b363SPhilipp Reisner if (rv) { 4169b411b363SPhilipp Reisner dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv); 4170b10d96cbSJohannes Thoma rv = -1; 4171b411b363SPhilipp Reisner goto fail; 4172b411b363SPhilipp Reisner } 4173b411b363SPhilipp Reisner 4174b411b363SPhilipp Reisner rv = drbd_send_cmd2(mdev, P_AUTH_RESPONSE, response, resp_size); 4175b411b363SPhilipp Reisner if (!rv) 4176b411b363SPhilipp Reisner goto fail; 4177b411b363SPhilipp Reisner 417802918be2SPhilipp Reisner rv = drbd_recv_header(mdev, &cmd, &length); 4179b411b363SPhilipp Reisner if (!rv) 4180b411b363SPhilipp Reisner goto fail; 4181b411b363SPhilipp Reisner 418202918be2SPhilipp Reisner if (cmd != P_AUTH_RESPONSE) { 4183b411b363SPhilipp Reisner dev_err(DEV, "expected AuthResponse packet, received: %s (0x%04x)\n", 418402918be2SPhilipp Reisner cmdname(cmd), cmd); 4185b411b363SPhilipp Reisner rv = 0; 4186b411b363SPhilipp Reisner goto fail; 4187b411b363SPhilipp Reisner } 4188b411b363SPhilipp Reisner 418902918be2SPhilipp Reisner if (length != resp_size) { 4190b411b363SPhilipp Reisner dev_err(DEV, "expected AuthResponse payload of wrong size\n"); 4191b411b363SPhilipp Reisner rv = 0; 4192b411b363SPhilipp Reisner goto fail; 4193b411b363SPhilipp Reisner } 4194b411b363SPhilipp Reisner 4195b411b363SPhilipp Reisner rv = drbd_recv(mdev, response , resp_size); 4196b411b363SPhilipp Reisner 4197b411b363SPhilipp Reisner if (rv != resp_size) { 41980ddc5549SLars Ellenberg if (!signal_pending(current)) 41990ddc5549SLars Ellenberg dev_warn(DEV, "short read receiving AuthResponse: l=%u\n", rv); 4200b411b363SPhilipp Reisner rv = 0; 4201b411b363SPhilipp Reisner goto fail; 4202b411b363SPhilipp Reisner } 4203b411b363SPhilipp Reisner 4204b411b363SPhilipp Reisner right_response = kmalloc(resp_size, GFP_NOIO); 42052d1ee87dSJulia Lawall if (right_response == NULL) { 4206b411b363SPhilipp Reisner dev_err(DEV, "kmalloc of right_response failed\n"); 4207b10d96cbSJohannes Thoma rv = -1; 4208b411b363SPhilipp Reisner goto fail; 4209b411b363SPhilipp Reisner } 4210b411b363SPhilipp Reisner 4211b411b363SPhilipp Reisner sg_set_buf(&sg, my_challenge, CHALLENGE_LEN); 4212b411b363SPhilipp Reisner 4213b411b363SPhilipp Reisner rv = crypto_hash_digest(&desc, &sg, sg.length, right_response); 4214b411b363SPhilipp Reisner if (rv) { 4215b411b363SPhilipp Reisner dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv); 4216b10d96cbSJohannes Thoma rv = -1; 4217b411b363SPhilipp Reisner goto fail; 4218b411b363SPhilipp Reisner } 4219b411b363SPhilipp Reisner 4220b411b363SPhilipp Reisner rv = !memcmp(response, right_response, resp_size); 4221b411b363SPhilipp Reisner 4222b411b363SPhilipp Reisner if (rv) 4223b411b363SPhilipp Reisner dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n", 4224b411b363SPhilipp Reisner resp_size, mdev->net_conf->cram_hmac_alg); 4225b10d96cbSJohannes Thoma else 4226b10d96cbSJohannes Thoma rv = -1; 4227b411b363SPhilipp Reisner 4228b411b363SPhilipp Reisner fail: 4229b411b363SPhilipp Reisner kfree(peers_ch); 4230b411b363SPhilipp Reisner kfree(response); 4231b411b363SPhilipp Reisner kfree(right_response); 4232b411b363SPhilipp Reisner 4233b411b363SPhilipp Reisner return rv; 4234b411b363SPhilipp Reisner } 4235b411b363SPhilipp Reisner #endif 4236b411b363SPhilipp Reisner 4237b411b363SPhilipp Reisner int drbdd_init(struct drbd_thread *thi) 4238b411b363SPhilipp Reisner { 4239b411b363SPhilipp Reisner struct drbd_conf *mdev = 
4239b411b363SPhilipp Reisner 	struct drbd_conf *mdev = thi->mdev;
4240b411b363SPhilipp Reisner 	unsigned int minor = mdev_to_minor(mdev);
4241b411b363SPhilipp Reisner 	int h;
4242b411b363SPhilipp Reisner 
4243b411b363SPhilipp Reisner 	sprintf(current->comm, "drbd%d_receiver", minor);
4244b411b363SPhilipp Reisner 
4245b411b363SPhilipp Reisner 	dev_info(DEV, "receiver (re)started\n");
4246b411b363SPhilipp Reisner 
4247b411b363SPhilipp Reisner 	do {
4248b411b363SPhilipp Reisner 		h = drbd_connect(mdev);
4249b411b363SPhilipp Reisner 		if (h == 0) {
4250b411b363SPhilipp Reisner 			drbd_disconnect(mdev);
425120ee6390SPhilipp Reisner 			schedule_timeout_interruptible(HZ);
4252b411b363SPhilipp Reisner 		}
4253b411b363SPhilipp Reisner 		if (h == -1) {
4254b411b363SPhilipp Reisner 			dev_warn(DEV, "Discarding network configuration.\n");
4255b411b363SPhilipp Reisner 			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
4256b411b363SPhilipp Reisner 		}
4257b411b363SPhilipp Reisner 	} while (h == 0);
4258b411b363SPhilipp Reisner 
4259b411b363SPhilipp Reisner 	if (h > 0) {
4260b411b363SPhilipp Reisner 		if (get_net_conf(mdev)) {
4261b411b363SPhilipp Reisner 			drbdd(mdev);
4262b411b363SPhilipp Reisner 			put_net_conf(mdev);
4263b411b363SPhilipp Reisner 		}
4264b411b363SPhilipp Reisner 	}
4265b411b363SPhilipp Reisner 
4266b411b363SPhilipp Reisner 	drbd_disconnect(mdev);
4267b411b363SPhilipp Reisner 
4268b411b363SPhilipp Reisner 	dev_info(DEV, "receiver terminated\n");
4269b411b363SPhilipp Reisner 	return 0;
4270b411b363SPhilipp Reisner }
4271b411b363SPhilipp Reisner 
4272b411b363SPhilipp Reisner /* ********* acknowledge sender ******** */
4273b411b363SPhilipp Reisner 
42740b70a13dSPhilipp Reisner static int got_RqSReply(struct drbd_conf *mdev, struct p_header80 *h)
4275b411b363SPhilipp Reisner {
4276b411b363SPhilipp Reisner 	struct p_req_state_reply *p = (struct p_req_state_reply *)h;
4277b411b363SPhilipp Reisner 
4278b411b363SPhilipp Reisner 	int retcode = be32_to_cpu(p->retcode);
4279b411b363SPhilipp Reisner 
4280b411b363SPhilipp Reisner 	if (retcode >= SS_SUCCESS) {
4281b411b363SPhilipp Reisner 		set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
4282b411b363SPhilipp Reisner 	} else {
4283b411b363SPhilipp Reisner 		set_bit(CL_ST_CHG_FAIL, &mdev->flags);
4284b411b363SPhilipp Reisner 		dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
4285b411b363SPhilipp Reisner 			drbd_set_st_err_str(retcode), retcode);
4286b411b363SPhilipp Reisner 	}
4287b411b363SPhilipp Reisner 	wake_up(&mdev->state_wait);
4288b411b363SPhilipp Reisner 
428981e84650SAndreas Gruenbacher 	return true;
4290b411b363SPhilipp Reisner }
4291b411b363SPhilipp Reisner 
42920b70a13dSPhilipp Reisner static int got_Ping(struct drbd_conf *mdev, struct p_header80 *h)
4293b411b363SPhilipp Reisner {
4294b411b363SPhilipp Reisner 	return drbd_send_ping_ack(mdev);
4295b411b363SPhilipp Reisner 
4296b411b363SPhilipp Reisner }
4297b411b363SPhilipp Reisner 
42980b70a13dSPhilipp Reisner static int got_PingAck(struct drbd_conf *mdev, struct p_header80 *h)
4299b411b363SPhilipp Reisner {
4300b411b363SPhilipp Reisner 	/* restore idle timeout */
4301b411b363SPhilipp Reisner 	mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
4302309d1608SPhilipp Reisner 	if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags))
4303309d1608SPhilipp Reisner 		wake_up(&mdev->misc_wait);
4304b411b363SPhilipp Reisner 
430581e84650SAndreas Gruenbacher 	return true;
4306b411b363SPhilipp Reisner }
4307b411b363SPhilipp Reisner 
43080b70a13dSPhilipp Reisner static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h)
4309b411b363SPhilipp Reisner {
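	/* Checksum-based resync (agreed_pro_version >= 89): the peer found
	 * the block content identical, so no data was retransmitted; just
	 * mark the range in sync and account for it in the resync stats. */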
4310b411b363SPhilipp Reisner 	struct p_block_ack *p = (struct p_block_ack *)h;
4311b411b363SPhilipp Reisner 	sector_t sector = be64_to_cpu(p->sector);
4312b411b363SPhilipp Reisner 	int blksize = be32_to_cpu(p->blksize);
4313b411b363SPhilipp Reisner 
4314b411b363SPhilipp Reisner 	D_ASSERT(mdev->agreed_pro_version >= 89);
4315b411b363SPhilipp Reisner 
4316b411b363SPhilipp Reisner 	update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4317b411b363SPhilipp Reisner 
43181d53f09eSLars Ellenberg 	if (get_ldev(mdev)) {
4319b411b363SPhilipp Reisner 		drbd_rs_complete_io(mdev, sector);
4320b411b363SPhilipp Reisner 		drbd_set_in_sync(mdev, sector, blksize);
4321b411b363SPhilipp Reisner 		/* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
4322b411b363SPhilipp Reisner 		mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
43231d53f09eSLars Ellenberg 		put_ldev(mdev);
43241d53f09eSLars Ellenberg 	}
4325b411b363SPhilipp Reisner 	dec_rs_pending(mdev);
4326778f271dSPhilipp Reisner 	atomic_add(blksize >> 9, &mdev->rs_sect_in);
4327b411b363SPhilipp Reisner 
432881e84650SAndreas Gruenbacher 	return true;
4329b411b363SPhilipp Reisner }
4330b411b363SPhilipp Reisner 
4331b411b363SPhilipp Reisner /* when we receive the ACK for a write request,
4332b411b363SPhilipp Reisner  * verify that we actually know about it */
4333b411b363SPhilipp Reisner static struct drbd_request *_ack_id_to_req(struct drbd_conf *mdev,
4334b411b363SPhilipp Reisner 	u64 id, sector_t sector)
4335b411b363SPhilipp Reisner {
4336b411b363SPhilipp Reisner 	struct hlist_head *slot = tl_hash_slot(mdev, sector);
4337b411b363SPhilipp Reisner 	struct hlist_node *n;
4338b411b363SPhilipp Reisner 	struct drbd_request *req;
4339b411b363SPhilipp Reisner 
434024c4830cSBart Van Assche 	hlist_for_each_entry(req, n, slot, collision) {
4341b411b363SPhilipp Reisner 		if ((unsigned long)req == (unsigned long)id) {
4342b411b363SPhilipp Reisner 			if (req->sector != sector) {
4343b411b363SPhilipp Reisner 				dev_err(DEV, "_ack_id_to_req: found req %p but it has "
4344b411b363SPhilipp Reisner 					"wrong sector (%llus versus %llus)\n", req,
4345b411b363SPhilipp Reisner 					(unsigned long long)req->sector,
4346b411b363SPhilipp Reisner 					(unsigned long long)sector);
4347b411b363SPhilipp Reisner 				break;
4348b411b363SPhilipp Reisner 			}
4349b411b363SPhilipp Reisner 			return req;
4350b411b363SPhilipp Reisner 		}
4351b411b363SPhilipp Reisner 	}
4352b411b363SPhilipp Reisner 	return NULL;
4353b411b363SPhilipp Reisner }
4354b411b363SPhilipp Reisner 
4355b411b363SPhilipp Reisner typedef struct drbd_request *(req_validator_fn)
4356b411b363SPhilipp Reisner 	(struct drbd_conf *mdev, u64 id, sector_t sector);
4357b411b363SPhilipp Reisner 
4358b411b363SPhilipp Reisner static int validate_req_change_req_state(struct drbd_conf *mdev,
4359b411b363SPhilipp Reisner 	u64 id, sector_t sector, req_validator_fn validator,
4360b411b363SPhilipp Reisner 	const char *func, enum drbd_req_event what)
4361b411b363SPhilipp Reisner {
4362b411b363SPhilipp Reisner 	struct drbd_request *req;
4363b411b363SPhilipp Reisner 	struct bio_and_error m;
4364b411b363SPhilipp Reisner 
4365b411b363SPhilipp Reisner 	spin_lock_irq(&mdev->req_lock);
4366b411b363SPhilipp Reisner 	req = validator(mdev, id, sector);
4367b411b363SPhilipp Reisner 	if (unlikely(!req)) {
4368b411b363SPhilipp Reisner 		spin_unlock_irq(&mdev->req_lock);
43692deb8336SPhilipp Reisner 
43702deb8336SPhilipp Reisner 		dev_err(DEV, "%s: failed to find req %p, sector %llus\n", func,
43712deb8336SPhilipp Reisner 			(void *)(unsigned long)id, (unsigned long long)sector);
437281e84650SAndreas Gruenbacher 		return false;
4373b411b363SPhilipp Reisner 	}
4374b411b363SPhilipp Reisner 	__req_mod(req, what, &m);
4375b411b363SPhilipp Reisner 	spin_unlock_irq(&mdev->req_lock);
4376b411b363SPhilipp Reisner 
4377b411b363SPhilipp Reisner 	if (m.bio)
4378b411b363SPhilipp Reisner 		complete_master_bio(mdev, &m);
437981e84650SAndreas Gruenbacher 	return true;
4380b411b363SPhilipp Reisner }
4381b411b363SPhilipp Reisner 
43820b70a13dSPhilipp Reisner static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h)
4383b411b363SPhilipp Reisner {
4384b411b363SPhilipp Reisner 	struct p_block_ack *p = (struct p_block_ack *)h;
4385b411b363SPhilipp Reisner 	sector_t sector = be64_to_cpu(p->sector);
4386b411b363SPhilipp Reisner 	int blksize = be32_to_cpu(p->blksize);
4387b411b363SPhilipp Reisner 	enum drbd_req_event what;
4388b411b363SPhilipp Reisner 
4389b411b363SPhilipp Reisner 	update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4390b411b363SPhilipp Reisner 
4391b411b363SPhilipp Reisner 	if (is_syncer_block_id(p->block_id)) {
4392b411b363SPhilipp Reisner 		drbd_set_in_sync(mdev, sector, blksize);
4393b411b363SPhilipp Reisner 		dec_rs_pending(mdev);
439481e84650SAndreas Gruenbacher 		return true;
4395b411b363SPhilipp Reisner 	}
4396b411b363SPhilipp Reisner 	switch (be16_to_cpu(h->command)) {
4397b411b363SPhilipp Reisner 	case P_RS_WRITE_ACK:
4398b411b363SPhilipp Reisner 		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4399b411b363SPhilipp Reisner 		what = write_acked_by_peer_and_sis;
4400b411b363SPhilipp Reisner 		break;
4401b411b363SPhilipp Reisner 	case P_WRITE_ACK:
4402b411b363SPhilipp Reisner 		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4403b411b363SPhilipp Reisner 		what = write_acked_by_peer;
4404b411b363SPhilipp Reisner 		break;
4405b411b363SPhilipp Reisner 	case P_RECV_ACK:
4406b411b363SPhilipp Reisner 		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_B);
4407b411b363SPhilipp Reisner 		what = recv_acked_by_peer;
4408b411b363SPhilipp Reisner 		break;
4409b411b363SPhilipp Reisner 	case P_DISCARD_ACK:
4410b411b363SPhilipp Reisner 		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4411b411b363SPhilipp Reisner 		what = conflict_discarded_by_peer;
4412b411b363SPhilipp Reisner 		break;
4413b411b363SPhilipp Reisner 	default:
4414b411b363SPhilipp Reisner 		D_ASSERT(0);
441581e84650SAndreas Gruenbacher 		return false;
4416b411b363SPhilipp Reisner 	}
4417b411b363SPhilipp Reisner 
4418b411b363SPhilipp Reisner 	return validate_req_change_req_state(mdev, p->block_id, sector,
4419b411b363SPhilipp Reisner 					     _ack_id_to_req, __func__, what);
4420b411b363SPhilipp Reisner }
4421b411b363SPhilipp Reisner 
44220b70a13dSPhilipp Reisner static int got_NegAck(struct drbd_conf *mdev, struct p_header80 *h)
4423b411b363SPhilipp Reisner {
4424b411b363SPhilipp Reisner 	struct p_block_ack *p = (struct p_block_ack *)h;
4425b411b363SPhilipp Reisner 	sector_t sector = be64_to_cpu(p->sector);
44262deb8336SPhilipp Reisner 	int size = be32_to_cpu(p->blksize);
44272deb8336SPhilipp Reisner 	struct drbd_request *req;
44282deb8336SPhilipp Reisner 	struct bio_and_error m;
4429b411b363SPhilipp Reisner 
4430b411b363SPhilipp Reisner 	update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4431b411b363SPhilipp Reisner 
4432b411b363SPhilipp Reisner 	if (is_syncer_block_id(p->block_id)) {
4433b411b363SPhilipp Reisner 		dec_rs_pending(mdev);
4434b411b363SPhilipp Reisner 		drbd_rs_failed_io(mdev, sector, size);
443581e84650SAndreas Gruenbacher 		return true;
4436b411b363SPhilipp Reisner 	}
44372deb8336SPhilipp Reisner 
44382deb8336SPhilipp Reisner 	spin_lock_irq(&mdev->req_lock);
44392deb8336SPhilipp Reisner 	req = _ack_id_to_req(mdev, p->block_id, sector);
44402deb8336SPhilipp Reisner 	if (!req) {
44412deb8336SPhilipp Reisner 		spin_unlock_irq(&mdev->req_lock);
44422deb8336SPhilipp Reisner 		if (mdev->net_conf->wire_protocol == DRBD_PROT_A ||
44432deb8336SPhilipp Reisner 		    mdev->net_conf->wire_protocol == DRBD_PROT_B) {
44442deb8336SPhilipp Reisner 			/* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
44452deb8336SPhilipp Reisner 			   The master bio might already be completed, therefore the
44462deb8336SPhilipp Reisner 			   request is no longer in the collision hash.
44472deb8336SPhilipp Reisner 			   => Do not try to validate block_id as request. */
44482deb8336SPhilipp Reisner 			/* In Protocol B we might already have got a P_RECV_ACK
44492deb8336SPhilipp Reisner 			   but then get a P_NEG_ACK afterwards. */
44502deb8336SPhilipp Reisner 			drbd_set_out_of_sync(mdev, sector, size);
44512deb8336SPhilipp Reisner 			return true;
44522deb8336SPhilipp Reisner 		} else {
44532deb8336SPhilipp Reisner 			dev_err(DEV, "%s: failed to find req %p, sector %llus\n", __func__,
44542deb8336SPhilipp Reisner 				(void *)(unsigned long)p->block_id, (unsigned long long)sector);
44552deb8336SPhilipp Reisner 			return false;
44562deb8336SPhilipp Reisner 		}
44572deb8336SPhilipp Reisner 	}
44582deb8336SPhilipp Reisner 	__req_mod(req, neg_acked, &m);
44592deb8336SPhilipp Reisner 	spin_unlock_irq(&mdev->req_lock);
44602deb8336SPhilipp Reisner 
44612deb8336SPhilipp Reisner 	if (m.bio)
44622deb8336SPhilipp Reisner 		complete_master_bio(mdev, &m);
44632deb8336SPhilipp Reisner 	return true;
4464b411b363SPhilipp Reisner }
4465b411b363SPhilipp Reisner 
44660b70a13dSPhilipp Reisner static int got_NegDReply(struct drbd_conf *mdev, struct p_header80 *h)
4467b411b363SPhilipp Reisner {
4468b411b363SPhilipp Reisner 	struct p_block_ack *p = (struct p_block_ack *)h;
4469b411b363SPhilipp Reisner 	sector_t sector = be64_to_cpu(p->sector);
4470b411b363SPhilipp Reisner 
4471b411b363SPhilipp Reisner 	update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4472b411b363SPhilipp Reisner 	dev_err(DEV, "Got NegDReply; Sector %llus, len %u; Fail original request.\n",
4473b411b363SPhilipp Reisner 		(unsigned long long)sector, be32_to_cpu(p->blksize));
4474b411b363SPhilipp Reisner 
4475b411b363SPhilipp Reisner 	return validate_req_change_req_state(mdev, p->block_id, sector,
4476b411b363SPhilipp Reisner 					     _ar_id_to_req, __func__, neg_acked);
4477b411b363SPhilipp Reisner }
4478b411b363SPhilipp Reisner 
44790b70a13dSPhilipp Reisner static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header80 *h)
4480b411b363SPhilipp Reisner {
4481b411b363SPhilipp Reisner 	sector_t sector;
4482b411b363SPhilipp Reisner 	int size;
4483b411b363SPhilipp Reisner 	struct p_block_ack *p = (struct p_block_ack *)h;
4484b411b363SPhilipp Reisner 
4485b411b363SPhilipp Reisner 	sector = be64_to_cpu(p->sector);
4486b411b363SPhilipp Reisner 	size = be32_to_cpu(p->blksize);
4487b411b363SPhilipp Reisner 
4488b411b363SPhilipp Reisner 	update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4489b411b363SPhilipp Reisner 
4490b411b363SPhilipp Reisner 	dec_rs_pending(mdev);
4491b411b363SPhilipp Reisner 
4492b411b363SPhilipp Reisner 	if (get_ldev_if_state(mdev, D_FAILED)) {
4493b411b363SPhilipp Reisner 		drbd_rs_complete_io(mdev, sector);
4494d612d309SPhilipp Reisner 		switch (be16_to_cpu(h->command)) {
4495d612d309SPhilipp Reisner 		case P_NEG_RS_DREPLY:
4496b411b363SPhilipp Reisner 			drbd_rs_failed_io(mdev, sector, size);
4497d612d309SPhilipp Reisner 		case P_RS_CANCEL:
4498d612d309SPhilipp Reisner 			break;
4499d612d309SPhilipp Reisner 		default:
4500d612d309SPhilipp Reisner 			D_ASSERT(0);
4501d612d309SPhilipp Reisner 			put_ldev(mdev);
4502d612d309SPhilipp Reisner 			return false;
4503d612d309SPhilipp Reisner 		}
4504b411b363SPhilipp Reisner 		put_ldev(mdev);
4505b411b363SPhilipp Reisner 	}
4506b411b363SPhilipp Reisner 
450781e84650SAndreas Gruenbacher 	return true;
4508b411b363SPhilipp Reisner }
4509b411b363SPhilipp Reisner 
45100b70a13dSPhilipp Reisner static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h)
4511b411b363SPhilipp Reisner {
4512b411b363SPhilipp Reisner 	struct p_barrier_ack *p = (struct p_barrier_ack *)h;
4513b411b363SPhilipp Reisner 
4514b411b363SPhilipp Reisner 	tl_release(mdev, p->barrier, be32_to_cpu(p->set_size));
4515b411b363SPhilipp Reisner 
4516c4752ef1SPhilipp Reisner 	if (mdev->state.conn == C_AHEAD &&
4517c4752ef1SPhilipp Reisner 	    atomic_read(&mdev->ap_in_flight) == 0 &&
4518e89868a0SPhilipp Reisner 	    !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags)) {
4519370a43e7SPhilipp Reisner 		mdev->start_resync_timer.expires = jiffies + HZ;
4520370a43e7SPhilipp Reisner 		add_timer(&mdev->start_resync_timer);
4521c4752ef1SPhilipp Reisner 	}
4522c4752ef1SPhilipp Reisner 
452381e84650SAndreas Gruenbacher 	return true;
4524b411b363SPhilipp Reisner }
4525b411b363SPhilipp Reisner 
45260b70a13dSPhilipp Reisner static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h)
4527b411b363SPhilipp Reisner {
4528b411b363SPhilipp Reisner 	struct p_block_ack *p = (struct p_block_ack *)h;
4529b411b363SPhilipp Reisner 	struct drbd_work *w;
4530b411b363SPhilipp Reisner 	sector_t sector;
4531b411b363SPhilipp Reisner 	int size;
4532b411b363SPhilipp Reisner 
4533b411b363SPhilipp Reisner 	sector = be64_to_cpu(p->sector);
4534b411b363SPhilipp Reisner 	size = be32_to_cpu(p->blksize);
4535b411b363SPhilipp Reisner 
4536b411b363SPhilipp Reisner 	update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4537b411b363SPhilipp Reisner 
4538b411b363SPhilipp Reisner 	if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
4539b411b363SPhilipp Reisner 		drbd_ov_oos_found(mdev, sector, size);
4540b411b363SPhilipp Reisner 	else
4541b411b363SPhilipp Reisner 		ov_oos_print(mdev);
4542b411b363SPhilipp Reisner 
45431d53f09eSLars Ellenberg 	if (!get_ldev(mdev))
454481e84650SAndreas Gruenbacher 		return true;
45451d53f09eSLars Ellenberg 
4546b411b363SPhilipp Reisner 	drbd_rs_complete_io(mdev, sector);
4547b411b363SPhilipp Reisner 	dec_rs_pending(mdev);
4548b411b363SPhilipp Reisner 
4549ea5442afSLars Ellenberg 	--mdev->ov_left;
4550ea5442afSLars Ellenberg 
4551ea5442afSLars Ellenberg 	/* let's advance progress step marks only for every other megabyte */
4552ea5442afSLars Ellenberg 	if ((mdev->ov_left & 0x200) == 0x200)
4553ea5442afSLars Ellenberg 		drbd_advance_rs_marks(mdev, mdev->ov_left);
4554ea5442afSLars Ellenberg 
4555ea5442afSLars Ellenberg 	if (mdev->ov_left == 0) {
4556b411b363SPhilipp Reisner 		w = kmalloc(sizeof(*w), GFP_NOIO);
4557b411b363SPhilipp Reisner 		if (w) {
4558b411b363SPhilipp Reisner 			w->cb = w_ov_finished;
4559b411b363SPhilipp Reisner 			drbd_queue_work_front(&mdev->data.work, w);
4560b411b363SPhilipp Reisner 		} else {
4561b411b363SPhilipp Reisner 			dev_err(DEV, "kmalloc(w) failed.\n");
4562b411b363SPhilipp Reisner 			ov_oos_print(mdev);
4563b411b363SPhilipp Reisner 			drbd_resync_finished(mdev);
4564b411b363SPhilipp Reisner 		}
4565b411b363SPhilipp Reisner 	}
45661d53f09eSLars Ellenberg 	put_ldev(mdev);
456781e84650SAndreas Gruenbacher 	return true;
4568b411b363SPhilipp Reisner }
4569b411b363SPhilipp Reisner 
457002918be2SPhilipp Reisner static int got_skip(struct drbd_conf *mdev, struct p_header80 *h)
45710ced55a3SPhilipp Reisner {
457281e84650SAndreas Gruenbacher 	return true;
45730ced55a3SPhilipp Reisner }
45740ced55a3SPhilipp Reisner 
4575b411b363SPhilipp Reisner struct asender_cmd {
4576b411b363SPhilipp Reisner 	size_t pkt_size;
45770b70a13dSPhilipp Reisner 	int (*process)(struct drbd_conf *mdev, struct p_header80 *h);
4578b411b363SPhilipp Reisner };
4579b411b363SPhilipp Reisner 
4580b411b363SPhilipp Reisner static struct asender_cmd *get_asender_cmd(int cmd)
4581b411b363SPhilipp Reisner {
4582b411b363SPhilipp Reisner 	static struct asender_cmd asender_tbl[] = {
4583b411b363SPhilipp Reisner 		/* anything missing from this table is in
4584b411b363SPhilipp Reisner 		 * the drbd_cmd_handler (drbd_default_handler) table,
4585b411b363SPhilipp Reisner 		 * see the beginning of drbdd() */
45860b70a13dSPhilipp Reisner 	[P_PING] = { sizeof(struct p_header80), got_Ping },
45870b70a13dSPhilipp Reisner 	[P_PING_ACK] = { sizeof(struct p_header80), got_PingAck },
4588b411b363SPhilipp Reisner 	[P_RECV_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
4589b411b363SPhilipp Reisner 	[P_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
4590b411b363SPhilipp Reisner 	[P_RS_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
4591b411b363SPhilipp Reisner 	[P_DISCARD_ACK] = { sizeof(struct p_block_ack), got_BlockAck },
4592b411b363SPhilipp Reisner 	[P_NEG_ACK] = { sizeof(struct p_block_ack), got_NegAck },
4593b411b363SPhilipp Reisner 	[P_NEG_DREPLY] = { sizeof(struct p_block_ack), got_NegDReply },
4594b411b363SPhilipp Reisner 	[P_NEG_RS_DREPLY] = { sizeof(struct p_block_ack), got_NegRSDReply},
4595b411b363SPhilipp Reisner 	[P_OV_RESULT] = { sizeof(struct p_block_ack), got_OVResult },
4596b411b363SPhilipp Reisner 	[P_BARRIER_ACK] = { sizeof(struct p_barrier_ack), got_BarrierAck },
4597b411b363SPhilipp Reisner 	[P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
4598b411b363SPhilipp Reisner 	[P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync },
459902918be2SPhilipp Reisner 	[P_DELAY_PROBE] = { sizeof(struct p_delay_probe93), got_skip },
4600d612d309SPhilipp Reisner 	[P_RS_CANCEL] = { sizeof(struct p_block_ack), got_NegRSDReply},
4601b411b363SPhilipp Reisner 	[P_MAX_CMD] = { 0, NULL },
4602b411b363SPhilipp Reisner 	};
4603b411b363SPhilipp Reisner 	if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
4604b411b363SPhilipp Reisner 		return NULL;
4605b411b363SPhilipp Reisner 	return &asender_tbl[cmd];
4606b411b363SPhilipp Reisner }
4607b411b363SPhilipp Reisner 
4608b411b363SPhilipp Reisner int drbd_asender(struct drbd_thread *thi)
4609b411b363SPhilipp Reisner {
4610b411b363SPhilipp Reisner 	struct drbd_conf *mdev = thi->mdev;
461102918be2SPhilipp Reisner 	struct p_header80 *h = &mdev->meta.rbuf.header.h80;
4612b411b363SPhilipp Reisner 	struct asender_cmd *cmd = NULL;
4613b411b363SPhilipp Reisner 
4614b411b363SPhilipp Reisner 	int rv, len;
4615b411b363SPhilipp Reisner 	void *buf = h;
4616b411b363SPhilipp Reisner 	int received = 0;
46170b70a13dSPhilipp Reisner 	int expect = sizeof(struct p_header80);
4618b411b363SPhilipp Reisner 	int empty;
4619f36af18cSLars Ellenberg 	int ping_timeout_active = 0;
4620b411b363SPhilipp Reisner 
4621b411b363SPhilipp Reisner 	sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev));
4622b411b363SPhilipp Reisner 
4623b411b363SPhilipp Reisner 	current->policy = SCHED_RR; /* Make this a realtime task! */
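	/* The asender sends the ACKs that the peer's application writes
	 * wait for (protocols B and C), so it runs with realtime
	 * scheduling to keep ACK latency low. */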
4624b411b363SPhilipp Reisner 	current->rt_priority = 2; /* more important than all other tasks */
4625b411b363SPhilipp Reisner 
4626b411b363SPhilipp Reisner 	while (get_t_state(thi) == Running) {
4627b411b363SPhilipp Reisner 		drbd_thread_current_set_cpu(mdev);
4628b411b363SPhilipp Reisner 		if (test_and_clear_bit(SEND_PING, &mdev->flags)) {
4629b411b363SPhilipp Reisner 			ERR_IF(!drbd_send_ping(mdev)) goto reconnect;
4630b411b363SPhilipp Reisner 			mdev->meta.socket->sk->sk_rcvtimeo =
4631b411b363SPhilipp Reisner 				mdev->net_conf->ping_timeo*HZ/10;
4632f36af18cSLars Ellenberg 			ping_timeout_active = 1;
4633b411b363SPhilipp Reisner 		}
4634b411b363SPhilipp Reisner 
4635b411b363SPhilipp Reisner 		/* conditionally cork;
4636b411b363SPhilipp Reisner 		 * it may hurt latency if we cork without much to send */
4637b411b363SPhilipp Reisner 		if (!mdev->net_conf->no_cork &&
4638b411b363SPhilipp Reisner 		    3 < atomic_read(&mdev->unacked_cnt))
4639b411b363SPhilipp Reisner 			drbd_tcp_cork(mdev->meta.socket);
4640b411b363SPhilipp Reisner 		while (1) {
4641b411b363SPhilipp Reisner 			clear_bit(SIGNAL_ASENDER, &mdev->flags);
4642b411b363SPhilipp Reisner 			flush_signals(current);
46430f8488e1SLars Ellenberg 			if (!drbd_process_done_ee(mdev))
4644b411b363SPhilipp Reisner 				goto reconnect;
4645b411b363SPhilipp Reisner 			/* to avoid race with newly queued ACKs */
4646b411b363SPhilipp Reisner 			set_bit(SIGNAL_ASENDER, &mdev->flags);
4647b411b363SPhilipp Reisner 			spin_lock_irq(&mdev->req_lock);
4648b411b363SPhilipp Reisner 			empty = list_empty(&mdev->done_ee);
4649b411b363SPhilipp Reisner 			spin_unlock_irq(&mdev->req_lock);
4650b411b363SPhilipp Reisner 			/* new ack may have been queued right here,
4651b411b363SPhilipp Reisner 			 * but then there is also a signal pending,
4652b411b363SPhilipp Reisner 			 * and we start over... */
4653b411b363SPhilipp Reisner 			if (empty)
4654b411b363SPhilipp Reisner 				break;
4655b411b363SPhilipp Reisner 		}
4656b411b363SPhilipp Reisner 		/* but unconditionally uncork unless disabled */
4657b411b363SPhilipp Reisner 		if (!mdev->net_conf->no_cork)
4658b411b363SPhilipp Reisner 			drbd_tcp_uncork(mdev->meta.socket);
4659b411b363SPhilipp Reisner 
4660b411b363SPhilipp Reisner 		/* short circuit, recv_msg would return EINTR anyways. */
4661b411b363SPhilipp Reisner 		if (signal_pending(current))
4662b411b363SPhilipp Reisner 			continue;
4663b411b363SPhilipp Reisner 
4664b411b363SPhilipp Reisner 		rv = drbd_recv_short(mdev, mdev->meta.socket,
4665b411b363SPhilipp Reisner 				     buf, expect-received, 0);
4666b411b363SPhilipp Reisner 		clear_bit(SIGNAL_ASENDER, &mdev->flags);
4667b411b363SPhilipp Reisner 
4668b411b363SPhilipp Reisner 		flush_signals(current);
4669b411b363SPhilipp Reisner 
4670b411b363SPhilipp Reisner 		/* Note:
4671b411b363SPhilipp Reisner 		 * -EINTR	 (on meta) we got a signal
4672b411b363SPhilipp Reisner 		 * -EAGAIN	 (on meta) rcvtimeo expired
4673b411b363SPhilipp Reisner 		 * -ECONNRESET	 other side closed the connection
4674b411b363SPhilipp Reisner 		 * -ERESTARTSYS	 (on data) we got a signal
4675b411b363SPhilipp Reisner 		 * rv <  0	 other than above: unexpected error!
4676b411b363SPhilipp Reisner 		 * rv == expected: full header or command
4677b411b363SPhilipp Reisner 		 * rv <  expected: "woken" by signal during receive
4678b411b363SPhilipp Reisner 		 * rv == 0	 : "connection shut down by peer"
4679b411b363SPhilipp Reisner 		 */
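		/* Reassemble packets from possibly partial reads: bytes
		 * accumulate into buf until 'expect' is reached -- first a
		 * fixed-size p_header80, then, once the header is decoded,
		 * the remainder of the packet up to the command's pkt_size. */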
4680b411b363SPhilipp Reisner 		if (likely(rv > 0)) {
4681b411b363SPhilipp Reisner 			received += rv;
4682b411b363SPhilipp Reisner 			buf	 += rv;
4683b411b363SPhilipp Reisner 		} else if (rv == 0) {
4684b411b363SPhilipp Reisner 			dev_err(DEV, "meta connection shut down by peer.\n");
4685b411b363SPhilipp Reisner 			goto reconnect;
4686b411b363SPhilipp Reisner 		} else if (rv == -EAGAIN) {
4687cb6518cbSLars Ellenberg 			/* If the data socket received something meanwhile,
4688cb6518cbSLars Ellenberg 			 * that is good enough: peer is still alive. */
4689cb6518cbSLars Ellenberg 			if (time_after(mdev->last_received,
4690cb6518cbSLars Ellenberg 				jiffies - mdev->meta.socket->sk->sk_rcvtimeo))
4691cb6518cbSLars Ellenberg 				continue;
4692f36af18cSLars Ellenberg 			if (ping_timeout_active) {
4693b411b363SPhilipp Reisner 				dev_err(DEV, "PingAck did not arrive in time.\n");
4694b411b363SPhilipp Reisner 				goto reconnect;
4695b411b363SPhilipp Reisner 			}
4696b411b363SPhilipp Reisner 			set_bit(SEND_PING, &mdev->flags);
4697b411b363SPhilipp Reisner 			continue;
4698b411b363SPhilipp Reisner 		} else if (rv == -EINTR) {
4699b411b363SPhilipp Reisner 			continue;
4700b411b363SPhilipp Reisner 		} else {
4701b411b363SPhilipp Reisner 			dev_err(DEV, "sock_recvmsg returned %d\n", rv);
4702b411b363SPhilipp Reisner 			goto reconnect;
4703b411b363SPhilipp Reisner 		}
4704b411b363SPhilipp Reisner 
4705b411b363SPhilipp Reisner 		if (received == expect && cmd == NULL) {
4706b411b363SPhilipp Reisner 			if (unlikely(h->magic != BE_DRBD_MAGIC)) {
4707004352faSLars Ellenberg 				dev_err(DEV, "magic?? on meta m: 0x%08x c: %d l: %d\n",
4708004352faSLars Ellenberg 					be32_to_cpu(h->magic),
4709004352faSLars Ellenberg 					be16_to_cpu(h->command),
4710004352faSLars Ellenberg 					be16_to_cpu(h->length));
4711b411b363SPhilipp Reisner 				goto reconnect;
4712b411b363SPhilipp Reisner 			}
4713b411b363SPhilipp Reisner 			cmd = get_asender_cmd(be16_to_cpu(h->command));
4714b411b363SPhilipp Reisner 			len = be16_to_cpu(h->length);
4715b411b363SPhilipp Reisner 			if (unlikely(cmd == NULL)) {
4716004352faSLars Ellenberg 				dev_err(DEV, "unknown command?? on meta m: 0x%08x c: %d l: %d\n",
4717004352faSLars Ellenberg 					be32_to_cpu(h->magic),
4718004352faSLars Ellenberg 					be16_to_cpu(h->command),
4719004352faSLars Ellenberg 					be16_to_cpu(h->length));
4720b411b363SPhilipp Reisner 				goto disconnect;
4721b411b363SPhilipp Reisner 			}
4722b411b363SPhilipp Reisner 			expect = cmd->pkt_size;
47230b70a13dSPhilipp Reisner 			ERR_IF(len != expect-sizeof(struct p_header80))
4724b411b363SPhilipp Reisner 				goto reconnect;
4725b411b363SPhilipp Reisner 		}
4726b411b363SPhilipp Reisner 		if (received == expect) {
4727cb6518cbSLars Ellenberg 			mdev->last_received = jiffies;
4728b411b363SPhilipp Reisner 			D_ASSERT(cmd != NULL);
4729b411b363SPhilipp Reisner 			if (!cmd->process(mdev, h))
4730b411b363SPhilipp Reisner 				goto reconnect;
4731b411b363SPhilipp Reisner 
4732f36af18cSLars Ellenberg 			/* the idle_timeout (ping-int)
4733f36af18cSLars Ellenberg 			 * has been restored in got_PingAck() */
4734f36af18cSLars Ellenberg 			if (cmd == get_asender_cmd(P_PING_ACK))
4735f36af18cSLars Ellenberg 				ping_timeout_active = 0;
4736f36af18cSLars Ellenberg 
4737b411b363SPhilipp Reisner 			buf	 = h;
4738b411b363SPhilipp Reisner 			received = 0;
47390b70a13dSPhilipp Reisner 			expect	 = sizeof(struct p_header80);
4740b411b363SPhilipp Reisner 			cmd	 = NULL;
4741b411b363SPhilipp Reisner 		}
4742b411b363SPhilipp Reisner 	}
4743b411b363SPhilipp Reisner 
4744b411b363SPhilipp Reisner 	if (0) {
4745b411b363SPhilipp Reisner reconnect:
4746b411b363SPhilipp Reisner 		drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
4747856c50c7SLars Ellenberg 		drbd_md_sync(mdev);
4748b411b363SPhilipp Reisner 	}
4749b411b363SPhilipp Reisner 	if (0) {
4750b411b363SPhilipp Reisner disconnect:
4751b411b363SPhilipp Reisner 		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
4752856c50c7SLars Ellenberg 		drbd_md_sync(mdev);
4753b411b363SPhilipp Reisner 	}
4754b411b363SPhilipp Reisner 	clear_bit(SIGNAL_ASENDER, &mdev->flags);
4755b411b363SPhilipp Reisner 
4756b411b363SPhilipp Reisner 	D_ASSERT(mdev->state.conn < C_CONNECTED);
4757b411b363SPhilipp Reisner 	dev_info(DEV, "asender terminated\n");
4758b411b363SPhilipp Reisner 
4759b411b363SPhilipp Reisner 	return 0;
4760b411b363SPhilipp Reisner }
4761