/*
   drbd_receiver.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */


#include <linux/module.h>

#include <asm/uaccess.h>
#include <net/sock.h>

#include <linux/drbd.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include "drbd_int.h"
#include "drbd_req.h"

#include "drbd_vli.h"

enum finish_epoch {
	FE_STILL_LIVE,
	FE_DESTROYED,
	FE_RECYCLED,
};

static int drbd_do_handshake(struct drbd_conf *mdev);
static int drbd_do_auth(struct drbd_conf *mdev);

static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_conf *, struct drbd_work *, int);


#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)
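
/* Added note: GFP_TRY deliberately omits __GFP_WAIT, so these allocations may
 * fail but never sleep in reclaim; see the comment in
 * drbd_pp_first_pages_or_try_alloc() below on why we must not trigger
 * arbitrary write-out from this path. */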

/*
 * some helper functions to deal with single linked page lists,
 * page->private being our "next" pointer.
 */

/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller.
 */
static struct page *page_chain_del(struct page **head, int n)
{
	struct page *page;
	struct page *tmp;

	BUG_ON(!n);
	BUG_ON(!head);

	page = *head;

	if (!page)
		return NULL;

	while (page) {
		tmp = page_chain_next(page);
		if (--n == 0)
			break; /* found sufficient pages */
		if (tmp == NULL)
			/* insufficient pages, don't use any of them. */
			return NULL;
		page = tmp;
	}

	/* add end of list marker for the returned list */
	set_page_private(page, 0);
	/* actual return value, and adjustment of head */
	page = *head;
	*head = tmp;
	return page;
}
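
/* For illustration: with *head pointing at a chain p1 -> p2 -> p3 -> p4
 * (linked through page->private), page_chain_del(&head, 2) unlinks and
 * returns p1 -> p2 (terminated by private == 0) and leaves *head at p3.
 * With fewer than two pages linked, it returns NULL and leaves the chain
 * untouched. */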

/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock.
 */
static struct page *page_chain_tail(struct page *page, int *len)
{
	struct page *tmp;
	int i = 1;
	while ((tmp = page_chain_next(page)))
		++i, page = tmp;
	if (len)
		*len = i;
	return page;
}

static int page_chain_free(struct page *page)
{
	struct page *tmp;
	int i = 0;
	page_chain_for_each_safe(page, tmp) {
		put_page(page);
		++i;
	}
	return i;
}

static void page_chain_add(struct page **head,
		struct page *chain_first, struct page *chain_last)
{
#if 1
	struct page *tmp;
	tmp = page_chain_tail(chain_first, NULL);
	BUG_ON(tmp != chain_last);
#endif

	/* add chain to head */
	set_page_private(chain_last, (unsigned long)*head);
	*head = chain_first;
}

static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int number)
{
	struct page *page = NULL;
	struct page *tmp = NULL;
	int i = 0;

	/* Yes, testing drbd_pp_vacant outside the lock is racy.
	 * So what. It saves a spin_lock. */
	if (drbd_pp_vacant >= number) {
		spin_lock(&drbd_pp_lock);
		page = page_chain_del(&drbd_pp_pool, number);
		if (page)
			drbd_pp_vacant -= number;
		spin_unlock(&drbd_pp_lock);
		if (page)
			return page;
	}

	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	for (i = 0; i < number; i++) {
		tmp = alloc_page(GFP_TRY);
		if (!tmp)
			break;
		set_page_private(tmp, (unsigned long)page);
		page = tmp;
	}

	if (i == number)
		return page;

	/* Not enough pages immediately available this time.
	 * No need to jump around here, drbd_pp_alloc will retry this
	 * function "soon".
	 */
	if (page) {
		tmp = page_chain_tail(page, NULL);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	return NULL;
}

static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
{
	struct drbd_epoch_entry *e;
	struct list_head *le, *tle;

	/* The EEs are always appended to the end of the list. Since
	   they are sent in order over the wire, they have to finish
	   in order. As soon as we see the first unfinished one we can
	   stop examining the list... */

	list_for_each_safe(le, tle, &mdev->net_ee) {
		e = list_entry(le, struct drbd_epoch_entry, w.list);
		if (drbd_ee_has_active_page(e))
			break;
		list_move(le, to_be_freed);
	}
}

static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
{
	LIST_HEAD(reclaimed);
	struct drbd_epoch_entry *e, *t;

	spin_lock_irq(&mdev->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, e);
}

/**
 * drbd_pp_alloc() - Returns @number pages, retries forever (or until signalled)
 * @mdev:	DRBD device.
 * @number:	number of pages requested
 * @retry:	whether to retry, if not enough pages are available right now
 *
 * Tries to allocate number pages, first from our own page pool, then from
 * the kernel, unless this allocation would exceed the max_buffers setting.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * Returns a page chain linked via page->private.
 */
static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool retry)
{
	struct page *page = NULL;
	DEFINE_WAIT(wait);

	/* Yes, we may run up to @number over max_buffers. If we
	 * follow it strictly, the admin will get it wrong anyways. */
	if (atomic_read(&mdev->pp_in_use) < mdev->tconn->net_conf->max_buffers)
		page = drbd_pp_first_pages_or_try_alloc(mdev, number);

	while (page == NULL) {
		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

		drbd_kick_lo_and_reclaim_net(mdev);

		if (atomic_read(&mdev->pp_in_use) < mdev->tconn->net_conf->max_buffers) {
			page = drbd_pp_first_pages_or_try_alloc(mdev, number);
			if (page)
				break;
		}

		if (!retry)
			break;

		if (signal_pending(current)) {
			dev_warn(DEV, "drbd_pp_alloc interrupted!\n");
			break;
		}

		schedule();
	}
	finish_wait(&drbd_pp_wait, &wait);

	if (page)
		atomic_add(number, &mdev->pp_in_use);
	return page;
}

/* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
 * Is also used from inside another spin_lock_irq(&mdev->req_lock);
 * Either links the page chain back to the global pool,
 * or returns all pages to the system. */
static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
{
	atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
	int i;

	if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE)*minor_count)
		i = page_chain_free(page);
	else {
		struct page *tmp;
		tmp = page_chain_tail(page, &i);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	i = atomic_sub_return(i, a);
	if (i < 0)
		dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
			is_net ? "pp_in_use_by_net" : "pp_in_use", i);
	wake_up(&drbd_pp_wait);
}

/*
You need to hold the req_lock:
 _drbd_wait_ee_list_empty()

You must not have the req_lock:
 drbd_free_ee()
 drbd_alloc_ee()
 drbd_init_ee()
 drbd_release_ee()
 drbd_ee_fix_bhs()
 drbd_process_done_ee()
 drbd_clear_done_ee()
 drbd_wait_ee_list_empty()
*/

struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
				     u64 id,
				     sector_t sector,
				     unsigned int data_size,
				     gfp_t gfp_mask) __must_hold(local)
{
	struct drbd_epoch_entry *e;
	struct page *page;
	unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;

	if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
		return NULL;

	e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
	if (!e) {
		if (!(gfp_mask & __GFP_NOWARN))
			dev_err(DEV, "alloc_ee: Allocation of an EE failed\n");
		return NULL;
	}

	page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
	if (!page)
		goto fail;

	drbd_clear_interval(&e->i);
	e->epoch = NULL;
	e->mdev = mdev;
	e->pages = page;
	atomic_set(&e->pending_bios, 0);
	e->i.size = data_size;
	e->flags = 0;
	e->i.sector = sector;
	/*
	 * The block_id is opaque to the receiver.  It is not endianness
	 * converted, and sent back to the sender unchanged.
	 */
	e->block_id = id;

	return e;

 fail:
	mempool_free(e, drbd_ee_mempool);
	return NULL;
}

void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, int is_net)
{
	if (e->flags & EE_HAS_DIGEST)
		kfree(e->digest);
	drbd_pp_free(mdev, e->pages, is_net);
	D_ASSERT(atomic_read(&e->pending_bios) == 0);
	D_ASSERT(drbd_interval_empty(&e->i));
	mempool_free(e, drbd_ee_mempool);
}

int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
{
	LIST_HEAD(work_list);
	struct drbd_epoch_entry *e, *t;
	int count = 0;
	int is_net = list == &mdev->net_ee;

	spin_lock_irq(&mdev->req_lock);
	list_splice_init(list, &work_list);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &work_list, w.list) {
		drbd_free_some_ee(mdev, e, is_net);
		count++;
	}
	return count;
}


/*
 * This function is called from _asender only_
 * but see also comments in _req_mod(,BARRIER_ACKED)
 * and receive_Barrier.
 *
 * Move entries from net_ee to done_ee, if ready.
 * Grab done_ee, call all callbacks, free the entries.
 * The callbacks typically send out ACKs.
 */
static int drbd_process_done_ee(struct drbd_conf *mdev)
{
	LIST_HEAD(work_list);
	LIST_HEAD(reclaimed);
	struct drbd_epoch_entry *e, *t;
	int ok = (mdev->state.conn >= C_WF_REPORT_PARAMS);

	spin_lock_irq(&mdev->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	list_splice_init(&mdev->done_ee, &work_list);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, e);

	/* possible callbacks here:
	 * e_end_block, and e_end_resync_block, e_send_discard_ack.
	 * all ignore the last argument.
	 */
	list_for_each_entry_safe(e, t, &work_list, w.list) {
		/* list_del not necessary, next/prev members not touched */
		ok = e->w.cb(mdev, &e->w, !ok) && ok;
		drbd_free_ee(mdev, e);
	}
	wake_up(&mdev->ee_wait);

	return ok;
}

void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
	DEFINE_WAIT(wait);

	/* avoids spin_lock/unlock
	 * and calling prepare_to_wait in the fast path */
	while (!list_empty(head)) {
		prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mdev->req_lock);
		io_schedule();
		finish_wait(&mdev->ee_wait, &wait);
		spin_lock_irq(&mdev->req_lock);
	}
}

void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
	spin_lock_irq(&mdev->req_lock);
	_drbd_wait_ee_list_empty(mdev, head);
	spin_unlock_irq(&mdev->req_lock);
}

/* see also kernel_accept; which is only present since 2.6.18.
 * also we want to log which part of it failed, exactly */
static int drbd_accept(struct drbd_conf *mdev, const char **what,
		struct socket *sock, struct socket **newsock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	*what = "listen";
	err = sock->ops->listen(sock, 5);
	if (err < 0)
		goto out;

	*what = "sock_create_lite";
	err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
			       newsock);
	if (err < 0)
		goto out;

	*what = "accept";
	err = sock->ops->accept(sock, *newsock, 0);
	if (err < 0) {
		sock_release(*newsock);
		*newsock = NULL;
		goto out;
	}
	(*newsock)->ops  = sock->ops;

out:
	return err;
}

static int drbd_recv_short(struct drbd_conf *mdev, struct socket *sock,
		    void *buf, size_t size, int flags)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
	set_fs(oldfs);

	return rv;
}

static int drbd_recv(struct drbd_conf *mdev, void *buf, size_t size)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = MSG_WAITALL | MSG_NOSIGNAL
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);

	for (;;) {
		rv = sock_recvmsg(mdev->data.socket, &msg, size, msg.msg_flags);
		if (rv == size)
			break;

		/* Note:
		 * ECONNRESET	other side closed the connection
		 * ERESTARTSYS	(on  sock) we got a signal
		 */

		if (rv < 0) {
			if (rv == -ECONNRESET)
				dev_info(DEV, "sock was reset by peer\n");
			else if (rv != -ERESTARTSYS)
				dev_err(DEV, "sock_recvmsg returned %d\n", rv);
			break;
		} else if (rv == 0) {
			dev_info(DEV, "sock was shut down by peer\n");
			break;
		} else	{
			/* signal came in, or peer/link went down,
			 * after we read a partial message
			 */
			/* D_ASSERT(signal_pending(current)); */
			break;
		}
	};

	set_fs(oldfs);

	if (rv != size)
		drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));

	return rv;
}
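
/* Added note: drbd_recv() requests MSG_WAITALL, so a return value different
 * from the requested size means the peer reset or shut down the connection,
 * or a signal came in; in that case the connection is forced to
 * C_BROKEN_PIPE. */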

/* quoting tcp(7):
 *   On individual connections, the socket buffer size must be set prior to the
 *   listen(2) or connect(2) calls in order to have it take effect.
 * This is our wrapper to do so.
 */
static void drbd_setbufsize(struct socket *sock, unsigned int snd,
			    unsigned int rcv)
{
	/* open coded SO_SNDBUF, SO_RCVBUF */
	if (snd) {
		sock->sk->sk_sndbuf = snd;
		sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
	}
	if (rcv) {
		sock->sk->sk_rcvbuf = rcv;
		sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
	}
}

static struct socket *drbd_try_connect(struct drbd_conf *mdev)
{
	const char *what;
	struct socket *sock;
	struct sockaddr_in6 src_in6;
	int err;
	int disconnect_on_error = 1;

	if (!get_net_conf(mdev->tconn))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)mdev->tconn->net_conf->my_addr)->sa_family,
		SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err < 0) {
		sock = NULL;
		goto out;
	}

	sock->sk->sk_rcvtimeo =
	sock->sk->sk_sndtimeo = mdev->tconn->net_conf->try_connect_int*HZ;
	drbd_setbufsize(sock, mdev->tconn->net_conf->sndbuf_size,
			mdev->tconn->net_conf->rcvbuf_size);

	/* explicitly bind to the configured IP as source IP
	 * for the outgoing connections.
	 * This is needed for multihomed hosts and to be
	 * able to use lo: interfaces for drbd.
	 * Make sure to use 0 as port number, so linux selects
	 * a free one dynamically.
	 */
	memcpy(&src_in6, mdev->tconn->net_conf->my_addr,
	       min_t(int, mdev->tconn->net_conf->my_addr_len, sizeof(src_in6)));
	if (((struct sockaddr *)mdev->tconn->net_conf->my_addr)->sa_family == AF_INET6)
		src_in6.sin6_port = 0;
	else
		((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */

	what = "bind before connect";
	err = sock->ops->bind(sock,
			      (struct sockaddr *) &src_in6,
			      mdev->tconn->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	/* connect may fail, peer not yet available.
	 * stay C_WF_CONNECTION, don't go Disconnecting! */
	disconnect_on_error = 0;
	what = "connect";
	err = sock->ops->connect(sock,
				 (struct sockaddr *)mdev->tconn->net_conf->peer_addr,
				 mdev->tconn->net_conf->peer_addr_len, 0);

out:
	if (err < 0) {
		if (sock) {
			sock_release(sock);
			sock = NULL;
		}
		switch (-err) {
			/* timeout, busy, signal pending */
		case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
		case EINTR: case ERESTARTSYS:
			/* peer not (yet) available, network problem */
		case ECONNREFUSED: case ENETUNREACH:
		case EHOSTDOWN:    case EHOSTUNREACH:
			disconnect_on_error = 0;
			break;
		default:
			dev_err(DEV, "%s failed, err = %d\n", what, err);
		}
		if (disconnect_on_error)
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
	}
	put_net_conf(mdev->tconn);
	return sock;
}

static struct socket *drbd_wait_for_connect(struct drbd_conf *mdev)
{
	int timeo, err;
	struct socket *s_estab = NULL, *s_listen;
	const char *what;

	if (!get_net_conf(mdev->tconn))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)mdev->tconn->net_conf->my_addr)->sa_family,
		SOCK_STREAM, IPPROTO_TCP, &s_listen);
	if (err) {
		s_listen = NULL;
		goto out;
	}

	timeo = mdev->tconn->net_conf->try_connect_int * HZ;
	timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */

	s_listen->sk->sk_reuse    = 1; /* SO_REUSEADDR */
	s_listen->sk->sk_rcvtimeo = timeo;
	s_listen->sk->sk_sndtimeo = timeo;
	drbd_setbufsize(s_listen, mdev->tconn->net_conf->sndbuf_size,
			mdev->tconn->net_conf->rcvbuf_size);

	what = "bind before listen";
	err = s_listen->ops->bind(s_listen,
			      (struct sockaddr *) mdev->tconn->net_conf->my_addr,
			      mdev->tconn->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	err = drbd_accept(mdev, &what, s_listen, &s_estab);

out:
	if (s_listen)
		sock_release(s_listen);
	if (err < 0) {
		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
			dev_err(DEV, "%s failed, err = %d\n", what, err);
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		}
	}
	put_net_conf(mdev->tconn);

	return s_estab;
}

static int drbd_send_fp(struct drbd_conf *mdev,
	struct socket *sock, enum drbd_packets cmd)
{
	struct p_header80 *h = &mdev->data.sbuf.header.h80;

	return _drbd_send_cmd(mdev, sock, cmd, h, sizeof(*h), 0);
}

static enum drbd_packets drbd_recv_fp(struct drbd_conf *mdev, struct socket *sock)
{
	struct p_header80 *h = &mdev->data.rbuf.header.h80;
	int rr;

	rr = drbd_recv_short(mdev, sock, h, sizeof(*h), 0);

	if (rr == sizeof(*h) && h->magic == cpu_to_be32(DRBD_MAGIC))
		return be16_to_cpu(h->command);

	return 0xffff;
}
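
/* Added note: the first packet exchanged on a freshly connected or accepted
 * socket is one of P_HAND_SHAKE_S or P_HAND_SHAKE_M; drbd_connect() below
 * uses it to decide whether that socket becomes the data socket or the meta
 * socket. */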

/**
 * drbd_socket_okay() - Free the socket if its connection is not okay
 * @mdev:	DRBD device.
 * @sock:	pointer to the pointer to the socket.
 */
static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
{
	int rr;
	char tb[4];

	if (!*sock)
		return false;

	rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);

	if (rr > 0 || rr == -EAGAIN) {
		return true;
	} else {
		sock_release(*sock);
		*sock = NULL;
		return false;
	}
}

/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 *  -2 We do not have a network config...
 */
static int drbd_connect(struct drbd_conf *mdev)
{
	struct socket *s, *sock, *msock;
	int try, h, ok;

	D_ASSERT(!mdev->data.socket);

	if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)
		return -2;

	clear_bit(DISCARD_CONCURRENT, &mdev->flags);

	sock  = NULL;
	msock = NULL;

	do {
		for (try = 0;;) {
			/* 3 tries, this should take less than a second! */
			s = drbd_try_connect(mdev);
			if (s || ++try >= 3)
				break;
			/* give the other side time to call bind() & listen() */
			schedule_timeout_interruptible(HZ / 10);
		}

		if (s) {
			if (!sock) {
				drbd_send_fp(mdev, s, P_HAND_SHAKE_S);
				sock = s;
				s = NULL;
			} else if (!msock) {
				drbd_send_fp(mdev, s, P_HAND_SHAKE_M);
				msock = s;
				s = NULL;
			} else {
				dev_err(DEV, "Logic error in drbd_connect()\n");
				goto out_release_sockets;
			}
		}

		if (sock && msock) {
			schedule_timeout_interruptible(mdev->tconn->net_conf->ping_timeo*HZ/10);
			ok = drbd_socket_okay(mdev, &sock);
			ok = drbd_socket_okay(mdev, &msock) && ok;
			if (ok)
				break;
		}

retry:
		s = drbd_wait_for_connect(mdev);
		if (s) {
			try = drbd_recv_fp(mdev, s);
			drbd_socket_okay(mdev, &sock);
			drbd_socket_okay(mdev, &msock);
			switch (try) {
			case P_HAND_SHAKE_S:
				if (sock) {
					dev_warn(DEV, "initial packet S crossed\n");
					sock_release(sock);
				}
				sock = s;
				break;
			case P_HAND_SHAKE_M:
				if (msock) {
					dev_warn(DEV, "initial packet M crossed\n");
					sock_release(msock);
				}
				msock = s;
				set_bit(DISCARD_CONCURRENT, &mdev->flags);
				break;
			default:
				dev_warn(DEV, "Error receiving initial packet\n");
				sock_release(s);
				if (random32() & 1)
					goto retry;
			}
		}

		if (mdev->state.conn <= C_DISCONNECTING)
			goto out_release_sockets;
		if (signal_pending(current)) {
			flush_signals(current);
			smp_rmb();
			if (get_t_state(&mdev->receiver) == EXITING)
				goto out_release_sockets;
		}

		if (sock && msock) {
			ok = drbd_socket_okay(mdev, &sock);
			ok = drbd_socket_okay(mdev, &msock) && ok;
			if (ok)
				break;
		}
	} while (1);

	msock->sk->sk_reuse = 1; /* SO_REUSEADDR */
	sock->sk->sk_reuse = 1; /* SO_REUSEADDR */

	sock->sk->sk_allocation = GFP_NOIO;
	msock->sk->sk_allocation = GFP_NOIO;

	sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
	msock->sk->sk_priority = TC_PRIO_INTERACTIVE;

	/* NOT YET ...
	 * sock->sk->sk_sndtimeo = mdev->tconn->net_conf->timeout*HZ/10;
	 * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	 * first set it to the P_HAND_SHAKE timeout,
	 * which we set to 4x the configured ping_timeout. */
	sock->sk->sk_sndtimeo =
	sock->sk->sk_rcvtimeo = mdev->tconn->net_conf->ping_timeo*4*HZ/10;

	msock->sk->sk_sndtimeo = mdev->tconn->net_conf->timeout*HZ/10;
	msock->sk->sk_rcvtimeo = mdev->tconn->net_conf->ping_int*HZ;

	/* we don't want delays.
	 * we use TCP_CORK where appropriate, though */
	drbd_tcp_nodelay(sock);
	drbd_tcp_nodelay(msock);

	mdev->data.socket = sock;
	mdev->meta.socket = msock;
	mdev->last_received = jiffies;

	D_ASSERT(mdev->asender.task == NULL);

	h = drbd_do_handshake(mdev);
	if (h <= 0)
		return h;

	if (mdev->cram_hmac_tfm) {
		/* drbd_request_state(mdev, NS(conn, WFAuth)); */
		switch (drbd_do_auth(mdev)) {
		case -1:
			dev_err(DEV, "Authentication of peer failed\n");
			return -1;
		case 0:
			dev_err(DEV, "Authentication of peer failed, trying again.\n");
			return 0;
		}
	}

	if (drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS)) < SS_SUCCESS)
		return 0;

	sock->sk->sk_sndtimeo = mdev->tconn->net_conf->timeout*HZ/10;
	sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;

	atomic_set(&mdev->packet_seq, 0);
	mdev->peer_seq = 0;

	drbd_thread_start(&mdev->asender);

	if (drbd_send_protocol(mdev) == -1)
		return -1;
	drbd_send_sync_param(mdev, &mdev->sync_conf);
	drbd_send_sizes(mdev, 0, 0);
	drbd_send_uuids(mdev);
	drbd_send_state(mdev);
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	clear_bit(RESIZE_PENDING, &mdev->flags);
	mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */

	return 1;

out_release_sockets:
	if (sock)
		sock_release(sock);
	if (msock)
		sock_release(msock);
	return -1;
}
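
/* Added note, summarizing the function above: once both sockets are
 * established, drbd_connect() runs the protocol handshake and (if a shared
 * secret is configured) CRAM-HMAC authentication, moves to
 * C_WF_REPORT_PARAMS, starts the asender thread, and then sends protocol,
 * sync parameters, sizes, uuids and state to the peer. */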

static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsigned int *packet_size)
{
	union p_header *h = &mdev->data.rbuf.header;
	int r;

	r = drbd_recv(mdev, h, sizeof(*h));
	if (unlikely(r != sizeof(*h))) {
		if (!signal_pending(current))
			dev_warn(DEV, "short read expecting header on sock: r=%d\n", r);
		return false;
	}

	if (likely(h->h80.magic == cpu_to_be32(DRBD_MAGIC))) {
		*cmd = be16_to_cpu(h->h80.command);
		*packet_size = be16_to_cpu(h->h80.length);
	} else if (h->h95.magic == cpu_to_be16(DRBD_MAGIC_BIG)) {
		*cmd = be16_to_cpu(h->h95.command);
		*packet_size = be32_to_cpu(h->h95.length);
	} else {
		dev_err(DEV, "magic?? on data m: 0x%08x c: %d l: %d\n",
		    be32_to_cpu(h->h80.magic),
		    be16_to_cpu(h->h80.command),
		    be16_to_cpu(h->h80.length));
		return false;
	}
	mdev->last_received = jiffies;

	return true;
}

static void drbd_flush(struct drbd_conf *mdev)
{
	int rv;

	if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
		rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
					NULL);
		if (rv) {
			dev_err(DEV, "local disk flush failed with status %d\n", rv);
			/* would rather check on EOPNOTSUPP, but that is not reliable.
			 * don't try again for ANY return value != 0
			 * if (rv == -EOPNOTSUPP) */
			drbd_bump_write_ordering(mdev, WO_drain_io);
		}
		put_ldev(mdev);
	}
}

/**
 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
 * @mdev:	DRBD device.
 * @epoch:	Epoch object.
 * @ev:		Epoch event.
 */
static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
					       struct drbd_epoch *epoch,
					       enum epoch_event ev)
{
	int epoch_size;
	struct drbd_epoch *next_epoch;
	enum finish_epoch rv = FE_STILL_LIVE;

	spin_lock(&mdev->epoch_lock);
	do {
		next_epoch = NULL;

		epoch_size = atomic_read(&epoch->epoch_size);

		switch (ev & ~EV_CLEANUP) {
		case EV_PUT:
			atomic_dec(&epoch->active);
			break;
		case EV_GOT_BARRIER_NR:
			set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
			break;
		case EV_BECAME_LAST:
			/* nothing to do*/
			break;
		}

		if (epoch_size != 0 &&
		    atomic_read(&epoch->active) == 0 &&
		    test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags)) {
			if (!(ev & EV_CLEANUP)) {
				spin_unlock(&mdev->epoch_lock);
				drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
				spin_lock(&mdev->epoch_lock);
			}
			dec_unacked(mdev);

			if (mdev->current_epoch != epoch) {
				next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
				list_del(&epoch->list);
				ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
				mdev->epochs--;
				kfree(epoch);

				if (rv == FE_STILL_LIVE)
					rv = FE_DESTROYED;
			} else {
				epoch->flags = 0;
				atomic_set(&epoch->epoch_size, 0);
				/* atomic_set(&epoch->active, 0); is already zero */
				if (rv == FE_STILL_LIVE)
					rv = FE_RECYCLED;
				wake_up(&mdev->ee_wait);
			}
		}

		if (!next_epoch)
			break;

		epoch = next_epoch;
	} while (1);

	spin_unlock(&mdev->epoch_lock);

	return rv;
}
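
/* Added note: an epoch is finished only when all three conditions above hold:
 * it has seen at least one write (epoch_size != 0), no request in it is still
 * pending (active == 0), and its barrier number has been received
 * (DE_HAVE_BARRIER_NUMBER).  Only then is a barrier ack sent (unless this is
 * a cleanup event) and the epoch either recycled, if it is the current one,
 * or destroyed. */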

/**
 * drbd_bump_write_ordering() - Fall back to another write ordering method
 * @mdev:	DRBD device.
 * @wo:		Write ordering method to try.
 */
void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) __must_hold(local)
{
	enum write_ordering_e pwo;
	static char *write_ordering_str[] = {
		[WO_none] = "none",
		[WO_drain_io] = "drain",
		[WO_bdev_flush] = "flush",
	};

	pwo = mdev->write_ordering;
	wo = min(pwo, wo);
	if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
		wo = WO_drain_io;
	if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
		wo = WO_none;
	mdev->write_ordering = wo;
	if (pwo != mdev->write_ordering || wo == WO_bdev_flush)
		dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
}
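
/* Added note: drbd_bump_write_ordering() only ever downgrades the method:
 * wo = min(pwo, wo) lets it fall from "flush" to "drain" to "none" (possibly
 * forced by the no_disk_flush/no_disk_drain settings), but this function
 * never upgrades the ordering method again. */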
*/ 110445bb912bSLars Ellenberg next_bio: 110545bb912bSLars Ellenberg bio = bio_alloc(GFP_NOIO, nr_pages); 110645bb912bSLars Ellenberg if (!bio) { 110745bb912bSLars Ellenberg dev_err(DEV, "submit_ee: Allocation of a bio failed\n"); 110845bb912bSLars Ellenberg goto fail; 110945bb912bSLars Ellenberg } 1110010f6e67SAndreas Gruenbacher /* > e->i.sector, unless this is the first bio */ 111145bb912bSLars Ellenberg bio->bi_sector = sector; 111245bb912bSLars Ellenberg bio->bi_bdev = mdev->ldev->backing_bdev; 111345bb912bSLars Ellenberg bio->bi_rw = rw; 111445bb912bSLars Ellenberg bio->bi_private = e; 111545bb912bSLars Ellenberg bio->bi_end_io = drbd_endio_sec; 111645bb912bSLars Ellenberg 111745bb912bSLars Ellenberg bio->bi_next = bios; 111845bb912bSLars Ellenberg bios = bio; 111945bb912bSLars Ellenberg ++n_bios; 112045bb912bSLars Ellenberg 112145bb912bSLars Ellenberg page_chain_for_each(page) { 112245bb912bSLars Ellenberg unsigned len = min_t(unsigned, ds, PAGE_SIZE); 112345bb912bSLars Ellenberg if (!bio_add_page(bio, page, len, 0)) { 112410f6d992SLars Ellenberg /* A single page must always be possible! 112510f6d992SLars Ellenberg * But in case it fails anyways, 112610f6d992SLars Ellenberg * we deal with it, and complain (below). */ 112710f6d992SLars Ellenberg if (bio->bi_vcnt == 0) { 112810f6d992SLars Ellenberg dev_err(DEV, 112910f6d992SLars Ellenberg "bio_add_page failed for len=%u, " 113010f6d992SLars Ellenberg "bi_vcnt=0 (bi_sector=%llu)\n", 113110f6d992SLars Ellenberg len, (unsigned long long)bio->bi_sector); 113210f6d992SLars Ellenberg err = -ENOSPC; 113310f6d992SLars Ellenberg goto fail; 113410f6d992SLars Ellenberg } 113545bb912bSLars Ellenberg goto next_bio; 113645bb912bSLars Ellenberg } 113745bb912bSLars Ellenberg ds -= len; 113845bb912bSLars Ellenberg sector += len >> 9; 113945bb912bSLars Ellenberg --nr_pages; 114045bb912bSLars Ellenberg } 114145bb912bSLars Ellenberg D_ASSERT(page == NULL); 114245bb912bSLars Ellenberg D_ASSERT(ds == 0); 114345bb912bSLars Ellenberg 114445bb912bSLars Ellenberg atomic_set(&e->pending_bios, n_bios); 114545bb912bSLars Ellenberg do { 114645bb912bSLars Ellenberg bio = bios; 114745bb912bSLars Ellenberg bios = bios->bi_next; 114845bb912bSLars Ellenberg bio->bi_next = NULL; 114945bb912bSLars Ellenberg 115045bb912bSLars Ellenberg drbd_generic_make_request(mdev, fault_type, bio); 115145bb912bSLars Ellenberg } while (bios); 115245bb912bSLars Ellenberg return 0; 115345bb912bSLars Ellenberg 115445bb912bSLars Ellenberg fail: 115545bb912bSLars Ellenberg while (bios) { 115645bb912bSLars Ellenberg bio = bios; 115745bb912bSLars Ellenberg bios = bios->bi_next; 115845bb912bSLars Ellenberg bio_put(bio); 115945bb912bSLars Ellenberg } 116010f6d992SLars Ellenberg return err; 116145bb912bSLars Ellenberg } 116245bb912bSLars Ellenberg 116302918be2SPhilipp Reisner static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) 1164b411b363SPhilipp Reisner { 11652451fc3bSPhilipp Reisner int rv; 116602918be2SPhilipp Reisner struct p_barrier *p = &mdev->data.rbuf.barrier; 1167b411b363SPhilipp Reisner struct drbd_epoch *epoch; 1168b411b363SPhilipp Reisner 1169b411b363SPhilipp Reisner inc_unacked(mdev); 1170b411b363SPhilipp Reisner 1171b411b363SPhilipp Reisner mdev->current_epoch->barrier_nr = p->barrier; 1172b411b363SPhilipp Reisner rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR); 1173b411b363SPhilipp Reisner 1174b411b363SPhilipp Reisner /* P_BARRIER_ACK may imply that the corresponding extent is dropped from 1175b411b363SPhilipp 
Reisner * the activity log, which means it would not be resynced in case the 1176b411b363SPhilipp Reisner * R_PRIMARY crashes now. 1177b411b363SPhilipp Reisner * Therefore we must send the barrier_ack after the barrier request was 1178b411b363SPhilipp Reisner * completed. */ 1179b411b363SPhilipp Reisner switch (mdev->write_ordering) { 1180b411b363SPhilipp Reisner case WO_none: 1181b411b363SPhilipp Reisner if (rv == FE_RECYCLED) 118281e84650SAndreas Gruenbacher return true; 1183b411b363SPhilipp Reisner 1184b411b363SPhilipp Reisner /* receiver context, in the writeout path of the other node. 1185b411b363SPhilipp Reisner * avoid potential distributed deadlock */ 1186b411b363SPhilipp Reisner epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO); 11872451fc3bSPhilipp Reisner if (epoch) 11882451fc3bSPhilipp Reisner break; 11892451fc3bSPhilipp Reisner else 1190b411b363SPhilipp Reisner dev_warn(DEV, "Allocation of an epoch failed, slowing down\n"); 11912451fc3bSPhilipp Reisner /* Fall through */ 11922451fc3bSPhilipp Reisner 11932451fc3bSPhilipp Reisner case WO_bdev_flush: 11942451fc3bSPhilipp Reisner case WO_drain_io: 1195b411b363SPhilipp Reisner drbd_wait_ee_list_empty(mdev, &mdev->active_ee); 11962451fc3bSPhilipp Reisner drbd_flush(mdev); 11972451fc3bSPhilipp Reisner 11982451fc3bSPhilipp Reisner if (atomic_read(&mdev->current_epoch->epoch_size)) { 11992451fc3bSPhilipp Reisner epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO); 12002451fc3bSPhilipp Reisner if (epoch) 12012451fc3bSPhilipp Reisner break; 1202b411b363SPhilipp Reisner } 1203b411b363SPhilipp Reisner 12042451fc3bSPhilipp Reisner epoch = mdev->current_epoch; 12052451fc3bSPhilipp Reisner wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0); 12062451fc3bSPhilipp Reisner 12072451fc3bSPhilipp Reisner D_ASSERT(atomic_read(&epoch->active) == 0); 12082451fc3bSPhilipp Reisner D_ASSERT(epoch->flags == 0); 1209b411b363SPhilipp Reisner 121081e84650SAndreas Gruenbacher return true; 12112451fc3bSPhilipp Reisner default: 12122451fc3bSPhilipp Reisner dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering); 121381e84650SAndreas Gruenbacher return false; 1214b411b363SPhilipp Reisner } 1215b411b363SPhilipp Reisner 1216b411b363SPhilipp Reisner epoch->flags = 0; 1217b411b363SPhilipp Reisner atomic_set(&epoch->epoch_size, 0); 1218b411b363SPhilipp Reisner atomic_set(&epoch->active, 0); 1219b411b363SPhilipp Reisner 1220b411b363SPhilipp Reisner spin_lock(&mdev->epoch_lock); 1221b411b363SPhilipp Reisner if (atomic_read(&mdev->current_epoch->epoch_size)) { 1222b411b363SPhilipp Reisner list_add(&epoch->list, &mdev->current_epoch->list); 1223b411b363SPhilipp Reisner mdev->current_epoch = epoch; 1224b411b363SPhilipp Reisner mdev->epochs++; 1225b411b363SPhilipp Reisner } else { 1226b411b363SPhilipp Reisner /* The current_epoch got recycled while we allocated this one... 
*/
1227b411b363SPhilipp Reisner kfree(epoch);
1228b411b363SPhilipp Reisner }
1229b411b363SPhilipp Reisner spin_unlock(&mdev->epoch_lock);
1230b411b363SPhilipp Reisner
123181e84650SAndreas Gruenbacher return true;
1232b411b363SPhilipp Reisner }
1233b411b363SPhilipp Reisner
1234b411b363SPhilipp Reisner /* used from receive_RSDataReply (recv_resync_read)
1235b411b363SPhilipp Reisner * and from receive_Data */
1236b411b363SPhilipp Reisner static struct drbd_epoch_entry *
1237b411b363SPhilipp Reisner read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __must_hold(local)
1238b411b363SPhilipp Reisner {
12396666032aSLars Ellenberg const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
1240b411b363SPhilipp Reisner struct drbd_epoch_entry *e;
1241b411b363SPhilipp Reisner struct page *page;
124245bb912bSLars Ellenberg int dgs, ds, rr;
1243b411b363SPhilipp Reisner void *dig_in = mdev->int_dig_in;
1244b411b363SPhilipp Reisner void *dig_vv = mdev->int_dig_vv;
12456b4388acSPhilipp Reisner unsigned long *data;
1246b411b363SPhilipp Reisner
1247b411b363SPhilipp Reisner dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
1248b411b363SPhilipp Reisner crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
1249b411b363SPhilipp Reisner
1250b411b363SPhilipp Reisner if (dgs) {
1251b411b363SPhilipp Reisner rr = drbd_recv(mdev, dig_in, dgs);
1252b411b363SPhilipp Reisner if (rr != dgs) {
12530ddc5549SLars Ellenberg if (!signal_pending(current))
12540ddc5549SLars Ellenberg dev_warn(DEV,
12550ddc5549SLars Ellenberg "short read receiving data digest: read %d expected %d\n",
1256b411b363SPhilipp Reisner rr, dgs);
1257b411b363SPhilipp Reisner return NULL;
1258b411b363SPhilipp Reisner }
1259b411b363SPhilipp Reisner }
1260b411b363SPhilipp Reisner
1261b411b363SPhilipp Reisner data_size -= dgs;
1262b411b363SPhilipp Reisner
1263841ce241SAndreas Gruenbacher if (!expect(data_size != 0))
1264841ce241SAndreas Gruenbacher return NULL;
1265841ce241SAndreas Gruenbacher if (!expect(IS_ALIGNED(data_size, 512)))
1266841ce241SAndreas Gruenbacher return NULL;
1267841ce241SAndreas Gruenbacher if (!expect(data_size <= DRBD_MAX_BIO_SIZE))
1268841ce241SAndreas Gruenbacher return NULL;
1269b411b363SPhilipp Reisner
12706666032aSLars Ellenberg /* even though we trust our peer,
12716666032aSLars Ellenberg * we sometimes have to double check. */
12726666032aSLars Ellenberg if (sector + (data_size>>9) > capacity) {
1273fdda6544SLars Ellenberg dev_err(DEV, "request from peer beyond end of local disk: "
1274fdda6544SLars Ellenberg "capacity: %llus < sector: %llus + size: %u\n",
12756666032aSLars Ellenberg (unsigned long long)capacity,
12766666032aSLars Ellenberg (unsigned long long)sector, data_size);
12776666032aSLars Ellenberg return NULL;
12786666032aSLars Ellenberg }
12796666032aSLars Ellenberg
1280b411b363SPhilipp Reisner /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
1281b411b363SPhilipp Reisner * "criss-cross" setup, that might cause write-out on some other DRBD,
1282b411b363SPhilipp Reisner * which in turn might block on the other node at this very place.
*/ 1283b411b363SPhilipp Reisner e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO); 1284b411b363SPhilipp Reisner if (!e) 1285b411b363SPhilipp Reisner return NULL; 128645bb912bSLars Ellenberg 1287b411b363SPhilipp Reisner ds = data_size; 128845bb912bSLars Ellenberg page = e->pages; 128945bb912bSLars Ellenberg page_chain_for_each(page) { 129045bb912bSLars Ellenberg unsigned len = min_t(int, ds, PAGE_SIZE); 12916b4388acSPhilipp Reisner data = kmap(page); 129245bb912bSLars Ellenberg rr = drbd_recv(mdev, data, len); 12930cf9d27eSAndreas Gruenbacher if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) { 12946b4388acSPhilipp Reisner dev_err(DEV, "Fault injection: Corrupting data on receive\n"); 12956b4388acSPhilipp Reisner data[0] = data[0] ^ (unsigned long)-1; 12966b4388acSPhilipp Reisner } 1297b411b363SPhilipp Reisner kunmap(page); 129845bb912bSLars Ellenberg if (rr != len) { 1299b411b363SPhilipp Reisner drbd_free_ee(mdev, e); 13000ddc5549SLars Ellenberg if (!signal_pending(current)) 1301b411b363SPhilipp Reisner dev_warn(DEV, "short read receiving data: read %d expected %d\n", 130245bb912bSLars Ellenberg rr, len); 1303b411b363SPhilipp Reisner return NULL; 1304b411b363SPhilipp Reisner } 1305b411b363SPhilipp Reisner ds -= rr; 1306b411b363SPhilipp Reisner } 1307b411b363SPhilipp Reisner 1308b411b363SPhilipp Reisner if (dgs) { 130945bb912bSLars Ellenberg drbd_csum_ee(mdev, mdev->integrity_r_tfm, e, dig_vv); 1310b411b363SPhilipp Reisner if (memcmp(dig_in, dig_vv, dgs)) { 1311470be44aSLars Ellenberg dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n", 1312470be44aSLars Ellenberg (unsigned long long)sector, data_size); 1313b411b363SPhilipp Reisner drbd_bcast_ee(mdev, "digest failed", 1314b411b363SPhilipp Reisner dgs, dig_in, dig_vv, e); 1315b411b363SPhilipp Reisner drbd_free_ee(mdev, e); 1316b411b363SPhilipp Reisner return NULL; 1317b411b363SPhilipp Reisner } 1318b411b363SPhilipp Reisner } 1319b411b363SPhilipp Reisner mdev->recv_cnt += data_size>>9; 1320b411b363SPhilipp Reisner return e; 1321b411b363SPhilipp Reisner } 1322b411b363SPhilipp Reisner 1323b411b363SPhilipp Reisner /* drbd_drain_block() just takes a data block 1324b411b363SPhilipp Reisner * out of the socket input buffer, and discards it. 
1325b411b363SPhilipp Reisner */ 1326b411b363SPhilipp Reisner static int drbd_drain_block(struct drbd_conf *mdev, int data_size) 1327b411b363SPhilipp Reisner { 1328b411b363SPhilipp Reisner struct page *page; 1329b411b363SPhilipp Reisner int rr, rv = 1; 1330b411b363SPhilipp Reisner void *data; 1331b411b363SPhilipp Reisner 1332c3470cdeSLars Ellenberg if (!data_size) 133381e84650SAndreas Gruenbacher return true; 1334c3470cdeSLars Ellenberg 133545bb912bSLars Ellenberg page = drbd_pp_alloc(mdev, 1, 1); 1336b411b363SPhilipp Reisner 1337b411b363SPhilipp Reisner data = kmap(page); 1338b411b363SPhilipp Reisner while (data_size) { 1339b411b363SPhilipp Reisner rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE)); 1340b411b363SPhilipp Reisner if (rr != min_t(int, data_size, PAGE_SIZE)) { 1341b411b363SPhilipp Reisner rv = 0; 13420ddc5549SLars Ellenberg if (!signal_pending(current)) 13430ddc5549SLars Ellenberg dev_warn(DEV, 13440ddc5549SLars Ellenberg "short read receiving data: read %d expected %d\n", 1345b411b363SPhilipp Reisner rr, min_t(int, data_size, PAGE_SIZE)); 1346b411b363SPhilipp Reisner break; 1347b411b363SPhilipp Reisner } 1348b411b363SPhilipp Reisner data_size -= rr; 1349b411b363SPhilipp Reisner } 1350b411b363SPhilipp Reisner kunmap(page); 1351435f0740SLars Ellenberg drbd_pp_free(mdev, page, 0); 1352b411b363SPhilipp Reisner return rv; 1353b411b363SPhilipp Reisner } 1354b411b363SPhilipp Reisner 1355b411b363SPhilipp Reisner static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req, 1356b411b363SPhilipp Reisner sector_t sector, int data_size) 1357b411b363SPhilipp Reisner { 1358b411b363SPhilipp Reisner struct bio_vec *bvec; 1359b411b363SPhilipp Reisner struct bio *bio; 1360b411b363SPhilipp Reisner int dgs, rr, i, expect; 1361b411b363SPhilipp Reisner void *dig_in = mdev->int_dig_in; 1362b411b363SPhilipp Reisner void *dig_vv = mdev->int_dig_vv; 1363b411b363SPhilipp Reisner 1364b411b363SPhilipp Reisner dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ? 1365b411b363SPhilipp Reisner crypto_hash_digestsize(mdev->integrity_r_tfm) : 0; 1366b411b363SPhilipp Reisner 1367b411b363SPhilipp Reisner if (dgs) { 1368b411b363SPhilipp Reisner rr = drbd_recv(mdev, dig_in, dgs); 1369b411b363SPhilipp Reisner if (rr != dgs) { 13700ddc5549SLars Ellenberg if (!signal_pending(current)) 13710ddc5549SLars Ellenberg dev_warn(DEV, 13720ddc5549SLars Ellenberg "short read receiving data reply digest: read %d expected %d\n", 1373b411b363SPhilipp Reisner rr, dgs); 1374b411b363SPhilipp Reisner return 0; 1375b411b363SPhilipp Reisner } 1376b411b363SPhilipp Reisner } 1377b411b363SPhilipp Reisner 1378b411b363SPhilipp Reisner data_size -= dgs; 1379b411b363SPhilipp Reisner 1380b411b363SPhilipp Reisner /* optimistically update recv_cnt. if receiving fails below, 1381b411b363SPhilipp Reisner * we disconnect anyways, and counters will be reset. 
*/ 1382b411b363SPhilipp Reisner mdev->recv_cnt += data_size>>9; 1383b411b363SPhilipp Reisner 1384b411b363SPhilipp Reisner bio = req->master_bio; 1385b411b363SPhilipp Reisner D_ASSERT(sector == bio->bi_sector); 1386b411b363SPhilipp Reisner 1387b411b363SPhilipp Reisner bio_for_each_segment(bvec, bio, i) { 1388b411b363SPhilipp Reisner expect = min_t(int, data_size, bvec->bv_len); 1389b411b363SPhilipp Reisner rr = drbd_recv(mdev, 1390b411b363SPhilipp Reisner kmap(bvec->bv_page)+bvec->bv_offset, 1391b411b363SPhilipp Reisner expect); 1392b411b363SPhilipp Reisner kunmap(bvec->bv_page); 1393b411b363SPhilipp Reisner if (rr != expect) { 13940ddc5549SLars Ellenberg if (!signal_pending(current)) 1395b411b363SPhilipp Reisner dev_warn(DEV, "short read receiving data reply: " 1396b411b363SPhilipp Reisner "read %d expected %d\n", 1397b411b363SPhilipp Reisner rr, expect); 1398b411b363SPhilipp Reisner return 0; 1399b411b363SPhilipp Reisner } 1400b411b363SPhilipp Reisner data_size -= rr; 1401b411b363SPhilipp Reisner } 1402b411b363SPhilipp Reisner 1403b411b363SPhilipp Reisner if (dgs) { 140445bb912bSLars Ellenberg drbd_csum_bio(mdev, mdev->integrity_r_tfm, bio, dig_vv); 1405b411b363SPhilipp Reisner if (memcmp(dig_in, dig_vv, dgs)) { 1406b411b363SPhilipp Reisner dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n"); 1407b411b363SPhilipp Reisner return 0; 1408b411b363SPhilipp Reisner } 1409b411b363SPhilipp Reisner } 1410b411b363SPhilipp Reisner 1411b411b363SPhilipp Reisner D_ASSERT(data_size == 0); 1412b411b363SPhilipp Reisner return 1; 1413b411b363SPhilipp Reisner } 1414b411b363SPhilipp Reisner 1415b411b363SPhilipp Reisner /* e_end_resync_block() is called via 1416b411b363SPhilipp Reisner * drbd_process_done_ee() by asender only */ 1417b411b363SPhilipp Reisner static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int unused) 1418b411b363SPhilipp Reisner { 1419b411b363SPhilipp Reisner struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w; 1420010f6e67SAndreas Gruenbacher sector_t sector = e->i.sector; 1421b411b363SPhilipp Reisner int ok; 1422b411b363SPhilipp Reisner 14238b946255SAndreas Gruenbacher D_ASSERT(drbd_interval_empty(&e->i)); 1424b411b363SPhilipp Reisner 142545bb912bSLars Ellenberg if (likely((e->flags & EE_WAS_ERROR) == 0)) { 1426010f6e67SAndreas Gruenbacher drbd_set_in_sync(mdev, sector, e->i.size); 1427b411b363SPhilipp Reisner ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, e); 1428b411b363SPhilipp Reisner } else { 1429b411b363SPhilipp Reisner /* Record failure to sync */ 1430010f6e67SAndreas Gruenbacher drbd_rs_failed_io(mdev, sector, e->i.size); 1431b411b363SPhilipp Reisner 1432b411b363SPhilipp Reisner ok = drbd_send_ack(mdev, P_NEG_ACK, e); 1433b411b363SPhilipp Reisner } 1434b411b363SPhilipp Reisner dec_unacked(mdev); 1435b411b363SPhilipp Reisner 1436b411b363SPhilipp Reisner return ok; 1437b411b363SPhilipp Reisner } 1438b411b363SPhilipp Reisner 1439b411b363SPhilipp Reisner static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local) 1440b411b363SPhilipp Reisner { 1441b411b363SPhilipp Reisner struct drbd_epoch_entry *e; 1442b411b363SPhilipp Reisner 1443b411b363SPhilipp Reisner e = read_in_block(mdev, ID_SYNCER, sector, data_size); 144445bb912bSLars Ellenberg if (!e) 144545bb912bSLars Ellenberg goto fail; 1446b411b363SPhilipp Reisner 1447b411b363SPhilipp Reisner dec_rs_pending(mdev); 1448b411b363SPhilipp Reisner 1449b411b363SPhilipp Reisner inc_unacked(mdev); 1450b411b363SPhilipp Reisner /* corresponding dec_unacked() in 
e_end_resync_block() 1451b411b363SPhilipp Reisner * respective _drbd_clear_done_ee */ 1452b411b363SPhilipp Reisner 145345bb912bSLars Ellenberg e->w.cb = e_end_resync_block; 145445bb912bSLars Ellenberg 1455b411b363SPhilipp Reisner spin_lock_irq(&mdev->req_lock); 1456b411b363SPhilipp Reisner list_add(&e->w.list, &mdev->sync_ee); 1457b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 1458b411b363SPhilipp Reisner 14590f0601f4SLars Ellenberg atomic_add(data_size >> 9, &mdev->rs_sect_ev); 146045bb912bSLars Ellenberg if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0) 146181e84650SAndreas Gruenbacher return true; 146245bb912bSLars Ellenberg 146310f6d992SLars Ellenberg /* don't care for the reason here */ 146410f6d992SLars Ellenberg dev_err(DEV, "submit failed, triggering re-connect\n"); 146522cc37a9SLars Ellenberg spin_lock_irq(&mdev->req_lock); 146622cc37a9SLars Ellenberg list_del(&e->w.list); 146722cc37a9SLars Ellenberg spin_unlock_irq(&mdev->req_lock); 146822cc37a9SLars Ellenberg 146945bb912bSLars Ellenberg drbd_free_ee(mdev, e); 147045bb912bSLars Ellenberg fail: 147145bb912bSLars Ellenberg put_ldev(mdev); 147281e84650SAndreas Gruenbacher return false; 1473b411b363SPhilipp Reisner } 1474b411b363SPhilipp Reisner 1475668eebc6SAndreas Gruenbacher static struct drbd_request * 1476bc9c5c41SAndreas Gruenbacher find_request(struct drbd_conf *mdev, struct rb_root *root, u64 id, 1477bc9c5c41SAndreas Gruenbacher sector_t sector, bool missing_ok, const char *func) 1478668eebc6SAndreas Gruenbacher { 1479668eebc6SAndreas Gruenbacher struct drbd_request *req; 1480668eebc6SAndreas Gruenbacher 1481bc9c5c41SAndreas Gruenbacher /* Request object according to our peer */ 1482bc9c5c41SAndreas Gruenbacher req = (struct drbd_request *)(unsigned long)id; 1483bc9c5c41SAndreas Gruenbacher if (drbd_contains_interval(root, sector, &req->i)) 1484668eebc6SAndreas Gruenbacher return req; 1485c3afd8f5SAndreas Gruenbacher if (!missing_ok) { 1486c3afd8f5SAndreas Gruenbacher dev_err(DEV, "%s: failed to find request %lu, sector %llus\n", func, 1487c3afd8f5SAndreas Gruenbacher (unsigned long)id, (unsigned long long)sector); 1488c3afd8f5SAndreas Gruenbacher } 1489668eebc6SAndreas Gruenbacher return NULL; 1490668eebc6SAndreas Gruenbacher } 1491668eebc6SAndreas Gruenbacher 149202918be2SPhilipp Reisner static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) 1493b411b363SPhilipp Reisner { 1494b411b363SPhilipp Reisner struct drbd_request *req; 1495b411b363SPhilipp Reisner sector_t sector; 1496b411b363SPhilipp Reisner int ok; 149702918be2SPhilipp Reisner struct p_data *p = &mdev->data.rbuf.data; 1498b411b363SPhilipp Reisner 1499b411b363SPhilipp Reisner sector = be64_to_cpu(p->sector); 1500b411b363SPhilipp Reisner 1501b411b363SPhilipp Reisner spin_lock_irq(&mdev->req_lock); 1502bc9c5c41SAndreas Gruenbacher req = find_request(mdev, &mdev->read_requests, p->block_id, sector, false, __func__); 1503b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 1504c3afd8f5SAndreas Gruenbacher if (unlikely(!req)) 150581e84650SAndreas Gruenbacher return false; 1506b411b363SPhilipp Reisner 150724c4830cSBart Van Assche /* hlist_del(&req->collision) is done in _req_may_be_done, to avoid 1508b411b363SPhilipp Reisner * special casing it there for the various failure cases. 
1509b411b363SPhilipp Reisner * still no race with drbd_fail_pending_reads */ 1510b411b363SPhilipp Reisner ok = recv_dless_read(mdev, req, sector, data_size); 1511b411b363SPhilipp Reisner 1512b411b363SPhilipp Reisner if (ok) 15138554df1cSAndreas Gruenbacher req_mod(req, DATA_RECEIVED); 1514b411b363SPhilipp Reisner /* else: nothing. handled from drbd_disconnect... 1515b411b363SPhilipp Reisner * I don't think we may complete this just yet 1516b411b363SPhilipp Reisner * in case we are "on-disconnect: freeze" */ 1517b411b363SPhilipp Reisner 1518b411b363SPhilipp Reisner return ok; 1519b411b363SPhilipp Reisner } 1520b411b363SPhilipp Reisner 152102918be2SPhilipp Reisner static int receive_RSDataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) 1522b411b363SPhilipp Reisner { 1523b411b363SPhilipp Reisner sector_t sector; 1524b411b363SPhilipp Reisner int ok; 152502918be2SPhilipp Reisner struct p_data *p = &mdev->data.rbuf.data; 1526b411b363SPhilipp Reisner 1527b411b363SPhilipp Reisner sector = be64_to_cpu(p->sector); 1528b411b363SPhilipp Reisner D_ASSERT(p->block_id == ID_SYNCER); 1529b411b363SPhilipp Reisner 1530b411b363SPhilipp Reisner if (get_ldev(mdev)) { 1531b411b363SPhilipp Reisner /* data is submitted to disk within recv_resync_read. 1532b411b363SPhilipp Reisner * corresponding put_ldev done below on error, 15339c50842aSAndreas Gruenbacher * or in drbd_endio_sec. */ 1534b411b363SPhilipp Reisner ok = recv_resync_read(mdev, sector, data_size); 1535b411b363SPhilipp Reisner } else { 1536b411b363SPhilipp Reisner if (__ratelimit(&drbd_ratelimit_state)) 1537b411b363SPhilipp Reisner dev_err(DEV, "Can not write resync data to local disk.\n"); 1538b411b363SPhilipp Reisner 1539b411b363SPhilipp Reisner ok = drbd_drain_block(mdev, data_size); 1540b411b363SPhilipp Reisner 15412b2bf214SLars Ellenberg drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size); 1542b411b363SPhilipp Reisner } 1543b411b363SPhilipp Reisner 1544778f271dSPhilipp Reisner atomic_add(data_size >> 9, &mdev->rs_sect_in); 1545778f271dSPhilipp Reisner 1546b411b363SPhilipp Reisner return ok; 1547b411b363SPhilipp Reisner } 1548b411b363SPhilipp Reisner 1549b411b363SPhilipp Reisner /* e_end_block() is called via drbd_process_done_ee(). 1550b411b363SPhilipp Reisner * this means this function only runs in the asender thread 1551b411b363SPhilipp Reisner */ 1552b411b363SPhilipp Reisner static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel) 1553b411b363SPhilipp Reisner { 1554b411b363SPhilipp Reisner struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w; 1555010f6e67SAndreas Gruenbacher sector_t sector = e->i.sector; 1556b411b363SPhilipp Reisner int ok = 1, pcmd; 1557b411b363SPhilipp Reisner 155889e58e75SPhilipp Reisner if (mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C) { 155945bb912bSLars Ellenberg if (likely((e->flags & EE_WAS_ERROR) == 0)) { 1560b411b363SPhilipp Reisner pcmd = (mdev->state.conn >= C_SYNC_SOURCE && 1561b411b363SPhilipp Reisner mdev->state.conn <= C_PAUSED_SYNC_T && 1562b411b363SPhilipp Reisner e->flags & EE_MAY_SET_IN_SYNC) ? 1563b411b363SPhilipp Reisner P_RS_WRITE_ACK : P_WRITE_ACK; 1564b411b363SPhilipp Reisner ok &= drbd_send_ack(mdev, pcmd, e); 1565b411b363SPhilipp Reisner if (pcmd == P_RS_WRITE_ACK) 1566010f6e67SAndreas Gruenbacher drbd_set_in_sync(mdev, sector, e->i.size); 1567b411b363SPhilipp Reisner } else { 1568b411b363SPhilipp Reisner ok = drbd_send_ack(mdev, P_NEG_ACK, e); 1569b411b363SPhilipp Reisner /* we expect it to be marked out of sync anyways... 
1570b411b363SPhilipp Reisner * maybe assert this? */ 1571b411b363SPhilipp Reisner } 1572b411b363SPhilipp Reisner dec_unacked(mdev); 1573b411b363SPhilipp Reisner } 1574b411b363SPhilipp Reisner /* we delete from the conflict detection hash _after_ we sent out the 1575b411b363SPhilipp Reisner * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */ 157689e58e75SPhilipp Reisner if (mdev->tconn->net_conf->two_primaries) { 1577b411b363SPhilipp Reisner spin_lock_irq(&mdev->req_lock); 15788b946255SAndreas Gruenbacher D_ASSERT(!drbd_interval_empty(&e->i)); 15798b946255SAndreas Gruenbacher drbd_remove_interval(&mdev->epoch_entries, &e->i); 15808b946255SAndreas Gruenbacher drbd_clear_interval(&e->i); 1581b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 1582bb3bfe96SAndreas Gruenbacher } else 15838b946255SAndreas Gruenbacher D_ASSERT(drbd_interval_empty(&e->i)); 1584b411b363SPhilipp Reisner 1585b411b363SPhilipp Reisner drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0)); 1586b411b363SPhilipp Reisner 1587b411b363SPhilipp Reisner return ok; 1588b411b363SPhilipp Reisner } 1589b411b363SPhilipp Reisner 1590b411b363SPhilipp Reisner static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int unused) 1591b411b363SPhilipp Reisner { 1592b411b363SPhilipp Reisner struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w; 1593b411b363SPhilipp Reisner int ok = 1; 1594b411b363SPhilipp Reisner 159589e58e75SPhilipp Reisner D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C); 1596b411b363SPhilipp Reisner ok = drbd_send_ack(mdev, P_DISCARD_ACK, e); 1597b411b363SPhilipp Reisner 1598b411b363SPhilipp Reisner spin_lock_irq(&mdev->req_lock); 15998b946255SAndreas Gruenbacher D_ASSERT(!drbd_interval_empty(&e->i)); 16008b946255SAndreas Gruenbacher drbd_remove_interval(&mdev->epoch_entries, &e->i); 16018b946255SAndreas Gruenbacher drbd_clear_interval(&e->i); 1602b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 1603b411b363SPhilipp Reisner 1604b411b363SPhilipp Reisner dec_unacked(mdev); 1605b411b363SPhilipp Reisner 1606b411b363SPhilipp Reisner return ok; 1607b411b363SPhilipp Reisner } 1608b411b363SPhilipp Reisner 1609b411b363SPhilipp Reisner /* Called from receive_Data. 1610b411b363SPhilipp Reisner * Synchronize packets on sock with packets on msock. 1611b411b363SPhilipp Reisner * 1612b411b363SPhilipp Reisner * This is here so even when a P_DATA packet traveling via sock overtook an Ack 1613b411b363SPhilipp Reisner * packet traveling on msock, they are still processed in the order they have 1614b411b363SPhilipp Reisner * been sent. 1615b411b363SPhilipp Reisner * 1616b411b363SPhilipp Reisner * Note: we don't care for Ack packets overtaking P_DATA packets. 1617b411b363SPhilipp Reisner * 1618b411b363SPhilipp Reisner * In case packet_seq is larger than mdev->peer_seq number, there are 1619b411b363SPhilipp Reisner * outstanding packets on the msock. We wait for them to arrive. 1620b411b363SPhilipp Reisner * In case we are the logically next packet, we update mdev->peer_seq 1621b411b363SPhilipp Reisner * ourselves. Correctly handles 32bit wrap around. 1622b411b363SPhilipp Reisner * 1623b411b363SPhilipp Reisner * Assume we have a 10 GBit connection, that is about 1<<30 byte per second, 1624b411b363SPhilipp Reisner * about 1<<21 sectors per second. 
So "worst" case, we have 1<<3 == 8 seconds 1625b411b363SPhilipp Reisner * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have 1626b411b363SPhilipp Reisner * 1<<9 == 512 seconds aka ages for the 32bit wrap around... 1627b411b363SPhilipp Reisner * 1628b411b363SPhilipp Reisner * returns 0 if we may process the packet, 1629b411b363SPhilipp Reisner * -ERESTARTSYS if we were interrupted (by disconnect signal). */ 1630b411b363SPhilipp Reisner static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq) 1631b411b363SPhilipp Reisner { 1632b411b363SPhilipp Reisner DEFINE_WAIT(wait); 1633b411b363SPhilipp Reisner unsigned int p_seq; 1634b411b363SPhilipp Reisner long timeout; 1635b411b363SPhilipp Reisner int ret = 0; 1636b411b363SPhilipp Reisner spin_lock(&mdev->peer_seq_lock); 1637b411b363SPhilipp Reisner for (;;) { 1638b411b363SPhilipp Reisner prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE); 1639b411b363SPhilipp Reisner if (seq_le(packet_seq, mdev->peer_seq+1)) 1640b411b363SPhilipp Reisner break; 1641b411b363SPhilipp Reisner if (signal_pending(current)) { 1642b411b363SPhilipp Reisner ret = -ERESTARTSYS; 1643b411b363SPhilipp Reisner break; 1644b411b363SPhilipp Reisner } 1645b411b363SPhilipp Reisner p_seq = mdev->peer_seq; 1646b411b363SPhilipp Reisner spin_unlock(&mdev->peer_seq_lock); 1647b411b363SPhilipp Reisner timeout = schedule_timeout(30*HZ); 1648b411b363SPhilipp Reisner spin_lock(&mdev->peer_seq_lock); 1649b411b363SPhilipp Reisner if (timeout == 0 && p_seq == mdev->peer_seq) { 1650b411b363SPhilipp Reisner ret = -ETIMEDOUT; 1651b411b363SPhilipp Reisner dev_err(DEV, "ASSERT FAILED waited 30 seconds for sequence update, forcing reconnect\n"); 1652b411b363SPhilipp Reisner break; 1653b411b363SPhilipp Reisner } 1654b411b363SPhilipp Reisner } 1655b411b363SPhilipp Reisner finish_wait(&mdev->seq_wait, &wait); 1656b411b363SPhilipp Reisner if (mdev->peer_seq+1 == packet_seq) 1657b411b363SPhilipp Reisner mdev->peer_seq++; 1658b411b363SPhilipp Reisner spin_unlock(&mdev->peer_seq_lock); 1659b411b363SPhilipp Reisner return ret; 1660b411b363SPhilipp Reisner } 1661b411b363SPhilipp Reisner 1662688593c5SLars Ellenberg /* see also bio_flags_to_wire() 1663688593c5SLars Ellenberg * DRBD_REQ_*, because we need to semantically map the flags to data packet 1664688593c5SLars Ellenberg * flags and back. We may replicate to other kernel versions. */ 1665688593c5SLars Ellenberg static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf) 166676d2e7ecSPhilipp Reisner { 166776d2e7ecSPhilipp Reisner return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) | 166876d2e7ecSPhilipp Reisner (dpf & DP_FUA ? REQ_FUA : 0) | 1669688593c5SLars Ellenberg (dpf & DP_FLUSH ? REQ_FLUSH : 0) | 167076d2e7ecSPhilipp Reisner (dpf & DP_DISCARD ? 
REQ_DISCARD : 0); 167176d2e7ecSPhilipp Reisner } 167276d2e7ecSPhilipp Reisner 1673b411b363SPhilipp Reisner /* mirrored write */ 167402918be2SPhilipp Reisner static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) 1675b411b363SPhilipp Reisner { 1676b411b363SPhilipp Reisner sector_t sector; 1677b411b363SPhilipp Reisner struct drbd_epoch_entry *e; 167802918be2SPhilipp Reisner struct p_data *p = &mdev->data.rbuf.data; 1679b411b363SPhilipp Reisner int rw = WRITE; 1680b411b363SPhilipp Reisner u32 dp_flags; 1681b411b363SPhilipp Reisner 1682b411b363SPhilipp Reisner if (!get_ldev(mdev)) { 1683b411b363SPhilipp Reisner spin_lock(&mdev->peer_seq_lock); 1684b411b363SPhilipp Reisner if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num)) 1685b411b363SPhilipp Reisner mdev->peer_seq++; 1686b411b363SPhilipp Reisner spin_unlock(&mdev->peer_seq_lock); 1687b411b363SPhilipp Reisner 16882b2bf214SLars Ellenberg drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size); 1689b411b363SPhilipp Reisner atomic_inc(&mdev->current_epoch->epoch_size); 1690b411b363SPhilipp Reisner return drbd_drain_block(mdev, data_size); 1691b411b363SPhilipp Reisner } 1692b411b363SPhilipp Reisner 1693b411b363SPhilipp Reisner /* get_ldev(mdev) successful. 1694b411b363SPhilipp Reisner * Corresponding put_ldev done either below (on various errors), 16959c50842aSAndreas Gruenbacher * or in drbd_endio_sec, if we successfully submit the data at 1696b411b363SPhilipp Reisner * the end of this function. */ 1697b411b363SPhilipp Reisner 1698b411b363SPhilipp Reisner sector = be64_to_cpu(p->sector); 1699b411b363SPhilipp Reisner e = read_in_block(mdev, p->block_id, sector, data_size); 1700b411b363SPhilipp Reisner if (!e) { 1701b411b363SPhilipp Reisner put_ldev(mdev); 170281e84650SAndreas Gruenbacher return false; 1703b411b363SPhilipp Reisner } 1704b411b363SPhilipp Reisner 1705b411b363SPhilipp Reisner e->w.cb = e_end_block; 1706b411b363SPhilipp Reisner 1707688593c5SLars Ellenberg dp_flags = be32_to_cpu(p->dp_flags); 1708688593c5SLars Ellenberg rw |= wire_flags_to_bio(mdev, dp_flags); 1709688593c5SLars Ellenberg 1710688593c5SLars Ellenberg if (dp_flags & DP_MAY_SET_IN_SYNC) 1711688593c5SLars Ellenberg e->flags |= EE_MAY_SET_IN_SYNC; 1712688593c5SLars Ellenberg 1713b411b363SPhilipp Reisner spin_lock(&mdev->epoch_lock); 1714b411b363SPhilipp Reisner e->epoch = mdev->current_epoch; 1715b411b363SPhilipp Reisner atomic_inc(&e->epoch->epoch_size); 1716b411b363SPhilipp Reisner atomic_inc(&e->epoch->active); 1717b411b363SPhilipp Reisner spin_unlock(&mdev->epoch_lock); 1718b411b363SPhilipp Reisner 1719b411b363SPhilipp Reisner /* I'm the receiver, I do hold a net_cnt reference. */ 172089e58e75SPhilipp Reisner if (!mdev->tconn->net_conf->two_primaries) { 1721b411b363SPhilipp Reisner spin_lock_irq(&mdev->req_lock); 1722b411b363SPhilipp Reisner } else { 1723b411b363SPhilipp Reisner /* don't get the req_lock yet, 1724b411b363SPhilipp Reisner * we may sleep in drbd_wait_peer_seq */ 1725010f6e67SAndreas Gruenbacher const int size = e->i.size; 1726b411b363SPhilipp Reisner const int discard = test_bit(DISCARD_CONCURRENT, &mdev->flags); 1727b411b363SPhilipp Reisner DEFINE_WAIT(wait); 1728b411b363SPhilipp Reisner int first; 1729b411b363SPhilipp Reisner 173089e58e75SPhilipp Reisner D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C); 1731b411b363SPhilipp Reisner 1732b411b363SPhilipp Reisner /* conflict detection and handling: 1733b411b363SPhilipp Reisner * 1. 
wait on the sequence number, 1734b411b363SPhilipp Reisner * in case this data packet overtook ACK packets. 1735bb3bfe96SAndreas Gruenbacher * 2. check our interval trees for conflicting requests: 1736bb3bfe96SAndreas Gruenbacher * we only need to check the write_requests tree; the 1737bb3bfe96SAndreas Gruenbacher * epoch_entries tree cannot contain any overlaps because 1738bb3bfe96SAndreas Gruenbacher * they were already eliminated on the submitting node. 1739b411b363SPhilipp Reisner * 1740b411b363SPhilipp Reisner * Note: for two_primaries, we are protocol C, 1741b411b363SPhilipp Reisner * so there cannot be any request that is DONE 1742b411b363SPhilipp Reisner * but still on the transfer log. 1743b411b363SPhilipp Reisner * 1744bb3bfe96SAndreas Gruenbacher * unconditionally add to the epoch_entries tree. 1745b411b363SPhilipp Reisner * 1746b411b363SPhilipp Reisner * if no conflicting request is found: 1747b411b363SPhilipp Reisner * submit. 1748b411b363SPhilipp Reisner * 1749b411b363SPhilipp Reisner * if any conflicting request is found 1750b411b363SPhilipp Reisner * that has not yet been acked, 1751b411b363SPhilipp Reisner * AND I have the "discard concurrent writes" flag: 1752b411b363SPhilipp Reisner * queue (via done_ee) the P_DISCARD_ACK; OUT. 1753b411b363SPhilipp Reisner * 1754b411b363SPhilipp Reisner * if any conflicting request is found: 1755b411b363SPhilipp Reisner * block the receiver, waiting on misc_wait 1756b411b363SPhilipp Reisner * until no more conflicting requests are there, 1757b411b363SPhilipp Reisner * or we get interrupted (disconnect). 1758b411b363SPhilipp Reisner * 1759b411b363SPhilipp Reisner * we do not just write after local io completion of those 1760b411b363SPhilipp Reisner * requests, but only after req is done completely, i.e. 1761b411b363SPhilipp Reisner * we wait for the P_DISCARD_ACK to arrive! 1762b411b363SPhilipp Reisner * 1763b411b363SPhilipp Reisner * then proceed normally, i.e. submit. 1764b411b363SPhilipp Reisner */ 1765b411b363SPhilipp Reisner if (drbd_wait_peer_seq(mdev, be32_to_cpu(p->seq_num))) 1766b411b363SPhilipp Reisner goto out_interrupted; 1767b411b363SPhilipp Reisner 1768b411b363SPhilipp Reisner spin_lock_irq(&mdev->req_lock); 1769b411b363SPhilipp Reisner 17708b946255SAndreas Gruenbacher drbd_insert_interval(&mdev->epoch_entries, &e->i); 1771b411b363SPhilipp Reisner 1772b411b363SPhilipp Reisner first = 1; 1773b411b363SPhilipp Reisner for (;;) { 1774de696716SAndreas Gruenbacher struct drbd_interval *i; 1775b411b363SPhilipp Reisner int have_unacked = 0; 1776b411b363SPhilipp Reisner int have_conflict = 0; 1777b411b363SPhilipp Reisner prepare_to_wait(&mdev->misc_wait, &wait, 1778b411b363SPhilipp Reisner TASK_INTERRUPTIBLE); 1779de696716SAndreas Gruenbacher 1780de696716SAndreas Gruenbacher i = drbd_find_overlap(&mdev->write_requests, sector, size); 1781de696716SAndreas Gruenbacher if (i) { 1782de696716SAndreas Gruenbacher struct drbd_request *req2 = 1783de696716SAndreas Gruenbacher container_of(i, struct drbd_request, i); 1784de696716SAndreas Gruenbacher 1785b411b363SPhilipp Reisner /* only ALERT on first iteration, 1786b411b363SPhilipp Reisner * we may be woken up early... */ 1787b411b363SPhilipp Reisner if (first) 1788b411b363SPhilipp Reisner dev_alert(DEV, "%s[%u] Concurrent local write detected!" 
1789b411b363SPhilipp Reisner " new: %llus +%u; pending: %llus +%u\n", 1790b411b363SPhilipp Reisner current->comm, current->pid, 1791b411b363SPhilipp Reisner (unsigned long long)sector, size, 1792de696716SAndreas Gruenbacher (unsigned long long)req2->i.sector, req2->i.size); 1793de696716SAndreas Gruenbacher if (req2->rq_state & RQ_NET_PENDING) 1794b411b363SPhilipp Reisner ++have_unacked; 1795b411b363SPhilipp Reisner ++have_conflict; 1796b411b363SPhilipp Reisner } 1797b411b363SPhilipp Reisner if (!have_conflict) 1798b411b363SPhilipp Reisner break; 1799b411b363SPhilipp Reisner 1800b411b363SPhilipp Reisner /* Discard Ack only for the _first_ iteration */ 1801b411b363SPhilipp Reisner if (first && discard && have_unacked) { 1802b411b363SPhilipp Reisner dev_alert(DEV, "Concurrent write! [DISCARD BY FLAG] sec=%llus\n", 1803b411b363SPhilipp Reisner (unsigned long long)sector); 1804b411b363SPhilipp Reisner inc_unacked(mdev); 1805b411b363SPhilipp Reisner e->w.cb = e_send_discard_ack; 1806b411b363SPhilipp Reisner list_add_tail(&e->w.list, &mdev->done_ee); 1807b411b363SPhilipp Reisner 1808b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 1809b411b363SPhilipp Reisner 1810b411b363SPhilipp Reisner /* we could probably send that P_DISCARD_ACK ourselves, 1811b411b363SPhilipp Reisner * but I don't like the receiver using the msock */ 1812b411b363SPhilipp Reisner 1813b411b363SPhilipp Reisner put_ldev(mdev); 1814b411b363SPhilipp Reisner wake_asender(mdev); 1815b411b363SPhilipp Reisner finish_wait(&mdev->misc_wait, &wait); 181681e84650SAndreas Gruenbacher return true; 1817b411b363SPhilipp Reisner } 1818b411b363SPhilipp Reisner 1819b411b363SPhilipp Reisner if (signal_pending(current)) { 18208b946255SAndreas Gruenbacher drbd_remove_interval(&mdev->epoch_entries, &e->i); 18218b946255SAndreas Gruenbacher drbd_clear_interval(&e->i); 1822b411b363SPhilipp Reisner 1823b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 1824b411b363SPhilipp Reisner 1825b411b363SPhilipp Reisner finish_wait(&mdev->misc_wait, &wait); 1826b411b363SPhilipp Reisner goto out_interrupted; 1827b411b363SPhilipp Reisner } 1828b411b363SPhilipp Reisner 1829b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 1830b411b363SPhilipp Reisner if (first) { 1831b411b363SPhilipp Reisner first = 0; 1832b411b363SPhilipp Reisner dev_alert(DEV, "Concurrent write! [W AFTERWARDS] " 1833b411b363SPhilipp Reisner "sec=%llus\n", (unsigned long long)sector); 1834b411b363SPhilipp Reisner } else if (discard) { 1835b411b363SPhilipp Reisner /* we had none on the first iteration. 1836b411b363SPhilipp Reisner * there must be none now. 
*/ 1837b411b363SPhilipp Reisner D_ASSERT(have_unacked == 0); 1838b411b363SPhilipp Reisner } 1839b411b363SPhilipp Reisner schedule(); 1840b411b363SPhilipp Reisner spin_lock_irq(&mdev->req_lock); 1841b411b363SPhilipp Reisner } 1842b411b363SPhilipp Reisner finish_wait(&mdev->misc_wait, &wait); 1843b411b363SPhilipp Reisner } 1844b411b363SPhilipp Reisner 1845b411b363SPhilipp Reisner list_add(&e->w.list, &mdev->active_ee); 1846b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 1847b411b363SPhilipp Reisner 184889e58e75SPhilipp Reisner switch (mdev->tconn->net_conf->wire_protocol) { 1849b411b363SPhilipp Reisner case DRBD_PROT_C: 1850b411b363SPhilipp Reisner inc_unacked(mdev); 1851b411b363SPhilipp Reisner /* corresponding dec_unacked() in e_end_block() 1852b411b363SPhilipp Reisner * respective _drbd_clear_done_ee */ 1853b411b363SPhilipp Reisner break; 1854b411b363SPhilipp Reisner case DRBD_PROT_B: 1855b411b363SPhilipp Reisner /* I really don't like it that the receiver thread 1856b411b363SPhilipp Reisner * sends on the msock, but anyways */ 1857b411b363SPhilipp Reisner drbd_send_ack(mdev, P_RECV_ACK, e); 1858b411b363SPhilipp Reisner break; 1859b411b363SPhilipp Reisner case DRBD_PROT_A: 1860b411b363SPhilipp Reisner /* nothing to do */ 1861b411b363SPhilipp Reisner break; 1862b411b363SPhilipp Reisner } 1863b411b363SPhilipp Reisner 18646719fb03SLars Ellenberg if (mdev->state.pdsk < D_INCONSISTENT) { 1865b411b363SPhilipp Reisner /* In case we have the only disk of the cluster, */ 1866010f6e67SAndreas Gruenbacher drbd_set_out_of_sync(mdev, e->i.sector, e->i.size); 1867b411b363SPhilipp Reisner e->flags |= EE_CALL_AL_COMPLETE_IO; 18686719fb03SLars Ellenberg e->flags &= ~EE_MAY_SET_IN_SYNC; 1869010f6e67SAndreas Gruenbacher drbd_al_begin_io(mdev, e->i.sector); 1870b411b363SPhilipp Reisner } 1871b411b363SPhilipp Reisner 187245bb912bSLars Ellenberg if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0) 187381e84650SAndreas Gruenbacher return true; 1874b411b363SPhilipp Reisner 187510f6d992SLars Ellenberg /* don't care for the reason here */ 187610f6d992SLars Ellenberg dev_err(DEV, "submit failed, triggering re-connect\n"); 187722cc37a9SLars Ellenberg spin_lock_irq(&mdev->req_lock); 187822cc37a9SLars Ellenberg list_del(&e->w.list); 18798b946255SAndreas Gruenbacher drbd_remove_interval(&mdev->epoch_entries, &e->i); 18808b946255SAndreas Gruenbacher drbd_clear_interval(&e->i); 188122cc37a9SLars Ellenberg spin_unlock_irq(&mdev->req_lock); 188222cc37a9SLars Ellenberg if (e->flags & EE_CALL_AL_COMPLETE_IO) 1883010f6e67SAndreas Gruenbacher drbd_al_complete_io(mdev, e->i.sector); 188422cc37a9SLars Ellenberg 1885b411b363SPhilipp Reisner out_interrupted: 188610f6d992SLars Ellenberg drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + EV_CLEANUP); 1887b411b363SPhilipp Reisner put_ldev(mdev); 1888b411b363SPhilipp Reisner drbd_free_ee(mdev, e); 188981e84650SAndreas Gruenbacher return false; 1890b411b363SPhilipp Reisner } 1891b411b363SPhilipp Reisner 18920f0601f4SLars Ellenberg /* We may throttle resync, if the lower device seems to be busy, 18930f0601f4SLars Ellenberg * and current sync rate is above c_min_rate. 18940f0601f4SLars Ellenberg * 18950f0601f4SLars Ellenberg * To decide whether or not the lower device is busy, we use a scheme similar 18960f0601f4SLars Ellenberg * to MD RAID is_mddev_idle(): if the partition stats reveal "significant" 18970f0601f4SLars Ellenberg * (more than 64 sectors) of activity we cannot account for with our own resync 18980f0601f4SLars Ellenberg * activity, it obviously is "busy". 
18990f0601f4SLars Ellenberg * 19000f0601f4SLars Ellenberg * The current sync rate used here uses only the most recent two step marks, 19010f0601f4SLars Ellenberg * to have a short time average so we can react faster. 19020f0601f4SLars Ellenberg */ 1903e3555d85SPhilipp Reisner int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector) 19040f0601f4SLars Ellenberg { 19050f0601f4SLars Ellenberg struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk; 19060f0601f4SLars Ellenberg unsigned long db, dt, dbdt; 1907e3555d85SPhilipp Reisner struct lc_element *tmp; 19080f0601f4SLars Ellenberg int curr_events; 19090f0601f4SLars Ellenberg int throttle = 0; 19100f0601f4SLars Ellenberg 19110f0601f4SLars Ellenberg /* feature disabled? */ 19120f0601f4SLars Ellenberg if (mdev->sync_conf.c_min_rate == 0) 19130f0601f4SLars Ellenberg return 0; 19140f0601f4SLars Ellenberg 1915e3555d85SPhilipp Reisner spin_lock_irq(&mdev->al_lock); 1916e3555d85SPhilipp Reisner tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector)); 1917e3555d85SPhilipp Reisner if (tmp) { 1918e3555d85SPhilipp Reisner struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce); 1919e3555d85SPhilipp Reisner if (test_bit(BME_PRIORITY, &bm_ext->flags)) { 1920e3555d85SPhilipp Reisner spin_unlock_irq(&mdev->al_lock); 1921e3555d85SPhilipp Reisner return 0; 1922e3555d85SPhilipp Reisner } 1923e3555d85SPhilipp Reisner /* Do not slow down if app IO is already waiting for this extent */ 1924e3555d85SPhilipp Reisner } 1925e3555d85SPhilipp Reisner spin_unlock_irq(&mdev->al_lock); 1926e3555d85SPhilipp Reisner 19270f0601f4SLars Ellenberg curr_events = (int)part_stat_read(&disk->part0, sectors[0]) + 19280f0601f4SLars Ellenberg (int)part_stat_read(&disk->part0, sectors[1]) - 19290f0601f4SLars Ellenberg atomic_read(&mdev->rs_sect_ev); 1930e3555d85SPhilipp Reisner 19310f0601f4SLars Ellenberg if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) { 19320f0601f4SLars Ellenberg unsigned long rs_left; 19330f0601f4SLars Ellenberg int i; 19340f0601f4SLars Ellenberg 19350f0601f4SLars Ellenberg mdev->rs_last_events = curr_events; 19360f0601f4SLars Ellenberg 19370f0601f4SLars Ellenberg /* sync speed average over the last 2*DRBD_SYNC_MARK_STEP, 19380f0601f4SLars Ellenberg * approx. 
*/ 19392649f080SLars Ellenberg i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS; 19402649f080SLars Ellenberg 19412649f080SLars Ellenberg if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T) 19422649f080SLars Ellenberg rs_left = mdev->ov_left; 19432649f080SLars Ellenberg else 19440f0601f4SLars Ellenberg rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed; 19450f0601f4SLars Ellenberg 19460f0601f4SLars Ellenberg dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ; 19470f0601f4SLars Ellenberg if (!dt) 19480f0601f4SLars Ellenberg dt++; 19490f0601f4SLars Ellenberg db = mdev->rs_mark_left[i] - rs_left; 19500f0601f4SLars Ellenberg dbdt = Bit2KB(db/dt); 19510f0601f4SLars Ellenberg 19520f0601f4SLars Ellenberg if (dbdt > mdev->sync_conf.c_min_rate) 19530f0601f4SLars Ellenberg throttle = 1; 19540f0601f4SLars Ellenberg } 19550f0601f4SLars Ellenberg return throttle; 19560f0601f4SLars Ellenberg } 19570f0601f4SLars Ellenberg 19580f0601f4SLars Ellenberg 195902918be2SPhilipp Reisner static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int digest_size) 1960b411b363SPhilipp Reisner { 1961b411b363SPhilipp Reisner sector_t sector; 1962b411b363SPhilipp Reisner const sector_t capacity = drbd_get_capacity(mdev->this_bdev); 1963b411b363SPhilipp Reisner struct drbd_epoch_entry *e; 1964b411b363SPhilipp Reisner struct digest_info *di = NULL; 1965b18b37beSPhilipp Reisner int size, verb; 1966b411b363SPhilipp Reisner unsigned int fault_type; 196702918be2SPhilipp Reisner struct p_block_req *p = &mdev->data.rbuf.block_req; 1968b411b363SPhilipp Reisner 1969b411b363SPhilipp Reisner sector = be64_to_cpu(p->sector); 1970b411b363SPhilipp Reisner size = be32_to_cpu(p->blksize); 1971b411b363SPhilipp Reisner 19721816a2b4SLars Ellenberg if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) { 1973b411b363SPhilipp Reisner dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__, 1974b411b363SPhilipp Reisner (unsigned long long)sector, size); 197581e84650SAndreas Gruenbacher return false; 1976b411b363SPhilipp Reisner } 1977b411b363SPhilipp Reisner if (sector + (size>>9) > capacity) { 1978b411b363SPhilipp Reisner dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__, 1979b411b363SPhilipp Reisner (unsigned long long)sector, size); 198081e84650SAndreas Gruenbacher return false; 1981b411b363SPhilipp Reisner } 1982b411b363SPhilipp Reisner 1983b411b363SPhilipp Reisner if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) { 1984b18b37beSPhilipp Reisner verb = 1; 1985b18b37beSPhilipp Reisner switch (cmd) { 1986b18b37beSPhilipp Reisner case P_DATA_REQUEST: 1987b18b37beSPhilipp Reisner drbd_send_ack_rp(mdev, P_NEG_DREPLY, p); 1988b18b37beSPhilipp Reisner break; 1989b18b37beSPhilipp Reisner case P_RS_DATA_REQUEST: 1990b18b37beSPhilipp Reisner case P_CSUM_RS_REQUEST: 1991b18b37beSPhilipp Reisner case P_OV_REQUEST: 1992b18b37beSPhilipp Reisner drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY , p); 1993b18b37beSPhilipp Reisner break; 1994b18b37beSPhilipp Reisner case P_OV_REPLY: 1995b18b37beSPhilipp Reisner verb = 0; 1996b18b37beSPhilipp Reisner dec_rs_pending(mdev); 1997b18b37beSPhilipp Reisner drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC); 1998b18b37beSPhilipp Reisner break; 1999b18b37beSPhilipp Reisner default: 2000b18b37beSPhilipp Reisner dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n", 2001b18b37beSPhilipp Reisner cmdname(cmd)); 2002b18b37beSPhilipp Reisner } 2003b18b37beSPhilipp Reisner if (verb && 
__ratelimit(&drbd_ratelimit_state))
2004b411b363SPhilipp Reisner dev_err(DEV, "Can not satisfy peer's read request, "
2005b411b363SPhilipp Reisner "no local data.\n");
2006b18b37beSPhilipp Reisner
2007a821cc4aSLars Ellenberg /* drain possible payload */
2008a821cc4aSLars Ellenberg return drbd_drain_block(mdev, digest_size);
2009b411b363SPhilipp Reisner }
2010b411b363SPhilipp Reisner
2011b411b363SPhilipp Reisner /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
2012b411b363SPhilipp Reisner * "criss-cross" setup, that might cause write-out on some other DRBD,
2013b411b363SPhilipp Reisner * which in turn might block on the other node at this very place. */
2014b411b363SPhilipp Reisner e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO);
2015b411b363SPhilipp Reisner if (!e) {
2016b411b363SPhilipp Reisner put_ldev(mdev);
201781e84650SAndreas Gruenbacher return false;
2018b411b363SPhilipp Reisner }
2019b411b363SPhilipp Reisner
202002918be2SPhilipp Reisner switch (cmd) {
2021b411b363SPhilipp Reisner case P_DATA_REQUEST:
2022b411b363SPhilipp Reisner e->w.cb = w_e_end_data_req;
2023b411b363SPhilipp Reisner fault_type = DRBD_FAULT_DT_RD;
202480a40e43SLars Ellenberg /* application IO, don't drbd_rs_begin_io */
202580a40e43SLars Ellenberg goto submit;
202680a40e43SLars Ellenberg
2027b411b363SPhilipp Reisner case P_RS_DATA_REQUEST:
2028b411b363SPhilipp Reisner e->w.cb = w_e_end_rsdata_req;
2029b411b363SPhilipp Reisner fault_type = DRBD_FAULT_RS_RD;
20305f9915bbSLars Ellenberg /* used in the sector offset progress display */
20315f9915bbSLars Ellenberg mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
2032b411b363SPhilipp Reisner break;
2033b411b363SPhilipp Reisner
2034b411b363SPhilipp Reisner case P_OV_REPLY:
2035b411b363SPhilipp Reisner case P_CSUM_RS_REQUEST:
2036b411b363SPhilipp Reisner fault_type = DRBD_FAULT_RS_RD;
2037b411b363SPhilipp Reisner di = kmalloc(sizeof(*di) + digest_size, GFP_NOIO);
2038b411b363SPhilipp Reisner if (!di)
2039b411b363SPhilipp Reisner goto out_free_e;
2040b411b363SPhilipp Reisner
2041b411b363SPhilipp Reisner di->digest_size = digest_size;
2042b411b363SPhilipp Reisner di->digest = (((char *)di)+sizeof(struct digest_info));
2043b411b363SPhilipp Reisner
2044c36c3cedSLars Ellenberg e->digest = di;
2045c36c3cedSLars Ellenberg e->flags |= EE_HAS_DIGEST;
2046c36c3cedSLars Ellenberg
2047b411b363SPhilipp Reisner if (drbd_recv(mdev, di->digest, digest_size) != digest_size)
2048b411b363SPhilipp Reisner goto out_free_e;
2049b411b363SPhilipp Reisner
205002918be2SPhilipp Reisner if (cmd == P_CSUM_RS_REQUEST) {
2051b411b363SPhilipp Reisner D_ASSERT(mdev->agreed_pro_version >= 89);
2052b411b363SPhilipp Reisner e->w.cb = w_e_end_csum_rs_req;
20535f9915bbSLars Ellenberg /* used in the sector offset progress display */
20545f9915bbSLars Ellenberg mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
205502918be2SPhilipp Reisner } else if (cmd == P_OV_REPLY) {
20562649f080SLars Ellenberg /* track progress, we may need to throttle */
20572649f080SLars Ellenberg atomic_add(size >> 9, &mdev->rs_sect_in);
2058b411b363SPhilipp Reisner e->w.cb = w_e_end_ov_reply;
2059b411b363SPhilipp Reisner dec_rs_pending(mdev);
20600f0601f4SLars Ellenberg /* drbd_rs_begin_io done when we sent this request,
20610f0601f4SLars Ellenberg * but accounting still needs to be done.
*/
20620f0601f4SLars Ellenberg goto submit_for_resync;
2063b411b363SPhilipp Reisner }
2064b411b363SPhilipp Reisner break;
2065b411b363SPhilipp Reisner
2066b411b363SPhilipp Reisner case P_OV_REQUEST:
2067b411b363SPhilipp Reisner if (mdev->ov_start_sector == ~(sector_t)0 &&
2068b411b363SPhilipp Reisner mdev->agreed_pro_version >= 90) {
2069de228bbaSLars Ellenberg unsigned long now = jiffies;
2070de228bbaSLars Ellenberg int i;
2071b411b363SPhilipp Reisner mdev->ov_start_sector = sector;
2072b411b363SPhilipp Reisner mdev->ov_position = sector;
207330b743a2SLars Ellenberg mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector);
207430b743a2SLars Ellenberg mdev->rs_total = mdev->ov_left;
2075de228bbaSLars Ellenberg for (i = 0; i < DRBD_SYNC_MARKS; i++) {
2076de228bbaSLars Ellenberg mdev->rs_mark_left[i] = mdev->ov_left;
2077de228bbaSLars Ellenberg mdev->rs_mark_time[i] = now;
2078de228bbaSLars Ellenberg }
2079b411b363SPhilipp Reisner dev_info(DEV, "Online Verify start sector: %llu\n",
2080b411b363SPhilipp Reisner (unsigned long long)sector);
2081b411b363SPhilipp Reisner }
2082b411b363SPhilipp Reisner e->w.cb = w_e_end_ov_req;
2083b411b363SPhilipp Reisner fault_type = DRBD_FAULT_RS_RD;
2084b411b363SPhilipp Reisner break;
2085b411b363SPhilipp Reisner
2086b411b363SPhilipp Reisner default:
2087b411b363SPhilipp Reisner dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
208802918be2SPhilipp Reisner cmdname(cmd));
2089b411b363SPhilipp Reisner fault_type = DRBD_FAULT_MAX;
209080a40e43SLars Ellenberg goto out_free_e;
2091b411b363SPhilipp Reisner }
2092b411b363SPhilipp Reisner
20930f0601f4SLars Ellenberg /* Throttle, drbd_rs_begin_io and submit should become asynchronous
20940f0601f4SLars Ellenberg * wrt the receiver, but it is not as straightforward as it may seem.
20950f0601f4SLars Ellenberg * Various places in the resync start and stop logic assume resync
20960f0601f4SLars Ellenberg * requests are processed in order, requeuing this on the worker thread
20970f0601f4SLars Ellenberg * introduces a bunch of new code for synchronization between threads.
20980f0601f4SLars Ellenberg *
20990f0601f4SLars Ellenberg * Unlimited throttling before drbd_rs_begin_io may stall the resync
21000f0601f4SLars Ellenberg * "forever", throttling after drbd_rs_begin_io will lock that extent
21010f0601f4SLars Ellenberg * for application writes for the same time. For now, just throttle
21020f0601f4SLars Ellenberg * here, where the rest of the code expects the receiver to sleep for
21030f0601f4SLars Ellenberg * a while, anyways.
21040f0601f4SLars Ellenberg */
2105b411b363SPhilipp Reisner
21060f0601f4SLars Ellenberg /* Throttle before drbd_rs_begin_io, as that locks out application IO;
21070f0601f4SLars Ellenberg * this defers syncer requests for some time, before letting at least
21080f0601f4SLars Ellenberg * one request through. The resync controller on the receiving side
21090f0601f4SLars Ellenberg * will adapt to the incoming rate accordingly.
21100f0601f4SLars Ellenberg *
21110f0601f4SLars Ellenberg * We cannot throttle here if remote is Primary/SyncTarget:
21120f0601f4SLars Ellenberg * we would also throttle its application reads.
21130f0601f4SLars Ellenberg * In that case, throttling is done on the SyncTarget only.
21140f0601f4SLars Ellenberg */ 2115e3555d85SPhilipp Reisner if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector)) 2116e3555d85SPhilipp Reisner schedule_timeout_uninterruptible(HZ/10); 2117e3555d85SPhilipp Reisner if (drbd_rs_begin_io(mdev, sector)) 211880a40e43SLars Ellenberg goto out_free_e; 2119b411b363SPhilipp Reisner 21200f0601f4SLars Ellenberg submit_for_resync: 21210f0601f4SLars Ellenberg atomic_add(size >> 9, &mdev->rs_sect_ev); 21220f0601f4SLars Ellenberg 212380a40e43SLars Ellenberg submit: 2124b411b363SPhilipp Reisner inc_unacked(mdev); 212580a40e43SLars Ellenberg spin_lock_irq(&mdev->req_lock); 212680a40e43SLars Ellenberg list_add_tail(&e->w.list, &mdev->read_ee); 212780a40e43SLars Ellenberg spin_unlock_irq(&mdev->req_lock); 2128b411b363SPhilipp Reisner 212945bb912bSLars Ellenberg if (drbd_submit_ee(mdev, e, READ, fault_type) == 0) 213081e84650SAndreas Gruenbacher return true; 2131b411b363SPhilipp Reisner 213210f6d992SLars Ellenberg /* don't care for the reason here */ 213310f6d992SLars Ellenberg dev_err(DEV, "submit failed, triggering re-connect\n"); 213422cc37a9SLars Ellenberg spin_lock_irq(&mdev->req_lock); 213522cc37a9SLars Ellenberg list_del(&e->w.list); 213622cc37a9SLars Ellenberg spin_unlock_irq(&mdev->req_lock); 213722cc37a9SLars Ellenberg /* no drbd_rs_complete_io(), we are dropping the connection anyways */ 213822cc37a9SLars Ellenberg 2139b411b363SPhilipp Reisner out_free_e: 2140b411b363SPhilipp Reisner put_ldev(mdev); 2141b411b363SPhilipp Reisner drbd_free_ee(mdev, e); 214281e84650SAndreas Gruenbacher return false; 2143b411b363SPhilipp Reisner } 2144b411b363SPhilipp Reisner 2145b411b363SPhilipp Reisner static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local) 2146b411b363SPhilipp Reisner { 2147b411b363SPhilipp Reisner int self, peer, rv = -100; 2148b411b363SPhilipp Reisner unsigned long ch_self, ch_peer; 2149b411b363SPhilipp Reisner 2150b411b363SPhilipp Reisner self = mdev->ldev->md.uuid[UI_BITMAP] & 1; 2151b411b363SPhilipp Reisner peer = mdev->p_uuid[UI_BITMAP] & 1; 2152b411b363SPhilipp Reisner 2153b411b363SPhilipp Reisner ch_peer = mdev->p_uuid[UI_SIZE]; 2154b411b363SPhilipp Reisner ch_self = mdev->comm_bm_set; 2155b411b363SPhilipp Reisner 215689e58e75SPhilipp Reisner switch (mdev->tconn->net_conf->after_sb_0p) { 2157b411b363SPhilipp Reisner case ASB_CONSENSUS: 2158b411b363SPhilipp Reisner case ASB_DISCARD_SECONDARY: 2159b411b363SPhilipp Reisner case ASB_CALL_HELPER: 2160b411b363SPhilipp Reisner dev_err(DEV, "Configuration error.\n"); 2161b411b363SPhilipp Reisner break; 2162b411b363SPhilipp Reisner case ASB_DISCONNECT: 2163b411b363SPhilipp Reisner break; 2164b411b363SPhilipp Reisner case ASB_DISCARD_YOUNGER_PRI: 2165b411b363SPhilipp Reisner if (self == 0 && peer == 1) { 2166b411b363SPhilipp Reisner rv = -1; 2167b411b363SPhilipp Reisner break; 2168b411b363SPhilipp Reisner } 2169b411b363SPhilipp Reisner if (self == 1 && peer == 0) { 2170b411b363SPhilipp Reisner rv = 1; 2171b411b363SPhilipp Reisner break; 2172b411b363SPhilipp Reisner } 2173b411b363SPhilipp Reisner /* Else fall through to one of the other strategies... 
*/ 2174b411b363SPhilipp Reisner case ASB_DISCARD_OLDER_PRI: 2175b411b363SPhilipp Reisner if (self == 0 && peer == 1) { 2176b411b363SPhilipp Reisner rv = 1; 2177b411b363SPhilipp Reisner break; 2178b411b363SPhilipp Reisner } 2179b411b363SPhilipp Reisner if (self == 1 && peer == 0) { 2180b411b363SPhilipp Reisner rv = -1; 2181b411b363SPhilipp Reisner break; 2182b411b363SPhilipp Reisner } 2183b411b363SPhilipp Reisner /* Else fall through to one of the other strategies... */ 2184ad19bf6eSLars Ellenberg dev_warn(DEV, "Discard younger/older primary did not find a decision\n" 2185b411b363SPhilipp Reisner "Using discard-least-changes instead\n"); 2186b411b363SPhilipp Reisner case ASB_DISCARD_ZERO_CHG: 2187b411b363SPhilipp Reisner if (ch_peer == 0 && ch_self == 0) { 2188b411b363SPhilipp Reisner rv = test_bit(DISCARD_CONCURRENT, &mdev->flags) 2189b411b363SPhilipp Reisner ? -1 : 1; 2190b411b363SPhilipp Reisner break; 2191b411b363SPhilipp Reisner } else { 2192b411b363SPhilipp Reisner if (ch_peer == 0) { rv = 1; break; } 2193b411b363SPhilipp Reisner if (ch_self == 0) { rv = -1; break; } 2194b411b363SPhilipp Reisner } 219589e58e75SPhilipp Reisner if (mdev->tconn->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG) 2196b411b363SPhilipp Reisner break; 2197b411b363SPhilipp Reisner case ASB_DISCARD_LEAST_CHG: 2198b411b363SPhilipp Reisner if (ch_self < ch_peer) 2199b411b363SPhilipp Reisner rv = -1; 2200b411b363SPhilipp Reisner else if (ch_self > ch_peer) 2201b411b363SPhilipp Reisner rv = 1; 2202b411b363SPhilipp Reisner else /* ( ch_self == ch_peer ) */ 2203b411b363SPhilipp Reisner /* Well, then use something else. */ 2204b411b363SPhilipp Reisner rv = test_bit(DISCARD_CONCURRENT, &mdev->flags) 2205b411b363SPhilipp Reisner ? -1 : 1; 2206b411b363SPhilipp Reisner break; 2207b411b363SPhilipp Reisner case ASB_DISCARD_LOCAL: 2208b411b363SPhilipp Reisner rv = -1; 2209b411b363SPhilipp Reisner break; 2210b411b363SPhilipp Reisner case ASB_DISCARD_REMOTE: 2211b411b363SPhilipp Reisner rv = 1; 2212b411b363SPhilipp Reisner } 2213b411b363SPhilipp Reisner 2214b411b363SPhilipp Reisner return rv; 2215b411b363SPhilipp Reisner } 2216b411b363SPhilipp Reisner 2217b411b363SPhilipp Reisner static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local) 2218b411b363SPhilipp Reisner { 22196184ea21SAndreas Gruenbacher int hg, rv = -100; 2220b411b363SPhilipp Reisner 222189e58e75SPhilipp Reisner switch (mdev->tconn->net_conf->after_sb_1p) { 2222b411b363SPhilipp Reisner case ASB_DISCARD_YOUNGER_PRI: 2223b411b363SPhilipp Reisner case ASB_DISCARD_OLDER_PRI: 2224b411b363SPhilipp Reisner case ASB_DISCARD_LEAST_CHG: 2225b411b363SPhilipp Reisner case ASB_DISCARD_LOCAL: 2226b411b363SPhilipp Reisner case ASB_DISCARD_REMOTE: 2227b411b363SPhilipp Reisner dev_err(DEV, "Configuration error.\n"); 2228b411b363SPhilipp Reisner break; 2229b411b363SPhilipp Reisner case ASB_DISCONNECT: 2230b411b363SPhilipp Reisner break; 2231b411b363SPhilipp Reisner case ASB_CONSENSUS: 2232b411b363SPhilipp Reisner hg = drbd_asb_recover_0p(mdev); 2233b411b363SPhilipp Reisner if (hg == -1 && mdev->state.role == R_SECONDARY) 2234b411b363SPhilipp Reisner rv = hg; 2235b411b363SPhilipp Reisner if (hg == 1 && mdev->state.role == R_PRIMARY) 2236b411b363SPhilipp Reisner rv = hg; 2237b411b363SPhilipp Reisner break; 2238b411b363SPhilipp Reisner case ASB_VIOLENTLY: 2239b411b363SPhilipp Reisner rv = drbd_asb_recover_0p(mdev); 2240b411b363SPhilipp Reisner break; 2241b411b363SPhilipp Reisner case ASB_DISCARD_SECONDARY: 2242b411b363SPhilipp Reisner return mdev->state.role == R_PRIMARY 
? 1 : -1; 2243b411b363SPhilipp Reisner case ASB_CALL_HELPER: 2244b411b363SPhilipp Reisner hg = drbd_asb_recover_0p(mdev); 2245b411b363SPhilipp Reisner if (hg == -1 && mdev->state.role == R_PRIMARY) { 2246bb437946SAndreas Gruenbacher enum drbd_state_rv rv2; 2247bb437946SAndreas Gruenbacher 2248bb437946SAndreas Gruenbacher drbd_set_role(mdev, R_SECONDARY, 0); 2249b411b363SPhilipp Reisner /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE, 2250b411b363SPhilipp Reisner * we might be here in C_WF_REPORT_PARAMS which is transient. 2251b411b363SPhilipp Reisner * we do not need to wait for the after state change work either. */ 2252bb437946SAndreas Gruenbacher rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY)); 2253bb437946SAndreas Gruenbacher if (rv2 != SS_SUCCESS) { 2254b411b363SPhilipp Reisner drbd_khelper(mdev, "pri-lost-after-sb"); 2255b411b363SPhilipp Reisner } else { 2256b411b363SPhilipp Reisner dev_warn(DEV, "Successfully gave up primary role.\n"); 2257b411b363SPhilipp Reisner rv = hg; 2258b411b363SPhilipp Reisner } 2259b411b363SPhilipp Reisner } else 2260b411b363SPhilipp Reisner rv = hg; 2261b411b363SPhilipp Reisner } 2262b411b363SPhilipp Reisner 2263b411b363SPhilipp Reisner return rv; 2264b411b363SPhilipp Reisner } 2265b411b363SPhilipp Reisner 2266b411b363SPhilipp Reisner static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local) 2267b411b363SPhilipp Reisner { 22686184ea21SAndreas Gruenbacher int hg, rv = -100; 2269b411b363SPhilipp Reisner 227089e58e75SPhilipp Reisner switch (mdev->tconn->net_conf->after_sb_2p) { 2271b411b363SPhilipp Reisner case ASB_DISCARD_YOUNGER_PRI: 2272b411b363SPhilipp Reisner case ASB_DISCARD_OLDER_PRI: 2273b411b363SPhilipp Reisner case ASB_DISCARD_LEAST_CHG: 2274b411b363SPhilipp Reisner case ASB_DISCARD_LOCAL: 2275b411b363SPhilipp Reisner case ASB_DISCARD_REMOTE: 2276b411b363SPhilipp Reisner case ASB_CONSENSUS: 2277b411b363SPhilipp Reisner case ASB_DISCARD_SECONDARY: 2278b411b363SPhilipp Reisner dev_err(DEV, "Configuration error.\n"); 2279b411b363SPhilipp Reisner break; 2280b411b363SPhilipp Reisner case ASB_VIOLENTLY: 2281b411b363SPhilipp Reisner rv = drbd_asb_recover_0p(mdev); 2282b411b363SPhilipp Reisner break; 2283b411b363SPhilipp Reisner case ASB_DISCONNECT: 2284b411b363SPhilipp Reisner break; 2285b411b363SPhilipp Reisner case ASB_CALL_HELPER: 2286b411b363SPhilipp Reisner hg = drbd_asb_recover_0p(mdev); 2287b411b363SPhilipp Reisner if (hg == -1) { 2288bb437946SAndreas Gruenbacher enum drbd_state_rv rv2; 2289bb437946SAndreas Gruenbacher 2290b411b363SPhilipp Reisner /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE, 2291b411b363SPhilipp Reisner * we might be here in C_WF_REPORT_PARAMS which is transient. 2292b411b363SPhilipp Reisner * we do not need to wait for the after state change work either. 
*/ 2293bb437946SAndreas Gruenbacher rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY)); 2294bb437946SAndreas Gruenbacher if (rv2 != SS_SUCCESS) { 2295b411b363SPhilipp Reisner drbd_khelper(mdev, "pri-lost-after-sb"); 2296b411b363SPhilipp Reisner } else { 2297b411b363SPhilipp Reisner dev_warn(DEV, "Successfully gave up primary role.\n"); 2298b411b363SPhilipp Reisner rv = hg; 2299b411b363SPhilipp Reisner } 2300b411b363SPhilipp Reisner } else 2301b411b363SPhilipp Reisner rv = hg; 2302b411b363SPhilipp Reisner } 2303b411b363SPhilipp Reisner 2304b411b363SPhilipp Reisner return rv; 2305b411b363SPhilipp Reisner } 2306b411b363SPhilipp Reisner 2307b411b363SPhilipp Reisner static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid, 2308b411b363SPhilipp Reisner u64 bits, u64 flags) 2309b411b363SPhilipp Reisner { 2310b411b363SPhilipp Reisner if (!uuid) { 2311b411b363SPhilipp Reisner dev_info(DEV, "%s uuid info vanished while I was looking!\n", text); 2312b411b363SPhilipp Reisner return; 2313b411b363SPhilipp Reisner } 2314b411b363SPhilipp Reisner dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n", 2315b411b363SPhilipp Reisner text, 2316b411b363SPhilipp Reisner (unsigned long long)uuid[UI_CURRENT], 2317b411b363SPhilipp Reisner (unsigned long long)uuid[UI_BITMAP], 2318b411b363SPhilipp Reisner (unsigned long long)uuid[UI_HISTORY_START], 2319b411b363SPhilipp Reisner (unsigned long long)uuid[UI_HISTORY_END], 2320b411b363SPhilipp Reisner (unsigned long long)bits, 2321b411b363SPhilipp Reisner (unsigned long long)flags); 2322b411b363SPhilipp Reisner } 2323b411b363SPhilipp Reisner 2324b411b363SPhilipp Reisner /* 2325b411b363SPhilipp Reisner 100 after split brain try auto recover 2326b411b363SPhilipp Reisner 2 C_SYNC_SOURCE set BitMap 2327b411b363SPhilipp Reisner 1 C_SYNC_SOURCE use BitMap 2328b411b363SPhilipp Reisner 0 no Sync 2329b411b363SPhilipp Reisner -1 C_SYNC_TARGET use BitMap 2330b411b363SPhilipp Reisner -2 C_SYNC_TARGET set BitMap 2331b411b363SPhilipp Reisner -100 after split brain, disconnect 2332b411b363SPhilipp Reisner -1000 unrelated data 23334a23f264SPhilipp Reisner -1091 requires proto 91 23344a23f264SPhilipp Reisner -1096 requires proto 96 2335b411b363SPhilipp Reisner */ 2336b411b363SPhilipp Reisner static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local) 2337b411b363SPhilipp Reisner { 2338b411b363SPhilipp Reisner u64 self, peer; 2339b411b363SPhilipp Reisner int i, j; 2340b411b363SPhilipp Reisner 2341b411b363SPhilipp Reisner self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1); 2342b411b363SPhilipp Reisner peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1); 2343b411b363SPhilipp Reisner 2344b411b363SPhilipp Reisner *rule_nr = 10; 2345b411b363SPhilipp Reisner if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED) 2346b411b363SPhilipp Reisner return 0; 2347b411b363SPhilipp Reisner 2348b411b363SPhilipp Reisner *rule_nr = 20; 2349b411b363SPhilipp Reisner if ((self == UUID_JUST_CREATED || self == (u64)0) && 2350b411b363SPhilipp Reisner peer != UUID_JUST_CREATED) 2351b411b363SPhilipp Reisner return -2; 2352b411b363SPhilipp Reisner 2353b411b363SPhilipp Reisner *rule_nr = 30; 2354b411b363SPhilipp Reisner if (self != UUID_JUST_CREATED && 2355b411b363SPhilipp Reisner (peer == UUID_JUST_CREATED || peer == (u64)0)) 2356b411b363SPhilipp Reisner return 2; 2357b411b363SPhilipp Reisner 2358b411b363SPhilipp Reisner if (self == peer) { 2359b411b363SPhilipp Reisner int rct, dc; /* roles at crash time */ 2360b411b363SPhilipp Reisner 
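/* Current UUIDs are identical on both sides.  The two blocks below handle
 * the case where exactly one side still carries a non-zero bitmap UUID,
 * i.e. one node missed the "resync finished" event (rules 34-37); if
 * neither applies, the decision falls back to which node was primary at
 * crash time (rule 40). */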
2361b411b363SPhilipp Reisner if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) { 2362b411b363SPhilipp Reisner 2363b411b363SPhilipp Reisner if (mdev->agreed_pro_version < 91) 23644a23f264SPhilipp Reisner return -1091; 2365b411b363SPhilipp Reisner 2366b411b363SPhilipp Reisner if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) && 2367b411b363SPhilipp Reisner (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) { 2368b411b363SPhilipp Reisner dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n"); 2369b411b363SPhilipp Reisner drbd_uuid_set_bm(mdev, 0UL); 2370b411b363SPhilipp Reisner 2371b411b363SPhilipp Reisner drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, 2372b411b363SPhilipp Reisner mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0); 2373b411b363SPhilipp Reisner *rule_nr = 34; 2374b411b363SPhilipp Reisner } else { 2375b411b363SPhilipp Reisner dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n"); 2376b411b363SPhilipp Reisner *rule_nr = 36; 2377b411b363SPhilipp Reisner } 2378b411b363SPhilipp Reisner 2379b411b363SPhilipp Reisner return 1; 2380b411b363SPhilipp Reisner } 2381b411b363SPhilipp Reisner 2382b411b363SPhilipp Reisner if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) { 2383b411b363SPhilipp Reisner 2384b411b363SPhilipp Reisner if (mdev->agreed_pro_version < 91) 23854a23f264SPhilipp Reisner return -1091; 2386b411b363SPhilipp Reisner 2387b411b363SPhilipp Reisner if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) && 2388b411b363SPhilipp Reisner (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) { 2389b411b363SPhilipp Reisner dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n"); 2390b411b363SPhilipp Reisner 2391b411b363SPhilipp Reisner mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START]; 2392b411b363SPhilipp Reisner mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP]; 2393b411b363SPhilipp Reisner mdev->p_uuid[UI_BITMAP] = 0UL; 2394b411b363SPhilipp Reisner 2395b411b363SPhilipp Reisner drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]); 2396b411b363SPhilipp Reisner *rule_nr = 35; 2397b411b363SPhilipp Reisner } else { 2398b411b363SPhilipp Reisner dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n"); 2399b411b363SPhilipp Reisner *rule_nr = 37; 2400b411b363SPhilipp Reisner } 2401b411b363SPhilipp Reisner 2402b411b363SPhilipp Reisner return -1; 2403b411b363SPhilipp Reisner } 2404b411b363SPhilipp Reisner 2405b411b363SPhilipp Reisner /* Common power [off|failure] */ 2406b411b363SPhilipp Reisner rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 
1 : 0) +
2407b411b363SPhilipp Reisner (mdev->p_uuid[UI_FLAGS] & 2);
2408b411b363SPhilipp Reisner /* lowest bit is set when we were primary,
2409b411b363SPhilipp Reisner * next bit (weight 2) is set when peer was primary */
2410b411b363SPhilipp Reisner *rule_nr = 40;
2411b411b363SPhilipp Reisner
2412b411b363SPhilipp Reisner switch (rct) {
2413b411b363SPhilipp Reisner case 0: /* !self_pri && !peer_pri */ return 0;
2414b411b363SPhilipp Reisner case 1: /* self_pri && !peer_pri */ return 1;
2415b411b363SPhilipp Reisner case 2: /* !self_pri && peer_pri */ return -1;
2416b411b363SPhilipp Reisner case 3: /* self_pri && peer_pri */
2417b411b363SPhilipp Reisner dc = test_bit(DISCARD_CONCURRENT, &mdev->flags);
2418b411b363SPhilipp Reisner return dc ? -1 : 1;
2419b411b363SPhilipp Reisner }
2420b411b363SPhilipp Reisner }
2421b411b363SPhilipp Reisner
2422b411b363SPhilipp Reisner *rule_nr = 50;
2423b411b363SPhilipp Reisner peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2424b411b363SPhilipp Reisner if (self == peer)
2425b411b363SPhilipp Reisner return -1;
2426b411b363SPhilipp Reisner
2427b411b363SPhilipp Reisner *rule_nr = 51;
2428b411b363SPhilipp Reisner peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2429b411b363SPhilipp Reisner if (self == peer) {
24304a23f264SPhilipp Reisner if (mdev->agreed_pro_version < 96 ?
24314a23f264SPhilipp Reisner (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
24324a23f264SPhilipp Reisner (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
24334a23f264SPhilipp Reisner peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) {
2434b411b363SPhilipp Reisner /* The last P_SYNC_UUID did not get through. Undo the last start of
2435b411b363SPhilipp Reisner resync as sync source modifications of the peer's UUIDs. */
2436b411b363SPhilipp Reisner
2437b411b363SPhilipp Reisner if (mdev->agreed_pro_version < 91)
24384a23f264SPhilipp Reisner return -1091;
2439b411b363SPhilipp Reisner
2440b411b363SPhilipp Reisner mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
2441b411b363SPhilipp Reisner mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
24424a23f264SPhilipp Reisner
24434a23f264SPhilipp Reisner dev_info(DEV, "Did not get the last syncUUID packet, corrected:\n");
24444a23f264SPhilipp Reisner drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
24454a23f264SPhilipp Reisner
2446b411b363SPhilipp Reisner return -1;
2447b411b363SPhilipp Reisner }
2448b411b363SPhilipp Reisner }
2449b411b363SPhilipp Reisner
2450b411b363SPhilipp Reisner *rule_nr = 60;
2451b411b363SPhilipp Reisner self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2452b411b363SPhilipp Reisner for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2453b411b363SPhilipp Reisner peer = mdev->p_uuid[i] & ~((u64)1);
2454b411b363SPhilipp Reisner if (self == peer)
2455b411b363SPhilipp Reisner return -2;
2456b411b363SPhilipp Reisner }
2457b411b363SPhilipp Reisner
2458b411b363SPhilipp Reisner *rule_nr = 70;
2459b411b363SPhilipp Reisner self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2460b411b363SPhilipp Reisner peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2461b411b363SPhilipp Reisner if (self == peer)
2462b411b363SPhilipp Reisner return 1;
2463b411b363SPhilipp Reisner
2464b411b363SPhilipp Reisner *rule_nr = 71;
2465b411b363SPhilipp Reisner self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2466b411b363SPhilipp Reisner if (self == peer) {
24674a23f264SPhilipp Reisner if (mdev->agreed_pro_version < 96 ?
24684a23f264SPhilipp Reisner (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
24694a23f264SPhilipp Reisner (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
24704a23f264SPhilipp Reisner self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
2471b411b363SPhilipp Reisner /* The last P_SYNC_UUID did not get through. Undo the last start of
2472b411b363SPhilipp Reisner resync as sync source modifications of our UUIDs. */
2473b411b363SPhilipp Reisner
2474b411b363SPhilipp Reisner if (mdev->agreed_pro_version < 91)
24754a23f264SPhilipp Reisner return -1091;
2476b411b363SPhilipp Reisner
2477b411b363SPhilipp Reisner _drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
2478b411b363SPhilipp Reisner _drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
2479b411b363SPhilipp Reisner
24804a23f264SPhilipp Reisner dev_info(DEV, "Last syncUUID did not get through, corrected:\n");
2481b411b363SPhilipp Reisner drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2482b411b363SPhilipp Reisner mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2483b411b363SPhilipp Reisner
2484b411b363SPhilipp Reisner return 1;
2485b411b363SPhilipp Reisner }
2486b411b363SPhilipp Reisner }
2487b411b363SPhilipp Reisner
2488b411b363SPhilipp Reisner
2489b411b363SPhilipp Reisner *rule_nr = 80;
2490d8c2a36bSPhilipp Reisner peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2491b411b363SPhilipp Reisner for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2492b411b363SPhilipp Reisner self = mdev->ldev->md.uuid[i] & ~((u64)1);
2493b411b363SPhilipp Reisner if (self == peer)
2494b411b363SPhilipp Reisner return 2;
2495b411b363SPhilipp Reisner }
2496b411b363SPhilipp Reisner
2497b411b363SPhilipp Reisner *rule_nr = 90;
2498b411b363SPhilipp Reisner self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2499b411b363SPhilipp Reisner peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2500b411b363SPhilipp Reisner if (self == peer && self != ((u64)0))
2501b411b363SPhilipp Reisner return 100;
2502b411b363SPhilipp Reisner
2503b411b363SPhilipp Reisner *rule_nr = 100;
2504b411b363SPhilipp Reisner for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2505b411b363SPhilipp Reisner self = mdev->ldev->md.uuid[i] & ~((u64)1);
2506b411b363SPhilipp Reisner for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
2507b411b363SPhilipp Reisner peer = mdev->p_uuid[j] & ~((u64)1);
2508b411b363SPhilipp Reisner if (self == peer)
2509b411b363SPhilipp Reisner return -100;
2510b411b363SPhilipp Reisner }
2511b411b363SPhilipp Reisner }
2512b411b363SPhilipp Reisner
2513b411b363SPhilipp Reisner return -1000;
2514b411b363SPhilipp Reisner }
2515b411b363SPhilipp Reisner
2516b411b363SPhilipp Reisner /* drbd_sync_handshake() returns the new conn state on success, or
2517b411b363SPhilipp Reisner CONN_MASK (-1) on failure.
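   The decision combines drbd_uuid_compare() below with the configured
   after-split-brain recovery policies and the disk states of both nodes.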
2518b411b363SPhilipp Reisner */ 2519b411b363SPhilipp Reisner static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role, 2520b411b363SPhilipp Reisner enum drbd_disk_state peer_disk) __must_hold(local) 2521b411b363SPhilipp Reisner { 2522b411b363SPhilipp Reisner int hg, rule_nr; 2523b411b363SPhilipp Reisner enum drbd_conns rv = C_MASK; 2524b411b363SPhilipp Reisner enum drbd_disk_state mydisk; 2525b411b363SPhilipp Reisner 2526b411b363SPhilipp Reisner mydisk = mdev->state.disk; 2527b411b363SPhilipp Reisner if (mydisk == D_NEGOTIATING) 2528b411b363SPhilipp Reisner mydisk = mdev->new_state_tmp.disk; 2529b411b363SPhilipp Reisner 2530b411b363SPhilipp Reisner dev_info(DEV, "drbd_sync_handshake:\n"); 2531b411b363SPhilipp Reisner drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0); 2532b411b363SPhilipp Reisner drbd_uuid_dump(mdev, "peer", mdev->p_uuid, 2533b411b363SPhilipp Reisner mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]); 2534b411b363SPhilipp Reisner 2535b411b363SPhilipp Reisner hg = drbd_uuid_compare(mdev, &rule_nr); 2536b411b363SPhilipp Reisner 2537b411b363SPhilipp Reisner dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr); 2538b411b363SPhilipp Reisner 2539b411b363SPhilipp Reisner if (hg == -1000) { 2540b411b363SPhilipp Reisner dev_alert(DEV, "Unrelated data, aborting!\n"); 2541b411b363SPhilipp Reisner return C_MASK; 2542b411b363SPhilipp Reisner } 25434a23f264SPhilipp Reisner if (hg < -1000) { 25444a23f264SPhilipp Reisner dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000); 2545b411b363SPhilipp Reisner return C_MASK; 2546b411b363SPhilipp Reisner } 2547b411b363SPhilipp Reisner 2548b411b363SPhilipp Reisner if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) || 2549b411b363SPhilipp Reisner (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) { 2550b411b363SPhilipp Reisner int f = (hg == -100) || abs(hg) == 2; 2551b411b363SPhilipp Reisner hg = mydisk > D_INCONSISTENT ? 1 : -1; 2552b411b363SPhilipp Reisner if (f) 2553b411b363SPhilipp Reisner hg = hg*2; 2554b411b363SPhilipp Reisner dev_info(DEV, "Becoming sync %s due to disk states.\n", 2555b411b363SPhilipp Reisner hg > 0 ? "source" : "target"); 2556b411b363SPhilipp Reisner } 2557b411b363SPhilipp Reisner 25583a11a487SAdam Gandelman if (abs(hg) == 100) 25593a11a487SAdam Gandelman drbd_khelper(mdev, "initial-split-brain"); 25603a11a487SAdam Gandelman 256189e58e75SPhilipp Reisner if (hg == 100 || (hg == -100 && mdev->tconn->net_conf->always_asbp)) { 2562b411b363SPhilipp Reisner int pcount = (mdev->state.role == R_PRIMARY) 2563b411b363SPhilipp Reisner + (peer_role == R_PRIMARY); 2564b411b363SPhilipp Reisner int forced = (hg == -100); 2565b411b363SPhilipp Reisner 2566b411b363SPhilipp Reisner switch (pcount) { 2567b411b363SPhilipp Reisner case 0: 2568b411b363SPhilipp Reisner hg = drbd_asb_recover_0p(mdev); 2569b411b363SPhilipp Reisner break; 2570b411b363SPhilipp Reisner case 1: 2571b411b363SPhilipp Reisner hg = drbd_asb_recover_1p(mdev); 2572b411b363SPhilipp Reisner break; 2573b411b363SPhilipp Reisner case 2: 2574b411b363SPhilipp Reisner hg = drbd_asb_recover_2p(mdev); 2575b411b363SPhilipp Reisner break; 2576b411b363SPhilipp Reisner } 2577b411b363SPhilipp Reisner if (abs(hg) < 100) { 2578b411b363SPhilipp Reisner dev_warn(DEV, "Split-Brain detected, %d primaries, " 2579b411b363SPhilipp Reisner "automatically solved. Sync from %s node\n", 2580b411b363SPhilipp Reisner pcount, (hg < 0) ? 
"peer" : "this"); 2581b411b363SPhilipp Reisner if (forced) { 2582b411b363SPhilipp Reisner dev_warn(DEV, "Doing a full sync, since" 2583b411b363SPhilipp Reisner " UUIDs where ambiguous.\n"); 2584b411b363SPhilipp Reisner hg = hg*2; 2585b411b363SPhilipp Reisner } 2586b411b363SPhilipp Reisner } 2587b411b363SPhilipp Reisner } 2588b411b363SPhilipp Reisner 2589b411b363SPhilipp Reisner if (hg == -100) { 259089e58e75SPhilipp Reisner if (mdev->tconn->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1)) 2591b411b363SPhilipp Reisner hg = -1; 259289e58e75SPhilipp Reisner if (!mdev->tconn->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1)) 2593b411b363SPhilipp Reisner hg = 1; 2594b411b363SPhilipp Reisner 2595b411b363SPhilipp Reisner if (abs(hg) < 100) 2596b411b363SPhilipp Reisner dev_warn(DEV, "Split-Brain detected, manually solved. " 2597b411b363SPhilipp Reisner "Sync from %s node\n", 2598b411b363SPhilipp Reisner (hg < 0) ? "peer" : "this"); 2599b411b363SPhilipp Reisner } 2600b411b363SPhilipp Reisner 2601b411b363SPhilipp Reisner if (hg == -100) { 2602580b9767SLars Ellenberg /* FIXME this log message is not correct if we end up here 2603580b9767SLars Ellenberg * after an attempted attach on a diskless node. 2604580b9767SLars Ellenberg * We just refuse to attach -- well, we drop the "connection" 2605580b9767SLars Ellenberg * to that disk, in a way... */ 26063a11a487SAdam Gandelman dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n"); 2607b411b363SPhilipp Reisner drbd_khelper(mdev, "split-brain"); 2608b411b363SPhilipp Reisner return C_MASK; 2609b411b363SPhilipp Reisner } 2610b411b363SPhilipp Reisner 2611b411b363SPhilipp Reisner if (hg > 0 && mydisk <= D_INCONSISTENT) { 2612b411b363SPhilipp Reisner dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n"); 2613b411b363SPhilipp Reisner return C_MASK; 2614b411b363SPhilipp Reisner } 2615b411b363SPhilipp Reisner 2616b411b363SPhilipp Reisner if (hg < 0 && /* by intention we do not use mydisk here. */ 2617b411b363SPhilipp Reisner mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) { 261889e58e75SPhilipp Reisner switch (mdev->tconn->net_conf->rr_conflict) { 2619b411b363SPhilipp Reisner case ASB_CALL_HELPER: 2620b411b363SPhilipp Reisner drbd_khelper(mdev, "pri-lost"); 2621b411b363SPhilipp Reisner /* fall through */ 2622b411b363SPhilipp Reisner case ASB_DISCONNECT: 2623b411b363SPhilipp Reisner dev_err(DEV, "I shall become SyncTarget, but I am primary!\n"); 2624b411b363SPhilipp Reisner return C_MASK; 2625b411b363SPhilipp Reisner case ASB_VIOLENTLY: 2626b411b363SPhilipp Reisner dev_warn(DEV, "Becoming SyncTarget, violating the stable-data" 2627b411b363SPhilipp Reisner "assumption\n"); 2628b411b363SPhilipp Reisner } 2629b411b363SPhilipp Reisner } 2630b411b363SPhilipp Reisner 263189e58e75SPhilipp Reisner if (mdev->tconn->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) { 2632cf14c2e9SPhilipp Reisner if (hg == 0) 2633cf14c2e9SPhilipp Reisner dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n"); 2634cf14c2e9SPhilipp Reisner else 2635cf14c2e9SPhilipp Reisner dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.", 2636cf14c2e9SPhilipp Reisner drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET), 2637cf14c2e9SPhilipp Reisner abs(hg) >= 2 ? 
"full" : "bit-map based"); 2638cf14c2e9SPhilipp Reisner return C_MASK; 2639cf14c2e9SPhilipp Reisner } 2640cf14c2e9SPhilipp Reisner 2641b411b363SPhilipp Reisner if (abs(hg) >= 2) { 2642b411b363SPhilipp Reisner dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n"); 264320ceb2b2SLars Ellenberg if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake", 264420ceb2b2SLars Ellenberg BM_LOCKED_SET_ALLOWED)) 2645b411b363SPhilipp Reisner return C_MASK; 2646b411b363SPhilipp Reisner } 2647b411b363SPhilipp Reisner 2648b411b363SPhilipp Reisner if (hg > 0) { /* become sync source. */ 2649b411b363SPhilipp Reisner rv = C_WF_BITMAP_S; 2650b411b363SPhilipp Reisner } else if (hg < 0) { /* become sync target */ 2651b411b363SPhilipp Reisner rv = C_WF_BITMAP_T; 2652b411b363SPhilipp Reisner } else { 2653b411b363SPhilipp Reisner rv = C_CONNECTED; 2654b411b363SPhilipp Reisner if (drbd_bm_total_weight(mdev)) { 2655b411b363SPhilipp Reisner dev_info(DEV, "No resync, but %lu bits in bitmap!\n", 2656b411b363SPhilipp Reisner drbd_bm_total_weight(mdev)); 2657b411b363SPhilipp Reisner } 2658b411b363SPhilipp Reisner } 2659b411b363SPhilipp Reisner 2660b411b363SPhilipp Reisner return rv; 2661b411b363SPhilipp Reisner } 2662b411b363SPhilipp Reisner 2663b411b363SPhilipp Reisner /* returns 1 if invalid */ 2664b411b363SPhilipp Reisner static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self) 2665b411b363SPhilipp Reisner { 2666b411b363SPhilipp Reisner /* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */ 2667b411b363SPhilipp Reisner if ((peer == ASB_DISCARD_REMOTE && self == ASB_DISCARD_LOCAL) || 2668b411b363SPhilipp Reisner (self == ASB_DISCARD_REMOTE && peer == ASB_DISCARD_LOCAL)) 2669b411b363SPhilipp Reisner return 0; 2670b411b363SPhilipp Reisner 2671b411b363SPhilipp Reisner /* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */ 2672b411b363SPhilipp Reisner if (peer == ASB_DISCARD_REMOTE || peer == ASB_DISCARD_LOCAL || 2673b411b363SPhilipp Reisner self == ASB_DISCARD_REMOTE || self == ASB_DISCARD_LOCAL) 2674b411b363SPhilipp Reisner return 1; 2675b411b363SPhilipp Reisner 2676b411b363SPhilipp Reisner /* everything else is valid if they are equal on both sides. */ 2677b411b363SPhilipp Reisner if (peer == self) 2678b411b363SPhilipp Reisner return 0; 2679b411b363SPhilipp Reisner 2680b411b363SPhilipp Reisner /* everything es is invalid. 
*/ 2681b411b363SPhilipp Reisner return 1; 2682b411b363SPhilipp Reisner } 2683b411b363SPhilipp Reisner 268402918be2SPhilipp Reisner static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) 2685b411b363SPhilipp Reisner { 268602918be2SPhilipp Reisner struct p_protocol *p = &mdev->data.rbuf.protocol; 2687b411b363SPhilipp Reisner int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p; 2688cf14c2e9SPhilipp Reisner int p_want_lose, p_two_primaries, cf; 2689b411b363SPhilipp Reisner char p_integrity_alg[SHARED_SECRET_MAX] = ""; 2690b411b363SPhilipp Reisner 2691b411b363SPhilipp Reisner p_proto = be32_to_cpu(p->protocol); 2692b411b363SPhilipp Reisner p_after_sb_0p = be32_to_cpu(p->after_sb_0p); 2693b411b363SPhilipp Reisner p_after_sb_1p = be32_to_cpu(p->after_sb_1p); 2694b411b363SPhilipp Reisner p_after_sb_2p = be32_to_cpu(p->after_sb_2p); 2695b411b363SPhilipp Reisner p_two_primaries = be32_to_cpu(p->two_primaries); 2696cf14c2e9SPhilipp Reisner cf = be32_to_cpu(p->conn_flags); 2697cf14c2e9SPhilipp Reisner p_want_lose = cf & CF_WANT_LOSE; 2698cf14c2e9SPhilipp Reisner 2699cf14c2e9SPhilipp Reisner clear_bit(CONN_DRY_RUN, &mdev->flags); 2700cf14c2e9SPhilipp Reisner 2701cf14c2e9SPhilipp Reisner if (cf & CF_DRY_RUN) 2702cf14c2e9SPhilipp Reisner set_bit(CONN_DRY_RUN, &mdev->flags); 2703b411b363SPhilipp Reisner 270489e58e75SPhilipp Reisner if (p_proto != mdev->tconn->net_conf->wire_protocol) { 2705b411b363SPhilipp Reisner dev_err(DEV, "incompatible communication protocols\n"); 2706b411b363SPhilipp Reisner goto disconnect; 2707b411b363SPhilipp Reisner } 2708b411b363SPhilipp Reisner 270989e58e75SPhilipp Reisner if (cmp_after_sb(p_after_sb_0p, mdev->tconn->net_conf->after_sb_0p)) { 2710b411b363SPhilipp Reisner dev_err(DEV, "incompatible after-sb-0pri settings\n"); 2711b411b363SPhilipp Reisner goto disconnect; 2712b411b363SPhilipp Reisner } 2713b411b363SPhilipp Reisner 271489e58e75SPhilipp Reisner if (cmp_after_sb(p_after_sb_1p, mdev->tconn->net_conf->after_sb_1p)) { 2715b411b363SPhilipp Reisner dev_err(DEV, "incompatible after-sb-1pri settings\n"); 2716b411b363SPhilipp Reisner goto disconnect; 2717b411b363SPhilipp Reisner } 2718b411b363SPhilipp Reisner 271989e58e75SPhilipp Reisner if (cmp_after_sb(p_after_sb_2p, mdev->tconn->net_conf->after_sb_2p)) { 2720b411b363SPhilipp Reisner dev_err(DEV, "incompatible after-sb-2pri settings\n"); 2721b411b363SPhilipp Reisner goto disconnect; 2722b411b363SPhilipp Reisner } 2723b411b363SPhilipp Reisner 272489e58e75SPhilipp Reisner if (p_want_lose && mdev->tconn->net_conf->want_lose) { 2725b411b363SPhilipp Reisner dev_err(DEV, "both sides have the 'want_lose' flag set\n"); 2726b411b363SPhilipp Reisner goto disconnect; 2727b411b363SPhilipp Reisner } 2728b411b363SPhilipp Reisner 272989e58e75SPhilipp Reisner if (p_two_primaries != mdev->tconn->net_conf->two_primaries) { 2730b411b363SPhilipp Reisner dev_err(DEV, "incompatible setting of the two-primaries options\n"); 2731b411b363SPhilipp Reisner goto disconnect; 2732b411b363SPhilipp Reisner } 2733b411b363SPhilipp Reisner 2734b411b363SPhilipp Reisner if (mdev->agreed_pro_version >= 87) { 273589e58e75SPhilipp Reisner unsigned char *my_alg = mdev->tconn->net_conf->integrity_alg; 2736b411b363SPhilipp Reisner 2737b411b363SPhilipp Reisner if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size) 273881e84650SAndreas Gruenbacher return false; 2739b411b363SPhilipp Reisner 2740b411b363SPhilipp Reisner p_integrity_alg[SHARED_SECRET_MAX-1] = 0; 2741b411b363SPhilipp Reisner if 
(strcmp(p_integrity_alg, my_alg)) { 2742b411b363SPhilipp Reisner dev_err(DEV, "incompatible setting of the data-integrity-alg\n"); 2743b411b363SPhilipp Reisner goto disconnect; 2744b411b363SPhilipp Reisner } 2745b411b363SPhilipp Reisner dev_info(DEV, "data-integrity-alg: %s\n", 2746b411b363SPhilipp Reisner my_alg[0] ? my_alg : (unsigned char *)"<not-used>"); 2747b411b363SPhilipp Reisner } 2748b411b363SPhilipp Reisner 274981e84650SAndreas Gruenbacher return true; 2750b411b363SPhilipp Reisner 2751b411b363SPhilipp Reisner disconnect: 2752b411b363SPhilipp Reisner drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); 275381e84650SAndreas Gruenbacher return false; 2754b411b363SPhilipp Reisner } 2755b411b363SPhilipp Reisner 2756b411b363SPhilipp Reisner /* helper function 2757b411b363SPhilipp Reisner * input: alg name, feature name 2758b411b363SPhilipp Reisner * return: NULL (alg name was "") 2759b411b363SPhilipp Reisner * ERR_PTR(error) if something goes wrong 2760b411b363SPhilipp Reisner * or the crypto hash ptr, if it worked out ok. */ 2761b411b363SPhilipp Reisner struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev, 2762b411b363SPhilipp Reisner const char *alg, const char *name) 2763b411b363SPhilipp Reisner { 2764b411b363SPhilipp Reisner struct crypto_hash *tfm; 2765b411b363SPhilipp Reisner 2766b411b363SPhilipp Reisner if (!alg[0]) 2767b411b363SPhilipp Reisner return NULL; 2768b411b363SPhilipp Reisner 2769b411b363SPhilipp Reisner tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC); 2770b411b363SPhilipp Reisner if (IS_ERR(tfm)) { 2771b411b363SPhilipp Reisner dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n", 2772b411b363SPhilipp Reisner alg, name, PTR_ERR(tfm)); 2773b411b363SPhilipp Reisner return tfm; 2774b411b363SPhilipp Reisner } 2775b411b363SPhilipp Reisner if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) { 2776b411b363SPhilipp Reisner crypto_free_hash(tfm); 2777b411b363SPhilipp Reisner dev_err(DEV, "\"%s\" is not a digest (%s)\n", alg, name); 2778b411b363SPhilipp Reisner return ERR_PTR(-EINVAL); 2779b411b363SPhilipp Reisner } 2780b411b363SPhilipp Reisner return tfm; 2781b411b363SPhilipp Reisner } 2782b411b363SPhilipp Reisner 278302918be2SPhilipp Reisner static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int packet_size) 2784b411b363SPhilipp Reisner { 278581e84650SAndreas Gruenbacher int ok = true; 278602918be2SPhilipp Reisner struct p_rs_param_95 *p = &mdev->data.rbuf.rs_param_95; 2787b411b363SPhilipp Reisner unsigned int header_size, data_size, exp_max_sz; 2788b411b363SPhilipp Reisner struct crypto_hash *verify_tfm = NULL; 2789b411b363SPhilipp Reisner struct crypto_hash *csums_tfm = NULL; 2790b411b363SPhilipp Reisner const int apv = mdev->agreed_pro_version; 2791778f271dSPhilipp Reisner int *rs_plan_s = NULL; 2792778f271dSPhilipp Reisner int fifo_size = 0; 2793b411b363SPhilipp Reisner 2794b411b363SPhilipp Reisner exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param) 2795b411b363SPhilipp Reisner : apv == 88 ? sizeof(struct p_rs_param) 2796b411b363SPhilipp Reisner + SHARED_SECRET_MAX 27978e26f9ccSPhilipp Reisner : apv <= 94 ? 
sizeof(struct p_rs_param_89) 27988e26f9ccSPhilipp Reisner : /* apv >= 95 */ sizeof(struct p_rs_param_95); 2799b411b363SPhilipp Reisner 280002918be2SPhilipp Reisner if (packet_size > exp_max_sz) { 2801b411b363SPhilipp Reisner dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n", 280202918be2SPhilipp Reisner packet_size, exp_max_sz); 280381e84650SAndreas Gruenbacher return false; 2804b411b363SPhilipp Reisner } 2805b411b363SPhilipp Reisner 2806b411b363SPhilipp Reisner if (apv <= 88) { 280702918be2SPhilipp Reisner header_size = sizeof(struct p_rs_param) - sizeof(struct p_header80); 280802918be2SPhilipp Reisner data_size = packet_size - header_size; 28098e26f9ccSPhilipp Reisner } else if (apv <= 94) { 281002918be2SPhilipp Reisner header_size = sizeof(struct p_rs_param_89) - sizeof(struct p_header80); 281102918be2SPhilipp Reisner data_size = packet_size - header_size; 2812b411b363SPhilipp Reisner D_ASSERT(data_size == 0); 28138e26f9ccSPhilipp Reisner } else { 281402918be2SPhilipp Reisner header_size = sizeof(struct p_rs_param_95) - sizeof(struct p_header80); 281502918be2SPhilipp Reisner data_size = packet_size - header_size; 2816b411b363SPhilipp Reisner D_ASSERT(data_size == 0); 2817b411b363SPhilipp Reisner } 2818b411b363SPhilipp Reisner 2819b411b363SPhilipp Reisner /* initialize verify_alg and csums_alg */ 2820b411b363SPhilipp Reisner memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX); 2821b411b363SPhilipp Reisner 282202918be2SPhilipp Reisner if (drbd_recv(mdev, &p->head.payload, header_size) != header_size) 282381e84650SAndreas Gruenbacher return false; 2824b411b363SPhilipp Reisner 2825b411b363SPhilipp Reisner mdev->sync_conf.rate = be32_to_cpu(p->rate); 2826b411b363SPhilipp Reisner 2827b411b363SPhilipp Reisner if (apv >= 88) { 2828b411b363SPhilipp Reisner if (apv == 88) { 2829b411b363SPhilipp Reisner if (data_size > SHARED_SECRET_MAX) { 2830b411b363SPhilipp Reisner dev_err(DEV, "verify-alg too long, " 2831b411b363SPhilipp Reisner "peer wants %u, accepting only %u byte\n", 2832b411b363SPhilipp Reisner data_size, SHARED_SECRET_MAX); 283381e84650SAndreas Gruenbacher return false; 2834b411b363SPhilipp Reisner } 2835b411b363SPhilipp Reisner 2836b411b363SPhilipp Reisner if (drbd_recv(mdev, p->verify_alg, data_size) != data_size) 283781e84650SAndreas Gruenbacher return false; 2838b411b363SPhilipp Reisner 2839b411b363SPhilipp Reisner /* we expect NUL terminated string */ 2840b411b363SPhilipp Reisner /* but just in case someone tries to be evil */ 2841b411b363SPhilipp Reisner D_ASSERT(p->verify_alg[data_size-1] == 0); 2842b411b363SPhilipp Reisner p->verify_alg[data_size-1] = 0; 2843b411b363SPhilipp Reisner 2844b411b363SPhilipp Reisner } else /* apv >= 89 */ { 2845b411b363SPhilipp Reisner /* we still expect NUL terminated strings */ 2846b411b363SPhilipp Reisner /* but just in case someone tries to be evil */ 2847b411b363SPhilipp Reisner D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0); 2848b411b363SPhilipp Reisner D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0); 2849b411b363SPhilipp Reisner p->verify_alg[SHARED_SECRET_MAX-1] = 0; 2850b411b363SPhilipp Reisner p->csums_alg[SHARED_SECRET_MAX-1] = 0; 2851b411b363SPhilipp Reisner } 2852b411b363SPhilipp Reisner 2853b411b363SPhilipp Reisner if (strcmp(mdev->sync_conf.verify_alg, p->verify_alg)) { 2854b411b363SPhilipp Reisner if (mdev->state.conn == C_WF_REPORT_PARAMS) { 2855b411b363SPhilipp Reisner dev_err(DEV, "Different verify-alg settings. 
me=\"%s\" peer=\"%s\"\n", 2856b411b363SPhilipp Reisner mdev->sync_conf.verify_alg, p->verify_alg); 2857b411b363SPhilipp Reisner goto disconnect; 2858b411b363SPhilipp Reisner } 2859b411b363SPhilipp Reisner verify_tfm = drbd_crypto_alloc_digest_safe(mdev, 2860b411b363SPhilipp Reisner p->verify_alg, "verify-alg"); 2861b411b363SPhilipp Reisner if (IS_ERR(verify_tfm)) { 2862b411b363SPhilipp Reisner verify_tfm = NULL; 2863b411b363SPhilipp Reisner goto disconnect; 2864b411b363SPhilipp Reisner } 2865b411b363SPhilipp Reisner } 2866b411b363SPhilipp Reisner 2867b411b363SPhilipp Reisner if (apv >= 89 && strcmp(mdev->sync_conf.csums_alg, p->csums_alg)) { 2868b411b363SPhilipp Reisner if (mdev->state.conn == C_WF_REPORT_PARAMS) { 2869b411b363SPhilipp Reisner dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n", 2870b411b363SPhilipp Reisner mdev->sync_conf.csums_alg, p->csums_alg); 2871b411b363SPhilipp Reisner goto disconnect; 2872b411b363SPhilipp Reisner } 2873b411b363SPhilipp Reisner csums_tfm = drbd_crypto_alloc_digest_safe(mdev, 2874b411b363SPhilipp Reisner p->csums_alg, "csums-alg"); 2875b411b363SPhilipp Reisner if (IS_ERR(csums_tfm)) { 2876b411b363SPhilipp Reisner csums_tfm = NULL; 2877b411b363SPhilipp Reisner goto disconnect; 2878b411b363SPhilipp Reisner } 2879b411b363SPhilipp Reisner } 2880b411b363SPhilipp Reisner 28818e26f9ccSPhilipp Reisner if (apv > 94) { 28828e26f9ccSPhilipp Reisner mdev->sync_conf.rate = be32_to_cpu(p->rate); 28838e26f9ccSPhilipp Reisner mdev->sync_conf.c_plan_ahead = be32_to_cpu(p->c_plan_ahead); 28848e26f9ccSPhilipp Reisner mdev->sync_conf.c_delay_target = be32_to_cpu(p->c_delay_target); 28858e26f9ccSPhilipp Reisner mdev->sync_conf.c_fill_target = be32_to_cpu(p->c_fill_target); 28868e26f9ccSPhilipp Reisner mdev->sync_conf.c_max_rate = be32_to_cpu(p->c_max_rate); 2887778f271dSPhilipp Reisner 2888778f271dSPhilipp Reisner fifo_size = (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ; 2889778f271dSPhilipp Reisner if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) { 2890778f271dSPhilipp Reisner rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL); 2891778f271dSPhilipp Reisner if (!rs_plan_s) { 2892778f271dSPhilipp Reisner dev_err(DEV, "kmalloc of fifo_buffer failed"); 2893778f271dSPhilipp Reisner goto disconnect; 2894778f271dSPhilipp Reisner } 2895778f271dSPhilipp Reisner } 28968e26f9ccSPhilipp Reisner } 2897b411b363SPhilipp Reisner 2898b411b363SPhilipp Reisner spin_lock(&mdev->peer_seq_lock); 2899b411b363SPhilipp Reisner /* lock against drbd_nl_syncer_conf() */ 2900b411b363SPhilipp Reisner if (verify_tfm) { 2901b411b363SPhilipp Reisner strcpy(mdev->sync_conf.verify_alg, p->verify_alg); 2902b411b363SPhilipp Reisner mdev->sync_conf.verify_alg_len = strlen(p->verify_alg) + 1; 2903b411b363SPhilipp Reisner crypto_free_hash(mdev->verify_tfm); 2904b411b363SPhilipp Reisner mdev->verify_tfm = verify_tfm; 2905b411b363SPhilipp Reisner dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg); 2906b411b363SPhilipp Reisner } 2907b411b363SPhilipp Reisner if (csums_tfm) { 2908b411b363SPhilipp Reisner strcpy(mdev->sync_conf.csums_alg, p->csums_alg); 2909b411b363SPhilipp Reisner mdev->sync_conf.csums_alg_len = strlen(p->csums_alg) + 1; 2910b411b363SPhilipp Reisner crypto_free_hash(mdev->csums_tfm); 2911b411b363SPhilipp Reisner mdev->csums_tfm = csums_tfm; 2912b411b363SPhilipp Reisner dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg); 2913b411b363SPhilipp Reisner } 2914778f271dSPhilipp Reisner if (fifo_size != mdev->rs_plan_s.size) { 2915778f271dSPhilipp 
Reisner kfree(mdev->rs_plan_s.values); 2916778f271dSPhilipp Reisner mdev->rs_plan_s.values = rs_plan_s; 2917778f271dSPhilipp Reisner mdev->rs_plan_s.size = fifo_size; 2918778f271dSPhilipp Reisner mdev->rs_planed = 0; 2919778f271dSPhilipp Reisner } 2920b411b363SPhilipp Reisner spin_unlock(&mdev->peer_seq_lock); 2921b411b363SPhilipp Reisner } 2922b411b363SPhilipp Reisner 2923b411b363SPhilipp Reisner return ok; 2924b411b363SPhilipp Reisner disconnect: 2925b411b363SPhilipp Reisner /* just for completeness: actually not needed, 2926b411b363SPhilipp Reisner * as this is not reached if csums_tfm was ok. */ 2927b411b363SPhilipp Reisner crypto_free_hash(csums_tfm); 2928b411b363SPhilipp Reisner /* but free the verify_tfm again, if csums_tfm did not work out */ 2929b411b363SPhilipp Reisner crypto_free_hash(verify_tfm); 2930b411b363SPhilipp Reisner drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); 293181e84650SAndreas Gruenbacher return false; 2932b411b363SPhilipp Reisner } 2933b411b363SPhilipp Reisner 2934b411b363SPhilipp Reisner /* warn if the arguments differ by more than 12.5% */ 2935b411b363SPhilipp Reisner static void warn_if_differ_considerably(struct drbd_conf *mdev, 2936b411b363SPhilipp Reisner const char *s, sector_t a, sector_t b) 2937b411b363SPhilipp Reisner { 2938b411b363SPhilipp Reisner sector_t d; 2939b411b363SPhilipp Reisner if (a == 0 || b == 0) 2940b411b363SPhilipp Reisner return; 2941b411b363SPhilipp Reisner d = (a > b) ? (a - b) : (b - a); 2942b411b363SPhilipp Reisner if (d > (a>>3) || d > (b>>3)) 2943b411b363SPhilipp Reisner dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s, 2944b411b363SPhilipp Reisner (unsigned long long)a, (unsigned long long)b); 2945b411b363SPhilipp Reisner } 2946b411b363SPhilipp Reisner 294702918be2SPhilipp Reisner static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) 2948b411b363SPhilipp Reisner { 294902918be2SPhilipp Reisner struct p_sizes *p = &mdev->data.rbuf.sizes; 2950b411b363SPhilipp Reisner enum determine_dev_size dd = unchanged; 2951b411b363SPhilipp Reisner sector_t p_size, p_usize, my_usize; 2952b411b363SPhilipp Reisner int ldsc = 0; /* local disk size changed */ 2953e89b591cSPhilipp Reisner enum dds_flags ddsf; 2954b411b363SPhilipp Reisner 2955b411b363SPhilipp Reisner p_size = be64_to_cpu(p->d_size); 2956b411b363SPhilipp Reisner p_usize = be64_to_cpu(p->u_size); 2957b411b363SPhilipp Reisner 2958b411b363SPhilipp Reisner if (p_size == 0 && mdev->state.disk == D_DISKLESS) { 2959b411b363SPhilipp Reisner dev_err(DEV, "some backing storage is needed\n"); 2960b411b363SPhilipp Reisner drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); 296181e84650SAndreas Gruenbacher return false; 2962b411b363SPhilipp Reisner } 2963b411b363SPhilipp Reisner 2964b411b363SPhilipp Reisner /* just store the peer's disk size for now. 2965b411b363SPhilipp Reisner * we still need to figure out whether we accept that. 
*/ 2966b411b363SPhilipp Reisner mdev->p_size = p_size; 2967b411b363SPhilipp Reisner 2968b411b363SPhilipp Reisner if (get_ldev(mdev)) { 2969b411b363SPhilipp Reisner warn_if_differ_considerably(mdev, "lower level device sizes", 2970b411b363SPhilipp Reisner p_size, drbd_get_max_capacity(mdev->ldev)); 2971b411b363SPhilipp Reisner warn_if_differ_considerably(mdev, "user requested size", 2972b411b363SPhilipp Reisner p_usize, mdev->ldev->dc.disk_size); 2973b411b363SPhilipp Reisner 2974b411b363SPhilipp Reisner /* if this is the first connect, or an otherwise expected 2975b411b363SPhilipp Reisner * param exchange, choose the minimum */ 2976b411b363SPhilipp Reisner if (mdev->state.conn == C_WF_REPORT_PARAMS) 2977b411b363SPhilipp Reisner p_usize = min_not_zero((sector_t)mdev->ldev->dc.disk_size, 2978b411b363SPhilipp Reisner p_usize); 2979b411b363SPhilipp Reisner 2980b411b363SPhilipp Reisner my_usize = mdev->ldev->dc.disk_size; 2981b411b363SPhilipp Reisner 2982b411b363SPhilipp Reisner if (mdev->ldev->dc.disk_size != p_usize) { 2983b411b363SPhilipp Reisner mdev->ldev->dc.disk_size = p_usize; 2984b411b363SPhilipp Reisner dev_info(DEV, "Peer sets u_size to %lu sectors\n", 2985b411b363SPhilipp Reisner (unsigned long)mdev->ldev->dc.disk_size); 2986b411b363SPhilipp Reisner } 2987b411b363SPhilipp Reisner 2988b411b363SPhilipp Reisner /* Never shrink a device with usable data during connect. 2989b411b363SPhilipp Reisner But allow online shrinking if we are connected. */ 2990a393db6fSPhilipp Reisner if (drbd_new_dev_size(mdev, mdev->ldev, 0) < 2991b411b363SPhilipp Reisner drbd_get_capacity(mdev->this_bdev) && 2992b411b363SPhilipp Reisner mdev->state.disk >= D_OUTDATED && 2993b411b363SPhilipp Reisner mdev->state.conn < C_CONNECTED) { 2994b411b363SPhilipp Reisner dev_err(DEV, "The peer's disk size is too small!\n"); 2995b411b363SPhilipp Reisner drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); 2996b411b363SPhilipp Reisner mdev->ldev->dc.disk_size = my_usize; 2997b411b363SPhilipp Reisner put_ldev(mdev); 299881e84650SAndreas Gruenbacher return false; 2999b411b363SPhilipp Reisner } 3000b411b363SPhilipp Reisner put_ldev(mdev); 3001b411b363SPhilipp Reisner } 3002b411b363SPhilipp Reisner 3003e89b591cSPhilipp Reisner ddsf = be16_to_cpu(p->dds_flags); 3004b411b363SPhilipp Reisner if (get_ldev(mdev)) { 300524c4830cSBart Van Assche dd = drbd_determine_dev_size(mdev, ddsf); 3006b411b363SPhilipp Reisner put_ldev(mdev); 3007b411b363SPhilipp Reisner if (dd == dev_size_error) 300881e84650SAndreas Gruenbacher return false; 3009b411b363SPhilipp Reisner drbd_md_sync(mdev); 3010b411b363SPhilipp Reisner } else { 3011b411b363SPhilipp Reisner /* I am diskless, need to accept the peer's size. 
*/ 3012b411b363SPhilipp Reisner drbd_set_my_capacity(mdev, p_size); 3013b411b363SPhilipp Reisner } 3014b411b363SPhilipp Reisner 301599432fccSPhilipp Reisner mdev->peer_max_bio_size = be32_to_cpu(p->max_bio_size); 301699432fccSPhilipp Reisner drbd_reconsider_max_bio_size(mdev); 301799432fccSPhilipp Reisner 3018b411b363SPhilipp Reisner if (get_ldev(mdev)) { 3019b411b363SPhilipp Reisner if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) { 3020b411b363SPhilipp Reisner mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev); 3021b411b363SPhilipp Reisner ldsc = 1; 3022b411b363SPhilipp Reisner } 3023b411b363SPhilipp Reisner 3024b411b363SPhilipp Reisner put_ldev(mdev); 3025b411b363SPhilipp Reisner } 3026b411b363SPhilipp Reisner 3027b411b363SPhilipp Reisner if (mdev->state.conn > C_WF_REPORT_PARAMS) { 3028b411b363SPhilipp Reisner if (be64_to_cpu(p->c_size) != 3029b411b363SPhilipp Reisner drbd_get_capacity(mdev->this_bdev) || ldsc) { 3030b411b363SPhilipp Reisner /* we have different sizes, probably peer 3031b411b363SPhilipp Reisner * needs to know my new size... */ 3032e89b591cSPhilipp Reisner drbd_send_sizes(mdev, 0, ddsf); 3033b411b363SPhilipp Reisner } 3034b411b363SPhilipp Reisner if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) || 3035b411b363SPhilipp Reisner (dd == grew && mdev->state.conn == C_CONNECTED)) { 3036b411b363SPhilipp Reisner if (mdev->state.pdsk >= D_INCONSISTENT && 3037e89b591cSPhilipp Reisner mdev->state.disk >= D_INCONSISTENT) { 3038e89b591cSPhilipp Reisner if (ddsf & DDSF_NO_RESYNC) 3039e89b591cSPhilipp Reisner dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n"); 3040b411b363SPhilipp Reisner else 3041e89b591cSPhilipp Reisner resync_after_online_grow(mdev); 3042e89b591cSPhilipp Reisner } else 3043b411b363SPhilipp Reisner set_bit(RESYNC_AFTER_NEG, &mdev->flags); 3044b411b363SPhilipp Reisner } 3045b411b363SPhilipp Reisner } 3046b411b363SPhilipp Reisner 304781e84650SAndreas Gruenbacher return true; 3048b411b363SPhilipp Reisner } 3049b411b363SPhilipp Reisner 305002918be2SPhilipp Reisner static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) 3051b411b363SPhilipp Reisner { 305202918be2SPhilipp Reisner struct p_uuids *p = &mdev->data.rbuf.uuids; 3053b411b363SPhilipp Reisner u64 *p_uuid; 305462b0da3aSLars Ellenberg int i, updated_uuids = 0; 3055b411b363SPhilipp Reisner 3056b411b363SPhilipp Reisner p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO); 3057b411b363SPhilipp Reisner 3058b411b363SPhilipp Reisner for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++) 3059b411b363SPhilipp Reisner p_uuid[i] = be64_to_cpu(p->uuid[i]); 3060b411b363SPhilipp Reisner 3061b411b363SPhilipp Reisner kfree(mdev->p_uuid); 3062b411b363SPhilipp Reisner mdev->p_uuid = p_uuid; 3063b411b363SPhilipp Reisner 3064b411b363SPhilipp Reisner if (mdev->state.conn < C_CONNECTED && 3065b411b363SPhilipp Reisner mdev->state.disk < D_INCONSISTENT && 3066b411b363SPhilipp Reisner mdev->state.role == R_PRIMARY && 3067b411b363SPhilipp Reisner (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) { 3068b411b363SPhilipp Reisner dev_err(DEV, "Can only connect to data with current UUID=%016llX\n", 3069b411b363SPhilipp Reisner (unsigned long long)mdev->ed_uuid); 3070b411b363SPhilipp Reisner drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); 307181e84650SAndreas Gruenbacher return false; 3072b411b363SPhilipp Reisner } 3073b411b363SPhilipp Reisner 3074b411b363SPhilipp Reisner if (get_ldev(mdev)) { 3075b411b363SPhilipp Reisner 
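/* A freshly created device (current UUID still UUID_JUST_CREATED) that is
 * already Connected may skip the initial full sync when the peer (protocol
 * 90 or newer) sets flag bit 8 in UI_FLAGS: the bitmap is cleared, the
 * peer's current UUID is adopted and both disk states go UpToDate. */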
int skip_initial_sync = 3076b411b363SPhilipp Reisner mdev->state.conn == C_CONNECTED && 3077b411b363SPhilipp Reisner mdev->agreed_pro_version >= 90 && 3078b411b363SPhilipp Reisner mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && 3079b411b363SPhilipp Reisner (p_uuid[UI_FLAGS] & 8); 3080b411b363SPhilipp Reisner if (skip_initial_sync) { 3081b411b363SPhilipp Reisner dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n"); 3082b411b363SPhilipp Reisner drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write, 308320ceb2b2SLars Ellenberg "clear_n_write from receive_uuids", 308420ceb2b2SLars Ellenberg BM_LOCKED_TEST_ALLOWED); 3085b411b363SPhilipp Reisner _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]); 3086b411b363SPhilipp Reisner _drbd_uuid_set(mdev, UI_BITMAP, 0); 3087b411b363SPhilipp Reisner _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE), 3088b411b363SPhilipp Reisner CS_VERBOSE, NULL); 3089b411b363SPhilipp Reisner drbd_md_sync(mdev); 309062b0da3aSLars Ellenberg updated_uuids = 1; 3091b411b363SPhilipp Reisner } 3092b411b363SPhilipp Reisner put_ldev(mdev); 309318a50fa2SPhilipp Reisner } else if (mdev->state.disk < D_INCONSISTENT && 309418a50fa2SPhilipp Reisner mdev->state.role == R_PRIMARY) { 309518a50fa2SPhilipp Reisner /* I am a diskless primary, the peer just created a new current UUID 309618a50fa2SPhilipp Reisner for me. */ 309762b0da3aSLars Ellenberg updated_uuids = drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]); 3098b411b363SPhilipp Reisner } 3099b411b363SPhilipp Reisner 3100b411b363SPhilipp Reisner /* Before we test for the disk state, we should wait until an eventually 3101b411b363SPhilipp Reisner ongoing cluster wide state change is finished. That is important if 3102b411b363SPhilipp Reisner we are primary and are detaching from our disk. We need to see the 3103b411b363SPhilipp Reisner new disk state... */ 3104b411b363SPhilipp Reisner wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags)); 3105b411b363SPhilipp Reisner if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT) 310662b0da3aSLars Ellenberg updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]); 310762b0da3aSLars Ellenberg 310862b0da3aSLars Ellenberg if (updated_uuids) 310962b0da3aSLars Ellenberg drbd_print_uuids(mdev, "receiver updated UUIDs to"); 3110b411b363SPhilipp Reisner 311181e84650SAndreas Gruenbacher return true; 3112b411b363SPhilipp Reisner } 3113b411b363SPhilipp Reisner 3114b411b363SPhilipp Reisner /** 3115b411b363SPhilipp Reisner * convert_state() - Converts the peer's view of the cluster state to our point of view 3116b411b363SPhilipp Reisner * @ps: The state as seen by the peer. 
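* Roles and disk states are mirrored (ms.role/ms.peer and ms.disk/ms.pdsk are
* swapped), and asymmetric connection states are mapped to their counterparts,
* e.g. C_STARTING_SYNC_S becomes C_STARTING_SYNC_T.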
3117b411b363SPhilipp Reisner */ 3118b411b363SPhilipp Reisner static union drbd_state convert_state(union drbd_state ps) 3119b411b363SPhilipp Reisner { 3120b411b363SPhilipp Reisner union drbd_state ms; 3121b411b363SPhilipp Reisner 3122b411b363SPhilipp Reisner static enum drbd_conns c_tab[] = { 3123b411b363SPhilipp Reisner [C_CONNECTED] = C_CONNECTED, 3124b411b363SPhilipp Reisner 3125b411b363SPhilipp Reisner [C_STARTING_SYNC_S] = C_STARTING_SYNC_T, 3126b411b363SPhilipp Reisner [C_STARTING_SYNC_T] = C_STARTING_SYNC_S, 3127b411b363SPhilipp Reisner [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */ 3128b411b363SPhilipp Reisner [C_VERIFY_S] = C_VERIFY_T, 3129b411b363SPhilipp Reisner [C_MASK] = C_MASK, 3130b411b363SPhilipp Reisner }; 3131b411b363SPhilipp Reisner 3132b411b363SPhilipp Reisner ms.i = ps.i; 3133b411b363SPhilipp Reisner 3134b411b363SPhilipp Reisner ms.conn = c_tab[ps.conn]; 3135b411b363SPhilipp Reisner ms.peer = ps.role; 3136b411b363SPhilipp Reisner ms.role = ps.peer; 3137b411b363SPhilipp Reisner ms.pdsk = ps.disk; 3138b411b363SPhilipp Reisner ms.disk = ps.pdsk; 3139b411b363SPhilipp Reisner ms.peer_isp = (ps.aftr_isp | ps.user_isp); 3140b411b363SPhilipp Reisner 3141b411b363SPhilipp Reisner return ms; 3142b411b363SPhilipp Reisner } 3143b411b363SPhilipp Reisner 314402918be2SPhilipp Reisner static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) 3145b411b363SPhilipp Reisner { 314602918be2SPhilipp Reisner struct p_req_state *p = &mdev->data.rbuf.req_state; 3147b411b363SPhilipp Reisner union drbd_state mask, val; 3148bf885f8aSAndreas Gruenbacher enum drbd_state_rv rv; 3149b411b363SPhilipp Reisner 3150b411b363SPhilipp Reisner mask.i = be32_to_cpu(p->mask); 3151b411b363SPhilipp Reisner val.i = be32_to_cpu(p->val); 3152b411b363SPhilipp Reisner 3153b411b363SPhilipp Reisner if (test_bit(DISCARD_CONCURRENT, &mdev->flags) && 3154b411b363SPhilipp Reisner test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) { 3155b411b363SPhilipp Reisner drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG); 315681e84650SAndreas Gruenbacher return true; 3157b411b363SPhilipp Reisner } 3158b411b363SPhilipp Reisner 3159b411b363SPhilipp Reisner mask = convert_state(mask); 3160b411b363SPhilipp Reisner val = convert_state(val); 3161b411b363SPhilipp Reisner 3162b411b363SPhilipp Reisner rv = drbd_change_state(mdev, CS_VERBOSE, mask, val); 3163b411b363SPhilipp Reisner 3164b411b363SPhilipp Reisner drbd_send_sr_reply(mdev, rv); 3165b411b363SPhilipp Reisner drbd_md_sync(mdev); 3166b411b363SPhilipp Reisner 316781e84650SAndreas Gruenbacher return true; 3168b411b363SPhilipp Reisner } 3169b411b363SPhilipp Reisner 317002918be2SPhilipp Reisner static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) 3171b411b363SPhilipp Reisner { 317202918be2SPhilipp Reisner struct p_state *p = &mdev->data.rbuf.state; 31734ac4aadaSLars Ellenberg union drbd_state os, ns, peer_state; 3174b411b363SPhilipp Reisner enum drbd_disk_state real_peer_disk; 317565d922c3SPhilipp Reisner enum chg_state_flags cs_flags; 3176b411b363SPhilipp Reisner int rv; 3177b411b363SPhilipp Reisner 3178b411b363SPhilipp Reisner peer_state.i = be32_to_cpu(p->state); 3179b411b363SPhilipp Reisner 3180b411b363SPhilipp Reisner real_peer_disk = peer_state.disk; 3181b411b363SPhilipp Reisner if (peer_state.disk == D_NEGOTIATING) { 3182b411b363SPhilipp Reisner real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? 
D_INCONSISTENT : D_CONSISTENT; 3183b411b363SPhilipp Reisner dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk)); 3184b411b363SPhilipp Reisner } 3185b411b363SPhilipp Reisner 3186b411b363SPhilipp Reisner spin_lock_irq(&mdev->req_lock); 3187b411b363SPhilipp Reisner retry: 31884ac4aadaSLars Ellenberg os = ns = mdev->state; 3189b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 3190b411b363SPhilipp Reisner 3191e9ef7bb6SLars Ellenberg /* peer says his disk is uptodate, while we think it is inconsistent, 3192e9ef7bb6SLars Ellenberg * and this happens while we think we have a sync going on. */ 3193e9ef7bb6SLars Ellenberg if (os.pdsk == D_INCONSISTENT && real_peer_disk == D_UP_TO_DATE && 3194e9ef7bb6SLars Ellenberg os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) { 3195e9ef7bb6SLars Ellenberg /* If we are (becoming) SyncSource, but peer is still in sync 3196e9ef7bb6SLars Ellenberg * preparation, ignore its uptodate-ness to avoid flapping, it 3197e9ef7bb6SLars Ellenberg * will change to inconsistent once the peer reaches active 3198e9ef7bb6SLars Ellenberg * syncing states. 3199e9ef7bb6SLars Ellenberg * It may have changed syncer-paused flags, however, so we 3200e9ef7bb6SLars Ellenberg * cannot ignore this completely. */ 3201e9ef7bb6SLars Ellenberg if (peer_state.conn > C_CONNECTED && 3202e9ef7bb6SLars Ellenberg peer_state.conn < C_SYNC_SOURCE) 3203e9ef7bb6SLars Ellenberg real_peer_disk = D_INCONSISTENT; 3204e9ef7bb6SLars Ellenberg 3205e9ef7bb6SLars Ellenberg /* if peer_state changes to connected at the same time, 3206e9ef7bb6SLars Ellenberg * it explicitly notifies us that it finished resync. 3207e9ef7bb6SLars Ellenberg * Maybe we should finish it up, too? */ 3208e9ef7bb6SLars Ellenberg else if (os.conn >= C_SYNC_SOURCE && 3209e9ef7bb6SLars Ellenberg peer_state.conn == C_CONNECTED) { 3210e9ef7bb6SLars Ellenberg if (drbd_bm_total_weight(mdev) <= mdev->rs_failed) 3211e9ef7bb6SLars Ellenberg drbd_resync_finished(mdev); 321281e84650SAndreas Gruenbacher return true; 3213e9ef7bb6SLars Ellenberg } 3214e9ef7bb6SLars Ellenberg } 3215e9ef7bb6SLars Ellenberg 3216e9ef7bb6SLars Ellenberg /* peer says his disk is inconsistent, while we think it is uptodate, 3217e9ef7bb6SLars Ellenberg * and this happens while the peer still thinks we have a sync going on, 3218e9ef7bb6SLars Ellenberg * but we think we are already done with the sync. 3219e9ef7bb6SLars Ellenberg * We ignore this to avoid flapping pdsk. 3220e9ef7bb6SLars Ellenberg * This should not happen, if the peer is a recent version of drbd. 
*/ 3221e9ef7bb6SLars Ellenberg if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT && 3222e9ef7bb6SLars Ellenberg os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE) 3223e9ef7bb6SLars Ellenberg real_peer_disk = D_UP_TO_DATE; 3224e9ef7bb6SLars Ellenberg 32254ac4aadaSLars Ellenberg if (ns.conn == C_WF_REPORT_PARAMS) 32264ac4aadaSLars Ellenberg ns.conn = C_CONNECTED; 3227b411b363SPhilipp Reisner 322867531718SPhilipp Reisner if (peer_state.conn == C_AHEAD) 322967531718SPhilipp Reisner ns.conn = C_BEHIND; 323067531718SPhilipp Reisner 3231b411b363SPhilipp Reisner if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING && 3232b411b363SPhilipp Reisner get_ldev_if_state(mdev, D_NEGOTIATING)) { 3233b411b363SPhilipp Reisner int cr; /* consider resync */ 3234b411b363SPhilipp Reisner 3235b411b363SPhilipp Reisner /* if we established a new connection */ 32364ac4aadaSLars Ellenberg cr = (os.conn < C_CONNECTED); 3237b411b363SPhilipp Reisner /* if we had an established connection 3238b411b363SPhilipp Reisner * and one of the nodes newly attaches a disk */ 32394ac4aadaSLars Ellenberg cr |= (os.conn == C_CONNECTED && 3240b411b363SPhilipp Reisner (peer_state.disk == D_NEGOTIATING || 32414ac4aadaSLars Ellenberg os.disk == D_NEGOTIATING)); 3242b411b363SPhilipp Reisner /* if we have both been inconsistent, and the peer has been 3243b411b363SPhilipp Reisner * forced to be UpToDate with --overwrite-data */ 3244b411b363SPhilipp Reisner cr |= test_bit(CONSIDER_RESYNC, &mdev->flags); 3245b411b363SPhilipp Reisner /* if we had been plain connected, and the admin requested to 3246b411b363SPhilipp Reisner * start a sync by "invalidate" or "invalidate-remote" */ 32474ac4aadaSLars Ellenberg cr |= (os.conn == C_CONNECTED && 3248b411b363SPhilipp Reisner (peer_state.conn >= C_STARTING_SYNC_S && 3249b411b363SPhilipp Reisner peer_state.conn <= C_WF_BITMAP_T)); 3250b411b363SPhilipp Reisner 3251b411b363SPhilipp Reisner if (cr) 32524ac4aadaSLars Ellenberg ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk); 3253b411b363SPhilipp Reisner 3254b411b363SPhilipp Reisner put_ldev(mdev); 32554ac4aadaSLars Ellenberg if (ns.conn == C_MASK) { 32564ac4aadaSLars Ellenberg ns.conn = C_CONNECTED; 3257b411b363SPhilipp Reisner if (mdev->state.disk == D_NEGOTIATING) { 325882f59cc6SLars Ellenberg drbd_force_state(mdev, NS(disk, D_FAILED)); 3259b411b363SPhilipp Reisner } else if (peer_state.disk == D_NEGOTIATING) { 3260b411b363SPhilipp Reisner dev_err(DEV, "Disk attach process on the peer node was aborted.\n"); 3261b411b363SPhilipp Reisner peer_state.disk = D_DISKLESS; 3262580b9767SLars Ellenberg real_peer_disk = D_DISKLESS; 3263b411b363SPhilipp Reisner } else { 3264cf14c2e9SPhilipp Reisner if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags)) 326581e84650SAndreas Gruenbacher return false; 32664ac4aadaSLars Ellenberg D_ASSERT(os.conn == C_WF_REPORT_PARAMS); 3267b411b363SPhilipp Reisner drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); 326881e84650SAndreas Gruenbacher return false; 3269b411b363SPhilipp Reisner } 3270b411b363SPhilipp Reisner } 3271b411b363SPhilipp Reisner } 3272b411b363SPhilipp Reisner 3273b411b363SPhilipp Reisner spin_lock_irq(&mdev->req_lock); 32744ac4aadaSLars Ellenberg if (mdev->state.i != os.i) 3275b411b363SPhilipp Reisner goto retry; 3276b411b363SPhilipp Reisner clear_bit(CONSIDER_RESYNC, &mdev->flags); 3277b411b363SPhilipp Reisner ns.peer = peer_state.role; 3278b411b363SPhilipp Reisner ns.pdsk = real_peer_disk; 3279b411b363SPhilipp Reisner ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp); 
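	/* Note: os is the snapshot taken under req_lock further up; the
	 * "goto retry" above re-samples it whenever our state changed while
	 * the lock was dropped, so ns is always derived from a consistent
	 * local view before the peer-reported role, pdsk and isp bits are
	 * merged in and handed to _drbd_set_state() below. */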
32804ac4aadaSLars Ellenberg if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
3281b411b363SPhilipp Reisner ns.disk = mdev->new_state_tmp.disk;
32824ac4aadaSLars Ellenberg cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
32834ac4aadaSLars Ellenberg if (ns.pdsk == D_CONSISTENT && is_susp(ns) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
3284481c6f50SPhilipp Reisner test_bit(NEW_CUR_UUID, &mdev->flags)) {
32858554df1cSAndreas Gruenbacher /* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this
3286481c6f50SPhilipp Reisner for temporary network outages! */
3287481c6f50SPhilipp Reisner spin_unlock_irq(&mdev->req_lock);
3288481c6f50SPhilipp Reisner dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
3289481c6f50SPhilipp Reisner tl_clear(mdev);
3290481c6f50SPhilipp Reisner drbd_uuid_new_current(mdev);
3291481c6f50SPhilipp Reisner clear_bit(NEW_CUR_UUID, &mdev->flags);
3292481c6f50SPhilipp Reisner drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0));
329381e84650SAndreas Gruenbacher return false;
3294481c6f50SPhilipp Reisner }
329565d922c3SPhilipp Reisner rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
3296b411b363SPhilipp Reisner ns = mdev->state;
3297b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock);
3298b411b363SPhilipp Reisner
3299b411b363SPhilipp Reisner if (rv < SS_SUCCESS) {
3300b411b363SPhilipp Reisner drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
330181e84650SAndreas Gruenbacher return false;
3302b411b363SPhilipp Reisner }
3303b411b363SPhilipp Reisner
33044ac4aadaSLars Ellenberg if (os.conn > C_WF_REPORT_PARAMS) {
33054ac4aadaSLars Ellenberg if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
3306b411b363SPhilipp Reisner peer_state.disk != D_NEGOTIATING ) {
3307b411b363SPhilipp Reisner /* we want resync, peer has not yet decided to sync... */
3308b411b363SPhilipp Reisner /* Nowadays only used when forcing a node into primary role and
3309b411b363SPhilipp Reisner setting its disk to UpToDate with that */
3310b411b363SPhilipp Reisner drbd_send_uuids(mdev);
3311b411b363SPhilipp Reisner drbd_send_state(mdev);
3312b411b363SPhilipp Reisner }
3313b411b363SPhilipp Reisner }
3314b411b363SPhilipp Reisner
331589e58e75SPhilipp Reisner mdev->tconn->net_conf->want_lose = 0;
3316b411b363SPhilipp Reisner
3317b411b363SPhilipp Reisner drbd_md_sync(mdev); /* update connected indicator, la_size, ...
*/ 3318b411b363SPhilipp Reisner 331981e84650SAndreas Gruenbacher return true; 3320b411b363SPhilipp Reisner } 3321b411b363SPhilipp Reisner 332202918be2SPhilipp Reisner static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) 3323b411b363SPhilipp Reisner { 332402918be2SPhilipp Reisner struct p_rs_uuid *p = &mdev->data.rbuf.rs_uuid; 3325b411b363SPhilipp Reisner 3326b411b363SPhilipp Reisner wait_event(mdev->misc_wait, 3327b411b363SPhilipp Reisner mdev->state.conn == C_WF_SYNC_UUID || 3328c4752ef1SPhilipp Reisner mdev->state.conn == C_BEHIND || 3329b411b363SPhilipp Reisner mdev->state.conn < C_CONNECTED || 3330b411b363SPhilipp Reisner mdev->state.disk < D_NEGOTIATING); 3331b411b363SPhilipp Reisner 3332b411b363SPhilipp Reisner /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */ 3333b411b363SPhilipp Reisner 3334b411b363SPhilipp Reisner /* Here the _drbd_uuid_ functions are right, current should 3335b411b363SPhilipp Reisner _not_ be rotated into the history */ 3336b411b363SPhilipp Reisner if (get_ldev_if_state(mdev, D_NEGOTIATING)) { 3337b411b363SPhilipp Reisner _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid)); 3338b411b363SPhilipp Reisner _drbd_uuid_set(mdev, UI_BITMAP, 0UL); 3339b411b363SPhilipp Reisner 334062b0da3aSLars Ellenberg drbd_print_uuids(mdev, "updated sync uuid"); 3341b411b363SPhilipp Reisner drbd_start_resync(mdev, C_SYNC_TARGET); 3342b411b363SPhilipp Reisner 3343b411b363SPhilipp Reisner put_ldev(mdev); 3344b411b363SPhilipp Reisner } else 3345b411b363SPhilipp Reisner dev_err(DEV, "Ignoring SyncUUID packet!\n"); 3346b411b363SPhilipp Reisner 334781e84650SAndreas Gruenbacher return true; 3348b411b363SPhilipp Reisner } 3349b411b363SPhilipp Reisner 33502c46407dSAndreas Gruenbacher /** 33512c46407dSAndreas Gruenbacher * receive_bitmap_plain 33522c46407dSAndreas Gruenbacher * 33532c46407dSAndreas Gruenbacher * Return 0 when done, 1 when another iteration is needed, and a negative error 33542c46407dSAndreas Gruenbacher * code upon failure. 
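 *
 * A plain transfer simply streams the bitmap as raw little-endian long
 * words, at most BM_PACKET_WORDS of them per packet; each call merges one
 * such chunk via drbd_bm_merge_lel() and advances the word/bit offsets in
 * the transfer context.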
33552c46407dSAndreas Gruenbacher */ 33562c46407dSAndreas Gruenbacher static int 335702918be2SPhilipp Reisner receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size, 3358b411b363SPhilipp Reisner unsigned long *buffer, struct bm_xfer_ctx *c) 3359b411b363SPhilipp Reisner { 3360b411b363SPhilipp Reisner unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset); 3361b411b363SPhilipp Reisner unsigned want = num_words * sizeof(long); 33622c46407dSAndreas Gruenbacher int err; 3363b411b363SPhilipp Reisner 336402918be2SPhilipp Reisner if (want != data_size) { 336502918be2SPhilipp Reisner dev_err(DEV, "%s:want (%u) != data_size (%u)\n", __func__, want, data_size); 33662c46407dSAndreas Gruenbacher return -EIO; 3367b411b363SPhilipp Reisner } 3368b411b363SPhilipp Reisner if (want == 0) 33692c46407dSAndreas Gruenbacher return 0; 33702c46407dSAndreas Gruenbacher err = drbd_recv(mdev, buffer, want); 33712c46407dSAndreas Gruenbacher if (err != want) { 33722c46407dSAndreas Gruenbacher if (err >= 0) 33732c46407dSAndreas Gruenbacher err = -EIO; 33742c46407dSAndreas Gruenbacher return err; 33752c46407dSAndreas Gruenbacher } 3376b411b363SPhilipp Reisner 3377b411b363SPhilipp Reisner drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer); 3378b411b363SPhilipp Reisner 3379b411b363SPhilipp Reisner c->word_offset += num_words; 3380b411b363SPhilipp Reisner c->bit_offset = c->word_offset * BITS_PER_LONG; 3381b411b363SPhilipp Reisner if (c->bit_offset > c->bm_bits) 3382b411b363SPhilipp Reisner c->bit_offset = c->bm_bits; 3383b411b363SPhilipp Reisner 33842c46407dSAndreas Gruenbacher return 1; 3385b411b363SPhilipp Reisner } 3386b411b363SPhilipp Reisner 33872c46407dSAndreas Gruenbacher /** 33882c46407dSAndreas Gruenbacher * recv_bm_rle_bits 33892c46407dSAndreas Gruenbacher * 33902c46407dSAndreas Gruenbacher * Return 0 when done, 1 when another iteration is needed, and a negative error 33912c46407dSAndreas Gruenbacher * code upon failure. 
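 *
 * The payload is a sequence of VLI (variable length integer) encoded run
 * lengths; runs alternate between cleared and set bits, starting with the
 * polarity given by DCBP_get_start(), so only the "set" runs are applied
 * via _drbd_bm_set_bits() while the cleared runs merely advance the bit
 * offset.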
33922c46407dSAndreas Gruenbacher */ 33932c46407dSAndreas Gruenbacher static int 3394b411b363SPhilipp Reisner recv_bm_rle_bits(struct drbd_conf *mdev, 3395b411b363SPhilipp Reisner struct p_compressed_bm *p, 3396b411b363SPhilipp Reisner struct bm_xfer_ctx *c) 3397b411b363SPhilipp Reisner { 3398b411b363SPhilipp Reisner struct bitstream bs; 3399b411b363SPhilipp Reisner u64 look_ahead; 3400b411b363SPhilipp Reisner u64 rl; 3401b411b363SPhilipp Reisner u64 tmp; 3402b411b363SPhilipp Reisner unsigned long s = c->bit_offset; 3403b411b363SPhilipp Reisner unsigned long e; 3404004352faSLars Ellenberg int len = be16_to_cpu(p->head.length) - (sizeof(*p) - sizeof(p->head)); 3405b411b363SPhilipp Reisner int toggle = DCBP_get_start(p); 3406b411b363SPhilipp Reisner int have; 3407b411b363SPhilipp Reisner int bits; 3408b411b363SPhilipp Reisner 3409b411b363SPhilipp Reisner bitstream_init(&bs, p->code, len, DCBP_get_pad_bits(p)); 3410b411b363SPhilipp Reisner 3411b411b363SPhilipp Reisner bits = bitstream_get_bits(&bs, &look_ahead, 64); 3412b411b363SPhilipp Reisner if (bits < 0) 34132c46407dSAndreas Gruenbacher return -EIO; 3414b411b363SPhilipp Reisner 3415b411b363SPhilipp Reisner for (have = bits; have > 0; s += rl, toggle = !toggle) { 3416b411b363SPhilipp Reisner bits = vli_decode_bits(&rl, look_ahead); 3417b411b363SPhilipp Reisner if (bits <= 0) 34182c46407dSAndreas Gruenbacher return -EIO; 3419b411b363SPhilipp Reisner 3420b411b363SPhilipp Reisner if (toggle) { 3421b411b363SPhilipp Reisner e = s + rl -1; 3422b411b363SPhilipp Reisner if (e >= c->bm_bits) { 3423b411b363SPhilipp Reisner dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e); 34242c46407dSAndreas Gruenbacher return -EIO; 3425b411b363SPhilipp Reisner } 3426b411b363SPhilipp Reisner _drbd_bm_set_bits(mdev, s, e); 3427b411b363SPhilipp Reisner } 3428b411b363SPhilipp Reisner 3429b411b363SPhilipp Reisner if (have < bits) { 3430b411b363SPhilipp Reisner dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n", 3431b411b363SPhilipp Reisner have, bits, look_ahead, 3432b411b363SPhilipp Reisner (unsigned int)(bs.cur.b - p->code), 3433b411b363SPhilipp Reisner (unsigned int)bs.buf_len); 34342c46407dSAndreas Gruenbacher return -EIO; 3435b411b363SPhilipp Reisner } 3436b411b363SPhilipp Reisner look_ahead >>= bits; 3437b411b363SPhilipp Reisner have -= bits; 3438b411b363SPhilipp Reisner 3439b411b363SPhilipp Reisner bits = bitstream_get_bits(&bs, &tmp, 64 - have); 3440b411b363SPhilipp Reisner if (bits < 0) 34412c46407dSAndreas Gruenbacher return -EIO; 3442b411b363SPhilipp Reisner look_ahead |= tmp << have; 3443b411b363SPhilipp Reisner have += bits; 3444b411b363SPhilipp Reisner } 3445b411b363SPhilipp Reisner 3446b411b363SPhilipp Reisner c->bit_offset = s; 3447b411b363SPhilipp Reisner bm_xfer_ctx_bit_to_word_offset(c); 3448b411b363SPhilipp Reisner 34492c46407dSAndreas Gruenbacher return (s != c->bm_bits); 3450b411b363SPhilipp Reisner } 3451b411b363SPhilipp Reisner 34522c46407dSAndreas Gruenbacher /** 34532c46407dSAndreas Gruenbacher * decode_bitmap_c 34542c46407dSAndreas Gruenbacher * 34552c46407dSAndreas Gruenbacher * Return 0 when done, 1 when another iteration is needed, and a negative error 34562c46407dSAndreas Gruenbacher * code upon failure. 
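 *
 * RLE_VLI_Bits is the only compressed encoding accepted here; any other
 * code is treated as a protocol error.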
34572c46407dSAndreas Gruenbacher */
34582c46407dSAndreas Gruenbacher static int
3459b411b363SPhilipp Reisner decode_bitmap_c(struct drbd_conf *mdev,
3460b411b363SPhilipp Reisner struct p_compressed_bm *p,
3461b411b363SPhilipp Reisner struct bm_xfer_ctx *c)
3462b411b363SPhilipp Reisner {
3463b411b363SPhilipp Reisner if (DCBP_get_code(p) == RLE_VLI_Bits)
3464b411b363SPhilipp Reisner return recv_bm_rle_bits(mdev, p, c);
3465b411b363SPhilipp Reisner
3466b411b363SPhilipp Reisner /* other variants had been implemented for evaluation,
3467b411b363SPhilipp Reisner * but have been dropped as this one turned out to be "best"
3468b411b363SPhilipp Reisner * during all our tests. */
3469b411b363SPhilipp Reisner
3470b411b363SPhilipp Reisner dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
3471b411b363SPhilipp Reisner drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
34722c46407dSAndreas Gruenbacher return -EIO;
3473b411b363SPhilipp Reisner }
3474b411b363SPhilipp Reisner
3475b411b363SPhilipp Reisner void INFO_bm_xfer_stats(struct drbd_conf *mdev,
3476b411b363SPhilipp Reisner const char *direction, struct bm_xfer_ctx *c)
3477b411b363SPhilipp Reisner {
3478b411b363SPhilipp Reisner /* what would it take to transfer it "plaintext" */
34790b70a13dSPhilipp Reisner unsigned plain = sizeof(struct p_header80) *
3480b411b363SPhilipp Reisner ((c->bm_words+BM_PACKET_WORDS-1)/BM_PACKET_WORDS+1)
3481b411b363SPhilipp Reisner + c->bm_words * sizeof(long);
3482b411b363SPhilipp Reisner unsigned total = c->bytes[0] + c->bytes[1];
3483b411b363SPhilipp Reisner unsigned r;
3484b411b363SPhilipp Reisner
3485b411b363SPhilipp Reisner /* total can not be zero. but just in case: */
3486b411b363SPhilipp Reisner if (total == 0)
3487b411b363SPhilipp Reisner return;
3488b411b363SPhilipp Reisner
3489b411b363SPhilipp Reisner /* don't report if not compressed */
3490b411b363SPhilipp Reisner if (total >= plain)
3491b411b363SPhilipp Reisner return;
3492b411b363SPhilipp Reisner
3493b411b363SPhilipp Reisner /* total < plain. check for overflow, still */
3494b411b363SPhilipp Reisner r = (total > UINT_MAX/1000) ? (total / (plain/1000))
3495b411b363SPhilipp Reisner : (1000 * total / plain);
3496b411b363SPhilipp Reisner
3497b411b363SPhilipp Reisner if (r > 1000)
3498b411b363SPhilipp Reisner r = 1000;
3499b411b363SPhilipp Reisner
3500b411b363SPhilipp Reisner r = 1000 - r;
3501b411b363SPhilipp Reisner dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
3502b411b363SPhilipp Reisner "total %u; compression: %u.%u%%\n",
3503b411b363SPhilipp Reisner direction,
3504b411b363SPhilipp Reisner c->bytes[1], c->packets[1],
3505b411b363SPhilipp Reisner c->bytes[0], c->packets[0],
3506b411b363SPhilipp Reisner total, r/10, r % 10);
3507b411b363SPhilipp Reisner }
3508b411b363SPhilipp Reisner
3509b411b363SPhilipp Reisner /* Since we are processing the bitfield from lower addresses to higher,
3510b411b363SPhilipp Reisner it does not matter whether we process it in 32 bit chunks or 64 bit
3511b411b363SPhilipp Reisner chunks as long as it is little endian. (Understand it as byte stream,
3512b411b363SPhilipp Reisner beginning with the lowest byte...) If we used big endian
3513b411b363SPhilipp Reisner we would need to process it from the highest address to the lowest,
3514b411b363SPhilipp Reisner in order to be agnostic to the 32 vs 64 bits issue.
3515b411b363SPhilipp Reisner
3516b411b363SPhilipp Reisner returns 0 on failure, 1 if we successfully received it.
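   For example, bit 0 of the bitmap always travels in bit 0 of the first
   byte on the wire, regardless of whether the receiver merges it in 32 bit
   or 64 bit words.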
*/ 351702918be2SPhilipp Reisner static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) 3518b411b363SPhilipp Reisner { 3519b411b363SPhilipp Reisner struct bm_xfer_ctx c; 3520b411b363SPhilipp Reisner void *buffer; 35212c46407dSAndreas Gruenbacher int err; 352281e84650SAndreas Gruenbacher int ok = false; 352302918be2SPhilipp Reisner struct p_header80 *h = &mdev->data.rbuf.header.h80; 3524b411b363SPhilipp Reisner 352520ceb2b2SLars Ellenberg drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED); 352620ceb2b2SLars Ellenberg /* you are supposed to send additional out-of-sync information 352720ceb2b2SLars Ellenberg * if you actually set bits during this phase */ 3528b411b363SPhilipp Reisner 3529b411b363SPhilipp Reisner /* maybe we should use some per thread scratch page, 3530b411b363SPhilipp Reisner * and allocate that during initial device creation? */ 3531b411b363SPhilipp Reisner buffer = (unsigned long *) __get_free_page(GFP_NOIO); 3532b411b363SPhilipp Reisner if (!buffer) { 3533b411b363SPhilipp Reisner dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__); 3534b411b363SPhilipp Reisner goto out; 3535b411b363SPhilipp Reisner } 3536b411b363SPhilipp Reisner 3537b411b363SPhilipp Reisner c = (struct bm_xfer_ctx) { 3538b411b363SPhilipp Reisner .bm_bits = drbd_bm_bits(mdev), 3539b411b363SPhilipp Reisner .bm_words = drbd_bm_words(mdev), 3540b411b363SPhilipp Reisner }; 3541b411b363SPhilipp Reisner 35422c46407dSAndreas Gruenbacher for(;;) { 354302918be2SPhilipp Reisner if (cmd == P_BITMAP) { 35442c46407dSAndreas Gruenbacher err = receive_bitmap_plain(mdev, data_size, buffer, &c); 354502918be2SPhilipp Reisner } else if (cmd == P_COMPRESSED_BITMAP) { 3546b411b363SPhilipp Reisner /* MAYBE: sanity check that we speak proto >= 90, 3547b411b363SPhilipp Reisner * and the feature is enabled! 
*/ 3548b411b363SPhilipp Reisner struct p_compressed_bm *p; 3549b411b363SPhilipp Reisner 355002918be2SPhilipp Reisner if (data_size > BM_PACKET_PAYLOAD_BYTES) { 3551b411b363SPhilipp Reisner dev_err(DEV, "ReportCBitmap packet too large\n"); 3552b411b363SPhilipp Reisner goto out; 3553b411b363SPhilipp Reisner } 3554b411b363SPhilipp Reisner /* use the page buff */ 3555b411b363SPhilipp Reisner p = buffer; 3556b411b363SPhilipp Reisner memcpy(p, h, sizeof(*h)); 355702918be2SPhilipp Reisner if (drbd_recv(mdev, p->head.payload, data_size) != data_size) 3558b411b363SPhilipp Reisner goto out; 3559004352faSLars Ellenberg if (data_size <= (sizeof(*p) - sizeof(p->head))) { 3560004352faSLars Ellenberg dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", data_size); 356178fcbdaeSAndreas Gruenbacher goto out; 3562b411b363SPhilipp Reisner } 35632c46407dSAndreas Gruenbacher err = decode_bitmap_c(mdev, p, &c); 3564b411b363SPhilipp Reisner } else { 356502918be2SPhilipp Reisner dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", cmd); 3566b411b363SPhilipp Reisner goto out; 3567b411b363SPhilipp Reisner } 3568b411b363SPhilipp Reisner 356902918be2SPhilipp Reisner c.packets[cmd == P_BITMAP]++; 357002918be2SPhilipp Reisner c.bytes[cmd == P_BITMAP] += sizeof(struct p_header80) + data_size; 3571b411b363SPhilipp Reisner 35722c46407dSAndreas Gruenbacher if (err <= 0) { 35732c46407dSAndreas Gruenbacher if (err < 0) 35742c46407dSAndreas Gruenbacher goto out; 3575b411b363SPhilipp Reisner break; 35762c46407dSAndreas Gruenbacher } 357702918be2SPhilipp Reisner if (!drbd_recv_header(mdev, &cmd, &data_size)) 3578b411b363SPhilipp Reisner goto out; 35792c46407dSAndreas Gruenbacher } 3580b411b363SPhilipp Reisner 3581b411b363SPhilipp Reisner INFO_bm_xfer_stats(mdev, "receive", &c); 3582b411b363SPhilipp Reisner 3583b411b363SPhilipp Reisner if (mdev->state.conn == C_WF_BITMAP_T) { 3584de1f8e4aSAndreas Gruenbacher enum drbd_state_rv rv; 3585de1f8e4aSAndreas Gruenbacher 3586b411b363SPhilipp Reisner ok = !drbd_send_bitmap(mdev); 3587b411b363SPhilipp Reisner if (!ok) 3588b411b363SPhilipp Reisner goto out; 3589b411b363SPhilipp Reisner /* Omit CS_ORDERED with this state transition to avoid deadlocks. 
*/ 3590de1f8e4aSAndreas Gruenbacher rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE); 3591de1f8e4aSAndreas Gruenbacher D_ASSERT(rv == SS_SUCCESS); 3592b411b363SPhilipp Reisner } else if (mdev->state.conn != C_WF_BITMAP_S) { 3593b411b363SPhilipp Reisner /* admin may have requested C_DISCONNECTING, 3594b411b363SPhilipp Reisner * other threads may have noticed network errors */ 3595b411b363SPhilipp Reisner dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n", 3596b411b363SPhilipp Reisner drbd_conn_str(mdev->state.conn)); 3597b411b363SPhilipp Reisner } 3598b411b363SPhilipp Reisner 359981e84650SAndreas Gruenbacher ok = true; 3600b411b363SPhilipp Reisner out: 360120ceb2b2SLars Ellenberg drbd_bm_unlock(mdev); 3602b411b363SPhilipp Reisner if (ok && mdev->state.conn == C_WF_BITMAP_S) 3603b411b363SPhilipp Reisner drbd_start_resync(mdev, C_SYNC_SOURCE); 3604b411b363SPhilipp Reisner free_page((unsigned long) buffer); 3605b411b363SPhilipp Reisner return ok; 3606b411b363SPhilipp Reisner } 3607b411b363SPhilipp Reisner 360802918be2SPhilipp Reisner static int receive_skip(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) 3609b411b363SPhilipp Reisner { 3610b411b363SPhilipp Reisner /* TODO zero copy sink :) */ 3611b411b363SPhilipp Reisner static char sink[128]; 3612b411b363SPhilipp Reisner int size, want, r; 3613b411b363SPhilipp Reisner 3614b411b363SPhilipp Reisner dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n", 361502918be2SPhilipp Reisner cmd, data_size); 3616b411b363SPhilipp Reisner 361702918be2SPhilipp Reisner size = data_size; 3618b411b363SPhilipp Reisner while (size > 0) { 3619b411b363SPhilipp Reisner want = min_t(int, size, sizeof(sink)); 3620b411b363SPhilipp Reisner r = drbd_recv(mdev, sink, want); 3621841ce241SAndreas Gruenbacher if (!expect(r > 0)) 3622841ce241SAndreas Gruenbacher break; 3623b411b363SPhilipp Reisner size -= r; 3624b411b363SPhilipp Reisner } 3625b411b363SPhilipp Reisner return size == 0; 3626b411b363SPhilipp Reisner } 3627b411b363SPhilipp Reisner 362802918be2SPhilipp Reisner static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) 3629b411b363SPhilipp Reisner { 3630b411b363SPhilipp Reisner /* Make sure we've acked all the TCP data associated 3631b411b363SPhilipp Reisner * with the data requests being unplugged */ 3632b411b363SPhilipp Reisner drbd_tcp_quickack(mdev->data.socket); 3633b411b363SPhilipp Reisner 363481e84650SAndreas Gruenbacher return true; 3635b411b363SPhilipp Reisner } 3636b411b363SPhilipp Reisner 363773a01a18SPhilipp Reisner static int receive_out_of_sync(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) 363873a01a18SPhilipp Reisner { 363973a01a18SPhilipp Reisner struct p_block_desc *p = &mdev->data.rbuf.block_desc; 364073a01a18SPhilipp Reisner 3641f735e363SLars Ellenberg switch (mdev->state.conn) { 3642f735e363SLars Ellenberg case C_WF_SYNC_UUID: 3643f735e363SLars Ellenberg case C_WF_BITMAP_T: 3644f735e363SLars Ellenberg case C_BEHIND: 3645f735e363SLars Ellenberg break; 3646f735e363SLars Ellenberg default: 3647f735e363SLars Ellenberg dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n", 3648f735e363SLars Ellenberg drbd_conn_str(mdev->state.conn)); 3649f735e363SLars Ellenberg } 3650f735e363SLars Ellenberg 365173a01a18SPhilipp Reisner drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize)); 365273a01a18SPhilipp Reisner 365381e84650SAndreas Gruenbacher return true; 365473a01a18SPhilipp 
Reisner }
365573a01a18SPhilipp Reisner
365602918be2SPhilipp Reisner typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, enum drbd_packets cmd, unsigned int to_receive);
3657b411b363SPhilipp Reisner
365802918be2SPhilipp Reisner struct data_cmd {
365902918be2SPhilipp Reisner int expect_payload;
366002918be2SPhilipp Reisner size_t pkt_size;
366102918be2SPhilipp Reisner drbd_cmd_handler_f function;
3662b411b363SPhilipp Reisner };
3663b411b363SPhilipp Reisner
366402918be2SPhilipp Reisner static struct data_cmd drbd_cmd_handler[] = {
366502918be2SPhilipp Reisner [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
366602918be2SPhilipp Reisner [P_DATA_REPLY] = { 1, sizeof(struct p_data), receive_DataReply },
366702918be2SPhilipp Reisner [P_RS_DATA_REPLY] = { 1, sizeof(struct p_data), receive_RSDataReply } ,
366802918be2SPhilipp Reisner [P_BARRIER] = { 0, sizeof(struct p_barrier), receive_Barrier } ,
366902918be2SPhilipp Reisner [P_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap } ,
367002918be2SPhilipp Reisner [P_COMPRESSED_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap } ,
367102918be2SPhilipp Reisner [P_UNPLUG_REMOTE] = { 0, sizeof(struct p_header80), receive_UnplugRemote },
367202918be2SPhilipp Reisner [P_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
367302918be2SPhilipp Reisner [P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
367402918be2SPhilipp Reisner [P_SYNC_PARAM] = { 1, sizeof(struct p_header80), receive_SyncParam },
367502918be2SPhilipp Reisner [P_SYNC_PARAM89] = { 1, sizeof(struct p_header80), receive_SyncParam },
367602918be2SPhilipp Reisner [P_PROTOCOL] = { 1, sizeof(struct p_protocol), receive_protocol },
367702918be2SPhilipp Reisner [P_UUIDS] = { 0, sizeof(struct p_uuids), receive_uuids },
367802918be2SPhilipp Reisner [P_SIZES] = { 0, sizeof(struct p_sizes), receive_sizes },
367902918be2SPhilipp Reisner [P_STATE] = { 0, sizeof(struct p_state), receive_state },
368002918be2SPhilipp Reisner [P_STATE_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_state },
368102918be2SPhilipp Reisner [P_SYNC_UUID] = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
368202918be2SPhilipp Reisner [P_OV_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
368302918be2SPhilipp Reisner [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest },
368402918be2SPhilipp Reisner [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
368502918be2SPhilipp Reisner [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip },
368673a01a18SPhilipp Reisner [P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
368702918be2SPhilipp Reisner /* anything missing from this table is in
368802918be2SPhilipp Reisner * the asender_tbl, see get_asender_cmd */
368902918be2SPhilipp Reisner [P_MAX_CMD] = { 0, 0, NULL },
369002918be2SPhilipp Reisner };
369102918be2SPhilipp Reisner
369202918be2SPhilipp Reisner /* All handler functions that expect a sub-header get that sub-header in
369302918be2SPhilipp Reisner mdev->data.rbuf.header.head.payload.
369402918be2SPhilipp Reisner
369502918be2SPhilipp Reisner Usually the callback can find the usual p_header in mdev->data.rbuf.header.head,
369602918be2SPhilipp Reisner but it may not rely on that, since there is also p_header95!
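
   Roughly, drbdd() below drives this table as follows (illustrative
   sketch, not a literal copy of the code):

     drbd_recv_header(mdev, &cmd, &packet_size);
     shs = drbd_cmd_handler[cmd].pkt_size - sizeof(union p_header);
     drbd_recv(mdev, &header->h80.payload, shs);
     drbd_cmd_handler[cmd].function(mdev, cmd, packet_size - shs);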
369702918be2SPhilipp Reisner */ 3698b411b363SPhilipp Reisner 3699b411b363SPhilipp Reisner static void drbdd(struct drbd_conf *mdev) 3700b411b363SPhilipp Reisner { 370102918be2SPhilipp Reisner union p_header *header = &mdev->data.rbuf.header; 370202918be2SPhilipp Reisner unsigned int packet_size; 370302918be2SPhilipp Reisner enum drbd_packets cmd; 370402918be2SPhilipp Reisner size_t shs; /* sub header size */ 370502918be2SPhilipp Reisner int rv; 3706b411b363SPhilipp Reisner 3707e77a0a5cSAndreas Gruenbacher while (get_t_state(&mdev->receiver) == RUNNING) { 3708b411b363SPhilipp Reisner drbd_thread_current_set_cpu(mdev); 370902918be2SPhilipp Reisner if (!drbd_recv_header(mdev, &cmd, &packet_size)) 371002918be2SPhilipp Reisner goto err_out; 371102918be2SPhilipp Reisner 371202918be2SPhilipp Reisner if (unlikely(cmd >= P_MAX_CMD || !drbd_cmd_handler[cmd].function)) { 371302918be2SPhilipp Reisner dev_err(DEV, "unknown packet type %d, l: %d!\n", cmd, packet_size); 371402918be2SPhilipp Reisner goto err_out; 37150b33a916SLars Ellenberg } 3716b411b363SPhilipp Reisner 371702918be2SPhilipp Reisner shs = drbd_cmd_handler[cmd].pkt_size - sizeof(union p_header); 3718c13f7e1aSLars Ellenberg if (packet_size - shs > 0 && !drbd_cmd_handler[cmd].expect_payload) { 3719c13f7e1aSLars Ellenberg dev_err(DEV, "No payload expected %s l:%d\n", cmdname(cmd), packet_size); 3720c13f7e1aSLars Ellenberg goto err_out; 3721c13f7e1aSLars Ellenberg } 3722c13f7e1aSLars Ellenberg 3723c13f7e1aSLars Ellenberg if (shs) { 372402918be2SPhilipp Reisner rv = drbd_recv(mdev, &header->h80.payload, shs); 372502918be2SPhilipp Reisner if (unlikely(rv != shs)) { 37260ddc5549SLars Ellenberg if (!signal_pending(current)) 37270ddc5549SLars Ellenberg dev_warn(DEV, "short read while reading sub header: rv=%d\n", rv); 372802918be2SPhilipp Reisner goto err_out; 3729b411b363SPhilipp Reisner } 373002918be2SPhilipp Reisner } 373102918be2SPhilipp Reisner 373202918be2SPhilipp Reisner rv = drbd_cmd_handler[cmd].function(mdev, cmd, packet_size - shs); 373302918be2SPhilipp Reisner 373402918be2SPhilipp Reisner if (unlikely(!rv)) { 3735b411b363SPhilipp Reisner dev_err(DEV, "error receiving %s, l: %d!\n", 373602918be2SPhilipp Reisner cmdname(cmd), packet_size); 373702918be2SPhilipp Reisner goto err_out; 3738b411b363SPhilipp Reisner } 3739b411b363SPhilipp Reisner } 374002918be2SPhilipp Reisner 374102918be2SPhilipp Reisner if (0) { 374202918be2SPhilipp Reisner err_out: 3743b411b363SPhilipp Reisner drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR)); 3744b411b363SPhilipp Reisner } 3745856c50c7SLars Ellenberg /* If we leave here, we probably want to update at least the 3746856c50c7SLars Ellenberg * "Connected" indicator on stable storage. Do so explicitly here. 
*/ 3747856c50c7SLars Ellenberg drbd_md_sync(mdev); 3748b411b363SPhilipp Reisner } 3749b411b363SPhilipp Reisner 3750b411b363SPhilipp Reisner void drbd_flush_workqueue(struct drbd_conf *mdev) 3751b411b363SPhilipp Reisner { 3752b411b363SPhilipp Reisner struct drbd_wq_barrier barr; 3753b411b363SPhilipp Reisner 3754b411b363SPhilipp Reisner barr.w.cb = w_prev_work_done; 3755b411b363SPhilipp Reisner init_completion(&barr.done); 3756b411b363SPhilipp Reisner drbd_queue_work(&mdev->data.work, &barr.w); 3757b411b363SPhilipp Reisner wait_for_completion(&barr.done); 3758b411b363SPhilipp Reisner } 3759b411b363SPhilipp Reisner 3760b411b363SPhilipp Reisner static void drbd_disconnect(struct drbd_conf *mdev) 3761b411b363SPhilipp Reisner { 3762b411b363SPhilipp Reisner enum drbd_fencing_p fp; 3763b411b363SPhilipp Reisner union drbd_state os, ns; 3764b411b363SPhilipp Reisner int rv = SS_UNKNOWN_ERROR; 3765b411b363SPhilipp Reisner unsigned int i; 3766b411b363SPhilipp Reisner 3767b411b363SPhilipp Reisner if (mdev->state.conn == C_STANDALONE) 3768b411b363SPhilipp Reisner return; 3769b411b363SPhilipp Reisner 3770b411b363SPhilipp Reisner /* asender does not clean up anything. it must not interfere, either */ 3771b411b363SPhilipp Reisner drbd_thread_stop(&mdev->asender); 3772b411b363SPhilipp Reisner drbd_free_sock(mdev); 3773b411b363SPhilipp Reisner 377485719573SPhilipp Reisner /* wait for current activity to cease. */ 3775b411b363SPhilipp Reisner spin_lock_irq(&mdev->req_lock); 3776b411b363SPhilipp Reisner _drbd_wait_ee_list_empty(mdev, &mdev->active_ee); 3777b411b363SPhilipp Reisner _drbd_wait_ee_list_empty(mdev, &mdev->sync_ee); 3778b411b363SPhilipp Reisner _drbd_wait_ee_list_empty(mdev, &mdev->read_ee); 3779b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 3780b411b363SPhilipp Reisner 3781b411b363SPhilipp Reisner /* We do not have data structures that would allow us to 3782b411b363SPhilipp Reisner * get the rs_pending_cnt down to 0 again. 3783b411b363SPhilipp Reisner * * On C_SYNC_TARGET we do not have any data structures describing 3784b411b363SPhilipp Reisner * the pending RSDataRequest's we have sent. 3785b411b363SPhilipp Reisner * * On C_SYNC_SOURCE there is no data structure that tracks 3786b411b363SPhilipp Reisner * the P_RS_DATA_REPLY blocks that we sent to the SyncTarget. 3787b411b363SPhilipp Reisner * And no, it is not the sum of the reference counts in the 3788b411b363SPhilipp Reisner * resync_LRU. The resync_LRU tracks the whole operation including 3789b411b363SPhilipp Reisner * the disk-IO, while the rs_pending_cnt only tracks the blocks 3790b411b363SPhilipp Reisner * on the fly. */ 3791b411b363SPhilipp Reisner drbd_rs_cancel_all(mdev); 3792b411b363SPhilipp Reisner mdev->rs_total = 0; 3793b411b363SPhilipp Reisner mdev->rs_failed = 0; 3794b411b363SPhilipp Reisner atomic_set(&mdev->rs_pending_cnt, 0); 3795b411b363SPhilipp Reisner wake_up(&mdev->misc_wait); 3796b411b363SPhilipp Reisner 37977fde2be9SPhilipp Reisner del_timer(&mdev->request_timer); 37987fde2be9SPhilipp Reisner 3799b411b363SPhilipp Reisner /* make sure syncer is stopped and w_resume_next_sg queued */ 3800b411b363SPhilipp Reisner del_timer_sync(&mdev->resync_timer); 3801b411b363SPhilipp Reisner resync_timer_fn((unsigned long)mdev); 3802b411b363SPhilipp Reisner 3803b411b363SPhilipp Reisner /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier, 3804b411b363SPhilipp Reisner * w_make_resync_request etc. 
which may still be on the worker queue 3805b411b363SPhilipp Reisner * to be "canceled" */ 3806b411b363SPhilipp Reisner drbd_flush_workqueue(mdev); 3807b411b363SPhilipp Reisner 3808b411b363SPhilipp Reisner /* This also does reclaim_net_ee(). If we do this too early, we might 3809b411b363SPhilipp Reisner * miss some resync ee and pages.*/ 3810b411b363SPhilipp Reisner drbd_process_done_ee(mdev); 3811b411b363SPhilipp Reisner 3812b411b363SPhilipp Reisner kfree(mdev->p_uuid); 3813b411b363SPhilipp Reisner mdev->p_uuid = NULL; 3814b411b363SPhilipp Reisner 3815fb22c402SPhilipp Reisner if (!is_susp(mdev->state)) 3816b411b363SPhilipp Reisner tl_clear(mdev); 3817b411b363SPhilipp Reisner 3818b411b363SPhilipp Reisner dev_info(DEV, "Connection closed\n"); 3819b411b363SPhilipp Reisner 3820b411b363SPhilipp Reisner drbd_md_sync(mdev); 3821b411b363SPhilipp Reisner 3822b411b363SPhilipp Reisner fp = FP_DONT_CARE; 3823b411b363SPhilipp Reisner if (get_ldev(mdev)) { 3824b411b363SPhilipp Reisner fp = mdev->ldev->dc.fencing; 3825b411b363SPhilipp Reisner put_ldev(mdev); 3826b411b363SPhilipp Reisner } 3827b411b363SPhilipp Reisner 382887f7be4cSPhilipp Reisner if (mdev->state.role == R_PRIMARY && fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN) 382987f7be4cSPhilipp Reisner drbd_try_outdate_peer_async(mdev); 3830b411b363SPhilipp Reisner 3831b411b363SPhilipp Reisner spin_lock_irq(&mdev->req_lock); 3832b411b363SPhilipp Reisner os = mdev->state; 3833b411b363SPhilipp Reisner if (os.conn >= C_UNCONNECTED) { 3834b411b363SPhilipp Reisner /* Do not restart in case we are C_DISCONNECTING */ 3835b411b363SPhilipp Reisner ns = os; 3836b411b363SPhilipp Reisner ns.conn = C_UNCONNECTED; 3837b411b363SPhilipp Reisner rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL); 3838b411b363SPhilipp Reisner } 3839b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 3840b411b363SPhilipp Reisner 3841b411b363SPhilipp Reisner if (os.conn == C_DISCONNECTING) { 3842b2fb6dbeSPhilipp Reisner wait_event(mdev->tconn->net_cnt_wait, atomic_read(&mdev->tconn->net_cnt) == 0); 3843b411b363SPhilipp Reisner 3844b411b363SPhilipp Reisner crypto_free_hash(mdev->cram_hmac_tfm); 3845b411b363SPhilipp Reisner mdev->cram_hmac_tfm = NULL; 3846b411b363SPhilipp Reisner 384789e58e75SPhilipp Reisner kfree(mdev->tconn->net_conf); 384889e58e75SPhilipp Reisner mdev->tconn->net_conf = NULL; 3849b411b363SPhilipp Reisner drbd_request_state(mdev, NS(conn, C_STANDALONE)); 3850b411b363SPhilipp Reisner } 3851b411b363SPhilipp Reisner 385220ceb2b2SLars Ellenberg /* serialize with bitmap writeout triggered by the state change, 385320ceb2b2SLars Ellenberg * if any. */ 385420ceb2b2SLars Ellenberg wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags)); 385520ceb2b2SLars Ellenberg 3856b411b363SPhilipp Reisner /* tcp_close and release of sendpage pages can be deferred. I don't 3857b411b363SPhilipp Reisner * want to use SO_LINGER, because apparently it can be deferred for 3858b411b363SPhilipp Reisner * more than 20 seconds (longest time I checked). 3859b411b363SPhilipp Reisner * 3860b411b363SPhilipp Reisner * Actually we don't care for exactly when the network stack does its 3861b411b363SPhilipp Reisner * put_page(), but release our reference on these pages right here. 
3862b411b363SPhilipp Reisner */ 3863b411b363SPhilipp Reisner i = drbd_release_ee(mdev, &mdev->net_ee); 3864b411b363SPhilipp Reisner if (i) 3865b411b363SPhilipp Reisner dev_info(DEV, "net_ee not empty, killed %u entries\n", i); 3866435f0740SLars Ellenberg i = atomic_read(&mdev->pp_in_use_by_net); 3867435f0740SLars Ellenberg if (i) 3868435f0740SLars Ellenberg dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i); 3869b411b363SPhilipp Reisner i = atomic_read(&mdev->pp_in_use); 3870b411b363SPhilipp Reisner if (i) 387145bb912bSLars Ellenberg dev_info(DEV, "pp_in_use = %d, expected 0\n", i); 3872b411b363SPhilipp Reisner 3873b411b363SPhilipp Reisner D_ASSERT(list_empty(&mdev->read_ee)); 3874b411b363SPhilipp Reisner D_ASSERT(list_empty(&mdev->active_ee)); 3875b411b363SPhilipp Reisner D_ASSERT(list_empty(&mdev->sync_ee)); 3876b411b363SPhilipp Reisner D_ASSERT(list_empty(&mdev->done_ee)); 3877b411b363SPhilipp Reisner 3878b411b363SPhilipp Reisner /* ok, no more ee's on the fly, it is safe to reset the epoch_size */ 3879b411b363SPhilipp Reisner atomic_set(&mdev->current_epoch->epoch_size, 0); 3880b411b363SPhilipp Reisner D_ASSERT(list_empty(&mdev->current_epoch->list)); 3881b411b363SPhilipp Reisner } 3882b411b363SPhilipp Reisner 3883b411b363SPhilipp Reisner /* 3884b411b363SPhilipp Reisner * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version 3885b411b363SPhilipp Reisner * we can agree on is stored in agreed_pro_version. 3886b411b363SPhilipp Reisner * 3887b411b363SPhilipp Reisner * feature flags and the reserved array should be enough room for future 3888b411b363SPhilipp Reisner * enhancements of the handshake protocol, and possible plugins... 3889b411b363SPhilipp Reisner * 3890b411b363SPhilipp Reisner * for now, they are expected to be zero, but ignored. 3891b411b363SPhilipp Reisner */ 3892b411b363SPhilipp Reisner static int drbd_send_handshake(struct drbd_conf *mdev) 3893b411b363SPhilipp Reisner { 3894b411b363SPhilipp Reisner /* ASSERT current == mdev->receiver ... */ 3895b411b363SPhilipp Reisner struct p_handshake *p = &mdev->data.sbuf.handshake; 3896b411b363SPhilipp Reisner int ok; 3897b411b363SPhilipp Reisner 3898b411b363SPhilipp Reisner if (mutex_lock_interruptible(&mdev->data.mutex)) { 3899b411b363SPhilipp Reisner dev_err(DEV, "interrupted during initial handshake\n"); 3900b411b363SPhilipp Reisner return 0; /* interrupted. not ok. 
*/ 3901b411b363SPhilipp Reisner } 3902b411b363SPhilipp Reisner 3903b411b363SPhilipp Reisner if (mdev->data.socket == NULL) { 3904b411b363SPhilipp Reisner mutex_unlock(&mdev->data.mutex); 3905b411b363SPhilipp Reisner return 0; 3906b411b363SPhilipp Reisner } 3907b411b363SPhilipp Reisner 3908b411b363SPhilipp Reisner memset(p, 0, sizeof(*p)); 3909b411b363SPhilipp Reisner p->protocol_min = cpu_to_be32(PRO_VERSION_MIN); 3910b411b363SPhilipp Reisner p->protocol_max = cpu_to_be32(PRO_VERSION_MAX); 3911b411b363SPhilipp Reisner ok = _drbd_send_cmd( mdev, mdev->data.socket, P_HAND_SHAKE, 39120b70a13dSPhilipp Reisner (struct p_header80 *)p, sizeof(*p), 0 ); 3913b411b363SPhilipp Reisner mutex_unlock(&mdev->data.mutex); 3914b411b363SPhilipp Reisner return ok; 3915b411b363SPhilipp Reisner } 3916b411b363SPhilipp Reisner 3917b411b363SPhilipp Reisner /* 3918b411b363SPhilipp Reisner * return values: 3919b411b363SPhilipp Reisner * 1 yes, we have a valid connection 3920b411b363SPhilipp Reisner * 0 oops, did not work out, please try again 3921b411b363SPhilipp Reisner * -1 peer talks different language, 3922b411b363SPhilipp Reisner * no point in trying again, please go standalone. 3923b411b363SPhilipp Reisner */ 3924b411b363SPhilipp Reisner static int drbd_do_handshake(struct drbd_conf *mdev) 3925b411b363SPhilipp Reisner { 3926b411b363SPhilipp Reisner /* ASSERT current == mdev->receiver ... */ 3927b411b363SPhilipp Reisner struct p_handshake *p = &mdev->data.rbuf.handshake; 392802918be2SPhilipp Reisner const int expect = sizeof(struct p_handshake) - sizeof(struct p_header80); 392902918be2SPhilipp Reisner unsigned int length; 393002918be2SPhilipp Reisner enum drbd_packets cmd; 3931b411b363SPhilipp Reisner int rv; 3932b411b363SPhilipp Reisner 3933b411b363SPhilipp Reisner rv = drbd_send_handshake(mdev); 3934b411b363SPhilipp Reisner if (!rv) 3935b411b363SPhilipp Reisner return 0; 3936b411b363SPhilipp Reisner 393702918be2SPhilipp Reisner rv = drbd_recv_header(mdev, &cmd, &length); 3938b411b363SPhilipp Reisner if (!rv) 3939b411b363SPhilipp Reisner return 0; 3940b411b363SPhilipp Reisner 394102918be2SPhilipp Reisner if (cmd != P_HAND_SHAKE) { 3942b411b363SPhilipp Reisner dev_err(DEV, "expected HandShake packet, received: %s (0x%04x)\n", 394302918be2SPhilipp Reisner cmdname(cmd), cmd); 3944b411b363SPhilipp Reisner return -1; 3945b411b363SPhilipp Reisner } 3946b411b363SPhilipp Reisner 394702918be2SPhilipp Reisner if (length != expect) { 3948b411b363SPhilipp Reisner dev_err(DEV, "expected HandShake length: %u, received: %u\n", 394902918be2SPhilipp Reisner expect, length); 3950b411b363SPhilipp Reisner return -1; 3951b411b363SPhilipp Reisner } 3952b411b363SPhilipp Reisner 3953b411b363SPhilipp Reisner rv = drbd_recv(mdev, &p->head.payload, expect); 3954b411b363SPhilipp Reisner 3955b411b363SPhilipp Reisner if (rv != expect) { 39560ddc5549SLars Ellenberg if (!signal_pending(current)) 39570ddc5549SLars Ellenberg dev_warn(DEV, "short read receiving handshake packet: l=%u\n", rv); 3958b411b363SPhilipp Reisner return 0; 3959b411b363SPhilipp Reisner } 3960b411b363SPhilipp Reisner 3961b411b363SPhilipp Reisner p->protocol_min = be32_to_cpu(p->protocol_min); 3962b411b363SPhilipp Reisner p->protocol_max = be32_to_cpu(p->protocol_max); 3963b411b363SPhilipp Reisner if (p->protocol_max == 0) 3964b411b363SPhilipp Reisner p->protocol_max = p->protocol_min; 3965b411b363SPhilipp Reisner 3966b411b363SPhilipp Reisner if (PRO_VERSION_MAX < p->protocol_min || 3967b411b363SPhilipp Reisner PRO_VERSION_MIN > p->protocol_max) 3968b411b363SPhilipp 
Reisner goto incompat; 3969b411b363SPhilipp Reisner 3970b411b363SPhilipp Reisner mdev->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max); 3971b411b363SPhilipp Reisner 3972b411b363SPhilipp Reisner dev_info(DEV, "Handshake successful: " 3973b411b363SPhilipp Reisner "Agreed network protocol version %d\n", mdev->agreed_pro_version); 3974b411b363SPhilipp Reisner 3975b411b363SPhilipp Reisner return 1; 3976b411b363SPhilipp Reisner 3977b411b363SPhilipp Reisner incompat: 3978b411b363SPhilipp Reisner dev_err(DEV, "incompatible DRBD dialects: " 3979b411b363SPhilipp Reisner "I support %d-%d, peer supports %d-%d\n", 3980b411b363SPhilipp Reisner PRO_VERSION_MIN, PRO_VERSION_MAX, 3981b411b363SPhilipp Reisner p->protocol_min, p->protocol_max); 3982b411b363SPhilipp Reisner return -1; 3983b411b363SPhilipp Reisner } 3984b411b363SPhilipp Reisner 3985b411b363SPhilipp Reisner #if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE) 3986b411b363SPhilipp Reisner static int drbd_do_auth(struct drbd_conf *mdev) 3987b411b363SPhilipp Reisner { 3988b411b363SPhilipp Reisner dev_err(DEV, "This kernel was build without CONFIG_CRYPTO_HMAC.\n"); 3989b411b363SPhilipp Reisner dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n"); 3990b10d96cbSJohannes Thoma return -1; 3991b411b363SPhilipp Reisner } 3992b411b363SPhilipp Reisner #else 3993b411b363SPhilipp Reisner #define CHALLENGE_LEN 64 3994b10d96cbSJohannes Thoma 3995b10d96cbSJohannes Thoma /* Return value: 3996b10d96cbSJohannes Thoma 1 - auth succeeded, 3997b10d96cbSJohannes Thoma 0 - failed, try again (network error), 3998b10d96cbSJohannes Thoma -1 - auth failed, don't try again. 3999b10d96cbSJohannes Thoma */ 4000b10d96cbSJohannes Thoma 4001b411b363SPhilipp Reisner static int drbd_do_auth(struct drbd_conf *mdev) 4002b411b363SPhilipp Reisner { 4003b411b363SPhilipp Reisner char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... 
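    Challenge/response outline: send my_challenge, receive the peer's
    challenge, HMAC the peer's challenge with the shared secret and send
    that back, then compare the peer's answer against our own HMAC of
    my_challenge.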
*/ 4004b411b363SPhilipp Reisner struct scatterlist sg; 4005b411b363SPhilipp Reisner char *response = NULL; 4006b411b363SPhilipp Reisner char *right_response = NULL; 4007b411b363SPhilipp Reisner char *peers_ch = NULL; 400889e58e75SPhilipp Reisner unsigned int key_len = strlen(mdev->tconn->net_conf->shared_secret); 4009b411b363SPhilipp Reisner unsigned int resp_size; 4010b411b363SPhilipp Reisner struct hash_desc desc; 401102918be2SPhilipp Reisner enum drbd_packets cmd; 401202918be2SPhilipp Reisner unsigned int length; 4013b411b363SPhilipp Reisner int rv; 4014b411b363SPhilipp Reisner 4015b411b363SPhilipp Reisner desc.tfm = mdev->cram_hmac_tfm; 4016b411b363SPhilipp Reisner desc.flags = 0; 4017b411b363SPhilipp Reisner 4018b411b363SPhilipp Reisner rv = crypto_hash_setkey(mdev->cram_hmac_tfm, 401989e58e75SPhilipp Reisner (u8 *)mdev->tconn->net_conf->shared_secret, key_len); 4020b411b363SPhilipp Reisner if (rv) { 4021b411b363SPhilipp Reisner dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv); 4022b10d96cbSJohannes Thoma rv = -1; 4023b411b363SPhilipp Reisner goto fail; 4024b411b363SPhilipp Reisner } 4025b411b363SPhilipp Reisner 4026b411b363SPhilipp Reisner get_random_bytes(my_challenge, CHALLENGE_LEN); 4027b411b363SPhilipp Reisner 4028b411b363SPhilipp Reisner rv = drbd_send_cmd2(mdev, P_AUTH_CHALLENGE, my_challenge, CHALLENGE_LEN); 4029b411b363SPhilipp Reisner if (!rv) 4030b411b363SPhilipp Reisner goto fail; 4031b411b363SPhilipp Reisner 403202918be2SPhilipp Reisner rv = drbd_recv_header(mdev, &cmd, &length); 4033b411b363SPhilipp Reisner if (!rv) 4034b411b363SPhilipp Reisner goto fail; 4035b411b363SPhilipp Reisner 403602918be2SPhilipp Reisner if (cmd != P_AUTH_CHALLENGE) { 4037b411b363SPhilipp Reisner dev_err(DEV, "expected AuthChallenge packet, received: %s (0x%04x)\n", 403802918be2SPhilipp Reisner cmdname(cmd), cmd); 4039b411b363SPhilipp Reisner rv = 0; 4040b411b363SPhilipp Reisner goto fail; 4041b411b363SPhilipp Reisner } 4042b411b363SPhilipp Reisner 404302918be2SPhilipp Reisner if (length > CHALLENGE_LEN * 2) { 4044b411b363SPhilipp Reisner dev_err(DEV, "expected AuthChallenge payload too big.\n"); 4045b10d96cbSJohannes Thoma rv = -1; 4046b411b363SPhilipp Reisner goto fail; 4047b411b363SPhilipp Reisner } 4048b411b363SPhilipp Reisner 404902918be2SPhilipp Reisner peers_ch = kmalloc(length, GFP_NOIO); 4050b411b363SPhilipp Reisner if (peers_ch == NULL) { 4051b411b363SPhilipp Reisner dev_err(DEV, "kmalloc of peers_ch failed\n"); 4052b10d96cbSJohannes Thoma rv = -1; 4053b411b363SPhilipp Reisner goto fail; 4054b411b363SPhilipp Reisner } 4055b411b363SPhilipp Reisner 405602918be2SPhilipp Reisner rv = drbd_recv(mdev, peers_ch, length); 4057b411b363SPhilipp Reisner 405802918be2SPhilipp Reisner if (rv != length) { 40590ddc5549SLars Ellenberg if (!signal_pending(current)) 40600ddc5549SLars Ellenberg dev_warn(DEV, "short read AuthChallenge: l=%u\n", rv); 4061b411b363SPhilipp Reisner rv = 0; 4062b411b363SPhilipp Reisner goto fail; 4063b411b363SPhilipp Reisner } 4064b411b363SPhilipp Reisner 4065b411b363SPhilipp Reisner resp_size = crypto_hash_digestsize(mdev->cram_hmac_tfm); 4066b411b363SPhilipp Reisner response = kmalloc(resp_size, GFP_NOIO); 4067b411b363SPhilipp Reisner if (response == NULL) { 4068b411b363SPhilipp Reisner dev_err(DEV, "kmalloc of response failed\n"); 4069b10d96cbSJohannes Thoma rv = -1; 4070b411b363SPhilipp Reisner goto fail; 4071b411b363SPhilipp Reisner } 4072b411b363SPhilipp Reisner 4073b411b363SPhilipp Reisner sg_init_table(&sg, 1); 407402918be2SPhilipp Reisner sg_set_buf(&sg, 
peers_ch, length); 4075b411b363SPhilipp Reisner 4076b411b363SPhilipp Reisner rv = crypto_hash_digest(&desc, &sg, sg.length, response); 4077b411b363SPhilipp Reisner if (rv) { 4078b411b363SPhilipp Reisner dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv); 4079b10d96cbSJohannes Thoma rv = -1; 4080b411b363SPhilipp Reisner goto fail; 4081b411b363SPhilipp Reisner } 4082b411b363SPhilipp Reisner 4083b411b363SPhilipp Reisner rv = drbd_send_cmd2(mdev, P_AUTH_RESPONSE, response, resp_size); 4084b411b363SPhilipp Reisner if (!rv) 4085b411b363SPhilipp Reisner goto fail; 4086b411b363SPhilipp Reisner 408702918be2SPhilipp Reisner rv = drbd_recv_header(mdev, &cmd, &length); 4088b411b363SPhilipp Reisner if (!rv) 4089b411b363SPhilipp Reisner goto fail; 4090b411b363SPhilipp Reisner 409102918be2SPhilipp Reisner if (cmd != P_AUTH_RESPONSE) { 4092b411b363SPhilipp Reisner dev_err(DEV, "expected AuthResponse packet, received: %s (0x%04x)\n", 409302918be2SPhilipp Reisner cmdname(cmd), cmd); 4094b411b363SPhilipp Reisner rv = 0; 4095b411b363SPhilipp Reisner goto fail; 4096b411b363SPhilipp Reisner } 4097b411b363SPhilipp Reisner 409802918be2SPhilipp Reisner if (length != resp_size) { 4099b411b363SPhilipp Reisner dev_err(DEV, "expected AuthResponse payload of wrong size\n"); 4100b411b363SPhilipp Reisner rv = 0; 4101b411b363SPhilipp Reisner goto fail; 4102b411b363SPhilipp Reisner } 4103b411b363SPhilipp Reisner 4104b411b363SPhilipp Reisner rv = drbd_recv(mdev, response , resp_size); 4105b411b363SPhilipp Reisner 4106b411b363SPhilipp Reisner if (rv != resp_size) { 41070ddc5549SLars Ellenberg if (!signal_pending(current)) 41080ddc5549SLars Ellenberg dev_warn(DEV, "short read receiving AuthResponse: l=%u\n", rv); 4109b411b363SPhilipp Reisner rv = 0; 4110b411b363SPhilipp Reisner goto fail; 4111b411b363SPhilipp Reisner } 4112b411b363SPhilipp Reisner 4113b411b363SPhilipp Reisner right_response = kmalloc(resp_size, GFP_NOIO); 41142d1ee87dSJulia Lawall if (right_response == NULL) { 4115b411b363SPhilipp Reisner dev_err(DEV, "kmalloc of right_response failed\n"); 4116b10d96cbSJohannes Thoma rv = -1; 4117b411b363SPhilipp Reisner goto fail; 4118b411b363SPhilipp Reisner } 4119b411b363SPhilipp Reisner 4120b411b363SPhilipp Reisner sg_set_buf(&sg, my_challenge, CHALLENGE_LEN); 4121b411b363SPhilipp Reisner 4122b411b363SPhilipp Reisner rv = crypto_hash_digest(&desc, &sg, sg.length, right_response); 4123b411b363SPhilipp Reisner if (rv) { 4124b411b363SPhilipp Reisner dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv); 4125b10d96cbSJohannes Thoma rv = -1; 4126b411b363SPhilipp Reisner goto fail; 4127b411b363SPhilipp Reisner } 4128b411b363SPhilipp Reisner 4129b411b363SPhilipp Reisner rv = !memcmp(response, right_response, resp_size); 4130b411b363SPhilipp Reisner 4131b411b363SPhilipp Reisner if (rv) 4132b411b363SPhilipp Reisner dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n", 413389e58e75SPhilipp Reisner resp_size, mdev->tconn->net_conf->cram_hmac_alg); 4134b10d96cbSJohannes Thoma else 4135b10d96cbSJohannes Thoma rv = -1; 4136b411b363SPhilipp Reisner 4137b411b363SPhilipp Reisner fail: 4138b411b363SPhilipp Reisner kfree(peers_ch); 4139b411b363SPhilipp Reisner kfree(response); 4140b411b363SPhilipp Reisner kfree(right_response); 4141b411b363SPhilipp Reisner 4142b411b363SPhilipp Reisner return rv; 4143b411b363SPhilipp Reisner } 4144b411b363SPhilipp Reisner #endif 4145b411b363SPhilipp Reisner 4146b411b363SPhilipp Reisner int drbdd_init(struct drbd_thread *thi) 4147b411b363SPhilipp Reisner { 4148b411b363SPhilipp Reisner 
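	/* Connection loop: drbd_connect() returning 0 means a transient
	 * failure (tear down and retry after a second), -1 means the peer
	 * speaks an incompatible dialect (drop the network config), and > 0
	 * means we have a connection and enter the drbdd() receive loop. */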
struct drbd_conf *mdev = thi->mdev; 4149b411b363SPhilipp Reisner unsigned int minor = mdev_to_minor(mdev); 4150b411b363SPhilipp Reisner int h; 4151b411b363SPhilipp Reisner 4152b411b363SPhilipp Reisner sprintf(current->comm, "drbd%d_receiver", minor); 4153b411b363SPhilipp Reisner 4154b411b363SPhilipp Reisner dev_info(DEV, "receiver (re)started\n"); 4155b411b363SPhilipp Reisner 4156b411b363SPhilipp Reisner do { 4157b411b363SPhilipp Reisner h = drbd_connect(mdev); 4158b411b363SPhilipp Reisner if (h == 0) { 4159b411b363SPhilipp Reisner drbd_disconnect(mdev); 416020ee6390SPhilipp Reisner schedule_timeout_interruptible(HZ); 4161b411b363SPhilipp Reisner } 4162b411b363SPhilipp Reisner if (h == -1) { 4163b411b363SPhilipp Reisner dev_warn(DEV, "Discarding network configuration.\n"); 4164b411b363SPhilipp Reisner drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); 4165b411b363SPhilipp Reisner } 4166b411b363SPhilipp Reisner } while (h == 0); 4167b411b363SPhilipp Reisner 4168b411b363SPhilipp Reisner if (h > 0) { 4169b2fb6dbeSPhilipp Reisner if (get_net_conf(mdev->tconn)) { 4170b411b363SPhilipp Reisner drbdd(mdev); 4171b2fb6dbeSPhilipp Reisner put_net_conf(mdev->tconn); 4172b411b363SPhilipp Reisner } 4173b411b363SPhilipp Reisner } 4174b411b363SPhilipp Reisner 4175b411b363SPhilipp Reisner drbd_disconnect(mdev); 4176b411b363SPhilipp Reisner 4177b411b363SPhilipp Reisner dev_info(DEV, "receiver terminated\n"); 4178b411b363SPhilipp Reisner return 0; 4179b411b363SPhilipp Reisner } 4180b411b363SPhilipp Reisner 4181b411b363SPhilipp Reisner /* ********* acknowledge sender ******** */ 4182b411b363SPhilipp Reisner 41830b70a13dSPhilipp Reisner static int got_RqSReply(struct drbd_conf *mdev, struct p_header80 *h) 4184b411b363SPhilipp Reisner { 4185b411b363SPhilipp Reisner struct p_req_state_reply *p = (struct p_req_state_reply *)h; 4186b411b363SPhilipp Reisner 4187b411b363SPhilipp Reisner int retcode = be32_to_cpu(p->retcode); 4188b411b363SPhilipp Reisner 4189b411b363SPhilipp Reisner if (retcode >= SS_SUCCESS) { 4190b411b363SPhilipp Reisner set_bit(CL_ST_CHG_SUCCESS, &mdev->flags); 4191b411b363SPhilipp Reisner } else { 4192b411b363SPhilipp Reisner set_bit(CL_ST_CHG_FAIL, &mdev->flags); 4193b411b363SPhilipp Reisner dev_err(DEV, "Requested state change failed by peer: %s (%d)\n", 4194b411b363SPhilipp Reisner drbd_set_st_err_str(retcode), retcode); 4195b411b363SPhilipp Reisner } 4196b411b363SPhilipp Reisner wake_up(&mdev->state_wait); 4197b411b363SPhilipp Reisner 419881e84650SAndreas Gruenbacher return true; 4199b411b363SPhilipp Reisner } 4200b411b363SPhilipp Reisner 42010b70a13dSPhilipp Reisner static int got_Ping(struct drbd_conf *mdev, struct p_header80 *h) 4202b411b363SPhilipp Reisner { 4203b411b363SPhilipp Reisner return drbd_send_ping_ack(mdev); 4204b411b363SPhilipp Reisner 4205b411b363SPhilipp Reisner } 4206b411b363SPhilipp Reisner 42070b70a13dSPhilipp Reisner static int got_PingAck(struct drbd_conf *mdev, struct p_header80 *h) 4208b411b363SPhilipp Reisner { 4209b411b363SPhilipp Reisner /* restore idle timeout */ 421089e58e75SPhilipp Reisner mdev->meta.socket->sk->sk_rcvtimeo = mdev->tconn->net_conf->ping_int*HZ; 4211309d1608SPhilipp Reisner if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags)) 4212309d1608SPhilipp Reisner wake_up(&mdev->misc_wait); 4213b411b363SPhilipp Reisner 421481e84650SAndreas Gruenbacher return true; 4215b411b363SPhilipp Reisner } 4216b411b363SPhilipp Reisner 42170b70a13dSPhilipp Reisner static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h) 4218b411b363SPhilipp Reisner { 
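	/* The peer found this block equal during checksum-based resync:
	 * mark it in sync, account it in rs_same_csum and drop the
	 * pending-resync reference; no data needs to be transferred. */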
4219b411b363SPhilipp Reisner struct p_block_ack *p = (struct p_block_ack *)h; 4220b411b363SPhilipp Reisner sector_t sector = be64_to_cpu(p->sector); 4221b411b363SPhilipp Reisner int blksize = be32_to_cpu(p->blksize); 4222b411b363SPhilipp Reisner 4223b411b363SPhilipp Reisner D_ASSERT(mdev->agreed_pro_version >= 89); 4224b411b363SPhilipp Reisner 4225b411b363SPhilipp Reisner update_peer_seq(mdev, be32_to_cpu(p->seq_num)); 4226b411b363SPhilipp Reisner 42271d53f09eSLars Ellenberg if (get_ldev(mdev)) { 4228b411b363SPhilipp Reisner drbd_rs_complete_io(mdev, sector); 4229b411b363SPhilipp Reisner drbd_set_in_sync(mdev, sector, blksize); 4230b411b363SPhilipp Reisner /* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */ 4231b411b363SPhilipp Reisner mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT); 42321d53f09eSLars Ellenberg put_ldev(mdev); 42331d53f09eSLars Ellenberg } 4234b411b363SPhilipp Reisner dec_rs_pending(mdev); 4235778f271dSPhilipp Reisner atomic_add(blksize >> 9, &mdev->rs_sect_in); 4236b411b363SPhilipp Reisner 423781e84650SAndreas Gruenbacher return true; 4238b411b363SPhilipp Reisner } 4239b411b363SPhilipp Reisner 4240bc9c5c41SAndreas Gruenbacher static int 4241bc9c5c41SAndreas Gruenbacher validate_req_change_req_state(struct drbd_conf *mdev, u64 id, sector_t sector, 4242bc9c5c41SAndreas Gruenbacher struct rb_root *root, const char *func, 4243bc9c5c41SAndreas Gruenbacher enum drbd_req_event what, bool missing_ok) 4244b411b363SPhilipp Reisner { 4245b411b363SPhilipp Reisner struct drbd_request *req; 4246b411b363SPhilipp Reisner struct bio_and_error m; 4247b411b363SPhilipp Reisner 4248b411b363SPhilipp Reisner spin_lock_irq(&mdev->req_lock); 4249bc9c5c41SAndreas Gruenbacher req = find_request(mdev, root, id, sector, missing_ok, func); 4250b411b363SPhilipp Reisner if (unlikely(!req)) { 4251b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 425281e84650SAndreas Gruenbacher return false; 4253b411b363SPhilipp Reisner } 4254b411b363SPhilipp Reisner __req_mod(req, what, &m); 4255b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 4256b411b363SPhilipp Reisner 4257b411b363SPhilipp Reisner if (m.bio) 4258b411b363SPhilipp Reisner complete_master_bio(mdev, &m); 425981e84650SAndreas Gruenbacher return true; 4260b411b363SPhilipp Reisner } 4261b411b363SPhilipp Reisner 42620b70a13dSPhilipp Reisner static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h) 4263b411b363SPhilipp Reisner { 4264b411b363SPhilipp Reisner struct p_block_ack *p = (struct p_block_ack *)h; 4265b411b363SPhilipp Reisner sector_t sector = be64_to_cpu(p->sector); 4266b411b363SPhilipp Reisner int blksize = be32_to_cpu(p->blksize); 4267b411b363SPhilipp Reisner enum drbd_req_event what; 4268b411b363SPhilipp Reisner 4269b411b363SPhilipp Reisner update_peer_seq(mdev, be32_to_cpu(p->seq_num)); 4270b411b363SPhilipp Reisner 4271579b57edSAndreas Gruenbacher if (p->block_id == ID_SYNCER) { 4272b411b363SPhilipp Reisner drbd_set_in_sync(mdev, sector, blksize); 4273b411b363SPhilipp Reisner dec_rs_pending(mdev); 427481e84650SAndreas Gruenbacher return true; 4275b411b363SPhilipp Reisner } 4276b411b363SPhilipp Reisner switch (be16_to_cpu(h->command)) { 4277b411b363SPhilipp Reisner case P_RS_WRITE_ACK: 427889e58e75SPhilipp Reisner D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C); 42798554df1cSAndreas Gruenbacher what = WRITE_ACKED_BY_PEER_AND_SIS; 4280b411b363SPhilipp Reisner break; 4281b411b363SPhilipp Reisner case P_WRITE_ACK: 428289e58e75SPhilipp Reisner D_ASSERT(mdev->tconn->net_conf->wire_protocol 
== DRBD_PROT_C); 42838554df1cSAndreas Gruenbacher what = WRITE_ACKED_BY_PEER; 4284b411b363SPhilipp Reisner break; 4285b411b363SPhilipp Reisner case P_RECV_ACK: 428689e58e75SPhilipp Reisner D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_B); 42878554df1cSAndreas Gruenbacher what = RECV_ACKED_BY_PEER; 4288b411b363SPhilipp Reisner break; 4289b411b363SPhilipp Reisner case P_DISCARD_ACK: 429089e58e75SPhilipp Reisner D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C); 42918554df1cSAndreas Gruenbacher what = CONFLICT_DISCARDED_BY_PEER; 4292b411b363SPhilipp Reisner break; 4293b411b363SPhilipp Reisner default: 4294b411b363SPhilipp Reisner D_ASSERT(0); 429581e84650SAndreas Gruenbacher return false; 4296b411b363SPhilipp Reisner } 4297b411b363SPhilipp Reisner 4298b411b363SPhilipp Reisner return validate_req_change_req_state(mdev, p->block_id, sector, 4299bc9c5c41SAndreas Gruenbacher &mdev->write_requests, __func__, 4300bc9c5c41SAndreas Gruenbacher what, false); 4301b411b363SPhilipp Reisner } 4302b411b363SPhilipp Reisner 43030b70a13dSPhilipp Reisner static int got_NegAck(struct drbd_conf *mdev, struct p_header80 *h) 4304b411b363SPhilipp Reisner { 4305b411b363SPhilipp Reisner struct p_block_ack *p = (struct p_block_ack *)h; 4306b411b363SPhilipp Reisner sector_t sector = be64_to_cpu(p->sector); 43072deb8336SPhilipp Reisner int size = be32_to_cpu(p->blksize); 430889e58e75SPhilipp Reisner bool missing_ok = mdev->tconn->net_conf->wire_protocol == DRBD_PROT_A || 430989e58e75SPhilipp Reisner mdev->tconn->net_conf->wire_protocol == DRBD_PROT_B; 4310c3afd8f5SAndreas Gruenbacher bool found; 4311b411b363SPhilipp Reisner 4312b411b363SPhilipp Reisner update_peer_seq(mdev, be32_to_cpu(p->seq_num)); 4313b411b363SPhilipp Reisner 4314579b57edSAndreas Gruenbacher if (p->block_id == ID_SYNCER) { 4315b411b363SPhilipp Reisner dec_rs_pending(mdev); 4316b411b363SPhilipp Reisner drbd_rs_failed_io(mdev, sector, size); 431781e84650SAndreas Gruenbacher return true; 4318b411b363SPhilipp Reisner } 43192deb8336SPhilipp Reisner 4320c3afd8f5SAndreas Gruenbacher found = validate_req_change_req_state(mdev, p->block_id, sector, 4321bc9c5c41SAndreas Gruenbacher &mdev->write_requests, __func__, 43228554df1cSAndreas Gruenbacher NEG_ACKED, missing_ok); 4323c3afd8f5SAndreas Gruenbacher if (!found) { 43242deb8336SPhilipp Reisner /* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs. 43252deb8336SPhilipp Reisner The master bio might already be completed, therefore the 4326c3afd8f5SAndreas Gruenbacher request is no longer in the collision hash. */ 43272deb8336SPhilipp Reisner /* In Protocol B we might already have got a P_RECV_ACK 43282deb8336SPhilipp Reisner but then get a P_NEG_ACK afterwards. 
*/
4329c3afd8f5SAndreas Gruenbacher 		if (!missing_ok)
43302deb8336SPhilipp Reisner 			return false;
4331c3afd8f5SAndreas Gruenbacher 		drbd_set_out_of_sync(mdev, sector, size);
43322deb8336SPhilipp Reisner 	}
43332deb8336SPhilipp Reisner 	return true;
4334b411b363SPhilipp Reisner }
4335b411b363SPhilipp Reisner 
43360b70a13dSPhilipp Reisner static int got_NegDReply(struct drbd_conf *mdev, struct p_header80 *h)
4337b411b363SPhilipp Reisner {
4338b411b363SPhilipp Reisner 	struct p_block_ack *p = (struct p_block_ack *)h;
4339b411b363SPhilipp Reisner 	sector_t sector = be64_to_cpu(p->sector);
4340b411b363SPhilipp Reisner 
4341b411b363SPhilipp Reisner 	update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4342b411b363SPhilipp Reisner 	dev_err(DEV, "Got NegDReply; Sector %llus, len %u; Fail original request.\n",
4343b411b363SPhilipp Reisner 		(unsigned long long)sector, be32_to_cpu(p->blksize));
4344b411b363SPhilipp Reisner 
4345b411b363SPhilipp Reisner 	return validate_req_change_req_state(mdev, p->block_id, sector,
4346bc9c5c41SAndreas Gruenbacher 					     &mdev->read_requests, __func__,
43478554df1cSAndreas Gruenbacher 					     NEG_ACKED, false);
4348b411b363SPhilipp Reisner }
4349b411b363SPhilipp Reisner 
43500b70a13dSPhilipp Reisner static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header80 *h)
4351b411b363SPhilipp Reisner {
4352b411b363SPhilipp Reisner 	sector_t sector;
4353b411b363SPhilipp Reisner 	int size;
4354b411b363SPhilipp Reisner 	struct p_block_ack *p = (struct p_block_ack *)h;
4355b411b363SPhilipp Reisner 
4356b411b363SPhilipp Reisner 	sector = be64_to_cpu(p->sector);
4357b411b363SPhilipp Reisner 	size = be32_to_cpu(p->blksize);
4358b411b363SPhilipp Reisner 
4359b411b363SPhilipp Reisner 	update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4360b411b363SPhilipp Reisner 
4361b411b363SPhilipp Reisner 	dec_rs_pending(mdev);
4362b411b363SPhilipp Reisner 
4363b411b363SPhilipp Reisner 	if (get_ldev_if_state(mdev, D_FAILED)) {
4364b411b363SPhilipp Reisner 		drbd_rs_complete_io(mdev, sector);
4365d612d309SPhilipp Reisner 		switch (be16_to_cpu(h->command)) {
4366d612d309SPhilipp Reisner 		case P_NEG_RS_DREPLY:
4367b411b363SPhilipp Reisner 			drbd_rs_failed_io(mdev, sector, size); /* fall through */
4368d612d309SPhilipp Reisner 		case P_RS_CANCEL:
4369d612d309SPhilipp Reisner 			break;
4370d612d309SPhilipp Reisner 		default:
4371d612d309SPhilipp Reisner 			D_ASSERT(0);
4372d612d309SPhilipp Reisner 			put_ldev(mdev);
4373d612d309SPhilipp Reisner 			return false;
4374d612d309SPhilipp Reisner 		}
4375b411b363SPhilipp Reisner 		put_ldev(mdev);
4376b411b363SPhilipp Reisner 	}
4377b411b363SPhilipp Reisner 
437881e84650SAndreas Gruenbacher 	return true;
4379b411b363SPhilipp Reisner }
4380b411b363SPhilipp Reisner 
43810b70a13dSPhilipp Reisner static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h)
4382b411b363SPhilipp Reisner {
4383b411b363SPhilipp Reisner 	struct p_barrier_ack *p = (struct p_barrier_ack *)h;
4384b411b363SPhilipp Reisner 
4385b411b363SPhilipp Reisner 	tl_release(mdev, p->barrier, be32_to_cpu(p->set_size));
4386b411b363SPhilipp Reisner 
4387c4752ef1SPhilipp Reisner 	if (mdev->state.conn == C_AHEAD &&
4388c4752ef1SPhilipp Reisner 	    atomic_read(&mdev->ap_in_flight) == 0 &&
4389370a43e7SPhilipp Reisner 	    !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->current_epoch->flags)) {
4390370a43e7SPhilipp Reisner 		mdev->start_resync_timer.expires = jiffies + HZ;
4391370a43e7SPhilipp Reisner 		add_timer(&mdev->start_resync_timer);
4392c4752ef1SPhilipp Reisner 	}
4393c4752ef1SPhilipp Reisner 
439481e84650SAndreas Gruenbacher 	return true;
4395b411b363SPhilipp Reisner }
4396b411b363SPhilipp Reisner 
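/*
 * Illustrative sketch only, not part of the driver (kept compiled out
 * below): the asender callbacks above all follow the same pattern --
 * cast the header to the p_block_ack wire format, convert the
 * big-endian fields, refresh the peer sequence number, do their
 * specific bookkeeping, and return true/false so drbd_asender() knows
 * whether to keep the connection.  The handler name got_ExampleAck is
 * hypothetical.
 */
#if 0
static int got_ExampleAck(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);	/* on-wire fields are big endian */
	int blksize = be32_to_cpu(p->blksize);		/* byte count, not sectors */

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	/* handler-specific work on [sector, sector + (blksize >> 9)) would go here */

	return true;	/* returning false makes drbd_asender() reconnect */
}
#endif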
43970b70a13dSPhilipp Reisner static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h) 4398b411b363SPhilipp Reisner { 4399b411b363SPhilipp Reisner struct p_block_ack *p = (struct p_block_ack *)h; 4400b411b363SPhilipp Reisner struct drbd_work *w; 4401b411b363SPhilipp Reisner sector_t sector; 4402b411b363SPhilipp Reisner int size; 4403b411b363SPhilipp Reisner 4404b411b363SPhilipp Reisner sector = be64_to_cpu(p->sector); 4405b411b363SPhilipp Reisner size = be32_to_cpu(p->blksize); 4406b411b363SPhilipp Reisner 4407b411b363SPhilipp Reisner update_peer_seq(mdev, be32_to_cpu(p->seq_num)); 4408b411b363SPhilipp Reisner 4409b411b363SPhilipp Reisner if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC) 4410b411b363SPhilipp Reisner drbd_ov_oos_found(mdev, sector, size); 4411b411b363SPhilipp Reisner else 4412b411b363SPhilipp Reisner ov_oos_print(mdev); 4413b411b363SPhilipp Reisner 44141d53f09eSLars Ellenberg if (!get_ldev(mdev)) 441581e84650SAndreas Gruenbacher return true; 44161d53f09eSLars Ellenberg 4417b411b363SPhilipp Reisner drbd_rs_complete_io(mdev, sector); 4418b411b363SPhilipp Reisner dec_rs_pending(mdev); 4419b411b363SPhilipp Reisner 4420ea5442afSLars Ellenberg --mdev->ov_left; 4421ea5442afSLars Ellenberg 4422ea5442afSLars Ellenberg /* let's advance progress step marks only for every other megabyte */ 4423ea5442afSLars Ellenberg if ((mdev->ov_left & 0x200) == 0x200) 4424ea5442afSLars Ellenberg drbd_advance_rs_marks(mdev, mdev->ov_left); 4425ea5442afSLars Ellenberg 4426ea5442afSLars Ellenberg if (mdev->ov_left == 0) { 4427b411b363SPhilipp Reisner w = kmalloc(sizeof(*w), GFP_NOIO); 4428b411b363SPhilipp Reisner if (w) { 4429b411b363SPhilipp Reisner w->cb = w_ov_finished; 4430b411b363SPhilipp Reisner drbd_queue_work_front(&mdev->data.work, w); 4431b411b363SPhilipp Reisner } else { 4432b411b363SPhilipp Reisner dev_err(DEV, "kmalloc(w) failed."); 4433b411b363SPhilipp Reisner ov_oos_print(mdev); 4434b411b363SPhilipp Reisner drbd_resync_finished(mdev); 4435b411b363SPhilipp Reisner } 4436b411b363SPhilipp Reisner } 44371d53f09eSLars Ellenberg put_ldev(mdev); 443881e84650SAndreas Gruenbacher return true; 4439b411b363SPhilipp Reisner } 4440b411b363SPhilipp Reisner 444102918be2SPhilipp Reisner static int got_skip(struct drbd_conf *mdev, struct p_header80 *h) 44420ced55a3SPhilipp Reisner { 444381e84650SAndreas Gruenbacher return true; 44440ced55a3SPhilipp Reisner } 44450ced55a3SPhilipp Reisner 4446b411b363SPhilipp Reisner struct asender_cmd { 4447b411b363SPhilipp Reisner size_t pkt_size; 44480b70a13dSPhilipp Reisner int (*process)(struct drbd_conf *mdev, struct p_header80 *h); 4449b411b363SPhilipp Reisner }; 4450b411b363SPhilipp Reisner 4451b411b363SPhilipp Reisner static struct asender_cmd *get_asender_cmd(int cmd) 4452b411b363SPhilipp Reisner { 4453b411b363SPhilipp Reisner static struct asender_cmd asender_tbl[] = { 4454b411b363SPhilipp Reisner /* anything missing from this table is in 4455b411b363SPhilipp Reisner * the drbd_cmd_handler (drbd_default_handler) table, 4456b411b363SPhilipp Reisner * see the beginning of drbdd() */ 44570b70a13dSPhilipp Reisner [P_PING] = { sizeof(struct p_header80), got_Ping }, 44580b70a13dSPhilipp Reisner [P_PING_ACK] = { sizeof(struct p_header80), got_PingAck }, 4459b411b363SPhilipp Reisner [P_RECV_ACK] = { sizeof(struct p_block_ack), got_BlockAck }, 4460b411b363SPhilipp Reisner [P_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck }, 4461b411b363SPhilipp Reisner [P_RS_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck }, 4462b411b363SPhilipp Reisner 
[P_DISCARD_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
4463b411b363SPhilipp Reisner 	[P_NEG_ACK]	    = { sizeof(struct p_block_ack), got_NegAck },
4464b411b363SPhilipp Reisner 	[P_NEG_DREPLY]	    = { sizeof(struct p_block_ack), got_NegDReply },
4465b411b363SPhilipp Reisner 	[P_NEG_RS_DREPLY]   = { sizeof(struct p_block_ack), got_NegRSDReply },
4466b411b363SPhilipp Reisner 	[P_OV_RESULT]	    = { sizeof(struct p_block_ack), got_OVResult },
4467b411b363SPhilipp Reisner 	[P_BARRIER_ACK]	    = { sizeof(struct p_barrier_ack), got_BarrierAck },
4468b411b363SPhilipp Reisner 	[P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
4469b411b363SPhilipp Reisner 	[P_RS_IS_IN_SYNC]   = { sizeof(struct p_block_ack), got_IsInSync },
447002918be2SPhilipp Reisner 	[P_DELAY_PROBE]     = { sizeof(struct p_delay_probe93), got_skip },
4471d612d309SPhilipp Reisner 	[P_RS_CANCEL]       = { sizeof(struct p_block_ack), got_NegRSDReply },
4472b411b363SPhilipp Reisner 	[P_MAX_CMD]	    = { 0, NULL },
4473b411b363SPhilipp Reisner 	};
4474b411b363SPhilipp Reisner 	if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
4475b411b363SPhilipp Reisner 		return NULL;
4476b411b363SPhilipp Reisner 	return &asender_tbl[cmd];
4477b411b363SPhilipp Reisner }
4478b411b363SPhilipp Reisner 
4479b411b363SPhilipp Reisner int drbd_asender(struct drbd_thread *thi)
4480b411b363SPhilipp Reisner {
4481b411b363SPhilipp Reisner 	struct drbd_conf *mdev = thi->mdev;
448202918be2SPhilipp Reisner 	struct p_header80 *h = &mdev->meta.rbuf.header.h80;
4483b411b363SPhilipp Reisner 	struct asender_cmd *cmd = NULL;
4484b411b363SPhilipp Reisner 
4485b411b363SPhilipp Reisner 	int rv, len;
4486b411b363SPhilipp Reisner 	void *buf = h;
4487b411b363SPhilipp Reisner 	int received = 0;
44880b70a13dSPhilipp Reisner 	int expect = sizeof(struct p_header80);
4489b411b363SPhilipp Reisner 	int empty;
4490f36af18cSLars Ellenberg 	int ping_timeout_active = 0;
4491b411b363SPhilipp Reisner 
4492b411b363SPhilipp Reisner 	sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev));
4493b411b363SPhilipp Reisner 
4494b411b363SPhilipp Reisner 	current->policy = SCHED_RR;  /* Make this a realtime task!
*/ 4495b411b363SPhilipp Reisner current->rt_priority = 2; /* more important than all other tasks */ 4496b411b363SPhilipp Reisner 4497e77a0a5cSAndreas Gruenbacher while (get_t_state(thi) == RUNNING) { 4498b411b363SPhilipp Reisner drbd_thread_current_set_cpu(mdev); 4499b411b363SPhilipp Reisner if (test_and_clear_bit(SEND_PING, &mdev->flags)) { 4500841ce241SAndreas Gruenbacher if (!drbd_send_ping(mdev)) { 4501841ce241SAndreas Gruenbacher dev_err(DEV, "drbd_send_ping has failed\n"); 4502841ce241SAndreas Gruenbacher goto reconnect; 4503841ce241SAndreas Gruenbacher } 4504b411b363SPhilipp Reisner mdev->meta.socket->sk->sk_rcvtimeo = 450589e58e75SPhilipp Reisner mdev->tconn->net_conf->ping_timeo*HZ/10; 4506f36af18cSLars Ellenberg ping_timeout_active = 1; 4507b411b363SPhilipp Reisner } 4508b411b363SPhilipp Reisner 4509b411b363SPhilipp Reisner /* conditionally cork; 4510b411b363SPhilipp Reisner * it may hurt latency if we cork without much to send */ 451189e58e75SPhilipp Reisner if (!mdev->tconn->net_conf->no_cork && 4512b411b363SPhilipp Reisner 3 < atomic_read(&mdev->unacked_cnt)) 4513b411b363SPhilipp Reisner drbd_tcp_cork(mdev->meta.socket); 4514b411b363SPhilipp Reisner while (1) { 4515b411b363SPhilipp Reisner clear_bit(SIGNAL_ASENDER, &mdev->flags); 4516b411b363SPhilipp Reisner flush_signals(current); 45170f8488e1SLars Ellenberg if (!drbd_process_done_ee(mdev)) 4518b411b363SPhilipp Reisner goto reconnect; 4519b411b363SPhilipp Reisner /* to avoid race with newly queued ACKs */ 4520b411b363SPhilipp Reisner set_bit(SIGNAL_ASENDER, &mdev->flags); 4521b411b363SPhilipp Reisner spin_lock_irq(&mdev->req_lock); 4522b411b363SPhilipp Reisner empty = list_empty(&mdev->done_ee); 4523b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 4524b411b363SPhilipp Reisner /* new ack may have been queued right here, 4525b411b363SPhilipp Reisner * but then there is also a signal pending, 4526b411b363SPhilipp Reisner * and we start over... */ 4527b411b363SPhilipp Reisner if (empty) 4528b411b363SPhilipp Reisner break; 4529b411b363SPhilipp Reisner } 4530b411b363SPhilipp Reisner /* but unconditionally uncork unless disabled */ 453189e58e75SPhilipp Reisner if (!mdev->tconn->net_conf->no_cork) 4532b411b363SPhilipp Reisner drbd_tcp_uncork(mdev->meta.socket); 4533b411b363SPhilipp Reisner 4534b411b363SPhilipp Reisner /* short circuit, recv_msg would return EINTR anyways. */ 4535b411b363SPhilipp Reisner if (signal_pending(current)) 4536b411b363SPhilipp Reisner continue; 4537b411b363SPhilipp Reisner 4538b411b363SPhilipp Reisner rv = drbd_recv_short(mdev, mdev->meta.socket, 4539b411b363SPhilipp Reisner buf, expect-received, 0); 4540b411b363SPhilipp Reisner clear_bit(SIGNAL_ASENDER, &mdev->flags); 4541b411b363SPhilipp Reisner 4542b411b363SPhilipp Reisner flush_signals(current); 4543b411b363SPhilipp Reisner 4544b411b363SPhilipp Reisner /* Note: 4545b411b363SPhilipp Reisner * -EINTR (on meta) we got a signal 4546b411b363SPhilipp Reisner * -EAGAIN (on meta) rcvtimeo expired 4547b411b363SPhilipp Reisner * -ECONNRESET other side closed the connection 4548b411b363SPhilipp Reisner * -ERESTARTSYS (on data) we got a signal 4549b411b363SPhilipp Reisner * rv < 0 other than above: unexpected error! 
4550b411b363SPhilipp Reisner * rv == expected: full header or command 4551b411b363SPhilipp Reisner * rv < expected: "woken" by signal during receive 4552b411b363SPhilipp Reisner * rv == 0 : "connection shut down by peer" 4553b411b363SPhilipp Reisner */ 4554b411b363SPhilipp Reisner if (likely(rv > 0)) { 4555b411b363SPhilipp Reisner received += rv; 4556b411b363SPhilipp Reisner buf += rv; 4557b411b363SPhilipp Reisner } else if (rv == 0) { 4558b411b363SPhilipp Reisner dev_err(DEV, "meta connection shut down by peer.\n"); 4559b411b363SPhilipp Reisner goto reconnect; 4560b411b363SPhilipp Reisner } else if (rv == -EAGAIN) { 4561cb6518cbSLars Ellenberg /* If the data socket received something meanwhile, 4562cb6518cbSLars Ellenberg * that is good enough: peer is still alive. */ 4563cb6518cbSLars Ellenberg if (time_after(mdev->last_received, 4564cb6518cbSLars Ellenberg jiffies - mdev->meta.socket->sk->sk_rcvtimeo)) 4565cb6518cbSLars Ellenberg continue; 4566f36af18cSLars Ellenberg if (ping_timeout_active) { 4567b411b363SPhilipp Reisner dev_err(DEV, "PingAck did not arrive in time.\n"); 4568b411b363SPhilipp Reisner goto reconnect; 4569b411b363SPhilipp Reisner } 4570b411b363SPhilipp Reisner set_bit(SEND_PING, &mdev->flags); 4571b411b363SPhilipp Reisner continue; 4572b411b363SPhilipp Reisner } else if (rv == -EINTR) { 4573b411b363SPhilipp Reisner continue; 4574b411b363SPhilipp Reisner } else { 4575b411b363SPhilipp Reisner dev_err(DEV, "sock_recvmsg returned %d\n", rv); 4576b411b363SPhilipp Reisner goto reconnect; 4577b411b363SPhilipp Reisner } 4578b411b363SPhilipp Reisner 4579b411b363SPhilipp Reisner if (received == expect && cmd == NULL) { 4580ca9bc12bSAndreas Gruenbacher if (unlikely(h->magic != cpu_to_be32(DRBD_MAGIC))) { 4581004352faSLars Ellenberg dev_err(DEV, "magic?? on meta m: 0x%08x c: %d l: %d\n", 4582004352faSLars Ellenberg be32_to_cpu(h->magic), 4583004352faSLars Ellenberg be16_to_cpu(h->command), 4584004352faSLars Ellenberg be16_to_cpu(h->length)); 4585b411b363SPhilipp Reisner goto reconnect; 4586b411b363SPhilipp Reisner } 4587b411b363SPhilipp Reisner cmd = get_asender_cmd(be16_to_cpu(h->command)); 4588b411b363SPhilipp Reisner len = be16_to_cpu(h->length); 4589b411b363SPhilipp Reisner if (unlikely(cmd == NULL)) { 4590004352faSLars Ellenberg dev_err(DEV, "unknown command?? 
on meta m: 0x%08x c: %d l: %d\n",
4591004352faSLars Ellenberg 					be32_to_cpu(h->magic),
4592004352faSLars Ellenberg 					be16_to_cpu(h->command),
4593004352faSLars Ellenberg 					be16_to_cpu(h->length));
4594b411b363SPhilipp Reisner 				goto disconnect;
4595b411b363SPhilipp Reisner 			}
4596b411b363SPhilipp Reisner 			expect = cmd->pkt_size;
4597841ce241SAndreas Gruenbacher 			if (!expect(len == expect - sizeof(struct p_header80)))
4598b411b363SPhilipp Reisner 				goto reconnect;
4599b411b363SPhilipp Reisner 		}
4600b411b363SPhilipp Reisner 		if (received == expect) {
4601cb6518cbSLars Ellenberg 			mdev->last_received = jiffies;
4602b411b363SPhilipp Reisner 			D_ASSERT(cmd != NULL);
4603b411b363SPhilipp Reisner 			if (!cmd->process(mdev, h))
4604b411b363SPhilipp Reisner 				goto reconnect;
4605b411b363SPhilipp Reisner 
4606f36af18cSLars Ellenberg 			/* the idle_timeout (ping-int)
4607f36af18cSLars Ellenberg 			 * has been restored in got_PingAck() */
4608f36af18cSLars Ellenberg 			if (cmd == get_asender_cmd(P_PING_ACK))
4609f36af18cSLars Ellenberg 				ping_timeout_active = 0;
4610f36af18cSLars Ellenberg 
4611b411b363SPhilipp Reisner 			buf = h;
4612b411b363SPhilipp Reisner 			received = 0;
46130b70a13dSPhilipp Reisner 			expect = sizeof(struct p_header80);
4614b411b363SPhilipp Reisner 			cmd = NULL;
4615b411b363SPhilipp Reisner 		}
4616b411b363SPhilipp Reisner 	}
4617b411b363SPhilipp Reisner 
4618b411b363SPhilipp Reisner 	if (0) {
4619b411b363SPhilipp Reisner reconnect:
4620b411b363SPhilipp Reisner 		drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
4621856c50c7SLars Ellenberg 		drbd_md_sync(mdev);
4622b411b363SPhilipp Reisner 	}
4623b411b363SPhilipp Reisner 	if (0) {
4624b411b363SPhilipp Reisner disconnect:
4625b411b363SPhilipp Reisner 		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
4626856c50c7SLars Ellenberg 		drbd_md_sync(mdev);
4627b411b363SPhilipp Reisner 	}
4628b411b363SPhilipp Reisner 	clear_bit(SIGNAL_ASENDER, &mdev->flags);
4629b411b363SPhilipp Reisner 
4630b411b363SPhilipp Reisner 	D_ASSERT(mdev->state.conn < C_CONNECTED);
4631b411b363SPhilipp Reisner 	dev_info(DEV, "asender terminated\n");
4632b411b363SPhilipp Reisner 
4633b411b363SPhilipp Reisner 	return 0;
4634b411b363SPhilipp Reisner }
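/*
 * Illustrative sketch only, not part of the driver (kept compiled out
 * below): a condensed view of the header validation the receive loop in
 * drbd_asender() performs before dispatching a meta-socket packet --
 * magic check, command lookup via get_asender_cmd(), and a payload
 * length check against the fixed per-command packet size.  The helper
 * name asender_header_ok is hypothetical.
 */
#if 0
static bool asender_header_ok(struct p_header80 *h)
{
	struct asender_cmd *cmd;

	if (h->magic != cpu_to_be32(DRBD_MAGIC))
		return false;		/* garbage or protocol mismatch on the meta socket */

	cmd = get_asender_cmd(be16_to_cpu(h->command));
	if (cmd == NULL)
		return false;		/* command not handled by the asender */

	/* each asender packet has a fixed size; the payload must match it */
	return be16_to_cpu(h->length) == cmd->pkt_size - sizeof(struct p_header80);
}
#endif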