/*
   drbd_receiver.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */


#include <linux/module.h>

#include <asm/uaccess.h>
#include <net/sock.h>

#include <linux/drbd.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include "drbd_int.h"
#include "drbd_req.h"

#include "drbd_vli.h"

enum finish_epoch {
	FE_STILL_LIVE,
	FE_DESTROYED,
	FE_RECYCLED,
};

static int drbd_do_handshake(struct drbd_conf *mdev);
static int drbd_do_auth(struct drbd_conf *mdev);

static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_conf *, struct drbd_work *, int);


#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)

/*
 * some helper functions to deal with single linked page lists,
 * page->private being our "next" pointer.
 */

/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller.
 */
static struct page *page_chain_del(struct page **head, int n)
{
	struct page *page;
	struct page *tmp;

	BUG_ON(!n);
	BUG_ON(!head);

	page = *head;

	if (!page)
		return NULL;

	while (page) {
		tmp = page_chain_next(page);
		if (--n == 0)
			break; /* found sufficient pages */
		if (tmp == NULL)
			/* insufficient pages, don't use any of them. */
			return NULL;
		page = tmp;
	}

	/* add end of list marker for the returned list */
	set_page_private(page, 0);
	/* actual return value, and adjustment of head */
	page = *head;
	*head = tmp;
	return page;
}

/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)
{
	struct page *tmp;
	int i = 1;
	while ((tmp = page_chain_next(page)))
		++i, page = tmp;
	if (len)
		*len = i;
	return page;
}

static int page_chain_free(struct page *page)
{
	struct page *tmp;
	int i = 0;
	page_chain_for_each_safe(page, tmp) {
		put_page(page);
		++i;
	}
	return i;
}

static void page_chain_add(struct page **head,
		struct page *chain_first, struct page *chain_last)
{
#if 1
	struct page *tmp;
	tmp = page_chain_tail(chain_first, NULL);
	BUG_ON(tmp != chain_last);
#endif

	/* add chain to head */
	set_page_private(chain_last, (unsigned long)*head);
	*head = chain_first;
}

static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int number)
{
	struct page *page = NULL;
	struct page *tmp = NULL;
	int i = 0;

	/* Yes, testing drbd_pp_vacant outside the lock is racy.
	 * So what. It saves a spin_lock. */
	if (drbd_pp_vacant >= number) {
		spin_lock(&drbd_pp_lock);
		page = page_chain_del(&drbd_pp_pool, number);
		if (page)
			drbd_pp_vacant -= number;
		spin_unlock(&drbd_pp_lock);
		if (page)
			return page;
	}

	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	for (i = 0; i < number; i++) {
		tmp = alloc_page(GFP_TRY);
		if (!tmp)
			break;
		set_page_private(tmp, (unsigned long)page);
		page = tmp;
	}

	if (i == number)
		return page;

	/* Not enough pages immediately available this time.
	 * No need to jump around here, drbd_pp_alloc will retry this
	 * function "soon". */
	if (page) {
		tmp = page_chain_tail(page, NULL);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	return NULL;
}

static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
{
	struct drbd_epoch_entry *e;
	struct list_head *le, *tle;

	/* The EEs are always appended to the end of the list. Since
	   they are sent in order over the wire, they have to finish
	   in order. As soon as we see the first unfinished one we can
	   stop examining the list... */

	list_for_each_safe(le, tle, &mdev->net_ee) {
		e = list_entry(le, struct drbd_epoch_entry, w.list);
		if (drbd_ee_has_active_page(e))
			break;
		list_move(le, to_be_freed);
	}
}

static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
{
	LIST_HEAD(reclaimed);
	struct drbd_epoch_entry *e, *t;

	spin_lock_irq(&mdev->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, e);
}

/**
 * drbd_pp_alloc() - Returns @number pages, retries forever (or until signalled)
 * @mdev:   DRBD device.
 * @number: number of pages requested
 * @retry:  whether to retry, if not enough pages are available right now
 *
 * Tries to allocate number pages, first from our own page pool, then from
 * the kernel, unless this allocation would exceed the max_buffers setting.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * Returns a page chain linked via page->private.
 */
static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool retry)
{
	struct page *page = NULL;
	DEFINE_WAIT(wait);

	/* Yes, we may run up to @number over max_buffers. If we
	 * follow it strictly, the admin will get it wrong anyways. */
	if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers)
		page = drbd_pp_first_pages_or_try_alloc(mdev, number);

	while (page == NULL) {
		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

		drbd_kick_lo_and_reclaim_net(mdev);

		if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
			page = drbd_pp_first_pages_or_try_alloc(mdev, number);
			if (page)
				break;
		}

		if (!retry)
			break;

		if (signal_pending(current)) {
			dev_warn(DEV, "drbd_pp_alloc interrupted!\n");
			break;
		}

		schedule();
	}
	finish_wait(&drbd_pp_wait, &wait);

	if (page)
		atomic_add(number, &mdev->pp_in_use);
	return page;
}

/* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
 * Is also used from inside another spin_lock_irq(&mdev->req_lock);
 * Either links the page chain back to the global pool,
 * or returns all pages to the system. */
static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
{
	atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
	int i;

	if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE)*minor_count)
		i = page_chain_free(page);
	else {
		struct page *tmp;
		tmp = page_chain_tail(page, &i);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	i = atomic_sub_return(i, a);
	if (i < 0)
		dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
"pp_in_use_by_net" : "pp_in_use", i); 294b411b363SPhilipp Reisner wake_up(&drbd_pp_wait); 295b411b363SPhilipp Reisner } 296b411b363SPhilipp Reisner 297b411b363SPhilipp Reisner /* 298b411b363SPhilipp Reisner You need to hold the req_lock: 299b411b363SPhilipp Reisner _drbd_wait_ee_list_empty() 300b411b363SPhilipp Reisner 301b411b363SPhilipp Reisner You must not have the req_lock: 302b411b363SPhilipp Reisner drbd_free_ee() 303b411b363SPhilipp Reisner drbd_alloc_ee() 304b411b363SPhilipp Reisner drbd_init_ee() 305b411b363SPhilipp Reisner drbd_release_ee() 306b411b363SPhilipp Reisner drbd_ee_fix_bhs() 307b411b363SPhilipp Reisner drbd_process_done_ee() 308b411b363SPhilipp Reisner drbd_clear_done_ee() 309b411b363SPhilipp Reisner drbd_wait_ee_list_empty() 310b411b363SPhilipp Reisner */ 311b411b363SPhilipp Reisner 312b411b363SPhilipp Reisner struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev, 313b411b363SPhilipp Reisner u64 id, 314b411b363SPhilipp Reisner sector_t sector, 315b411b363SPhilipp Reisner unsigned int data_size, 316b411b363SPhilipp Reisner gfp_t gfp_mask) __must_hold(local) 317b411b363SPhilipp Reisner { 318b411b363SPhilipp Reisner struct drbd_epoch_entry *e; 319b411b363SPhilipp Reisner struct page *page; 32045bb912bSLars Ellenberg unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT; 321b411b363SPhilipp Reisner 3220cf9d27eSAndreas Gruenbacher if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE)) 323b411b363SPhilipp Reisner return NULL; 324b411b363SPhilipp Reisner 325b411b363SPhilipp Reisner e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM); 326b411b363SPhilipp Reisner if (!e) { 327b411b363SPhilipp Reisner if (!(gfp_mask & __GFP_NOWARN)) 328b411b363SPhilipp Reisner dev_err(DEV, "alloc_ee: Allocation of an EE failed\n"); 329b411b363SPhilipp Reisner return NULL; 330b411b363SPhilipp Reisner } 331b411b363SPhilipp Reisner 33245bb912bSLars Ellenberg page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT)); 33345bb912bSLars Ellenberg if (!page) 33445bb912bSLars Ellenberg goto fail; 335b411b363SPhilipp Reisner 336b411b363SPhilipp Reisner INIT_HLIST_NODE(&e->colision); 337b411b363SPhilipp Reisner e->epoch = NULL; 33845bb912bSLars Ellenberg e->mdev = mdev; 33945bb912bSLars Ellenberg e->pages = page; 34045bb912bSLars Ellenberg atomic_set(&e->pending_bios, 0); 34145bb912bSLars Ellenberg e->size = data_size; 342b411b363SPhilipp Reisner e->flags = 0; 34345bb912bSLars Ellenberg e->sector = sector; 34445bb912bSLars Ellenberg e->block_id = id; 345b411b363SPhilipp Reisner 346b411b363SPhilipp Reisner return e; 347b411b363SPhilipp Reisner 34845bb912bSLars Ellenberg fail: 349b411b363SPhilipp Reisner mempool_free(e, drbd_ee_mempool); 350b411b363SPhilipp Reisner return NULL; 351b411b363SPhilipp Reisner } 352b411b363SPhilipp Reisner 353435f0740SLars Ellenberg void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, int is_net) 354b411b363SPhilipp Reisner { 355c36c3cedSLars Ellenberg if (e->flags & EE_HAS_DIGEST) 356c36c3cedSLars Ellenberg kfree(e->digest); 357435f0740SLars Ellenberg drbd_pp_free(mdev, e->pages, is_net); 35845bb912bSLars Ellenberg D_ASSERT(atomic_read(&e->pending_bios) == 0); 359b411b363SPhilipp Reisner D_ASSERT(hlist_unhashed(&e->colision)); 360b411b363SPhilipp Reisner mempool_free(e, drbd_ee_mempool); 361b411b363SPhilipp Reisner } 362b411b363SPhilipp Reisner 363b411b363SPhilipp Reisner int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list) 364b411b363SPhilipp Reisner { 365b411b363SPhilipp Reisner LIST_HEAD(work_list); 
	struct drbd_epoch_entry *e, *t;
	int count = 0;
	int is_net = list == &mdev->net_ee;

	spin_lock_irq(&mdev->req_lock);
	list_splice_init(list, &work_list);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &work_list, w.list) {
		drbd_free_some_ee(mdev, e, is_net);
		count++;
	}
	return count;
}


/*
 * This function is called from _asender only_
 * but see also comments in _req_mod(,barrier_acked)
 * and receive_Barrier.
 *
 * Move entries from net_ee to done_ee, if ready.
 * Grab done_ee, call all callbacks, free the entries.
 * The callbacks typically send out ACKs.
 */
static int drbd_process_done_ee(struct drbd_conf *mdev)
{
	LIST_HEAD(work_list);
	LIST_HEAD(reclaimed);
	struct drbd_epoch_entry *e, *t;
	int ok = (mdev->state.conn >= C_WF_REPORT_PARAMS);

	spin_lock_irq(&mdev->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	list_splice_init(&mdev->done_ee, &work_list);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, e);

	/* possible callbacks here:
	 * e_end_block, and e_end_resync_block, e_send_discard_ack.
	 * all ignore the last argument.
	 */
	list_for_each_entry_safe(e, t, &work_list, w.list) {
		/* list_del not necessary, next/prev members not touched */
		ok = e->w.cb(mdev, &e->w, !ok) && ok;
		drbd_free_ee(mdev, e);
	}
	wake_up(&mdev->ee_wait);

	return ok;
}

void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
	DEFINE_WAIT(wait);

	/* avoids spin_lock/unlock
	 * and calling prepare_to_wait in the fast path */
	while (!list_empty(head)) {
		prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mdev->req_lock);
		io_schedule();
		finish_wait(&mdev->ee_wait, &wait);
		spin_lock_irq(&mdev->req_lock);
	}
}

void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
	spin_lock_irq(&mdev->req_lock);
	_drbd_wait_ee_list_empty(mdev, head);
	spin_unlock_irq(&mdev->req_lock);
}

/* see also kernel_accept; which is only present since 2.6.18.
 * also we want to log which part of it failed, exactly */
static int drbd_accept(struct drbd_conf *mdev, const char **what,
		struct socket *sock, struct socket **newsock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	*what = "listen";
	err = sock->ops->listen(sock, 5);
	if (err < 0)
		goto out;

	*what = "sock_create_lite";
	err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
			       newsock);
	if (err < 0)
		goto out;

	*what = "accept";
	err = sock->ops->accept(sock, *newsock, 0);
	if (err < 0) {
		sock_release(*newsock);
		*newsock = NULL;
		goto out;
	}
	(*newsock)->ops = sock->ops;

out:
	return err;
}

static int drbd_recv_short(struct drbd_conf *mdev, struct socket *sock,
		    void *buf, size_t size, int flags)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
	set_fs(oldfs);

	return rv;
}

static int drbd_recv(struct drbd_conf *mdev, void *buf, size_t size)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = MSG_WAITALL | MSG_NOSIGNAL
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);

	for (;;) {
		rv = sock_recvmsg(mdev->data.socket, &msg, size, msg.msg_flags);
		if (rv == size)
			break;

		/* Note:
		 * ECONNRESET	other side closed the connection
		 * ERESTARTSYS	(on sock) we got a signal
		 */

		if (rv < 0) {
			if (rv == -ECONNRESET)
				dev_info(DEV, "sock was reset by peer\n");
			else if (rv != -ERESTARTSYS)
				dev_err(DEV, "sock_recvmsg returned %d\n", rv);
			break;
		} else if (rv == 0) {
			dev_info(DEV, "sock was shut down by peer\n");
			break;
		} else {
			/* signal came in, or peer/link went down,
			 * after we read a partial message
			 */
			/* D_ASSERT(signal_pending(current)); */
			break;
		}
	};

	set_fs(oldfs);

	if (rv != size)
		drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));

	return rv;
}

/* quoting tcp(7):
 *   On individual connections, the socket buffer size must be set prior to the
 *   listen(2) or connect(2) calls in order to have it take effect.
 * This is our wrapper to do so.
 */
static void drbd_setbufsize(struct socket *sock, unsigned int snd,
			    unsigned int rcv)
{
	/* open coded SO_SNDBUF, SO_RCVBUF */
	if (snd) {
		sock->sk->sk_sndbuf = snd;
		sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
	}
	if (rcv) {
		sock->sk->sk_rcvbuf = rcv;
		sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
	}
}

static struct socket *drbd_try_connect(struct drbd_conf *mdev)
{
	const char *what;
	struct socket *sock;
	struct sockaddr_in6 src_in6;
	int err;
	int disconnect_on_error = 1;

	if (!get_net_conf(mdev))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
		SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err < 0) {
		sock = NULL;
		goto out;
	}

	sock->sk->sk_rcvtimeo =
	sock->sk->sk_sndtimeo = mdev->net_conf->try_connect_int*HZ;
	drbd_setbufsize(sock, mdev->net_conf->sndbuf_size,
			mdev->net_conf->rcvbuf_size);

	/* explicitly bind to the configured IP as source IP
	 * for the outgoing connections.
	 * This is needed for multihomed hosts and to be
	 * able to use lo: interfaces for drbd.
	 * Make sure to use 0 as port number, so linux selects
	 * a free one dynamically.
	 */
	memcpy(&src_in6, mdev->net_conf->my_addr,
	       min_t(int, mdev->net_conf->my_addr_len, sizeof(src_in6)));
	if (((struct sockaddr *)mdev->net_conf->my_addr)->sa_family == AF_INET6)
		src_in6.sin6_port = 0;
	else
		((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */

	what = "bind before connect";
	err = sock->ops->bind(sock,
			      (struct sockaddr *) &src_in6,
			      mdev->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	/* connect may fail, peer not yet available.
	 * stay C_WF_CONNECTION, don't go Disconnecting! */
	disconnect_on_error = 0;
	what = "connect";
	err = sock->ops->connect(sock,
				 (struct sockaddr *)mdev->net_conf->peer_addr,
				 mdev->net_conf->peer_addr_len, 0);

out:
	if (err < 0) {
		if (sock) {
			sock_release(sock);
			sock = NULL;
		}
		switch (-err) {
			/* timeout, busy, signal pending */
		case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
		case EINTR: case ERESTARTSYS:
			/* peer not (yet) available, network problem */
		case ECONNREFUSED: case ENETUNREACH:
		case EHOSTDOWN: case EHOSTUNREACH:
			disconnect_on_error = 0;
			break;
		default:
			dev_err(DEV, "%s failed, err = %d\n", what, err);
		}
		if (disconnect_on_error)
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
	}
	put_net_conf(mdev);
	return sock;
}

static struct socket *drbd_wait_for_connect(struct drbd_conf *mdev)
{
	int timeo, err;
	struct socket *s_estab = NULL, *s_listen;
	const char *what;

	if (!get_net_conf(mdev))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
		SOCK_STREAM, IPPROTO_TCP, &s_listen);
	if (err) {
		s_listen = NULL;
		goto out;
	}

	timeo = mdev->net_conf->try_connect_int * HZ;
	timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */

	s_listen->sk->sk_reuse = 1; /* SO_REUSEADDR */
	s_listen->sk->sk_rcvtimeo = timeo;
	s_listen->sk->sk_sndtimeo = timeo;
	drbd_setbufsize(s_listen, mdev->net_conf->sndbuf_size,
			mdev->net_conf->rcvbuf_size);

	what = "bind before listen";
	err = s_listen->ops->bind(s_listen,
			      (struct sockaddr *) mdev->net_conf->my_addr,
			      mdev->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	err = drbd_accept(mdev, &what, s_listen, &s_estab);

out:
	if (s_listen)
		sock_release(s_listen);
	if (err < 0) {
		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
			dev_err(DEV, "%s failed, err = %d\n", what, err);
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		}
	}
	put_net_conf(mdev);

	return s_estab;
}

static int drbd_send_fp(struct drbd_conf *mdev,
	struct socket *sock, enum drbd_packets cmd)
{
	struct p_header80 *h = &mdev->data.sbuf.header.h80;

	return _drbd_send_cmd(mdev, sock, cmd, h, sizeof(*h), 0);
}

static enum drbd_packets drbd_recv_fp(struct drbd_conf *mdev, struct socket *sock)
{
	struct p_header80 *h = &mdev->data.rbuf.header.h80;
	int rr;

	rr = drbd_recv_short(mdev, sock, h, sizeof(*h), 0);

	if (rr == sizeof(*h) && h->magic == BE_DRBD_MAGIC)
		return be16_to_cpu(h->command);

	return 0xffff;
}

/**
 * drbd_socket_okay() - Free the socket if its connection is not okay
 * @mdev: DRBD device.
 * @sock: pointer to the pointer to the socket.
 */
static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
{
	int rr;
	char tb[4];

	if (!*sock)
		return false;

	rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);

	if (rr > 0 || rr == -EAGAIN) {
		return true;
	} else {
		sock_release(*sock);
		*sock = NULL;
		return false;
	}
}

/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 *  -2 We do not have a network config...
 */
static int drbd_connect(struct drbd_conf *mdev)
{
	struct socket *s, *sock, *msock;
	int try, h, ok;

	D_ASSERT(!mdev->data.socket);

	if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)
		return -2;

	clear_bit(DISCARD_CONCURRENT, &mdev->flags);

	sock  = NULL;
	msock = NULL;

	do {
		for (try = 0;;) {
			/* 3 tries, this should take less than a second! */
			s = drbd_try_connect(mdev);
			if (s || ++try >= 3)
				break;
			/* give the other side time to call bind() & listen() */
			__set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(HZ / 10);
		}

		if (s) {
			if (!sock) {
				drbd_send_fp(mdev, s, P_HAND_SHAKE_S);
				sock = s;
				s = NULL;
			} else if (!msock) {
				drbd_send_fp(mdev, s, P_HAND_SHAKE_M);
				msock = s;
				s = NULL;
			} else {
				dev_err(DEV, "Logic error in drbd_connect()\n");
				goto out_release_sockets;
			}
		}

		if (sock && msock) {
			__set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(HZ / 10);
			ok = drbd_socket_okay(mdev, &sock);
			ok = drbd_socket_okay(mdev, &msock) && ok;
			if (ok)
				break;
		}

retry:
		s = drbd_wait_for_connect(mdev);
		if (s) {
			try = drbd_recv_fp(mdev, s);
			drbd_socket_okay(mdev, &sock);
			drbd_socket_okay(mdev, &msock);
			switch (try) {
			case P_HAND_SHAKE_S:
				if (sock) {
					dev_warn(DEV, "initial packet S crossed\n");
					sock_release(sock);
				}
				sock = s;
				break;
			case P_HAND_SHAKE_M:
				if (msock) {
					dev_warn(DEV, "initial packet M crossed\n");
					sock_release(msock);
				}
				msock = s;
				set_bit(DISCARD_CONCURRENT, &mdev->flags);
				break;
			default:
				dev_warn(DEV, "Error receiving initial packet\n");
				sock_release(s);
				if (random32() & 1)
					goto retry;
			}
		}

		if (mdev->state.conn <= C_DISCONNECTING)
			goto out_release_sockets;
		if (signal_pending(current)) {
			flush_signals(current);
			smp_rmb();
			if (get_t_state(&mdev->receiver) == Exiting)
				goto out_release_sockets;
		}

		if (sock && msock) {
			ok = drbd_socket_okay(mdev, &sock);
			ok = drbd_socket_okay(mdev, &msock) && ok;
			if (ok)
				break;
		}
	} while (1);

	msock->sk->sk_reuse = 1; /* SO_REUSEADDR */
	sock->sk->sk_reuse = 1; /* SO_REUSEADDR */

	sock->sk->sk_allocation = GFP_NOIO;
	msock->sk->sk_allocation = GFP_NOIO;

	sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
	msock->sk->sk_priority = TC_PRIO_INTERACTIVE;

	/* NOT YET ...
	 * sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	 * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	 * first set it to the P_HAND_SHAKE timeout,
	 * which we set to 4x the configured ping_timeout. */
	sock->sk->sk_sndtimeo =
	sock->sk->sk_rcvtimeo = mdev->net_conf->ping_timeo*4*HZ/10;

	msock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	msock->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;

	/* we don't want delays.
	 * we use TCP_CORK where appropriate, though */
	drbd_tcp_nodelay(sock);
	drbd_tcp_nodelay(msock);

	mdev->data.socket = sock;
	mdev->meta.socket = msock;
	mdev->last_received = jiffies;

	D_ASSERT(mdev->asender.task == NULL);

	h = drbd_do_handshake(mdev);
	if (h <= 0)
		return h;

	if (mdev->cram_hmac_tfm) {
		/* drbd_request_state(mdev, NS(conn, WFAuth)); */
		switch (drbd_do_auth(mdev)) {
		case -1:
			dev_err(DEV, "Authentication of peer failed\n");
			return -1;
		case 0:
			dev_err(DEV, "Authentication of peer failed, trying again.\n");
			return 0;
		}
	}

	if (drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS)) < SS_SUCCESS)
		return 0;

	sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;

	atomic_set(&mdev->packet_seq, 0);
	mdev->peer_seq = 0;

	drbd_thread_start(&mdev->asender);

	if (mdev->agreed_pro_version < 95 && get_ldev(mdev)) {
		drbd_setup_queue_param(mdev, DRBD_MAX_SIZE_H80_PACKET);
		put_ldev(mdev);
	}

	if (!drbd_send_protocol(mdev))
		return -1;
	drbd_send_sync_param(mdev, &mdev->sync_conf);
	drbd_send_sizes(mdev, 0, 0);
	drbd_send_uuids(mdev);
	drbd_send_state(mdev);
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	clear_bit(RESIZE_PENDING, &mdev->flags);

	return 1;

out_release_sockets:
	if (sock)
		sock_release(sock);
	if (msock)
		sock_release(msock);
	return -1;
}

static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsigned int *packet_size)
{
	union p_header *h = &mdev->data.rbuf.header;
	int r;

	r = drbd_recv(mdev, h, sizeof(*h));
	if (unlikely(r != sizeof(*h))) {
		dev_err(DEV, "short read expecting header on sock: r=%d\n", r);
		return false;
	}

	if (likely(h->h80.magic == BE_DRBD_MAGIC)) {
		*cmd = be16_to_cpu(h->h80.command);
		*packet_size = be16_to_cpu(h->h80.length);
	} else if (h->h95.magic == BE_DRBD_MAGIC_BIG) {
		*cmd = be16_to_cpu(h->h95.command);
		*packet_size = be32_to_cpu(h->h95.length);
	} else {
		dev_err(DEV, "magic?? on data m: 0x%08x c: %d l: %d\n",
		    be32_to_cpu(h->h80.magic),
		    be16_to_cpu(h->h80.command),
		    be16_to_cpu(h->h80.length));
		return false;
	}
	mdev->last_received = jiffies;

	return true;
}

static void drbd_flush(struct drbd_conf *mdev)
{
	int rv;

	if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
		rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
					NULL);
		if (rv) {
			dev_err(DEV, "local disk flush failed with status %d\n", rv);
			/* would rather check on EOPNOTSUPP, but that is not reliable.
			 * don't try again for ANY return value != 0
			 * if (rv == -EOPNOTSUPP) */
			drbd_bump_write_ordering(mdev, WO_drain_io);
		}
		put_ldev(mdev);
	}
}

/**
 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
 * @mdev: DRBD device.
 * @epoch: Epoch object.
 * @ev: Epoch event.
 */
static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
					       struct drbd_epoch *epoch,
					       enum epoch_event ev)
{
	int epoch_size;
	struct drbd_epoch *next_epoch;
	enum finish_epoch rv = FE_STILL_LIVE;

	spin_lock(&mdev->epoch_lock);
	do {
		next_epoch = NULL;

		epoch_size = atomic_read(&epoch->epoch_size);

		switch (ev & ~EV_CLEANUP) {
		case EV_PUT:
			atomic_dec(&epoch->active);
			break;
		case EV_GOT_BARRIER_NR:
			set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
			break;
		case EV_BECAME_LAST:
			/* nothing to do*/
			break;
		}

		if (epoch_size != 0 &&
		    atomic_read(&epoch->active) == 0 &&
		    test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags)) {
			if (!(ev & EV_CLEANUP)) {
				spin_unlock(&mdev->epoch_lock);
				drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
				spin_lock(&mdev->epoch_lock);
			}
			dec_unacked(mdev);

			if (mdev->current_epoch != epoch) {
				next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
				list_del(&epoch->list);
				ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
				mdev->epochs--;
				kfree(epoch);

				if (rv == FE_STILL_LIVE)
					rv = FE_DESTROYED;
			} else {
				epoch->flags = 0;
				atomic_set(&epoch->epoch_size, 0);
				/* atomic_set(&epoch->active, 0); is already zero */
				if (rv == FE_STILL_LIVE)
					rv = FE_RECYCLED;
				wake_up(&mdev->ee_wait);
			}
		}

		if (!next_epoch)
			break;

		epoch = next_epoch;
	} while (1);

	spin_unlock(&mdev->epoch_lock);

	return rv;
}

/**
 * drbd_bump_write_ordering() - Fall back to another write ordering method
 * @mdev: DRBD device.
 * @wo: Write ordering method to try.
 */
void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) __must_hold(local)
{
	enum write_ordering_e pwo;
	static char *write_ordering_str[] = {
		[WO_none] = "none",
		[WO_drain_io] = "drain",
		[WO_bdev_flush] = "flush",
	};

	pwo = mdev->write_ordering;
	wo = min(pwo, wo);
	if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
		wo = WO_drain_io;
	if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
		wo = WO_none;
	mdev->write_ordering = wo;
	if (pwo != mdev->write_ordering || wo == WO_bdev_flush)
		dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
}

/**
 * drbd_submit_ee()
 * @mdev: DRBD device.
 * @e: epoch entry
 * @rw: flag field, see bio->bi_rw
 */
/* TODO allocate from our own bio_set. */
int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
		const unsigned rw, const int fault_type)
{
	struct bio *bios = NULL;
	struct bio *bio;
	struct page *page = e->pages;
	sector_t sector = e->sector;
	unsigned ds = e->size;
	unsigned n_bios = 0;
	unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT;

	/* In most cases, we will only need one bio.  But in case the lower
	 * level restrictions happen to be different at this offset on this
	 * side than those of the sending peer, we may need to submit the
	 * request in more than one bio. */
next_bio:
	bio = bio_alloc(GFP_NOIO, nr_pages);
	if (!bio) {
		dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
		goto fail;
	}
	/* > e->sector, unless this is the first bio */
	bio->bi_sector = sector;
	bio->bi_bdev = mdev->ldev->backing_bdev;
	bio->bi_rw = rw;
	bio->bi_private = e;
	bio->bi_end_io = drbd_endio_sec;

	bio->bi_next = bios;
	bios = bio;
	++n_bios;

	page_chain_for_each(page) {
		unsigned len = min_t(unsigned, ds, PAGE_SIZE);
		if (!bio_add_page(bio, page, len, 0)) {
			/* a single page must always be possible! */
			BUG_ON(bio->bi_vcnt == 0);
			goto next_bio;
		}
		ds -= len;
		sector += len >> 9;
		--nr_pages;
	}
	D_ASSERT(page == NULL);
	D_ASSERT(ds == 0);

	atomic_set(&e->pending_bios, n_bios);
	do {
		bio = bios;
		bios = bios->bi_next;
		bio->bi_next = NULL;

		drbd_generic_make_request(mdev, fault_type, bio);
	} while (bios);
	return 0;

fail:
	while (bios) {
		bio = bios;
		bios = bios->bi_next;
		bio_put(bio);
	}
	return -ENOMEM;
}

static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
{
	int rv;
	struct p_barrier *p = &mdev->data.rbuf.barrier;
	struct drbd_epoch *epoch;

	inc_unacked(mdev);

	mdev->current_epoch->barrier_nr = p->barrier;
	rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);

	/* P_BARRIER_ACK may imply that the corresponding extent is dropped from
	 * the activity log, which means it would not be resynced in case the
	 * R_PRIMARY crashes now.
	 * Therefore we must send the barrier_ack after the barrier request was
	 * completed. */
*/ 1160b411b363SPhilipp Reisner switch (mdev->write_ordering) { 1161b411b363SPhilipp Reisner case WO_none: 1162b411b363SPhilipp Reisner if (rv == FE_RECYCLED) 116381e84650SAndreas Gruenbacher return true; 1164b411b363SPhilipp Reisner 1165b411b363SPhilipp Reisner /* receiver context, in the writeout path of the other node. 1166b411b363SPhilipp Reisner * avoid potential distributed deadlock */ 1167b411b363SPhilipp Reisner epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO); 11682451fc3bSPhilipp Reisner if (epoch) 11692451fc3bSPhilipp Reisner break; 11702451fc3bSPhilipp Reisner else 1171b411b363SPhilipp Reisner dev_warn(DEV, "Allocation of an epoch failed, slowing down\n"); 11722451fc3bSPhilipp Reisner /* Fall through */ 11732451fc3bSPhilipp Reisner 11742451fc3bSPhilipp Reisner case WO_bdev_flush: 11752451fc3bSPhilipp Reisner case WO_drain_io: 1176b411b363SPhilipp Reisner drbd_wait_ee_list_empty(mdev, &mdev->active_ee); 11772451fc3bSPhilipp Reisner drbd_flush(mdev); 11782451fc3bSPhilipp Reisner 11792451fc3bSPhilipp Reisner if (atomic_read(&mdev->current_epoch->epoch_size)) { 11802451fc3bSPhilipp Reisner epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO); 11812451fc3bSPhilipp Reisner if (epoch) 11822451fc3bSPhilipp Reisner break; 1183b411b363SPhilipp Reisner } 1184b411b363SPhilipp Reisner 11852451fc3bSPhilipp Reisner epoch = mdev->current_epoch; 11862451fc3bSPhilipp Reisner wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0); 11872451fc3bSPhilipp Reisner 11882451fc3bSPhilipp Reisner D_ASSERT(atomic_read(&epoch->active) == 0); 11892451fc3bSPhilipp Reisner D_ASSERT(epoch->flags == 0); 1190b411b363SPhilipp Reisner 119181e84650SAndreas Gruenbacher return true; 11922451fc3bSPhilipp Reisner default: 11932451fc3bSPhilipp Reisner dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering); 119481e84650SAndreas Gruenbacher return false; 1195b411b363SPhilipp Reisner } 1196b411b363SPhilipp Reisner 1197b411b363SPhilipp Reisner epoch->flags = 0; 1198b411b363SPhilipp Reisner atomic_set(&epoch->epoch_size, 0); 1199b411b363SPhilipp Reisner atomic_set(&epoch->active, 0); 1200b411b363SPhilipp Reisner 1201b411b363SPhilipp Reisner spin_lock(&mdev->epoch_lock); 1202b411b363SPhilipp Reisner if (atomic_read(&mdev->current_epoch->epoch_size)) { 1203b411b363SPhilipp Reisner list_add(&epoch->list, &mdev->current_epoch->list); 1204b411b363SPhilipp Reisner mdev->current_epoch = epoch; 1205b411b363SPhilipp Reisner mdev->epochs++; 1206b411b363SPhilipp Reisner } else { 1207b411b363SPhilipp Reisner /* The current_epoch got recycled while we allocated this one... 
*/
1208b411b363SPhilipp Reisner kfree(epoch);
1209b411b363SPhilipp Reisner }
1210b411b363SPhilipp Reisner spin_unlock(&mdev->epoch_lock);
1211b411b363SPhilipp Reisner
121281e84650SAndreas Gruenbacher return true;
1213b411b363SPhilipp Reisner }
1214b411b363SPhilipp Reisner
1215b411b363SPhilipp Reisner /* used from receive_RSDataReply (recv_resync_read)
1216b411b363SPhilipp Reisner * and from receive_Data */
1217b411b363SPhilipp Reisner static struct drbd_epoch_entry *
1218b411b363SPhilipp Reisner read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __must_hold(local)
1219b411b363SPhilipp Reisner {
12206666032aSLars Ellenberg const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
1221b411b363SPhilipp Reisner struct drbd_epoch_entry *e;
1222b411b363SPhilipp Reisner struct page *page;
122345bb912bSLars Ellenberg int dgs, ds, rr;
1224b411b363SPhilipp Reisner void *dig_in = mdev->int_dig_in;
1225b411b363SPhilipp Reisner void *dig_vv = mdev->int_dig_vv;
12266b4388acSPhilipp Reisner unsigned long *data;
1227b411b363SPhilipp Reisner
1228b411b363SPhilipp Reisner dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
1229b411b363SPhilipp Reisner crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
1230b411b363SPhilipp Reisner
1231b411b363SPhilipp Reisner if (dgs) {
1232b411b363SPhilipp Reisner rr = drbd_recv(mdev, dig_in, dgs);
1233b411b363SPhilipp Reisner if (rr != dgs) {
1234b411b363SPhilipp Reisner dev_warn(DEV, "short read receiving data digest: read %d expected %d\n",
1235b411b363SPhilipp Reisner rr, dgs);
1236b411b363SPhilipp Reisner return NULL;
1237b411b363SPhilipp Reisner }
1238b411b363SPhilipp Reisner }
1239b411b363SPhilipp Reisner
1240b411b363SPhilipp Reisner data_size -= dgs;
1241b411b363SPhilipp Reisner
1242b411b363SPhilipp Reisner ERR_IF(data_size & 0x1ff) return NULL;
12431816a2b4SLars Ellenberg ERR_IF(data_size > DRBD_MAX_BIO_SIZE) return NULL;
1244b411b363SPhilipp Reisner
12456666032aSLars Ellenberg /* even though we trust our peer,
12466666032aSLars Ellenberg * we sometimes have to double check. */
12476666032aSLars Ellenberg if (sector + (data_size>>9) > capacity) {
12486666032aSLars Ellenberg dev_err(DEV, "capacity: %llus < sector: %llus + size: %u\n",
12496666032aSLars Ellenberg (unsigned long long)capacity,
12506666032aSLars Ellenberg (unsigned long long)sector, data_size);
12516666032aSLars Ellenberg return NULL;
12526666032aSLars Ellenberg }
12536666032aSLars Ellenberg
1254b411b363SPhilipp Reisner /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
1255b411b363SPhilipp Reisner * "criss-cross" setup, that might cause write-out on some other DRBD,
1256b411b363SPhilipp Reisner * which in turn might block on the other node at this very place.
*/ 1257b411b363SPhilipp Reisner e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO); 1258b411b363SPhilipp Reisner if (!e) 1259b411b363SPhilipp Reisner return NULL; 126045bb912bSLars Ellenberg 1261b411b363SPhilipp Reisner ds = data_size; 126245bb912bSLars Ellenberg page = e->pages; 126345bb912bSLars Ellenberg page_chain_for_each(page) { 126445bb912bSLars Ellenberg unsigned len = min_t(int, ds, PAGE_SIZE); 12656b4388acSPhilipp Reisner data = kmap(page); 126645bb912bSLars Ellenberg rr = drbd_recv(mdev, data, len); 12670cf9d27eSAndreas Gruenbacher if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) { 12686b4388acSPhilipp Reisner dev_err(DEV, "Fault injection: Corrupting data on receive\n"); 12696b4388acSPhilipp Reisner data[0] = data[0] ^ (unsigned long)-1; 12706b4388acSPhilipp Reisner } 1271b411b363SPhilipp Reisner kunmap(page); 127245bb912bSLars Ellenberg if (rr != len) { 1273b411b363SPhilipp Reisner drbd_free_ee(mdev, e); 1274b411b363SPhilipp Reisner dev_warn(DEV, "short read receiving data: read %d expected %d\n", 127545bb912bSLars Ellenberg rr, len); 1276b411b363SPhilipp Reisner return NULL; 1277b411b363SPhilipp Reisner } 1278b411b363SPhilipp Reisner ds -= rr; 1279b411b363SPhilipp Reisner } 1280b411b363SPhilipp Reisner 1281b411b363SPhilipp Reisner if (dgs) { 128245bb912bSLars Ellenberg drbd_csum_ee(mdev, mdev->integrity_r_tfm, e, dig_vv); 1283b411b363SPhilipp Reisner if (memcmp(dig_in, dig_vv, dgs)) { 1284470be44aSLars Ellenberg dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n", 1285470be44aSLars Ellenberg (unsigned long long)sector, data_size); 1286b411b363SPhilipp Reisner drbd_bcast_ee(mdev, "digest failed", 1287b411b363SPhilipp Reisner dgs, dig_in, dig_vv, e); 1288b411b363SPhilipp Reisner drbd_free_ee(mdev, e); 1289b411b363SPhilipp Reisner return NULL; 1290b411b363SPhilipp Reisner } 1291b411b363SPhilipp Reisner } 1292b411b363SPhilipp Reisner mdev->recv_cnt += data_size>>9; 1293b411b363SPhilipp Reisner return e; 1294b411b363SPhilipp Reisner } 1295b411b363SPhilipp Reisner 1296b411b363SPhilipp Reisner /* drbd_drain_block() just takes a data block 1297b411b363SPhilipp Reisner * out of the socket input buffer, and discards it. 
1298b411b363SPhilipp Reisner */ 1299b411b363SPhilipp Reisner static int drbd_drain_block(struct drbd_conf *mdev, int data_size) 1300b411b363SPhilipp Reisner { 1301b411b363SPhilipp Reisner struct page *page; 1302b411b363SPhilipp Reisner int rr, rv = 1; 1303b411b363SPhilipp Reisner void *data; 1304b411b363SPhilipp Reisner 1305c3470cdeSLars Ellenberg if (!data_size) 130681e84650SAndreas Gruenbacher return true; 1307c3470cdeSLars Ellenberg 130845bb912bSLars Ellenberg page = drbd_pp_alloc(mdev, 1, 1); 1309b411b363SPhilipp Reisner 1310b411b363SPhilipp Reisner data = kmap(page); 1311b411b363SPhilipp Reisner while (data_size) { 1312b411b363SPhilipp Reisner rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE)); 1313b411b363SPhilipp Reisner if (rr != min_t(int, data_size, PAGE_SIZE)) { 1314b411b363SPhilipp Reisner rv = 0; 1315b411b363SPhilipp Reisner dev_warn(DEV, "short read receiving data: read %d expected %d\n", 1316b411b363SPhilipp Reisner rr, min_t(int, data_size, PAGE_SIZE)); 1317b411b363SPhilipp Reisner break; 1318b411b363SPhilipp Reisner } 1319b411b363SPhilipp Reisner data_size -= rr; 1320b411b363SPhilipp Reisner } 1321b411b363SPhilipp Reisner kunmap(page); 1322435f0740SLars Ellenberg drbd_pp_free(mdev, page, 0); 1323b411b363SPhilipp Reisner return rv; 1324b411b363SPhilipp Reisner } 1325b411b363SPhilipp Reisner 1326b411b363SPhilipp Reisner static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req, 1327b411b363SPhilipp Reisner sector_t sector, int data_size) 1328b411b363SPhilipp Reisner { 1329b411b363SPhilipp Reisner struct bio_vec *bvec; 1330b411b363SPhilipp Reisner struct bio *bio; 1331b411b363SPhilipp Reisner int dgs, rr, i, expect; 1332b411b363SPhilipp Reisner void *dig_in = mdev->int_dig_in; 1333b411b363SPhilipp Reisner void *dig_vv = mdev->int_dig_vv; 1334b411b363SPhilipp Reisner 1335b411b363SPhilipp Reisner dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ? 1336b411b363SPhilipp Reisner crypto_hash_digestsize(mdev->integrity_r_tfm) : 0; 1337b411b363SPhilipp Reisner 1338b411b363SPhilipp Reisner if (dgs) { 1339b411b363SPhilipp Reisner rr = drbd_recv(mdev, dig_in, dgs); 1340b411b363SPhilipp Reisner if (rr != dgs) { 1341b411b363SPhilipp Reisner dev_warn(DEV, "short read receiving data reply digest: read %d expected %d\n", 1342b411b363SPhilipp Reisner rr, dgs); 1343b411b363SPhilipp Reisner return 0; 1344b411b363SPhilipp Reisner } 1345b411b363SPhilipp Reisner } 1346b411b363SPhilipp Reisner 1347b411b363SPhilipp Reisner data_size -= dgs; 1348b411b363SPhilipp Reisner 1349b411b363SPhilipp Reisner /* optimistically update recv_cnt. if receiving fails below, 1350b411b363SPhilipp Reisner * we disconnect anyways, and counters will be reset. 
*/ 1351b411b363SPhilipp Reisner mdev->recv_cnt += data_size>>9; 1352b411b363SPhilipp Reisner 1353b411b363SPhilipp Reisner bio = req->master_bio; 1354b411b363SPhilipp Reisner D_ASSERT(sector == bio->bi_sector); 1355b411b363SPhilipp Reisner 1356b411b363SPhilipp Reisner bio_for_each_segment(bvec, bio, i) { 1357b411b363SPhilipp Reisner expect = min_t(int, data_size, bvec->bv_len); 1358b411b363SPhilipp Reisner rr = drbd_recv(mdev, 1359b411b363SPhilipp Reisner kmap(bvec->bv_page)+bvec->bv_offset, 1360b411b363SPhilipp Reisner expect); 1361b411b363SPhilipp Reisner kunmap(bvec->bv_page); 1362b411b363SPhilipp Reisner if (rr != expect) { 1363b411b363SPhilipp Reisner dev_warn(DEV, "short read receiving data reply: " 1364b411b363SPhilipp Reisner "read %d expected %d\n", 1365b411b363SPhilipp Reisner rr, expect); 1366b411b363SPhilipp Reisner return 0; 1367b411b363SPhilipp Reisner } 1368b411b363SPhilipp Reisner data_size -= rr; 1369b411b363SPhilipp Reisner } 1370b411b363SPhilipp Reisner 1371b411b363SPhilipp Reisner if (dgs) { 137245bb912bSLars Ellenberg drbd_csum_bio(mdev, mdev->integrity_r_tfm, bio, dig_vv); 1373b411b363SPhilipp Reisner if (memcmp(dig_in, dig_vv, dgs)) { 1374b411b363SPhilipp Reisner dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n"); 1375b411b363SPhilipp Reisner return 0; 1376b411b363SPhilipp Reisner } 1377b411b363SPhilipp Reisner } 1378b411b363SPhilipp Reisner 1379b411b363SPhilipp Reisner D_ASSERT(data_size == 0); 1380b411b363SPhilipp Reisner return 1; 1381b411b363SPhilipp Reisner } 1382b411b363SPhilipp Reisner 1383b411b363SPhilipp Reisner /* e_end_resync_block() is called via 1384b411b363SPhilipp Reisner * drbd_process_done_ee() by asender only */ 1385b411b363SPhilipp Reisner static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int unused) 1386b411b363SPhilipp Reisner { 1387b411b363SPhilipp Reisner struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w; 1388b411b363SPhilipp Reisner sector_t sector = e->sector; 1389b411b363SPhilipp Reisner int ok; 1390b411b363SPhilipp Reisner 1391b411b363SPhilipp Reisner D_ASSERT(hlist_unhashed(&e->colision)); 1392b411b363SPhilipp Reisner 139345bb912bSLars Ellenberg if (likely((e->flags & EE_WAS_ERROR) == 0)) { 1394b411b363SPhilipp Reisner drbd_set_in_sync(mdev, sector, e->size); 1395b411b363SPhilipp Reisner ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, e); 1396b411b363SPhilipp Reisner } else { 1397b411b363SPhilipp Reisner /* Record failure to sync */ 1398b411b363SPhilipp Reisner drbd_rs_failed_io(mdev, sector, e->size); 1399b411b363SPhilipp Reisner 1400b411b363SPhilipp Reisner ok = drbd_send_ack(mdev, P_NEG_ACK, e); 1401b411b363SPhilipp Reisner } 1402b411b363SPhilipp Reisner dec_unacked(mdev); 1403b411b363SPhilipp Reisner 1404b411b363SPhilipp Reisner return ok; 1405b411b363SPhilipp Reisner } 1406b411b363SPhilipp Reisner 1407b411b363SPhilipp Reisner static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local) 1408b411b363SPhilipp Reisner { 1409b411b363SPhilipp Reisner struct drbd_epoch_entry *e; 1410b411b363SPhilipp Reisner 1411b411b363SPhilipp Reisner e = read_in_block(mdev, ID_SYNCER, sector, data_size); 141245bb912bSLars Ellenberg if (!e) 141345bb912bSLars Ellenberg goto fail; 1414b411b363SPhilipp Reisner 1415b411b363SPhilipp Reisner dec_rs_pending(mdev); 1416b411b363SPhilipp Reisner 1417b411b363SPhilipp Reisner inc_unacked(mdev); 1418b411b363SPhilipp Reisner /* corresponding dec_unacked() in e_end_resync_block() 1419b411b363SPhilipp Reisner * respective _drbd_clear_done_ee */ 
1420b411b363SPhilipp Reisner 142145bb912bSLars Ellenberg e->w.cb = e_end_resync_block; 142245bb912bSLars Ellenberg 1423b411b363SPhilipp Reisner spin_lock_irq(&mdev->req_lock); 1424b411b363SPhilipp Reisner list_add(&e->w.list, &mdev->sync_ee); 1425b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 1426b411b363SPhilipp Reisner 14270f0601f4SLars Ellenberg atomic_add(data_size >> 9, &mdev->rs_sect_ev); 142845bb912bSLars Ellenberg if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0) 142981e84650SAndreas Gruenbacher return true; 143045bb912bSLars Ellenberg 143122cc37a9SLars Ellenberg /* drbd_submit_ee currently fails for one reason only: 143222cc37a9SLars Ellenberg * not being able to allocate enough bios. 143322cc37a9SLars Ellenberg * Is dropping the connection going to help? */ 143422cc37a9SLars Ellenberg spin_lock_irq(&mdev->req_lock); 143522cc37a9SLars Ellenberg list_del(&e->w.list); 143622cc37a9SLars Ellenberg spin_unlock_irq(&mdev->req_lock); 143722cc37a9SLars Ellenberg 143845bb912bSLars Ellenberg drbd_free_ee(mdev, e); 143945bb912bSLars Ellenberg fail: 144045bb912bSLars Ellenberg put_ldev(mdev); 144181e84650SAndreas Gruenbacher return false; 1442b411b363SPhilipp Reisner } 1443b411b363SPhilipp Reisner 144402918be2SPhilipp Reisner static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) 1445b411b363SPhilipp Reisner { 1446b411b363SPhilipp Reisner struct drbd_request *req; 1447b411b363SPhilipp Reisner sector_t sector; 1448b411b363SPhilipp Reisner int ok; 144902918be2SPhilipp Reisner struct p_data *p = &mdev->data.rbuf.data; 1450b411b363SPhilipp Reisner 1451b411b363SPhilipp Reisner sector = be64_to_cpu(p->sector); 1452b411b363SPhilipp Reisner 1453b411b363SPhilipp Reisner spin_lock_irq(&mdev->req_lock); 1454b411b363SPhilipp Reisner req = _ar_id_to_req(mdev, p->block_id, sector); 1455b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 1456b411b363SPhilipp Reisner if (unlikely(!req)) { 1457b411b363SPhilipp Reisner dev_err(DEV, "Got a corrupt block_id/sector pair(1).\n"); 145881e84650SAndreas Gruenbacher return false; 1459b411b363SPhilipp Reisner } 1460b411b363SPhilipp Reisner 1461b411b363SPhilipp Reisner /* hlist_del(&req->colision) is done in _req_may_be_done, to avoid 1462b411b363SPhilipp Reisner * special casing it there for the various failure cases. 1463b411b363SPhilipp Reisner * still no race with drbd_fail_pending_reads */ 1464b411b363SPhilipp Reisner ok = recv_dless_read(mdev, req, sector, data_size); 1465b411b363SPhilipp Reisner 1466b411b363SPhilipp Reisner if (ok) 1467b411b363SPhilipp Reisner req_mod(req, data_received); 1468b411b363SPhilipp Reisner /* else: nothing. handled from drbd_disconnect... 
1469b411b363SPhilipp Reisner * I don't think we may complete this just yet 1470b411b363SPhilipp Reisner * in case we are "on-disconnect: freeze" */ 1471b411b363SPhilipp Reisner 1472b411b363SPhilipp Reisner return ok; 1473b411b363SPhilipp Reisner } 1474b411b363SPhilipp Reisner 147502918be2SPhilipp Reisner static int receive_RSDataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) 1476b411b363SPhilipp Reisner { 1477b411b363SPhilipp Reisner sector_t sector; 1478b411b363SPhilipp Reisner int ok; 147902918be2SPhilipp Reisner struct p_data *p = &mdev->data.rbuf.data; 1480b411b363SPhilipp Reisner 1481b411b363SPhilipp Reisner sector = be64_to_cpu(p->sector); 1482b411b363SPhilipp Reisner D_ASSERT(p->block_id == ID_SYNCER); 1483b411b363SPhilipp Reisner 1484b411b363SPhilipp Reisner if (get_ldev(mdev)) { 1485b411b363SPhilipp Reisner /* data is submitted to disk within recv_resync_read. 1486b411b363SPhilipp Reisner * corresponding put_ldev done below on error, 1487b411b363SPhilipp Reisner * or in drbd_endio_write_sec. */ 1488b411b363SPhilipp Reisner ok = recv_resync_read(mdev, sector, data_size); 1489b411b363SPhilipp Reisner } else { 1490b411b363SPhilipp Reisner if (__ratelimit(&drbd_ratelimit_state)) 1491b411b363SPhilipp Reisner dev_err(DEV, "Can not write resync data to local disk.\n"); 1492b411b363SPhilipp Reisner 1493b411b363SPhilipp Reisner ok = drbd_drain_block(mdev, data_size); 1494b411b363SPhilipp Reisner 14952b2bf214SLars Ellenberg drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size); 1496b411b363SPhilipp Reisner } 1497b411b363SPhilipp Reisner 1498778f271dSPhilipp Reisner atomic_add(data_size >> 9, &mdev->rs_sect_in); 1499778f271dSPhilipp Reisner 1500b411b363SPhilipp Reisner return ok; 1501b411b363SPhilipp Reisner } 1502b411b363SPhilipp Reisner 1503b411b363SPhilipp Reisner /* e_end_block() is called via drbd_process_done_ee(). 1504b411b363SPhilipp Reisner * this means this function only runs in the asender thread 1505b411b363SPhilipp Reisner */ 1506b411b363SPhilipp Reisner static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel) 1507b411b363SPhilipp Reisner { 1508b411b363SPhilipp Reisner struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w; 1509b411b363SPhilipp Reisner sector_t sector = e->sector; 1510b411b363SPhilipp Reisner int ok = 1, pcmd; 1511b411b363SPhilipp Reisner 1512b411b363SPhilipp Reisner if (mdev->net_conf->wire_protocol == DRBD_PROT_C) { 151345bb912bSLars Ellenberg if (likely((e->flags & EE_WAS_ERROR) == 0)) { 1514b411b363SPhilipp Reisner pcmd = (mdev->state.conn >= C_SYNC_SOURCE && 1515b411b363SPhilipp Reisner mdev->state.conn <= C_PAUSED_SYNC_T && 1516b411b363SPhilipp Reisner e->flags & EE_MAY_SET_IN_SYNC) ? 1517b411b363SPhilipp Reisner P_RS_WRITE_ACK : P_WRITE_ACK; 1518b411b363SPhilipp Reisner ok &= drbd_send_ack(mdev, pcmd, e); 1519b411b363SPhilipp Reisner if (pcmd == P_RS_WRITE_ACK) 1520b411b363SPhilipp Reisner drbd_set_in_sync(mdev, sector, e->size); 1521b411b363SPhilipp Reisner } else { 1522b411b363SPhilipp Reisner ok = drbd_send_ack(mdev, P_NEG_ACK, e); 1523b411b363SPhilipp Reisner /* we expect it to be marked out of sync anyways... 1524b411b363SPhilipp Reisner * maybe assert this? */ 1525b411b363SPhilipp Reisner } 1526b411b363SPhilipp Reisner dec_unacked(mdev); 1527b411b363SPhilipp Reisner } 1528b411b363SPhilipp Reisner /* we delete from the conflict detection hash _after_ we sent out the 1529b411b363SPhilipp Reisner * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. 
*/
1530b411b363SPhilipp Reisner if (mdev->net_conf->two_primaries) {
1531b411b363SPhilipp Reisner spin_lock_irq(&mdev->req_lock);
1532b411b363SPhilipp Reisner D_ASSERT(!hlist_unhashed(&e->colision));
1533b411b363SPhilipp Reisner hlist_del_init(&e->colision);
1534b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock);
1535b411b363SPhilipp Reisner } else {
1536b411b363SPhilipp Reisner D_ASSERT(hlist_unhashed(&e->colision));
1537b411b363SPhilipp Reisner }
1538b411b363SPhilipp Reisner
1539b411b363SPhilipp Reisner drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
1540b411b363SPhilipp Reisner
1541b411b363SPhilipp Reisner return ok;
1542b411b363SPhilipp Reisner }
1543b411b363SPhilipp Reisner
1544b411b363SPhilipp Reisner static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int unused)
1545b411b363SPhilipp Reisner {
1546b411b363SPhilipp Reisner struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
1547b411b363SPhilipp Reisner int ok = 1;
1548b411b363SPhilipp Reisner
1549b411b363SPhilipp Reisner D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
1550b411b363SPhilipp Reisner ok = drbd_send_ack(mdev, P_DISCARD_ACK, e);
1551b411b363SPhilipp Reisner
1552b411b363SPhilipp Reisner spin_lock_irq(&mdev->req_lock);
1553b411b363SPhilipp Reisner D_ASSERT(!hlist_unhashed(&e->colision));
1554b411b363SPhilipp Reisner hlist_del_init(&e->colision);
1555b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock);
1556b411b363SPhilipp Reisner
1557b411b363SPhilipp Reisner dec_unacked(mdev);
1558b411b363SPhilipp Reisner
1559b411b363SPhilipp Reisner return ok;
1560b411b363SPhilipp Reisner }
1561b411b363SPhilipp Reisner
1562b411b363SPhilipp Reisner /* Called from receive_Data.
1563b411b363SPhilipp Reisner * Synchronize packets on sock with packets on msock.
1564b411b363SPhilipp Reisner *
1565b411b363SPhilipp Reisner * This is here so even when a P_DATA packet traveling via sock overtook an Ack
1566b411b363SPhilipp Reisner * packet traveling on msock, they are still processed in the order they have
1567b411b363SPhilipp Reisner * been sent.
1568b411b363SPhilipp Reisner *
1569b411b363SPhilipp Reisner * Note: we don't care for Ack packets overtaking P_DATA packets.
1570b411b363SPhilipp Reisner *
1571b411b363SPhilipp Reisner * In case packet_seq is larger than mdev->peer_seq, there are
1572b411b363SPhilipp Reisner * outstanding packets on the msock. We wait for them to arrive.
1573b411b363SPhilipp Reisner * In case we are the logically next packet, we update mdev->peer_seq
1574b411b363SPhilipp Reisner * ourselves. Correctly handles 32bit wrap around.
1575b411b363SPhilipp Reisner *
1576b411b363SPhilipp Reisner * Assume we have a 10 GBit connection, that is about 1<<30 bytes per second,
1577b411b363SPhilipp Reisner * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
1578b411b363SPhilipp Reisner * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
1579b411b363SPhilipp Reisner * 1<<11 == 2048 seconds aka ages for the 32bit wrap around...
1580b411b363SPhilipp Reisner *
1581b411b363SPhilipp Reisner * returns 0 if we may process the packet,
1582b411b363SPhilipp Reisner * -ERESTARTSYS if we were interrupted (by disconnect signal).
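 *
 * A minimal illustrative sketch (an assumption about the helper, which is
 * defined elsewhere in this file and may differ in detail): a wrap-safe
 * "less than or equal" on u32 sequence numbers is typically written as a
 * signed difference, e.g.
 *
 *	static inline int seq_le(u32 a, u32 b)
 *	{
 *		return (s32)(a - b) <= 0;
 *	}
 *
 * With such a definition the seq_le(packet_seq, mdev->peer_seq+1) check
 * below stays correct across the wrap around.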
*/ 1583b411b363SPhilipp Reisner static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq) 1584b411b363SPhilipp Reisner { 1585b411b363SPhilipp Reisner DEFINE_WAIT(wait); 1586b411b363SPhilipp Reisner unsigned int p_seq; 1587b411b363SPhilipp Reisner long timeout; 1588b411b363SPhilipp Reisner int ret = 0; 1589b411b363SPhilipp Reisner spin_lock(&mdev->peer_seq_lock); 1590b411b363SPhilipp Reisner for (;;) { 1591b411b363SPhilipp Reisner prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE); 1592b411b363SPhilipp Reisner if (seq_le(packet_seq, mdev->peer_seq+1)) 1593b411b363SPhilipp Reisner break; 1594b411b363SPhilipp Reisner if (signal_pending(current)) { 1595b411b363SPhilipp Reisner ret = -ERESTARTSYS; 1596b411b363SPhilipp Reisner break; 1597b411b363SPhilipp Reisner } 1598b411b363SPhilipp Reisner p_seq = mdev->peer_seq; 1599b411b363SPhilipp Reisner spin_unlock(&mdev->peer_seq_lock); 1600b411b363SPhilipp Reisner timeout = schedule_timeout(30*HZ); 1601b411b363SPhilipp Reisner spin_lock(&mdev->peer_seq_lock); 1602b411b363SPhilipp Reisner if (timeout == 0 && p_seq == mdev->peer_seq) { 1603b411b363SPhilipp Reisner ret = -ETIMEDOUT; 1604b411b363SPhilipp Reisner dev_err(DEV, "ASSERT FAILED waited 30 seconds for sequence update, forcing reconnect\n"); 1605b411b363SPhilipp Reisner break; 1606b411b363SPhilipp Reisner } 1607b411b363SPhilipp Reisner } 1608b411b363SPhilipp Reisner finish_wait(&mdev->seq_wait, &wait); 1609b411b363SPhilipp Reisner if (mdev->peer_seq+1 == packet_seq) 1610b411b363SPhilipp Reisner mdev->peer_seq++; 1611b411b363SPhilipp Reisner spin_unlock(&mdev->peer_seq_lock); 1612b411b363SPhilipp Reisner return ret; 1613b411b363SPhilipp Reisner } 1614b411b363SPhilipp Reisner 1615688593c5SLars Ellenberg /* see also bio_flags_to_wire() 1616688593c5SLars Ellenberg * DRBD_REQ_*, because we need to semantically map the flags to data packet 1617688593c5SLars Ellenberg * flags and back. We may replicate to other kernel versions. */ 1618688593c5SLars Ellenberg static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf) 161976d2e7ecSPhilipp Reisner { 162076d2e7ecSPhilipp Reisner return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) | 162176d2e7ecSPhilipp Reisner (dpf & DP_FUA ? REQ_FUA : 0) | 1622688593c5SLars Ellenberg (dpf & DP_FLUSH ? REQ_FLUSH : 0) | 162376d2e7ecSPhilipp Reisner (dpf & DP_DISCARD ? 
REQ_DISCARD : 0); 162476d2e7ecSPhilipp Reisner } 162576d2e7ecSPhilipp Reisner 1626b411b363SPhilipp Reisner /* mirrored write */ 162702918be2SPhilipp Reisner static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) 1628b411b363SPhilipp Reisner { 1629b411b363SPhilipp Reisner sector_t sector; 1630b411b363SPhilipp Reisner struct drbd_epoch_entry *e; 163102918be2SPhilipp Reisner struct p_data *p = &mdev->data.rbuf.data; 1632b411b363SPhilipp Reisner int rw = WRITE; 1633b411b363SPhilipp Reisner u32 dp_flags; 1634b411b363SPhilipp Reisner 1635b411b363SPhilipp Reisner if (!get_ldev(mdev)) { 1636b411b363SPhilipp Reisner if (__ratelimit(&drbd_ratelimit_state)) 1637b411b363SPhilipp Reisner dev_err(DEV, "Can not write mirrored data block " 1638b411b363SPhilipp Reisner "to local disk.\n"); 1639b411b363SPhilipp Reisner spin_lock(&mdev->peer_seq_lock); 1640b411b363SPhilipp Reisner if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num)) 1641b411b363SPhilipp Reisner mdev->peer_seq++; 1642b411b363SPhilipp Reisner spin_unlock(&mdev->peer_seq_lock); 1643b411b363SPhilipp Reisner 16442b2bf214SLars Ellenberg drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size); 1645b411b363SPhilipp Reisner atomic_inc(&mdev->current_epoch->epoch_size); 1646b411b363SPhilipp Reisner return drbd_drain_block(mdev, data_size); 1647b411b363SPhilipp Reisner } 1648b411b363SPhilipp Reisner 1649b411b363SPhilipp Reisner /* get_ldev(mdev) successful. 1650b411b363SPhilipp Reisner * Corresponding put_ldev done either below (on various errors), 1651b411b363SPhilipp Reisner * or in drbd_endio_write_sec, if we successfully submit the data at 1652b411b363SPhilipp Reisner * the end of this function. */ 1653b411b363SPhilipp Reisner 1654b411b363SPhilipp Reisner sector = be64_to_cpu(p->sector); 1655b411b363SPhilipp Reisner e = read_in_block(mdev, p->block_id, sector, data_size); 1656b411b363SPhilipp Reisner if (!e) { 1657b411b363SPhilipp Reisner put_ldev(mdev); 165881e84650SAndreas Gruenbacher return false; 1659b411b363SPhilipp Reisner } 1660b411b363SPhilipp Reisner 1661b411b363SPhilipp Reisner e->w.cb = e_end_block; 1662b411b363SPhilipp Reisner 1663688593c5SLars Ellenberg dp_flags = be32_to_cpu(p->dp_flags); 1664688593c5SLars Ellenberg rw |= wire_flags_to_bio(mdev, dp_flags); 1665688593c5SLars Ellenberg 1666688593c5SLars Ellenberg if (dp_flags & DP_MAY_SET_IN_SYNC) 1667688593c5SLars Ellenberg e->flags |= EE_MAY_SET_IN_SYNC; 1668688593c5SLars Ellenberg 1669b411b363SPhilipp Reisner spin_lock(&mdev->epoch_lock); 1670b411b363SPhilipp Reisner e->epoch = mdev->current_epoch; 1671b411b363SPhilipp Reisner atomic_inc(&e->epoch->epoch_size); 1672b411b363SPhilipp Reisner atomic_inc(&e->epoch->active); 1673b411b363SPhilipp Reisner spin_unlock(&mdev->epoch_lock); 1674b411b363SPhilipp Reisner 1675b411b363SPhilipp Reisner /* I'm the receiver, I do hold a net_cnt reference. 
*/ 1676b411b363SPhilipp Reisner if (!mdev->net_conf->two_primaries) { 1677b411b363SPhilipp Reisner spin_lock_irq(&mdev->req_lock); 1678b411b363SPhilipp Reisner } else { 1679b411b363SPhilipp Reisner /* don't get the req_lock yet, 1680b411b363SPhilipp Reisner * we may sleep in drbd_wait_peer_seq */ 1681b411b363SPhilipp Reisner const int size = e->size; 1682b411b363SPhilipp Reisner const int discard = test_bit(DISCARD_CONCURRENT, &mdev->flags); 1683b411b363SPhilipp Reisner DEFINE_WAIT(wait); 1684b411b363SPhilipp Reisner struct drbd_request *i; 1685b411b363SPhilipp Reisner struct hlist_node *n; 1686b411b363SPhilipp Reisner struct hlist_head *slot; 1687b411b363SPhilipp Reisner int first; 1688b411b363SPhilipp Reisner 1689b411b363SPhilipp Reisner D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C); 1690b411b363SPhilipp Reisner BUG_ON(mdev->ee_hash == NULL); 1691b411b363SPhilipp Reisner BUG_ON(mdev->tl_hash == NULL); 1692b411b363SPhilipp Reisner 1693b411b363SPhilipp Reisner /* conflict detection and handling: 1694b411b363SPhilipp Reisner * 1. wait on the sequence number, 1695b411b363SPhilipp Reisner * in case this data packet overtook ACK packets. 1696b411b363SPhilipp Reisner * 2. check our hash tables for conflicting requests. 1697b411b363SPhilipp Reisner * we only need to walk the tl_hash, since an ee can not 1698b411b363SPhilipp Reisner * have a conflict with an other ee: on the submitting 1699b411b363SPhilipp Reisner * node, the corresponding req had already been conflicting, 1700b411b363SPhilipp Reisner * and a conflicting req is never sent. 1701b411b363SPhilipp Reisner * 1702b411b363SPhilipp Reisner * Note: for two_primaries, we are protocol C, 1703b411b363SPhilipp Reisner * so there cannot be any request that is DONE 1704b411b363SPhilipp Reisner * but still on the transfer log. 1705b411b363SPhilipp Reisner * 1706b411b363SPhilipp Reisner * unconditionally add to the ee_hash. 1707b411b363SPhilipp Reisner * 1708b411b363SPhilipp Reisner * if no conflicting request is found: 1709b411b363SPhilipp Reisner * submit. 1710b411b363SPhilipp Reisner * 1711b411b363SPhilipp Reisner * if any conflicting request is found 1712b411b363SPhilipp Reisner * that has not yet been acked, 1713b411b363SPhilipp Reisner * AND I have the "discard concurrent writes" flag: 1714b411b363SPhilipp Reisner * queue (via done_ee) the P_DISCARD_ACK; OUT. 1715b411b363SPhilipp Reisner * 1716b411b363SPhilipp Reisner * if any conflicting request is found: 1717b411b363SPhilipp Reisner * block the receiver, waiting on misc_wait 1718b411b363SPhilipp Reisner * until no more conflicting requests are there, 1719b411b363SPhilipp Reisner * or we get interrupted (disconnect). 1720b411b363SPhilipp Reisner * 1721b411b363SPhilipp Reisner * we do not just write after local io completion of those 1722b411b363SPhilipp Reisner * requests, but only after req is done completely, i.e. 1723b411b363SPhilipp Reisner * we wait for the P_DISCARD_ACK to arrive! 1724b411b363SPhilipp Reisner * 1725b411b363SPhilipp Reisner * then proceed normally, i.e. submit. 
1726b411b363SPhilipp Reisner */ 1727b411b363SPhilipp Reisner if (drbd_wait_peer_seq(mdev, be32_to_cpu(p->seq_num))) 1728b411b363SPhilipp Reisner goto out_interrupted; 1729b411b363SPhilipp Reisner 1730b411b363SPhilipp Reisner spin_lock_irq(&mdev->req_lock); 1731b411b363SPhilipp Reisner 1732b411b363SPhilipp Reisner hlist_add_head(&e->colision, ee_hash_slot(mdev, sector)); 1733b411b363SPhilipp Reisner 1734b411b363SPhilipp Reisner #define OVERLAPS overlaps(i->sector, i->size, sector, size) 1735b411b363SPhilipp Reisner slot = tl_hash_slot(mdev, sector); 1736b411b363SPhilipp Reisner first = 1; 1737b411b363SPhilipp Reisner for (;;) { 1738b411b363SPhilipp Reisner int have_unacked = 0; 1739b411b363SPhilipp Reisner int have_conflict = 0; 1740b411b363SPhilipp Reisner prepare_to_wait(&mdev->misc_wait, &wait, 1741b411b363SPhilipp Reisner TASK_INTERRUPTIBLE); 1742b411b363SPhilipp Reisner hlist_for_each_entry(i, n, slot, colision) { 1743b411b363SPhilipp Reisner if (OVERLAPS) { 1744b411b363SPhilipp Reisner /* only ALERT on first iteration, 1745b411b363SPhilipp Reisner * we may be woken up early... */ 1746b411b363SPhilipp Reisner if (first) 1747b411b363SPhilipp Reisner dev_alert(DEV, "%s[%u] Concurrent local write detected!" 1748b411b363SPhilipp Reisner " new: %llus +%u; pending: %llus +%u\n", 1749b411b363SPhilipp Reisner current->comm, current->pid, 1750b411b363SPhilipp Reisner (unsigned long long)sector, size, 1751b411b363SPhilipp Reisner (unsigned long long)i->sector, i->size); 1752b411b363SPhilipp Reisner if (i->rq_state & RQ_NET_PENDING) 1753b411b363SPhilipp Reisner ++have_unacked; 1754b411b363SPhilipp Reisner ++have_conflict; 1755b411b363SPhilipp Reisner } 1756b411b363SPhilipp Reisner } 1757b411b363SPhilipp Reisner #undef OVERLAPS 1758b411b363SPhilipp Reisner if (!have_conflict) 1759b411b363SPhilipp Reisner break; 1760b411b363SPhilipp Reisner 1761b411b363SPhilipp Reisner /* Discard Ack only for the _first_ iteration */ 1762b411b363SPhilipp Reisner if (first && discard && have_unacked) { 1763b411b363SPhilipp Reisner dev_alert(DEV, "Concurrent write! [DISCARD BY FLAG] sec=%llus\n", 1764b411b363SPhilipp Reisner (unsigned long long)sector); 1765b411b363SPhilipp Reisner inc_unacked(mdev); 1766b411b363SPhilipp Reisner e->w.cb = e_send_discard_ack; 1767b411b363SPhilipp Reisner list_add_tail(&e->w.list, &mdev->done_ee); 1768b411b363SPhilipp Reisner 1769b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 1770b411b363SPhilipp Reisner 1771b411b363SPhilipp Reisner /* we could probably send that P_DISCARD_ACK ourselves, 1772b411b363SPhilipp Reisner * but I don't like the receiver using the msock */ 1773b411b363SPhilipp Reisner 1774b411b363SPhilipp Reisner put_ldev(mdev); 1775b411b363SPhilipp Reisner wake_asender(mdev); 1776b411b363SPhilipp Reisner finish_wait(&mdev->misc_wait, &wait); 177781e84650SAndreas Gruenbacher return true; 1778b411b363SPhilipp Reisner } 1779b411b363SPhilipp Reisner 1780b411b363SPhilipp Reisner if (signal_pending(current)) { 1781b411b363SPhilipp Reisner hlist_del_init(&e->colision); 1782b411b363SPhilipp Reisner 1783b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 1784b411b363SPhilipp Reisner 1785b411b363SPhilipp Reisner finish_wait(&mdev->misc_wait, &wait); 1786b411b363SPhilipp Reisner goto out_interrupted; 1787b411b363SPhilipp Reisner } 1788b411b363SPhilipp Reisner 1789b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 1790b411b363SPhilipp Reisner if (first) { 1791b411b363SPhilipp Reisner first = 0; 1792b411b363SPhilipp Reisner dev_alert(DEV, "Concurrent write! 
[W AFTERWARDS] " 1793b411b363SPhilipp Reisner "sec=%llus\n", (unsigned long long)sector); 1794b411b363SPhilipp Reisner } else if (discard) { 1795b411b363SPhilipp Reisner /* we had none on the first iteration. 1796b411b363SPhilipp Reisner * there must be none now. */ 1797b411b363SPhilipp Reisner D_ASSERT(have_unacked == 0); 1798b411b363SPhilipp Reisner } 1799b411b363SPhilipp Reisner schedule(); 1800b411b363SPhilipp Reisner spin_lock_irq(&mdev->req_lock); 1801b411b363SPhilipp Reisner } 1802b411b363SPhilipp Reisner finish_wait(&mdev->misc_wait, &wait); 1803b411b363SPhilipp Reisner } 1804b411b363SPhilipp Reisner 1805b411b363SPhilipp Reisner list_add(&e->w.list, &mdev->active_ee); 1806b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 1807b411b363SPhilipp Reisner 1808b411b363SPhilipp Reisner switch (mdev->net_conf->wire_protocol) { 1809b411b363SPhilipp Reisner case DRBD_PROT_C: 1810b411b363SPhilipp Reisner inc_unacked(mdev); 1811b411b363SPhilipp Reisner /* corresponding dec_unacked() in e_end_block() 1812b411b363SPhilipp Reisner * respective _drbd_clear_done_ee */ 1813b411b363SPhilipp Reisner break; 1814b411b363SPhilipp Reisner case DRBD_PROT_B: 1815b411b363SPhilipp Reisner /* I really don't like it that the receiver thread 1816b411b363SPhilipp Reisner * sends on the msock, but anyways */ 1817b411b363SPhilipp Reisner drbd_send_ack(mdev, P_RECV_ACK, e); 1818b411b363SPhilipp Reisner break; 1819b411b363SPhilipp Reisner case DRBD_PROT_A: 1820b411b363SPhilipp Reisner /* nothing to do */ 1821b411b363SPhilipp Reisner break; 1822b411b363SPhilipp Reisner } 1823b411b363SPhilipp Reisner 18246719fb03SLars Ellenberg if (mdev->state.pdsk < D_INCONSISTENT) { 1825b411b363SPhilipp Reisner /* In case we have the only disk of the cluster, */ 1826b411b363SPhilipp Reisner drbd_set_out_of_sync(mdev, e->sector, e->size); 1827b411b363SPhilipp Reisner e->flags |= EE_CALL_AL_COMPLETE_IO; 18286719fb03SLars Ellenberg e->flags &= ~EE_MAY_SET_IN_SYNC; 1829b411b363SPhilipp Reisner drbd_al_begin_io(mdev, e->sector); 1830b411b363SPhilipp Reisner } 1831b411b363SPhilipp Reisner 183245bb912bSLars Ellenberg if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0) 183381e84650SAndreas Gruenbacher return true; 1834b411b363SPhilipp Reisner 183522cc37a9SLars Ellenberg /* drbd_submit_ee currently fails for one reason only: 183622cc37a9SLars Ellenberg * not being able to allocate enough bios. 183722cc37a9SLars Ellenberg * Is dropping the connection going to help? */ 183822cc37a9SLars Ellenberg spin_lock_irq(&mdev->req_lock); 183922cc37a9SLars Ellenberg list_del(&e->w.list); 184022cc37a9SLars Ellenberg hlist_del_init(&e->colision); 184122cc37a9SLars Ellenberg spin_unlock_irq(&mdev->req_lock); 184222cc37a9SLars Ellenberg if (e->flags & EE_CALL_AL_COMPLETE_IO) 184322cc37a9SLars Ellenberg drbd_al_complete_io(mdev, e->sector); 184422cc37a9SLars Ellenberg 1845b411b363SPhilipp Reisner out_interrupted: 1846b411b363SPhilipp Reisner /* yes, the epoch_size now is imbalanced. 1847b411b363SPhilipp Reisner * but we drop the connection anyways, so we don't have a chance to 1848b411b363SPhilipp Reisner * receive a barrier... atomic_inc(&mdev->epoch_size); */ 1849b411b363SPhilipp Reisner put_ldev(mdev); 1850b411b363SPhilipp Reisner drbd_free_ee(mdev, e); 185181e84650SAndreas Gruenbacher return false; 1852b411b363SPhilipp Reisner } 1853b411b363SPhilipp Reisner 18540f0601f4SLars Ellenberg /* We may throttle resync, if the lower device seems to be busy, 18550f0601f4SLars Ellenberg * and current sync rate is above c_min_rate. 
18560f0601f4SLars Ellenberg *
18570f0601f4SLars Ellenberg * To decide whether or not the lower device is busy, we use a scheme similar
18580f0601f4SLars Ellenberg * to MD RAID's is_mddev_idle(): if the partition stats reveal a "significant"
18590f0601f4SLars Ellenberg * amount (more than 64 sectors) of activity that we cannot account for with
18600f0601f4SLars Ellenberg * our own resync activity, it obviously is "busy".
18610f0601f4SLars Ellenberg *
18620f0601f4SLars Ellenberg * The current sync rate used here is based only on the most recent two step
18630f0601f4SLars Ellenberg * marks, to get a short time average so we can react faster.
18640f0601f4SLars Ellenberg */
1865e3555d85SPhilipp Reisner int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
18660f0601f4SLars Ellenberg {
18670f0601f4SLars Ellenberg struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
18680f0601f4SLars Ellenberg unsigned long db, dt, dbdt;
1869e3555d85SPhilipp Reisner struct lc_element *tmp;
18700f0601f4SLars Ellenberg int curr_events;
18710f0601f4SLars Ellenberg int throttle = 0;
18720f0601f4SLars Ellenberg
18730f0601f4SLars Ellenberg /* feature disabled? */
18740f0601f4SLars Ellenberg if (mdev->sync_conf.c_min_rate == 0)
18750f0601f4SLars Ellenberg return 0;
18760f0601f4SLars Ellenberg
1877e3555d85SPhilipp Reisner spin_lock_irq(&mdev->al_lock);
1878e3555d85SPhilipp Reisner tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector));
1879e3555d85SPhilipp Reisner if (tmp) {
1880e3555d85SPhilipp Reisner struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
1881e3555d85SPhilipp Reisner if (test_bit(BME_PRIORITY, &bm_ext->flags)) {
1882e3555d85SPhilipp Reisner spin_unlock_irq(&mdev->al_lock);
1883e3555d85SPhilipp Reisner return 0;
1884e3555d85SPhilipp Reisner }
1885e3555d85SPhilipp Reisner /* Do not slow down if app IO is already waiting for this extent */
1886e3555d85SPhilipp Reisner }
1887e3555d85SPhilipp Reisner spin_unlock_irq(&mdev->al_lock);
1888e3555d85SPhilipp Reisner
18890f0601f4SLars Ellenberg curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
18900f0601f4SLars Ellenberg (int)part_stat_read(&disk->part0, sectors[1]) -
18910f0601f4SLars Ellenberg atomic_read(&mdev->rs_sect_ev);
1892e3555d85SPhilipp Reisner
18930f0601f4SLars Ellenberg if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
18940f0601f4SLars Ellenberg unsigned long rs_left;
18950f0601f4SLars Ellenberg int i;
18960f0601f4SLars Ellenberg
18970f0601f4SLars Ellenberg mdev->rs_last_events = curr_events;
18980f0601f4SLars Ellenberg
18990f0601f4SLars Ellenberg /* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
19000f0601f4SLars Ellenberg * approx.
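 *
 * Worked example, with assumed numbers: at 4 KiB per bitmap bit,
 * clearing db = 25600 bits in dt = 10 seconds means
 * dbdt = Bit2KB(25600 / 10) = 10240 KiB/s. If c_min_rate is, say,
 * 4096 KiB/s, that counts as "fast enough", and together with the
 * unaccounted-activity check above we ask the caller to throttle.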
*/ 19012649f080SLars Ellenberg i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS; 19022649f080SLars Ellenberg 19032649f080SLars Ellenberg if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T) 19042649f080SLars Ellenberg rs_left = mdev->ov_left; 19052649f080SLars Ellenberg else 19060f0601f4SLars Ellenberg rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed; 19070f0601f4SLars Ellenberg 19080f0601f4SLars Ellenberg dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ; 19090f0601f4SLars Ellenberg if (!dt) 19100f0601f4SLars Ellenberg dt++; 19110f0601f4SLars Ellenberg db = mdev->rs_mark_left[i] - rs_left; 19120f0601f4SLars Ellenberg dbdt = Bit2KB(db/dt); 19130f0601f4SLars Ellenberg 19140f0601f4SLars Ellenberg if (dbdt > mdev->sync_conf.c_min_rate) 19150f0601f4SLars Ellenberg throttle = 1; 19160f0601f4SLars Ellenberg } 19170f0601f4SLars Ellenberg return throttle; 19180f0601f4SLars Ellenberg } 19190f0601f4SLars Ellenberg 19200f0601f4SLars Ellenberg 192102918be2SPhilipp Reisner static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int digest_size) 1922b411b363SPhilipp Reisner { 1923b411b363SPhilipp Reisner sector_t sector; 1924b411b363SPhilipp Reisner const sector_t capacity = drbd_get_capacity(mdev->this_bdev); 1925b411b363SPhilipp Reisner struct drbd_epoch_entry *e; 1926b411b363SPhilipp Reisner struct digest_info *di = NULL; 1927b18b37beSPhilipp Reisner int size, verb; 1928b411b363SPhilipp Reisner unsigned int fault_type; 192902918be2SPhilipp Reisner struct p_block_req *p = &mdev->data.rbuf.block_req; 1930b411b363SPhilipp Reisner 1931b411b363SPhilipp Reisner sector = be64_to_cpu(p->sector); 1932b411b363SPhilipp Reisner size = be32_to_cpu(p->blksize); 1933b411b363SPhilipp Reisner 19341816a2b4SLars Ellenberg if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) { 1935b411b363SPhilipp Reisner dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__, 1936b411b363SPhilipp Reisner (unsigned long long)sector, size); 193781e84650SAndreas Gruenbacher return false; 1938b411b363SPhilipp Reisner } 1939b411b363SPhilipp Reisner if (sector + (size>>9) > capacity) { 1940b411b363SPhilipp Reisner dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__, 1941b411b363SPhilipp Reisner (unsigned long long)sector, size); 194281e84650SAndreas Gruenbacher return false; 1943b411b363SPhilipp Reisner } 1944b411b363SPhilipp Reisner 1945b411b363SPhilipp Reisner if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) { 1946b18b37beSPhilipp Reisner verb = 1; 1947b18b37beSPhilipp Reisner switch (cmd) { 1948b18b37beSPhilipp Reisner case P_DATA_REQUEST: 1949b18b37beSPhilipp Reisner drbd_send_ack_rp(mdev, P_NEG_DREPLY, p); 1950b18b37beSPhilipp Reisner break; 1951b18b37beSPhilipp Reisner case P_RS_DATA_REQUEST: 1952b18b37beSPhilipp Reisner case P_CSUM_RS_REQUEST: 1953b18b37beSPhilipp Reisner case P_OV_REQUEST: 1954b18b37beSPhilipp Reisner drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY , p); 1955b18b37beSPhilipp Reisner break; 1956b18b37beSPhilipp Reisner case P_OV_REPLY: 1957b18b37beSPhilipp Reisner verb = 0; 1958b18b37beSPhilipp Reisner dec_rs_pending(mdev); 1959b18b37beSPhilipp Reisner drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC); 1960b18b37beSPhilipp Reisner break; 1961b18b37beSPhilipp Reisner default: 1962b18b37beSPhilipp Reisner dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n", 1963b18b37beSPhilipp Reisner cmdname(cmd)); 1964b18b37beSPhilipp Reisner } 1965b18b37beSPhilipp Reisner if (verb && 
__ratelimit(&drbd_ratelimit_state)) 1966b411b363SPhilipp Reisner dev_err(DEV, "Can not satisfy peer's read request, " 1967b411b363SPhilipp Reisner "no local data.\n"); 1968b18b37beSPhilipp Reisner 1969a821cc4aSLars Ellenberg /* drain possibly payload */ 1970a821cc4aSLars Ellenberg return drbd_drain_block(mdev, digest_size); 1971b411b363SPhilipp Reisner } 1972b411b363SPhilipp Reisner 1973b411b363SPhilipp Reisner /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD 1974b411b363SPhilipp Reisner * "criss-cross" setup, that might cause write-out on some other DRBD, 1975b411b363SPhilipp Reisner * which in turn might block on the other node at this very place. */ 1976b411b363SPhilipp Reisner e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO); 1977b411b363SPhilipp Reisner if (!e) { 1978b411b363SPhilipp Reisner put_ldev(mdev); 197981e84650SAndreas Gruenbacher return false; 1980b411b363SPhilipp Reisner } 1981b411b363SPhilipp Reisner 198202918be2SPhilipp Reisner switch (cmd) { 1983b411b363SPhilipp Reisner case P_DATA_REQUEST: 1984b411b363SPhilipp Reisner e->w.cb = w_e_end_data_req; 1985b411b363SPhilipp Reisner fault_type = DRBD_FAULT_DT_RD; 198680a40e43SLars Ellenberg /* application IO, don't drbd_rs_begin_io */ 198780a40e43SLars Ellenberg goto submit; 198880a40e43SLars Ellenberg 1989b411b363SPhilipp Reisner case P_RS_DATA_REQUEST: 1990b411b363SPhilipp Reisner e->w.cb = w_e_end_rsdata_req; 1991b411b363SPhilipp Reisner fault_type = DRBD_FAULT_RS_RD; 19925f9915bbSLars Ellenberg /* used in the sector offset progress display */ 19935f9915bbSLars Ellenberg mdev->bm_resync_fo = BM_SECT_TO_BIT(sector); 1994b411b363SPhilipp Reisner break; 1995b411b363SPhilipp Reisner 1996b411b363SPhilipp Reisner case P_OV_REPLY: 1997b411b363SPhilipp Reisner case P_CSUM_RS_REQUEST: 1998b411b363SPhilipp Reisner fault_type = DRBD_FAULT_RS_RD; 1999b411b363SPhilipp Reisner di = kmalloc(sizeof(*di) + digest_size, GFP_NOIO); 2000b411b363SPhilipp Reisner if (!di) 2001b411b363SPhilipp Reisner goto out_free_e; 2002b411b363SPhilipp Reisner 2003b411b363SPhilipp Reisner di->digest_size = digest_size; 2004b411b363SPhilipp Reisner di->digest = (((char *)di)+sizeof(struct digest_info)); 2005b411b363SPhilipp Reisner 2006c36c3cedSLars Ellenberg e->digest = di; 2007c36c3cedSLars Ellenberg e->flags |= EE_HAS_DIGEST; 2008c36c3cedSLars Ellenberg 2009b411b363SPhilipp Reisner if (drbd_recv(mdev, di->digest, digest_size) != digest_size) 2010b411b363SPhilipp Reisner goto out_free_e; 2011b411b363SPhilipp Reisner 201202918be2SPhilipp Reisner if (cmd == P_CSUM_RS_REQUEST) { 2013b411b363SPhilipp Reisner D_ASSERT(mdev->agreed_pro_version >= 89); 2014b411b363SPhilipp Reisner e->w.cb = w_e_end_csum_rs_req; 20155f9915bbSLars Ellenberg /* used in the sector offset progress display */ 20165f9915bbSLars Ellenberg mdev->bm_resync_fo = BM_SECT_TO_BIT(sector); 201702918be2SPhilipp Reisner } else if (cmd == P_OV_REPLY) { 20182649f080SLars Ellenberg /* track progress, we may need to throttle */ 20192649f080SLars Ellenberg atomic_add(size >> 9, &mdev->rs_sect_in); 2020b411b363SPhilipp Reisner e->w.cb = w_e_end_ov_reply; 2021b411b363SPhilipp Reisner dec_rs_pending(mdev); 20220f0601f4SLars Ellenberg /* drbd_rs_begin_io done when we sent this request, 20230f0601f4SLars Ellenberg * but accounting still needs to be done. 
*/
20240f0601f4SLars Ellenberg goto submit_for_resync;
2025b411b363SPhilipp Reisner }
2026b411b363SPhilipp Reisner break;
2027b411b363SPhilipp Reisner
2028b411b363SPhilipp Reisner case P_OV_REQUEST:
2029b411b363SPhilipp Reisner if (mdev->ov_start_sector == ~(sector_t)0 &&
2030b411b363SPhilipp Reisner mdev->agreed_pro_version >= 90) {
2031de228bbaSLars Ellenberg unsigned long now = jiffies;
2032de228bbaSLars Ellenberg int i;
2033b411b363SPhilipp Reisner mdev->ov_start_sector = sector;
2034b411b363SPhilipp Reisner mdev->ov_position = sector;
203530b743a2SLars Ellenberg mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector);
203630b743a2SLars Ellenberg mdev->rs_total = mdev->ov_left;
2037de228bbaSLars Ellenberg for (i = 0; i < DRBD_SYNC_MARKS; i++) {
2038de228bbaSLars Ellenberg mdev->rs_mark_left[i] = mdev->ov_left;
2039de228bbaSLars Ellenberg mdev->rs_mark_time[i] = now;
2040de228bbaSLars Ellenberg }
2041b411b363SPhilipp Reisner dev_info(DEV, "Online Verify start sector: %llu\n",
2042b411b363SPhilipp Reisner (unsigned long long)sector);
2043b411b363SPhilipp Reisner }
2044b411b363SPhilipp Reisner e->w.cb = w_e_end_ov_req;
2045b411b363SPhilipp Reisner fault_type = DRBD_FAULT_RS_RD;
2046b411b363SPhilipp Reisner break;
2047b411b363SPhilipp Reisner
2048b411b363SPhilipp Reisner default:
2049b411b363SPhilipp Reisner dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
205002918be2SPhilipp Reisner cmdname(cmd));
2051b411b363SPhilipp Reisner fault_type = DRBD_FAULT_MAX;
205280a40e43SLars Ellenberg goto out_free_e;
2053b411b363SPhilipp Reisner }
2054b411b363SPhilipp Reisner
20550f0601f4SLars Ellenberg /* Throttle, drbd_rs_begin_io and submit should become asynchronous
20560f0601f4SLars Ellenberg * wrt the receiver, but it is not as straightforward as it may seem.
20570f0601f4SLars Ellenberg * Various places in the resync start and stop logic assume resync
20580f0601f4SLars Ellenberg * requests are processed in order; requeuing this on the worker thread
20590f0601f4SLars Ellenberg * would introduce a bunch of new code for synchronization between threads.
20600f0601f4SLars Ellenberg *
20610f0601f4SLars Ellenberg * Unlimited throttling before drbd_rs_begin_io may stall the resync
20620f0601f4SLars Ellenberg * "forever", throttling after drbd_rs_begin_io will lock that extent
20630f0601f4SLars Ellenberg * for application writes for the same time. For now, just throttle
20640f0601f4SLars Ellenberg * here, where the rest of the code expects the receiver to sleep for
20650f0601f4SLars Ellenberg * a while, anyways.
20660f0601f4SLars Ellenberg */
2067b411b363SPhilipp Reisner
20680f0601f4SLars Ellenberg /* Throttle before drbd_rs_begin_io, as that locks out application IO;
20690f0601f4SLars Ellenberg * this defers syncer requests for some time, before letting at least
20700f0601f4SLars Ellenberg * one request through. The resync controller on the receiving side
20710f0601f4SLars Ellenberg * will adapt to the incoming rate accordingly.
20720f0601f4SLars Ellenberg *
20730f0601f4SLars Ellenberg * We cannot throttle here if remote is Primary/SyncTarget:
20740f0601f4SLars Ellenberg * we would also throttle its application reads.
20750f0601f4SLars Ellenberg * In that case, throttling is done on the SyncTarget only.
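 *
 * Rough numbers, for illustration: a throttled request is simply delayed
 * by one uninterruptible sleep of HZ/10 (about 100 ms, see below), so this
 * path lets at most roughly ten throttled resync requests per second
 * through until drbd_rs_should_slow_down() stops asking us to back off.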
20760f0601f4SLars Ellenberg */ 2077e3555d85SPhilipp Reisner if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector)) 2078e3555d85SPhilipp Reisner schedule_timeout_uninterruptible(HZ/10); 2079e3555d85SPhilipp Reisner if (drbd_rs_begin_io(mdev, sector)) 208080a40e43SLars Ellenberg goto out_free_e; 2081b411b363SPhilipp Reisner 20820f0601f4SLars Ellenberg submit_for_resync: 20830f0601f4SLars Ellenberg atomic_add(size >> 9, &mdev->rs_sect_ev); 20840f0601f4SLars Ellenberg 208580a40e43SLars Ellenberg submit: 2086b411b363SPhilipp Reisner inc_unacked(mdev); 208780a40e43SLars Ellenberg spin_lock_irq(&mdev->req_lock); 208880a40e43SLars Ellenberg list_add_tail(&e->w.list, &mdev->read_ee); 208980a40e43SLars Ellenberg spin_unlock_irq(&mdev->req_lock); 2090b411b363SPhilipp Reisner 209145bb912bSLars Ellenberg if (drbd_submit_ee(mdev, e, READ, fault_type) == 0) 209281e84650SAndreas Gruenbacher return true; 2093b411b363SPhilipp Reisner 209422cc37a9SLars Ellenberg /* drbd_submit_ee currently fails for one reason only: 209522cc37a9SLars Ellenberg * not being able to allocate enough bios. 209622cc37a9SLars Ellenberg * Is dropping the connection going to help? */ 209722cc37a9SLars Ellenberg spin_lock_irq(&mdev->req_lock); 209822cc37a9SLars Ellenberg list_del(&e->w.list); 209922cc37a9SLars Ellenberg spin_unlock_irq(&mdev->req_lock); 210022cc37a9SLars Ellenberg /* no drbd_rs_complete_io(), we are dropping the connection anyways */ 210122cc37a9SLars Ellenberg 2102b411b363SPhilipp Reisner out_free_e: 2103b411b363SPhilipp Reisner put_ldev(mdev); 2104b411b363SPhilipp Reisner drbd_free_ee(mdev, e); 210581e84650SAndreas Gruenbacher return false; 2106b411b363SPhilipp Reisner } 2107b411b363SPhilipp Reisner 2108b411b363SPhilipp Reisner static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local) 2109b411b363SPhilipp Reisner { 2110b411b363SPhilipp Reisner int self, peer, rv = -100; 2111b411b363SPhilipp Reisner unsigned long ch_self, ch_peer; 2112b411b363SPhilipp Reisner 2113b411b363SPhilipp Reisner self = mdev->ldev->md.uuid[UI_BITMAP] & 1; 2114b411b363SPhilipp Reisner peer = mdev->p_uuid[UI_BITMAP] & 1; 2115b411b363SPhilipp Reisner 2116b411b363SPhilipp Reisner ch_peer = mdev->p_uuid[UI_SIZE]; 2117b411b363SPhilipp Reisner ch_self = mdev->comm_bm_set; 2118b411b363SPhilipp Reisner 2119b411b363SPhilipp Reisner switch (mdev->net_conf->after_sb_0p) { 2120b411b363SPhilipp Reisner case ASB_CONSENSUS: 2121b411b363SPhilipp Reisner case ASB_DISCARD_SECONDARY: 2122b411b363SPhilipp Reisner case ASB_CALL_HELPER: 2123b411b363SPhilipp Reisner dev_err(DEV, "Configuration error.\n"); 2124b411b363SPhilipp Reisner break; 2125b411b363SPhilipp Reisner case ASB_DISCONNECT: 2126b411b363SPhilipp Reisner break; 2127b411b363SPhilipp Reisner case ASB_DISCARD_YOUNGER_PRI: 2128b411b363SPhilipp Reisner if (self == 0 && peer == 1) { 2129b411b363SPhilipp Reisner rv = -1; 2130b411b363SPhilipp Reisner break; 2131b411b363SPhilipp Reisner } 2132b411b363SPhilipp Reisner if (self == 1 && peer == 0) { 2133b411b363SPhilipp Reisner rv = 1; 2134b411b363SPhilipp Reisner break; 2135b411b363SPhilipp Reisner } 2136b411b363SPhilipp Reisner /* Else fall through to one of the other strategies... 
*/ 2137b411b363SPhilipp Reisner case ASB_DISCARD_OLDER_PRI: 2138b411b363SPhilipp Reisner if (self == 0 && peer == 1) { 2139b411b363SPhilipp Reisner rv = 1; 2140b411b363SPhilipp Reisner break; 2141b411b363SPhilipp Reisner } 2142b411b363SPhilipp Reisner if (self == 1 && peer == 0) { 2143b411b363SPhilipp Reisner rv = -1; 2144b411b363SPhilipp Reisner break; 2145b411b363SPhilipp Reisner } 2146b411b363SPhilipp Reisner /* Else fall through to one of the other strategies... */ 2147ad19bf6eSLars Ellenberg dev_warn(DEV, "Discard younger/older primary did not find a decision\n" 2148b411b363SPhilipp Reisner "Using discard-least-changes instead\n"); 2149b411b363SPhilipp Reisner case ASB_DISCARD_ZERO_CHG: 2150b411b363SPhilipp Reisner if (ch_peer == 0 && ch_self == 0) { 2151b411b363SPhilipp Reisner rv = test_bit(DISCARD_CONCURRENT, &mdev->flags) 2152b411b363SPhilipp Reisner ? -1 : 1; 2153b411b363SPhilipp Reisner break; 2154b411b363SPhilipp Reisner } else { 2155b411b363SPhilipp Reisner if (ch_peer == 0) { rv = 1; break; } 2156b411b363SPhilipp Reisner if (ch_self == 0) { rv = -1; break; } 2157b411b363SPhilipp Reisner } 2158b411b363SPhilipp Reisner if (mdev->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG) 2159b411b363SPhilipp Reisner break; 2160b411b363SPhilipp Reisner case ASB_DISCARD_LEAST_CHG: 2161b411b363SPhilipp Reisner if (ch_self < ch_peer) 2162b411b363SPhilipp Reisner rv = -1; 2163b411b363SPhilipp Reisner else if (ch_self > ch_peer) 2164b411b363SPhilipp Reisner rv = 1; 2165b411b363SPhilipp Reisner else /* ( ch_self == ch_peer ) */ 2166b411b363SPhilipp Reisner /* Well, then use something else. */ 2167b411b363SPhilipp Reisner rv = test_bit(DISCARD_CONCURRENT, &mdev->flags) 2168b411b363SPhilipp Reisner ? -1 : 1; 2169b411b363SPhilipp Reisner break; 2170b411b363SPhilipp Reisner case ASB_DISCARD_LOCAL: 2171b411b363SPhilipp Reisner rv = -1; 2172b411b363SPhilipp Reisner break; 2173b411b363SPhilipp Reisner case ASB_DISCARD_REMOTE: 2174b411b363SPhilipp Reisner rv = 1; 2175b411b363SPhilipp Reisner } 2176b411b363SPhilipp Reisner 2177b411b363SPhilipp Reisner return rv; 2178b411b363SPhilipp Reisner } 2179b411b363SPhilipp Reisner 2180b411b363SPhilipp Reisner static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local) 2181b411b363SPhilipp Reisner { 21826184ea21SAndreas Gruenbacher int hg, rv = -100; 2183b411b363SPhilipp Reisner 2184b411b363SPhilipp Reisner switch (mdev->net_conf->after_sb_1p) { 2185b411b363SPhilipp Reisner case ASB_DISCARD_YOUNGER_PRI: 2186b411b363SPhilipp Reisner case ASB_DISCARD_OLDER_PRI: 2187b411b363SPhilipp Reisner case ASB_DISCARD_LEAST_CHG: 2188b411b363SPhilipp Reisner case ASB_DISCARD_LOCAL: 2189b411b363SPhilipp Reisner case ASB_DISCARD_REMOTE: 2190b411b363SPhilipp Reisner dev_err(DEV, "Configuration error.\n"); 2191b411b363SPhilipp Reisner break; 2192b411b363SPhilipp Reisner case ASB_DISCONNECT: 2193b411b363SPhilipp Reisner break; 2194b411b363SPhilipp Reisner case ASB_CONSENSUS: 2195b411b363SPhilipp Reisner hg = drbd_asb_recover_0p(mdev); 2196b411b363SPhilipp Reisner if (hg == -1 && mdev->state.role == R_SECONDARY) 2197b411b363SPhilipp Reisner rv = hg; 2198b411b363SPhilipp Reisner if (hg == 1 && mdev->state.role == R_PRIMARY) 2199b411b363SPhilipp Reisner rv = hg; 2200b411b363SPhilipp Reisner break; 2201b411b363SPhilipp Reisner case ASB_VIOLENTLY: 2202b411b363SPhilipp Reisner rv = drbd_asb_recover_0p(mdev); 2203b411b363SPhilipp Reisner break; 2204b411b363SPhilipp Reisner case ASB_DISCARD_SECONDARY: 2205b411b363SPhilipp Reisner return mdev->state.role == R_PRIMARY ? 
1 : -1; 2206b411b363SPhilipp Reisner case ASB_CALL_HELPER: 2207b411b363SPhilipp Reisner hg = drbd_asb_recover_0p(mdev); 2208b411b363SPhilipp Reisner if (hg == -1 && mdev->state.role == R_PRIMARY) { 2209bb437946SAndreas Gruenbacher enum drbd_state_rv rv2; 2210bb437946SAndreas Gruenbacher 2211bb437946SAndreas Gruenbacher drbd_set_role(mdev, R_SECONDARY, 0); 2212b411b363SPhilipp Reisner /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE, 2213b411b363SPhilipp Reisner * we might be here in C_WF_REPORT_PARAMS which is transient. 2214b411b363SPhilipp Reisner * we do not need to wait for the after state change work either. */ 2215bb437946SAndreas Gruenbacher rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY)); 2216bb437946SAndreas Gruenbacher if (rv2 != SS_SUCCESS) { 2217b411b363SPhilipp Reisner drbd_khelper(mdev, "pri-lost-after-sb"); 2218b411b363SPhilipp Reisner } else { 2219b411b363SPhilipp Reisner dev_warn(DEV, "Successfully gave up primary role.\n"); 2220b411b363SPhilipp Reisner rv = hg; 2221b411b363SPhilipp Reisner } 2222b411b363SPhilipp Reisner } else 2223b411b363SPhilipp Reisner rv = hg; 2224b411b363SPhilipp Reisner } 2225b411b363SPhilipp Reisner 2226b411b363SPhilipp Reisner return rv; 2227b411b363SPhilipp Reisner } 2228b411b363SPhilipp Reisner 2229b411b363SPhilipp Reisner static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local) 2230b411b363SPhilipp Reisner { 22316184ea21SAndreas Gruenbacher int hg, rv = -100; 2232b411b363SPhilipp Reisner 2233b411b363SPhilipp Reisner switch (mdev->net_conf->after_sb_2p) { 2234b411b363SPhilipp Reisner case ASB_DISCARD_YOUNGER_PRI: 2235b411b363SPhilipp Reisner case ASB_DISCARD_OLDER_PRI: 2236b411b363SPhilipp Reisner case ASB_DISCARD_LEAST_CHG: 2237b411b363SPhilipp Reisner case ASB_DISCARD_LOCAL: 2238b411b363SPhilipp Reisner case ASB_DISCARD_REMOTE: 2239b411b363SPhilipp Reisner case ASB_CONSENSUS: 2240b411b363SPhilipp Reisner case ASB_DISCARD_SECONDARY: 2241b411b363SPhilipp Reisner dev_err(DEV, "Configuration error.\n"); 2242b411b363SPhilipp Reisner break; 2243b411b363SPhilipp Reisner case ASB_VIOLENTLY: 2244b411b363SPhilipp Reisner rv = drbd_asb_recover_0p(mdev); 2245b411b363SPhilipp Reisner break; 2246b411b363SPhilipp Reisner case ASB_DISCONNECT: 2247b411b363SPhilipp Reisner break; 2248b411b363SPhilipp Reisner case ASB_CALL_HELPER: 2249b411b363SPhilipp Reisner hg = drbd_asb_recover_0p(mdev); 2250b411b363SPhilipp Reisner if (hg == -1) { 2251bb437946SAndreas Gruenbacher enum drbd_state_rv rv2; 2252bb437946SAndreas Gruenbacher 2253b411b363SPhilipp Reisner /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE, 2254b411b363SPhilipp Reisner * we might be here in C_WF_REPORT_PARAMS which is transient. 2255b411b363SPhilipp Reisner * we do not need to wait for the after state change work either. 
*/ 2256bb437946SAndreas Gruenbacher rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY)); 2257bb437946SAndreas Gruenbacher if (rv2 != SS_SUCCESS) { 2258b411b363SPhilipp Reisner drbd_khelper(mdev, "pri-lost-after-sb"); 2259b411b363SPhilipp Reisner } else { 2260b411b363SPhilipp Reisner dev_warn(DEV, "Successfully gave up primary role.\n"); 2261b411b363SPhilipp Reisner rv = hg; 2262b411b363SPhilipp Reisner } 2263b411b363SPhilipp Reisner } else 2264b411b363SPhilipp Reisner rv = hg; 2265b411b363SPhilipp Reisner } 2266b411b363SPhilipp Reisner 2267b411b363SPhilipp Reisner return rv; 2268b411b363SPhilipp Reisner } 2269b411b363SPhilipp Reisner 2270b411b363SPhilipp Reisner static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid, 2271b411b363SPhilipp Reisner u64 bits, u64 flags) 2272b411b363SPhilipp Reisner { 2273b411b363SPhilipp Reisner if (!uuid) { 2274b411b363SPhilipp Reisner dev_info(DEV, "%s uuid info vanished while I was looking!\n", text); 2275b411b363SPhilipp Reisner return; 2276b411b363SPhilipp Reisner } 2277b411b363SPhilipp Reisner dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n", 2278b411b363SPhilipp Reisner text, 2279b411b363SPhilipp Reisner (unsigned long long)uuid[UI_CURRENT], 2280b411b363SPhilipp Reisner (unsigned long long)uuid[UI_BITMAP], 2281b411b363SPhilipp Reisner (unsigned long long)uuid[UI_HISTORY_START], 2282b411b363SPhilipp Reisner (unsigned long long)uuid[UI_HISTORY_END], 2283b411b363SPhilipp Reisner (unsigned long long)bits, 2284b411b363SPhilipp Reisner (unsigned long long)flags); 2285b411b363SPhilipp Reisner } 2286b411b363SPhilipp Reisner 2287b411b363SPhilipp Reisner /* 2288b411b363SPhilipp Reisner 100 after split brain try auto recover 2289b411b363SPhilipp Reisner 2 C_SYNC_SOURCE set BitMap 2290b411b363SPhilipp Reisner 1 C_SYNC_SOURCE use BitMap 2291b411b363SPhilipp Reisner 0 no Sync 2292b411b363SPhilipp Reisner -1 C_SYNC_TARGET use BitMap 2293b411b363SPhilipp Reisner -2 C_SYNC_TARGET set BitMap 2294b411b363SPhilipp Reisner -100 after split brain, disconnect 2295b411b363SPhilipp Reisner -1000 unrelated data 2296b411b363SPhilipp Reisner */ 2297b411b363SPhilipp Reisner static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local) 2298b411b363SPhilipp Reisner { 2299b411b363SPhilipp Reisner u64 self, peer; 2300b411b363SPhilipp Reisner int i, j; 2301b411b363SPhilipp Reisner 2302b411b363SPhilipp Reisner self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1); 2303b411b363SPhilipp Reisner peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1); 2304b411b363SPhilipp Reisner 2305b411b363SPhilipp Reisner *rule_nr = 10; 2306b411b363SPhilipp Reisner if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED) 2307b411b363SPhilipp Reisner return 0; 2308b411b363SPhilipp Reisner 2309b411b363SPhilipp Reisner *rule_nr = 20; 2310b411b363SPhilipp Reisner if ((self == UUID_JUST_CREATED || self == (u64)0) && 2311b411b363SPhilipp Reisner peer != UUID_JUST_CREATED) 2312b411b363SPhilipp Reisner return -2; 2313b411b363SPhilipp Reisner 2314b411b363SPhilipp Reisner *rule_nr = 30; 2315b411b363SPhilipp Reisner if (self != UUID_JUST_CREATED && 2316b411b363SPhilipp Reisner (peer == UUID_JUST_CREATED || peer == (u64)0)) 2317b411b363SPhilipp Reisner return 2; 2318b411b363SPhilipp Reisner 2319b411b363SPhilipp Reisner if (self == peer) { 2320b411b363SPhilipp Reisner int rct, dc; /* roles at crash time */ 2321b411b363SPhilipp Reisner 2322b411b363SPhilipp Reisner if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) { 
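/* Note on this branch: the current UUIDs match, but the peer no longer carries a bitmap UUID while we still do. Resolving that needs protocol 91 or newer (hence the -1001 return below). If our bitmap and history UUIDs line up with the peer's apparently already-rotated history, we were SyncSource and merely missed the resync-finished event (rule 34) and rotate our own bitmap UUID away now; otherwise the peer failed to write the sync UUID (rule 36). Either way we return 1: sync source, using the existing bitmap. */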
2323b411b363SPhilipp Reisner 2324b411b363SPhilipp Reisner if (mdev->agreed_pro_version < 91) 2325b411b363SPhilipp Reisner return -1001; 2326b411b363SPhilipp Reisner 2327b411b363SPhilipp Reisner if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) && 2328b411b363SPhilipp Reisner (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) { 2329b411b363SPhilipp Reisner dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n"); 2330b411b363SPhilipp Reisner drbd_uuid_set_bm(mdev, 0UL); 2331b411b363SPhilipp Reisner 2332b411b363SPhilipp Reisner drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, 2333b411b363SPhilipp Reisner mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0); 2334b411b363SPhilipp Reisner *rule_nr = 34; 2335b411b363SPhilipp Reisner } else { 2336b411b363SPhilipp Reisner dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n"); 2337b411b363SPhilipp Reisner *rule_nr = 36; 2338b411b363SPhilipp Reisner } 2339b411b363SPhilipp Reisner 2340b411b363SPhilipp Reisner return 1; 2341b411b363SPhilipp Reisner } 2342b411b363SPhilipp Reisner 2343b411b363SPhilipp Reisner if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) { 2344b411b363SPhilipp Reisner 2345b411b363SPhilipp Reisner if (mdev->agreed_pro_version < 91) 2346b411b363SPhilipp Reisner return -1001; 2347b411b363SPhilipp Reisner 2348b411b363SPhilipp Reisner if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) && 2349b411b363SPhilipp Reisner (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) { 2350b411b363SPhilipp Reisner dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n"); 2351b411b363SPhilipp Reisner 2352b411b363SPhilipp Reisner mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START]; 2353b411b363SPhilipp Reisner mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP]; 2354b411b363SPhilipp Reisner mdev->p_uuid[UI_BITMAP] = 0UL; 2355b411b363SPhilipp Reisner 2356b411b363SPhilipp Reisner drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]); 2357b411b363SPhilipp Reisner *rule_nr = 35; 2358b411b363SPhilipp Reisner } else { 2359b411b363SPhilipp Reisner dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n"); 2360b411b363SPhilipp Reisner *rule_nr = 37; 2361b411b363SPhilipp Reisner } 2362b411b363SPhilipp Reisner 2363b411b363SPhilipp Reisner return -1; 2364b411b363SPhilipp Reisner } 2365b411b363SPhilipp Reisner 2366b411b363SPhilipp Reisner /* Common power [off|failure] */ 2367b411b363SPhilipp Reisner rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) + 2368b411b363SPhilipp Reisner (mdev->p_uuid[UI_FLAGS] & 2); 2369b411b363SPhilipp Reisner /* lowest bit is set when we were primary, 2370b411b363SPhilipp Reisner * next bit (weight 2) is set when peer was primary */ 2371b411b363SPhilipp Reisner *rule_nr = 40; 2372b411b363SPhilipp Reisner 2373b411b363SPhilipp Reisner switch (rct) { 2374b411b363SPhilipp Reisner case 0: /* !self_pri && !peer_pri */ return 0; 2375b411b363SPhilipp Reisner case 1: /* self_pri && !peer_pri */ return 1; 2376b411b363SPhilipp Reisner case 2: /* !self_pri && peer_pri */ return -1; 2377b411b363SPhilipp Reisner case 3: /* self_pri && peer_pri */ 2378b411b363SPhilipp Reisner dc = test_bit(DISCARD_CONCURRENT, &mdev->flags); 2379b411b363SPhilipp Reisner return dc ? 
-1 : 1; 2380b411b363SPhilipp Reisner } 2381b411b363SPhilipp Reisner } 2382b411b363SPhilipp Reisner 2383b411b363SPhilipp Reisner *rule_nr = 50; 2384b411b363SPhilipp Reisner peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1); 2385b411b363SPhilipp Reisner if (self == peer) 2386b411b363SPhilipp Reisner return -1; 2387b411b363SPhilipp Reisner 2388b411b363SPhilipp Reisner *rule_nr = 51; 2389b411b363SPhilipp Reisner peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1); 2390b411b363SPhilipp Reisner if (self == peer) { 2391b411b363SPhilipp Reisner self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1); 2392b411b363SPhilipp Reisner peer = mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1); 2393b411b363SPhilipp Reisner if (self == peer) { 2394b411b363SPhilipp Reisner /* The last P_SYNC_UUID did not get through. Undo the modifications to the peer's 2395b411b363SPhilipp Reisner UUIDs made by the last start of a resync as sync source. */ 2396b411b363SPhilipp Reisner 2397b411b363SPhilipp Reisner if (mdev->agreed_pro_version < 91) 2398b411b363SPhilipp Reisner return -1001; 2399b411b363SPhilipp Reisner 2400b411b363SPhilipp Reisner mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START]; 2401b411b363SPhilipp Reisner mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1]; 2402b411b363SPhilipp Reisner return -1; 2403b411b363SPhilipp Reisner } 2404b411b363SPhilipp Reisner } 2405b411b363SPhilipp Reisner 2406b411b363SPhilipp Reisner *rule_nr = 60; 2407b411b363SPhilipp Reisner self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1); 2408b411b363SPhilipp Reisner for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) { 2409b411b363SPhilipp Reisner peer = mdev->p_uuid[i] & ~((u64)1); 2410b411b363SPhilipp Reisner if (self == peer) 2411b411b363SPhilipp Reisner return -2; 2412b411b363SPhilipp Reisner } 2413b411b363SPhilipp Reisner 2414b411b363SPhilipp Reisner *rule_nr = 70; 2415b411b363SPhilipp Reisner self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1); 2416b411b363SPhilipp Reisner peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1); 2417b411b363SPhilipp Reisner if (self == peer) 2418b411b363SPhilipp Reisner return 1; 2419b411b363SPhilipp Reisner 2420b411b363SPhilipp Reisner *rule_nr = 71; 2421b411b363SPhilipp Reisner self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1); 2422b411b363SPhilipp Reisner if (self == peer) { 2423b411b363SPhilipp Reisner self = mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1); 2424b411b363SPhilipp Reisner peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1); 2425b411b363SPhilipp Reisner if (self == peer) { 2426b411b363SPhilipp Reisner /* The last P_SYNC_UUID did not get through. Undo the modifications to our 2427b411b363SPhilipp Reisner UUIDs made by the last start of a resync as sync source. */ 2428b411b363SPhilipp Reisner 2429b411b363SPhilipp Reisner if (mdev->agreed_pro_version < 91) 2430b411b363SPhilipp Reisner return -1001; 2431b411b363SPhilipp Reisner 2432b411b363SPhilipp Reisner _drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]); 2433b411b363SPhilipp Reisner _drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]); 2434b411b363SPhilipp Reisner 2435b411b363SPhilipp Reisner dev_info(DEV, "Undid last start of resync:\n"); 2436b411b363SPhilipp Reisner 2437b411b363SPhilipp Reisner drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, 2438b411b363SPhilipp Reisner mdev->state.disk >= D_NEGOTIATING ?
drbd_bm_total_weight(mdev) : 0, 0); 2439b411b363SPhilipp Reisner 2440b411b363SPhilipp Reisner return 1; 2441b411b363SPhilipp Reisner } 2442b411b363SPhilipp Reisner } 2443b411b363SPhilipp Reisner 2444b411b363SPhilipp Reisner 2445b411b363SPhilipp Reisner *rule_nr = 80; 2446d8c2a36bSPhilipp Reisner peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1); 2447b411b363SPhilipp Reisner for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) { 2448b411b363SPhilipp Reisner self = mdev->ldev->md.uuid[i] & ~((u64)1); 2449b411b363SPhilipp Reisner if (self == peer) 2450b411b363SPhilipp Reisner return 2; 2451b411b363SPhilipp Reisner } 2452b411b363SPhilipp Reisner 2453b411b363SPhilipp Reisner *rule_nr = 90; 2454b411b363SPhilipp Reisner self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1); 2455b411b363SPhilipp Reisner peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1); 2456b411b363SPhilipp Reisner if (self == peer && self != ((u64)0)) 2457b411b363SPhilipp Reisner return 100; 2458b411b363SPhilipp Reisner 2459b411b363SPhilipp Reisner *rule_nr = 100; 2460b411b363SPhilipp Reisner for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) { 2461b411b363SPhilipp Reisner self = mdev->ldev->md.uuid[i] & ~((u64)1); 2462b411b363SPhilipp Reisner for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) { 2463b411b363SPhilipp Reisner peer = mdev->p_uuid[j] & ~((u64)1); 2464b411b363SPhilipp Reisner if (self == peer) 2465b411b363SPhilipp Reisner return -100; 2466b411b363SPhilipp Reisner } 2467b411b363SPhilipp Reisner } 2468b411b363SPhilipp Reisner 2469b411b363SPhilipp Reisner return -1000; 2470b411b363SPhilipp Reisner } 2471b411b363SPhilipp Reisner 2472b411b363SPhilipp Reisner /* drbd_sync_handshake() returns the new conn state on success, or 2473b411b363SPhilipp Reisner CONN_MASK (-1) on failure. 2474b411b363SPhilipp Reisner */ 2475b411b363SPhilipp Reisner static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role, 2476b411b363SPhilipp Reisner enum drbd_disk_state peer_disk) __must_hold(local) 2477b411b363SPhilipp Reisner { 2478b411b363SPhilipp Reisner int hg, rule_nr; 2479b411b363SPhilipp Reisner enum drbd_conns rv = C_MASK; 2480b411b363SPhilipp Reisner enum drbd_disk_state mydisk; 2481b411b363SPhilipp Reisner 2482b411b363SPhilipp Reisner mydisk = mdev->state.disk; 2483b411b363SPhilipp Reisner if (mydisk == D_NEGOTIATING) 2484b411b363SPhilipp Reisner mydisk = mdev->new_state_tmp.disk; 2485b411b363SPhilipp Reisner 2486b411b363SPhilipp Reisner dev_info(DEV, "drbd_sync_handshake:\n"); 2487b411b363SPhilipp Reisner drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0); 2488b411b363SPhilipp Reisner drbd_uuid_dump(mdev, "peer", mdev->p_uuid, 2489b411b363SPhilipp Reisner mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]); 2490b411b363SPhilipp Reisner 2491b411b363SPhilipp Reisner hg = drbd_uuid_compare(mdev, &rule_nr); 2492b411b363SPhilipp Reisner 2493b411b363SPhilipp Reisner dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr); 2494b411b363SPhilipp Reisner 2495b411b363SPhilipp Reisner if (hg == -1000) { 2496b411b363SPhilipp Reisner dev_alert(DEV, "Unrelated data, aborting!\n"); 2497b411b363SPhilipp Reisner return C_MASK; 2498b411b363SPhilipp Reisner } 2499b411b363SPhilipp Reisner if (hg == -1001) { 2500220df4d0SLars Ellenberg dev_alert(DEV, "To resolve this both sides have to support at least protocol 91\n"); 2501b411b363SPhilipp Reisner return C_MASK; 2502b411b363SPhilipp Reisner } 2503b411b363SPhilipp Reisner 2504b411b363SPhilipp Reisner if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) || 
2505b411b363SPhilipp Reisner (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) { 2506b411b363SPhilipp Reisner int f = (hg == -100) || abs(hg) == 2; 2507b411b363SPhilipp Reisner hg = mydisk > D_INCONSISTENT ? 1 : -1; 2508b411b363SPhilipp Reisner if (f) 2509b411b363SPhilipp Reisner hg = hg*2; 2510b411b363SPhilipp Reisner dev_info(DEV, "Becoming sync %s due to disk states.\n", 2511b411b363SPhilipp Reisner hg > 0 ? "source" : "target"); 2512b411b363SPhilipp Reisner } 2513b411b363SPhilipp Reisner 25143a11a487SAdam Gandelman if (abs(hg) == 100) 25153a11a487SAdam Gandelman drbd_khelper(mdev, "initial-split-brain"); 25163a11a487SAdam Gandelman 2517b411b363SPhilipp Reisner if (hg == 100 || (hg == -100 && mdev->net_conf->always_asbp)) { 2518b411b363SPhilipp Reisner int pcount = (mdev->state.role == R_PRIMARY) 2519b411b363SPhilipp Reisner + (peer_role == R_PRIMARY); 2520b411b363SPhilipp Reisner int forced = (hg == -100); 2521b411b363SPhilipp Reisner 2522b411b363SPhilipp Reisner switch (pcount) { 2523b411b363SPhilipp Reisner case 0: 2524b411b363SPhilipp Reisner hg = drbd_asb_recover_0p(mdev); 2525b411b363SPhilipp Reisner break; 2526b411b363SPhilipp Reisner case 1: 2527b411b363SPhilipp Reisner hg = drbd_asb_recover_1p(mdev); 2528b411b363SPhilipp Reisner break; 2529b411b363SPhilipp Reisner case 2: 2530b411b363SPhilipp Reisner hg = drbd_asb_recover_2p(mdev); 2531b411b363SPhilipp Reisner break; 2532b411b363SPhilipp Reisner } 2533b411b363SPhilipp Reisner if (abs(hg) < 100) { 2534b411b363SPhilipp Reisner dev_warn(DEV, "Split-Brain detected, %d primaries, " 2535b411b363SPhilipp Reisner "automatically solved. Sync from %s node\n", 2536b411b363SPhilipp Reisner pcount, (hg < 0) ? "peer" : "this"); 2537b411b363SPhilipp Reisner if (forced) { 2538b411b363SPhilipp Reisner dev_warn(DEV, "Doing a full sync, since" 2539b411b363SPhilipp Reisner " UUIDs were ambiguous.\n"); 2540b411b363SPhilipp Reisner hg = hg*2; 2541b411b363SPhilipp Reisner } 2542b411b363SPhilipp Reisner } 2543b411b363SPhilipp Reisner } 2544b411b363SPhilipp Reisner 2545b411b363SPhilipp Reisner if (hg == -100) { 2546b411b363SPhilipp Reisner if (mdev->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1)) 2547b411b363SPhilipp Reisner hg = -1; 2548b411b363SPhilipp Reisner if (!mdev->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1)) 2549b411b363SPhilipp Reisner hg = 1; 2550b411b363SPhilipp Reisner 2551b411b363SPhilipp Reisner if (abs(hg) < 100) 2552b411b363SPhilipp Reisner dev_warn(DEV, "Split-Brain detected, manually solved. " 2553b411b363SPhilipp Reisner "Sync from %s node\n", 2554b411b363SPhilipp Reisner (hg < 0) ? "peer" : "this"); 2555b411b363SPhilipp Reisner } 2556b411b363SPhilipp Reisner 2557b411b363SPhilipp Reisner if (hg == -100) { 2558580b9767SLars Ellenberg /* FIXME this log message is not correct if we end up here 2559580b9767SLars Ellenberg * after an attempted attach on a diskless node. 2560580b9767SLars Ellenberg * We just refuse to attach -- well, we drop the "connection" 2561580b9767SLars Ellenberg * to that disk, in a way...
*/ 25623a11a487SAdam Gandelman dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n"); 2563b411b363SPhilipp Reisner drbd_khelper(mdev, "split-brain"); 2564b411b363SPhilipp Reisner return C_MASK; 2565b411b363SPhilipp Reisner } 2566b411b363SPhilipp Reisner 2567b411b363SPhilipp Reisner if (hg > 0 && mydisk <= D_INCONSISTENT) { 2568b411b363SPhilipp Reisner dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n"); 2569b411b363SPhilipp Reisner return C_MASK; 2570b411b363SPhilipp Reisner } 2571b411b363SPhilipp Reisner 2572b411b363SPhilipp Reisner if (hg < 0 && /* by intention we do not use mydisk here. */ 2573b411b363SPhilipp Reisner mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) { 2574b411b363SPhilipp Reisner switch (mdev->net_conf->rr_conflict) { 2575b411b363SPhilipp Reisner case ASB_CALL_HELPER: 2576b411b363SPhilipp Reisner drbd_khelper(mdev, "pri-lost"); 2577b411b363SPhilipp Reisner /* fall through */ 2578b411b363SPhilipp Reisner case ASB_DISCONNECT: 2579b411b363SPhilipp Reisner dev_err(DEV, "I shall become SyncTarget, but I am primary!\n"); 2580b411b363SPhilipp Reisner return C_MASK; 2581b411b363SPhilipp Reisner case ASB_VIOLENTLY: 2582b411b363SPhilipp Reisner dev_warn(DEV, "Becoming SyncTarget, violating the stable-data " 2583b411b363SPhilipp Reisner "assumption\n"); 2584b411b363SPhilipp Reisner } 2585b411b363SPhilipp Reisner } 2586b411b363SPhilipp Reisner 2587cf14c2e9SPhilipp Reisner if (mdev->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) { 2588cf14c2e9SPhilipp Reisner if (hg == 0) 2589cf14c2e9SPhilipp Reisner dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n"); 2590cf14c2e9SPhilipp Reisner else 2591cf14c2e9SPhilipp Reisner dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.", 2592cf14c2e9SPhilipp Reisner drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET), 2593cf14c2e9SPhilipp Reisner abs(hg) >= 2 ? "full" : "bit-map based"); 2594cf14c2e9SPhilipp Reisner return C_MASK; 2595cf14c2e9SPhilipp Reisner } 2596cf14c2e9SPhilipp Reisner 2597b411b363SPhilipp Reisner if (abs(hg) >= 2) { 2598b411b363SPhilipp Reisner dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n"); 2599b411b363SPhilipp Reisner if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake")) 2600b411b363SPhilipp Reisner return C_MASK; 2601b411b363SPhilipp Reisner } 2602b411b363SPhilipp Reisner 2603b411b363SPhilipp Reisner if (hg > 0) { /* become sync source.
*/ 2604b411b363SPhilipp Reisner rv = C_WF_BITMAP_S; 2605b411b363SPhilipp Reisner } else if (hg < 0) { /* become sync target */ 2606b411b363SPhilipp Reisner rv = C_WF_BITMAP_T; 2607b411b363SPhilipp Reisner } else { 2608b411b363SPhilipp Reisner rv = C_CONNECTED; 2609b411b363SPhilipp Reisner if (drbd_bm_total_weight(mdev)) { 2610b411b363SPhilipp Reisner dev_info(DEV, "No resync, but %lu bits in bitmap!\n", 2611b411b363SPhilipp Reisner drbd_bm_total_weight(mdev)); 2612b411b363SPhilipp Reisner } 2613b411b363SPhilipp Reisner } 2614b411b363SPhilipp Reisner 2615b411b363SPhilipp Reisner return rv; 2616b411b363SPhilipp Reisner } 2617b411b363SPhilipp Reisner 2618b411b363SPhilipp Reisner /* returns 1 if invalid */ 2619b411b363SPhilipp Reisner static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self) 2620b411b363SPhilipp Reisner { 2621b411b363SPhilipp Reisner /* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */ 2622b411b363SPhilipp Reisner if ((peer == ASB_DISCARD_REMOTE && self == ASB_DISCARD_LOCAL) || 2623b411b363SPhilipp Reisner (self == ASB_DISCARD_REMOTE && peer == ASB_DISCARD_LOCAL)) 2624b411b363SPhilipp Reisner return 0; 2625b411b363SPhilipp Reisner 2626b411b363SPhilipp Reisner /* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */ 2627b411b363SPhilipp Reisner if (peer == ASB_DISCARD_REMOTE || peer == ASB_DISCARD_LOCAL || 2628b411b363SPhilipp Reisner self == ASB_DISCARD_REMOTE || self == ASB_DISCARD_LOCAL) 2629b411b363SPhilipp Reisner return 1; 2630b411b363SPhilipp Reisner 2631b411b363SPhilipp Reisner /* everything else is valid if they are equal on both sides. */ 2632b411b363SPhilipp Reisner if (peer == self) 2633b411b363SPhilipp Reisner return 0; 2634b411b363SPhilipp Reisner 2635b411b363SPhilipp Reisner /* everything else is invalid.
*/ 2636b411b363SPhilipp Reisner return 1; 2637b411b363SPhilipp Reisner } 2638b411b363SPhilipp Reisner 263902918be2SPhilipp Reisner static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) 2640b411b363SPhilipp Reisner { 264102918be2SPhilipp Reisner struct p_protocol *p = &mdev->data.rbuf.protocol; 2642b411b363SPhilipp Reisner int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p; 2643cf14c2e9SPhilipp Reisner int p_want_lose, p_two_primaries, cf; 2644b411b363SPhilipp Reisner char p_integrity_alg[SHARED_SECRET_MAX] = ""; 2645b411b363SPhilipp Reisner 2646b411b363SPhilipp Reisner p_proto = be32_to_cpu(p->protocol); 2647b411b363SPhilipp Reisner p_after_sb_0p = be32_to_cpu(p->after_sb_0p); 2648b411b363SPhilipp Reisner p_after_sb_1p = be32_to_cpu(p->after_sb_1p); 2649b411b363SPhilipp Reisner p_after_sb_2p = be32_to_cpu(p->after_sb_2p); 2650b411b363SPhilipp Reisner p_two_primaries = be32_to_cpu(p->two_primaries); 2651cf14c2e9SPhilipp Reisner cf = be32_to_cpu(p->conn_flags); 2652cf14c2e9SPhilipp Reisner p_want_lose = cf & CF_WANT_LOSE; 2653cf14c2e9SPhilipp Reisner 2654cf14c2e9SPhilipp Reisner clear_bit(CONN_DRY_RUN, &mdev->flags); 2655cf14c2e9SPhilipp Reisner 2656cf14c2e9SPhilipp Reisner if (cf & CF_DRY_RUN) 2657cf14c2e9SPhilipp Reisner set_bit(CONN_DRY_RUN, &mdev->flags); 2658b411b363SPhilipp Reisner 2659b411b363SPhilipp Reisner if (p_proto != mdev->net_conf->wire_protocol) { 2660b411b363SPhilipp Reisner dev_err(DEV, "incompatible communication protocols\n"); 2661b411b363SPhilipp Reisner goto disconnect; 2662b411b363SPhilipp Reisner } 2663b411b363SPhilipp Reisner 2664b411b363SPhilipp Reisner if (cmp_after_sb(p_after_sb_0p, mdev->net_conf->after_sb_0p)) { 2665b411b363SPhilipp Reisner dev_err(DEV, "incompatible after-sb-0pri settings\n"); 2666b411b363SPhilipp Reisner goto disconnect; 2667b411b363SPhilipp Reisner } 2668b411b363SPhilipp Reisner 2669b411b363SPhilipp Reisner if (cmp_after_sb(p_after_sb_1p, mdev->net_conf->after_sb_1p)) { 2670b411b363SPhilipp Reisner dev_err(DEV, "incompatible after-sb-1pri settings\n"); 2671b411b363SPhilipp Reisner goto disconnect; 2672b411b363SPhilipp Reisner } 2673b411b363SPhilipp Reisner 2674b411b363SPhilipp Reisner if (cmp_after_sb(p_after_sb_2p, mdev->net_conf->after_sb_2p)) { 2675b411b363SPhilipp Reisner dev_err(DEV, "incompatible after-sb-2pri settings\n"); 2676b411b363SPhilipp Reisner goto disconnect; 2677b411b363SPhilipp Reisner } 2678b411b363SPhilipp Reisner 2679b411b363SPhilipp Reisner if (p_want_lose && mdev->net_conf->want_lose) { 2680b411b363SPhilipp Reisner dev_err(DEV, "both sides have the 'want_lose' flag set\n"); 2681b411b363SPhilipp Reisner goto disconnect; 2682b411b363SPhilipp Reisner } 2683b411b363SPhilipp Reisner 2684b411b363SPhilipp Reisner if (p_two_primaries != mdev->net_conf->two_primaries) { 2685b411b363SPhilipp Reisner dev_err(DEV, "incompatible setting of the two-primaries options\n"); 2686b411b363SPhilipp Reisner goto disconnect; 2687b411b363SPhilipp Reisner } 2688b411b363SPhilipp Reisner 2689b411b363SPhilipp Reisner if (mdev->agreed_pro_version >= 87) { 2690b411b363SPhilipp Reisner unsigned char *my_alg = mdev->net_conf->integrity_alg; 2691b411b363SPhilipp Reisner 2692b411b363SPhilipp Reisner if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size) 269381e84650SAndreas Gruenbacher return false; 2694b411b363SPhilipp Reisner 2695b411b363SPhilipp Reisner p_integrity_alg[SHARED_SECRET_MAX-1] = 0; 2696b411b363SPhilipp Reisner if (strcmp(p_integrity_alg, my_alg)) { 2697b411b363SPhilipp Reisner 
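/* A mismatched data-integrity-alg is handled like the other incompatible settings above: log the error and tear the connection down via the disconnect label (C_DISCONNECTING). */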
dev_err(DEV, "incompatible setting of the data-integrity-alg\n"); 2698b411b363SPhilipp Reisner goto disconnect; 2699b411b363SPhilipp Reisner } 2700b411b363SPhilipp Reisner dev_info(DEV, "data-integrity-alg: %s\n", 2701b411b363SPhilipp Reisner my_alg[0] ? my_alg : (unsigned char *)"<not-used>"); 2702b411b363SPhilipp Reisner } 2703b411b363SPhilipp Reisner 270481e84650SAndreas Gruenbacher return true; 2705b411b363SPhilipp Reisner 2706b411b363SPhilipp Reisner disconnect: 2707b411b363SPhilipp Reisner drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); 270881e84650SAndreas Gruenbacher return false; 2709b411b363SPhilipp Reisner } 2710b411b363SPhilipp Reisner 2711b411b363SPhilipp Reisner /* helper function 2712b411b363SPhilipp Reisner * input: alg name, feature name 2713b411b363SPhilipp Reisner * return: NULL (alg name was "") 2714b411b363SPhilipp Reisner * ERR_PTR(error) if something goes wrong 2715b411b363SPhilipp Reisner * or the crypto hash ptr, if it worked out ok. */ 2716b411b363SPhilipp Reisner struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev, 2717b411b363SPhilipp Reisner const char *alg, const char *name) 2718b411b363SPhilipp Reisner { 2719b411b363SPhilipp Reisner struct crypto_hash *tfm; 2720b411b363SPhilipp Reisner 2721b411b363SPhilipp Reisner if (!alg[0]) 2722b411b363SPhilipp Reisner return NULL; 2723b411b363SPhilipp Reisner 2724b411b363SPhilipp Reisner tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC); 2725b411b363SPhilipp Reisner if (IS_ERR(tfm)) { 2726b411b363SPhilipp Reisner dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n", 2727b411b363SPhilipp Reisner alg, name, PTR_ERR(tfm)); 2728b411b363SPhilipp Reisner return tfm; 2729b411b363SPhilipp Reisner } 2730b411b363SPhilipp Reisner if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) { 2731b411b363SPhilipp Reisner crypto_free_hash(tfm); 2732b411b363SPhilipp Reisner dev_err(DEV, "\"%s\" is not a digest (%s)\n", alg, name); 2733b411b363SPhilipp Reisner return ERR_PTR(-EINVAL); 2734b411b363SPhilipp Reisner } 2735b411b363SPhilipp Reisner return tfm; 2736b411b363SPhilipp Reisner } 2737b411b363SPhilipp Reisner 273802918be2SPhilipp Reisner static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int packet_size) 2739b411b363SPhilipp Reisner { 274081e84650SAndreas Gruenbacher int ok = true; 274102918be2SPhilipp Reisner struct p_rs_param_95 *p = &mdev->data.rbuf.rs_param_95; 2742b411b363SPhilipp Reisner unsigned int header_size, data_size, exp_max_sz; 2743b411b363SPhilipp Reisner struct crypto_hash *verify_tfm = NULL; 2744b411b363SPhilipp Reisner struct crypto_hash *csums_tfm = NULL; 2745b411b363SPhilipp Reisner const int apv = mdev->agreed_pro_version; 2746778f271dSPhilipp Reisner int *rs_plan_s = NULL; 2747778f271dSPhilipp Reisner int fifo_size = 0; 2748b411b363SPhilipp Reisner 2749b411b363SPhilipp Reisner exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param) 2750b411b363SPhilipp Reisner : apv == 88 ? sizeof(struct p_rs_param) 2751b411b363SPhilipp Reisner + SHARED_SECRET_MAX 27528e26f9ccSPhilipp Reisner : apv <= 94 ? 
sizeof(struct p_rs_param_89) 27538e26f9ccSPhilipp Reisner : /* apv >= 95 */ sizeof(struct p_rs_param_95); 2754b411b363SPhilipp Reisner 275502918be2SPhilipp Reisner if (packet_size > exp_max_sz) { 2756b411b363SPhilipp Reisner dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n", 275702918be2SPhilipp Reisner packet_size, exp_max_sz); 275881e84650SAndreas Gruenbacher return false; 2759b411b363SPhilipp Reisner } 2760b411b363SPhilipp Reisner 2761b411b363SPhilipp Reisner if (apv <= 88) { 276202918be2SPhilipp Reisner header_size = sizeof(struct p_rs_param) - sizeof(struct p_header80); 276302918be2SPhilipp Reisner data_size = packet_size - header_size; 27648e26f9ccSPhilipp Reisner } else if (apv <= 94) { 276502918be2SPhilipp Reisner header_size = sizeof(struct p_rs_param_89) - sizeof(struct p_header80); 276602918be2SPhilipp Reisner data_size = packet_size - header_size; 2767b411b363SPhilipp Reisner D_ASSERT(data_size == 0); 27688e26f9ccSPhilipp Reisner } else { 276902918be2SPhilipp Reisner header_size = sizeof(struct p_rs_param_95) - sizeof(struct p_header80); 277002918be2SPhilipp Reisner data_size = packet_size - header_size; 2771b411b363SPhilipp Reisner D_ASSERT(data_size == 0); 2772b411b363SPhilipp Reisner } 2773b411b363SPhilipp Reisner 2774b411b363SPhilipp Reisner /* initialize verify_alg and csums_alg */ 2775b411b363SPhilipp Reisner memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX); 2776b411b363SPhilipp Reisner 277702918be2SPhilipp Reisner if (drbd_recv(mdev, &p->head.payload, header_size) != header_size) 277881e84650SAndreas Gruenbacher return false; 2779b411b363SPhilipp Reisner 2780b411b363SPhilipp Reisner mdev->sync_conf.rate = be32_to_cpu(p->rate); 2781b411b363SPhilipp Reisner 2782b411b363SPhilipp Reisner if (apv >= 88) { 2783b411b363SPhilipp Reisner if (apv == 88) { 2784b411b363SPhilipp Reisner if (data_size > SHARED_SECRET_MAX) { 2785b411b363SPhilipp Reisner dev_err(DEV, "verify-alg too long, " 2786b411b363SPhilipp Reisner "peer wants %u, accepting only %u byte\n", 2787b411b363SPhilipp Reisner data_size, SHARED_SECRET_MAX); 278881e84650SAndreas Gruenbacher return false; 2789b411b363SPhilipp Reisner } 2790b411b363SPhilipp Reisner 2791b411b363SPhilipp Reisner if (drbd_recv(mdev, p->verify_alg, data_size) != data_size) 279281e84650SAndreas Gruenbacher return false; 2793b411b363SPhilipp Reisner 2794b411b363SPhilipp Reisner /* we expect NUL terminated string */ 2795b411b363SPhilipp Reisner /* but just in case someone tries to be evil */ 2796b411b363SPhilipp Reisner D_ASSERT(p->verify_alg[data_size-1] == 0); 2797b411b363SPhilipp Reisner p->verify_alg[data_size-1] = 0; 2798b411b363SPhilipp Reisner 2799b411b363SPhilipp Reisner } else /* apv >= 89 */ { 2800b411b363SPhilipp Reisner /* we still expect NUL terminated strings */ 2801b411b363SPhilipp Reisner /* but just in case someone tries to be evil */ 2802b411b363SPhilipp Reisner D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0); 2803b411b363SPhilipp Reisner D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0); 2804b411b363SPhilipp Reisner p->verify_alg[SHARED_SECRET_MAX-1] = 0; 2805b411b363SPhilipp Reisner p->csums_alg[SHARED_SECRET_MAX-1] = 0; 2806b411b363SPhilipp Reisner } 2807b411b363SPhilipp Reisner 2808b411b363SPhilipp Reisner if (strcmp(mdev->sync_conf.verify_alg, p->verify_alg)) { 2809b411b363SPhilipp Reisner if (mdev->state.conn == C_WF_REPORT_PARAMS) { 2810b411b363SPhilipp Reisner dev_err(DEV, "Different verify-alg settings. 
me=\"%s\" peer=\"%s\"\n", 2811b411b363SPhilipp Reisner mdev->sync_conf.verify_alg, p->verify_alg); 2812b411b363SPhilipp Reisner goto disconnect; 2813b411b363SPhilipp Reisner } 2814b411b363SPhilipp Reisner verify_tfm = drbd_crypto_alloc_digest_safe(mdev, 2815b411b363SPhilipp Reisner p->verify_alg, "verify-alg"); 2816b411b363SPhilipp Reisner if (IS_ERR(verify_tfm)) { 2817b411b363SPhilipp Reisner verify_tfm = NULL; 2818b411b363SPhilipp Reisner goto disconnect; 2819b411b363SPhilipp Reisner } 2820b411b363SPhilipp Reisner } 2821b411b363SPhilipp Reisner 2822b411b363SPhilipp Reisner if (apv >= 89 && strcmp(mdev->sync_conf.csums_alg, p->csums_alg)) { 2823b411b363SPhilipp Reisner if (mdev->state.conn == C_WF_REPORT_PARAMS) { 2824b411b363SPhilipp Reisner dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n", 2825b411b363SPhilipp Reisner mdev->sync_conf.csums_alg, p->csums_alg); 2826b411b363SPhilipp Reisner goto disconnect; 2827b411b363SPhilipp Reisner } 2828b411b363SPhilipp Reisner csums_tfm = drbd_crypto_alloc_digest_safe(mdev, 2829b411b363SPhilipp Reisner p->csums_alg, "csums-alg"); 2830b411b363SPhilipp Reisner if (IS_ERR(csums_tfm)) { 2831b411b363SPhilipp Reisner csums_tfm = NULL; 2832b411b363SPhilipp Reisner goto disconnect; 2833b411b363SPhilipp Reisner } 2834b411b363SPhilipp Reisner } 2835b411b363SPhilipp Reisner 28368e26f9ccSPhilipp Reisner if (apv > 94) { 28378e26f9ccSPhilipp Reisner mdev->sync_conf.rate = be32_to_cpu(p->rate); 28388e26f9ccSPhilipp Reisner mdev->sync_conf.c_plan_ahead = be32_to_cpu(p->c_plan_ahead); 28398e26f9ccSPhilipp Reisner mdev->sync_conf.c_delay_target = be32_to_cpu(p->c_delay_target); 28408e26f9ccSPhilipp Reisner mdev->sync_conf.c_fill_target = be32_to_cpu(p->c_fill_target); 28418e26f9ccSPhilipp Reisner mdev->sync_conf.c_max_rate = be32_to_cpu(p->c_max_rate); 2842778f271dSPhilipp Reisner 2843778f271dSPhilipp Reisner fifo_size = (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ; 2844778f271dSPhilipp Reisner if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) { 2845778f271dSPhilipp Reisner rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL); 2846778f271dSPhilipp Reisner if (!rs_plan_s) { 2847778f271dSPhilipp Reisner dev_err(DEV, "kmalloc of fifo_buffer failed"); 2848778f271dSPhilipp Reisner goto disconnect; 2849778f271dSPhilipp Reisner } 2850778f271dSPhilipp Reisner } 28518e26f9ccSPhilipp Reisner } 2852b411b363SPhilipp Reisner 2853b411b363SPhilipp Reisner spin_lock(&mdev->peer_seq_lock); 2854b411b363SPhilipp Reisner /* lock against drbd_nl_syncer_conf() */ 2855b411b363SPhilipp Reisner if (verify_tfm) { 2856b411b363SPhilipp Reisner strcpy(mdev->sync_conf.verify_alg, p->verify_alg); 2857b411b363SPhilipp Reisner mdev->sync_conf.verify_alg_len = strlen(p->verify_alg) + 1; 2858b411b363SPhilipp Reisner crypto_free_hash(mdev->verify_tfm); 2859b411b363SPhilipp Reisner mdev->verify_tfm = verify_tfm; 2860b411b363SPhilipp Reisner dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg); 2861b411b363SPhilipp Reisner } 2862b411b363SPhilipp Reisner if (csums_tfm) { 2863b411b363SPhilipp Reisner strcpy(mdev->sync_conf.csums_alg, p->csums_alg); 2864b411b363SPhilipp Reisner mdev->sync_conf.csums_alg_len = strlen(p->csums_alg) + 1; 2865b411b363SPhilipp Reisner crypto_free_hash(mdev->csums_tfm); 2866b411b363SPhilipp Reisner mdev->csums_tfm = csums_tfm; 2867b411b363SPhilipp Reisner dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg); 2868b411b363SPhilipp Reisner } 2869778f271dSPhilipp Reisner if (fifo_size != mdev->rs_plan_s.size) { 2870778f271dSPhilipp 
Reisner kfree(mdev->rs_plan_s.values); 2871778f271dSPhilipp Reisner mdev->rs_plan_s.values = rs_plan_s; 2872778f271dSPhilipp Reisner mdev->rs_plan_s.size = fifo_size; 2873778f271dSPhilipp Reisner mdev->rs_planed = 0; 2874778f271dSPhilipp Reisner } 2875b411b363SPhilipp Reisner spin_unlock(&mdev->peer_seq_lock); 2876b411b363SPhilipp Reisner } 2877b411b363SPhilipp Reisner 2878b411b363SPhilipp Reisner return ok; 2879b411b363SPhilipp Reisner disconnect: 2880b411b363SPhilipp Reisner /* just for completeness: actually not needed, 2881b411b363SPhilipp Reisner * as this is not reached if csums_tfm was ok. */ 2882b411b363SPhilipp Reisner crypto_free_hash(csums_tfm); 2883b411b363SPhilipp Reisner /* but free the verify_tfm again, if csums_tfm did not work out */ 2884b411b363SPhilipp Reisner crypto_free_hash(verify_tfm); 2885b411b363SPhilipp Reisner drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); 288681e84650SAndreas Gruenbacher return false; 2887b411b363SPhilipp Reisner } 2888b411b363SPhilipp Reisner 2889b411b363SPhilipp Reisner static void drbd_setup_order_type(struct drbd_conf *mdev, int peer) 2890b411b363SPhilipp Reisner { 2891b411b363SPhilipp Reisner /* sorry, we currently have no working implementation 2892b411b363SPhilipp Reisner * of distributed TCQ */ 2893b411b363SPhilipp Reisner } 2894b411b363SPhilipp Reisner 2895b411b363SPhilipp Reisner /* warn if the arguments differ by more than 12.5% */ 2896b411b363SPhilipp Reisner static void warn_if_differ_considerably(struct drbd_conf *mdev, 2897b411b363SPhilipp Reisner const char *s, sector_t a, sector_t b) 2898b411b363SPhilipp Reisner { 2899b411b363SPhilipp Reisner sector_t d; 2900b411b363SPhilipp Reisner if (a == 0 || b == 0) 2901b411b363SPhilipp Reisner return; 2902b411b363SPhilipp Reisner d = (a > b) ? (a - b) : (b - a); 2903b411b363SPhilipp Reisner if (d > (a>>3) || d > (b>>3)) 2904b411b363SPhilipp Reisner dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s, 2905b411b363SPhilipp Reisner (unsigned long long)a, (unsigned long long)b); 2906b411b363SPhilipp Reisner } 2907b411b363SPhilipp Reisner 290802918be2SPhilipp Reisner static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) 2909b411b363SPhilipp Reisner { 291002918be2SPhilipp Reisner struct p_sizes *p = &mdev->data.rbuf.sizes; 2911b411b363SPhilipp Reisner enum determine_dev_size dd = unchanged; 29121816a2b4SLars Ellenberg unsigned int max_bio_size; 2913b411b363SPhilipp Reisner sector_t p_size, p_usize, my_usize; 2914b411b363SPhilipp Reisner int ldsc = 0; /* local disk size changed */ 2915e89b591cSPhilipp Reisner enum dds_flags ddsf; 2916b411b363SPhilipp Reisner 2917b411b363SPhilipp Reisner p_size = be64_to_cpu(p->d_size); 2918b411b363SPhilipp Reisner p_usize = be64_to_cpu(p->u_size); 2919b411b363SPhilipp Reisner 2920b411b363SPhilipp Reisner if (p_size == 0 && mdev->state.disk == D_DISKLESS) { 2921b411b363SPhilipp Reisner dev_err(DEV, "some backing storage is needed\n"); 2922b411b363SPhilipp Reisner drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); 292381e84650SAndreas Gruenbacher return false; 2924b411b363SPhilipp Reisner } 2925b411b363SPhilipp Reisner 2926b411b363SPhilipp Reisner /* just store the peer's disk size for now. 2927b411b363SPhilipp Reisner * we still need to figure out whether we accept that. 
*/ 2928b411b363SPhilipp Reisner mdev->p_size = p_size; 2929b411b363SPhilipp Reisner 2930b411b363SPhilipp Reisner if (get_ldev(mdev)) { 2931b411b363SPhilipp Reisner warn_if_differ_considerably(mdev, "lower level device sizes", 2932b411b363SPhilipp Reisner p_size, drbd_get_max_capacity(mdev->ldev)); 2933b411b363SPhilipp Reisner warn_if_differ_considerably(mdev, "user requested size", 2934b411b363SPhilipp Reisner p_usize, mdev->ldev->dc.disk_size); 2935b411b363SPhilipp Reisner 2936b411b363SPhilipp Reisner /* if this is the first connect, or an otherwise expected 2937b411b363SPhilipp Reisner * param exchange, choose the minimum */ 2938b411b363SPhilipp Reisner if (mdev->state.conn == C_WF_REPORT_PARAMS) 2939b411b363SPhilipp Reisner p_usize = min_not_zero((sector_t)mdev->ldev->dc.disk_size, 2940b411b363SPhilipp Reisner p_usize); 2941b411b363SPhilipp Reisner 2942b411b363SPhilipp Reisner my_usize = mdev->ldev->dc.disk_size; 2943b411b363SPhilipp Reisner 2944b411b363SPhilipp Reisner if (mdev->ldev->dc.disk_size != p_usize) { 2945b411b363SPhilipp Reisner mdev->ldev->dc.disk_size = p_usize; 2946b411b363SPhilipp Reisner dev_info(DEV, "Peer sets u_size to %lu sectors\n", 2947b411b363SPhilipp Reisner (unsigned long)mdev->ldev->dc.disk_size); 2948b411b363SPhilipp Reisner } 2949b411b363SPhilipp Reisner 2950b411b363SPhilipp Reisner /* Never shrink a device with usable data during connect. 2951b411b363SPhilipp Reisner But allow online shrinking if we are connected. */ 2952a393db6fSPhilipp Reisner if (drbd_new_dev_size(mdev, mdev->ldev, 0) < 2953b411b363SPhilipp Reisner drbd_get_capacity(mdev->this_bdev) && 2954b411b363SPhilipp Reisner mdev->state.disk >= D_OUTDATED && 2955b411b363SPhilipp Reisner mdev->state.conn < C_CONNECTED) { 2956b411b363SPhilipp Reisner dev_err(DEV, "The peer's disk size is too small!\n"); 2957b411b363SPhilipp Reisner drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); 2958b411b363SPhilipp Reisner mdev->ldev->dc.disk_size = my_usize; 2959b411b363SPhilipp Reisner put_ldev(mdev); 296081e84650SAndreas Gruenbacher return false; 2961b411b363SPhilipp Reisner } 2962b411b363SPhilipp Reisner put_ldev(mdev); 2963b411b363SPhilipp Reisner } 2964b411b363SPhilipp Reisner 2965e89b591cSPhilipp Reisner ddsf = be16_to_cpu(p->dds_flags); 2966b411b363SPhilipp Reisner if (get_ldev(mdev)) { 2967e89b591cSPhilipp Reisner dd = drbd_determin_dev_size(mdev, ddsf); 2968b411b363SPhilipp Reisner put_ldev(mdev); 2969b411b363SPhilipp Reisner if (dd == dev_size_error) 297081e84650SAndreas Gruenbacher return false; 2971b411b363SPhilipp Reisner drbd_md_sync(mdev); 2972b411b363SPhilipp Reisner } else { 2973b411b363SPhilipp Reisner /* I am diskless, need to accept the peer's size. 
*/ 2974b411b363SPhilipp Reisner drbd_set_my_capacity(mdev, p_size); 2975b411b363SPhilipp Reisner } 2976b411b363SPhilipp Reisner 2977b411b363SPhilipp Reisner if (get_ldev(mdev)) { 2978b411b363SPhilipp Reisner if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) { 2979b411b363SPhilipp Reisner mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev); 2980b411b363SPhilipp Reisner ldsc = 1; 2981b411b363SPhilipp Reisner } 2982b411b363SPhilipp Reisner 2983a1c88d0dSLars Ellenberg if (mdev->agreed_pro_version < 94) 29841816a2b4SLars Ellenberg max_bio_size = be32_to_cpu(p->max_bio_size); 29858979d9c9SLars Ellenberg else if (mdev->agreed_pro_version == 94) 29861816a2b4SLars Ellenberg max_bio_size = DRBD_MAX_SIZE_H80_PACKET; 2987a1c88d0dSLars Ellenberg else /* drbd 8.3.8 onwards */ 29881816a2b4SLars Ellenberg max_bio_size = DRBD_MAX_BIO_SIZE; 2989a1c88d0dSLars Ellenberg 29901816a2b4SLars Ellenberg if (max_bio_size != queue_max_hw_sectors(mdev->rq_queue) << 9) 29911816a2b4SLars Ellenberg drbd_setup_queue_param(mdev, max_bio_size); 2992b411b363SPhilipp Reisner 2993e89b591cSPhilipp Reisner drbd_setup_order_type(mdev, be16_to_cpu(p->queue_order_type)); 2994b411b363SPhilipp Reisner put_ldev(mdev); 2995b411b363SPhilipp Reisner } 2996b411b363SPhilipp Reisner 2997b411b363SPhilipp Reisner if (mdev->state.conn > C_WF_REPORT_PARAMS) { 2998b411b363SPhilipp Reisner if (be64_to_cpu(p->c_size) != 2999b411b363SPhilipp Reisner drbd_get_capacity(mdev->this_bdev) || ldsc) { 3000b411b363SPhilipp Reisner /* we have different sizes, probably peer 3001b411b363SPhilipp Reisner * needs to know my new size... */ 3002e89b591cSPhilipp Reisner drbd_send_sizes(mdev, 0, ddsf); 3003b411b363SPhilipp Reisner } 3004b411b363SPhilipp Reisner if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) || 3005b411b363SPhilipp Reisner (dd == grew && mdev->state.conn == C_CONNECTED)) { 3006b411b363SPhilipp Reisner if (mdev->state.pdsk >= D_INCONSISTENT && 3007e89b591cSPhilipp Reisner mdev->state.disk >= D_INCONSISTENT) { 3008e89b591cSPhilipp Reisner if (ddsf & DDSF_NO_RESYNC) 3009e89b591cSPhilipp Reisner dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n"); 3010b411b363SPhilipp Reisner else 3011e89b591cSPhilipp Reisner resync_after_online_grow(mdev); 3012e89b591cSPhilipp Reisner } else 3013b411b363SPhilipp Reisner set_bit(RESYNC_AFTER_NEG, &mdev->flags); 3014b411b363SPhilipp Reisner } 3015b411b363SPhilipp Reisner } 3016b411b363SPhilipp Reisner 301781e84650SAndreas Gruenbacher return true; 3018b411b363SPhilipp Reisner } 3019b411b363SPhilipp Reisner 302002918be2SPhilipp Reisner static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) 3021b411b363SPhilipp Reisner { 302202918be2SPhilipp Reisner struct p_uuids *p = &mdev->data.rbuf.uuids; 3023b411b363SPhilipp Reisner u64 *p_uuid; 3024b411b363SPhilipp Reisner int i; 3025b411b363SPhilipp Reisner 3026b411b363SPhilipp Reisner p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO); 3027b411b363SPhilipp Reisner 3028b411b363SPhilipp Reisner for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++) 3029b411b363SPhilipp Reisner p_uuid[i] = be64_to_cpu(p->uuid[i]); 3030b411b363SPhilipp Reisner 3031b411b363SPhilipp Reisner kfree(mdev->p_uuid); 3032b411b363SPhilipp Reisner mdev->p_uuid = p_uuid; 3033b411b363SPhilipp Reisner 3034b411b363SPhilipp Reisner if (mdev->state.conn < C_CONNECTED && 3035b411b363SPhilipp Reisner mdev->state.disk < D_INCONSISTENT && 3036b411b363SPhilipp Reisner mdev->state.role == R_PRIMARY && 
3037b411b363SPhilipp Reisner (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) { 3038b411b363SPhilipp Reisner dev_err(DEV, "Can only connect to data with current UUID=%016llX\n", 3039b411b363SPhilipp Reisner (unsigned long long)mdev->ed_uuid); 3040b411b363SPhilipp Reisner drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); 304181e84650SAndreas Gruenbacher return false; 3042b411b363SPhilipp Reisner } 3043b411b363SPhilipp Reisner 3044b411b363SPhilipp Reisner if (get_ldev(mdev)) { 3045b411b363SPhilipp Reisner int skip_initial_sync = 3046b411b363SPhilipp Reisner mdev->state.conn == C_CONNECTED && 3047b411b363SPhilipp Reisner mdev->agreed_pro_version >= 90 && 3048b411b363SPhilipp Reisner mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && 3049b411b363SPhilipp Reisner (p_uuid[UI_FLAGS] & 8); 3050b411b363SPhilipp Reisner if (skip_initial_sync) { 3051b411b363SPhilipp Reisner dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n"); 3052b411b363SPhilipp Reisner drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write, 3053b411b363SPhilipp Reisner "clear_n_write from receive_uuids"); 3054b411b363SPhilipp Reisner _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]); 3055b411b363SPhilipp Reisner _drbd_uuid_set(mdev, UI_BITMAP, 0); 3056b411b363SPhilipp Reisner _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE), 3057b411b363SPhilipp Reisner CS_VERBOSE, NULL); 3058b411b363SPhilipp Reisner drbd_md_sync(mdev); 3059b411b363SPhilipp Reisner } 3060b411b363SPhilipp Reisner put_ldev(mdev); 306118a50fa2SPhilipp Reisner } else if (mdev->state.disk < D_INCONSISTENT && 306218a50fa2SPhilipp Reisner mdev->state.role == R_PRIMARY) { 306318a50fa2SPhilipp Reisner /* I am a diskless primary, the peer just created a new current UUID 306418a50fa2SPhilipp Reisner for me. */ 306518a50fa2SPhilipp Reisner drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]); 3066b411b363SPhilipp Reisner } 3067b411b363SPhilipp Reisner 3068b411b363SPhilipp Reisner /* Before we test the disk state, we should wait until a possibly 3069b411b363SPhilipp Reisner ongoing cluster-wide state change has finished. That is important if 3070b411b363SPhilipp Reisner we are primary and are detaching from our disk. We need to see the 3071b411b363SPhilipp Reisner new disk state... */ 3072b411b363SPhilipp Reisner wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags)); 3073b411b363SPhilipp Reisner if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT) 3074b411b363SPhilipp Reisner drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]); 3075b411b363SPhilipp Reisner 307681e84650SAndreas Gruenbacher return true; 3077b411b363SPhilipp Reisner } 3078b411b363SPhilipp Reisner 3079b411b363SPhilipp Reisner /** 3080b411b363SPhilipp Reisner * convert_state() - Converts the peer's view of the cluster state to our point of view 3081b411b363SPhilipp Reisner * @ps: The state as seen by the peer.
3082b411b363SPhilipp Reisner */ 3083b411b363SPhilipp Reisner static union drbd_state convert_state(union drbd_state ps) 3084b411b363SPhilipp Reisner { 3085b411b363SPhilipp Reisner union drbd_state ms; 3086b411b363SPhilipp Reisner 3087b411b363SPhilipp Reisner static enum drbd_conns c_tab[] = { 3088b411b363SPhilipp Reisner [C_CONNECTED] = C_CONNECTED, 3089b411b363SPhilipp Reisner 3090b411b363SPhilipp Reisner [C_STARTING_SYNC_S] = C_STARTING_SYNC_T, 3091b411b363SPhilipp Reisner [C_STARTING_SYNC_T] = C_STARTING_SYNC_S, 3092b411b363SPhilipp Reisner [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */ 3093b411b363SPhilipp Reisner [C_VERIFY_S] = C_VERIFY_T, 3094b411b363SPhilipp Reisner [C_MASK] = C_MASK, 3095b411b363SPhilipp Reisner }; 3096b411b363SPhilipp Reisner 3097b411b363SPhilipp Reisner ms.i = ps.i; 3098b411b363SPhilipp Reisner 3099b411b363SPhilipp Reisner ms.conn = c_tab[ps.conn]; 3100b411b363SPhilipp Reisner ms.peer = ps.role; 3101b411b363SPhilipp Reisner ms.role = ps.peer; 3102b411b363SPhilipp Reisner ms.pdsk = ps.disk; 3103b411b363SPhilipp Reisner ms.disk = ps.pdsk; 3104b411b363SPhilipp Reisner ms.peer_isp = (ps.aftr_isp | ps.user_isp); 3105b411b363SPhilipp Reisner 3106b411b363SPhilipp Reisner return ms; 3107b411b363SPhilipp Reisner } 3108b411b363SPhilipp Reisner 310902918be2SPhilipp Reisner static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) 3110b411b363SPhilipp Reisner { 311102918be2SPhilipp Reisner struct p_req_state *p = &mdev->data.rbuf.req_state; 3112b411b363SPhilipp Reisner union drbd_state mask, val; 3113bf885f8aSAndreas Gruenbacher enum drbd_state_rv rv; 3114b411b363SPhilipp Reisner 3115b411b363SPhilipp Reisner mask.i = be32_to_cpu(p->mask); 3116b411b363SPhilipp Reisner val.i = be32_to_cpu(p->val); 3117b411b363SPhilipp Reisner 3118b411b363SPhilipp Reisner if (test_bit(DISCARD_CONCURRENT, &mdev->flags) && 3119b411b363SPhilipp Reisner test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) { 3120b411b363SPhilipp Reisner drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG); 312181e84650SAndreas Gruenbacher return true; 3122b411b363SPhilipp Reisner } 3123b411b363SPhilipp Reisner 3124b411b363SPhilipp Reisner mask = convert_state(mask); 3125b411b363SPhilipp Reisner val = convert_state(val); 3126b411b363SPhilipp Reisner 3127b411b363SPhilipp Reisner rv = drbd_change_state(mdev, CS_VERBOSE, mask, val); 3128b411b363SPhilipp Reisner 3129b411b363SPhilipp Reisner drbd_send_sr_reply(mdev, rv); 3130b411b363SPhilipp Reisner drbd_md_sync(mdev); 3131b411b363SPhilipp Reisner 313281e84650SAndreas Gruenbacher return true; 3133b411b363SPhilipp Reisner } 3134b411b363SPhilipp Reisner 313502918be2SPhilipp Reisner static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) 3136b411b363SPhilipp Reisner { 313702918be2SPhilipp Reisner struct p_state *p = &mdev->data.rbuf.state; 31384ac4aadaSLars Ellenberg union drbd_state os, ns, peer_state; 3139b411b363SPhilipp Reisner enum drbd_disk_state real_peer_disk; 314065d922c3SPhilipp Reisner enum chg_state_flags cs_flags; 3141b411b363SPhilipp Reisner int rv; 3142b411b363SPhilipp Reisner 3143b411b363SPhilipp Reisner peer_state.i = be32_to_cpu(p->state); 3144b411b363SPhilipp Reisner 3145b411b363SPhilipp Reisner real_peer_disk = peer_state.disk; 3146b411b363SPhilipp Reisner if (peer_state.disk == D_NEGOTIATING) { 3147b411b363SPhilipp Reisner real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? 
D_INCONSISTENT : D_CONSISTENT; 3148b411b363SPhilipp Reisner dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk)); 3149b411b363SPhilipp Reisner } 3150b411b363SPhilipp Reisner 3151b411b363SPhilipp Reisner spin_lock_irq(&mdev->req_lock); 3152b411b363SPhilipp Reisner retry: 31534ac4aadaSLars Ellenberg os = ns = mdev->state; 3154b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 3155b411b363SPhilipp Reisner 3156e9ef7bb6SLars Ellenberg /* peer says his disk is uptodate, while we think it is inconsistent, 3157e9ef7bb6SLars Ellenberg * and this happens while we think we have a sync going on. */ 3158e9ef7bb6SLars Ellenberg if (os.pdsk == D_INCONSISTENT && real_peer_disk == D_UP_TO_DATE && 3159e9ef7bb6SLars Ellenberg os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) { 3160e9ef7bb6SLars Ellenberg /* If we are (becoming) SyncSource, but peer is still in sync 3161e9ef7bb6SLars Ellenberg * preparation, ignore its uptodate-ness to avoid flapping, it 3162e9ef7bb6SLars Ellenberg * will change to inconsistent once the peer reaches active 3163e9ef7bb6SLars Ellenberg * syncing states. 3164e9ef7bb6SLars Ellenberg * It may have changed syncer-paused flags, however, so we 3165e9ef7bb6SLars Ellenberg * cannot ignore this completely. */ 3166e9ef7bb6SLars Ellenberg if (peer_state.conn > C_CONNECTED && 3167e9ef7bb6SLars Ellenberg peer_state.conn < C_SYNC_SOURCE) 3168e9ef7bb6SLars Ellenberg real_peer_disk = D_INCONSISTENT; 3169e9ef7bb6SLars Ellenberg 3170e9ef7bb6SLars Ellenberg /* if peer_state changes to connected at the same time, 3171e9ef7bb6SLars Ellenberg * it explicitly notifies us that it finished resync. 3172e9ef7bb6SLars Ellenberg * Maybe we should finish it up, too? */ 3173e9ef7bb6SLars Ellenberg else if (os.conn >= C_SYNC_SOURCE && 3174e9ef7bb6SLars Ellenberg peer_state.conn == C_CONNECTED) { 3175e9ef7bb6SLars Ellenberg if (drbd_bm_total_weight(mdev) <= mdev->rs_failed) 3176e9ef7bb6SLars Ellenberg drbd_resync_finished(mdev); 317781e84650SAndreas Gruenbacher return true; 3178e9ef7bb6SLars Ellenberg } 3179e9ef7bb6SLars Ellenberg } 3180e9ef7bb6SLars Ellenberg 3181e9ef7bb6SLars Ellenberg /* peer says his disk is inconsistent, while we think it is uptodate, 3182e9ef7bb6SLars Ellenberg * and this happens while the peer still thinks we have a sync going on, 3183e9ef7bb6SLars Ellenberg * but we think we are already done with the sync. 3184e9ef7bb6SLars Ellenberg * We ignore this to avoid flapping pdsk. 3185e9ef7bb6SLars Ellenberg * This should not happen, if the peer is a recent version of drbd. 
*/ 3186e9ef7bb6SLars Ellenberg if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT && 3187e9ef7bb6SLars Ellenberg os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE) 3188e9ef7bb6SLars Ellenberg real_peer_disk = D_UP_TO_DATE; 3189e9ef7bb6SLars Ellenberg 31904ac4aadaSLars Ellenberg if (ns.conn == C_WF_REPORT_PARAMS) 31914ac4aadaSLars Ellenberg ns.conn = C_CONNECTED; 3192b411b363SPhilipp Reisner 319367531718SPhilipp Reisner if (peer_state.conn == C_AHEAD) 319467531718SPhilipp Reisner ns.conn = C_BEHIND; 319567531718SPhilipp Reisner 3196b411b363SPhilipp Reisner if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING && 3197b411b363SPhilipp Reisner get_ldev_if_state(mdev, D_NEGOTIATING)) { 3198b411b363SPhilipp Reisner int cr; /* consider resync */ 3199b411b363SPhilipp Reisner 3200b411b363SPhilipp Reisner /* if we established a new connection */ 32014ac4aadaSLars Ellenberg cr = (os.conn < C_CONNECTED); 3202b411b363SPhilipp Reisner /* if we had an established connection 3203b411b363SPhilipp Reisner * and one of the nodes newly attaches a disk */ 32044ac4aadaSLars Ellenberg cr |= (os.conn == C_CONNECTED && 3205b411b363SPhilipp Reisner (peer_state.disk == D_NEGOTIATING || 32064ac4aadaSLars Ellenberg os.disk == D_NEGOTIATING)); 3207b411b363SPhilipp Reisner /* if we have both been inconsistent, and the peer has been 3208b411b363SPhilipp Reisner * forced to be UpToDate with --overwrite-data */ 3209b411b363SPhilipp Reisner cr |= test_bit(CONSIDER_RESYNC, &mdev->flags); 3210b411b363SPhilipp Reisner /* if we had been plain connected, and the admin requested to 3211b411b363SPhilipp Reisner * start a sync by "invalidate" or "invalidate-remote" */ 32124ac4aadaSLars Ellenberg cr |= (os.conn == C_CONNECTED && 3213b411b363SPhilipp Reisner (peer_state.conn >= C_STARTING_SYNC_S && 3214b411b363SPhilipp Reisner peer_state.conn <= C_WF_BITMAP_T)); 3215b411b363SPhilipp Reisner 3216b411b363SPhilipp Reisner if (cr) 32174ac4aadaSLars Ellenberg ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk); 3218b411b363SPhilipp Reisner 3219b411b363SPhilipp Reisner put_ldev(mdev); 32204ac4aadaSLars Ellenberg if (ns.conn == C_MASK) { 32214ac4aadaSLars Ellenberg ns.conn = C_CONNECTED; 3222b411b363SPhilipp Reisner if (mdev->state.disk == D_NEGOTIATING) { 322382f59cc6SLars Ellenberg drbd_force_state(mdev, NS(disk, D_FAILED)); 3224b411b363SPhilipp Reisner } else if (peer_state.disk == D_NEGOTIATING) { 3225b411b363SPhilipp Reisner dev_err(DEV, "Disk attach process on the peer node was aborted.\n"); 3226b411b363SPhilipp Reisner peer_state.disk = D_DISKLESS; 3227580b9767SLars Ellenberg real_peer_disk = D_DISKLESS; 3228b411b363SPhilipp Reisner } else { 3229cf14c2e9SPhilipp Reisner if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags)) 323081e84650SAndreas Gruenbacher return false; 32314ac4aadaSLars Ellenberg D_ASSERT(os.conn == C_WF_REPORT_PARAMS); 3232b411b363SPhilipp Reisner drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); 323381e84650SAndreas Gruenbacher return false; 3234b411b363SPhilipp Reisner } 3235b411b363SPhilipp Reisner } 3236b411b363SPhilipp Reisner } 3237b411b363SPhilipp Reisner 3238b411b363SPhilipp Reisner spin_lock_irq(&mdev->req_lock); 32394ac4aadaSLars Ellenberg if (mdev->state.i != os.i) 3240b411b363SPhilipp Reisner goto retry; 3241b411b363SPhilipp Reisner clear_bit(CONSIDER_RESYNC, &mdev->flags); 3242b411b363SPhilipp Reisner ns.peer = peer_state.role; 3243b411b363SPhilipp Reisner ns.pdsk = real_peer_disk; 3244b411b363SPhilipp Reisner ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp); 
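/* The merged target state is built while holding req_lock: the peer's role and its (possibly corrected) disk state were just folded into ns.peer/ns.pdsk above. Below, a disk still in negotiation adopts the tentatively agreed disk state, the transition into Connected is applied as a regular state change while everything else is forced with CS_HARD, and a freshly connecting peer that is only Consistent cannot be used to thaw suspended IO, so that case aborts the connect. */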
32454ac4aadaSLars Ellenberg 	if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
3246b411b363SPhilipp Reisner 		ns.disk = mdev->new_state_tmp.disk;
32474ac4aadaSLars Ellenberg 	cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
32484ac4aadaSLars Ellenberg 	if (ns.pdsk == D_CONSISTENT && is_susp(ns) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
3249481c6f50SPhilipp Reisner 	    test_bit(NEW_CUR_UUID, &mdev->flags)) {
3250481c6f50SPhilipp Reisner 		/* Do not allow tl_restart(resend) for a rebooted peer. We can only allow this
3251481c6f50SPhilipp Reisner 		   for temporary network outages! */
3252481c6f50SPhilipp Reisner 		spin_unlock_irq(&mdev->req_lock);
3253481c6f50SPhilipp Reisner 		dev_err(DEV, "Aborting Connect, cannot thaw IO with an only Consistent peer\n");
3254481c6f50SPhilipp Reisner 		tl_clear(mdev);
3255481c6f50SPhilipp Reisner 		drbd_uuid_new_current(mdev);
3256481c6f50SPhilipp Reisner 		clear_bit(NEW_CUR_UUID, &mdev->flags);
3257481c6f50SPhilipp Reisner 		drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0));
325881e84650SAndreas Gruenbacher 		return false;
3259481c6f50SPhilipp Reisner 	}
326065d922c3SPhilipp Reisner 	rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
3261b411b363SPhilipp Reisner 	ns = mdev->state;
3262b411b363SPhilipp Reisner 	spin_unlock_irq(&mdev->req_lock);
3263b411b363SPhilipp Reisner 
3264b411b363SPhilipp Reisner 	if (rv < SS_SUCCESS) {
3265b411b363SPhilipp Reisner 		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
326681e84650SAndreas Gruenbacher 		return false;
3267b411b363SPhilipp Reisner 	}
3268b411b363SPhilipp Reisner 
32694ac4aadaSLars Ellenberg 	if (os.conn > C_WF_REPORT_PARAMS) {
32704ac4aadaSLars Ellenberg 		if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
3271b411b363SPhilipp Reisner 		    peer_state.disk != D_NEGOTIATING ) {
3272b411b363SPhilipp Reisner 			/* we want resync, peer has not yet decided to sync... */
3273b411b363SPhilipp Reisner 			/* Nowadays only used when forcing a node into primary role and
3274b411b363SPhilipp Reisner 			   setting its disk to UpToDate with that */
3275b411b363SPhilipp Reisner 			drbd_send_uuids(mdev);
3276b411b363SPhilipp Reisner 			drbd_send_state(mdev);
3277b411b363SPhilipp Reisner 		}
3278b411b363SPhilipp Reisner 	}
3279b411b363SPhilipp Reisner 
3280b411b363SPhilipp Reisner 	mdev->net_conf->want_lose = 0;
3281b411b363SPhilipp Reisner 
3282b411b363SPhilipp Reisner 	drbd_md_sync(mdev); /* update connected indicator, la_size, ...
*/ 3283b411b363SPhilipp Reisner 328481e84650SAndreas Gruenbacher return true; 3285b411b363SPhilipp Reisner } 3286b411b363SPhilipp Reisner 328702918be2SPhilipp Reisner static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) 3288b411b363SPhilipp Reisner { 328902918be2SPhilipp Reisner struct p_rs_uuid *p = &mdev->data.rbuf.rs_uuid; 3290b411b363SPhilipp Reisner 3291b411b363SPhilipp Reisner wait_event(mdev->misc_wait, 3292b411b363SPhilipp Reisner mdev->state.conn == C_WF_SYNC_UUID || 3293c4752ef1SPhilipp Reisner mdev->state.conn == C_BEHIND || 3294b411b363SPhilipp Reisner mdev->state.conn < C_CONNECTED || 3295b411b363SPhilipp Reisner mdev->state.disk < D_NEGOTIATING); 3296b411b363SPhilipp Reisner 3297b411b363SPhilipp Reisner /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */ 3298b411b363SPhilipp Reisner 3299b411b363SPhilipp Reisner /* Here the _drbd_uuid_ functions are right, current should 3300b411b363SPhilipp Reisner _not_ be rotated into the history */ 3301b411b363SPhilipp Reisner if (get_ldev_if_state(mdev, D_NEGOTIATING)) { 3302b411b363SPhilipp Reisner _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid)); 3303b411b363SPhilipp Reisner _drbd_uuid_set(mdev, UI_BITMAP, 0UL); 3304b411b363SPhilipp Reisner 3305b411b363SPhilipp Reisner drbd_start_resync(mdev, C_SYNC_TARGET); 3306b411b363SPhilipp Reisner 3307b411b363SPhilipp Reisner put_ldev(mdev); 3308b411b363SPhilipp Reisner } else 3309b411b363SPhilipp Reisner dev_err(DEV, "Ignoring SyncUUID packet!\n"); 3310b411b363SPhilipp Reisner 331181e84650SAndreas Gruenbacher return true; 3312b411b363SPhilipp Reisner } 3313b411b363SPhilipp Reisner 3314b411b363SPhilipp Reisner enum receive_bitmap_ret { OK, DONE, FAILED }; 3315b411b363SPhilipp Reisner 3316b411b363SPhilipp Reisner static enum receive_bitmap_ret 331702918be2SPhilipp Reisner receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size, 3318b411b363SPhilipp Reisner unsigned long *buffer, struct bm_xfer_ctx *c) 3319b411b363SPhilipp Reisner { 3320b411b363SPhilipp Reisner unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset); 3321b411b363SPhilipp Reisner unsigned want = num_words * sizeof(long); 3322b411b363SPhilipp Reisner 332302918be2SPhilipp Reisner if (want != data_size) { 332402918be2SPhilipp Reisner dev_err(DEV, "%s:want (%u) != data_size (%u)\n", __func__, want, data_size); 3325b411b363SPhilipp Reisner return FAILED; 3326b411b363SPhilipp Reisner } 3327b411b363SPhilipp Reisner if (want == 0) 3328b411b363SPhilipp Reisner return DONE; 3329b411b363SPhilipp Reisner if (drbd_recv(mdev, buffer, want) != want) 3330b411b363SPhilipp Reisner return FAILED; 3331b411b363SPhilipp Reisner 3332b411b363SPhilipp Reisner drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer); 3333b411b363SPhilipp Reisner 3334b411b363SPhilipp Reisner c->word_offset += num_words; 3335b411b363SPhilipp Reisner c->bit_offset = c->word_offset * BITS_PER_LONG; 3336b411b363SPhilipp Reisner if (c->bit_offset > c->bm_bits) 3337b411b363SPhilipp Reisner c->bit_offset = c->bm_bits; 3338b411b363SPhilipp Reisner 3339b411b363SPhilipp Reisner return OK; 3340b411b363SPhilipp Reisner } 3341b411b363SPhilipp Reisner 3342b411b363SPhilipp Reisner static enum receive_bitmap_ret 3343b411b363SPhilipp Reisner recv_bm_rle_bits(struct drbd_conf *mdev, 3344b411b363SPhilipp Reisner struct p_compressed_bm *p, 3345b411b363SPhilipp Reisner struct bm_xfer_ctx *c) 3346b411b363SPhilipp Reisner { 3347b411b363SPhilipp Reisner struct bitstream bs; 3348b411b363SPhilipp Reisner u64 
look_ahead; 3349b411b363SPhilipp Reisner u64 rl; 3350b411b363SPhilipp Reisner u64 tmp; 3351b411b363SPhilipp Reisner unsigned long s = c->bit_offset; 3352b411b363SPhilipp Reisner unsigned long e; 3353004352faSLars Ellenberg int len = be16_to_cpu(p->head.length) - (sizeof(*p) - sizeof(p->head)); 3354b411b363SPhilipp Reisner int toggle = DCBP_get_start(p); 3355b411b363SPhilipp Reisner int have; 3356b411b363SPhilipp Reisner int bits; 3357b411b363SPhilipp Reisner 3358b411b363SPhilipp Reisner bitstream_init(&bs, p->code, len, DCBP_get_pad_bits(p)); 3359b411b363SPhilipp Reisner 3360b411b363SPhilipp Reisner bits = bitstream_get_bits(&bs, &look_ahead, 64); 3361b411b363SPhilipp Reisner if (bits < 0) 3362b411b363SPhilipp Reisner return FAILED; 3363b411b363SPhilipp Reisner 3364b411b363SPhilipp Reisner for (have = bits; have > 0; s += rl, toggle = !toggle) { 3365b411b363SPhilipp Reisner bits = vli_decode_bits(&rl, look_ahead); 3366b411b363SPhilipp Reisner if (bits <= 0) 3367b411b363SPhilipp Reisner return FAILED; 3368b411b363SPhilipp Reisner 3369b411b363SPhilipp Reisner if (toggle) { 3370b411b363SPhilipp Reisner e = s + rl -1; 3371b411b363SPhilipp Reisner if (e >= c->bm_bits) { 3372b411b363SPhilipp Reisner dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e); 3373b411b363SPhilipp Reisner return FAILED; 3374b411b363SPhilipp Reisner } 3375b411b363SPhilipp Reisner _drbd_bm_set_bits(mdev, s, e); 3376b411b363SPhilipp Reisner } 3377b411b363SPhilipp Reisner 3378b411b363SPhilipp Reisner if (have < bits) { 3379b411b363SPhilipp Reisner dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n", 3380b411b363SPhilipp Reisner have, bits, look_ahead, 3381b411b363SPhilipp Reisner (unsigned int)(bs.cur.b - p->code), 3382b411b363SPhilipp Reisner (unsigned int)bs.buf_len); 3383b411b363SPhilipp Reisner return FAILED; 3384b411b363SPhilipp Reisner } 3385b411b363SPhilipp Reisner look_ahead >>= bits; 3386b411b363SPhilipp Reisner have -= bits; 3387b411b363SPhilipp Reisner 3388b411b363SPhilipp Reisner bits = bitstream_get_bits(&bs, &tmp, 64 - have); 3389b411b363SPhilipp Reisner if (bits < 0) 3390b411b363SPhilipp Reisner return FAILED; 3391b411b363SPhilipp Reisner look_ahead |= tmp << have; 3392b411b363SPhilipp Reisner have += bits; 3393b411b363SPhilipp Reisner } 3394b411b363SPhilipp Reisner 3395b411b363SPhilipp Reisner c->bit_offset = s; 3396b411b363SPhilipp Reisner bm_xfer_ctx_bit_to_word_offset(c); 3397b411b363SPhilipp Reisner 3398b411b363SPhilipp Reisner return (s == c->bm_bits) ? DONE : OK; 3399b411b363SPhilipp Reisner } 3400b411b363SPhilipp Reisner 3401b411b363SPhilipp Reisner static enum receive_bitmap_ret 3402b411b363SPhilipp Reisner decode_bitmap_c(struct drbd_conf *mdev, 3403b411b363SPhilipp Reisner struct p_compressed_bm *p, 3404b411b363SPhilipp Reisner struct bm_xfer_ctx *c) 3405b411b363SPhilipp Reisner { 3406b411b363SPhilipp Reisner if (DCBP_get_code(p) == RLE_VLI_Bits) 3407b411b363SPhilipp Reisner return recv_bm_rle_bits(mdev, p, c); 3408b411b363SPhilipp Reisner 3409b411b363SPhilipp Reisner /* other variants had been implemented for evaluation, 3410b411b363SPhilipp Reisner * but have been dropped as this one turned out to be "best" 3411b411b363SPhilipp Reisner * during all our tests. 
 */
3412b411b363SPhilipp Reisner 
3413b411b363SPhilipp Reisner 	dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
3414b411b363SPhilipp Reisner 	drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
3415b411b363SPhilipp Reisner 	return FAILED;
3416b411b363SPhilipp Reisner }
3417b411b363SPhilipp Reisner 
3418b411b363SPhilipp Reisner void INFO_bm_xfer_stats(struct drbd_conf *mdev,
3419b411b363SPhilipp Reisner 		const char *direction, struct bm_xfer_ctx *c)
3420b411b363SPhilipp Reisner {
3421b411b363SPhilipp Reisner 	/* what would it take to transfer it "plaintext" */
34220b70a13dSPhilipp Reisner 	unsigned plain = sizeof(struct p_header80) *
3423b411b363SPhilipp Reisner 		((c->bm_words+BM_PACKET_WORDS-1)/BM_PACKET_WORDS+1)
3424b411b363SPhilipp Reisner 		+ c->bm_words * sizeof(long);
3425b411b363SPhilipp Reisner 	unsigned total = c->bytes[0] + c->bytes[1];
3426b411b363SPhilipp Reisner 	unsigned r;
3427b411b363SPhilipp Reisner 
3428b411b363SPhilipp Reisner 	/* total cannot be zero. but just in case: */
3429b411b363SPhilipp Reisner 	if (total == 0)
3430b411b363SPhilipp Reisner 		return;
3431b411b363SPhilipp Reisner 
3432b411b363SPhilipp Reisner 	/* don't report if not compressed */
3433b411b363SPhilipp Reisner 	if (total >= plain)
3434b411b363SPhilipp Reisner 		return;
3435b411b363SPhilipp Reisner 
3436b411b363SPhilipp Reisner 	/* total < plain. check for overflow, still */
3437b411b363SPhilipp Reisner 	r = (total > UINT_MAX/1000) ? (total / (plain/1000))
3438b411b363SPhilipp Reisner 		: (1000 * total / plain);
3439b411b363SPhilipp Reisner 
3440b411b363SPhilipp Reisner 	if (r > 1000)
3441b411b363SPhilipp Reisner 		r = 1000;
3442b411b363SPhilipp Reisner 
3443b411b363SPhilipp Reisner 	r = 1000 - r;
3444b411b363SPhilipp Reisner 	dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
3445b411b363SPhilipp Reisner 	     "total %u; compression: %u.%u%%\n",
3446b411b363SPhilipp Reisner 			direction,
3447b411b363SPhilipp Reisner 			c->bytes[1], c->packets[1],
3448b411b363SPhilipp Reisner 			c->bytes[0], c->packets[0],
3449b411b363SPhilipp Reisner 			total, r/10, r % 10);
3450b411b363SPhilipp Reisner }
3451b411b363SPhilipp Reisner 
3452b411b363SPhilipp Reisner /* Since we are processing the bitfield from lower addresses to higher,
3453b411b363SPhilipp Reisner    it does not matter if we process it in 32 bit chunks or 64 bit
3454b411b363SPhilipp Reisner    chunks as long as it is little endian. (Understand it as byte stream,
3455b411b363SPhilipp Reisner    beginning with the lowest byte...) If we used big endian
3456b411b363SPhilipp Reisner    we would need to process it from the highest address to the lowest,
3457b411b363SPhilipp Reisner    in order to be agnostic to the 32 vs 64 bits issue.
3458b411b363SPhilipp Reisner 
3459b411b363SPhilipp Reisner    returns 0 on failure, 1 if we successfully received it.
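   (Added note: the bitmap may arrive as plain P_BITMAP packets or as RLE
   compressed P_COMPRESSED_BITMAP packets; both are fed into the same
   bm_xfer_ctx cursor below until the last bit has been covered.)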
*/ 346002918be2SPhilipp Reisner static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) 3461b411b363SPhilipp Reisner { 3462b411b363SPhilipp Reisner struct bm_xfer_ctx c; 3463b411b363SPhilipp Reisner void *buffer; 3464b411b363SPhilipp Reisner enum receive_bitmap_ret ret; 346581e84650SAndreas Gruenbacher int ok = false; 346602918be2SPhilipp Reisner struct p_header80 *h = &mdev->data.rbuf.header.h80; 3467b411b363SPhilipp Reisner 34683719094eSPhilipp Reisner /* drbd_bm_lock(mdev, "receive bitmap"); By intention no bm_lock */ 3469b411b363SPhilipp Reisner 3470b411b363SPhilipp Reisner /* maybe we should use some per thread scratch page, 3471b411b363SPhilipp Reisner * and allocate that during initial device creation? */ 3472b411b363SPhilipp Reisner buffer = (unsigned long *) __get_free_page(GFP_NOIO); 3473b411b363SPhilipp Reisner if (!buffer) { 3474b411b363SPhilipp Reisner dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__); 3475b411b363SPhilipp Reisner goto out; 3476b411b363SPhilipp Reisner } 3477b411b363SPhilipp Reisner 3478b411b363SPhilipp Reisner c = (struct bm_xfer_ctx) { 3479b411b363SPhilipp Reisner .bm_bits = drbd_bm_bits(mdev), 3480b411b363SPhilipp Reisner .bm_words = drbd_bm_words(mdev), 3481b411b363SPhilipp Reisner }; 3482b411b363SPhilipp Reisner 3483b411b363SPhilipp Reisner do { 348402918be2SPhilipp Reisner if (cmd == P_BITMAP) { 348502918be2SPhilipp Reisner ret = receive_bitmap_plain(mdev, data_size, buffer, &c); 348602918be2SPhilipp Reisner } else if (cmd == P_COMPRESSED_BITMAP) { 3487b411b363SPhilipp Reisner /* MAYBE: sanity check that we speak proto >= 90, 3488b411b363SPhilipp Reisner * and the feature is enabled! */ 3489b411b363SPhilipp Reisner struct p_compressed_bm *p; 3490b411b363SPhilipp Reisner 349102918be2SPhilipp Reisner if (data_size > BM_PACKET_PAYLOAD_BYTES) { 3492b411b363SPhilipp Reisner dev_err(DEV, "ReportCBitmap packet too large\n"); 3493b411b363SPhilipp Reisner goto out; 3494b411b363SPhilipp Reisner } 3495b411b363SPhilipp Reisner /* use the page buff */ 3496b411b363SPhilipp Reisner p = buffer; 3497b411b363SPhilipp Reisner memcpy(p, h, sizeof(*h)); 349802918be2SPhilipp Reisner if (drbd_recv(mdev, p->head.payload, data_size) != data_size) 3499b411b363SPhilipp Reisner goto out; 3500004352faSLars Ellenberg if (data_size <= (sizeof(*p) - sizeof(p->head))) { 3501004352faSLars Ellenberg dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", data_size); 350278fcbdaeSAndreas Gruenbacher goto out; 3503b411b363SPhilipp Reisner } 3504b411b363SPhilipp Reisner ret = decode_bitmap_c(mdev, p, &c); 3505b411b363SPhilipp Reisner } else { 350602918be2SPhilipp Reisner dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", cmd); 3507b411b363SPhilipp Reisner goto out; 3508b411b363SPhilipp Reisner } 3509b411b363SPhilipp Reisner 351002918be2SPhilipp Reisner c.packets[cmd == P_BITMAP]++; 351102918be2SPhilipp Reisner c.bytes[cmd == P_BITMAP] += sizeof(struct p_header80) + data_size; 3512b411b363SPhilipp Reisner 3513b411b363SPhilipp Reisner if (ret != OK) 3514b411b363SPhilipp Reisner break; 3515b411b363SPhilipp Reisner 351602918be2SPhilipp Reisner if (!drbd_recv_header(mdev, &cmd, &data_size)) 3517b411b363SPhilipp Reisner goto out; 3518b411b363SPhilipp Reisner } while (ret == OK); 3519b411b363SPhilipp Reisner if (ret == FAILED) 3520b411b363SPhilipp Reisner goto out; 3521b411b363SPhilipp Reisner 3522b411b363SPhilipp Reisner INFO_bm_xfer_stats(mdev, "receive", &c); 3523b411b363SPhilipp Reisner 
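	/* The peer's bitmap has been merged into ours.  As sync target
	 * (C_WF_BITMAP_T) we now reply with our own bitmap and request
	 * C_WF_SYNC_UUID; as sync source (C_WF_BITMAP_S) the resync is
	 * started below, after the "out:" label. */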
3524b411b363SPhilipp Reisner if (mdev->state.conn == C_WF_BITMAP_T) { 3525de1f8e4aSAndreas Gruenbacher enum drbd_state_rv rv; 3526de1f8e4aSAndreas Gruenbacher 3527b411b363SPhilipp Reisner ok = !drbd_send_bitmap(mdev); 3528b411b363SPhilipp Reisner if (!ok) 3529b411b363SPhilipp Reisner goto out; 3530b411b363SPhilipp Reisner /* Omit CS_ORDERED with this state transition to avoid deadlocks. */ 3531de1f8e4aSAndreas Gruenbacher rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE); 3532de1f8e4aSAndreas Gruenbacher D_ASSERT(rv == SS_SUCCESS); 3533b411b363SPhilipp Reisner } else if (mdev->state.conn != C_WF_BITMAP_S) { 3534b411b363SPhilipp Reisner /* admin may have requested C_DISCONNECTING, 3535b411b363SPhilipp Reisner * other threads may have noticed network errors */ 3536b411b363SPhilipp Reisner dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n", 3537b411b363SPhilipp Reisner drbd_conn_str(mdev->state.conn)); 3538b411b363SPhilipp Reisner } 3539b411b363SPhilipp Reisner 354081e84650SAndreas Gruenbacher ok = true; 3541b411b363SPhilipp Reisner out: 35423719094eSPhilipp Reisner /* drbd_bm_unlock(mdev); by intention no lock */ 3543b411b363SPhilipp Reisner if (ok && mdev->state.conn == C_WF_BITMAP_S) 3544b411b363SPhilipp Reisner drbd_start_resync(mdev, C_SYNC_SOURCE); 3545b411b363SPhilipp Reisner free_page((unsigned long) buffer); 3546b411b363SPhilipp Reisner return ok; 3547b411b363SPhilipp Reisner } 3548b411b363SPhilipp Reisner 354902918be2SPhilipp Reisner static int receive_skip(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) 3550b411b363SPhilipp Reisner { 3551b411b363SPhilipp Reisner /* TODO zero copy sink :) */ 3552b411b363SPhilipp Reisner static char sink[128]; 3553b411b363SPhilipp Reisner int size, want, r; 3554b411b363SPhilipp Reisner 3555b411b363SPhilipp Reisner dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n", 355602918be2SPhilipp Reisner cmd, data_size); 3557b411b363SPhilipp Reisner 355802918be2SPhilipp Reisner size = data_size; 3559b411b363SPhilipp Reisner while (size > 0) { 3560b411b363SPhilipp Reisner want = min_t(int, size, sizeof(sink)); 3561b411b363SPhilipp Reisner r = drbd_recv(mdev, sink, want); 3562b411b363SPhilipp Reisner ERR_IF(r <= 0) break; 3563b411b363SPhilipp Reisner size -= r; 3564b411b363SPhilipp Reisner } 3565b411b363SPhilipp Reisner return size == 0; 3566b411b363SPhilipp Reisner } 3567b411b363SPhilipp Reisner 356802918be2SPhilipp Reisner static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) 3569b411b363SPhilipp Reisner { 3570b411b363SPhilipp Reisner /* Make sure we've acked all the TCP data associated 3571b411b363SPhilipp Reisner * with the data requests being unplugged */ 3572b411b363SPhilipp Reisner drbd_tcp_quickack(mdev->data.socket); 3573b411b363SPhilipp Reisner 357481e84650SAndreas Gruenbacher return true; 3575b411b363SPhilipp Reisner } 3576b411b363SPhilipp Reisner 357773a01a18SPhilipp Reisner static int receive_out_of_sync(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size) 357873a01a18SPhilipp Reisner { 357973a01a18SPhilipp Reisner struct p_block_desc *p = &mdev->data.rbuf.block_desc; 358073a01a18SPhilipp Reisner 358173a01a18SPhilipp Reisner drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize)); 358273a01a18SPhilipp Reisner 358381e84650SAndreas Gruenbacher return true; 358473a01a18SPhilipp Reisner } 358573a01a18SPhilipp Reisner 358602918be2SPhilipp Reisner typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, enum 
drbd_packets cmd, unsigned int to_receive); 3587b411b363SPhilipp Reisner 358802918be2SPhilipp Reisner struct data_cmd { 358902918be2SPhilipp Reisner int expect_payload; 359002918be2SPhilipp Reisner size_t pkt_size; 359102918be2SPhilipp Reisner drbd_cmd_handler_f function; 3592b411b363SPhilipp Reisner }; 3593b411b363SPhilipp Reisner 359402918be2SPhilipp Reisner static struct data_cmd drbd_cmd_handler[] = { 359502918be2SPhilipp Reisner [P_DATA] = { 1, sizeof(struct p_data), receive_Data }, 359602918be2SPhilipp Reisner [P_DATA_REPLY] = { 1, sizeof(struct p_data), receive_DataReply }, 359702918be2SPhilipp Reisner [P_RS_DATA_REPLY] = { 1, sizeof(struct p_data), receive_RSDataReply } , 359802918be2SPhilipp Reisner [P_BARRIER] = { 0, sizeof(struct p_barrier), receive_Barrier } , 359902918be2SPhilipp Reisner [P_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap } , 360002918be2SPhilipp Reisner [P_COMPRESSED_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap } , 360102918be2SPhilipp Reisner [P_UNPLUG_REMOTE] = { 0, sizeof(struct p_header80), receive_UnplugRemote }, 360202918be2SPhilipp Reisner [P_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest }, 360302918be2SPhilipp Reisner [P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest }, 360402918be2SPhilipp Reisner [P_SYNC_PARAM] = { 1, sizeof(struct p_header80), receive_SyncParam }, 360502918be2SPhilipp Reisner [P_SYNC_PARAM89] = { 1, sizeof(struct p_header80), receive_SyncParam }, 360602918be2SPhilipp Reisner [P_PROTOCOL] = { 1, sizeof(struct p_protocol), receive_protocol }, 360702918be2SPhilipp Reisner [P_UUIDS] = { 0, sizeof(struct p_uuids), receive_uuids }, 360802918be2SPhilipp Reisner [P_SIZES] = { 0, sizeof(struct p_sizes), receive_sizes }, 360902918be2SPhilipp Reisner [P_STATE] = { 0, sizeof(struct p_state), receive_state }, 361002918be2SPhilipp Reisner [P_STATE_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_state }, 361102918be2SPhilipp Reisner [P_SYNC_UUID] = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid }, 361202918be2SPhilipp Reisner [P_OV_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest }, 361302918be2SPhilipp Reisner [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest }, 361402918be2SPhilipp Reisner [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest }, 361502918be2SPhilipp Reisner [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip }, 361673a01a18SPhilipp Reisner [P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync }, 361702918be2SPhilipp Reisner /* anything missing from this table is in 361802918be2SPhilipp Reisner * the asender_tbl, see get_asender_cmd */ 361902918be2SPhilipp Reisner [P_MAX_CMD] = { 0, 0, NULL }, 362002918be2SPhilipp Reisner }; 362102918be2SPhilipp Reisner 362202918be2SPhilipp Reisner /* All handler functions that expect a sub-header get that sub-heder in 362302918be2SPhilipp Reisner mdev->data.rbuf.header.head.payload. 362402918be2SPhilipp Reisner 362502918be2SPhilipp Reisner Usually in mdev->data.rbuf.header.head the callback can find the usual 362602918be2SPhilipp Reisner p_header, but they may not rely on that. Since there is also p_header95 ! 
362702918be2SPhilipp Reisner */ 3628b411b363SPhilipp Reisner 3629b411b363SPhilipp Reisner static void drbdd(struct drbd_conf *mdev) 3630b411b363SPhilipp Reisner { 363102918be2SPhilipp Reisner union p_header *header = &mdev->data.rbuf.header; 363202918be2SPhilipp Reisner unsigned int packet_size; 363302918be2SPhilipp Reisner enum drbd_packets cmd; 363402918be2SPhilipp Reisner size_t shs; /* sub header size */ 363502918be2SPhilipp Reisner int rv; 3636b411b363SPhilipp Reisner 3637b411b363SPhilipp Reisner while (get_t_state(&mdev->receiver) == Running) { 3638b411b363SPhilipp Reisner drbd_thread_current_set_cpu(mdev); 363902918be2SPhilipp Reisner if (!drbd_recv_header(mdev, &cmd, &packet_size)) 364002918be2SPhilipp Reisner goto err_out; 364102918be2SPhilipp Reisner 364202918be2SPhilipp Reisner if (unlikely(cmd >= P_MAX_CMD || !drbd_cmd_handler[cmd].function)) { 364302918be2SPhilipp Reisner dev_err(DEV, "unknown packet type %d, l: %d!\n", cmd, packet_size); 364402918be2SPhilipp Reisner goto err_out; 36450b33a916SLars Ellenberg } 3646b411b363SPhilipp Reisner 364702918be2SPhilipp Reisner shs = drbd_cmd_handler[cmd].pkt_size - sizeof(union p_header); 3648c13f7e1aSLars Ellenberg if (packet_size - shs > 0 && !drbd_cmd_handler[cmd].expect_payload) { 3649c13f7e1aSLars Ellenberg dev_err(DEV, "No payload expected %s l:%d\n", cmdname(cmd), packet_size); 3650c13f7e1aSLars Ellenberg goto err_out; 3651c13f7e1aSLars Ellenberg } 3652c13f7e1aSLars Ellenberg 3653c13f7e1aSLars Ellenberg if (shs) { 365402918be2SPhilipp Reisner rv = drbd_recv(mdev, &header->h80.payload, shs); 365502918be2SPhilipp Reisner if (unlikely(rv != shs)) { 365602918be2SPhilipp Reisner dev_err(DEV, "short read while reading sub header: rv=%d\n", rv); 365702918be2SPhilipp Reisner goto err_out; 3658b411b363SPhilipp Reisner } 365902918be2SPhilipp Reisner } 366002918be2SPhilipp Reisner 366102918be2SPhilipp Reisner rv = drbd_cmd_handler[cmd].function(mdev, cmd, packet_size - shs); 366202918be2SPhilipp Reisner 366302918be2SPhilipp Reisner if (unlikely(!rv)) { 3664b411b363SPhilipp Reisner dev_err(DEV, "error receiving %s, l: %d!\n", 366502918be2SPhilipp Reisner cmdname(cmd), packet_size); 366602918be2SPhilipp Reisner goto err_out; 3667b411b363SPhilipp Reisner } 3668b411b363SPhilipp Reisner } 366902918be2SPhilipp Reisner 367002918be2SPhilipp Reisner if (0) { 367102918be2SPhilipp Reisner err_out: 3672b411b363SPhilipp Reisner drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR)); 3673b411b363SPhilipp Reisner } 3674856c50c7SLars Ellenberg /* If we leave here, we probably want to update at least the 3675856c50c7SLars Ellenberg * "Connected" indicator on stable storage. Do so explicitly here. 
*/ 3676856c50c7SLars Ellenberg drbd_md_sync(mdev); 3677b411b363SPhilipp Reisner } 3678b411b363SPhilipp Reisner 3679b411b363SPhilipp Reisner void drbd_flush_workqueue(struct drbd_conf *mdev) 3680b411b363SPhilipp Reisner { 3681b411b363SPhilipp Reisner struct drbd_wq_barrier barr; 3682b411b363SPhilipp Reisner 3683b411b363SPhilipp Reisner barr.w.cb = w_prev_work_done; 3684b411b363SPhilipp Reisner init_completion(&barr.done); 3685b411b363SPhilipp Reisner drbd_queue_work(&mdev->data.work, &barr.w); 3686b411b363SPhilipp Reisner wait_for_completion(&barr.done); 3687b411b363SPhilipp Reisner } 3688b411b363SPhilipp Reisner 3689f70b3511SPhilipp Reisner void drbd_free_tl_hash(struct drbd_conf *mdev) 3690f70b3511SPhilipp Reisner { 3691f70b3511SPhilipp Reisner struct hlist_head *h; 3692f70b3511SPhilipp Reisner 3693f70b3511SPhilipp Reisner spin_lock_irq(&mdev->req_lock); 3694f70b3511SPhilipp Reisner 3695f70b3511SPhilipp Reisner if (!mdev->tl_hash || mdev->state.conn != C_STANDALONE) { 3696f70b3511SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 3697f70b3511SPhilipp Reisner return; 3698f70b3511SPhilipp Reisner } 3699f70b3511SPhilipp Reisner /* paranoia code */ 3700f70b3511SPhilipp Reisner for (h = mdev->ee_hash; h < mdev->ee_hash + mdev->ee_hash_s; h++) 3701f70b3511SPhilipp Reisner if (h->first) 3702f70b3511SPhilipp Reisner dev_err(DEV, "ASSERT FAILED ee_hash[%u].first == %p, expected NULL\n", 3703f70b3511SPhilipp Reisner (int)(h - mdev->ee_hash), h->first); 3704f70b3511SPhilipp Reisner kfree(mdev->ee_hash); 3705f70b3511SPhilipp Reisner mdev->ee_hash = NULL; 3706f70b3511SPhilipp Reisner mdev->ee_hash_s = 0; 3707f70b3511SPhilipp Reisner 3708f70b3511SPhilipp Reisner /* paranoia code */ 3709f70b3511SPhilipp Reisner for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++) 3710f70b3511SPhilipp Reisner if (h->first) 3711f70b3511SPhilipp Reisner dev_err(DEV, "ASSERT FAILED tl_hash[%u] == %p, expected NULL\n", 3712f70b3511SPhilipp Reisner (int)(h - mdev->tl_hash), h->first); 3713f70b3511SPhilipp Reisner kfree(mdev->tl_hash); 3714f70b3511SPhilipp Reisner mdev->tl_hash = NULL; 3715f70b3511SPhilipp Reisner mdev->tl_hash_s = 0; 3716f70b3511SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 3717f70b3511SPhilipp Reisner } 3718f70b3511SPhilipp Reisner 3719b411b363SPhilipp Reisner static void drbd_disconnect(struct drbd_conf *mdev) 3720b411b363SPhilipp Reisner { 3721b411b363SPhilipp Reisner enum drbd_fencing_p fp; 3722b411b363SPhilipp Reisner union drbd_state os, ns; 3723b411b363SPhilipp Reisner int rv = SS_UNKNOWN_ERROR; 3724b411b363SPhilipp Reisner unsigned int i; 3725b411b363SPhilipp Reisner 3726b411b363SPhilipp Reisner if (mdev->state.conn == C_STANDALONE) 3727b411b363SPhilipp Reisner return; 3728b411b363SPhilipp Reisner if (mdev->state.conn >= C_WF_CONNECTION) 3729b411b363SPhilipp Reisner dev_err(DEV, "ASSERT FAILED cstate = %s, expected < WFConnection\n", 3730b411b363SPhilipp Reisner drbd_conn_str(mdev->state.conn)); 3731b411b363SPhilipp Reisner 3732b411b363SPhilipp Reisner /* asender does not clean up anything. it must not interfere, either */ 3733b411b363SPhilipp Reisner drbd_thread_stop(&mdev->asender); 3734b411b363SPhilipp Reisner drbd_free_sock(mdev); 3735b411b363SPhilipp Reisner 373685719573SPhilipp Reisner /* wait for current activity to cease. 
*/ 3737b411b363SPhilipp Reisner spin_lock_irq(&mdev->req_lock); 3738b411b363SPhilipp Reisner _drbd_wait_ee_list_empty(mdev, &mdev->active_ee); 3739b411b363SPhilipp Reisner _drbd_wait_ee_list_empty(mdev, &mdev->sync_ee); 3740b411b363SPhilipp Reisner _drbd_wait_ee_list_empty(mdev, &mdev->read_ee); 3741b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 3742b411b363SPhilipp Reisner 3743b411b363SPhilipp Reisner /* We do not have data structures that would allow us to 3744b411b363SPhilipp Reisner * get the rs_pending_cnt down to 0 again. 3745b411b363SPhilipp Reisner * * On C_SYNC_TARGET we do not have any data structures describing 3746b411b363SPhilipp Reisner * the pending RSDataRequest's we have sent. 3747b411b363SPhilipp Reisner * * On C_SYNC_SOURCE there is no data structure that tracks 3748b411b363SPhilipp Reisner * the P_RS_DATA_REPLY blocks that we sent to the SyncTarget. 3749b411b363SPhilipp Reisner * And no, it is not the sum of the reference counts in the 3750b411b363SPhilipp Reisner * resync_LRU. The resync_LRU tracks the whole operation including 3751b411b363SPhilipp Reisner * the disk-IO, while the rs_pending_cnt only tracks the blocks 3752b411b363SPhilipp Reisner * on the fly. */ 3753b411b363SPhilipp Reisner drbd_rs_cancel_all(mdev); 3754b411b363SPhilipp Reisner mdev->rs_total = 0; 3755b411b363SPhilipp Reisner mdev->rs_failed = 0; 3756b411b363SPhilipp Reisner atomic_set(&mdev->rs_pending_cnt, 0); 3757b411b363SPhilipp Reisner wake_up(&mdev->misc_wait); 3758b411b363SPhilipp Reisner 3759b411b363SPhilipp Reisner /* make sure syncer is stopped and w_resume_next_sg queued */ 3760b411b363SPhilipp Reisner del_timer_sync(&mdev->resync_timer); 3761b411b363SPhilipp Reisner resync_timer_fn((unsigned long)mdev); 3762b411b363SPhilipp Reisner 3763b411b363SPhilipp Reisner /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier, 3764b411b363SPhilipp Reisner * w_make_resync_request etc. which may still be on the worker queue 3765b411b363SPhilipp Reisner * to be "canceled" */ 3766b411b363SPhilipp Reisner drbd_flush_workqueue(mdev); 3767b411b363SPhilipp Reisner 3768b411b363SPhilipp Reisner /* This also does reclaim_net_ee(). 
If we do this too early, we might 3769b411b363SPhilipp Reisner * miss some resync ee and pages.*/ 3770b411b363SPhilipp Reisner drbd_process_done_ee(mdev); 3771b411b363SPhilipp Reisner 3772b411b363SPhilipp Reisner kfree(mdev->p_uuid); 3773b411b363SPhilipp Reisner mdev->p_uuid = NULL; 3774b411b363SPhilipp Reisner 3775fb22c402SPhilipp Reisner if (!is_susp(mdev->state)) 3776b411b363SPhilipp Reisner tl_clear(mdev); 3777b411b363SPhilipp Reisner 3778b411b363SPhilipp Reisner dev_info(DEV, "Connection closed\n"); 3779b411b363SPhilipp Reisner 3780b411b363SPhilipp Reisner drbd_md_sync(mdev); 3781b411b363SPhilipp Reisner 3782b411b363SPhilipp Reisner fp = FP_DONT_CARE; 3783b411b363SPhilipp Reisner if (get_ldev(mdev)) { 3784b411b363SPhilipp Reisner fp = mdev->ldev->dc.fencing; 3785b411b363SPhilipp Reisner put_ldev(mdev); 3786b411b363SPhilipp Reisner } 3787b411b363SPhilipp Reisner 378887f7be4cSPhilipp Reisner if (mdev->state.role == R_PRIMARY && fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN) 378987f7be4cSPhilipp Reisner drbd_try_outdate_peer_async(mdev); 3790b411b363SPhilipp Reisner 3791b411b363SPhilipp Reisner spin_lock_irq(&mdev->req_lock); 3792b411b363SPhilipp Reisner os = mdev->state; 3793b411b363SPhilipp Reisner if (os.conn >= C_UNCONNECTED) { 3794b411b363SPhilipp Reisner /* Do not restart in case we are C_DISCONNECTING */ 3795b411b363SPhilipp Reisner ns = os; 3796b411b363SPhilipp Reisner ns.conn = C_UNCONNECTED; 3797b411b363SPhilipp Reisner rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL); 3798b411b363SPhilipp Reisner } 3799b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 3800b411b363SPhilipp Reisner 3801b411b363SPhilipp Reisner if (os.conn == C_DISCONNECTING) { 380284dfb9f5SPhilipp Reisner wait_event(mdev->net_cnt_wait, atomic_read(&mdev->net_cnt) == 0); 3803b411b363SPhilipp Reisner 3804b411b363SPhilipp Reisner crypto_free_hash(mdev->cram_hmac_tfm); 3805b411b363SPhilipp Reisner mdev->cram_hmac_tfm = NULL; 3806b411b363SPhilipp Reisner 3807b411b363SPhilipp Reisner kfree(mdev->net_conf); 3808b411b363SPhilipp Reisner mdev->net_conf = NULL; 3809b411b363SPhilipp Reisner drbd_request_state(mdev, NS(conn, C_STANDALONE)); 3810b411b363SPhilipp Reisner } 3811b411b363SPhilipp Reisner 3812b411b363SPhilipp Reisner /* tcp_close and release of sendpage pages can be deferred. I don't 3813b411b363SPhilipp Reisner * want to use SO_LINGER, because apparently it can be deferred for 3814b411b363SPhilipp Reisner * more than 20 seconds (longest time I checked). 3815b411b363SPhilipp Reisner * 3816b411b363SPhilipp Reisner * Actually we don't care for exactly when the network stack does its 3817b411b363SPhilipp Reisner * put_page(), but release our reference on these pages right here. 
3818b411b363SPhilipp Reisner */ 3819b411b363SPhilipp Reisner i = drbd_release_ee(mdev, &mdev->net_ee); 3820b411b363SPhilipp Reisner if (i) 3821b411b363SPhilipp Reisner dev_info(DEV, "net_ee not empty, killed %u entries\n", i); 3822435f0740SLars Ellenberg i = atomic_read(&mdev->pp_in_use_by_net); 3823435f0740SLars Ellenberg if (i) 3824435f0740SLars Ellenberg dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i); 3825b411b363SPhilipp Reisner i = atomic_read(&mdev->pp_in_use); 3826b411b363SPhilipp Reisner if (i) 382745bb912bSLars Ellenberg dev_info(DEV, "pp_in_use = %d, expected 0\n", i); 3828b411b363SPhilipp Reisner 3829b411b363SPhilipp Reisner D_ASSERT(list_empty(&mdev->read_ee)); 3830b411b363SPhilipp Reisner D_ASSERT(list_empty(&mdev->active_ee)); 3831b411b363SPhilipp Reisner D_ASSERT(list_empty(&mdev->sync_ee)); 3832b411b363SPhilipp Reisner D_ASSERT(list_empty(&mdev->done_ee)); 3833b411b363SPhilipp Reisner 3834b411b363SPhilipp Reisner /* ok, no more ee's on the fly, it is safe to reset the epoch_size */ 3835b411b363SPhilipp Reisner atomic_set(&mdev->current_epoch->epoch_size, 0); 3836b411b363SPhilipp Reisner D_ASSERT(list_empty(&mdev->current_epoch->list)); 3837b411b363SPhilipp Reisner } 3838b411b363SPhilipp Reisner 3839b411b363SPhilipp Reisner /* 3840b411b363SPhilipp Reisner * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version 3841b411b363SPhilipp Reisner * we can agree on is stored in agreed_pro_version. 3842b411b363SPhilipp Reisner * 3843b411b363SPhilipp Reisner * feature flags and the reserved array should be enough room for future 3844b411b363SPhilipp Reisner * enhancements of the handshake protocol, and possible plugins... 3845b411b363SPhilipp Reisner * 3846b411b363SPhilipp Reisner * for now, they are expected to be zero, but ignored. 3847b411b363SPhilipp Reisner */ 3848b411b363SPhilipp Reisner static int drbd_send_handshake(struct drbd_conf *mdev) 3849b411b363SPhilipp Reisner { 3850b411b363SPhilipp Reisner /* ASSERT current == mdev->receiver ... */ 3851b411b363SPhilipp Reisner struct p_handshake *p = &mdev->data.sbuf.handshake; 3852b411b363SPhilipp Reisner int ok; 3853b411b363SPhilipp Reisner 3854b411b363SPhilipp Reisner if (mutex_lock_interruptible(&mdev->data.mutex)) { 3855b411b363SPhilipp Reisner dev_err(DEV, "interrupted during initial handshake\n"); 3856b411b363SPhilipp Reisner return 0; /* interrupted. not ok. 
*/ 3857b411b363SPhilipp Reisner } 3858b411b363SPhilipp Reisner 3859b411b363SPhilipp Reisner if (mdev->data.socket == NULL) { 3860b411b363SPhilipp Reisner mutex_unlock(&mdev->data.mutex); 3861b411b363SPhilipp Reisner return 0; 3862b411b363SPhilipp Reisner } 3863b411b363SPhilipp Reisner 3864b411b363SPhilipp Reisner memset(p, 0, sizeof(*p)); 3865b411b363SPhilipp Reisner p->protocol_min = cpu_to_be32(PRO_VERSION_MIN); 3866b411b363SPhilipp Reisner p->protocol_max = cpu_to_be32(PRO_VERSION_MAX); 3867b411b363SPhilipp Reisner ok = _drbd_send_cmd( mdev, mdev->data.socket, P_HAND_SHAKE, 38680b70a13dSPhilipp Reisner (struct p_header80 *)p, sizeof(*p), 0 ); 3869b411b363SPhilipp Reisner mutex_unlock(&mdev->data.mutex); 3870b411b363SPhilipp Reisner return ok; 3871b411b363SPhilipp Reisner } 3872b411b363SPhilipp Reisner 3873b411b363SPhilipp Reisner /* 3874b411b363SPhilipp Reisner * return values: 3875b411b363SPhilipp Reisner * 1 yes, we have a valid connection 3876b411b363SPhilipp Reisner * 0 oops, did not work out, please try again 3877b411b363SPhilipp Reisner * -1 peer talks different language, 3878b411b363SPhilipp Reisner * no point in trying again, please go standalone. 3879b411b363SPhilipp Reisner */ 3880b411b363SPhilipp Reisner static int drbd_do_handshake(struct drbd_conf *mdev) 3881b411b363SPhilipp Reisner { 3882b411b363SPhilipp Reisner /* ASSERT current == mdev->receiver ... */ 3883b411b363SPhilipp Reisner struct p_handshake *p = &mdev->data.rbuf.handshake; 388402918be2SPhilipp Reisner const int expect = sizeof(struct p_handshake) - sizeof(struct p_header80); 388502918be2SPhilipp Reisner unsigned int length; 388602918be2SPhilipp Reisner enum drbd_packets cmd; 3887b411b363SPhilipp Reisner int rv; 3888b411b363SPhilipp Reisner 3889b411b363SPhilipp Reisner rv = drbd_send_handshake(mdev); 3890b411b363SPhilipp Reisner if (!rv) 3891b411b363SPhilipp Reisner return 0; 3892b411b363SPhilipp Reisner 389302918be2SPhilipp Reisner rv = drbd_recv_header(mdev, &cmd, &length); 3894b411b363SPhilipp Reisner if (!rv) 3895b411b363SPhilipp Reisner return 0; 3896b411b363SPhilipp Reisner 389702918be2SPhilipp Reisner if (cmd != P_HAND_SHAKE) { 3898b411b363SPhilipp Reisner dev_err(DEV, "expected HandShake packet, received: %s (0x%04x)\n", 389902918be2SPhilipp Reisner cmdname(cmd), cmd); 3900b411b363SPhilipp Reisner return -1; 3901b411b363SPhilipp Reisner } 3902b411b363SPhilipp Reisner 390302918be2SPhilipp Reisner if (length != expect) { 3904b411b363SPhilipp Reisner dev_err(DEV, "expected HandShake length: %u, received: %u\n", 390502918be2SPhilipp Reisner expect, length); 3906b411b363SPhilipp Reisner return -1; 3907b411b363SPhilipp Reisner } 3908b411b363SPhilipp Reisner 3909b411b363SPhilipp Reisner rv = drbd_recv(mdev, &p->head.payload, expect); 3910b411b363SPhilipp Reisner 3911b411b363SPhilipp Reisner if (rv != expect) { 3912b411b363SPhilipp Reisner dev_err(DEV, "short read receiving handshake packet: l=%u\n", rv); 3913b411b363SPhilipp Reisner return 0; 3914b411b363SPhilipp Reisner } 3915b411b363SPhilipp Reisner 3916b411b363SPhilipp Reisner p->protocol_min = be32_to_cpu(p->protocol_min); 3917b411b363SPhilipp Reisner p->protocol_max = be32_to_cpu(p->protocol_max); 3918b411b363SPhilipp Reisner if (p->protocol_max == 0) 3919b411b363SPhilipp Reisner p->protocol_max = p->protocol_min; 3920b411b363SPhilipp Reisner 3921b411b363SPhilipp Reisner if (PRO_VERSION_MAX < p->protocol_min || 3922b411b363SPhilipp Reisner PRO_VERSION_MIN > p->protocol_max) 3923b411b363SPhilipp Reisner goto incompat; 3924b411b363SPhilipp Reisner 
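	/* The version ranges overlap, so settle on the highest version both
	 * sides support, i.e. min(PRO_VERSION_MAX, peer's protocol_max).
	 * (E.g. if we offer up to 95 and the peer up to 94, both ends agree
	 * on 94 -- numbers purely illustrative.) */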
3925b411b363SPhilipp Reisner 	mdev->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
3926b411b363SPhilipp Reisner 
3927b411b363SPhilipp Reisner 	dev_info(DEV, "Handshake successful: "
3928b411b363SPhilipp Reisner 	     "Agreed network protocol version %d\n", mdev->agreed_pro_version);
3929b411b363SPhilipp Reisner 
3930b411b363SPhilipp Reisner 	return 1;
3931b411b363SPhilipp Reisner 
3932b411b363SPhilipp Reisner  incompat:
3933b411b363SPhilipp Reisner 	dev_err(DEV, "incompatible DRBD dialects: "
3934b411b363SPhilipp Reisner 	    "I support %d-%d, peer supports %d-%d\n",
3935b411b363SPhilipp Reisner 	    PRO_VERSION_MIN, PRO_VERSION_MAX,
3936b411b363SPhilipp Reisner 	    p->protocol_min, p->protocol_max);
3937b411b363SPhilipp Reisner 	return -1;
3938b411b363SPhilipp Reisner }
3939b411b363SPhilipp Reisner 
3940b411b363SPhilipp Reisner #if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
3941b411b363SPhilipp Reisner static int drbd_do_auth(struct drbd_conf *mdev)
3942b411b363SPhilipp Reisner {
3943b411b363SPhilipp Reisner 	dev_err(DEV, "This kernel was built without CONFIG_CRYPTO_HMAC.\n");
3944b411b363SPhilipp Reisner 	dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
3945b10d96cbSJohannes Thoma 	return -1;
3946b411b363SPhilipp Reisner }
3947b411b363SPhilipp Reisner #else
3948b411b363SPhilipp Reisner #define CHALLENGE_LEN 64
3949b10d96cbSJohannes Thoma 
3950b10d96cbSJohannes Thoma /* Return value:
3951b10d96cbSJohannes Thoma 	1 - auth succeeded,
3952b10d96cbSJohannes Thoma 	0 - failed, try again (network error),
3953b10d96cbSJohannes Thoma 	-1 - auth failed, don't try again.
3954b10d96cbSJohannes Thoma */
3955b10d96cbSJohannes Thoma 
3956b411b363SPhilipp Reisner static int drbd_do_auth(struct drbd_conf *mdev)
3957b411b363SPhilipp Reisner {
3958b411b363SPhilipp Reisner 	char my_challenge[CHALLENGE_LEN];  /* 64 Bytes...
*/ 3959b411b363SPhilipp Reisner struct scatterlist sg; 3960b411b363SPhilipp Reisner char *response = NULL; 3961b411b363SPhilipp Reisner char *right_response = NULL; 3962b411b363SPhilipp Reisner char *peers_ch = NULL; 3963b411b363SPhilipp Reisner unsigned int key_len = strlen(mdev->net_conf->shared_secret); 3964b411b363SPhilipp Reisner unsigned int resp_size; 3965b411b363SPhilipp Reisner struct hash_desc desc; 396602918be2SPhilipp Reisner enum drbd_packets cmd; 396702918be2SPhilipp Reisner unsigned int length; 3968b411b363SPhilipp Reisner int rv; 3969b411b363SPhilipp Reisner 3970b411b363SPhilipp Reisner desc.tfm = mdev->cram_hmac_tfm; 3971b411b363SPhilipp Reisner desc.flags = 0; 3972b411b363SPhilipp Reisner 3973b411b363SPhilipp Reisner rv = crypto_hash_setkey(mdev->cram_hmac_tfm, 3974b411b363SPhilipp Reisner (u8 *)mdev->net_conf->shared_secret, key_len); 3975b411b363SPhilipp Reisner if (rv) { 3976b411b363SPhilipp Reisner dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv); 3977b10d96cbSJohannes Thoma rv = -1; 3978b411b363SPhilipp Reisner goto fail; 3979b411b363SPhilipp Reisner } 3980b411b363SPhilipp Reisner 3981b411b363SPhilipp Reisner get_random_bytes(my_challenge, CHALLENGE_LEN); 3982b411b363SPhilipp Reisner 3983b411b363SPhilipp Reisner rv = drbd_send_cmd2(mdev, P_AUTH_CHALLENGE, my_challenge, CHALLENGE_LEN); 3984b411b363SPhilipp Reisner if (!rv) 3985b411b363SPhilipp Reisner goto fail; 3986b411b363SPhilipp Reisner 398702918be2SPhilipp Reisner rv = drbd_recv_header(mdev, &cmd, &length); 3988b411b363SPhilipp Reisner if (!rv) 3989b411b363SPhilipp Reisner goto fail; 3990b411b363SPhilipp Reisner 399102918be2SPhilipp Reisner if (cmd != P_AUTH_CHALLENGE) { 3992b411b363SPhilipp Reisner dev_err(DEV, "expected AuthChallenge packet, received: %s (0x%04x)\n", 399302918be2SPhilipp Reisner cmdname(cmd), cmd); 3994b411b363SPhilipp Reisner rv = 0; 3995b411b363SPhilipp Reisner goto fail; 3996b411b363SPhilipp Reisner } 3997b411b363SPhilipp Reisner 399802918be2SPhilipp Reisner if (length > CHALLENGE_LEN * 2) { 3999b411b363SPhilipp Reisner dev_err(DEV, "expected AuthChallenge payload too big.\n"); 4000b10d96cbSJohannes Thoma rv = -1; 4001b411b363SPhilipp Reisner goto fail; 4002b411b363SPhilipp Reisner } 4003b411b363SPhilipp Reisner 400402918be2SPhilipp Reisner peers_ch = kmalloc(length, GFP_NOIO); 4005b411b363SPhilipp Reisner if (peers_ch == NULL) { 4006b411b363SPhilipp Reisner dev_err(DEV, "kmalloc of peers_ch failed\n"); 4007b10d96cbSJohannes Thoma rv = -1; 4008b411b363SPhilipp Reisner goto fail; 4009b411b363SPhilipp Reisner } 4010b411b363SPhilipp Reisner 401102918be2SPhilipp Reisner rv = drbd_recv(mdev, peers_ch, length); 4012b411b363SPhilipp Reisner 401302918be2SPhilipp Reisner if (rv != length) { 4014b411b363SPhilipp Reisner dev_err(DEV, "short read AuthChallenge: l=%u\n", rv); 4015b411b363SPhilipp Reisner rv = 0; 4016b411b363SPhilipp Reisner goto fail; 4017b411b363SPhilipp Reisner } 4018b411b363SPhilipp Reisner 4019b411b363SPhilipp Reisner resp_size = crypto_hash_digestsize(mdev->cram_hmac_tfm); 4020b411b363SPhilipp Reisner response = kmalloc(resp_size, GFP_NOIO); 4021b411b363SPhilipp Reisner if (response == NULL) { 4022b411b363SPhilipp Reisner dev_err(DEV, "kmalloc of response failed\n"); 4023b10d96cbSJohannes Thoma rv = -1; 4024b411b363SPhilipp Reisner goto fail; 4025b411b363SPhilipp Reisner } 4026b411b363SPhilipp Reisner 4027b411b363SPhilipp Reisner sg_init_table(&sg, 1); 402802918be2SPhilipp Reisner sg_set_buf(&sg, peers_ch, length); 4029b411b363SPhilipp Reisner 4030b411b363SPhilipp Reisner 
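	/* Hash the peer's challenge with the shared secret as HMAC key;
	 * the peer computes the same value on its side and compares it,
	 * just as we verify its response against our own challenge below. */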
rv = crypto_hash_digest(&desc, &sg, sg.length, response); 4031b411b363SPhilipp Reisner if (rv) { 4032b411b363SPhilipp Reisner dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv); 4033b10d96cbSJohannes Thoma rv = -1; 4034b411b363SPhilipp Reisner goto fail; 4035b411b363SPhilipp Reisner } 4036b411b363SPhilipp Reisner 4037b411b363SPhilipp Reisner rv = drbd_send_cmd2(mdev, P_AUTH_RESPONSE, response, resp_size); 4038b411b363SPhilipp Reisner if (!rv) 4039b411b363SPhilipp Reisner goto fail; 4040b411b363SPhilipp Reisner 404102918be2SPhilipp Reisner rv = drbd_recv_header(mdev, &cmd, &length); 4042b411b363SPhilipp Reisner if (!rv) 4043b411b363SPhilipp Reisner goto fail; 4044b411b363SPhilipp Reisner 404502918be2SPhilipp Reisner if (cmd != P_AUTH_RESPONSE) { 4046b411b363SPhilipp Reisner dev_err(DEV, "expected AuthResponse packet, received: %s (0x%04x)\n", 404702918be2SPhilipp Reisner cmdname(cmd), cmd); 4048b411b363SPhilipp Reisner rv = 0; 4049b411b363SPhilipp Reisner goto fail; 4050b411b363SPhilipp Reisner } 4051b411b363SPhilipp Reisner 405202918be2SPhilipp Reisner if (length != resp_size) { 4053b411b363SPhilipp Reisner dev_err(DEV, "expected AuthResponse payload of wrong size\n"); 4054b411b363SPhilipp Reisner rv = 0; 4055b411b363SPhilipp Reisner goto fail; 4056b411b363SPhilipp Reisner } 4057b411b363SPhilipp Reisner 4058b411b363SPhilipp Reisner rv = drbd_recv(mdev, response , resp_size); 4059b411b363SPhilipp Reisner 4060b411b363SPhilipp Reisner if (rv != resp_size) { 4061b411b363SPhilipp Reisner dev_err(DEV, "short read receiving AuthResponse: l=%u\n", rv); 4062b411b363SPhilipp Reisner rv = 0; 4063b411b363SPhilipp Reisner goto fail; 4064b411b363SPhilipp Reisner } 4065b411b363SPhilipp Reisner 4066b411b363SPhilipp Reisner right_response = kmalloc(resp_size, GFP_NOIO); 40672d1ee87dSJulia Lawall if (right_response == NULL) { 4068b411b363SPhilipp Reisner dev_err(DEV, "kmalloc of right_response failed\n"); 4069b10d96cbSJohannes Thoma rv = -1; 4070b411b363SPhilipp Reisner goto fail; 4071b411b363SPhilipp Reisner } 4072b411b363SPhilipp Reisner 4073b411b363SPhilipp Reisner sg_set_buf(&sg, my_challenge, CHALLENGE_LEN); 4074b411b363SPhilipp Reisner 4075b411b363SPhilipp Reisner rv = crypto_hash_digest(&desc, &sg, sg.length, right_response); 4076b411b363SPhilipp Reisner if (rv) { 4077b411b363SPhilipp Reisner dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv); 4078b10d96cbSJohannes Thoma rv = -1; 4079b411b363SPhilipp Reisner goto fail; 4080b411b363SPhilipp Reisner } 4081b411b363SPhilipp Reisner 4082b411b363SPhilipp Reisner rv = !memcmp(response, right_response, resp_size); 4083b411b363SPhilipp Reisner 4084b411b363SPhilipp Reisner if (rv) 4085b411b363SPhilipp Reisner dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n", 4086b411b363SPhilipp Reisner resp_size, mdev->net_conf->cram_hmac_alg); 4087b10d96cbSJohannes Thoma else 4088b10d96cbSJohannes Thoma rv = -1; 4089b411b363SPhilipp Reisner 4090b411b363SPhilipp Reisner fail: 4091b411b363SPhilipp Reisner kfree(peers_ch); 4092b411b363SPhilipp Reisner kfree(response); 4093b411b363SPhilipp Reisner kfree(right_response); 4094b411b363SPhilipp Reisner 4095b411b363SPhilipp Reisner return rv; 4096b411b363SPhilipp Reisner } 4097b411b363SPhilipp Reisner #endif 4098b411b363SPhilipp Reisner 4099b411b363SPhilipp Reisner int drbdd_init(struct drbd_thread *thi) 4100b411b363SPhilipp Reisner { 4101b411b363SPhilipp Reisner struct drbd_conf *mdev = thi->mdev; 4102b411b363SPhilipp Reisner unsigned int minor = mdev_to_minor(mdev); 4103b411b363SPhilipp Reisner int h; 
4104b411b363SPhilipp Reisner 4105b411b363SPhilipp Reisner sprintf(current->comm, "drbd%d_receiver", minor); 4106b411b363SPhilipp Reisner 4107b411b363SPhilipp Reisner dev_info(DEV, "receiver (re)started\n"); 4108b411b363SPhilipp Reisner 4109b411b363SPhilipp Reisner do { 4110b411b363SPhilipp Reisner h = drbd_connect(mdev); 4111b411b363SPhilipp Reisner if (h == 0) { 4112b411b363SPhilipp Reisner drbd_disconnect(mdev); 4113b411b363SPhilipp Reisner __set_current_state(TASK_INTERRUPTIBLE); 4114b411b363SPhilipp Reisner schedule_timeout(HZ); 4115b411b363SPhilipp Reisner } 4116b411b363SPhilipp Reisner if (h == -1) { 4117b411b363SPhilipp Reisner dev_warn(DEV, "Discarding network configuration.\n"); 4118b411b363SPhilipp Reisner drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); 4119b411b363SPhilipp Reisner } 4120b411b363SPhilipp Reisner } while (h == 0); 4121b411b363SPhilipp Reisner 4122b411b363SPhilipp Reisner if (h > 0) { 4123b411b363SPhilipp Reisner if (get_net_conf(mdev)) { 4124b411b363SPhilipp Reisner drbdd(mdev); 4125b411b363SPhilipp Reisner put_net_conf(mdev); 4126b411b363SPhilipp Reisner } 4127b411b363SPhilipp Reisner } 4128b411b363SPhilipp Reisner 4129b411b363SPhilipp Reisner drbd_disconnect(mdev); 4130b411b363SPhilipp Reisner 4131b411b363SPhilipp Reisner dev_info(DEV, "receiver terminated\n"); 4132b411b363SPhilipp Reisner return 0; 4133b411b363SPhilipp Reisner } 4134b411b363SPhilipp Reisner 4135b411b363SPhilipp Reisner /* ********* acknowledge sender ******** */ 4136b411b363SPhilipp Reisner 41370b70a13dSPhilipp Reisner static int got_RqSReply(struct drbd_conf *mdev, struct p_header80 *h) 4138b411b363SPhilipp Reisner { 4139b411b363SPhilipp Reisner struct p_req_state_reply *p = (struct p_req_state_reply *)h; 4140b411b363SPhilipp Reisner 4141b411b363SPhilipp Reisner int retcode = be32_to_cpu(p->retcode); 4142b411b363SPhilipp Reisner 4143b411b363SPhilipp Reisner if (retcode >= SS_SUCCESS) { 4144b411b363SPhilipp Reisner set_bit(CL_ST_CHG_SUCCESS, &mdev->flags); 4145b411b363SPhilipp Reisner } else { 4146b411b363SPhilipp Reisner set_bit(CL_ST_CHG_FAIL, &mdev->flags); 4147b411b363SPhilipp Reisner dev_err(DEV, "Requested state change failed by peer: %s (%d)\n", 4148b411b363SPhilipp Reisner drbd_set_st_err_str(retcode), retcode); 4149b411b363SPhilipp Reisner } 4150b411b363SPhilipp Reisner wake_up(&mdev->state_wait); 4151b411b363SPhilipp Reisner 415281e84650SAndreas Gruenbacher return true; 4153b411b363SPhilipp Reisner } 4154b411b363SPhilipp Reisner 41550b70a13dSPhilipp Reisner static int got_Ping(struct drbd_conf *mdev, struct p_header80 *h) 4156b411b363SPhilipp Reisner { 4157b411b363SPhilipp Reisner return drbd_send_ping_ack(mdev); 4158b411b363SPhilipp Reisner 4159b411b363SPhilipp Reisner } 4160b411b363SPhilipp Reisner 41610b70a13dSPhilipp Reisner static int got_PingAck(struct drbd_conf *mdev, struct p_header80 *h) 4162b411b363SPhilipp Reisner { 4163b411b363SPhilipp Reisner /* restore idle timeout */ 4164b411b363SPhilipp Reisner mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ; 4165309d1608SPhilipp Reisner if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags)) 4166309d1608SPhilipp Reisner wake_up(&mdev->misc_wait); 4167b411b363SPhilipp Reisner 416881e84650SAndreas Gruenbacher return true; 4169b411b363SPhilipp Reisner } 4170b411b363SPhilipp Reisner 41710b70a13dSPhilipp Reisner static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h) 4172b411b363SPhilipp Reisner { 4173b411b363SPhilipp Reisner struct p_block_ack *p = (struct p_block_ack *)h; 4174b411b363SPhilipp Reisner sector_t 
sector = be64_to_cpu(p->sector); 4175b411b363SPhilipp Reisner int blksize = be32_to_cpu(p->blksize); 4176b411b363SPhilipp Reisner 4177b411b363SPhilipp Reisner D_ASSERT(mdev->agreed_pro_version >= 89); 4178b411b363SPhilipp Reisner 4179b411b363SPhilipp Reisner update_peer_seq(mdev, be32_to_cpu(p->seq_num)); 4180b411b363SPhilipp Reisner 41811d53f09eSLars Ellenberg if (get_ldev(mdev)) { 4182b411b363SPhilipp Reisner drbd_rs_complete_io(mdev, sector); 4183b411b363SPhilipp Reisner drbd_set_in_sync(mdev, sector, blksize); 4184b411b363SPhilipp Reisner /* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */ 4185b411b363SPhilipp Reisner mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT); 41861d53f09eSLars Ellenberg put_ldev(mdev); 41871d53f09eSLars Ellenberg } 4188b411b363SPhilipp Reisner dec_rs_pending(mdev); 4189778f271dSPhilipp Reisner atomic_add(blksize >> 9, &mdev->rs_sect_in); 4190b411b363SPhilipp Reisner 419181e84650SAndreas Gruenbacher return true; 4192b411b363SPhilipp Reisner } 4193b411b363SPhilipp Reisner 4194b411b363SPhilipp Reisner /* when we receive the ACK for a write request, 4195b411b363SPhilipp Reisner * verify that we actually know about it */ 4196b411b363SPhilipp Reisner static struct drbd_request *_ack_id_to_req(struct drbd_conf *mdev, 4197b411b363SPhilipp Reisner u64 id, sector_t sector) 4198b411b363SPhilipp Reisner { 4199b411b363SPhilipp Reisner struct hlist_head *slot = tl_hash_slot(mdev, sector); 4200b411b363SPhilipp Reisner struct hlist_node *n; 4201b411b363SPhilipp Reisner struct drbd_request *req; 4202b411b363SPhilipp Reisner 4203b411b363SPhilipp Reisner hlist_for_each_entry(req, n, slot, colision) { 4204b411b363SPhilipp Reisner if ((unsigned long)req == (unsigned long)id) { 4205b411b363SPhilipp Reisner if (req->sector != sector) { 4206b411b363SPhilipp Reisner dev_err(DEV, "_ack_id_to_req: found req %p but it has " 4207b411b363SPhilipp Reisner "wrong sector (%llus versus %llus)\n", req, 4208b411b363SPhilipp Reisner (unsigned long long)req->sector, 4209b411b363SPhilipp Reisner (unsigned long long)sector); 4210b411b363SPhilipp Reisner break; 4211b411b363SPhilipp Reisner } 4212b411b363SPhilipp Reisner return req; 4213b411b363SPhilipp Reisner } 4214b411b363SPhilipp Reisner } 4215b411b363SPhilipp Reisner dev_err(DEV, "_ack_id_to_req: failed to find req %p, sector %llus in list\n", 4216b411b363SPhilipp Reisner (void *)(unsigned long)id, (unsigned long long)sector); 4217b411b363SPhilipp Reisner return NULL; 4218b411b363SPhilipp Reisner } 4219b411b363SPhilipp Reisner 4220b411b363SPhilipp Reisner typedef struct drbd_request *(req_validator_fn) 4221b411b363SPhilipp Reisner (struct drbd_conf *mdev, u64 id, sector_t sector); 4222b411b363SPhilipp Reisner 4223b411b363SPhilipp Reisner static int validate_req_change_req_state(struct drbd_conf *mdev, 4224b411b363SPhilipp Reisner u64 id, sector_t sector, req_validator_fn validator, 4225b411b363SPhilipp Reisner const char *func, enum drbd_req_event what) 4226b411b363SPhilipp Reisner { 4227b411b363SPhilipp Reisner struct drbd_request *req; 4228b411b363SPhilipp Reisner struct bio_and_error m; 4229b411b363SPhilipp Reisner 4230b411b363SPhilipp Reisner spin_lock_irq(&mdev->req_lock); 4231b411b363SPhilipp Reisner req = validator(mdev, id, sector); 4232b411b363SPhilipp Reisner if (unlikely(!req)) { 4233b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 4234b411b363SPhilipp Reisner dev_err(DEV, "%s: got a corrupt block_id/sector pair\n", func); 423581e84650SAndreas Gruenbacher return false; 4236b411b363SPhilipp Reisner } 
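	/* Request looked up successfully while still holding req_lock: apply
	 * the requested state transition; a master bio that became ready for
	 * completion is handed back in 'm' and completed outside the lock. */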
4237b411b363SPhilipp Reisner __req_mod(req, what, &m); 4238b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 4239b411b363SPhilipp Reisner 4240b411b363SPhilipp Reisner if (m.bio) 4241b411b363SPhilipp Reisner complete_master_bio(mdev, &m); 424281e84650SAndreas Gruenbacher return true; 4243b411b363SPhilipp Reisner } 4244b411b363SPhilipp Reisner 42450b70a13dSPhilipp Reisner static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h) 4246b411b363SPhilipp Reisner { 4247b411b363SPhilipp Reisner struct p_block_ack *p = (struct p_block_ack *)h; 4248b411b363SPhilipp Reisner sector_t sector = be64_to_cpu(p->sector); 4249b411b363SPhilipp Reisner int blksize = be32_to_cpu(p->blksize); 4250b411b363SPhilipp Reisner enum drbd_req_event what; 4251b411b363SPhilipp Reisner 4252b411b363SPhilipp Reisner update_peer_seq(mdev, be32_to_cpu(p->seq_num)); 4253b411b363SPhilipp Reisner 4254b411b363SPhilipp Reisner if (is_syncer_block_id(p->block_id)) { 4255b411b363SPhilipp Reisner drbd_set_in_sync(mdev, sector, blksize); 4256b411b363SPhilipp Reisner dec_rs_pending(mdev); 425781e84650SAndreas Gruenbacher return true; 4258b411b363SPhilipp Reisner } 4259b411b363SPhilipp Reisner switch (be16_to_cpu(h->command)) { 4260b411b363SPhilipp Reisner case P_RS_WRITE_ACK: 4261b411b363SPhilipp Reisner D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C); 4262b411b363SPhilipp Reisner what = write_acked_by_peer_and_sis; 4263b411b363SPhilipp Reisner break; 4264b411b363SPhilipp Reisner case P_WRITE_ACK: 4265b411b363SPhilipp Reisner D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C); 4266b411b363SPhilipp Reisner what = write_acked_by_peer; 4267b411b363SPhilipp Reisner break; 4268b411b363SPhilipp Reisner case P_RECV_ACK: 4269b411b363SPhilipp Reisner D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_B); 4270b411b363SPhilipp Reisner what = recv_acked_by_peer; 4271b411b363SPhilipp Reisner break; 4272b411b363SPhilipp Reisner case P_DISCARD_ACK: 4273b411b363SPhilipp Reisner D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C); 4274b411b363SPhilipp Reisner what = conflict_discarded_by_peer; 4275b411b363SPhilipp Reisner break; 4276b411b363SPhilipp Reisner default: 4277b411b363SPhilipp Reisner D_ASSERT(0); 427881e84650SAndreas Gruenbacher return false; 4279b411b363SPhilipp Reisner } 4280b411b363SPhilipp Reisner 4281b411b363SPhilipp Reisner return validate_req_change_req_state(mdev, p->block_id, sector, 4282b411b363SPhilipp Reisner _ack_id_to_req, __func__ , what); 4283b411b363SPhilipp Reisner } 4284b411b363SPhilipp Reisner 42850b70a13dSPhilipp Reisner static int got_NegAck(struct drbd_conf *mdev, struct p_header80 *h) 4286b411b363SPhilipp Reisner { 4287b411b363SPhilipp Reisner struct p_block_ack *p = (struct p_block_ack *)h; 4288b411b363SPhilipp Reisner sector_t sector = be64_to_cpu(p->sector); 4289b411b363SPhilipp Reisner 4290b411b363SPhilipp Reisner if (__ratelimit(&drbd_ratelimit_state)) 4291b411b363SPhilipp Reisner dev_warn(DEV, "Got NegAck packet. 
Peer is in troubles?\n"); 4292b411b363SPhilipp Reisner 4293b411b363SPhilipp Reisner update_peer_seq(mdev, be32_to_cpu(p->seq_num)); 4294b411b363SPhilipp Reisner 4295b411b363SPhilipp Reisner if (is_syncer_block_id(p->block_id)) { 4296b411b363SPhilipp Reisner int size = be32_to_cpu(p->blksize); 4297b411b363SPhilipp Reisner dec_rs_pending(mdev); 4298b411b363SPhilipp Reisner drbd_rs_failed_io(mdev, sector, size); 429981e84650SAndreas Gruenbacher return true; 4300b411b363SPhilipp Reisner } 4301b411b363SPhilipp Reisner return validate_req_change_req_state(mdev, p->block_id, sector, 4302b411b363SPhilipp Reisner _ack_id_to_req, __func__ , neg_acked); 4303b411b363SPhilipp Reisner } 4304b411b363SPhilipp Reisner 43050b70a13dSPhilipp Reisner static int got_NegDReply(struct drbd_conf *mdev, struct p_header80 *h) 4306b411b363SPhilipp Reisner { 4307b411b363SPhilipp Reisner struct p_block_ack *p = (struct p_block_ack *)h; 4308b411b363SPhilipp Reisner sector_t sector = be64_to_cpu(p->sector); 4309b411b363SPhilipp Reisner 4310b411b363SPhilipp Reisner update_peer_seq(mdev, be32_to_cpu(p->seq_num)); 4311b411b363SPhilipp Reisner dev_err(DEV, "Got NegDReply; Sector %llus, len %u; Fail original request.\n", 4312b411b363SPhilipp Reisner (unsigned long long)sector, be32_to_cpu(p->blksize)); 4313b411b363SPhilipp Reisner 4314b411b363SPhilipp Reisner return validate_req_change_req_state(mdev, p->block_id, sector, 4315b411b363SPhilipp Reisner _ar_id_to_req, __func__ , neg_acked); 4316b411b363SPhilipp Reisner } 4317b411b363SPhilipp Reisner 43180b70a13dSPhilipp Reisner static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header80 *h) 4319b411b363SPhilipp Reisner { 4320b411b363SPhilipp Reisner sector_t sector; 4321b411b363SPhilipp Reisner int size; 4322b411b363SPhilipp Reisner struct p_block_ack *p = (struct p_block_ack *)h; 4323b411b363SPhilipp Reisner 4324b411b363SPhilipp Reisner sector = be64_to_cpu(p->sector); 4325b411b363SPhilipp Reisner size = be32_to_cpu(p->blksize); 4326b411b363SPhilipp Reisner 4327b411b363SPhilipp Reisner update_peer_seq(mdev, be32_to_cpu(p->seq_num)); 4328b411b363SPhilipp Reisner 4329b411b363SPhilipp Reisner dec_rs_pending(mdev); 4330b411b363SPhilipp Reisner 4331b411b363SPhilipp Reisner if (get_ldev_if_state(mdev, D_FAILED)) { 4332b411b363SPhilipp Reisner drbd_rs_complete_io(mdev, sector); 4333b411b363SPhilipp Reisner drbd_rs_failed_io(mdev, sector, size); 4334b411b363SPhilipp Reisner put_ldev(mdev); 4335b411b363SPhilipp Reisner } 4336b411b363SPhilipp Reisner 433781e84650SAndreas Gruenbacher return true; 4338b411b363SPhilipp Reisner } 4339b411b363SPhilipp Reisner 43400b70a13dSPhilipp Reisner static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h) 4341b411b363SPhilipp Reisner { 4342b411b363SPhilipp Reisner struct p_barrier_ack *p = (struct p_barrier_ack *)h; 4343b411b363SPhilipp Reisner 4344b411b363SPhilipp Reisner tl_release(mdev, p->barrier, be32_to_cpu(p->set_size)); 4345b411b363SPhilipp Reisner 4346c4752ef1SPhilipp Reisner if (mdev->state.conn == C_AHEAD && 4347c4752ef1SPhilipp Reisner atomic_read(&mdev->ap_in_flight) == 0 && 4348c4752ef1SPhilipp Reisner list_empty(&mdev->start_resync_work.list)) { 4349c4752ef1SPhilipp Reisner struct drbd_work *w = &mdev->start_resync_work; 4350c4752ef1SPhilipp Reisner w->cb = w_start_resync; 4351c4752ef1SPhilipp Reisner drbd_queue_work_front(&mdev->data.work, w); 4352c4752ef1SPhilipp Reisner } 4353c4752ef1SPhilipp Reisner 435481e84650SAndreas Gruenbacher return true; 4355b411b363SPhilipp Reisner } 4356b411b363SPhilipp Reisner 
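/*
 * Illustrative sketch, not part of the driver source: the ACK handlers above
 * all follow the same pattern -- recover the original drbd_request from the
 * (block_id, sector) pair under req_lock, feed the matching drbd_req_event
 * into the request state machine via validate_req_change_req_state(), and
 * complete the master bio (if any) outside the lock.  A hypothetical handler
 * for some new ACK type would look roughly like this ("got_SomeAck" and
 * "some_req_event" are placeholders, not real symbols):
 *
 *	static int got_SomeAck(struct drbd_conf *mdev, struct p_header80 *h)
 *	{
 *		struct p_block_ack *p = (struct p_block_ack *)h;
 *		sector_t sector = be64_to_cpu(p->sector);
 *
 *		update_peer_seq(mdev, be32_to_cpu(p->seq_num));
 *		return validate_req_change_req_state(mdev, p->block_id, sector,
 *						     _ack_id_to_req, __func__,
 *						     some_req_event);
 *	}
 */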
43570b70a13dSPhilipp Reisner static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h) 4358b411b363SPhilipp Reisner { 4359b411b363SPhilipp Reisner struct p_block_ack *p = (struct p_block_ack *)h; 4360b411b363SPhilipp Reisner struct drbd_work *w; 4361b411b363SPhilipp Reisner sector_t sector; 4362b411b363SPhilipp Reisner int size; 4363b411b363SPhilipp Reisner 4364b411b363SPhilipp Reisner sector = be64_to_cpu(p->sector); 4365b411b363SPhilipp Reisner size = be32_to_cpu(p->blksize); 4366b411b363SPhilipp Reisner 4367b411b363SPhilipp Reisner update_peer_seq(mdev, be32_to_cpu(p->seq_num)); 4368b411b363SPhilipp Reisner 4369b411b363SPhilipp Reisner if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC) 4370b411b363SPhilipp Reisner drbd_ov_oos_found(mdev, sector, size); 4371b411b363SPhilipp Reisner else 4372b411b363SPhilipp Reisner ov_oos_print(mdev); 4373b411b363SPhilipp Reisner 43741d53f09eSLars Ellenberg if (!get_ldev(mdev)) 437581e84650SAndreas Gruenbacher return true; 43761d53f09eSLars Ellenberg 4377b411b363SPhilipp Reisner drbd_rs_complete_io(mdev, sector); 4378b411b363SPhilipp Reisner dec_rs_pending(mdev); 4379b411b363SPhilipp Reisner 4380ea5442afSLars Ellenberg --mdev->ov_left; 4381ea5442afSLars Ellenberg 4382ea5442afSLars Ellenberg /* let's advance progress step marks only for every other megabyte */ 4383ea5442afSLars Ellenberg if ((mdev->ov_left & 0x200) == 0x200) 4384ea5442afSLars Ellenberg drbd_advance_rs_marks(mdev, mdev->ov_left); 4385ea5442afSLars Ellenberg 4386ea5442afSLars Ellenberg if (mdev->ov_left == 0) { 4387b411b363SPhilipp Reisner w = kmalloc(sizeof(*w), GFP_NOIO); 4388b411b363SPhilipp Reisner if (w) { 4389b411b363SPhilipp Reisner w->cb = w_ov_finished; 4390b411b363SPhilipp Reisner drbd_queue_work_front(&mdev->data.work, w); 4391b411b363SPhilipp Reisner } else { 4392b411b363SPhilipp Reisner dev_err(DEV, "kmalloc(w) failed."); 4393b411b363SPhilipp Reisner ov_oos_print(mdev); 4394b411b363SPhilipp Reisner drbd_resync_finished(mdev); 4395b411b363SPhilipp Reisner } 4396b411b363SPhilipp Reisner } 43971d53f09eSLars Ellenberg put_ldev(mdev); 439881e84650SAndreas Gruenbacher return true; 4399b411b363SPhilipp Reisner } 4400b411b363SPhilipp Reisner 440102918be2SPhilipp Reisner static int got_skip(struct drbd_conf *mdev, struct p_header80 *h) 44020ced55a3SPhilipp Reisner { 440381e84650SAndreas Gruenbacher return true; 44040ced55a3SPhilipp Reisner } 44050ced55a3SPhilipp Reisner 4406b411b363SPhilipp Reisner struct asender_cmd { 4407b411b363SPhilipp Reisner size_t pkt_size; 44080b70a13dSPhilipp Reisner int (*process)(struct drbd_conf *mdev, struct p_header80 *h); 4409b411b363SPhilipp Reisner }; 4410b411b363SPhilipp Reisner 4411b411b363SPhilipp Reisner static struct asender_cmd *get_asender_cmd(int cmd) 4412b411b363SPhilipp Reisner { 4413b411b363SPhilipp Reisner static struct asender_cmd asender_tbl[] = { 4414b411b363SPhilipp Reisner /* anything missing from this table is in 4415b411b363SPhilipp Reisner * the drbd_cmd_handler (drbd_default_handler) table, 4416b411b363SPhilipp Reisner * see the beginning of drbdd() */ 44170b70a13dSPhilipp Reisner [P_PING] = { sizeof(struct p_header80), got_Ping }, 44180b70a13dSPhilipp Reisner [P_PING_ACK] = { sizeof(struct p_header80), got_PingAck }, 4419b411b363SPhilipp Reisner [P_RECV_ACK] = { sizeof(struct p_block_ack), got_BlockAck }, 4420b411b363SPhilipp Reisner [P_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck }, 4421b411b363SPhilipp Reisner [P_RS_WRITE_ACK] = { sizeof(struct p_block_ack), got_BlockAck }, 4422b411b363SPhilipp Reisner 
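	/* Added note: P_DISCARD_ACK below is also routed to got_BlockAck, whose
	 * switch statement maps each of these commands to its drbd_req_event. */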
[P_DISCARD_ACK] = { sizeof(struct p_block_ack), got_BlockAck }, 4423b411b363SPhilipp Reisner [P_NEG_ACK] = { sizeof(struct p_block_ack), got_NegAck }, 4424b411b363SPhilipp Reisner [P_NEG_DREPLY] = { sizeof(struct p_block_ack), got_NegDReply }, 4425b411b363SPhilipp Reisner [P_NEG_RS_DREPLY] = { sizeof(struct p_block_ack), got_NegRSDReply}, 4426b411b363SPhilipp Reisner [P_OV_RESULT] = { sizeof(struct p_block_ack), got_OVResult }, 4427b411b363SPhilipp Reisner [P_BARRIER_ACK] = { sizeof(struct p_barrier_ack), got_BarrierAck }, 4428b411b363SPhilipp Reisner [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply }, 4429b411b363SPhilipp Reisner [P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync }, 443002918be2SPhilipp Reisner [P_DELAY_PROBE] = { sizeof(struct p_delay_probe93), got_skip }, 4431b411b363SPhilipp Reisner [P_MAX_CMD] = { 0, NULL }, 4432b411b363SPhilipp Reisner }; 4433b411b363SPhilipp Reisner if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL) 4434b411b363SPhilipp Reisner return NULL; 4435b411b363SPhilipp Reisner return &asender_tbl[cmd]; 4436b411b363SPhilipp Reisner } 4437b411b363SPhilipp Reisner 4438b411b363SPhilipp Reisner int drbd_asender(struct drbd_thread *thi) 4439b411b363SPhilipp Reisner { 4440b411b363SPhilipp Reisner struct drbd_conf *mdev = thi->mdev; 444102918be2SPhilipp Reisner struct p_header80 *h = &mdev->meta.rbuf.header.h80; 4442b411b363SPhilipp Reisner struct asender_cmd *cmd = NULL; 4443b411b363SPhilipp Reisner 4444b411b363SPhilipp Reisner int rv, len; 4445b411b363SPhilipp Reisner void *buf = h; 4446b411b363SPhilipp Reisner int received = 0; 44470b70a13dSPhilipp Reisner int expect = sizeof(struct p_header80); 4448b411b363SPhilipp Reisner int empty; 4449b411b363SPhilipp Reisner 4450b411b363SPhilipp Reisner sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev)); 4451b411b363SPhilipp Reisner 4452b411b363SPhilipp Reisner current->policy = SCHED_RR; /* Make this a realtime task! 
*/ 4453b411b363SPhilipp Reisner current->rt_priority = 2; /* more important than all other tasks */ 4454b411b363SPhilipp Reisner 4455b411b363SPhilipp Reisner while (get_t_state(thi) == Running) { 4456b411b363SPhilipp Reisner drbd_thread_current_set_cpu(mdev); 4457b411b363SPhilipp Reisner if (test_and_clear_bit(SEND_PING, &mdev->flags)) { 4458b411b363SPhilipp Reisner ERR_IF(!drbd_send_ping(mdev)) goto reconnect; 4459b411b363SPhilipp Reisner mdev->meta.socket->sk->sk_rcvtimeo = 4460b411b363SPhilipp Reisner mdev->net_conf->ping_timeo*HZ/10; 4461b411b363SPhilipp Reisner } 4462b411b363SPhilipp Reisner 4463b411b363SPhilipp Reisner /* conditionally cork; 4464b411b363SPhilipp Reisner * it may hurt latency if we cork without much to send */ 4465b411b363SPhilipp Reisner if (!mdev->net_conf->no_cork && 4466b411b363SPhilipp Reisner 3 < atomic_read(&mdev->unacked_cnt)) 4467b411b363SPhilipp Reisner drbd_tcp_cork(mdev->meta.socket); 4468b411b363SPhilipp Reisner while (1) { 4469b411b363SPhilipp Reisner clear_bit(SIGNAL_ASENDER, &mdev->flags); 4470b411b363SPhilipp Reisner flush_signals(current); 44710f8488e1SLars Ellenberg if (!drbd_process_done_ee(mdev)) 4472b411b363SPhilipp Reisner goto reconnect; 4473b411b363SPhilipp Reisner /* to avoid race with newly queued ACKs */ 4474b411b363SPhilipp Reisner set_bit(SIGNAL_ASENDER, &mdev->flags); 4475b411b363SPhilipp Reisner spin_lock_irq(&mdev->req_lock); 4476b411b363SPhilipp Reisner empty = list_empty(&mdev->done_ee); 4477b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 4478b411b363SPhilipp Reisner /* new ack may have been queued right here, 4479b411b363SPhilipp Reisner * but then there is also a signal pending, 4480b411b363SPhilipp Reisner * and we start over... */ 4481b411b363SPhilipp Reisner if (empty) 4482b411b363SPhilipp Reisner break; 4483b411b363SPhilipp Reisner } 4484b411b363SPhilipp Reisner /* but unconditionally uncork unless disabled */ 4485b411b363SPhilipp Reisner if (!mdev->net_conf->no_cork) 4486b411b363SPhilipp Reisner drbd_tcp_uncork(mdev->meta.socket); 4487b411b363SPhilipp Reisner 4488b411b363SPhilipp Reisner /* short circuit, recv_msg would return EINTR anyways. */ 4489b411b363SPhilipp Reisner if (signal_pending(current)) 4490b411b363SPhilipp Reisner continue; 4491b411b363SPhilipp Reisner 4492b411b363SPhilipp Reisner rv = drbd_recv_short(mdev, mdev->meta.socket, 4493b411b363SPhilipp Reisner buf, expect-received, 0); 4494b411b363SPhilipp Reisner clear_bit(SIGNAL_ASENDER, &mdev->flags); 4495b411b363SPhilipp Reisner 4496b411b363SPhilipp Reisner flush_signals(current); 4497b411b363SPhilipp Reisner 4498b411b363SPhilipp Reisner /* Note: 4499b411b363SPhilipp Reisner * -EINTR (on meta) we got a signal 4500b411b363SPhilipp Reisner * -EAGAIN (on meta) rcvtimeo expired 4501b411b363SPhilipp Reisner * -ECONNRESET other side closed the connection 4502b411b363SPhilipp Reisner * -ERESTARTSYS (on data) we got a signal 4503b411b363SPhilipp Reisner * rv < 0 other than above: unexpected error! 
4504b411b363SPhilipp Reisner * rv == expected: full header or command 4505b411b363SPhilipp Reisner * rv < expected: "woken" by signal during receive 4506b411b363SPhilipp Reisner * rv == 0 : "connection shut down by peer" 4507b411b363SPhilipp Reisner */ 4508b411b363SPhilipp Reisner if (likely(rv > 0)) { 4509b411b363SPhilipp Reisner received += rv; 4510b411b363SPhilipp Reisner buf += rv; 4511b411b363SPhilipp Reisner } else if (rv == 0) { 4512b411b363SPhilipp Reisner dev_err(DEV, "meta connection shut down by peer.\n"); 4513b411b363SPhilipp Reisner goto reconnect; 4514b411b363SPhilipp Reisner } else if (rv == -EAGAIN) { 4515b411b363SPhilipp Reisner if (mdev->meta.socket->sk->sk_rcvtimeo == 4516b411b363SPhilipp Reisner mdev->net_conf->ping_timeo*HZ/10) { 4517b411b363SPhilipp Reisner dev_err(DEV, "PingAck did not arrive in time.\n"); 4518b411b363SPhilipp Reisner goto reconnect; 4519b411b363SPhilipp Reisner } 4520b411b363SPhilipp Reisner set_bit(SEND_PING, &mdev->flags); 4521b411b363SPhilipp Reisner continue; 4522b411b363SPhilipp Reisner } else if (rv == -EINTR) { 4523b411b363SPhilipp Reisner continue; 4524b411b363SPhilipp Reisner } else { 4525b411b363SPhilipp Reisner dev_err(DEV, "sock_recvmsg returned %d\n", rv); 4526b411b363SPhilipp Reisner goto reconnect; 4527b411b363SPhilipp Reisner } 4528b411b363SPhilipp Reisner 4529b411b363SPhilipp Reisner if (received == expect && cmd == NULL) { 4530b411b363SPhilipp Reisner if (unlikely(h->magic != BE_DRBD_MAGIC)) { 4531004352faSLars Ellenberg dev_err(DEV, "magic?? on meta m: 0x%08x c: %d l: %d\n", 4532004352faSLars Ellenberg be32_to_cpu(h->magic), 4533004352faSLars Ellenberg be16_to_cpu(h->command), 4534004352faSLars Ellenberg be16_to_cpu(h->length)); 4535b411b363SPhilipp Reisner goto reconnect; 4536b411b363SPhilipp Reisner } 4537b411b363SPhilipp Reisner cmd = get_asender_cmd(be16_to_cpu(h->command)); 4538b411b363SPhilipp Reisner len = be16_to_cpu(h->length); 4539b411b363SPhilipp Reisner if (unlikely(cmd == NULL)) { 4540004352faSLars Ellenberg dev_err(DEV, "unknown command?? 
on meta m: 0x%08x c: %d l: %d\n", 4541004352faSLars Ellenberg be32_to_cpu(h->magic), 4542004352faSLars Ellenberg be16_to_cpu(h->command), 4543004352faSLars Ellenberg be16_to_cpu(h->length)); 4544b411b363SPhilipp Reisner goto disconnect; 4545b411b363SPhilipp Reisner } 4546b411b363SPhilipp Reisner expect = cmd->pkt_size; 45470b70a13dSPhilipp Reisner ERR_IF(len != expect-sizeof(struct p_header80)) 4548b411b363SPhilipp Reisner goto reconnect; 4549b411b363SPhilipp Reisner } 4550b411b363SPhilipp Reisner if (received == expect) { 4551b411b363SPhilipp Reisner D_ASSERT(cmd != NULL); 4552b411b363SPhilipp Reisner if (!cmd->process(mdev, h)) 4553b411b363SPhilipp Reisner goto reconnect; 4554b411b363SPhilipp Reisner 4555b411b363SPhilipp Reisner buf = h; 4556b411b363SPhilipp Reisner received = 0; 45570b70a13dSPhilipp Reisner expect = sizeof(struct p_header80); 4558b411b363SPhilipp Reisner cmd = NULL; 4559b411b363SPhilipp Reisner } 4560b411b363SPhilipp Reisner } 4561b411b363SPhilipp Reisner 4562b411b363SPhilipp Reisner if (0) { 4563b411b363SPhilipp Reisner reconnect: 4564b411b363SPhilipp Reisner drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE)); 4565856c50c7SLars Ellenberg drbd_md_sync(mdev); 4566b411b363SPhilipp Reisner } 4567b411b363SPhilipp Reisner if (0) { 4568b411b363SPhilipp Reisner disconnect: 4569b411b363SPhilipp Reisner drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); 4570856c50c7SLars Ellenberg drbd_md_sync(mdev); 4571b411b363SPhilipp Reisner } 4572b411b363SPhilipp Reisner clear_bit(SIGNAL_ASENDER, &mdev->flags); 4573b411b363SPhilipp Reisner 4574b411b363SPhilipp Reisner D_ASSERT(mdev->state.conn < C_CONNECTED); 4575b411b363SPhilipp Reisner dev_info(DEV, "asender terminated\n"); 4576b411b363SPhilipp Reisner 4577b411b363SPhilipp Reisner return 0; 4578b411b363SPhilipp Reisner } 4579
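/*
 * Illustrative notes, not part of the driver source:
 *
 * drbd_asender() above is a small two-state receive machine.  It first
 * expects a bare p_header80; once the full header has arrived it looks the
 * command up in asender_tbl, widens "expect" to cmd->pkt_size, and keeps
 * receiving into the same buffer until received == expect, at which point
 * cmd->process(mdev, h) is dispatched and the state resets to "header".
 * The header fields are decoded as big endian, which -- as implied by the
 * checks above (magic compared raw against BE_DRBD_MAGIC, and a length that
 * must equal expect - sizeof(struct p_header80)) -- corresponds to a layout
 * along the lines of:
 *
 *	struct p_header80 {
 *		u32 magic;	 checked raw against BE_DRBD_MAGIC
 *		u16 command;	 big endian, index into asender_tbl
 *		u16 length;	 big endian, payload bytes after the header
 *	};
 *
 * See drbd_int.h for the authoritative definition.
 */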