/*
   drbd_receiver.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */


#include <linux/module.h>

#include <asm/uaccess.h>
#include <net/sock.h>

#include <linux/drbd.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include "drbd_int.h"
#include "drbd_req.h"

#include "drbd_vli.h"

struct flush_work {
	struct drbd_work w;
	struct drbd_epoch *epoch;
};

enum finish_epoch {
	FE_STILL_LIVE,
	FE_DESTROYED,
	FE_RECYCLED,
};

static int drbd_do_handshake(struct drbd_conf *mdev);
static int drbd_do_auth(struct drbd_conf *mdev);

static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *,
		struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_conf *, struct drbd_work *, int);

static struct drbd_epoch *previous_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch)
{
	struct drbd_epoch *prev;
	spin_lock(&mdev->epoch_lock);
	prev = list_entry(epoch->list.prev, struct drbd_epoch, list);
	if (prev == epoch || prev == mdev->current_epoch)
		prev = NULL;
	spin_unlock(&mdev->epoch_lock);
	return prev;
}

#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)

/*
 * some helper functions to deal with single linked page lists,
 * page->private being our "next" pointer.
 */

/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller.
 */
static struct page *page_chain_del(struct page **head, int n)
{
	struct page *page;
	struct page *tmp;

	BUG_ON(!n);
	BUG_ON(!head);

	page = *head;

	if (!page)
		return NULL;

	while (page) {
		tmp = page_chain_next(page);
		if (--n == 0)
			break; /* found sufficient pages */
		if (tmp == NULL)
			/* insufficient pages, don't use any of them. */
			return NULL;
		page = tmp;
	}

	/* add end of list marker for the returned list */
	set_page_private(page, 0);
	/* actual return value, and adjustment of head */
	page = *head;
	*head = tmp;
	return page;
}

/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)
{
	struct page *tmp;
	int i = 1;
	while ((tmp = page_chain_next(page)))
		++i, page = tmp;
	if (len)
		*len = i;
	return page;
}

static int page_chain_free(struct page *page)
{
	struct page *tmp;
	int i = 0;
	page_chain_for_each_safe(page, tmp) {
		put_page(page);
		++i;
	}
	return i;
}

static void page_chain_add(struct page **head,
		struct page *chain_first, struct page *chain_last)
{
#if 1
	struct page *tmp;
	tmp = page_chain_tail(chain_first, NULL);
	BUG_ON(tmp != chain_last);
#endif

	/* add chain to head */
	set_page_private(chain_last, (unsigned long)*head);
	*head = chain_first;
}

static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int number)
{
	struct page *page = NULL;
	struct page *tmp = NULL;
	int i = 0;

	/* Yes, testing drbd_pp_vacant outside the lock is racy.
	 * So what. It saves a spin_lock. */
	if (drbd_pp_vacant >= number) {
		spin_lock(&drbd_pp_lock);
		page = page_chain_del(&drbd_pp_pool, number);
		if (page)
			drbd_pp_vacant -= number;
		spin_unlock(&drbd_pp_lock);
		if (page)
			return page;
	}

	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	for (i = 0; i < number; i++) {
		tmp = alloc_page(GFP_TRY);
		if (!tmp)
			break;
		set_page_private(tmp, (unsigned long)page);
		page = tmp;
	}

	if (i == number)
		return page;

	/* Not enough pages immediately available this time.
	 * No need to jump around here, drbd_pp_alloc will retry this
	 * function "soon". */
	if (page) {
		tmp = page_chain_tail(page, NULL);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	return NULL;
}

/* kick lower level device, if we have more than (arbitrary number)
 * reference counts on it, which typically are locally submitted io
 * requests. don't use unacked_cnt, so we speed up proto A and B, too. */
static void maybe_kick_lo(struct drbd_conf *mdev)
{
	if (atomic_read(&mdev->local_cnt) >= mdev->net_conf->unplug_watermark)
		drbd_kick_lo(mdev);
}

static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
{
	struct drbd_epoch_entry *e;
	struct list_head *le, *tle;

	/* The EEs are always appended to the end of the list. Since
	   they are sent in order over the wire, they have to finish
	   in order. As soon as we see the first unfinished one we can
	   stop examining the list... */

	list_for_each_safe(le, tle, &mdev->net_ee) {
		e = list_entry(le, struct drbd_epoch_entry, w.list);
		if (drbd_ee_has_active_page(e))
			break;
		list_move(le, to_be_freed);
	}
}

static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
{
	LIST_HEAD(reclaimed);
	struct drbd_epoch_entry *e, *t;

	maybe_kick_lo(mdev);
	spin_lock_irq(&mdev->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &reclaimed, w.list)
		drbd_free_ee(mdev, e);
}

/**
 * drbd_pp_alloc() - Returns @number pages, retries forever (or until signalled)
 * @mdev:	DRBD device.
 * @number:	number of pages requested
 * @retry:	whether to retry, if not enough pages are available right now
 *
 * Tries to allocate number pages, first from our own page pool, then from
 * the kernel, unless this allocation would exceed the max_buffers setting.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * Returns a page chain linked via page->private.
 */
static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool retry)
{
	struct page *page = NULL;
	DEFINE_WAIT(wait);

	/* Yes, we may run up to @number over max_buffers. If we
	 * follow it strictly, the admin will get it wrong anyways. */
	if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers)
		page = drbd_pp_first_pages_or_try_alloc(mdev, number);

	while (page == NULL) {
		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

		drbd_kick_lo_and_reclaim_net(mdev);

		if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
			page = drbd_pp_first_pages_or_try_alloc(mdev, number);
			if (page)
				break;
		}

		if (!retry)
			break;

		if (signal_pending(current)) {
			dev_warn(DEV, "drbd_pp_alloc interrupted!\n");
			break;
		}

		schedule();
	}
	finish_wait(&drbd_pp_wait, &wait);

	if (page)
		atomic_add(number, &mdev->pp_in_use);
	return page;
}

/* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
 * Is also used from inside another spin_lock_irq(&mdev->req_lock);
 * Either links the page chain back to the global pool,
 * or returns all pages to the system. */
static void drbd_pp_free(struct drbd_conf *mdev, struct page *page)
{
	int i;
	if (drbd_pp_vacant > (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE)*minor_count)
		i = page_chain_free(page);
	else {
		struct page *tmp;
		tmp = page_chain_tail(page, &i);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		drbd_pp_vacant += i;
		spin_unlock(&drbd_pp_lock);
	}
	atomic_sub(i, &mdev->pp_in_use);
	i = atomic_read(&mdev->pp_in_use);
	if (i < 0)
		dev_warn(DEV, "ASSERTION FAILED: pp_in_use: %d < 0\n", i);
	wake_up(&drbd_pp_wait);
}

/*
You need to hold the req_lock:
 _drbd_wait_ee_list_empty()

You must not have the req_lock:
 drbd_free_ee()
 drbd_alloc_ee()
 drbd_init_ee()
 drbd_release_ee()
 drbd_ee_fix_bhs()
 drbd_process_done_ee()
 drbd_clear_done_ee()
 drbd_wait_ee_list_empty()
*/

struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
				     u64 id,
				     sector_t sector,
				     unsigned int data_size,
				     gfp_t gfp_mask) __must_hold(local)
{
	struct drbd_epoch_entry *e;
	struct page *page;
	unsigned nr_pages = (data_size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	if (FAULT_ACTIVE(mdev, DRBD_FAULT_AL_EE))
		return NULL;

	e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
	if (!e) {
		if (!(gfp_mask & __GFP_NOWARN))
			dev_err(DEV, "alloc_ee: Allocation of an EE failed\n");
		return NULL;
	}

	page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
	if (!page)
		goto fail;

	INIT_HLIST_NODE(&e->colision);
	e->epoch = NULL;
	e->mdev = mdev;
	e->pages = page;
	atomic_set(&e->pending_bios, 0);
	e->size = data_size;
	e->flags = 0;
	e->sector = sector;
	e->block_id = id;

	return e;

 fail:
	mempool_free(e, drbd_ee_mempool);
	return NULL;
}

void drbd_free_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
{
	drbd_pp_free(mdev, e->pages);
	D_ASSERT(atomic_read(&e->pending_bios) == 0);
	D_ASSERT(hlist_unhashed(&e->colision));
	mempool_free(e, drbd_ee_mempool);
}

int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
{
	LIST_HEAD(work_list);
	struct drbd_epoch_entry *e, *t;
	int count = 0;

	spin_lock_irq(&mdev->req_lock);
	list_splice_init(list, &work_list);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &work_list, w.list) {
		drbd_free_ee(mdev, e);
		count++;
	}
	return count;
}


/*
 * This function is called from _asender only_
 * but see also comments in _req_mod(,barrier_acked)
 * and receive_Barrier.
 *
 * Move entries from net_ee to done_ee, if ready.
 * Grab done_ee, call all callbacks, free the entries.
 * The callbacks typically send out ACKs.
 */
static int drbd_process_done_ee(struct drbd_conf *mdev)
{
	LIST_HEAD(work_list);
	LIST_HEAD(reclaimed);
	struct drbd_epoch_entry *e, *t;
	int ok = (mdev->state.conn >= C_WF_REPORT_PARAMS);

	spin_lock_irq(&mdev->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	list_splice_init(&mdev->done_ee, &work_list);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &reclaimed, w.list)
		drbd_free_ee(mdev, e);

	/* possible callbacks here:
	 * e_end_block, and e_end_resync_block, e_send_discard_ack.
	 * all ignore the last argument.
	 */
	list_for_each_entry_safe(e, t, &work_list, w.list) {
		/* list_del not necessary, next/prev members not touched */
		ok = e->w.cb(mdev, &e->w, !ok) && ok;
		drbd_free_ee(mdev, e);
	}
	wake_up(&mdev->ee_wait);

	return ok;
}

void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
	DEFINE_WAIT(wait);

	/* avoids spin_lock/unlock
	 * and calling prepare_to_wait in the fast path */
	while (!list_empty(head)) {
		prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mdev->req_lock);
		drbd_kick_lo(mdev);
		schedule();
		finish_wait(&mdev->ee_wait, &wait);
		spin_lock_irq(&mdev->req_lock);
	}
}

void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
{
	spin_lock_irq(&mdev->req_lock);
	_drbd_wait_ee_list_empty(mdev, head);
	spin_unlock_irq(&mdev->req_lock);
}

/* see also kernel_accept, which is only present since 2.6.18.
 * also we want to log which part of it failed, exactly */
static int drbd_accept(struct drbd_conf *mdev, const char **what,
		struct socket *sock, struct socket **newsock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	*what = "listen";
	err = sock->ops->listen(sock, 5);
	if (err < 0)
		goto out;

	*what = "sock_create_lite";
	err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
			       newsock);
	if (err < 0)
		goto out;

	*what = "accept";
	err = sock->ops->accept(sock, *newsock, 0);
	if (err < 0) {
		sock_release(*newsock);
		*newsock = NULL;
		goto out;
	}
	(*newsock)->ops = sock->ops;

out:
	return err;
}

static int drbd_recv_short(struct drbd_conf *mdev, struct socket *sock,
		    void *buf, size_t size, int flags)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
	set_fs(oldfs);

	return rv;
}

static int drbd_recv(struct drbd_conf *mdev, void *buf, size_t size)
{
	mm_segment_t oldfs;
	struct kvec iov = {
		.iov_base = buf,
		.iov_len = size,
	};
	struct msghdr msg = {
		.msg_iovlen = 1,
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = MSG_WAITALL | MSG_NOSIGNAL
	};
	int rv;

	oldfs = get_fs();
	set_fs(KERNEL_DS);

	for (;;) {
		rv = sock_recvmsg(mdev->data.socket, &msg, size, msg.msg_flags);
		if (rv == size)
			break;

		/* Note:
		 * ECONNRESET	other side closed the connection
		 * ERESTARTSYS	(on  sock) we got a signal
		 */

		if (rv < 0) {
			if (rv == -ECONNRESET)
				dev_info(DEV, "sock was reset by peer\n");
			else if (rv != -ERESTARTSYS)
				dev_err(DEV, "sock_recvmsg returned %d\n", rv);
			break;
		} else if (rv == 0) {
			dev_info(DEV, "sock was shut down by peer\n");
			break;
		} else {
			/* signal came in, or peer/link went down,
			 * after we read a partial message
			 */
			/* D_ASSERT(signal_pending(current)); */
			break;
		}
	};

	set_fs(oldfs);

	if (rv != size)
		drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));

	return rv;
}

/* quoting tcp(7):
 *   On individual connections, the socket buffer size must be set prior to the
 *   listen(2) or connect(2) calls in order to have it take effect.
 * This is our wrapper to do so.
 */
static void drbd_setbufsize(struct socket *sock, unsigned int snd,
		unsigned int rcv)
{
	/* open coded SO_SNDBUF, SO_RCVBUF */
	if (snd) {
		sock->sk->sk_sndbuf = snd;
		sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
	}
	if (rcv) {
		sock->sk->sk_rcvbuf = rcv;
		sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
	}
}

static struct socket *drbd_try_connect(struct drbd_conf *mdev)
{
	const char *what;
	struct socket *sock;
	struct sockaddr_in6 src_in6;
	int err;
	int disconnect_on_error = 1;

	if (!get_net_conf(mdev))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
		SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err < 0) {
		sock = NULL;
		goto out;
	}

	sock->sk->sk_rcvtimeo =
	sock->sk->sk_sndtimeo = mdev->net_conf->try_connect_int*HZ;
	drbd_setbufsize(sock, mdev->net_conf->sndbuf_size,
			mdev->net_conf->rcvbuf_size);

	/* explicitly bind to the configured IP as source IP
	 * for the outgoing connections.
	 * This is needed for multihomed hosts and to be
	 * able to use lo: interfaces for drbd.
	 * Make sure to use 0 as port number, so linux selects
	 * a free one dynamically.
	 */
	memcpy(&src_in6, mdev->net_conf->my_addr,
	       min_t(int, mdev->net_conf->my_addr_len, sizeof(src_in6)));
	if (((struct sockaddr *)mdev->net_conf->my_addr)->sa_family == AF_INET6)
		src_in6.sin6_port = 0;
	else
		((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */

	what = "bind before connect";
	err = sock->ops->bind(sock,
			      (struct sockaddr *) &src_in6,
			      mdev->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	/* connect may fail, peer not yet available.
	 * stay C_WF_CONNECTION, don't go Disconnecting! */
	disconnect_on_error = 0;
	what = "connect";
	err = sock->ops->connect(sock,
				 (struct sockaddr *)mdev->net_conf->peer_addr,
				 mdev->net_conf->peer_addr_len, 0);

out:
	if (err < 0) {
		if (sock) {
			sock_release(sock);
			sock = NULL;
		}
		switch (-err) {
			/* timeout, busy, signal pending */
		case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
		case EINTR: case ERESTARTSYS:
			/* peer not (yet) available, network problem */
		case ECONNREFUSED: case ENETUNREACH:
		case EHOSTDOWN:    case EHOSTUNREACH:
			disconnect_on_error = 0;
			break;
		default:
			dev_err(DEV, "%s failed, err = %d\n", what, err);
		}
		if (disconnect_on_error)
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
	}
	put_net_conf(mdev);
	return sock;
}

static struct socket *drbd_wait_for_connect(struct drbd_conf *mdev)
{
	int timeo, err;
	struct socket *s_estab = NULL, *s_listen;
	const char *what;

	if (!get_net_conf(mdev))
		return NULL;

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
		SOCK_STREAM, IPPROTO_TCP, &s_listen);
	if (err) {
		s_listen = NULL;
		goto out;
	}

	timeo = mdev->net_conf->try_connect_int * HZ;
	timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */

	s_listen->sk->sk_reuse    = 1; /* SO_REUSEADDR */
	s_listen->sk->sk_rcvtimeo = timeo;
	s_listen->sk->sk_sndtimeo = timeo;
	drbd_setbufsize(s_listen, mdev->net_conf->sndbuf_size,
			mdev->net_conf->rcvbuf_size);

	what = "bind before listen";
	err = s_listen->ops->bind(s_listen,
			      (struct sockaddr *) mdev->net_conf->my_addr,
			      mdev->net_conf->my_addr_len);
	if (err < 0)
		goto out;

	err = drbd_accept(mdev, &what, s_listen, &s_estab);

out:
	if (s_listen)
		sock_release(s_listen);
	if (err < 0) {
		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
			dev_err(DEV, "%s failed, err = %d\n", what, err);
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		}
	}
	put_net_conf(mdev);

	return s_estab;
}

static int drbd_send_fp(struct drbd_conf *mdev,
	struct socket *sock, enum drbd_packets cmd)
{
	struct p_header *h = (struct p_header *) &mdev->data.sbuf.header;

	return _drbd_send_cmd(mdev, sock, cmd, h, sizeof(*h), 0);
}

static enum drbd_packets drbd_recv_fp(struct drbd_conf *mdev, struct socket *sock)
{
	struct p_header *h = (struct p_header *) &mdev->data.sbuf.header;
	int rr;

	rr = drbd_recv_short(mdev, sock, h, sizeof(*h), 0);

	if (rr == sizeof(*h) && h->magic == BE_DRBD_MAGIC)
		return be16_to_cpu(h->command);

	return 0xffff;
}

/**
 * drbd_socket_okay() - Free the socket if its connection is not okay
 * @mdev:	DRBD device.
 * @sock:	pointer to the pointer to the socket.
 */
static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
{
	int rr;
	char tb[4];

	if (!*sock)
		return FALSE;

	rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);

	if (rr > 0 || rr == -EAGAIN) {
		return TRUE;
	} else {
		sock_release(*sock);
		*sock = NULL;
		return FALSE;
	}
}

/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 *  -2 We do not have a network config...
 */
static int drbd_connect(struct drbd_conf *mdev)
{
	struct socket *s, *sock, *msock;
	int try, h, ok;

	D_ASSERT(!mdev->data.socket);

	if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags))
		dev_err(DEV, "CREATE_BARRIER flag was set in drbd_connect - now cleared!\n");

	if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)
		return -2;

	clear_bit(DISCARD_CONCURRENT, &mdev->flags);

	sock  = NULL;
	msock = NULL;

	do {
		for (try = 0;;) {
			/* 3 tries, this should take less than a second! */
			s = drbd_try_connect(mdev);
			if (s || ++try >= 3)
				break;
			/* give the other side time to call bind() & listen() */
			__set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(HZ / 10);
		}

		if (s) {
			if (!sock) {
				drbd_send_fp(mdev, s, P_HAND_SHAKE_S);
				sock = s;
				s = NULL;
			} else if (!msock) {
				drbd_send_fp(mdev, s, P_HAND_SHAKE_M);
				msock = s;
				s = NULL;
			} else {
				dev_err(DEV, "Logic error in drbd_connect()\n");
				goto out_release_sockets;
			}
		}

		if (sock && msock) {
			__set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(HZ / 10);
			ok = drbd_socket_okay(mdev, &sock);
			ok = drbd_socket_okay(mdev, &msock) && ok;
			if (ok)
				break;
		}

retry:
		s = drbd_wait_for_connect(mdev);
		if (s) {
			try = drbd_recv_fp(mdev, s);
			drbd_socket_okay(mdev, &sock);
			drbd_socket_okay(mdev, &msock);
			switch (try) {
			case P_HAND_SHAKE_S:
				if (sock) {
					dev_warn(DEV, "initial packet S crossed\n");
					sock_release(sock);
				}
				sock = s;
				break;
			case P_HAND_SHAKE_M:
				if (msock) {
					dev_warn(DEV, "initial packet M crossed\n");
					sock_release(msock);
				}
				msock = s;
				set_bit(DISCARD_CONCURRENT, &mdev->flags);
				break;
			default:
				dev_warn(DEV, "Error receiving initial packet\n");
				sock_release(s);
				if (random32() & 1)
					goto retry;
			}
		}

		if (mdev->state.conn <= C_DISCONNECTING)
			goto out_release_sockets;
		if (signal_pending(current)) {
			flush_signals(current);
			smp_rmb();
			if (get_t_state(&mdev->receiver) == Exiting)
				goto out_release_sockets;
		}

		if (sock && msock) {
			ok = drbd_socket_okay(mdev, &sock);
			ok = drbd_socket_okay(mdev, &msock) && ok;
			if (ok)
				break;
		}
	} while (1);

	msock->sk->sk_reuse = 1; /* SO_REUSEADDR */
	sock->sk->sk_reuse = 1; /* SO_REUSEADDR */

	sock->sk->sk_allocation = GFP_NOIO;
	msock->sk->sk_allocation = GFP_NOIO;

	sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
	msock->sk->sk_priority = TC_PRIO_INTERACTIVE;

	/* NOT YET ...
	 * sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	 * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	 * first set it to the P_HAND_SHAKE timeout,
	 * which we set to 4x the configured ping_timeout. */
	sock->sk->sk_sndtimeo =
	sock->sk->sk_rcvtimeo = mdev->net_conf->ping_timeo*4*HZ/10;

	msock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	msock->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;

	/* we don't want delays.
	 * we use TCP_CORK where appropriate, though */
	drbd_tcp_nodelay(sock);
	drbd_tcp_nodelay(msock);

	mdev->data.socket = sock;
	mdev->meta.socket = msock;
	mdev->last_received = jiffies;

	D_ASSERT(mdev->asender.task == NULL);

	h = drbd_do_handshake(mdev);
	if (h <= 0)
		return h;

	if (mdev->cram_hmac_tfm) {
		/* drbd_request_state(mdev, NS(conn, WFAuth)); */
		switch (drbd_do_auth(mdev)) {
		case -1:
			dev_err(DEV, "Authentication of peer failed\n");
			return -1;
		case 0:
			dev_err(DEV, "Authentication of peer failed, trying again.\n");
			return 0;
		}
	}

	if (drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS)) < SS_SUCCESS)
		return 0;

	sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;

	atomic_set(&mdev->packet_seq, 0);
	mdev->peer_seq = 0;

	drbd_thread_start(&mdev->asender);

	if (!drbd_send_protocol(mdev))
		return -1;
	drbd_send_sync_param(mdev, &mdev->sync_conf);
	drbd_send_sizes(mdev, 0, 0);
	drbd_send_uuids(mdev);
	drbd_send_state(mdev);
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	clear_bit(RESIZE_PENDING, &mdev->flags);

	return 1;

out_release_sockets:
	if (sock)
		sock_release(sock);
	if (msock)
		sock_release(msock);
	return -1;
}

static int drbd_recv_header(struct drbd_conf *mdev, struct p_header *h)
{
	int r;

	r = drbd_recv(mdev, h, sizeof(*h));

	if (unlikely(r != sizeof(*h))) {
		dev_err(DEV, "short read expecting header on sock: r=%d\n", r);
		return FALSE;
	};
	h->command = be16_to_cpu(h->command);
	h->length  = be16_to_cpu(h->length);
	if (unlikely(h->magic != BE_DRBD_MAGIC)) {
		dev_err(DEV, "magic?? on data m: 0x%lx c: %d l: %d\n",
		    (long)be32_to_cpu(h->magic),
		    h->command, h->length);
		return FALSE;
	}
	mdev->last_received = jiffies;

	return TRUE;
}

static enum finish_epoch drbd_flush_after_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch)
{
	int rv;

	if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
		rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
					NULL, BLKDEV_IFL_WAIT);
		if (rv) {
			dev_err(DEV, "local disk flush failed with status %d\n", rv);
			/* would rather check on EOPNOTSUPP, but that is not reliable.
			 * don't try again for ANY return value != 0
			 * if (rv == -EOPNOTSUPP) */
			drbd_bump_write_ordering(mdev, WO_drain_io);
		}
		put_ldev(mdev);
	}

	return drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE);
}

static int w_flush(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct flush_work *fw = (struct flush_work *)w;
	struct drbd_epoch *epoch = fw->epoch;

	kfree(w);

	if (!test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags))
		drbd_flush_after_epoch(mdev, epoch);

	drbd_may_finish_epoch(mdev, epoch, EV_PUT |
			      (mdev->state.conn < C_CONNECTED ? EV_CLEANUP : 0));

	return 1;
}

/**
 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
 * @mdev:	DRBD device.
 * @epoch:	Epoch object.
 * @ev:		Epoch event.
 */
static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
					       struct drbd_epoch *epoch,
					       enum epoch_event ev)
{
	int finish, epoch_size;
	struct drbd_epoch *next_epoch;
	int schedule_flush = 0;
	enum finish_epoch rv = FE_STILL_LIVE;

	spin_lock(&mdev->epoch_lock);
	do {
		next_epoch = NULL;
		finish = 0;

		epoch_size = atomic_read(&epoch->epoch_size);

		switch (ev & ~EV_CLEANUP) {
		case EV_PUT:
			atomic_dec(&epoch->active);
			break;
		case EV_GOT_BARRIER_NR:
			set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);

			/* Special case: If we just switched from WO_bio_barrier to
			   WO_bdev_flush we should not finish the current epoch */
			if (test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags) && epoch_size == 1 &&
			    mdev->write_ordering != WO_bio_barrier &&
			    epoch == mdev->current_epoch)
				clear_bit(DE_CONTAINS_A_BARRIER, &epoch->flags);
			break;
		case EV_BARRIER_DONE:
			set_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags);
			break;
		case EV_BECAME_LAST:
			/* nothing to do */
			break;
		}

		if (epoch_size != 0 &&
		    atomic_read(&epoch->active) == 0 &&
		    test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) &&
		    epoch->list.prev == &mdev->current_epoch->list &&
		    !test_bit(DE_IS_FINISHING, &epoch->flags)) {
			/* Nearly all conditions are met to finish that epoch... */
			if (test_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags) ||
			    mdev->write_ordering == WO_none ||
			    (epoch_size == 1 && test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) ||
			    ev & EV_CLEANUP) {
				finish = 1;
				set_bit(DE_IS_FINISHING, &epoch->flags);
			} else if (!test_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags) &&
				   mdev->write_ordering == WO_bio_barrier) {
				atomic_inc(&epoch->active);
				schedule_flush = 1;
			}
		}
		if (finish) {
			if (!(ev & EV_CLEANUP)) {
				spin_unlock(&mdev->epoch_lock);
				drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
				spin_lock(&mdev->epoch_lock);
			}
			dec_unacked(mdev);

			if (mdev->current_epoch != epoch) {
				next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
				list_del(&epoch->list);
				ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
				mdev->epochs--;
				kfree(epoch);

				if (rv == FE_STILL_LIVE)
					rv = FE_DESTROYED;
			} else {
				epoch->flags = 0;
				atomic_set(&epoch->epoch_size, 0);
				/* atomic_set(&epoch->active, 0); is already zero */
				if (rv == FE_STILL_LIVE)
					rv = FE_RECYCLED;
			}
		}

		if (!next_epoch)
			break;

		epoch = next_epoch;
	} while (1);

	spin_unlock(&mdev->epoch_lock);

	if (schedule_flush) {
		struct flush_work *fw;
		fw = kmalloc(sizeof(*fw), GFP_ATOMIC);
		if (fw) {
			fw->w.cb = w_flush;
			fw->epoch = epoch;
			drbd_queue_work(&mdev->data.work, &fw->w);
		} else {
			dev_warn(DEV, "Could not kmalloc a flush_work obj\n");
set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags); 1114b411b363SPhilipp Reisner /* That is not a recursion, only one level */ 1115b411b363SPhilipp Reisner drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE); 1116b411b363SPhilipp Reisner drbd_may_finish_epoch(mdev, epoch, EV_PUT); 1117b411b363SPhilipp Reisner } 1118b411b363SPhilipp Reisner } 1119b411b363SPhilipp Reisner 1120b411b363SPhilipp Reisner return rv; 1121b411b363SPhilipp Reisner } 1122b411b363SPhilipp Reisner 1123b411b363SPhilipp Reisner /** 1124b411b363SPhilipp Reisner * drbd_bump_write_ordering() - Fall back to an other write ordering method 1125b411b363SPhilipp Reisner * @mdev: DRBD device. 1126b411b363SPhilipp Reisner * @wo: Write ordering method to try. 1127b411b363SPhilipp Reisner */ 1128b411b363SPhilipp Reisner void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) __must_hold(local) 1129b411b363SPhilipp Reisner { 1130b411b363SPhilipp Reisner enum write_ordering_e pwo; 1131b411b363SPhilipp Reisner static char *write_ordering_str[] = { 1132b411b363SPhilipp Reisner [WO_none] = "none", 1133b411b363SPhilipp Reisner [WO_drain_io] = "drain", 1134b411b363SPhilipp Reisner [WO_bdev_flush] = "flush", 1135b411b363SPhilipp Reisner [WO_bio_barrier] = "barrier", 1136b411b363SPhilipp Reisner }; 1137b411b363SPhilipp Reisner 1138b411b363SPhilipp Reisner pwo = mdev->write_ordering; 1139b411b363SPhilipp Reisner wo = min(pwo, wo); 1140b411b363SPhilipp Reisner if (wo == WO_bio_barrier && mdev->ldev->dc.no_disk_barrier) 1141b411b363SPhilipp Reisner wo = WO_bdev_flush; 1142b411b363SPhilipp Reisner if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush) 1143b411b363SPhilipp Reisner wo = WO_drain_io; 1144b411b363SPhilipp Reisner if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain) 1145b411b363SPhilipp Reisner wo = WO_none; 1146b411b363SPhilipp Reisner mdev->write_ordering = wo; 1147b411b363SPhilipp Reisner if (pwo != mdev->write_ordering || wo == WO_bio_barrier) 1148b411b363SPhilipp Reisner dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]); 1149b411b363SPhilipp Reisner } 1150b411b363SPhilipp Reisner 1151b411b363SPhilipp Reisner /** 115245bb912bSLars Ellenberg * drbd_submit_ee() 115345bb912bSLars Ellenberg * @mdev: DRBD device. 115445bb912bSLars Ellenberg * @e: epoch entry 115545bb912bSLars Ellenberg * @rw: flag field, see bio->bi_rw 115645bb912bSLars Ellenberg */ 115745bb912bSLars Ellenberg /* TODO allocate from our own bio_set. */ 115845bb912bSLars Ellenberg int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, 115945bb912bSLars Ellenberg const unsigned rw, const int fault_type) 116045bb912bSLars Ellenberg { 116145bb912bSLars Ellenberg struct bio *bios = NULL; 116245bb912bSLars Ellenberg struct bio *bio; 116345bb912bSLars Ellenberg struct page *page = e->pages; 116445bb912bSLars Ellenberg sector_t sector = e->sector; 116545bb912bSLars Ellenberg unsigned ds = e->size; 116645bb912bSLars Ellenberg unsigned n_bios = 0; 116745bb912bSLars Ellenberg unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT; 116845bb912bSLars Ellenberg 116945bb912bSLars Ellenberg /* In most cases, we will only need one bio. But in case the lower 117045bb912bSLars Ellenberg * level restrictions happen to be different at this offset on this 117145bb912bSLars Ellenberg * side than those of the sending peer, we may need to submit the 117245bb912bSLars Ellenberg * request in more than one bio. 
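 * For example, a request the peer sent as one bio may exceed our
 * queue's max_hw_sectors or segment limits; bio_add_page() below then
 * refuses the page that no longer fits and we jump back to next_bio:
 * to continue the same epoch entry in a fresh bio.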
*/ 117345bb912bSLars Ellenberg next_bio: 117445bb912bSLars Ellenberg bio = bio_alloc(GFP_NOIO, nr_pages); 117545bb912bSLars Ellenberg if (!bio) { 117645bb912bSLars Ellenberg dev_err(DEV, "submit_ee: Allocation of a bio failed\n"); 117745bb912bSLars Ellenberg goto fail; 117845bb912bSLars Ellenberg } 117945bb912bSLars Ellenberg /* > e->sector, unless this is the first bio */ 118045bb912bSLars Ellenberg bio->bi_sector = sector; 118145bb912bSLars Ellenberg bio->bi_bdev = mdev->ldev->backing_bdev; 118245bb912bSLars Ellenberg /* we special case some flags in the multi-bio case, see below 11837b6d91daSChristoph Hellwig * (REQ_UNPLUG, REQ_HARDBARRIER) */ 118445bb912bSLars Ellenberg bio->bi_rw = rw; 118545bb912bSLars Ellenberg bio->bi_private = e; 118645bb912bSLars Ellenberg bio->bi_end_io = drbd_endio_sec; 118745bb912bSLars Ellenberg 118845bb912bSLars Ellenberg bio->bi_next = bios; 118945bb912bSLars Ellenberg bios = bio; 119045bb912bSLars Ellenberg ++n_bios; 119145bb912bSLars Ellenberg 119245bb912bSLars Ellenberg page_chain_for_each(page) { 119345bb912bSLars Ellenberg unsigned len = min_t(unsigned, ds, PAGE_SIZE); 119445bb912bSLars Ellenberg if (!bio_add_page(bio, page, len, 0)) { 119545bb912bSLars Ellenberg /* a single page must always be possible! */ 119645bb912bSLars Ellenberg BUG_ON(bio->bi_vcnt == 0); 119745bb912bSLars Ellenberg goto next_bio; 119845bb912bSLars Ellenberg } 119945bb912bSLars Ellenberg ds -= len; 120045bb912bSLars Ellenberg sector += len >> 9; 120145bb912bSLars Ellenberg --nr_pages; 120245bb912bSLars Ellenberg } 120345bb912bSLars Ellenberg D_ASSERT(page == NULL); 120445bb912bSLars Ellenberg D_ASSERT(ds == 0); 120545bb912bSLars Ellenberg 120645bb912bSLars Ellenberg atomic_set(&e->pending_bios, n_bios); 120745bb912bSLars Ellenberg do { 120845bb912bSLars Ellenberg bio = bios; 120945bb912bSLars Ellenberg bios = bios->bi_next; 121045bb912bSLars Ellenberg bio->bi_next = NULL; 121145bb912bSLars Ellenberg 12127b6d91daSChristoph Hellwig /* strip off REQ_UNPLUG unless it is the last bio */ 121345bb912bSLars Ellenberg if (bios) 12147b6d91daSChristoph Hellwig bio->bi_rw &= ~REQ_UNPLUG; 121545bb912bSLars Ellenberg 121645bb912bSLars Ellenberg drbd_generic_make_request(mdev, fault_type, bio); 121745bb912bSLars Ellenberg 12187b6d91daSChristoph Hellwig /* strip off REQ_HARDBARRIER, 121945bb912bSLars Ellenberg * unless it is the first or last bio */ 122045bb912bSLars Ellenberg if (bios && bios->bi_next) 12217b6d91daSChristoph Hellwig bios->bi_rw &= ~REQ_HARDBARRIER; 122245bb912bSLars Ellenberg } while (bios); 122345bb912bSLars Ellenberg maybe_kick_lo(mdev); 122445bb912bSLars Ellenberg return 0; 122545bb912bSLars Ellenberg 122645bb912bSLars Ellenberg fail: 122745bb912bSLars Ellenberg while (bios) { 122845bb912bSLars Ellenberg bio = bios; 122945bb912bSLars Ellenberg bios = bios->bi_next; 123045bb912bSLars Ellenberg bio_put(bio); 123145bb912bSLars Ellenberg } 123245bb912bSLars Ellenberg return -ENOMEM; 123345bb912bSLars Ellenberg } 123445bb912bSLars Ellenberg 123545bb912bSLars Ellenberg /** 12367b6d91daSChristoph Hellwig * w_e_reissue() - Worker callback; Resubmit a bio, without REQ_HARDBARRIER set 1237b411b363SPhilipp Reisner * @mdev: DRBD device. 1238b411b363SPhilipp Reisner * @w: work object. 
1239b411b363SPhilipp Reisner * @cancel: The connection will be closed anyways (unused in this callback) 1240b411b363SPhilipp Reisner */ 1241b411b363SPhilipp Reisner int w_e_reissue(struct drbd_conf *mdev, struct drbd_work *w, int cancel) __releases(local) 1242b411b363SPhilipp Reisner { 1243b411b363SPhilipp Reisner struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w; 1244b411b363SPhilipp Reisner /* We leave DE_CONTAINS_A_BARRIER and EE_IS_BARRIER in place, 1245b411b363SPhilipp Reisner (and DE_BARRIER_IN_NEXT_EPOCH_ISSUED in the previous Epoch) 1246b411b363SPhilipp Reisner so that we can finish that epoch in drbd_may_finish_epoch(). 1247b411b363SPhilipp Reisner That is necessary if we already have a long chain of Epochs, before 12487b6d91daSChristoph Hellwig we realize that REQ_HARDBARRIER is actually not supported */ 1249b411b363SPhilipp Reisner 1250b411b363SPhilipp Reisner /* As long as the -ENOTSUPP on the barrier is reported immediately 1251b411b363SPhilipp Reisner that will never trigger. If it is reported late, we will just 1252b411b363SPhilipp Reisner print that warning and continue correctly for all future requests 1253b411b363SPhilipp Reisner with WO_bdev_flush */ 1254b411b363SPhilipp Reisner if (previous_epoch(mdev, e->epoch)) 1255b411b363SPhilipp Reisner dev_warn(DEV, "Write ordering was not enforced (one time event)\n"); 1256b411b363SPhilipp Reisner 1257b411b363SPhilipp Reisner /* we still have a local reference, 1258b411b363SPhilipp Reisner * get_ldev was done in receive_Data. */ 1259b411b363SPhilipp Reisner 1260b411b363SPhilipp Reisner e->w.cb = e_end_block; 126145bb912bSLars Ellenberg if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_DT_WR) != 0) { 126245bb912bSLars Ellenberg /* drbd_submit_ee fails for one reason only: 126345bb912bSLars Ellenberg * if was not able to allocate sufficient bios. 126445bb912bSLars Ellenberg * requeue, try again later. */ 126545bb912bSLars Ellenberg e->w.cb = w_e_reissue; 126645bb912bSLars Ellenberg drbd_queue_work(&mdev->data.work, &e->w); 126745bb912bSLars Ellenberg } 1268b411b363SPhilipp Reisner return 1; 1269b411b363SPhilipp Reisner } 1270b411b363SPhilipp Reisner 1271b411b363SPhilipp Reisner static int receive_Barrier(struct drbd_conf *mdev, struct p_header *h) 1272b411b363SPhilipp Reisner { 1273b411b363SPhilipp Reisner int rv, issue_flush; 1274b411b363SPhilipp Reisner struct p_barrier *p = (struct p_barrier *)h; 1275b411b363SPhilipp Reisner struct drbd_epoch *epoch; 1276b411b363SPhilipp Reisner 1277b411b363SPhilipp Reisner ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE; 1278b411b363SPhilipp Reisner 1279b411b363SPhilipp Reisner rv = drbd_recv(mdev, h->payload, h->length); 1280b411b363SPhilipp Reisner ERR_IF(rv != h->length) return FALSE; 1281b411b363SPhilipp Reisner 1282b411b363SPhilipp Reisner inc_unacked(mdev); 1283b411b363SPhilipp Reisner 1284b411b363SPhilipp Reisner if (mdev->net_conf->wire_protocol != DRBD_PROT_C) 1285b411b363SPhilipp Reisner drbd_kick_lo(mdev); 1286b411b363SPhilipp Reisner 1287b411b363SPhilipp Reisner mdev->current_epoch->barrier_nr = p->barrier; 1288b411b363SPhilipp Reisner rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR); 1289b411b363SPhilipp Reisner 1290b411b363SPhilipp Reisner /* P_BARRIER_ACK may imply that the corresponding extent is dropped from 1291b411b363SPhilipp Reisner * the activity log, which means it would not be resynced in case the 1292b411b363SPhilipp Reisner * R_PRIMARY crashes now. 
1293b411b363SPhilipp Reisner * Therefore we must send the barrier_ack after the barrier request was 1294b411b363SPhilipp Reisner * completed. */ 1295b411b363SPhilipp Reisner switch (mdev->write_ordering) { 1296b411b363SPhilipp Reisner case WO_bio_barrier: 1297b411b363SPhilipp Reisner case WO_none: 1298b411b363SPhilipp Reisner if (rv == FE_RECYCLED) 1299b411b363SPhilipp Reisner return TRUE; 1300b411b363SPhilipp Reisner break; 1301b411b363SPhilipp Reisner 1302b411b363SPhilipp Reisner case WO_bdev_flush: 1303b411b363SPhilipp Reisner case WO_drain_io: 1304367a8d73SPhilipp Reisner if (rv == FE_STILL_LIVE) { 1305b411b363SPhilipp Reisner set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags); 1306b411b363SPhilipp Reisner drbd_wait_ee_list_empty(mdev, &mdev->active_ee); 1307b411b363SPhilipp Reisner rv = drbd_flush_after_epoch(mdev, mdev->current_epoch); 1308367a8d73SPhilipp Reisner } 1309b411b363SPhilipp Reisner if (rv == FE_RECYCLED) 1310b411b363SPhilipp Reisner return TRUE; 1311b411b363SPhilipp Reisner 1312b411b363SPhilipp Reisner /* The asender will send all the ACKs and barrier ACKs out, since 1313b411b363SPhilipp Reisner all EEs moved from the active_ee to the done_ee. We need to 1314b411b363SPhilipp Reisner provide a new epoch object for the EEs that come in soon */ 1315b411b363SPhilipp Reisner break; 1316b411b363SPhilipp Reisner } 1317b411b363SPhilipp Reisner 1318b411b363SPhilipp Reisner /* receiver context, in the writeout path of the other node. 1319b411b363SPhilipp Reisner * avoid potential distributed deadlock */ 1320b411b363SPhilipp Reisner epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO); 1321b411b363SPhilipp Reisner if (!epoch) { 1322b411b363SPhilipp Reisner dev_warn(DEV, "Allocation of an epoch failed, slowing down\n"); 1323d3db7b48SDan Carpenter issue_flush = !test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags); 1324b411b363SPhilipp Reisner drbd_wait_ee_list_empty(mdev, &mdev->active_ee); 1325b411b363SPhilipp Reisner if (issue_flush) { 1326b411b363SPhilipp Reisner rv = drbd_flush_after_epoch(mdev, mdev->current_epoch); 1327b411b363SPhilipp Reisner if (rv == FE_RECYCLED) 1328b411b363SPhilipp Reisner return TRUE; 1329b411b363SPhilipp Reisner } 1330b411b363SPhilipp Reisner 1331b411b363SPhilipp Reisner drbd_wait_ee_list_empty(mdev, &mdev->done_ee); 1332b411b363SPhilipp Reisner 1333b411b363SPhilipp Reisner return TRUE; 1334b411b363SPhilipp Reisner } 1335b411b363SPhilipp Reisner 1336b411b363SPhilipp Reisner epoch->flags = 0; 1337b411b363SPhilipp Reisner atomic_set(&epoch->epoch_size, 0); 1338b411b363SPhilipp Reisner atomic_set(&epoch->active, 0); 1339b411b363SPhilipp Reisner 1340b411b363SPhilipp Reisner spin_lock(&mdev->epoch_lock); 1341b411b363SPhilipp Reisner if (atomic_read(&mdev->current_epoch->epoch_size)) { 1342b411b363SPhilipp Reisner list_add(&epoch->list, &mdev->current_epoch->list); 1343b411b363SPhilipp Reisner mdev->current_epoch = epoch; 1344b411b363SPhilipp Reisner mdev->epochs++; 1345b411b363SPhilipp Reisner } else { 1346b411b363SPhilipp Reisner /* The current_epoch got recycled while we allocated this one... 
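 * i.e. its epoch_size dropped back to 0 in the meantime, so the still
 * empty current_epoch can simply be reused and the epoch allocated
 * above is not needed after all.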
*/ 1347b411b363SPhilipp Reisner kfree(epoch); 1348b411b363SPhilipp Reisner } 1349b411b363SPhilipp Reisner spin_unlock(&mdev->epoch_lock); 1350b411b363SPhilipp Reisner 1351b411b363SPhilipp Reisner return TRUE; 1352b411b363SPhilipp Reisner } 1353b411b363SPhilipp Reisner 1354b411b363SPhilipp Reisner /* used from receive_RSDataReply (recv_resync_read) 1355b411b363SPhilipp Reisner * and from receive_Data */ 1356b411b363SPhilipp Reisner static struct drbd_epoch_entry * 1357b411b363SPhilipp Reisner read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __must_hold(local) 1358b411b363SPhilipp Reisner { 13596666032aSLars Ellenberg const sector_t capacity = drbd_get_capacity(mdev->this_bdev); 1360b411b363SPhilipp Reisner struct drbd_epoch_entry *e; 1361b411b363SPhilipp Reisner struct page *page; 136245bb912bSLars Ellenberg int dgs, ds, rr; 1363b411b363SPhilipp Reisner void *dig_in = mdev->int_dig_in; 1364b411b363SPhilipp Reisner void *dig_vv = mdev->int_dig_vv; 13656b4388acSPhilipp Reisner unsigned long *data; 1366b411b363SPhilipp Reisner 1367b411b363SPhilipp Reisner dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ? 1368b411b363SPhilipp Reisner crypto_hash_digestsize(mdev->integrity_r_tfm) : 0; 1369b411b363SPhilipp Reisner 1370b411b363SPhilipp Reisner if (dgs) { 1371b411b363SPhilipp Reisner rr = drbd_recv(mdev, dig_in, dgs); 1372b411b363SPhilipp Reisner if (rr != dgs) { 1373b411b363SPhilipp Reisner dev_warn(DEV, "short read receiving data digest: read %d expected %d\n", 1374b411b363SPhilipp Reisner rr, dgs); 1375b411b363SPhilipp Reisner return NULL; 1376b411b363SPhilipp Reisner } 1377b411b363SPhilipp Reisner } 1378b411b363SPhilipp Reisner 1379b411b363SPhilipp Reisner data_size -= dgs; 1380b411b363SPhilipp Reisner 1381b411b363SPhilipp Reisner ERR_IF(data_size & 0x1ff) return NULL; 1382b411b363SPhilipp Reisner ERR_IF(data_size > DRBD_MAX_SEGMENT_SIZE) return NULL; 1383b411b363SPhilipp Reisner 13846666032aSLars Ellenberg /* even though we trust out peer, 13856666032aSLars Ellenberg * we sometimes have to double check. */ 13866666032aSLars Ellenberg if (sector + (data_size>>9) > capacity) { 13876666032aSLars Ellenberg dev_err(DEV, "capacity: %llus < sector: %llus + size: %u\n", 13886666032aSLars Ellenberg (unsigned long long)capacity, 13896666032aSLars Ellenberg (unsigned long long)sector, data_size); 13906666032aSLars Ellenberg return NULL; 13916666032aSLars Ellenberg } 13926666032aSLars Ellenberg 1393b411b363SPhilipp Reisner /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD 1394b411b363SPhilipp Reisner * "criss-cross" setup, that might cause write-out on some other DRBD, 1395b411b363SPhilipp Reisner * which in turn might block on the other node at this very place. 
*/ 1396b411b363SPhilipp Reisner e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO); 1397b411b363SPhilipp Reisner if (!e) 1398b411b363SPhilipp Reisner return NULL; 139945bb912bSLars Ellenberg 1400b411b363SPhilipp Reisner ds = data_size; 140145bb912bSLars Ellenberg page = e->pages; 140245bb912bSLars Ellenberg page_chain_for_each(page) { 140345bb912bSLars Ellenberg unsigned len = min_t(int, ds, PAGE_SIZE); 14046b4388acSPhilipp Reisner data = kmap(page); 140545bb912bSLars Ellenberg rr = drbd_recv(mdev, data, len); 14066b4388acSPhilipp Reisner if (FAULT_ACTIVE(mdev, DRBD_FAULT_RECEIVE)) { 14076b4388acSPhilipp Reisner dev_err(DEV, "Fault injection: Corrupting data on receive\n"); 14086b4388acSPhilipp Reisner data[0] = data[0] ^ (unsigned long)-1; 14096b4388acSPhilipp Reisner } 1410b411b363SPhilipp Reisner kunmap(page); 141145bb912bSLars Ellenberg if (rr != len) { 1412b411b363SPhilipp Reisner drbd_free_ee(mdev, e); 1413b411b363SPhilipp Reisner dev_warn(DEV, "short read receiving data: read %d expected %d\n", 141445bb912bSLars Ellenberg rr, len); 1415b411b363SPhilipp Reisner return NULL; 1416b411b363SPhilipp Reisner } 1417b411b363SPhilipp Reisner ds -= rr; 1418b411b363SPhilipp Reisner } 1419b411b363SPhilipp Reisner 1420b411b363SPhilipp Reisner if (dgs) { 142145bb912bSLars Ellenberg drbd_csum_ee(mdev, mdev->integrity_r_tfm, e, dig_vv); 1422b411b363SPhilipp Reisner if (memcmp(dig_in, dig_vv, dgs)) { 1423b411b363SPhilipp Reisner dev_err(DEV, "Digest integrity check FAILED.\n"); 1424b411b363SPhilipp Reisner drbd_bcast_ee(mdev, "digest failed", 1425b411b363SPhilipp Reisner dgs, dig_in, dig_vv, e); 1426b411b363SPhilipp Reisner drbd_free_ee(mdev, e); 1427b411b363SPhilipp Reisner return NULL; 1428b411b363SPhilipp Reisner } 1429b411b363SPhilipp Reisner } 1430b411b363SPhilipp Reisner mdev->recv_cnt += data_size>>9; 1431b411b363SPhilipp Reisner return e; 1432b411b363SPhilipp Reisner } 1433b411b363SPhilipp Reisner 1434b411b363SPhilipp Reisner /* drbd_drain_block() just takes a data block 1435b411b363SPhilipp Reisner * out of the socket input buffer, and discards it. 
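 * This is used on the paths where we cannot store the payload, e.g.
 * without a usable local disk, but must still consume exactly
 * data_size bytes so the receive stream stays aligned with the packet
 * framing.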
1436b411b363SPhilipp Reisner */ 1437b411b363SPhilipp Reisner static int drbd_drain_block(struct drbd_conf *mdev, int data_size) 1438b411b363SPhilipp Reisner { 1439b411b363SPhilipp Reisner struct page *page; 1440b411b363SPhilipp Reisner int rr, rv = 1; 1441b411b363SPhilipp Reisner void *data; 1442b411b363SPhilipp Reisner 1443c3470cdeSLars Ellenberg if (!data_size) 1444c3470cdeSLars Ellenberg return TRUE; 1445c3470cdeSLars Ellenberg 144645bb912bSLars Ellenberg page = drbd_pp_alloc(mdev, 1, 1); 1447b411b363SPhilipp Reisner 1448b411b363SPhilipp Reisner data = kmap(page); 1449b411b363SPhilipp Reisner while (data_size) { 1450b411b363SPhilipp Reisner rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE)); 1451b411b363SPhilipp Reisner if (rr != min_t(int, data_size, PAGE_SIZE)) { 1452b411b363SPhilipp Reisner rv = 0; 1453b411b363SPhilipp Reisner dev_warn(DEV, "short read receiving data: read %d expected %d\n", 1454b411b363SPhilipp Reisner rr, min_t(int, data_size, PAGE_SIZE)); 1455b411b363SPhilipp Reisner break; 1456b411b363SPhilipp Reisner } 1457b411b363SPhilipp Reisner data_size -= rr; 1458b411b363SPhilipp Reisner } 1459b411b363SPhilipp Reisner kunmap(page); 1460b411b363SPhilipp Reisner drbd_pp_free(mdev, page); 1461b411b363SPhilipp Reisner return rv; 1462b411b363SPhilipp Reisner } 1463b411b363SPhilipp Reisner 1464b411b363SPhilipp Reisner static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req, 1465b411b363SPhilipp Reisner sector_t sector, int data_size) 1466b411b363SPhilipp Reisner { 1467b411b363SPhilipp Reisner struct bio_vec *bvec; 1468b411b363SPhilipp Reisner struct bio *bio; 1469b411b363SPhilipp Reisner int dgs, rr, i, expect; 1470b411b363SPhilipp Reisner void *dig_in = mdev->int_dig_in; 1471b411b363SPhilipp Reisner void *dig_vv = mdev->int_dig_vv; 1472b411b363SPhilipp Reisner 1473b411b363SPhilipp Reisner dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ? 1474b411b363SPhilipp Reisner crypto_hash_digestsize(mdev->integrity_r_tfm) : 0; 1475b411b363SPhilipp Reisner 1476b411b363SPhilipp Reisner if (dgs) { 1477b411b363SPhilipp Reisner rr = drbd_recv(mdev, dig_in, dgs); 1478b411b363SPhilipp Reisner if (rr != dgs) { 1479b411b363SPhilipp Reisner dev_warn(DEV, "short read receiving data reply digest: read %d expected %d\n", 1480b411b363SPhilipp Reisner rr, dgs); 1481b411b363SPhilipp Reisner return 0; 1482b411b363SPhilipp Reisner } 1483b411b363SPhilipp Reisner } 1484b411b363SPhilipp Reisner 1485b411b363SPhilipp Reisner data_size -= dgs; 1486b411b363SPhilipp Reisner 1487b411b363SPhilipp Reisner /* optimistically update recv_cnt. if receiving fails below, 1488b411b363SPhilipp Reisner * we disconnect anyways, and counters will be reset. 
*/ 1489b411b363SPhilipp Reisner mdev->recv_cnt += data_size>>9; 1490b411b363SPhilipp Reisner 1491b411b363SPhilipp Reisner bio = req->master_bio; 1492b411b363SPhilipp Reisner D_ASSERT(sector == bio->bi_sector); 1493b411b363SPhilipp Reisner 1494b411b363SPhilipp Reisner bio_for_each_segment(bvec, bio, i) { 1495b411b363SPhilipp Reisner expect = min_t(int, data_size, bvec->bv_len); 1496b411b363SPhilipp Reisner rr = drbd_recv(mdev, 1497b411b363SPhilipp Reisner kmap(bvec->bv_page)+bvec->bv_offset, 1498b411b363SPhilipp Reisner expect); 1499b411b363SPhilipp Reisner kunmap(bvec->bv_page); 1500b411b363SPhilipp Reisner if (rr != expect) { 1501b411b363SPhilipp Reisner dev_warn(DEV, "short read receiving data reply: " 1502b411b363SPhilipp Reisner "read %d expected %d\n", 1503b411b363SPhilipp Reisner rr, expect); 1504b411b363SPhilipp Reisner return 0; 1505b411b363SPhilipp Reisner } 1506b411b363SPhilipp Reisner data_size -= rr; 1507b411b363SPhilipp Reisner } 1508b411b363SPhilipp Reisner 1509b411b363SPhilipp Reisner if (dgs) { 151045bb912bSLars Ellenberg drbd_csum_bio(mdev, mdev->integrity_r_tfm, bio, dig_vv); 1511b411b363SPhilipp Reisner if (memcmp(dig_in, dig_vv, dgs)) { 1512b411b363SPhilipp Reisner dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n"); 1513b411b363SPhilipp Reisner return 0; 1514b411b363SPhilipp Reisner } 1515b411b363SPhilipp Reisner } 1516b411b363SPhilipp Reisner 1517b411b363SPhilipp Reisner D_ASSERT(data_size == 0); 1518b411b363SPhilipp Reisner return 1; 1519b411b363SPhilipp Reisner } 1520b411b363SPhilipp Reisner 1521b411b363SPhilipp Reisner /* e_end_resync_block() is called via 1522b411b363SPhilipp Reisner * drbd_process_done_ee() by asender only */ 1523b411b363SPhilipp Reisner static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int unused) 1524b411b363SPhilipp Reisner { 1525b411b363SPhilipp Reisner struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w; 1526b411b363SPhilipp Reisner sector_t sector = e->sector; 1527b411b363SPhilipp Reisner int ok; 1528b411b363SPhilipp Reisner 1529b411b363SPhilipp Reisner D_ASSERT(hlist_unhashed(&e->colision)); 1530b411b363SPhilipp Reisner 153145bb912bSLars Ellenberg if (likely((e->flags & EE_WAS_ERROR) == 0)) { 1532b411b363SPhilipp Reisner drbd_set_in_sync(mdev, sector, e->size); 1533b411b363SPhilipp Reisner ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, e); 1534b411b363SPhilipp Reisner } else { 1535b411b363SPhilipp Reisner /* Record failure to sync */ 1536b411b363SPhilipp Reisner drbd_rs_failed_io(mdev, sector, e->size); 1537b411b363SPhilipp Reisner 1538b411b363SPhilipp Reisner ok = drbd_send_ack(mdev, P_NEG_ACK, e); 1539b411b363SPhilipp Reisner } 1540b411b363SPhilipp Reisner dec_unacked(mdev); 1541b411b363SPhilipp Reisner 1542b411b363SPhilipp Reisner return ok; 1543b411b363SPhilipp Reisner } 1544b411b363SPhilipp Reisner 1545b411b363SPhilipp Reisner static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local) 1546b411b363SPhilipp Reisner { 1547b411b363SPhilipp Reisner struct drbd_epoch_entry *e; 1548b411b363SPhilipp Reisner 1549b411b363SPhilipp Reisner e = read_in_block(mdev, ID_SYNCER, sector, data_size); 155045bb912bSLars Ellenberg if (!e) 155145bb912bSLars Ellenberg goto fail; 1552b411b363SPhilipp Reisner 1553b411b363SPhilipp Reisner dec_rs_pending(mdev); 1554b411b363SPhilipp Reisner 1555b411b363SPhilipp Reisner inc_unacked(mdev); 1556b411b363SPhilipp Reisner /* corresponding dec_unacked() in e_end_resync_block() 1557b411b363SPhilipp Reisner * respective _drbd_clear_done_ee */ 
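/* Once drbd_submit_ee() succeeds, completion is reported through
 * e_end_resync_block() (via drbd_process_done_ee() in the asender),
 * which does the matching dec_unacked(); the local disk reference our
 * caller holds is dropped in the write completion path.  If the
 * submit fails, we unwind here instead: free the entry and drop that
 * reference in the fail: path below. */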
1558b411b363SPhilipp Reisner 155945bb912bSLars Ellenberg e->w.cb = e_end_resync_block; 156045bb912bSLars Ellenberg 1561b411b363SPhilipp Reisner spin_lock_irq(&mdev->req_lock); 1562b411b363SPhilipp Reisner list_add(&e->w.list, &mdev->sync_ee); 1563b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 1564b411b363SPhilipp Reisner 156545bb912bSLars Ellenberg if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0) 1566b411b363SPhilipp Reisner return TRUE; 156745bb912bSLars Ellenberg 156845bb912bSLars Ellenberg drbd_free_ee(mdev, e); 156945bb912bSLars Ellenberg fail: 157045bb912bSLars Ellenberg put_ldev(mdev); 157145bb912bSLars Ellenberg return FALSE; 1572b411b363SPhilipp Reisner } 1573b411b363SPhilipp Reisner 1574b411b363SPhilipp Reisner static int receive_DataReply(struct drbd_conf *mdev, struct p_header *h) 1575b411b363SPhilipp Reisner { 1576b411b363SPhilipp Reisner struct drbd_request *req; 1577b411b363SPhilipp Reisner sector_t sector; 1578b411b363SPhilipp Reisner unsigned int header_size, data_size; 1579b411b363SPhilipp Reisner int ok; 1580b411b363SPhilipp Reisner struct p_data *p = (struct p_data *)h; 1581b411b363SPhilipp Reisner 1582b411b363SPhilipp Reisner header_size = sizeof(*p) - sizeof(*h); 1583b411b363SPhilipp Reisner data_size = h->length - header_size; 1584b411b363SPhilipp Reisner 1585b411b363SPhilipp Reisner ERR_IF(data_size == 0) return FALSE; 1586b411b363SPhilipp Reisner 1587b411b363SPhilipp Reisner if (drbd_recv(mdev, h->payload, header_size) != header_size) 1588b411b363SPhilipp Reisner return FALSE; 1589b411b363SPhilipp Reisner 1590b411b363SPhilipp Reisner sector = be64_to_cpu(p->sector); 1591b411b363SPhilipp Reisner 1592b411b363SPhilipp Reisner spin_lock_irq(&mdev->req_lock); 1593b411b363SPhilipp Reisner req = _ar_id_to_req(mdev, p->block_id, sector); 1594b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 1595b411b363SPhilipp Reisner if (unlikely(!req)) { 1596b411b363SPhilipp Reisner dev_err(DEV, "Got a corrupt block_id/sector pair(1).\n"); 1597b411b363SPhilipp Reisner return FALSE; 1598b411b363SPhilipp Reisner } 1599b411b363SPhilipp Reisner 1600b411b363SPhilipp Reisner /* hlist_del(&req->colision) is done in _req_may_be_done, to avoid 1601b411b363SPhilipp Reisner * special casing it there for the various failure cases. 1602b411b363SPhilipp Reisner * still no race with drbd_fail_pending_reads */ 1603b411b363SPhilipp Reisner ok = recv_dless_read(mdev, req, sector, data_size); 1604b411b363SPhilipp Reisner 1605b411b363SPhilipp Reisner if (ok) 1606b411b363SPhilipp Reisner req_mod(req, data_received); 1607b411b363SPhilipp Reisner /* else: nothing. handled from drbd_disconnect... 
1608b411b363SPhilipp Reisner * I don't think we may complete this just yet 1609b411b363SPhilipp Reisner * in case we are "on-disconnect: freeze" */ 1610b411b363SPhilipp Reisner 1611b411b363SPhilipp Reisner return ok; 1612b411b363SPhilipp Reisner } 1613b411b363SPhilipp Reisner 1614b411b363SPhilipp Reisner static int receive_RSDataReply(struct drbd_conf *mdev, struct p_header *h) 1615b411b363SPhilipp Reisner { 1616b411b363SPhilipp Reisner sector_t sector; 1617b411b363SPhilipp Reisner unsigned int header_size, data_size; 1618b411b363SPhilipp Reisner int ok; 1619b411b363SPhilipp Reisner struct p_data *p = (struct p_data *)h; 1620b411b363SPhilipp Reisner 1621b411b363SPhilipp Reisner header_size = sizeof(*p) - sizeof(*h); 1622b411b363SPhilipp Reisner data_size = h->length - header_size; 1623b411b363SPhilipp Reisner 1624b411b363SPhilipp Reisner ERR_IF(data_size == 0) return FALSE; 1625b411b363SPhilipp Reisner 1626b411b363SPhilipp Reisner if (drbd_recv(mdev, h->payload, header_size) != header_size) 1627b411b363SPhilipp Reisner return FALSE; 1628b411b363SPhilipp Reisner 1629b411b363SPhilipp Reisner sector = be64_to_cpu(p->sector); 1630b411b363SPhilipp Reisner D_ASSERT(p->block_id == ID_SYNCER); 1631b411b363SPhilipp Reisner 1632b411b363SPhilipp Reisner if (get_ldev(mdev)) { 1633b411b363SPhilipp Reisner /* data is submitted to disk within recv_resync_read. 1634b411b363SPhilipp Reisner * corresponding put_ldev done below on error, 1635b411b363SPhilipp Reisner * or in drbd_endio_write_sec. */ 1636b411b363SPhilipp Reisner ok = recv_resync_read(mdev, sector, data_size); 1637b411b363SPhilipp Reisner } else { 1638b411b363SPhilipp Reisner if (__ratelimit(&drbd_ratelimit_state)) 1639b411b363SPhilipp Reisner dev_err(DEV, "Can not write resync data to local disk.\n"); 1640b411b363SPhilipp Reisner 1641b411b363SPhilipp Reisner ok = drbd_drain_block(mdev, data_size); 1642b411b363SPhilipp Reisner 1643b411b363SPhilipp Reisner drbd_send_ack_dp(mdev, P_NEG_ACK, p); 1644b411b363SPhilipp Reisner } 1645b411b363SPhilipp Reisner 1646b411b363SPhilipp Reisner return ok; 1647b411b363SPhilipp Reisner } 1648b411b363SPhilipp Reisner 1649b411b363SPhilipp Reisner /* e_end_block() is called via drbd_process_done_ee(). 1650b411b363SPhilipp Reisner * this means this function only runs in the asender thread 1651b411b363SPhilipp Reisner */ 1652b411b363SPhilipp Reisner static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel) 1653b411b363SPhilipp Reisner { 1654b411b363SPhilipp Reisner struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w; 1655b411b363SPhilipp Reisner sector_t sector = e->sector; 1656b411b363SPhilipp Reisner struct drbd_epoch *epoch; 1657b411b363SPhilipp Reisner int ok = 1, pcmd; 1658b411b363SPhilipp Reisner 1659b411b363SPhilipp Reisner if (e->flags & EE_IS_BARRIER) { 1660b411b363SPhilipp Reisner epoch = previous_epoch(mdev, e->epoch); 1661b411b363SPhilipp Reisner if (epoch) 1662b411b363SPhilipp Reisner drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE + (cancel ? EV_CLEANUP : 0)); 1663b411b363SPhilipp Reisner } 1664b411b363SPhilipp Reisner 1665b411b363SPhilipp Reisner if (mdev->net_conf->wire_protocol == DRBD_PROT_C) { 166645bb912bSLars Ellenberg if (likely((e->flags & EE_WAS_ERROR) == 0)) { 1667b411b363SPhilipp Reisner pcmd = (mdev->state.conn >= C_SYNC_SOURCE && 1668b411b363SPhilipp Reisner mdev->state.conn <= C_PAUSED_SYNC_T && 1669b411b363SPhilipp Reisner e->flags & EE_MAY_SET_IN_SYNC) ? 
1670b411b363SPhilipp Reisner P_RS_WRITE_ACK : P_WRITE_ACK; 1671b411b363SPhilipp Reisner ok &= drbd_send_ack(mdev, pcmd, e); 1672b411b363SPhilipp Reisner if (pcmd == P_RS_WRITE_ACK) 1673b411b363SPhilipp Reisner drbd_set_in_sync(mdev, sector, e->size); 1674b411b363SPhilipp Reisner } else { 1675b411b363SPhilipp Reisner ok = drbd_send_ack(mdev, P_NEG_ACK, e); 1676b411b363SPhilipp Reisner /* we expect it to be marked out of sync anyways... 1677b411b363SPhilipp Reisner * maybe assert this? */ 1678b411b363SPhilipp Reisner } 1679b411b363SPhilipp Reisner dec_unacked(mdev); 1680b411b363SPhilipp Reisner } 1681b411b363SPhilipp Reisner /* we delete from the conflict detection hash _after_ we sent out the 1682b411b363SPhilipp Reisner * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */ 1683b411b363SPhilipp Reisner if (mdev->net_conf->two_primaries) { 1684b411b363SPhilipp Reisner spin_lock_irq(&mdev->req_lock); 1685b411b363SPhilipp Reisner D_ASSERT(!hlist_unhashed(&e->colision)); 1686b411b363SPhilipp Reisner hlist_del_init(&e->colision); 1687b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 1688b411b363SPhilipp Reisner } else { 1689b411b363SPhilipp Reisner D_ASSERT(hlist_unhashed(&e->colision)); 1690b411b363SPhilipp Reisner } 1691b411b363SPhilipp Reisner 1692b411b363SPhilipp Reisner drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0)); 1693b411b363SPhilipp Reisner 1694b411b363SPhilipp Reisner return ok; 1695b411b363SPhilipp Reisner } 1696b411b363SPhilipp Reisner 1697b411b363SPhilipp Reisner static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int unused) 1698b411b363SPhilipp Reisner { 1699b411b363SPhilipp Reisner struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w; 1700b411b363SPhilipp Reisner int ok = 1; 1701b411b363SPhilipp Reisner 1702b411b363SPhilipp Reisner D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C); 1703b411b363SPhilipp Reisner ok = drbd_send_ack(mdev, P_DISCARD_ACK, e); 1704b411b363SPhilipp Reisner 1705b411b363SPhilipp Reisner spin_lock_irq(&mdev->req_lock); 1706b411b363SPhilipp Reisner D_ASSERT(!hlist_unhashed(&e->colision)); 1707b411b363SPhilipp Reisner hlist_del_init(&e->colision); 1708b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 1709b411b363SPhilipp Reisner 1710b411b363SPhilipp Reisner dec_unacked(mdev); 1711b411b363SPhilipp Reisner 1712b411b363SPhilipp Reisner return ok; 1713b411b363SPhilipp Reisner } 1714b411b363SPhilipp Reisner 1715b411b363SPhilipp Reisner /* Called from receive_Data. 1716b411b363SPhilipp Reisner * Synchronize packets on sock with packets on msock. 1717b411b363SPhilipp Reisner * 1718b411b363SPhilipp Reisner * This is here so even when a P_DATA packet traveling via sock overtook an Ack 1719b411b363SPhilipp Reisner * packet traveling on msock, they are still processed in the order they have 1720b411b363SPhilipp Reisner * been sent. 1721b411b363SPhilipp Reisner * 1722b411b363SPhilipp Reisner * Note: we don't care for Ack packets overtaking P_DATA packets. 1723b411b363SPhilipp Reisner * 1724b411b363SPhilipp Reisner * In case packet_seq is larger than mdev->peer_seq number, there are 1725b411b363SPhilipp Reisner * outstanding packets on the msock. We wait for them to arrive. 1726b411b363SPhilipp Reisner * In case we are the logically next packet, we update mdev->peer_seq 1727b411b363SPhilipp Reisner * ourselves. Correctly handles 32bit wrap around. 
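 *
 * A wrap-safe ordering test can be written as a signed difference.
 * As a sketch only (seq_le() is the helper used below; its real
 * definition lives elsewhere in the driver):
 *
 *	static inline int seq_le_sketch(u32 a, u32 b)
 *	{
 *		return (s32)(a - b) <= 0;
 *	}
 *
 * e.g. with peer_seq == 0xffffffff the logically next packet carries
 * packet_seq == 0, and seq_le(0, peer_seq + 1) still holds since the
 * u32 addition wraps to 0; a stale packet_seq of 0xfffffff0 against
 * peer_seq == 5 still counts as already seen, because
 * (s32)(0xfffffff0 - 6) is negative.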
1728b411b363SPhilipp Reisner * 1729b411b363SPhilipp Reisner * Assume we have a 10 GBit connection, that is about 1<<30 byte per second, 1730b411b363SPhilipp Reisner * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds 1731b411b363SPhilipp Reisner * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have 1732b411b363SPhilipp Reisner * 1<<9 == 512 seconds aka ages for the 32bit wrap around... 1733b411b363SPhilipp Reisner * 1734b411b363SPhilipp Reisner * returns 0 if we may process the packet, 1735b411b363SPhilipp Reisner * -ERESTARTSYS if we were interrupted (by disconnect signal). */ 1736b411b363SPhilipp Reisner static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq) 1737b411b363SPhilipp Reisner { 1738b411b363SPhilipp Reisner DEFINE_WAIT(wait); 1739b411b363SPhilipp Reisner unsigned int p_seq; 1740b411b363SPhilipp Reisner long timeout; 1741b411b363SPhilipp Reisner int ret = 0; 1742b411b363SPhilipp Reisner spin_lock(&mdev->peer_seq_lock); 1743b411b363SPhilipp Reisner for (;;) { 1744b411b363SPhilipp Reisner prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE); 1745b411b363SPhilipp Reisner if (seq_le(packet_seq, mdev->peer_seq+1)) 1746b411b363SPhilipp Reisner break; 1747b411b363SPhilipp Reisner if (signal_pending(current)) { 1748b411b363SPhilipp Reisner ret = -ERESTARTSYS; 1749b411b363SPhilipp Reisner break; 1750b411b363SPhilipp Reisner } 1751b411b363SPhilipp Reisner p_seq = mdev->peer_seq; 1752b411b363SPhilipp Reisner spin_unlock(&mdev->peer_seq_lock); 1753b411b363SPhilipp Reisner timeout = schedule_timeout(30*HZ); 1754b411b363SPhilipp Reisner spin_lock(&mdev->peer_seq_lock); 1755b411b363SPhilipp Reisner if (timeout == 0 && p_seq == mdev->peer_seq) { 1756b411b363SPhilipp Reisner ret = -ETIMEDOUT; 1757b411b363SPhilipp Reisner dev_err(DEV, "ASSERT FAILED waited 30 seconds for sequence update, forcing reconnect\n"); 1758b411b363SPhilipp Reisner break; 1759b411b363SPhilipp Reisner } 1760b411b363SPhilipp Reisner } 1761b411b363SPhilipp Reisner finish_wait(&mdev->seq_wait, &wait); 1762b411b363SPhilipp Reisner if (mdev->peer_seq+1 == packet_seq) 1763b411b363SPhilipp Reisner mdev->peer_seq++; 1764b411b363SPhilipp Reisner spin_unlock(&mdev->peer_seq_lock); 1765b411b363SPhilipp Reisner return ret; 1766b411b363SPhilipp Reisner } 1767b411b363SPhilipp Reisner 1768b411b363SPhilipp Reisner /* mirrored write */ 1769b411b363SPhilipp Reisner static int receive_Data(struct drbd_conf *mdev, struct p_header *h) 1770b411b363SPhilipp Reisner { 1771b411b363SPhilipp Reisner sector_t sector; 1772b411b363SPhilipp Reisner struct drbd_epoch_entry *e; 1773b411b363SPhilipp Reisner struct p_data *p = (struct p_data *)h; 1774b411b363SPhilipp Reisner int header_size, data_size; 1775b411b363SPhilipp Reisner int rw = WRITE; 1776b411b363SPhilipp Reisner u32 dp_flags; 1777b411b363SPhilipp Reisner 1778b411b363SPhilipp Reisner header_size = sizeof(*p) - sizeof(*h); 1779b411b363SPhilipp Reisner data_size = h->length - header_size; 1780b411b363SPhilipp Reisner 1781b411b363SPhilipp Reisner ERR_IF(data_size == 0) return FALSE; 1782b411b363SPhilipp Reisner 1783b411b363SPhilipp Reisner if (drbd_recv(mdev, h->payload, header_size) != header_size) 1784b411b363SPhilipp Reisner return FALSE; 1785b411b363SPhilipp Reisner 1786b411b363SPhilipp Reisner if (!get_ldev(mdev)) { 1787b411b363SPhilipp Reisner if (__ratelimit(&drbd_ratelimit_state)) 1788b411b363SPhilipp Reisner dev_err(DEV, "Can not write mirrored data block " 1789b411b363SPhilipp Reisner "to local disk.\n"); 
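/* No usable local disk: still keep the peer's bookkeeping consistent.
 * Advance peer_seq as if the write had been processed, negatively
 * acknowledge it, count it in the current epoch, and drain the
 * payload from the socket so the data stream stays in sync. */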
1790b411b363SPhilipp Reisner spin_lock(&mdev->peer_seq_lock); 1791b411b363SPhilipp Reisner if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num)) 1792b411b363SPhilipp Reisner mdev->peer_seq++; 1793b411b363SPhilipp Reisner spin_unlock(&mdev->peer_seq_lock); 1794b411b363SPhilipp Reisner 1795b411b363SPhilipp Reisner drbd_send_ack_dp(mdev, P_NEG_ACK, p); 1796b411b363SPhilipp Reisner atomic_inc(&mdev->current_epoch->epoch_size); 1797b411b363SPhilipp Reisner return drbd_drain_block(mdev, data_size); 1798b411b363SPhilipp Reisner } 1799b411b363SPhilipp Reisner 1800b411b363SPhilipp Reisner /* get_ldev(mdev) successful. 1801b411b363SPhilipp Reisner * Corresponding put_ldev done either below (on various errors), 1802b411b363SPhilipp Reisner * or in drbd_endio_write_sec, if we successfully submit the data at 1803b411b363SPhilipp Reisner * the end of this function. */ 1804b411b363SPhilipp Reisner 1805b411b363SPhilipp Reisner sector = be64_to_cpu(p->sector); 1806b411b363SPhilipp Reisner e = read_in_block(mdev, p->block_id, sector, data_size); 1807b411b363SPhilipp Reisner if (!e) { 1808b411b363SPhilipp Reisner put_ldev(mdev); 1809b411b363SPhilipp Reisner return FALSE; 1810b411b363SPhilipp Reisner } 1811b411b363SPhilipp Reisner 1812b411b363SPhilipp Reisner e->w.cb = e_end_block; 1813b411b363SPhilipp Reisner 1814b411b363SPhilipp Reisner spin_lock(&mdev->epoch_lock); 1815b411b363SPhilipp Reisner e->epoch = mdev->current_epoch; 1816b411b363SPhilipp Reisner atomic_inc(&e->epoch->epoch_size); 1817b411b363SPhilipp Reisner atomic_inc(&e->epoch->active); 1818b411b363SPhilipp Reisner 1819b411b363SPhilipp Reisner if (mdev->write_ordering == WO_bio_barrier && atomic_read(&e->epoch->epoch_size) == 1) { 1820b411b363SPhilipp Reisner struct drbd_epoch *epoch; 1821b411b363SPhilipp Reisner /* Issue a barrier if we start a new epoch, and the previous epoch 1822b411b363SPhilipp Reisner was not a epoch containing a single request which already was 1823b411b363SPhilipp Reisner a Barrier. 
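 * Two cases below: if the previous list entry is this epoch itself,
 * no older epoch is pending and this first write carries the barrier;
 * otherwise we flag the older epoch as having a barrier in its
 * successor and make this write the barrier, unless that older epoch
 * already was such a single-request barrier epoch.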
*/ 1824b411b363SPhilipp Reisner epoch = list_entry(e->epoch->list.prev, struct drbd_epoch, list); 1825b411b363SPhilipp Reisner if (epoch == e->epoch) { 1826b411b363SPhilipp Reisner set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags); 18277b6d91daSChristoph Hellwig rw |= REQ_HARDBARRIER; 1828b411b363SPhilipp Reisner e->flags |= EE_IS_BARRIER; 1829b411b363SPhilipp Reisner } else { 1830b411b363SPhilipp Reisner if (atomic_read(&epoch->epoch_size) > 1 || 1831b411b363SPhilipp Reisner !test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) { 1832b411b363SPhilipp Reisner set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags); 1833b411b363SPhilipp Reisner set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags); 18347b6d91daSChristoph Hellwig rw |= REQ_HARDBARRIER; 1835b411b363SPhilipp Reisner e->flags |= EE_IS_BARRIER; 1836b411b363SPhilipp Reisner } 1837b411b363SPhilipp Reisner } 1838b411b363SPhilipp Reisner } 1839b411b363SPhilipp Reisner spin_unlock(&mdev->epoch_lock); 1840b411b363SPhilipp Reisner 1841b411b363SPhilipp Reisner dp_flags = be32_to_cpu(p->dp_flags); 1842b411b363SPhilipp Reisner if (dp_flags & DP_HARDBARRIER) { 1843b411b363SPhilipp Reisner dev_err(DEV, "ASSERT FAILED would have submitted barrier request\n"); 18447b6d91daSChristoph Hellwig /* rw |= REQ_HARDBARRIER; */ 1845b411b363SPhilipp Reisner } 1846b411b363SPhilipp Reisner if (dp_flags & DP_RW_SYNC) 18477b6d91daSChristoph Hellwig rw |= REQ_SYNC | REQ_UNPLUG; 1848b411b363SPhilipp Reisner if (dp_flags & DP_MAY_SET_IN_SYNC) 1849b411b363SPhilipp Reisner e->flags |= EE_MAY_SET_IN_SYNC; 1850b411b363SPhilipp Reisner 1851b411b363SPhilipp Reisner /* I'm the receiver, I do hold a net_cnt reference. */ 1852b411b363SPhilipp Reisner if (!mdev->net_conf->two_primaries) { 1853b411b363SPhilipp Reisner spin_lock_irq(&mdev->req_lock); 1854b411b363SPhilipp Reisner } else { 1855b411b363SPhilipp Reisner /* don't get the req_lock yet, 1856b411b363SPhilipp Reisner * we may sleep in drbd_wait_peer_seq */ 1857b411b363SPhilipp Reisner const int size = e->size; 1858b411b363SPhilipp Reisner const int discard = test_bit(DISCARD_CONCURRENT, &mdev->flags); 1859b411b363SPhilipp Reisner DEFINE_WAIT(wait); 1860b411b363SPhilipp Reisner struct drbd_request *i; 1861b411b363SPhilipp Reisner struct hlist_node *n; 1862b411b363SPhilipp Reisner struct hlist_head *slot; 1863b411b363SPhilipp Reisner int first; 1864b411b363SPhilipp Reisner 1865b411b363SPhilipp Reisner D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C); 1866b411b363SPhilipp Reisner BUG_ON(mdev->ee_hash == NULL); 1867b411b363SPhilipp Reisner BUG_ON(mdev->tl_hash == NULL); 1868b411b363SPhilipp Reisner 1869b411b363SPhilipp Reisner /* conflict detection and handling: 1870b411b363SPhilipp Reisner * 1. wait on the sequence number, 1871b411b363SPhilipp Reisner * in case this data packet overtook ACK packets. 1872b411b363SPhilipp Reisner * 2. check our hash tables for conflicting requests. 1873b411b363SPhilipp Reisner * we only need to walk the tl_hash, since an ee can not 1874b411b363SPhilipp Reisner * have a conflict with an other ee: on the submitting 1875b411b363SPhilipp Reisner * node, the corresponding req had already been conflicting, 1876b411b363SPhilipp Reisner * and a conflicting req is never sent. 1877b411b363SPhilipp Reisner * 1878b411b363SPhilipp Reisner * Note: for two_primaries, we are protocol C, 1879b411b363SPhilipp Reisner * so there cannot be any request that is DONE 1880b411b363SPhilipp Reisner * but still on the transfer log. 
1881b411b363SPhilipp Reisner * 1882b411b363SPhilipp Reisner * unconditionally add to the ee_hash. 1883b411b363SPhilipp Reisner * 1884b411b363SPhilipp Reisner * if no conflicting request is found: 1885b411b363SPhilipp Reisner * submit. 1886b411b363SPhilipp Reisner * 1887b411b363SPhilipp Reisner * if any conflicting request is found 1888b411b363SPhilipp Reisner * that has not yet been acked, 1889b411b363SPhilipp Reisner * AND I have the "discard concurrent writes" flag: 1890b411b363SPhilipp Reisner * queue (via done_ee) the P_DISCARD_ACK; OUT. 1891b411b363SPhilipp Reisner * 1892b411b363SPhilipp Reisner * if any conflicting request is found: 1893b411b363SPhilipp Reisner * block the receiver, waiting on misc_wait 1894b411b363SPhilipp Reisner * until no more conflicting requests are there, 1895b411b363SPhilipp Reisner * or we get interrupted (disconnect). 1896b411b363SPhilipp Reisner * 1897b411b363SPhilipp Reisner * we do not just write after local io completion of those 1898b411b363SPhilipp Reisner * requests, but only after req is done completely, i.e. 1899b411b363SPhilipp Reisner * we wait for the P_DISCARD_ACK to arrive! 1900b411b363SPhilipp Reisner * 1901b411b363SPhilipp Reisner * then proceed normally, i.e. submit. 1902b411b363SPhilipp Reisner */ 1903b411b363SPhilipp Reisner if (drbd_wait_peer_seq(mdev, be32_to_cpu(p->seq_num))) 1904b411b363SPhilipp Reisner goto out_interrupted; 1905b411b363SPhilipp Reisner 1906b411b363SPhilipp Reisner spin_lock_irq(&mdev->req_lock); 1907b411b363SPhilipp Reisner 1908b411b363SPhilipp Reisner hlist_add_head(&e->colision, ee_hash_slot(mdev, sector)); 1909b411b363SPhilipp Reisner 1910b411b363SPhilipp Reisner #define OVERLAPS overlaps(i->sector, i->size, sector, size) 1911b411b363SPhilipp Reisner slot = tl_hash_slot(mdev, sector); 1912b411b363SPhilipp Reisner first = 1; 1913b411b363SPhilipp Reisner for (;;) { 1914b411b363SPhilipp Reisner int have_unacked = 0; 1915b411b363SPhilipp Reisner int have_conflict = 0; 1916b411b363SPhilipp Reisner prepare_to_wait(&mdev->misc_wait, &wait, 1917b411b363SPhilipp Reisner TASK_INTERRUPTIBLE); 1918b411b363SPhilipp Reisner hlist_for_each_entry(i, n, slot, colision) { 1919b411b363SPhilipp Reisner if (OVERLAPS) { 1920b411b363SPhilipp Reisner /* only ALERT on first iteration, 1921b411b363SPhilipp Reisner * we may be woken up early... */ 1922b411b363SPhilipp Reisner if (first) 1923b411b363SPhilipp Reisner dev_alert(DEV, "%s[%u] Concurrent local write detected!" 1924b411b363SPhilipp Reisner " new: %llus +%u; pending: %llus +%u\n", 1925b411b363SPhilipp Reisner current->comm, current->pid, 1926b411b363SPhilipp Reisner (unsigned long long)sector, size, 1927b411b363SPhilipp Reisner (unsigned long long)i->sector, i->size); 1928b411b363SPhilipp Reisner if (i->rq_state & RQ_NET_PENDING) 1929b411b363SPhilipp Reisner ++have_unacked; 1930b411b363SPhilipp Reisner ++have_conflict; 1931b411b363SPhilipp Reisner } 1932b411b363SPhilipp Reisner } 1933b411b363SPhilipp Reisner #undef OVERLAPS 1934b411b363SPhilipp Reisner if (!have_conflict) 1935b411b363SPhilipp Reisner break; 1936b411b363SPhilipp Reisner 1937b411b363SPhilipp Reisner /* Discard Ack only for the _first_ iteration */ 1938b411b363SPhilipp Reisner if (first && discard && have_unacked) { 1939b411b363SPhilipp Reisner dev_alert(DEV, "Concurrent write! 
[DISCARD BY FLAG] sec=%llus\n", 1940b411b363SPhilipp Reisner (unsigned long long)sector); 1941b411b363SPhilipp Reisner inc_unacked(mdev); 1942b411b363SPhilipp Reisner e->w.cb = e_send_discard_ack; 1943b411b363SPhilipp Reisner list_add_tail(&e->w.list, &mdev->done_ee); 1944b411b363SPhilipp Reisner 1945b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 1946b411b363SPhilipp Reisner 1947b411b363SPhilipp Reisner /* we could probably send that P_DISCARD_ACK ourselves, 1948b411b363SPhilipp Reisner * but I don't like the receiver using the msock */ 1949b411b363SPhilipp Reisner 1950b411b363SPhilipp Reisner put_ldev(mdev); 1951b411b363SPhilipp Reisner wake_asender(mdev); 1952b411b363SPhilipp Reisner finish_wait(&mdev->misc_wait, &wait); 1953b411b363SPhilipp Reisner return TRUE; 1954b411b363SPhilipp Reisner } 1955b411b363SPhilipp Reisner 1956b411b363SPhilipp Reisner if (signal_pending(current)) { 1957b411b363SPhilipp Reisner hlist_del_init(&e->colision); 1958b411b363SPhilipp Reisner 1959b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 1960b411b363SPhilipp Reisner 1961b411b363SPhilipp Reisner finish_wait(&mdev->misc_wait, &wait); 1962b411b363SPhilipp Reisner goto out_interrupted; 1963b411b363SPhilipp Reisner } 1964b411b363SPhilipp Reisner 1965b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 1966b411b363SPhilipp Reisner if (first) { 1967b411b363SPhilipp Reisner first = 0; 1968b411b363SPhilipp Reisner dev_alert(DEV, "Concurrent write! [W AFTERWARDS] " 1969b411b363SPhilipp Reisner "sec=%llus\n", (unsigned long long)sector); 1970b411b363SPhilipp Reisner } else if (discard) { 1971b411b363SPhilipp Reisner /* we had none on the first iteration. 1972b411b363SPhilipp Reisner * there must be none now. */ 1973b411b363SPhilipp Reisner D_ASSERT(have_unacked == 0); 1974b411b363SPhilipp Reisner } 1975b411b363SPhilipp Reisner schedule(); 1976b411b363SPhilipp Reisner spin_lock_irq(&mdev->req_lock); 1977b411b363SPhilipp Reisner } 1978b411b363SPhilipp Reisner finish_wait(&mdev->misc_wait, &wait); 1979b411b363SPhilipp Reisner } 1980b411b363SPhilipp Reisner 1981b411b363SPhilipp Reisner list_add(&e->w.list, &mdev->active_ee); 1982b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 1983b411b363SPhilipp Reisner 1984b411b363SPhilipp Reisner switch (mdev->net_conf->wire_protocol) { 1985b411b363SPhilipp Reisner case DRBD_PROT_C: 1986b411b363SPhilipp Reisner inc_unacked(mdev); 1987b411b363SPhilipp Reisner /* corresponding dec_unacked() in e_end_block() 1988b411b363SPhilipp Reisner * respective _drbd_clear_done_ee */ 1989b411b363SPhilipp Reisner break; 1990b411b363SPhilipp Reisner case DRBD_PROT_B: 1991b411b363SPhilipp Reisner /* I really don't like it that the receiver thread 1992b411b363SPhilipp Reisner * sends on the msock, but anyways */ 1993b411b363SPhilipp Reisner drbd_send_ack(mdev, P_RECV_ACK, e); 1994b411b363SPhilipp Reisner break; 1995b411b363SPhilipp Reisner case DRBD_PROT_A: 1996b411b363SPhilipp Reisner /* nothing to do */ 1997b411b363SPhilipp Reisner break; 1998b411b363SPhilipp Reisner } 1999b411b363SPhilipp Reisner 2000b411b363SPhilipp Reisner if (mdev->state.pdsk == D_DISKLESS) { 2001b411b363SPhilipp Reisner /* In case we have the only disk of the cluster, */ 2002b411b363SPhilipp Reisner drbd_set_out_of_sync(mdev, e->sector, e->size); 2003b411b363SPhilipp Reisner e->flags |= EE_CALL_AL_COMPLETE_IO; 2004b411b363SPhilipp Reisner drbd_al_begin_io(mdev, e->sector); 2005b411b363SPhilipp Reisner } 2006b411b363SPhilipp Reisner 200745bb912bSLars Ellenberg if (drbd_submit_ee(mdev, e, rw, 
DRBD_FAULT_DT_WR) == 0) 2008b411b363SPhilipp Reisner return TRUE; 2009b411b363SPhilipp Reisner 2010b411b363SPhilipp Reisner out_interrupted: 2011b411b363SPhilipp Reisner /* yes, the epoch_size now is imbalanced. 2012b411b363SPhilipp Reisner * but we drop the connection anyways, so we don't have a chance to 2013b411b363SPhilipp Reisner * receive a barrier... atomic_inc(&mdev->epoch_size); */ 2014b411b363SPhilipp Reisner put_ldev(mdev); 2015b411b363SPhilipp Reisner drbd_free_ee(mdev, e); 2016b411b363SPhilipp Reisner return FALSE; 2017b411b363SPhilipp Reisner } 2018b411b363SPhilipp Reisner 2019b411b363SPhilipp Reisner static int receive_DataRequest(struct drbd_conf *mdev, struct p_header *h) 2020b411b363SPhilipp Reisner { 2021b411b363SPhilipp Reisner sector_t sector; 2022b411b363SPhilipp Reisner const sector_t capacity = drbd_get_capacity(mdev->this_bdev); 2023b411b363SPhilipp Reisner struct drbd_epoch_entry *e; 2024b411b363SPhilipp Reisner struct digest_info *di = NULL; 2025b411b363SPhilipp Reisner int size, digest_size; 2026b411b363SPhilipp Reisner unsigned int fault_type; 2027b411b363SPhilipp Reisner struct p_block_req *p = 2028b411b363SPhilipp Reisner (struct p_block_req *)h; 2029b411b363SPhilipp Reisner const int brps = sizeof(*p)-sizeof(*h); 2030b411b363SPhilipp Reisner 2031b411b363SPhilipp Reisner if (drbd_recv(mdev, h->payload, brps) != brps) 2032b411b363SPhilipp Reisner return FALSE; 2033b411b363SPhilipp Reisner 2034b411b363SPhilipp Reisner sector = be64_to_cpu(p->sector); 2035b411b363SPhilipp Reisner size = be32_to_cpu(p->blksize); 2036b411b363SPhilipp Reisner 2037b411b363SPhilipp Reisner if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_SEGMENT_SIZE) { 2038b411b363SPhilipp Reisner dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__, 2039b411b363SPhilipp Reisner (unsigned long long)sector, size); 2040b411b363SPhilipp Reisner return FALSE; 2041b411b363SPhilipp Reisner } 2042b411b363SPhilipp Reisner if (sector + (size>>9) > capacity) { 2043b411b363SPhilipp Reisner dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__, 2044b411b363SPhilipp Reisner (unsigned long long)sector, size); 2045b411b363SPhilipp Reisner return FALSE; 2046b411b363SPhilipp Reisner } 2047b411b363SPhilipp Reisner 2048b411b363SPhilipp Reisner if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) { 2049b411b363SPhilipp Reisner if (__ratelimit(&drbd_ratelimit_state)) 2050b411b363SPhilipp Reisner dev_err(DEV, "Can not satisfy peer's read request, " 2051b411b363SPhilipp Reisner "no local data.\n"); 2052b411b363SPhilipp Reisner drbd_send_ack_rp(mdev, h->command == P_DATA_REQUEST ? P_NEG_DREPLY : 2053b411b363SPhilipp Reisner P_NEG_RS_DREPLY , p); 2054c3470cdeSLars Ellenberg return drbd_drain_block(mdev, h->length - brps); 2055b411b363SPhilipp Reisner } 2056b411b363SPhilipp Reisner 2057b411b363SPhilipp Reisner /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD 2058b411b363SPhilipp Reisner * "criss-cross" setup, that might cause write-out on some other DRBD, 2059b411b363SPhilipp Reisner * which in turn might block on the other node at this very place. 
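 * Concretely: an allocation that may enter reclaim could trigger
 * write-out to another DRBD device on this node; that write must be
 * mirrored to its peer, and if the peer's receiver is blocked in the
 * same kind of allocation, neither side can make progress.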
*/ 2060b411b363SPhilipp Reisner e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO); 2061b411b363SPhilipp Reisner if (!e) { 2062b411b363SPhilipp Reisner put_ldev(mdev); 2063b411b363SPhilipp Reisner return FALSE; 2064b411b363SPhilipp Reisner } 2065b411b363SPhilipp Reisner 2066b411b363SPhilipp Reisner switch (h->command) { 2067b411b363SPhilipp Reisner case P_DATA_REQUEST: 2068b411b363SPhilipp Reisner e->w.cb = w_e_end_data_req; 2069b411b363SPhilipp Reisner fault_type = DRBD_FAULT_DT_RD; 2070b411b363SPhilipp Reisner break; 2071b411b363SPhilipp Reisner case P_RS_DATA_REQUEST: 2072b411b363SPhilipp Reisner e->w.cb = w_e_end_rsdata_req; 2073b411b363SPhilipp Reisner fault_type = DRBD_FAULT_RS_RD; 2074b411b363SPhilipp Reisner /* Eventually this should become asynchronously. Currently it 2075b411b363SPhilipp Reisner * blocks the whole receiver just to delay the reading of a 2076b411b363SPhilipp Reisner * resync data block. 2077b411b363SPhilipp Reisner * the drbd_work_queue mechanism is made for this... 2078b411b363SPhilipp Reisner */ 2079b411b363SPhilipp Reisner if (!drbd_rs_begin_io(mdev, sector)) { 2080b411b363SPhilipp Reisner /* we have been interrupted, 2081b411b363SPhilipp Reisner * probably connection lost! */ 2082b411b363SPhilipp Reisner D_ASSERT(signal_pending(current)); 2083b411b363SPhilipp Reisner goto out_free_e; 2084b411b363SPhilipp Reisner } 2085b411b363SPhilipp Reisner break; 2086b411b363SPhilipp Reisner 2087b411b363SPhilipp Reisner case P_OV_REPLY: 2088b411b363SPhilipp Reisner case P_CSUM_RS_REQUEST: 2089b411b363SPhilipp Reisner fault_type = DRBD_FAULT_RS_RD; 2090b411b363SPhilipp Reisner digest_size = h->length - brps ; 2091b411b363SPhilipp Reisner di = kmalloc(sizeof(*di) + digest_size, GFP_NOIO); 2092b411b363SPhilipp Reisner if (!di) 2093b411b363SPhilipp Reisner goto out_free_e; 2094b411b363SPhilipp Reisner 2095b411b363SPhilipp Reisner di->digest_size = digest_size; 2096b411b363SPhilipp Reisner di->digest = (((char *)di)+sizeof(struct digest_info)); 2097b411b363SPhilipp Reisner 2098b411b363SPhilipp Reisner if (drbd_recv(mdev, di->digest, digest_size) != digest_size) 2099b411b363SPhilipp Reisner goto out_free_e; 2100b411b363SPhilipp Reisner 2101b411b363SPhilipp Reisner e->block_id = (u64)(unsigned long)di; 2102b411b363SPhilipp Reisner if (h->command == P_CSUM_RS_REQUEST) { 2103b411b363SPhilipp Reisner D_ASSERT(mdev->agreed_pro_version >= 89); 2104b411b363SPhilipp Reisner e->w.cb = w_e_end_csum_rs_req; 2105b411b363SPhilipp Reisner } else if (h->command == P_OV_REPLY) { 2106b411b363SPhilipp Reisner e->w.cb = w_e_end_ov_reply; 2107b411b363SPhilipp Reisner dec_rs_pending(mdev); 2108b411b363SPhilipp Reisner break; 2109b411b363SPhilipp Reisner } 2110b411b363SPhilipp Reisner 2111b411b363SPhilipp Reisner if (!drbd_rs_begin_io(mdev, sector)) { 2112b411b363SPhilipp Reisner /* we have been interrupted, probably connection lost! 
*/ 2113b411b363SPhilipp Reisner D_ASSERT(signal_pending(current)); 2114b411b363SPhilipp Reisner goto out_free_e; 2115b411b363SPhilipp Reisner } 2116b411b363SPhilipp Reisner break; 2117b411b363SPhilipp Reisner 2118b411b363SPhilipp Reisner case P_OV_REQUEST: 2119b411b363SPhilipp Reisner if (mdev->state.conn >= C_CONNECTED && 2120b411b363SPhilipp Reisner mdev->state.conn != C_VERIFY_T) 2121b411b363SPhilipp Reisner dev_warn(DEV, "ASSERT FAILED: got P_OV_REQUEST while being %s\n", 2122b411b363SPhilipp Reisner drbd_conn_str(mdev->state.conn)); 2123b411b363SPhilipp Reisner if (mdev->ov_start_sector == ~(sector_t)0 && 2124b411b363SPhilipp Reisner mdev->agreed_pro_version >= 90) { 2125b411b363SPhilipp Reisner mdev->ov_start_sector = sector; 2126b411b363SPhilipp Reisner mdev->ov_position = sector; 2127b411b363SPhilipp Reisner mdev->ov_left = mdev->rs_total - BM_SECT_TO_BIT(sector); 2128b411b363SPhilipp Reisner dev_info(DEV, "Online Verify start sector: %llu\n", 2129b411b363SPhilipp Reisner (unsigned long long)sector); 2130b411b363SPhilipp Reisner } 2131b411b363SPhilipp Reisner e->w.cb = w_e_end_ov_req; 2132b411b363SPhilipp Reisner fault_type = DRBD_FAULT_RS_RD; 2133b411b363SPhilipp Reisner /* Eventually this should become asynchronous. Currently it 2134b411b363SPhilipp Reisner * blocks the whole receiver just to delay the reading of a 2135b411b363SPhilipp Reisner * resync data block. 2136b411b363SPhilipp Reisner * the drbd_work_queue mechanism is made for this... 2137b411b363SPhilipp Reisner */ 2138b411b363SPhilipp Reisner if (!drbd_rs_begin_io(mdev, sector)) { 2139b411b363SPhilipp Reisner /* we have been interrupted, 2140b411b363SPhilipp Reisner * probably connection lost! */ 2141b411b363SPhilipp Reisner D_ASSERT(signal_pending(current)); 2142b411b363SPhilipp Reisner goto out_free_e; 2143b411b363SPhilipp Reisner } 2144b411b363SPhilipp Reisner break; 2145b411b363SPhilipp Reisner 2146b411b363SPhilipp Reisner 2147b411b363SPhilipp Reisner default: 2148b411b363SPhilipp Reisner dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n", 2149b411b363SPhilipp Reisner cmdname(h->command)); 2150b411b363SPhilipp Reisner fault_type = DRBD_FAULT_MAX; 2151b411b363SPhilipp Reisner } 2152b411b363SPhilipp Reisner 2153b411b363SPhilipp Reisner spin_lock_irq(&mdev->req_lock); 2154b411b363SPhilipp Reisner list_add(&e->w.list, &mdev->read_ee); 2155b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 2156b411b363SPhilipp Reisner 2157b411b363SPhilipp Reisner inc_unacked(mdev); 2158b411b363SPhilipp Reisner 215945bb912bSLars Ellenberg if (drbd_submit_ee(mdev, e, READ, fault_type) == 0) 2160b411b363SPhilipp Reisner return TRUE; 2161b411b363SPhilipp Reisner 2162b411b363SPhilipp Reisner out_free_e: 2163b411b363SPhilipp Reisner kfree(di); 2164b411b363SPhilipp Reisner put_ldev(mdev); 2165b411b363SPhilipp Reisner drbd_free_ee(mdev, e); 2166b411b363SPhilipp Reisner return FALSE; 2167b411b363SPhilipp Reisner } 2168b411b363SPhilipp Reisner 2169b411b363SPhilipp Reisner static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local) 2170b411b363SPhilipp Reisner { 2171b411b363SPhilipp Reisner int self, peer, rv = -100; 2172b411b363SPhilipp Reisner unsigned long ch_self, ch_peer; 2173b411b363SPhilipp Reisner 2174b411b363SPhilipp Reisner self = mdev->ldev->md.uuid[UI_BITMAP] & 1; 2175b411b363SPhilipp Reisner peer = mdev->p_uuid[UI_BITMAP] & 1; 2176b411b363SPhilipp Reisner 2177b411b363SPhilipp Reisner ch_peer = mdev->p_uuid[UI_SIZE]; 2178b411b363SPhilipp Reisner ch_self = mdev->comm_bm_set; 2179b411b363SPhilipp 
Reisner 2180b411b363SPhilipp Reisner switch (mdev->net_conf->after_sb_0p) { 2181b411b363SPhilipp Reisner case ASB_CONSENSUS: 2182b411b363SPhilipp Reisner case ASB_DISCARD_SECONDARY: 2183b411b363SPhilipp Reisner case ASB_CALL_HELPER: 2184b411b363SPhilipp Reisner dev_err(DEV, "Configuration error.\n"); 2185b411b363SPhilipp Reisner break; 2186b411b363SPhilipp Reisner case ASB_DISCONNECT: 2187b411b363SPhilipp Reisner break; 2188b411b363SPhilipp Reisner case ASB_DISCARD_YOUNGER_PRI: 2189b411b363SPhilipp Reisner if (self == 0 && peer == 1) { 2190b411b363SPhilipp Reisner rv = -1; 2191b411b363SPhilipp Reisner break; 2192b411b363SPhilipp Reisner } 2193b411b363SPhilipp Reisner if (self == 1 && peer == 0) { 2194b411b363SPhilipp Reisner rv = 1; 2195b411b363SPhilipp Reisner break; 2196b411b363SPhilipp Reisner } 2197b411b363SPhilipp Reisner /* Else fall through to one of the other strategies... */ 2198b411b363SPhilipp Reisner case ASB_DISCARD_OLDER_PRI: 2199b411b363SPhilipp Reisner if (self == 0 && peer == 1) { 2200b411b363SPhilipp Reisner rv = 1; 2201b411b363SPhilipp Reisner break; 2202b411b363SPhilipp Reisner } 2203b411b363SPhilipp Reisner if (self == 1 && peer == 0) { 2204b411b363SPhilipp Reisner rv = -1; 2205b411b363SPhilipp Reisner break; 2206b411b363SPhilipp Reisner } 2207b411b363SPhilipp Reisner /* Else fall through to one of the other strategies... */ 2208ad19bf6eSLars Ellenberg dev_warn(DEV, "Discard younger/older primary did not find a decision\n" 2209b411b363SPhilipp Reisner "Using discard-least-changes instead\n"); 2210b411b363SPhilipp Reisner case ASB_DISCARD_ZERO_CHG: 2211b411b363SPhilipp Reisner if (ch_peer == 0 && ch_self == 0) { 2212b411b363SPhilipp Reisner rv = test_bit(DISCARD_CONCURRENT, &mdev->flags) 2213b411b363SPhilipp Reisner ? -1 : 1; 2214b411b363SPhilipp Reisner break; 2215b411b363SPhilipp Reisner } else { 2216b411b363SPhilipp Reisner if (ch_peer == 0) { rv = 1; break; } 2217b411b363SPhilipp Reisner if (ch_self == 0) { rv = -1; break; } 2218b411b363SPhilipp Reisner } 2219b411b363SPhilipp Reisner if (mdev->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG) 2220b411b363SPhilipp Reisner break; 2221b411b363SPhilipp Reisner case ASB_DISCARD_LEAST_CHG: 2222b411b363SPhilipp Reisner if (ch_self < ch_peer) 2223b411b363SPhilipp Reisner rv = -1; 2224b411b363SPhilipp Reisner else if (ch_self > ch_peer) 2225b411b363SPhilipp Reisner rv = 1; 2226b411b363SPhilipp Reisner else /* ( ch_self == ch_peer ) */ 2227b411b363SPhilipp Reisner /* Well, then use something else. */ 2228b411b363SPhilipp Reisner rv = test_bit(DISCARD_CONCURRENT, &mdev->flags) 2229b411b363SPhilipp Reisner ? 
-1 : 1; 2230b411b363SPhilipp Reisner break; 2231b411b363SPhilipp Reisner case ASB_DISCARD_LOCAL: 2232b411b363SPhilipp Reisner rv = -1; 2233b411b363SPhilipp Reisner break; 2234b411b363SPhilipp Reisner case ASB_DISCARD_REMOTE: 2235b411b363SPhilipp Reisner rv = 1; 2236b411b363SPhilipp Reisner } 2237b411b363SPhilipp Reisner 2238b411b363SPhilipp Reisner return rv; 2239b411b363SPhilipp Reisner } 2240b411b363SPhilipp Reisner 2241b411b363SPhilipp Reisner static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local) 2242b411b363SPhilipp Reisner { 2243b411b363SPhilipp Reisner int self, peer, hg, rv = -100; 2244b411b363SPhilipp Reisner 2245b411b363SPhilipp Reisner self = mdev->ldev->md.uuid[UI_BITMAP] & 1; 2246b411b363SPhilipp Reisner peer = mdev->p_uuid[UI_BITMAP] & 1; 2247b411b363SPhilipp Reisner 2248b411b363SPhilipp Reisner switch (mdev->net_conf->after_sb_1p) { 2249b411b363SPhilipp Reisner case ASB_DISCARD_YOUNGER_PRI: 2250b411b363SPhilipp Reisner case ASB_DISCARD_OLDER_PRI: 2251b411b363SPhilipp Reisner case ASB_DISCARD_LEAST_CHG: 2252b411b363SPhilipp Reisner case ASB_DISCARD_LOCAL: 2253b411b363SPhilipp Reisner case ASB_DISCARD_REMOTE: 2254b411b363SPhilipp Reisner dev_err(DEV, "Configuration error.\n"); 2255b411b363SPhilipp Reisner break; 2256b411b363SPhilipp Reisner case ASB_DISCONNECT: 2257b411b363SPhilipp Reisner break; 2258b411b363SPhilipp Reisner case ASB_CONSENSUS: 2259b411b363SPhilipp Reisner hg = drbd_asb_recover_0p(mdev); 2260b411b363SPhilipp Reisner if (hg == -1 && mdev->state.role == R_SECONDARY) 2261b411b363SPhilipp Reisner rv = hg; 2262b411b363SPhilipp Reisner if (hg == 1 && mdev->state.role == R_PRIMARY) 2263b411b363SPhilipp Reisner rv = hg; 2264b411b363SPhilipp Reisner break; 2265b411b363SPhilipp Reisner case ASB_VIOLENTLY: 2266b411b363SPhilipp Reisner rv = drbd_asb_recover_0p(mdev); 2267b411b363SPhilipp Reisner break; 2268b411b363SPhilipp Reisner case ASB_DISCARD_SECONDARY: 2269b411b363SPhilipp Reisner return mdev->state.role == R_PRIMARY ? 1 : -1; 2270b411b363SPhilipp Reisner case ASB_CALL_HELPER: 2271b411b363SPhilipp Reisner hg = drbd_asb_recover_0p(mdev); 2272b411b363SPhilipp Reisner if (hg == -1 && mdev->state.role == R_PRIMARY) { 2273b411b363SPhilipp Reisner self = drbd_set_role(mdev, R_SECONDARY, 0); 2274b411b363SPhilipp Reisner /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE, 2275b411b363SPhilipp Reisner * we might be here in C_WF_REPORT_PARAMS which is transient. 2276b411b363SPhilipp Reisner * we do not need to wait for the after state change work either. 
*/ 2277b411b363SPhilipp Reisner self = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY)); 2278b411b363SPhilipp Reisner if (self != SS_SUCCESS) { 2279b411b363SPhilipp Reisner drbd_khelper(mdev, "pri-lost-after-sb"); 2280b411b363SPhilipp Reisner } else { 2281b411b363SPhilipp Reisner dev_warn(DEV, "Successfully gave up primary role.\n"); 2282b411b363SPhilipp Reisner rv = hg; 2283b411b363SPhilipp Reisner } 2284b411b363SPhilipp Reisner } else 2285b411b363SPhilipp Reisner rv = hg; 2286b411b363SPhilipp Reisner } 2287b411b363SPhilipp Reisner 2288b411b363SPhilipp Reisner return rv; 2289b411b363SPhilipp Reisner } 2290b411b363SPhilipp Reisner 2291b411b363SPhilipp Reisner static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local) 2292b411b363SPhilipp Reisner { 2293b411b363SPhilipp Reisner int self, peer, hg, rv = -100; 2294b411b363SPhilipp Reisner 2295b411b363SPhilipp Reisner self = mdev->ldev->md.uuid[UI_BITMAP] & 1; 2296b411b363SPhilipp Reisner peer = mdev->p_uuid[UI_BITMAP] & 1; 2297b411b363SPhilipp Reisner 2298b411b363SPhilipp Reisner switch (mdev->net_conf->after_sb_2p) { 2299b411b363SPhilipp Reisner case ASB_DISCARD_YOUNGER_PRI: 2300b411b363SPhilipp Reisner case ASB_DISCARD_OLDER_PRI: 2301b411b363SPhilipp Reisner case ASB_DISCARD_LEAST_CHG: 2302b411b363SPhilipp Reisner case ASB_DISCARD_LOCAL: 2303b411b363SPhilipp Reisner case ASB_DISCARD_REMOTE: 2304b411b363SPhilipp Reisner case ASB_CONSENSUS: 2305b411b363SPhilipp Reisner case ASB_DISCARD_SECONDARY: 2306b411b363SPhilipp Reisner dev_err(DEV, "Configuration error.\n"); 2307b411b363SPhilipp Reisner break; 2308b411b363SPhilipp Reisner case ASB_VIOLENTLY: 2309b411b363SPhilipp Reisner rv = drbd_asb_recover_0p(mdev); 2310b411b363SPhilipp Reisner break; 2311b411b363SPhilipp Reisner case ASB_DISCONNECT: 2312b411b363SPhilipp Reisner break; 2313b411b363SPhilipp Reisner case ASB_CALL_HELPER: 2314b411b363SPhilipp Reisner hg = drbd_asb_recover_0p(mdev); 2315b411b363SPhilipp Reisner if (hg == -1) { 2316b411b363SPhilipp Reisner /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE, 2317b411b363SPhilipp Reisner * we might be here in C_WF_REPORT_PARAMS which is transient. 2318b411b363SPhilipp Reisner * we do not need to wait for the after state change work either. 
*/ 2319b411b363SPhilipp Reisner self = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY)); 2320b411b363SPhilipp Reisner if (self != SS_SUCCESS) { 2321b411b363SPhilipp Reisner drbd_khelper(mdev, "pri-lost-after-sb"); 2322b411b363SPhilipp Reisner } else { 2323b411b363SPhilipp Reisner dev_warn(DEV, "Successfully gave up primary role.\n"); 2324b411b363SPhilipp Reisner rv = hg; 2325b411b363SPhilipp Reisner } 2326b411b363SPhilipp Reisner } else 2327b411b363SPhilipp Reisner rv = hg; 2328b411b363SPhilipp Reisner } 2329b411b363SPhilipp Reisner 2330b411b363SPhilipp Reisner return rv; 2331b411b363SPhilipp Reisner } 2332b411b363SPhilipp Reisner 2333b411b363SPhilipp Reisner static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid, 2334b411b363SPhilipp Reisner u64 bits, u64 flags) 2335b411b363SPhilipp Reisner { 2336b411b363SPhilipp Reisner if (!uuid) { 2337b411b363SPhilipp Reisner dev_info(DEV, "%s uuid info vanished while I was looking!\n", text); 2338b411b363SPhilipp Reisner return; 2339b411b363SPhilipp Reisner } 2340b411b363SPhilipp Reisner dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n", 2341b411b363SPhilipp Reisner text, 2342b411b363SPhilipp Reisner (unsigned long long)uuid[UI_CURRENT], 2343b411b363SPhilipp Reisner (unsigned long long)uuid[UI_BITMAP], 2344b411b363SPhilipp Reisner (unsigned long long)uuid[UI_HISTORY_START], 2345b411b363SPhilipp Reisner (unsigned long long)uuid[UI_HISTORY_END], 2346b411b363SPhilipp Reisner (unsigned long long)bits, 2347b411b363SPhilipp Reisner (unsigned long long)flags); 2348b411b363SPhilipp Reisner } 2349b411b363SPhilipp Reisner 2350b411b363SPhilipp Reisner /* 2351b411b363SPhilipp Reisner 100 after split brain try auto recover 2352b411b363SPhilipp Reisner 2 C_SYNC_SOURCE set BitMap 2353b411b363SPhilipp Reisner 1 C_SYNC_SOURCE use BitMap 2354b411b363SPhilipp Reisner 0 no Sync 2355b411b363SPhilipp Reisner -1 C_SYNC_TARGET use BitMap 2356b411b363SPhilipp Reisner -2 C_SYNC_TARGET set BitMap 2357b411b363SPhilipp Reisner -100 after split brain, disconnect 2358b411b363SPhilipp Reisner -1000 unrelated data 2359b411b363SPhilipp Reisner */ 2360b411b363SPhilipp Reisner static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local) 2361b411b363SPhilipp Reisner { 2362b411b363SPhilipp Reisner u64 self, peer; 2363b411b363SPhilipp Reisner int i, j; 2364b411b363SPhilipp Reisner 2365b411b363SPhilipp Reisner self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1); 2366b411b363SPhilipp Reisner peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1); 2367b411b363SPhilipp Reisner 2368b411b363SPhilipp Reisner *rule_nr = 10; 2369b411b363SPhilipp Reisner if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED) 2370b411b363SPhilipp Reisner return 0; 2371b411b363SPhilipp Reisner 2372b411b363SPhilipp Reisner *rule_nr = 20; 2373b411b363SPhilipp Reisner if ((self == UUID_JUST_CREATED || self == (u64)0) && 2374b411b363SPhilipp Reisner peer != UUID_JUST_CREATED) 2375b411b363SPhilipp Reisner return -2; 2376b411b363SPhilipp Reisner 2377b411b363SPhilipp Reisner *rule_nr = 30; 2378b411b363SPhilipp Reisner if (self != UUID_JUST_CREATED && 2379b411b363SPhilipp Reisner (peer == UUID_JUST_CREATED || peer == (u64)0)) 2380b411b363SPhilipp Reisner return 2; 2381b411b363SPhilipp Reisner 2382b411b363SPhilipp Reisner if (self == peer) { 2383b411b363SPhilipp Reisner int rct, dc; /* roles at crash time */ 2384b411b363SPhilipp Reisner 2385b411b363SPhilipp Reisner if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) { 
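			/* Current UUIDs match, but the peer's bitmap UUID is already cleared
			 * while ours is not: we were SyncSource and missed the "resync
			 * finished" event.  If our bitmap/history UUIDs line up with the
			 * peer's history UUIDs, we correct our own UUIDs (rule 34); otherwise
			 * the peer simply failed to write the sync UUID (rule 36).  Either
			 * way we end up as sync source (return 1). */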
2386b411b363SPhilipp Reisner 2387b411b363SPhilipp Reisner if (mdev->agreed_pro_version < 91) 2388b411b363SPhilipp Reisner return -1001; 2389b411b363SPhilipp Reisner 2390b411b363SPhilipp Reisner if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) && 2391b411b363SPhilipp Reisner (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) { 2392b411b363SPhilipp Reisner dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n"); 2393b411b363SPhilipp Reisner drbd_uuid_set_bm(mdev, 0UL); 2394b411b363SPhilipp Reisner 2395b411b363SPhilipp Reisner drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, 2396b411b363SPhilipp Reisner mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0); 2397b411b363SPhilipp Reisner *rule_nr = 34; 2398b411b363SPhilipp Reisner } else { 2399b411b363SPhilipp Reisner dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n"); 2400b411b363SPhilipp Reisner *rule_nr = 36; 2401b411b363SPhilipp Reisner } 2402b411b363SPhilipp Reisner 2403b411b363SPhilipp Reisner return 1; 2404b411b363SPhilipp Reisner } 2405b411b363SPhilipp Reisner 2406b411b363SPhilipp Reisner if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) { 2407b411b363SPhilipp Reisner 2408b411b363SPhilipp Reisner if (mdev->agreed_pro_version < 91) 2409b411b363SPhilipp Reisner return -1001; 2410b411b363SPhilipp Reisner 2411b411b363SPhilipp Reisner if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) && 2412b411b363SPhilipp Reisner (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) { 2413b411b363SPhilipp Reisner dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n"); 2414b411b363SPhilipp Reisner 2415b411b363SPhilipp Reisner mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START]; 2416b411b363SPhilipp Reisner mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP]; 2417b411b363SPhilipp Reisner mdev->p_uuid[UI_BITMAP] = 0UL; 2418b411b363SPhilipp Reisner 2419b411b363SPhilipp Reisner drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]); 2420b411b363SPhilipp Reisner *rule_nr = 35; 2421b411b363SPhilipp Reisner } else { 2422b411b363SPhilipp Reisner dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n"); 2423b411b363SPhilipp Reisner *rule_nr = 37; 2424b411b363SPhilipp Reisner } 2425b411b363SPhilipp Reisner 2426b411b363SPhilipp Reisner return -1; 2427b411b363SPhilipp Reisner } 2428b411b363SPhilipp Reisner 2429b411b363SPhilipp Reisner /* Common power [off|failure] */ 2430b411b363SPhilipp Reisner rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) + 2431b411b363SPhilipp Reisner (mdev->p_uuid[UI_FLAGS] & 2); 2432b411b363SPhilipp Reisner /* lowest bit is set when we were primary, 2433b411b363SPhilipp Reisner * next bit (weight 2) is set when peer was primary */ 2434b411b363SPhilipp Reisner *rule_nr = 40; 2435b411b363SPhilipp Reisner 2436b411b363SPhilipp Reisner switch (rct) { 2437b411b363SPhilipp Reisner case 0: /* !self_pri && !peer_pri */ return 0; 2438b411b363SPhilipp Reisner case 1: /* self_pri && !peer_pri */ return 1; 2439b411b363SPhilipp Reisner case 2: /* !self_pri && peer_pri */ return -1; 2440b411b363SPhilipp Reisner case 3: /* self_pri && peer_pri */ 2441b411b363SPhilipp Reisner dc = test_bit(DISCARD_CONCURRENT, &mdev->flags); 2442b411b363SPhilipp Reisner return dc ? 
-1 : 1; 2443b411b363SPhilipp Reisner } 2444b411b363SPhilipp Reisner } 2445b411b363SPhilipp Reisner 2446b411b363SPhilipp Reisner *rule_nr = 50; 2447b411b363SPhilipp Reisner peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1); 2448b411b363SPhilipp Reisner if (self == peer) 2449b411b363SPhilipp Reisner return -1; 2450b411b363SPhilipp Reisner 2451b411b363SPhilipp Reisner *rule_nr = 51; 2452b411b363SPhilipp Reisner peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1); 2453b411b363SPhilipp Reisner if (self == peer) { 2454b411b363SPhilipp Reisner self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1); 2455b411b363SPhilipp Reisner peer = mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1); 2456b411b363SPhilipp Reisner if (self == peer) { 2457b411b363SPhilipp Reisner /* The last P_SYNC_UUID did not get through. Undo the last start of 2458b411b363SPhilipp Reisner resync as sync source modifications of the peer's UUIDs. */ 2459b411b363SPhilipp Reisner 2460b411b363SPhilipp Reisner if (mdev->agreed_pro_version < 91) 2461b411b363SPhilipp Reisner return -1001; 2462b411b363SPhilipp Reisner 2463b411b363SPhilipp Reisner mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START]; 2464b411b363SPhilipp Reisner mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1]; 2465b411b363SPhilipp Reisner return -1; 2466b411b363SPhilipp Reisner } 2467b411b363SPhilipp Reisner } 2468b411b363SPhilipp Reisner 2469b411b363SPhilipp Reisner *rule_nr = 60; 2470b411b363SPhilipp Reisner self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1); 2471b411b363SPhilipp Reisner for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) { 2472b411b363SPhilipp Reisner peer = mdev->p_uuid[i] & ~((u64)1); 2473b411b363SPhilipp Reisner if (self == peer) 2474b411b363SPhilipp Reisner return -2; 2475b411b363SPhilipp Reisner } 2476b411b363SPhilipp Reisner 2477b411b363SPhilipp Reisner *rule_nr = 70; 2478b411b363SPhilipp Reisner self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1); 2479b411b363SPhilipp Reisner peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1); 2480b411b363SPhilipp Reisner if (self == peer) 2481b411b363SPhilipp Reisner return 1; 2482b411b363SPhilipp Reisner 2483b411b363SPhilipp Reisner *rule_nr = 71; 2484b411b363SPhilipp Reisner self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1); 2485b411b363SPhilipp Reisner if (self == peer) { 2486b411b363SPhilipp Reisner self = mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1); 2487b411b363SPhilipp Reisner peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1); 2488b411b363SPhilipp Reisner if (self == peer) { 2489b411b363SPhilipp Reisner /* The last P_SYNC_UUID did not get through. Undo the last start of 2490b411b363SPhilipp Reisner resync as sync source modifications of our UUIDs. */ 2491b411b363SPhilipp Reisner 2492b411b363SPhilipp Reisner if (mdev->agreed_pro_version < 91) 2493b411b363SPhilipp Reisner return -1001; 2494b411b363SPhilipp Reisner 2495b411b363SPhilipp Reisner _drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]); 2496b411b363SPhilipp Reisner _drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]); 2497b411b363SPhilipp Reisner 2498b411b363SPhilipp Reisner dev_info(DEV, "Undid last start of resync:\n"); 2499b411b363SPhilipp Reisner 2500b411b363SPhilipp Reisner drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, 2501b411b363SPhilipp Reisner mdev->state.disk >= D_NEGOTIATING ?
drbd_bm_total_weight(mdev) : 0, 0); 2502b411b363SPhilipp Reisner 2503b411b363SPhilipp Reisner return 1; 2504b411b363SPhilipp Reisner } 2505b411b363SPhilipp Reisner } 2506b411b363SPhilipp Reisner 2507b411b363SPhilipp Reisner 2508b411b363SPhilipp Reisner *rule_nr = 80; 2509d8c2a36bSPhilipp Reisner peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1); 2510b411b363SPhilipp Reisner for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) { 2511b411b363SPhilipp Reisner self = mdev->ldev->md.uuid[i] & ~((u64)1); 2512b411b363SPhilipp Reisner if (self == peer) 2513b411b363SPhilipp Reisner return 2; 2514b411b363SPhilipp Reisner } 2515b411b363SPhilipp Reisner 2516b411b363SPhilipp Reisner *rule_nr = 90; 2517b411b363SPhilipp Reisner self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1); 2518b411b363SPhilipp Reisner peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1); 2519b411b363SPhilipp Reisner if (self == peer && self != ((u64)0)) 2520b411b363SPhilipp Reisner return 100; 2521b411b363SPhilipp Reisner 2522b411b363SPhilipp Reisner *rule_nr = 100; 2523b411b363SPhilipp Reisner for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) { 2524b411b363SPhilipp Reisner self = mdev->ldev->md.uuid[i] & ~((u64)1); 2525b411b363SPhilipp Reisner for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) { 2526b411b363SPhilipp Reisner peer = mdev->p_uuid[j] & ~((u64)1); 2527b411b363SPhilipp Reisner if (self == peer) 2528b411b363SPhilipp Reisner return -100; 2529b411b363SPhilipp Reisner } 2530b411b363SPhilipp Reisner } 2531b411b363SPhilipp Reisner 2532b411b363SPhilipp Reisner return -1000; 2533b411b363SPhilipp Reisner } 2534b411b363SPhilipp Reisner 2535b411b363SPhilipp Reisner /* drbd_sync_handshake() returns the new conn state on success, or 2536b411b363SPhilipp Reisner CONN_MASK (-1) on failure. 2537b411b363SPhilipp Reisner */ 2538b411b363SPhilipp Reisner static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role, 2539b411b363SPhilipp Reisner enum drbd_disk_state peer_disk) __must_hold(local) 2540b411b363SPhilipp Reisner { 2541b411b363SPhilipp Reisner int hg, rule_nr; 2542b411b363SPhilipp Reisner enum drbd_conns rv = C_MASK; 2543b411b363SPhilipp Reisner enum drbd_disk_state mydisk; 2544b411b363SPhilipp Reisner 2545b411b363SPhilipp Reisner mydisk = mdev->state.disk; 2546b411b363SPhilipp Reisner if (mydisk == D_NEGOTIATING) 2547b411b363SPhilipp Reisner mydisk = mdev->new_state_tmp.disk; 2548b411b363SPhilipp Reisner 2549b411b363SPhilipp Reisner dev_info(DEV, "drbd_sync_handshake:\n"); 2550b411b363SPhilipp Reisner drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0); 2551b411b363SPhilipp Reisner drbd_uuid_dump(mdev, "peer", mdev->p_uuid, 2552b411b363SPhilipp Reisner mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]); 2553b411b363SPhilipp Reisner 2554b411b363SPhilipp Reisner hg = drbd_uuid_compare(mdev, &rule_nr); 2555b411b363SPhilipp Reisner 2556b411b363SPhilipp Reisner dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr); 2557b411b363SPhilipp Reisner 2558b411b363SPhilipp Reisner if (hg == -1000) { 2559b411b363SPhilipp Reisner dev_alert(DEV, "Unrelated data, aborting!\n"); 2560b411b363SPhilipp Reisner return C_MASK; 2561b411b363SPhilipp Reisner } 2562b411b363SPhilipp Reisner if (hg == -1001) { 2563b411b363SPhilipp Reisner dev_alert(DEV, "To resolve this both sides have to support at least protocol\n"); 2564b411b363SPhilipp Reisner return C_MASK; 2565b411b363SPhilipp Reisner } 2566b411b363SPhilipp Reisner 2567b411b363SPhilipp Reisner if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) || 
2568b411b363SPhilipp Reisner (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) { 2569b411b363SPhilipp Reisner int f = (hg == -100) || abs(hg) == 2; 2570b411b363SPhilipp Reisner hg = mydisk > D_INCONSISTENT ? 1 : -1; 2571b411b363SPhilipp Reisner if (f) 2572b411b363SPhilipp Reisner hg = hg*2; 2573b411b363SPhilipp Reisner dev_info(DEV, "Becoming sync %s due to disk states.\n", 2574b411b363SPhilipp Reisner hg > 0 ? "source" : "target"); 2575b411b363SPhilipp Reisner } 2576b411b363SPhilipp Reisner 25773a11a487SAdam Gandelman if (abs(hg) == 100) 25783a11a487SAdam Gandelman drbd_khelper(mdev, "initial-split-brain"); 25793a11a487SAdam Gandelman 2580b411b363SPhilipp Reisner if (hg == 100 || (hg == -100 && mdev->net_conf->always_asbp)) { 2581b411b363SPhilipp Reisner int pcount = (mdev->state.role == R_PRIMARY) 2582b411b363SPhilipp Reisner + (peer_role == R_PRIMARY); 2583b411b363SPhilipp Reisner int forced = (hg == -100); 2584b411b363SPhilipp Reisner 2585b411b363SPhilipp Reisner switch (pcount) { 2586b411b363SPhilipp Reisner case 0: 2587b411b363SPhilipp Reisner hg = drbd_asb_recover_0p(mdev); 2588b411b363SPhilipp Reisner break; 2589b411b363SPhilipp Reisner case 1: 2590b411b363SPhilipp Reisner hg = drbd_asb_recover_1p(mdev); 2591b411b363SPhilipp Reisner break; 2592b411b363SPhilipp Reisner case 2: 2593b411b363SPhilipp Reisner hg = drbd_asb_recover_2p(mdev); 2594b411b363SPhilipp Reisner break; 2595b411b363SPhilipp Reisner } 2596b411b363SPhilipp Reisner if (abs(hg) < 100) { 2597b411b363SPhilipp Reisner dev_warn(DEV, "Split-Brain detected, %d primaries, " 2598b411b363SPhilipp Reisner "automatically solved. Sync from %s node\n", 2599b411b363SPhilipp Reisner pcount, (hg < 0) ? "peer" : "this"); 2600b411b363SPhilipp Reisner if (forced) { 2601b411b363SPhilipp Reisner dev_warn(DEV, "Doing a full sync, since" 2602b411b363SPhilipp Reisner " UUIDs were ambiguous.\n"); 2603b411b363SPhilipp Reisner hg = hg*2; 2604b411b363SPhilipp Reisner } 2605b411b363SPhilipp Reisner } 2606b411b363SPhilipp Reisner } 2607b411b363SPhilipp Reisner 2608b411b363SPhilipp Reisner if (hg == -100) { 2609b411b363SPhilipp Reisner if (mdev->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1)) 2610b411b363SPhilipp Reisner hg = -1; 2611b411b363SPhilipp Reisner if (!mdev->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1)) 2612b411b363SPhilipp Reisner hg = 1; 2613b411b363SPhilipp Reisner 2614b411b363SPhilipp Reisner if (abs(hg) < 100) 2615b411b363SPhilipp Reisner dev_warn(DEV, "Split-Brain detected, manually solved. " 2616b411b363SPhilipp Reisner "Sync from %s node\n", 2617b411b363SPhilipp Reisner (hg < 0) ? "peer" : "this"); 2618b411b363SPhilipp Reisner } 2619b411b363SPhilipp Reisner 2620b411b363SPhilipp Reisner if (hg == -100) { 2621580b9767SLars Ellenberg /* FIXME this log message is not correct if we end up here 2622580b9767SLars Ellenberg * after an attempted attach on a diskless node. 2623580b9767SLars Ellenberg * We just refuse to attach -- well, we drop the "connection" 2624580b9767SLars Ellenberg * to that disk, in a way...
*/ 26253a11a487SAdam Gandelman dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n"); 2626b411b363SPhilipp Reisner drbd_khelper(mdev, "split-brain"); 2627b411b363SPhilipp Reisner return C_MASK; 2628b411b363SPhilipp Reisner } 2629b411b363SPhilipp Reisner 2630b411b363SPhilipp Reisner if (hg > 0 && mydisk <= D_INCONSISTENT) { 2631b411b363SPhilipp Reisner dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n"); 2632b411b363SPhilipp Reisner return C_MASK; 2633b411b363SPhilipp Reisner } 2634b411b363SPhilipp Reisner 2635b411b363SPhilipp Reisner if (hg < 0 && /* by intention we do not use mydisk here. */ 2636b411b363SPhilipp Reisner mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) { 2637b411b363SPhilipp Reisner switch (mdev->net_conf->rr_conflict) { 2638b411b363SPhilipp Reisner case ASB_CALL_HELPER: 2639b411b363SPhilipp Reisner drbd_khelper(mdev, "pri-lost"); 2640b411b363SPhilipp Reisner /* fall through */ 2641b411b363SPhilipp Reisner case ASB_DISCONNECT: 2642b411b363SPhilipp Reisner dev_err(DEV, "I shall become SyncTarget, but I am primary!\n"); 2643b411b363SPhilipp Reisner return C_MASK; 2644b411b363SPhilipp Reisner case ASB_VIOLENTLY: 2645b411b363SPhilipp Reisner dev_warn(DEV, "Becoming SyncTarget, violating the stable-data " 2646b411b363SPhilipp Reisner "assumption\n"); 2647b411b363SPhilipp Reisner } 2648b411b363SPhilipp Reisner } 2649b411b363SPhilipp Reisner 2650cf14c2e9SPhilipp Reisner if (mdev->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) { 2651cf14c2e9SPhilipp Reisner if (hg == 0) 2652cf14c2e9SPhilipp Reisner dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n"); 2653cf14c2e9SPhilipp Reisner else 2654cf14c2e9SPhilipp Reisner dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.\n", 2655cf14c2e9SPhilipp Reisner drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET), 2656cf14c2e9SPhilipp Reisner abs(hg) >= 2 ? "full" : "bit-map based"); 2657cf14c2e9SPhilipp Reisner return C_MASK; 2658cf14c2e9SPhilipp Reisner } 2659cf14c2e9SPhilipp Reisner 2660b411b363SPhilipp Reisner if (abs(hg) >= 2) { 2661b411b363SPhilipp Reisner dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n"); 2662b411b363SPhilipp Reisner if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake")) 2663b411b363SPhilipp Reisner return C_MASK; 2664b411b363SPhilipp Reisner } 2665b411b363SPhilipp Reisner 2666b411b363SPhilipp Reisner if (hg > 0) { /* become sync source.
*/ 2667b411b363SPhilipp Reisner rv = C_WF_BITMAP_S; 2668b411b363SPhilipp Reisner } else if (hg < 0) { /* become sync target */ 2669b411b363SPhilipp Reisner rv = C_WF_BITMAP_T; 2670b411b363SPhilipp Reisner } else { 2671b411b363SPhilipp Reisner rv = C_CONNECTED; 2672b411b363SPhilipp Reisner if (drbd_bm_total_weight(mdev)) { 2673b411b363SPhilipp Reisner dev_info(DEV, "No resync, but %lu bits in bitmap!\n", 2674b411b363SPhilipp Reisner drbd_bm_total_weight(mdev)); 2675b411b363SPhilipp Reisner } 2676b411b363SPhilipp Reisner } 2677b411b363SPhilipp Reisner 2678b411b363SPhilipp Reisner return rv; 2679b411b363SPhilipp Reisner } 2680b411b363SPhilipp Reisner 2681b411b363SPhilipp Reisner /* returns 1 if invalid */ 2682b411b363SPhilipp Reisner static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self) 2683b411b363SPhilipp Reisner { 2684b411b363SPhilipp Reisner /* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */ 2685b411b363SPhilipp Reisner if ((peer == ASB_DISCARD_REMOTE && self == ASB_DISCARD_LOCAL) || 2686b411b363SPhilipp Reisner (self == ASB_DISCARD_REMOTE && peer == ASB_DISCARD_LOCAL)) 2687b411b363SPhilipp Reisner return 0; 2688b411b363SPhilipp Reisner 2689b411b363SPhilipp Reisner /* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */ 2690b411b363SPhilipp Reisner if (peer == ASB_DISCARD_REMOTE || peer == ASB_DISCARD_LOCAL || 2691b411b363SPhilipp Reisner self == ASB_DISCARD_REMOTE || self == ASB_DISCARD_LOCAL) 2692b411b363SPhilipp Reisner return 1; 2693b411b363SPhilipp Reisner 2694b411b363SPhilipp Reisner /* everything else is valid if they are equal on both sides. */ 2695b411b363SPhilipp Reisner if (peer == self) 2696b411b363SPhilipp Reisner return 0; 2697b411b363SPhilipp Reisner 2698b411b363SPhilipp Reisner /* everything else is invalid.
*/ 2699b411b363SPhilipp Reisner return 1; 2700b411b363SPhilipp Reisner } 2701b411b363SPhilipp Reisner 2702b411b363SPhilipp Reisner static int receive_protocol(struct drbd_conf *mdev, struct p_header *h) 2703b411b363SPhilipp Reisner { 2704b411b363SPhilipp Reisner struct p_protocol *p = (struct p_protocol *)h; 2705b411b363SPhilipp Reisner int header_size, data_size; 2706b411b363SPhilipp Reisner int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p; 2707cf14c2e9SPhilipp Reisner int p_want_lose, p_two_primaries, cf; 2708b411b363SPhilipp Reisner char p_integrity_alg[SHARED_SECRET_MAX] = ""; 2709b411b363SPhilipp Reisner 2710b411b363SPhilipp Reisner header_size = sizeof(*p) - sizeof(*h); 2711b411b363SPhilipp Reisner data_size = h->length - header_size; 2712b411b363SPhilipp Reisner 2713b411b363SPhilipp Reisner if (drbd_recv(mdev, h->payload, header_size) != header_size) 2714b411b363SPhilipp Reisner return FALSE; 2715b411b363SPhilipp Reisner 2716b411b363SPhilipp Reisner p_proto = be32_to_cpu(p->protocol); 2717b411b363SPhilipp Reisner p_after_sb_0p = be32_to_cpu(p->after_sb_0p); 2718b411b363SPhilipp Reisner p_after_sb_1p = be32_to_cpu(p->after_sb_1p); 2719b411b363SPhilipp Reisner p_after_sb_2p = be32_to_cpu(p->after_sb_2p); 2720b411b363SPhilipp Reisner p_two_primaries = be32_to_cpu(p->two_primaries); 2721cf14c2e9SPhilipp Reisner cf = be32_to_cpu(p->conn_flags); 2722cf14c2e9SPhilipp Reisner p_want_lose = cf & CF_WANT_LOSE; 2723cf14c2e9SPhilipp Reisner 2724cf14c2e9SPhilipp Reisner clear_bit(CONN_DRY_RUN, &mdev->flags); 2725cf14c2e9SPhilipp Reisner 2726cf14c2e9SPhilipp Reisner if (cf & CF_DRY_RUN) 2727cf14c2e9SPhilipp Reisner set_bit(CONN_DRY_RUN, &mdev->flags); 2728b411b363SPhilipp Reisner 2729b411b363SPhilipp Reisner if (p_proto != mdev->net_conf->wire_protocol) { 2730b411b363SPhilipp Reisner dev_err(DEV, "incompatible communication protocols\n"); 2731b411b363SPhilipp Reisner goto disconnect; 2732b411b363SPhilipp Reisner } 2733b411b363SPhilipp Reisner 2734b411b363SPhilipp Reisner if (cmp_after_sb(p_after_sb_0p, mdev->net_conf->after_sb_0p)) { 2735b411b363SPhilipp Reisner dev_err(DEV, "incompatible after-sb-0pri settings\n"); 2736b411b363SPhilipp Reisner goto disconnect; 2737b411b363SPhilipp Reisner } 2738b411b363SPhilipp Reisner 2739b411b363SPhilipp Reisner if (cmp_after_sb(p_after_sb_1p, mdev->net_conf->after_sb_1p)) { 2740b411b363SPhilipp Reisner dev_err(DEV, "incompatible after-sb-1pri settings\n"); 2741b411b363SPhilipp Reisner goto disconnect; 2742b411b363SPhilipp Reisner } 2743b411b363SPhilipp Reisner 2744b411b363SPhilipp Reisner if (cmp_after_sb(p_after_sb_2p, mdev->net_conf->after_sb_2p)) { 2745b411b363SPhilipp Reisner dev_err(DEV, "incompatible after-sb-2pri settings\n"); 2746b411b363SPhilipp Reisner goto disconnect; 2747b411b363SPhilipp Reisner } 2748b411b363SPhilipp Reisner 2749b411b363SPhilipp Reisner if (p_want_lose && mdev->net_conf->want_lose) { 2750b411b363SPhilipp Reisner dev_err(DEV, "both sides have the 'want_lose' flag set\n"); 2751b411b363SPhilipp Reisner goto disconnect; 2752b411b363SPhilipp Reisner } 2753b411b363SPhilipp Reisner 2754b411b363SPhilipp Reisner if (p_two_primaries != mdev->net_conf->two_primaries) { 2755b411b363SPhilipp Reisner dev_err(DEV, "incompatible setting of the two-primaries options\n"); 2756b411b363SPhilipp Reisner goto disconnect; 2757b411b363SPhilipp Reisner } 2758b411b363SPhilipp Reisner 2759b411b363SPhilipp Reisner if (mdev->agreed_pro_version >= 87) { 2760b411b363SPhilipp Reisner unsigned char *my_alg = mdev->net_conf->integrity_alg; 
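		/* From protocol version 87 on, the P_PROTOCOL packet carries the peer's
		 * data-integrity-alg as a trailing, NUL-terminated string of data_size
		 * bytes; it has to match our locally configured algorithm, otherwise we
		 * disconnect below. */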
2761b411b363SPhilipp Reisner 2762b411b363SPhilipp Reisner if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size) 2763b411b363SPhilipp Reisner return FALSE; 2764b411b363SPhilipp Reisner 2765b411b363SPhilipp Reisner p_integrity_alg[SHARED_SECRET_MAX-1] = 0; 2766b411b363SPhilipp Reisner if (strcmp(p_integrity_alg, my_alg)) { 2767b411b363SPhilipp Reisner dev_err(DEV, "incompatible setting of the data-integrity-alg\n"); 2768b411b363SPhilipp Reisner goto disconnect; 2769b411b363SPhilipp Reisner } 2770b411b363SPhilipp Reisner dev_info(DEV, "data-integrity-alg: %s\n", 2771b411b363SPhilipp Reisner my_alg[0] ? my_alg : (unsigned char *)"<not-used>"); 2772b411b363SPhilipp Reisner } 2773b411b363SPhilipp Reisner 2774b411b363SPhilipp Reisner return TRUE; 2775b411b363SPhilipp Reisner 2776b411b363SPhilipp Reisner disconnect: 2777b411b363SPhilipp Reisner drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); 2778b411b363SPhilipp Reisner return FALSE; 2779b411b363SPhilipp Reisner } 2780b411b363SPhilipp Reisner 2781b411b363SPhilipp Reisner /* helper function 2782b411b363SPhilipp Reisner * input: alg name, feature name 2783b411b363SPhilipp Reisner * return: NULL (alg name was "") 2784b411b363SPhilipp Reisner * ERR_PTR(error) if something goes wrong 2785b411b363SPhilipp Reisner * or the crypto hash ptr, if it worked out ok. */ 2786b411b363SPhilipp Reisner struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev, 2787b411b363SPhilipp Reisner const char *alg, const char *name) 2788b411b363SPhilipp Reisner { 2789b411b363SPhilipp Reisner struct crypto_hash *tfm; 2790b411b363SPhilipp Reisner 2791b411b363SPhilipp Reisner if (!alg[0]) 2792b411b363SPhilipp Reisner return NULL; 2793b411b363SPhilipp Reisner 2794b411b363SPhilipp Reisner tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC); 2795b411b363SPhilipp Reisner if (IS_ERR(tfm)) { 2796b411b363SPhilipp Reisner dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n", 2797b411b363SPhilipp Reisner alg, name, PTR_ERR(tfm)); 2798b411b363SPhilipp Reisner return tfm; 2799b411b363SPhilipp Reisner } 2800b411b363SPhilipp Reisner if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) { 2801b411b363SPhilipp Reisner crypto_free_hash(tfm); 2802b411b363SPhilipp Reisner dev_err(DEV, "\"%s\" is not a digest (%s)\n", alg, name); 2803b411b363SPhilipp Reisner return ERR_PTR(-EINVAL); 2804b411b363SPhilipp Reisner } 2805b411b363SPhilipp Reisner return tfm; 2806b411b363SPhilipp Reisner } 2807b411b363SPhilipp Reisner 2808b411b363SPhilipp Reisner static int receive_SyncParam(struct drbd_conf *mdev, struct p_header *h) 2809b411b363SPhilipp Reisner { 2810b411b363SPhilipp Reisner int ok = TRUE; 2811b411b363SPhilipp Reisner struct p_rs_param_89 *p = (struct p_rs_param_89 *)h; 2812b411b363SPhilipp Reisner unsigned int header_size, data_size, exp_max_sz; 2813b411b363SPhilipp Reisner struct crypto_hash *verify_tfm = NULL; 2814b411b363SPhilipp Reisner struct crypto_hash *csums_tfm = NULL; 2815b411b363SPhilipp Reisner const int apv = mdev->agreed_pro_version; 2816b411b363SPhilipp Reisner 2817b411b363SPhilipp Reisner exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param) 2818b411b363SPhilipp Reisner : apv == 88 ? 
sizeof(struct p_rs_param) 2819b411b363SPhilipp Reisner + SHARED_SECRET_MAX 2820b411b363SPhilipp Reisner : /* 89 */ sizeof(struct p_rs_param_89); 2821b411b363SPhilipp Reisner 2822b411b363SPhilipp Reisner if (h->length > exp_max_sz) { 2823b411b363SPhilipp Reisner dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n", 2824b411b363SPhilipp Reisner h->length, exp_max_sz); 2825b411b363SPhilipp Reisner return FALSE; 2826b411b363SPhilipp Reisner } 2827b411b363SPhilipp Reisner 2828b411b363SPhilipp Reisner if (apv <= 88) { 2829b411b363SPhilipp Reisner header_size = sizeof(struct p_rs_param) - sizeof(*h); 2830b411b363SPhilipp Reisner data_size = h->length - header_size; 2831b411b363SPhilipp Reisner } else /* apv >= 89 */ { 2832b411b363SPhilipp Reisner header_size = sizeof(struct p_rs_param_89) - sizeof(*h); 2833b411b363SPhilipp Reisner data_size = h->length - header_size; 2834b411b363SPhilipp Reisner D_ASSERT(data_size == 0); 2835b411b363SPhilipp Reisner } 2836b411b363SPhilipp Reisner 2837b411b363SPhilipp Reisner /* initialize verify_alg and csums_alg */ 2838b411b363SPhilipp Reisner memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX); 2839b411b363SPhilipp Reisner 2840b411b363SPhilipp Reisner if (drbd_recv(mdev, h->payload, header_size) != header_size) 2841b411b363SPhilipp Reisner return FALSE; 2842b411b363SPhilipp Reisner 2843b411b363SPhilipp Reisner mdev->sync_conf.rate = be32_to_cpu(p->rate); 2844b411b363SPhilipp Reisner 2845b411b363SPhilipp Reisner if (apv >= 88) { 2846b411b363SPhilipp Reisner if (apv == 88) { 2847b411b363SPhilipp Reisner if (data_size > SHARED_SECRET_MAX) { 2848b411b363SPhilipp Reisner dev_err(DEV, "verify-alg too long, " 2849b411b363SPhilipp Reisner "peer wants %u, accepting only %u byte\n", 2850b411b363SPhilipp Reisner data_size, SHARED_SECRET_MAX); 2851b411b363SPhilipp Reisner return FALSE; 2852b411b363SPhilipp Reisner } 2853b411b363SPhilipp Reisner 2854b411b363SPhilipp Reisner if (drbd_recv(mdev, p->verify_alg, data_size) != data_size) 2855b411b363SPhilipp Reisner return FALSE; 2856b411b363SPhilipp Reisner 2857b411b363SPhilipp Reisner /* we expect NUL terminated string */ 2858b411b363SPhilipp Reisner /* but just in case someone tries to be evil */ 2859b411b363SPhilipp Reisner D_ASSERT(p->verify_alg[data_size-1] == 0); 2860b411b363SPhilipp Reisner p->verify_alg[data_size-1] = 0; 2861b411b363SPhilipp Reisner 2862b411b363SPhilipp Reisner } else /* apv >= 89 */ { 2863b411b363SPhilipp Reisner /* we still expect NUL terminated strings */ 2864b411b363SPhilipp Reisner /* but just in case someone tries to be evil */ 2865b411b363SPhilipp Reisner D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0); 2866b411b363SPhilipp Reisner D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0); 2867b411b363SPhilipp Reisner p->verify_alg[SHARED_SECRET_MAX-1] = 0; 2868b411b363SPhilipp Reisner p->csums_alg[SHARED_SECRET_MAX-1] = 0; 2869b411b363SPhilipp Reisner } 2870b411b363SPhilipp Reisner 2871b411b363SPhilipp Reisner if (strcmp(mdev->sync_conf.verify_alg, p->verify_alg)) { 2872b411b363SPhilipp Reisner if (mdev->state.conn == C_WF_REPORT_PARAMS) { 2873b411b363SPhilipp Reisner dev_err(DEV, "Different verify-alg settings. 
me=\"%s\" peer=\"%s\"\n", 2874b411b363SPhilipp Reisner mdev->sync_conf.verify_alg, p->verify_alg); 2875b411b363SPhilipp Reisner goto disconnect; 2876b411b363SPhilipp Reisner } 2877b411b363SPhilipp Reisner verify_tfm = drbd_crypto_alloc_digest_safe(mdev, 2878b411b363SPhilipp Reisner p->verify_alg, "verify-alg"); 2879b411b363SPhilipp Reisner if (IS_ERR(verify_tfm)) { 2880b411b363SPhilipp Reisner verify_tfm = NULL; 2881b411b363SPhilipp Reisner goto disconnect; 2882b411b363SPhilipp Reisner } 2883b411b363SPhilipp Reisner } 2884b411b363SPhilipp Reisner 2885b411b363SPhilipp Reisner if (apv >= 89 && strcmp(mdev->sync_conf.csums_alg, p->csums_alg)) { 2886b411b363SPhilipp Reisner if (mdev->state.conn == C_WF_REPORT_PARAMS) { 2887b411b363SPhilipp Reisner dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n", 2888b411b363SPhilipp Reisner mdev->sync_conf.csums_alg, p->csums_alg); 2889b411b363SPhilipp Reisner goto disconnect; 2890b411b363SPhilipp Reisner } 2891b411b363SPhilipp Reisner csums_tfm = drbd_crypto_alloc_digest_safe(mdev, 2892b411b363SPhilipp Reisner p->csums_alg, "csums-alg"); 2893b411b363SPhilipp Reisner if (IS_ERR(csums_tfm)) { 2894b411b363SPhilipp Reisner csums_tfm = NULL; 2895b411b363SPhilipp Reisner goto disconnect; 2896b411b363SPhilipp Reisner } 2897b411b363SPhilipp Reisner } 2898b411b363SPhilipp Reisner 2899b411b363SPhilipp Reisner 2900b411b363SPhilipp Reisner spin_lock(&mdev->peer_seq_lock); 2901b411b363SPhilipp Reisner /* lock against drbd_nl_syncer_conf() */ 2902b411b363SPhilipp Reisner if (verify_tfm) { 2903b411b363SPhilipp Reisner strcpy(mdev->sync_conf.verify_alg, p->verify_alg); 2904b411b363SPhilipp Reisner mdev->sync_conf.verify_alg_len = strlen(p->verify_alg) + 1; 2905b411b363SPhilipp Reisner crypto_free_hash(mdev->verify_tfm); 2906b411b363SPhilipp Reisner mdev->verify_tfm = verify_tfm; 2907b411b363SPhilipp Reisner dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg); 2908b411b363SPhilipp Reisner } 2909b411b363SPhilipp Reisner if (csums_tfm) { 2910b411b363SPhilipp Reisner strcpy(mdev->sync_conf.csums_alg, p->csums_alg); 2911b411b363SPhilipp Reisner mdev->sync_conf.csums_alg_len = strlen(p->csums_alg) + 1; 2912b411b363SPhilipp Reisner crypto_free_hash(mdev->csums_tfm); 2913b411b363SPhilipp Reisner mdev->csums_tfm = csums_tfm; 2914b411b363SPhilipp Reisner dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg); 2915b411b363SPhilipp Reisner } 2916b411b363SPhilipp Reisner spin_unlock(&mdev->peer_seq_lock); 2917b411b363SPhilipp Reisner } 2918b411b363SPhilipp Reisner 2919b411b363SPhilipp Reisner return ok; 2920b411b363SPhilipp Reisner disconnect: 2921b411b363SPhilipp Reisner /* just for completeness: actually not needed, 2922b411b363SPhilipp Reisner * as this is not reached if csums_tfm was ok. 
*/ 2923b411b363SPhilipp Reisner crypto_free_hash(csums_tfm); 2924b411b363SPhilipp Reisner /* but free the verify_tfm again, if csums_tfm did not work out */ 2925b411b363SPhilipp Reisner crypto_free_hash(verify_tfm); 2926b411b363SPhilipp Reisner drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); 2927b411b363SPhilipp Reisner return FALSE; 2928b411b363SPhilipp Reisner } 2929b411b363SPhilipp Reisner 2930b411b363SPhilipp Reisner static void drbd_setup_order_type(struct drbd_conf *mdev, int peer) 2931b411b363SPhilipp Reisner { 2932b411b363SPhilipp Reisner /* sorry, we currently have no working implementation 2933b411b363SPhilipp Reisner * of distributed TCQ */ 2934b411b363SPhilipp Reisner } 2935b411b363SPhilipp Reisner 2936b411b363SPhilipp Reisner /* warn if the arguments differ by more than 12.5% */ 2937b411b363SPhilipp Reisner static void warn_if_differ_considerably(struct drbd_conf *mdev, 2938b411b363SPhilipp Reisner const char *s, sector_t a, sector_t b) 2939b411b363SPhilipp Reisner { 2940b411b363SPhilipp Reisner sector_t d; 2941b411b363SPhilipp Reisner if (a == 0 || b == 0) 2942b411b363SPhilipp Reisner return; 2943b411b363SPhilipp Reisner d = (a > b) ? (a - b) : (b - a); 2944b411b363SPhilipp Reisner if (d > (a>>3) || d > (b>>3)) 2945b411b363SPhilipp Reisner dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s, 2946b411b363SPhilipp Reisner (unsigned long long)a, (unsigned long long)b); 2947b411b363SPhilipp Reisner } 2948b411b363SPhilipp Reisner 2949b411b363SPhilipp Reisner static int receive_sizes(struct drbd_conf *mdev, struct p_header *h) 2950b411b363SPhilipp Reisner { 2951b411b363SPhilipp Reisner struct p_sizes *p = (struct p_sizes *)h; 2952b411b363SPhilipp Reisner enum determine_dev_size dd = unchanged; 2953b411b363SPhilipp Reisner unsigned int max_seg_s; 2954b411b363SPhilipp Reisner sector_t p_size, p_usize, my_usize; 2955b411b363SPhilipp Reisner int ldsc = 0; /* local disk size changed */ 2956e89b591cSPhilipp Reisner enum dds_flags ddsf; 2957b411b363SPhilipp Reisner 2958b411b363SPhilipp Reisner ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE; 2959b411b363SPhilipp Reisner if (drbd_recv(mdev, h->payload, h->length) != h->length) 2960b411b363SPhilipp Reisner return FALSE; 2961b411b363SPhilipp Reisner 2962b411b363SPhilipp Reisner p_size = be64_to_cpu(p->d_size); 2963b411b363SPhilipp Reisner p_usize = be64_to_cpu(p->u_size); 2964b411b363SPhilipp Reisner 2965b411b363SPhilipp Reisner if (p_size == 0 && mdev->state.disk == D_DISKLESS) { 2966b411b363SPhilipp Reisner dev_err(DEV, "some backing storage is needed\n"); 2967b411b363SPhilipp Reisner drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); 2968b411b363SPhilipp Reisner return FALSE; 2969b411b363SPhilipp Reisner } 2970b411b363SPhilipp Reisner 2971b411b363SPhilipp Reisner /* just store the peer's disk size for now. 2972b411b363SPhilipp Reisner * we still need to figure out whether we accept that. */ 2973b411b363SPhilipp Reisner mdev->p_size = p_size; 2974b411b363SPhilipp Reisner 2975b411b363SPhilipp Reisner #define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? 
l : min(l, r)) 2976b411b363SPhilipp Reisner if (get_ldev(mdev)) { 2977b411b363SPhilipp Reisner warn_if_differ_considerably(mdev, "lower level device sizes", 2978b411b363SPhilipp Reisner p_size, drbd_get_max_capacity(mdev->ldev)); 2979b411b363SPhilipp Reisner warn_if_differ_considerably(mdev, "user requested size", 2980b411b363SPhilipp Reisner p_usize, mdev->ldev->dc.disk_size); 2981b411b363SPhilipp Reisner 2982b411b363SPhilipp Reisner /* if this is the first connect, or an otherwise expected 2983b411b363SPhilipp Reisner * param exchange, choose the minimum */ 2984b411b363SPhilipp Reisner if (mdev->state.conn == C_WF_REPORT_PARAMS) 2985b411b363SPhilipp Reisner p_usize = min_not_zero((sector_t)mdev->ldev->dc.disk_size, 2986b411b363SPhilipp Reisner p_usize); 2987b411b363SPhilipp Reisner 2988b411b363SPhilipp Reisner my_usize = mdev->ldev->dc.disk_size; 2989b411b363SPhilipp Reisner 2990b411b363SPhilipp Reisner if (mdev->ldev->dc.disk_size != p_usize) { 2991b411b363SPhilipp Reisner mdev->ldev->dc.disk_size = p_usize; 2992b411b363SPhilipp Reisner dev_info(DEV, "Peer sets u_size to %lu sectors\n", 2993b411b363SPhilipp Reisner (unsigned long)mdev->ldev->dc.disk_size); 2994b411b363SPhilipp Reisner } 2995b411b363SPhilipp Reisner 2996b411b363SPhilipp Reisner /* Never shrink a device with usable data during connect. 2997b411b363SPhilipp Reisner But allow online shrinking if we are connected. */ 2998a393db6fSPhilipp Reisner if (drbd_new_dev_size(mdev, mdev->ldev, 0) < 2999b411b363SPhilipp Reisner drbd_get_capacity(mdev->this_bdev) && 3000b411b363SPhilipp Reisner mdev->state.disk >= D_OUTDATED && 3001b411b363SPhilipp Reisner mdev->state.conn < C_CONNECTED) { 3002b411b363SPhilipp Reisner dev_err(DEV, "The peer's disk size is too small!\n"); 3003b411b363SPhilipp Reisner drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); 3004b411b363SPhilipp Reisner mdev->ldev->dc.disk_size = my_usize; 3005b411b363SPhilipp Reisner put_ldev(mdev); 3006b411b363SPhilipp Reisner return FALSE; 3007b411b363SPhilipp Reisner } 3008b411b363SPhilipp Reisner put_ldev(mdev); 3009b411b363SPhilipp Reisner } 3010b411b363SPhilipp Reisner #undef min_not_zero 3011b411b363SPhilipp Reisner 3012e89b591cSPhilipp Reisner ddsf = be16_to_cpu(p->dds_flags); 3013b411b363SPhilipp Reisner if (get_ldev(mdev)) { 3014e89b591cSPhilipp Reisner dd = drbd_determin_dev_size(mdev, ddsf); 3015b411b363SPhilipp Reisner put_ldev(mdev); 3016b411b363SPhilipp Reisner if (dd == dev_size_error) 3017b411b363SPhilipp Reisner return FALSE; 3018b411b363SPhilipp Reisner drbd_md_sync(mdev); 3019b411b363SPhilipp Reisner } else { 3020b411b363SPhilipp Reisner /* I am diskless, need to accept the peer's size. 
*/ 3021b411b363SPhilipp Reisner drbd_set_my_capacity(mdev, p_size); 3022b411b363SPhilipp Reisner } 3023b411b363SPhilipp Reisner 3024b411b363SPhilipp Reisner if (get_ldev(mdev)) { 3025b411b363SPhilipp Reisner if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) { 3026b411b363SPhilipp Reisner mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev); 3027b411b363SPhilipp Reisner ldsc = 1; 3028b411b363SPhilipp Reisner } 3029b411b363SPhilipp Reisner 3030a1c88d0dSLars Ellenberg if (mdev->agreed_pro_version < 94) 3031b411b363SPhilipp Reisner max_seg_s = be32_to_cpu(p->max_segment_size); 3032a1c88d0dSLars Ellenberg else /* drbd 8.3.8 onwards */ 3033a1c88d0dSLars Ellenberg max_seg_s = DRBD_MAX_SEGMENT_SIZE; 3034a1c88d0dSLars Ellenberg 3035b411b363SPhilipp Reisner if (max_seg_s != queue_max_segment_size(mdev->rq_queue)) 3036b411b363SPhilipp Reisner drbd_setup_queue_param(mdev, max_seg_s); 3037b411b363SPhilipp Reisner 3038e89b591cSPhilipp Reisner drbd_setup_order_type(mdev, be16_to_cpu(p->queue_order_type)); 3039b411b363SPhilipp Reisner put_ldev(mdev); 3040b411b363SPhilipp Reisner } 3041b411b363SPhilipp Reisner 3042b411b363SPhilipp Reisner if (mdev->state.conn > C_WF_REPORT_PARAMS) { 3043b411b363SPhilipp Reisner if (be64_to_cpu(p->c_size) != 3044b411b363SPhilipp Reisner drbd_get_capacity(mdev->this_bdev) || ldsc) { 3045b411b363SPhilipp Reisner /* we have different sizes, probably peer 3046b411b363SPhilipp Reisner * needs to know my new size... */ 3047e89b591cSPhilipp Reisner drbd_send_sizes(mdev, 0, ddsf); 3048b411b363SPhilipp Reisner } 3049b411b363SPhilipp Reisner if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) || 3050b411b363SPhilipp Reisner (dd == grew && mdev->state.conn == C_CONNECTED)) { 3051b411b363SPhilipp Reisner if (mdev->state.pdsk >= D_INCONSISTENT && 3052e89b591cSPhilipp Reisner mdev->state.disk >= D_INCONSISTENT) { 3053e89b591cSPhilipp Reisner if (ddsf & DDSF_NO_RESYNC) 3054e89b591cSPhilipp Reisner dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n"); 3055b411b363SPhilipp Reisner else 3056e89b591cSPhilipp Reisner resync_after_online_grow(mdev); 3057e89b591cSPhilipp Reisner } else 3058b411b363SPhilipp Reisner set_bit(RESYNC_AFTER_NEG, &mdev->flags); 3059b411b363SPhilipp Reisner } 3060b411b363SPhilipp Reisner } 3061b411b363SPhilipp Reisner 3062b411b363SPhilipp Reisner return TRUE; 3063b411b363SPhilipp Reisner } 3064b411b363SPhilipp Reisner 3065b411b363SPhilipp Reisner static int receive_uuids(struct drbd_conf *mdev, struct p_header *h) 3066b411b363SPhilipp Reisner { 3067b411b363SPhilipp Reisner struct p_uuids *p = (struct p_uuids *)h; 3068b411b363SPhilipp Reisner u64 *p_uuid; 3069b411b363SPhilipp Reisner int i; 3070b411b363SPhilipp Reisner 3071b411b363SPhilipp Reisner ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE; 3072b411b363SPhilipp Reisner if (drbd_recv(mdev, h->payload, h->length) != h->length) 3073b411b363SPhilipp Reisner return FALSE; 3074b411b363SPhilipp Reisner 3075b411b363SPhilipp Reisner p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO); 3076b411b363SPhilipp Reisner 3077b411b363SPhilipp Reisner for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++) 3078b411b363SPhilipp Reisner p_uuid[i] = be64_to_cpu(p->uuid[i]); 3079b411b363SPhilipp Reisner 3080b411b363SPhilipp Reisner kfree(mdev->p_uuid); 3081b411b363SPhilipp Reisner mdev->p_uuid = p_uuid; 3082b411b363SPhilipp Reisner 3083b411b363SPhilipp Reisner if (mdev->state.conn < C_CONNECTED && 3084b411b363SPhilipp Reisner mdev->state.disk < D_INCONSISTENT && 
3085b411b363SPhilipp Reisner mdev->state.role == R_PRIMARY && 3086b411b363SPhilipp Reisner (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) { 3087b411b363SPhilipp Reisner dev_err(DEV, "Can only connect to data with current UUID=%016llX\n", 3088b411b363SPhilipp Reisner (unsigned long long)mdev->ed_uuid); 3089b411b363SPhilipp Reisner drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); 3090b411b363SPhilipp Reisner return FALSE; 3091b411b363SPhilipp Reisner } 3092b411b363SPhilipp Reisner 3093b411b363SPhilipp Reisner if (get_ldev(mdev)) { 3094b411b363SPhilipp Reisner int skip_initial_sync = 3095b411b363SPhilipp Reisner mdev->state.conn == C_CONNECTED && 3096b411b363SPhilipp Reisner mdev->agreed_pro_version >= 90 && 3097b411b363SPhilipp Reisner mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && 3098b411b363SPhilipp Reisner (p_uuid[UI_FLAGS] & 8); 3099b411b363SPhilipp Reisner if (skip_initial_sync) { 3100b411b363SPhilipp Reisner dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n"); 3101b411b363SPhilipp Reisner drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write, 3102b411b363SPhilipp Reisner "clear_n_write from receive_uuids"); 3103b411b363SPhilipp Reisner _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]); 3104b411b363SPhilipp Reisner _drbd_uuid_set(mdev, UI_BITMAP, 0); 3105b411b363SPhilipp Reisner _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE), 3106b411b363SPhilipp Reisner CS_VERBOSE, NULL); 3107b411b363SPhilipp Reisner drbd_md_sync(mdev); 3108b411b363SPhilipp Reisner } 3109b411b363SPhilipp Reisner put_ldev(mdev); 3110b411b363SPhilipp Reisner } 3111b411b363SPhilipp Reisner 3112b411b363SPhilipp Reisner /* Before we test for the disk state, we should wait until a possibly 3113b411b363SPhilipp Reisner ongoing cluster-wide state change has finished. That is important if 3114b411b363SPhilipp Reisner we are primary and are detaching from our disk. We need to see the 3115b411b363SPhilipp Reisner new disk state... */ 3116b411b363SPhilipp Reisner wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags)); 3117b411b363SPhilipp Reisner if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT) 3118b411b363SPhilipp Reisner drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]); 3119b411b363SPhilipp Reisner 3120b411b363SPhilipp Reisner return TRUE; 3121b411b363SPhilipp Reisner } 3122b411b363SPhilipp Reisner 3123b411b363SPhilipp Reisner /** 3124b411b363SPhilipp Reisner * convert_state() - Converts the peer's view of the cluster state to our point of view 3125b411b363SPhilipp Reisner * @ps: The state as seen by the peer.
3126b411b363SPhilipp Reisner */ 3127b411b363SPhilipp Reisner static union drbd_state convert_state(union drbd_state ps) 3128b411b363SPhilipp Reisner { 3129b411b363SPhilipp Reisner union drbd_state ms; 3130b411b363SPhilipp Reisner 3131b411b363SPhilipp Reisner static enum drbd_conns c_tab[] = { 3132b411b363SPhilipp Reisner [C_CONNECTED] = C_CONNECTED, 3133b411b363SPhilipp Reisner 3134b411b363SPhilipp Reisner [C_STARTING_SYNC_S] = C_STARTING_SYNC_T, 3135b411b363SPhilipp Reisner [C_STARTING_SYNC_T] = C_STARTING_SYNC_S, 3136b411b363SPhilipp Reisner [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */ 3137b411b363SPhilipp Reisner [C_VERIFY_S] = C_VERIFY_T, 3138b411b363SPhilipp Reisner [C_MASK] = C_MASK, 3139b411b363SPhilipp Reisner }; 3140b411b363SPhilipp Reisner 3141b411b363SPhilipp Reisner ms.i = ps.i; 3142b411b363SPhilipp Reisner 3143b411b363SPhilipp Reisner ms.conn = c_tab[ps.conn]; 3144b411b363SPhilipp Reisner ms.peer = ps.role; 3145b411b363SPhilipp Reisner ms.role = ps.peer; 3146b411b363SPhilipp Reisner ms.pdsk = ps.disk; 3147b411b363SPhilipp Reisner ms.disk = ps.pdsk; 3148b411b363SPhilipp Reisner ms.peer_isp = (ps.aftr_isp | ps.user_isp); 3149b411b363SPhilipp Reisner 3150b411b363SPhilipp Reisner return ms; 3151b411b363SPhilipp Reisner } 3152b411b363SPhilipp Reisner 3153b411b363SPhilipp Reisner static int receive_req_state(struct drbd_conf *mdev, struct p_header *h) 3154b411b363SPhilipp Reisner { 3155b411b363SPhilipp Reisner struct p_req_state *p = (struct p_req_state *)h; 3156b411b363SPhilipp Reisner union drbd_state mask, val; 3157b411b363SPhilipp Reisner int rv; 3158b411b363SPhilipp Reisner 3159b411b363SPhilipp Reisner ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE; 3160b411b363SPhilipp Reisner if (drbd_recv(mdev, h->payload, h->length) != h->length) 3161b411b363SPhilipp Reisner return FALSE; 3162b411b363SPhilipp Reisner 3163b411b363SPhilipp Reisner mask.i = be32_to_cpu(p->mask); 3164b411b363SPhilipp Reisner val.i = be32_to_cpu(p->val); 3165b411b363SPhilipp Reisner 3166b411b363SPhilipp Reisner if (test_bit(DISCARD_CONCURRENT, &mdev->flags) && 3167b411b363SPhilipp Reisner test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) { 3168b411b363SPhilipp Reisner drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG); 3169b411b363SPhilipp Reisner return TRUE; 3170b411b363SPhilipp Reisner } 3171b411b363SPhilipp Reisner 3172b411b363SPhilipp Reisner mask = convert_state(mask); 3173b411b363SPhilipp Reisner val = convert_state(val); 3174b411b363SPhilipp Reisner 3175b411b363SPhilipp Reisner rv = drbd_change_state(mdev, CS_VERBOSE, mask, val); 3176b411b363SPhilipp Reisner 3177b411b363SPhilipp Reisner drbd_send_sr_reply(mdev, rv); 3178b411b363SPhilipp Reisner drbd_md_sync(mdev); 3179b411b363SPhilipp Reisner 3180b411b363SPhilipp Reisner return TRUE; 3181b411b363SPhilipp Reisner } 3182b411b363SPhilipp Reisner 3183b411b363SPhilipp Reisner static int receive_state(struct drbd_conf *mdev, struct p_header *h) 3184b411b363SPhilipp Reisner { 3185b411b363SPhilipp Reisner struct p_state *p = (struct p_state *)h; 3186b411b363SPhilipp Reisner enum drbd_conns nconn, oconn; 3187b411b363SPhilipp Reisner union drbd_state ns, peer_state; 3188b411b363SPhilipp Reisner enum drbd_disk_state real_peer_disk; 3189b411b363SPhilipp Reisner int rv; 3190b411b363SPhilipp Reisner 3191b411b363SPhilipp Reisner ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) 3192b411b363SPhilipp Reisner return FALSE; 3193b411b363SPhilipp Reisner 3194b411b363SPhilipp Reisner if (drbd_recv(mdev, h->payload, h->length) != h->length) 3195b411b363SPhilipp Reisner 
return FALSE; 3196b411b363SPhilipp Reisner 3197b411b363SPhilipp Reisner peer_state.i = be32_to_cpu(p->state); 3198b411b363SPhilipp Reisner 3199b411b363SPhilipp Reisner real_peer_disk = peer_state.disk; 3200b411b363SPhilipp Reisner if (peer_state.disk == D_NEGOTIATING) { 3201b411b363SPhilipp Reisner real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT; 3202b411b363SPhilipp Reisner dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk)); 3203b411b363SPhilipp Reisner } 3204b411b363SPhilipp Reisner 3205b411b363SPhilipp Reisner spin_lock_irq(&mdev->req_lock); 3206b411b363SPhilipp Reisner retry: 3207b411b363SPhilipp Reisner oconn = nconn = mdev->state.conn; 3208b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 3209b411b363SPhilipp Reisner 3210b411b363SPhilipp Reisner if (nconn == C_WF_REPORT_PARAMS) 3211b411b363SPhilipp Reisner nconn = C_CONNECTED; 3212b411b363SPhilipp Reisner 3213b411b363SPhilipp Reisner if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING && 3214b411b363SPhilipp Reisner get_ldev_if_state(mdev, D_NEGOTIATING)) { 3215b411b363SPhilipp Reisner int cr; /* consider resync */ 3216b411b363SPhilipp Reisner 3217b411b363SPhilipp Reisner /* if we established a new connection */ 3218b411b363SPhilipp Reisner cr = (oconn < C_CONNECTED); 3219b411b363SPhilipp Reisner /* if we had an established connection 3220b411b363SPhilipp Reisner * and one of the nodes newly attaches a disk */ 3221b411b363SPhilipp Reisner cr |= (oconn == C_CONNECTED && 3222b411b363SPhilipp Reisner (peer_state.disk == D_NEGOTIATING || 3223b411b363SPhilipp Reisner mdev->state.disk == D_NEGOTIATING)); 3224b411b363SPhilipp Reisner /* if we have both been inconsistent, and the peer has been 3225b411b363SPhilipp Reisner * forced to be UpToDate with --overwrite-data */ 3226b411b363SPhilipp Reisner cr |= test_bit(CONSIDER_RESYNC, &mdev->flags); 3227b411b363SPhilipp Reisner /* if we had been plain connected, and the admin requested to 3228b411b363SPhilipp Reisner * start a sync by "invalidate" or "invalidate-remote" */ 3229b411b363SPhilipp Reisner cr |= (oconn == C_CONNECTED && 3230b411b363SPhilipp Reisner (peer_state.conn >= C_STARTING_SYNC_S && 3231b411b363SPhilipp Reisner peer_state.conn <= C_WF_BITMAP_T)); 3232b411b363SPhilipp Reisner 3233b411b363SPhilipp Reisner if (cr) 3234b411b363SPhilipp Reisner nconn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk); 3235b411b363SPhilipp Reisner 3236b411b363SPhilipp Reisner put_ldev(mdev); 3237b411b363SPhilipp Reisner if (nconn == C_MASK) { 3238580b9767SLars Ellenberg nconn = C_CONNECTED; 3239b411b363SPhilipp Reisner if (mdev->state.disk == D_NEGOTIATING) { 3240b411b363SPhilipp Reisner drbd_force_state(mdev, NS(disk, D_DISKLESS)); 3241b411b363SPhilipp Reisner } else if (peer_state.disk == D_NEGOTIATING) { 3242b411b363SPhilipp Reisner dev_err(DEV, "Disk attach process on the peer node was aborted.\n"); 3243b411b363SPhilipp Reisner peer_state.disk = D_DISKLESS; 3244580b9767SLars Ellenberg real_peer_disk = D_DISKLESS; 3245b411b363SPhilipp Reisner } else { 3246cf14c2e9SPhilipp Reisner if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags)) 3247cf14c2e9SPhilipp Reisner return FALSE; 3248b411b363SPhilipp Reisner D_ASSERT(oconn == C_WF_REPORT_PARAMS); 3249b411b363SPhilipp Reisner drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); 3250b411b363SPhilipp Reisner return FALSE; 3251b411b363SPhilipp Reisner } 3252b411b363SPhilipp Reisner } 3253b411b363SPhilipp Reisner } 3254b411b363SPhilipp Reisner 3255b411b363SPhilipp Reisner 
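	/* Merge the peer's reported state into our own below, under req_lock.
	 * If our connection state changed while the resync handshake above ran
	 * without the lock held, start over at the "retry" label. */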
spin_lock_irq(&mdev->req_lock); 3256b411b363SPhilipp Reisner if (mdev->state.conn != oconn) 3257b411b363SPhilipp Reisner goto retry; 3258b411b363SPhilipp Reisner clear_bit(CONSIDER_RESYNC, &mdev->flags); 3259b411b363SPhilipp Reisner ns.i = mdev->state.i; 3260b411b363SPhilipp Reisner ns.conn = nconn; 3261b411b363SPhilipp Reisner ns.peer = peer_state.role; 3262b411b363SPhilipp Reisner ns.pdsk = real_peer_disk; 3263b411b363SPhilipp Reisner ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp); 3264b411b363SPhilipp Reisner if ((nconn == C_CONNECTED || nconn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING) 3265b411b363SPhilipp Reisner ns.disk = mdev->new_state_tmp.disk; 3266b411b363SPhilipp Reisner 3267b411b363SPhilipp Reisner rv = _drbd_set_state(mdev, ns, CS_VERBOSE | CS_HARD, NULL); 3268b411b363SPhilipp Reisner ns = mdev->state; 3269b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 3270b411b363SPhilipp Reisner 3271b411b363SPhilipp Reisner if (rv < SS_SUCCESS) { 3272b411b363SPhilipp Reisner drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); 3273b411b363SPhilipp Reisner return FALSE; 3274b411b363SPhilipp Reisner } 3275b411b363SPhilipp Reisner 3276b411b363SPhilipp Reisner if (oconn > C_WF_REPORT_PARAMS) { 3277b411b363SPhilipp Reisner if (nconn > C_CONNECTED && peer_state.conn <= C_CONNECTED && 3278b411b363SPhilipp Reisner peer_state.disk != D_NEGOTIATING ) { 3279b411b363SPhilipp Reisner /* we want resync, peer has not yet decided to sync... */ 3280b411b363SPhilipp Reisner /* Nowadays only used when forcing a node into primary role and 3281b411b363SPhilipp Reisner setting its disk to UpToDate with that */ 3282b411b363SPhilipp Reisner drbd_send_uuids(mdev); 3283b411b363SPhilipp Reisner drbd_send_state(mdev); 3284b411b363SPhilipp Reisner } 3285b411b363SPhilipp Reisner } 3286b411b363SPhilipp Reisner 3287b411b363SPhilipp Reisner mdev->net_conf->want_lose = 0; 3288b411b363SPhilipp Reisner 3289b411b363SPhilipp Reisner drbd_md_sync(mdev); /* update connected indicator, la_size, ... 
*/ 3290b411b363SPhilipp Reisner 3291b411b363SPhilipp Reisner return TRUE; 3292b411b363SPhilipp Reisner } 3293b411b363SPhilipp Reisner 3294b411b363SPhilipp Reisner static int receive_sync_uuid(struct drbd_conf *mdev, struct p_header *h) 3295b411b363SPhilipp Reisner { 3296b411b363SPhilipp Reisner struct p_rs_uuid *p = (struct p_rs_uuid *)h; 3297b411b363SPhilipp Reisner 3298b411b363SPhilipp Reisner wait_event(mdev->misc_wait, 3299b411b363SPhilipp Reisner mdev->state.conn == C_WF_SYNC_UUID || 3300b411b363SPhilipp Reisner mdev->state.conn < C_CONNECTED || 3301b411b363SPhilipp Reisner mdev->state.disk < D_NEGOTIATING); 3302b411b363SPhilipp Reisner 3303b411b363SPhilipp Reisner /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */ 3304b411b363SPhilipp Reisner 3305b411b363SPhilipp Reisner ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE; 3306b411b363SPhilipp Reisner if (drbd_recv(mdev, h->payload, h->length) != h->length) 3307b411b363SPhilipp Reisner return FALSE; 3308b411b363SPhilipp Reisner 3309b411b363SPhilipp Reisner /* Here the _drbd_uuid_ functions are right, current should 3310b411b363SPhilipp Reisner _not_ be rotated into the history */ 3311b411b363SPhilipp Reisner if (get_ldev_if_state(mdev, D_NEGOTIATING)) { 3312b411b363SPhilipp Reisner _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid)); 3313b411b363SPhilipp Reisner _drbd_uuid_set(mdev, UI_BITMAP, 0UL); 3314b411b363SPhilipp Reisner 3315b411b363SPhilipp Reisner drbd_start_resync(mdev, C_SYNC_TARGET); 3316b411b363SPhilipp Reisner 3317b411b363SPhilipp Reisner put_ldev(mdev); 3318b411b363SPhilipp Reisner } else 3319b411b363SPhilipp Reisner dev_err(DEV, "Ignoring SyncUUID packet!\n"); 3320b411b363SPhilipp Reisner 3321b411b363SPhilipp Reisner return TRUE; 3322b411b363SPhilipp Reisner } 3323b411b363SPhilipp Reisner 3324b411b363SPhilipp Reisner enum receive_bitmap_ret { OK, DONE, FAILED }; 3325b411b363SPhilipp Reisner 3326b411b363SPhilipp Reisner static enum receive_bitmap_ret 3327b411b363SPhilipp Reisner receive_bitmap_plain(struct drbd_conf *mdev, struct p_header *h, 3328b411b363SPhilipp Reisner unsigned long *buffer, struct bm_xfer_ctx *c) 3329b411b363SPhilipp Reisner { 3330b411b363SPhilipp Reisner unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset); 3331b411b363SPhilipp Reisner unsigned want = num_words * sizeof(long); 3332b411b363SPhilipp Reisner 3333b411b363SPhilipp Reisner if (want != h->length) { 3334b411b363SPhilipp Reisner dev_err(DEV, "%s:want (%u) != h->length (%u)\n", __func__, want, h->length); 3335b411b363SPhilipp Reisner return FAILED; 3336b411b363SPhilipp Reisner } 3337b411b363SPhilipp Reisner if (want == 0) 3338b411b363SPhilipp Reisner return DONE; 3339b411b363SPhilipp Reisner if (drbd_recv(mdev, buffer, want) != want) 3340b411b363SPhilipp Reisner return FAILED; 3341b411b363SPhilipp Reisner 3342b411b363SPhilipp Reisner drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer); 3343b411b363SPhilipp Reisner 3344b411b363SPhilipp Reisner c->word_offset += num_words; 3345b411b363SPhilipp Reisner c->bit_offset = c->word_offset * BITS_PER_LONG; 3346b411b363SPhilipp Reisner if (c->bit_offset > c->bm_bits) 3347b411b363SPhilipp Reisner c->bit_offset = c->bm_bits; 3348b411b363SPhilipp Reisner 3349b411b363SPhilipp Reisner return OK; 3350b411b363SPhilipp Reisner } 3351b411b363SPhilipp Reisner 3352b411b363SPhilipp Reisner static enum receive_bitmap_ret 3353b411b363SPhilipp Reisner recv_bm_rle_bits(struct drbd_conf *mdev, 3354b411b363SPhilipp Reisner struct p_compressed_bm *p, 3355b411b363SPhilipp 
Reisner struct bm_xfer_ctx *c) 3356b411b363SPhilipp Reisner { 3357b411b363SPhilipp Reisner struct bitstream bs; 3358b411b363SPhilipp Reisner u64 look_ahead; 3359b411b363SPhilipp Reisner u64 rl; 3360b411b363SPhilipp Reisner u64 tmp; 3361b411b363SPhilipp Reisner unsigned long s = c->bit_offset; 3362b411b363SPhilipp Reisner unsigned long e; 3363b411b363SPhilipp Reisner int len = p->head.length - (sizeof(*p) - sizeof(p->head)); 3364b411b363SPhilipp Reisner int toggle = DCBP_get_start(p); 3365b411b363SPhilipp Reisner int have; 3366b411b363SPhilipp Reisner int bits; 3367b411b363SPhilipp Reisner 3368b411b363SPhilipp Reisner bitstream_init(&bs, p->code, len, DCBP_get_pad_bits(p)); 3369b411b363SPhilipp Reisner 3370b411b363SPhilipp Reisner bits = bitstream_get_bits(&bs, &look_ahead, 64); 3371b411b363SPhilipp Reisner if (bits < 0) 3372b411b363SPhilipp Reisner return FAILED; 3373b411b363SPhilipp Reisner 3374b411b363SPhilipp Reisner for (have = bits; have > 0; s += rl, toggle = !toggle) { 3375b411b363SPhilipp Reisner bits = vli_decode_bits(&rl, look_ahead); 3376b411b363SPhilipp Reisner if (bits <= 0) 3377b411b363SPhilipp Reisner return FAILED; 3378b411b363SPhilipp Reisner 3379b411b363SPhilipp Reisner if (toggle) { 3380b411b363SPhilipp Reisner e = s + rl -1; 3381b411b363SPhilipp Reisner if (e >= c->bm_bits) { 3382b411b363SPhilipp Reisner dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e); 3383b411b363SPhilipp Reisner return FAILED; 3384b411b363SPhilipp Reisner } 3385b411b363SPhilipp Reisner _drbd_bm_set_bits(mdev, s, e); 3386b411b363SPhilipp Reisner } 3387b411b363SPhilipp Reisner 3388b411b363SPhilipp Reisner if (have < bits) { 3389b411b363SPhilipp Reisner dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n", 3390b411b363SPhilipp Reisner have, bits, look_ahead, 3391b411b363SPhilipp Reisner (unsigned int)(bs.cur.b - p->code), 3392b411b363SPhilipp Reisner (unsigned int)bs.buf_len); 3393b411b363SPhilipp Reisner return FAILED; 3394b411b363SPhilipp Reisner } 3395b411b363SPhilipp Reisner look_ahead >>= bits; 3396b411b363SPhilipp Reisner have -= bits; 3397b411b363SPhilipp Reisner 3398b411b363SPhilipp Reisner bits = bitstream_get_bits(&bs, &tmp, 64 - have); 3399b411b363SPhilipp Reisner if (bits < 0) 3400b411b363SPhilipp Reisner return FAILED; 3401b411b363SPhilipp Reisner look_ahead |= tmp << have; 3402b411b363SPhilipp Reisner have += bits; 3403b411b363SPhilipp Reisner } 3404b411b363SPhilipp Reisner 3405b411b363SPhilipp Reisner c->bit_offset = s; 3406b411b363SPhilipp Reisner bm_xfer_ctx_bit_to_word_offset(c); 3407b411b363SPhilipp Reisner 3408b411b363SPhilipp Reisner return (s == c->bm_bits) ? DONE : OK; 3409b411b363SPhilipp Reisner } 3410b411b363SPhilipp Reisner 3411b411b363SPhilipp Reisner static enum receive_bitmap_ret 3412b411b363SPhilipp Reisner decode_bitmap_c(struct drbd_conf *mdev, 3413b411b363SPhilipp Reisner struct p_compressed_bm *p, 3414b411b363SPhilipp Reisner struct bm_xfer_ctx *c) 3415b411b363SPhilipp Reisner { 3416b411b363SPhilipp Reisner if (DCBP_get_code(p) == RLE_VLI_Bits) 3417b411b363SPhilipp Reisner return recv_bm_rle_bits(mdev, p, c); 3418b411b363SPhilipp Reisner 3419b411b363SPhilipp Reisner /* other variants had been implemented for evaluation, 3420b411b363SPhilipp Reisner * but have been dropped as this one turned out to be "best" 3421b411b363SPhilipp Reisner * during all our tests. 
 */

	dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
	drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
	return FAILED;
}

void INFO_bm_xfer_stats(struct drbd_conf *mdev,
		const char *direction, struct bm_xfer_ctx *c)
{
	/* what would it take to transfer it "plaintext" */
	unsigned plain = sizeof(struct p_header) *
		((c->bm_words+BM_PACKET_WORDS-1)/BM_PACKET_WORDS+1)
		+ c->bm_words * sizeof(long);
	unsigned total = c->bytes[0] + c->bytes[1];
	unsigned r;

	/* total cannot be zero. but just in case: */
	if (total == 0)
		return;

	/* don't report if not compressed */
	if (total >= plain)
		return;

	/* total < plain. check for overflow, still */
	r = (total > UINT_MAX/1000) ? (total / (plain/1000))
				    : (1000 * total / plain);

	if (r > 1000)
		r = 1000;

	r = 1000 - r;
	dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
	     "total %u; compression: %u.%u%%\n",
			direction,
			c->bytes[1], c->packets[1],
			c->bytes[0], c->packets[0],
			total, r/10, r % 10);
}

/* Since we are processing the bitfield from lower addresses to higher,
   it does not matter whether we process it in 32 bit or 64 bit chunks,
   as long as it is little endian. (Understand it as a byte stream,
   beginning with the lowest byte...) If we used big endian,
   we would need to process it from the highest address to the lowest,
   in order to be agnostic to the 32 vs 64 bit issue.

   returns 0 on failure, 1 if we successfully received it. */
static int receive_bitmap(struct drbd_conf *mdev, struct p_header *h)
{
	struct bm_xfer_ctx c;
	void *buffer;
	enum receive_bitmap_ret ret;
	int ok = FALSE;

	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));

	drbd_bm_lock(mdev, "receive bitmap");

	/* maybe we should use some per thread scratch page,
	 * and allocate that during initial device creation?
 */
	buffer = (unsigned long *) __get_free_page(GFP_NOIO);
	if (!buffer) {
		dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
		goto out;
	}

	c = (struct bm_xfer_ctx) {
		.bm_bits = drbd_bm_bits(mdev),
		.bm_words = drbd_bm_words(mdev),
	};

	do {
		if (h->command == P_BITMAP) {
			ret = receive_bitmap_plain(mdev, h, buffer, &c);
		} else if (h->command == P_COMPRESSED_BITMAP) {
			/* MAYBE: sanity check that we speak proto >= 90,
			 * and the feature is enabled! */
			struct p_compressed_bm *p;

			if (h->length > BM_PACKET_PAYLOAD_BYTES) {
				dev_err(DEV, "ReportCBitmap packet too large\n");
				goto out;
			}
			/* use the page buffer */
			p = buffer;
			memcpy(p, h, sizeof(*h));
			if (drbd_recv(mdev, p->head.payload, h->length) != h->length)
				goto out;
			if (p->head.length <= (sizeof(*p) - sizeof(p->head))) {
				dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", p->head.length);
				/* take the common error exit, so the page is freed
				 * and the bitmap lock is dropped on this path, too */
				goto out;
			}
			ret = decode_bitmap_c(mdev, p, &c);
		} else {
			dev_warn(DEV, "receive_bitmap: h->command neither ReportBitMap nor ReportCBitMap (is 0x%x)", h->command);
			goto out;
		}

		c.packets[h->command == P_BITMAP]++;
		c.bytes[h->command == P_BITMAP] += sizeof(struct p_header) + h->length;

		if (ret != OK)
			break;

		if (!drbd_recv_header(mdev, h))
			goto out;
	} while (ret == OK);
	if (ret == FAILED)
		goto out;

	INFO_bm_xfer_stats(mdev, "receive", &c);

	if (mdev->state.conn == C_WF_BITMAP_T) {
		ok = !drbd_send_bitmap(mdev);
		if (!ok)
			goto out;
		/* Omit CS_ORDERED with this state transition to avoid deadlocks.
*/ 3540b411b363SPhilipp Reisner ok = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE); 3541b411b363SPhilipp Reisner D_ASSERT(ok == SS_SUCCESS); 3542b411b363SPhilipp Reisner } else if (mdev->state.conn != C_WF_BITMAP_S) { 3543b411b363SPhilipp Reisner /* admin may have requested C_DISCONNECTING, 3544b411b363SPhilipp Reisner * other threads may have noticed network errors */ 3545b411b363SPhilipp Reisner dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n", 3546b411b363SPhilipp Reisner drbd_conn_str(mdev->state.conn)); 3547b411b363SPhilipp Reisner } 3548b411b363SPhilipp Reisner 3549b411b363SPhilipp Reisner ok = TRUE; 3550b411b363SPhilipp Reisner out: 3551b411b363SPhilipp Reisner drbd_bm_unlock(mdev); 3552b411b363SPhilipp Reisner if (ok && mdev->state.conn == C_WF_BITMAP_S) 3553b411b363SPhilipp Reisner drbd_start_resync(mdev, C_SYNC_SOURCE); 3554b411b363SPhilipp Reisner free_page((unsigned long) buffer); 3555b411b363SPhilipp Reisner return ok; 3556b411b363SPhilipp Reisner } 3557b411b363SPhilipp Reisner 3558b411b363SPhilipp Reisner static int receive_skip(struct drbd_conf *mdev, struct p_header *h) 3559b411b363SPhilipp Reisner { 3560b411b363SPhilipp Reisner /* TODO zero copy sink :) */ 3561b411b363SPhilipp Reisner static char sink[128]; 3562b411b363SPhilipp Reisner int size, want, r; 3563b411b363SPhilipp Reisner 3564b411b363SPhilipp Reisner dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n", 3565b411b363SPhilipp Reisner h->command, h->length); 3566b411b363SPhilipp Reisner 3567b411b363SPhilipp Reisner size = h->length; 3568b411b363SPhilipp Reisner while (size > 0) { 3569b411b363SPhilipp Reisner want = min_t(int, size, sizeof(sink)); 3570b411b363SPhilipp Reisner r = drbd_recv(mdev, sink, want); 3571b411b363SPhilipp Reisner ERR_IF(r <= 0) break; 3572b411b363SPhilipp Reisner size -= r; 3573b411b363SPhilipp Reisner } 3574b411b363SPhilipp Reisner return size == 0; 3575b411b363SPhilipp Reisner } 3576b411b363SPhilipp Reisner 3577b411b363SPhilipp Reisner static int receive_UnplugRemote(struct drbd_conf *mdev, struct p_header *h) 3578b411b363SPhilipp Reisner { 3579b411b363SPhilipp Reisner if (mdev->state.disk >= D_INCONSISTENT) 3580b411b363SPhilipp Reisner drbd_kick_lo(mdev); 3581b411b363SPhilipp Reisner 3582b411b363SPhilipp Reisner /* Make sure we've acked all the TCP data associated 3583b411b363SPhilipp Reisner * with the data requests being unplugged */ 3584b411b363SPhilipp Reisner drbd_tcp_quickack(mdev->data.socket); 3585b411b363SPhilipp Reisner 3586b411b363SPhilipp Reisner return TRUE; 3587b411b363SPhilipp Reisner } 3588b411b363SPhilipp Reisner 35890ced55a3SPhilipp Reisner static void timeval_sub_us(struct timeval* tv, unsigned int us) 35900ced55a3SPhilipp Reisner { 35910ced55a3SPhilipp Reisner tv->tv_sec -= us / 1000000; 35920ced55a3SPhilipp Reisner us = us % 1000000; 35930ced55a3SPhilipp Reisner if (tv->tv_usec > us) { 35940ced55a3SPhilipp Reisner tv->tv_usec += 1000000; 35950ced55a3SPhilipp Reisner tv->tv_sec--; 35960ced55a3SPhilipp Reisner } 35970ced55a3SPhilipp Reisner tv->tv_usec -= us; 35980ced55a3SPhilipp Reisner } 35990ced55a3SPhilipp Reisner 36000ced55a3SPhilipp Reisner static void got_delay_probe(struct drbd_conf *mdev, int from, struct p_delay_probe *p) 36010ced55a3SPhilipp Reisner { 36020ced55a3SPhilipp Reisner struct delay_probe *dp; 36030ced55a3SPhilipp Reisner struct list_head *le; 36040ced55a3SPhilipp Reisner struct timeval now; 36050ced55a3SPhilipp Reisner int seq_num; 36060ced55a3SPhilipp Reisner int offset; 36070ced55a3SPhilipp Reisner int 
data_delay; 36080ced55a3SPhilipp Reisner 36090ced55a3SPhilipp Reisner seq_num = be32_to_cpu(p->seq_num); 36100ced55a3SPhilipp Reisner offset = be32_to_cpu(p->offset); 36110ced55a3SPhilipp Reisner 36120ced55a3SPhilipp Reisner spin_lock(&mdev->peer_seq_lock); 36130ced55a3SPhilipp Reisner if (!list_empty(&mdev->delay_probes)) { 36140ced55a3SPhilipp Reisner if (from == USE_DATA_SOCKET) 36150ced55a3SPhilipp Reisner le = mdev->delay_probes.next; 36160ced55a3SPhilipp Reisner else 36170ced55a3SPhilipp Reisner le = mdev->delay_probes.prev; 36180ced55a3SPhilipp Reisner 36190ced55a3SPhilipp Reisner dp = list_entry(le, struct delay_probe, list); 36200ced55a3SPhilipp Reisner 36210ced55a3SPhilipp Reisner if (dp->seq_num == seq_num) { 36220ced55a3SPhilipp Reisner list_del(le); 36230ced55a3SPhilipp Reisner spin_unlock(&mdev->peer_seq_lock); 36240ced55a3SPhilipp Reisner do_gettimeofday(&now); 36250ced55a3SPhilipp Reisner timeval_sub_us(&now, offset); 36260ced55a3SPhilipp Reisner data_delay = 36270ced55a3SPhilipp Reisner now.tv_usec - dp->time.tv_usec + 36280ced55a3SPhilipp Reisner (now.tv_sec - dp->time.tv_sec) * 1000000; 36290ced55a3SPhilipp Reisner 36300ced55a3SPhilipp Reisner if (data_delay > 0) 36310ced55a3SPhilipp Reisner mdev->data_delay = data_delay; 36320ced55a3SPhilipp Reisner 36330ced55a3SPhilipp Reisner kfree(dp); 36340ced55a3SPhilipp Reisner return; 36350ced55a3SPhilipp Reisner } 36360ced55a3SPhilipp Reisner 36370ced55a3SPhilipp Reisner if (dp->seq_num > seq_num) { 36380ced55a3SPhilipp Reisner spin_unlock(&mdev->peer_seq_lock); 36390ced55a3SPhilipp Reisner dev_warn(DEV, "Previous allocation failure of struct delay_probe?\n"); 36400ced55a3SPhilipp Reisner return; /* Do not alloca a struct delay_probe.... */ 36410ced55a3SPhilipp Reisner } 36420ced55a3SPhilipp Reisner } 36430ced55a3SPhilipp Reisner spin_unlock(&mdev->peer_seq_lock); 36440ced55a3SPhilipp Reisner 36450ced55a3SPhilipp Reisner dp = kmalloc(sizeof(struct delay_probe), GFP_NOIO); 36460ced55a3SPhilipp Reisner if (!dp) { 36470ced55a3SPhilipp Reisner dev_warn(DEV, "Failed to allocate a struct delay_probe, do not worry.\n"); 36480ced55a3SPhilipp Reisner return; 36490ced55a3SPhilipp Reisner } 36500ced55a3SPhilipp Reisner 36510ced55a3SPhilipp Reisner dp->seq_num = seq_num; 36520ced55a3SPhilipp Reisner do_gettimeofday(&dp->time); 36530ced55a3SPhilipp Reisner timeval_sub_us(&dp->time, offset); 36540ced55a3SPhilipp Reisner 36550ced55a3SPhilipp Reisner spin_lock(&mdev->peer_seq_lock); 36560ced55a3SPhilipp Reisner if (from == USE_DATA_SOCKET) 36570ced55a3SPhilipp Reisner list_add(&dp->list, &mdev->delay_probes); 36580ced55a3SPhilipp Reisner else 36590ced55a3SPhilipp Reisner list_add_tail(&dp->list, &mdev->delay_probes); 36600ced55a3SPhilipp Reisner spin_unlock(&mdev->peer_seq_lock); 36610ced55a3SPhilipp Reisner } 36620ced55a3SPhilipp Reisner 36630ced55a3SPhilipp Reisner static int receive_delay_probe(struct drbd_conf *mdev, struct p_header *h) 36640ced55a3SPhilipp Reisner { 36650ced55a3SPhilipp Reisner struct p_delay_probe *p = (struct p_delay_probe *)h; 36660ced55a3SPhilipp Reisner 36670ced55a3SPhilipp Reisner ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE; 36680ced55a3SPhilipp Reisner if (drbd_recv(mdev, h->payload, h->length) != h->length) 36690ced55a3SPhilipp Reisner return FALSE; 36700ced55a3SPhilipp Reisner 36710ced55a3SPhilipp Reisner got_delay_probe(mdev, USE_DATA_SOCKET, p); 36720ced55a3SPhilipp Reisner return TRUE; 36730ced55a3SPhilipp Reisner } 36740ced55a3SPhilipp Reisner 3675b411b363SPhilipp Reisner typedef int 
(*drbd_cmd_handler_f)(struct drbd_conf *, struct p_header *); 3676b411b363SPhilipp Reisner 3677b411b363SPhilipp Reisner static drbd_cmd_handler_f drbd_default_handler[] = { 3678b411b363SPhilipp Reisner [P_DATA] = receive_Data, 3679b411b363SPhilipp Reisner [P_DATA_REPLY] = receive_DataReply, 3680b411b363SPhilipp Reisner [P_RS_DATA_REPLY] = receive_RSDataReply, 3681b411b363SPhilipp Reisner [P_BARRIER] = receive_Barrier, 3682b411b363SPhilipp Reisner [P_BITMAP] = receive_bitmap, 3683b411b363SPhilipp Reisner [P_COMPRESSED_BITMAP] = receive_bitmap, 3684b411b363SPhilipp Reisner [P_UNPLUG_REMOTE] = receive_UnplugRemote, 3685b411b363SPhilipp Reisner [P_DATA_REQUEST] = receive_DataRequest, 3686b411b363SPhilipp Reisner [P_RS_DATA_REQUEST] = receive_DataRequest, 3687b411b363SPhilipp Reisner [P_SYNC_PARAM] = receive_SyncParam, 3688b411b363SPhilipp Reisner [P_SYNC_PARAM89] = receive_SyncParam, 3689b411b363SPhilipp Reisner [P_PROTOCOL] = receive_protocol, 3690b411b363SPhilipp Reisner [P_UUIDS] = receive_uuids, 3691b411b363SPhilipp Reisner [P_SIZES] = receive_sizes, 3692b411b363SPhilipp Reisner [P_STATE] = receive_state, 3693b411b363SPhilipp Reisner [P_STATE_CHG_REQ] = receive_req_state, 3694b411b363SPhilipp Reisner [P_SYNC_UUID] = receive_sync_uuid, 3695b411b363SPhilipp Reisner [P_OV_REQUEST] = receive_DataRequest, 3696b411b363SPhilipp Reisner [P_OV_REPLY] = receive_DataRequest, 3697b411b363SPhilipp Reisner [P_CSUM_RS_REQUEST] = receive_DataRequest, 36980ced55a3SPhilipp Reisner [P_DELAY_PROBE] = receive_delay_probe, 3699b411b363SPhilipp Reisner /* anything missing from this table is in 3700b411b363SPhilipp Reisner * the asender_tbl, see get_asender_cmd */ 3701b411b363SPhilipp Reisner [P_MAX_CMD] = NULL, 3702b411b363SPhilipp Reisner }; 3703b411b363SPhilipp Reisner 3704b411b363SPhilipp Reisner static drbd_cmd_handler_f *drbd_cmd_handler = drbd_default_handler; 3705b411b363SPhilipp Reisner static drbd_cmd_handler_f *drbd_opt_cmd_handler; 3706b411b363SPhilipp Reisner 3707b411b363SPhilipp Reisner static void drbdd(struct drbd_conf *mdev) 3708b411b363SPhilipp Reisner { 3709b411b363SPhilipp Reisner drbd_cmd_handler_f handler; 3710b411b363SPhilipp Reisner struct p_header *header = &mdev->data.rbuf.header; 3711b411b363SPhilipp Reisner 3712b411b363SPhilipp Reisner while (get_t_state(&mdev->receiver) == Running) { 3713b411b363SPhilipp Reisner drbd_thread_current_set_cpu(mdev); 37140b33a916SLars Ellenberg if (!drbd_recv_header(mdev, header)) { 37150b33a916SLars Ellenberg drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR)); 3716b411b363SPhilipp Reisner break; 37170b33a916SLars Ellenberg } 3718b411b363SPhilipp Reisner 3719b411b363SPhilipp Reisner if (header->command < P_MAX_CMD) 3720b411b363SPhilipp Reisner handler = drbd_cmd_handler[header->command]; 3721b411b363SPhilipp Reisner else if (P_MAY_IGNORE < header->command 3722b411b363SPhilipp Reisner && header->command < P_MAX_OPT_CMD) 3723b411b363SPhilipp Reisner handler = drbd_opt_cmd_handler[header->command-P_MAY_IGNORE]; 3724b411b363SPhilipp Reisner else if (header->command > P_MAX_OPT_CMD) 3725b411b363SPhilipp Reisner handler = receive_skip; 3726b411b363SPhilipp Reisner else 3727b411b363SPhilipp Reisner handler = NULL; 3728b411b363SPhilipp Reisner 3729b411b363SPhilipp Reisner if (unlikely(!handler)) { 3730b411b363SPhilipp Reisner dev_err(DEV, "unknown packet type %d, l: %d!\n", 3731b411b363SPhilipp Reisner header->command, header->length); 3732b411b363SPhilipp Reisner drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR)); 3733b411b363SPhilipp Reisner break; 
3734b411b363SPhilipp Reisner } 3735b411b363SPhilipp Reisner if (unlikely(!handler(mdev, header))) { 3736b411b363SPhilipp Reisner dev_err(DEV, "error receiving %s, l: %d!\n", 3737b411b363SPhilipp Reisner cmdname(header->command), header->length); 3738b411b363SPhilipp Reisner drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR)); 3739b411b363SPhilipp Reisner break; 3740b411b363SPhilipp Reisner } 3741b411b363SPhilipp Reisner } 3742b411b363SPhilipp Reisner } 3743b411b363SPhilipp Reisner 3744b411b363SPhilipp Reisner static void drbd_fail_pending_reads(struct drbd_conf *mdev) 3745b411b363SPhilipp Reisner { 3746b411b363SPhilipp Reisner struct hlist_head *slot; 3747b411b363SPhilipp Reisner struct hlist_node *pos; 3748b411b363SPhilipp Reisner struct hlist_node *tmp; 3749b411b363SPhilipp Reisner struct drbd_request *req; 3750b411b363SPhilipp Reisner int i; 3751b411b363SPhilipp Reisner 3752b411b363SPhilipp Reisner /* 3753b411b363SPhilipp Reisner * Application READ requests 3754b411b363SPhilipp Reisner */ 3755b411b363SPhilipp Reisner spin_lock_irq(&mdev->req_lock); 3756b411b363SPhilipp Reisner for (i = 0; i < APP_R_HSIZE; i++) { 3757b411b363SPhilipp Reisner slot = mdev->app_reads_hash+i; 3758b411b363SPhilipp Reisner hlist_for_each_entry_safe(req, pos, tmp, slot, colision) { 3759b411b363SPhilipp Reisner /* it may (but should not any longer!) 3760b411b363SPhilipp Reisner * be on the work queue; if that assert triggers, 3761b411b363SPhilipp Reisner * we need to also grab the 3762b411b363SPhilipp Reisner * spin_lock_irq(&mdev->data.work.q_lock); 3763b411b363SPhilipp Reisner * and list_del_init here. */ 3764b411b363SPhilipp Reisner D_ASSERT(list_empty(&req->w.list)); 3765b411b363SPhilipp Reisner /* It would be nice to complete outside of spinlock. 3766b411b363SPhilipp Reisner * But this is easier for now. 
*/ 3767b411b363SPhilipp Reisner _req_mod(req, connection_lost_while_pending); 3768b411b363SPhilipp Reisner } 3769b411b363SPhilipp Reisner } 3770b411b363SPhilipp Reisner for (i = 0; i < APP_R_HSIZE; i++) 3771b411b363SPhilipp Reisner if (!hlist_empty(mdev->app_reads_hash+i)) 3772b411b363SPhilipp Reisner dev_warn(DEV, "ASSERT FAILED: app_reads_hash[%d].first: " 3773b411b363SPhilipp Reisner "%p, should be NULL\n", i, mdev->app_reads_hash[i].first); 3774b411b363SPhilipp Reisner 3775b411b363SPhilipp Reisner memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void *)); 3776b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 3777b411b363SPhilipp Reisner } 3778b411b363SPhilipp Reisner 3779b411b363SPhilipp Reisner void drbd_flush_workqueue(struct drbd_conf *mdev) 3780b411b363SPhilipp Reisner { 3781b411b363SPhilipp Reisner struct drbd_wq_barrier barr; 3782b411b363SPhilipp Reisner 3783b411b363SPhilipp Reisner barr.w.cb = w_prev_work_done; 3784b411b363SPhilipp Reisner init_completion(&barr.done); 3785b411b363SPhilipp Reisner drbd_queue_work(&mdev->data.work, &barr.w); 3786b411b363SPhilipp Reisner wait_for_completion(&barr.done); 3787b411b363SPhilipp Reisner } 3788b411b363SPhilipp Reisner 3789b411b363SPhilipp Reisner static void drbd_disconnect(struct drbd_conf *mdev) 3790b411b363SPhilipp Reisner { 3791b411b363SPhilipp Reisner enum drbd_fencing_p fp; 3792b411b363SPhilipp Reisner union drbd_state os, ns; 3793b411b363SPhilipp Reisner int rv = SS_UNKNOWN_ERROR; 3794b411b363SPhilipp Reisner unsigned int i; 3795b411b363SPhilipp Reisner 3796b411b363SPhilipp Reisner if (mdev->state.conn == C_STANDALONE) 3797b411b363SPhilipp Reisner return; 3798b411b363SPhilipp Reisner if (mdev->state.conn >= C_WF_CONNECTION) 3799b411b363SPhilipp Reisner dev_err(DEV, "ASSERT FAILED cstate = %s, expected < WFConnection\n", 3800b411b363SPhilipp Reisner drbd_conn_str(mdev->state.conn)); 3801b411b363SPhilipp Reisner 3802b411b363SPhilipp Reisner /* asender does not clean up anything. it must not interfere, either */ 3803b411b363SPhilipp Reisner drbd_thread_stop(&mdev->asender); 3804b411b363SPhilipp Reisner drbd_free_sock(mdev); 3805b411b363SPhilipp Reisner 3806b411b363SPhilipp Reisner spin_lock_irq(&mdev->req_lock); 3807b411b363SPhilipp Reisner _drbd_wait_ee_list_empty(mdev, &mdev->active_ee); 3808b411b363SPhilipp Reisner _drbd_wait_ee_list_empty(mdev, &mdev->sync_ee); 3809b411b363SPhilipp Reisner _drbd_wait_ee_list_empty(mdev, &mdev->read_ee); 3810b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 3811b411b363SPhilipp Reisner 3812b411b363SPhilipp Reisner /* We do not have data structures that would allow us to 3813b411b363SPhilipp Reisner * get the rs_pending_cnt down to 0 again. 3814b411b363SPhilipp Reisner * * On C_SYNC_TARGET we do not have any data structures describing 3815b411b363SPhilipp Reisner * the pending RSDataRequest's we have sent. 3816b411b363SPhilipp Reisner * * On C_SYNC_SOURCE there is no data structure that tracks 3817b411b363SPhilipp Reisner * the P_RS_DATA_REPLY blocks that we sent to the SyncTarget. 3818b411b363SPhilipp Reisner * And no, it is not the sum of the reference counts in the 3819b411b363SPhilipp Reisner * resync_LRU. The resync_LRU tracks the whole operation including 3820b411b363SPhilipp Reisner * the disk-IO, while the rs_pending_cnt only tracks the blocks 3821b411b363SPhilipp Reisner * on the fly. 
*/ 3822b411b363SPhilipp Reisner drbd_rs_cancel_all(mdev); 3823b411b363SPhilipp Reisner mdev->rs_total = 0; 3824b411b363SPhilipp Reisner mdev->rs_failed = 0; 3825b411b363SPhilipp Reisner atomic_set(&mdev->rs_pending_cnt, 0); 3826b411b363SPhilipp Reisner wake_up(&mdev->misc_wait); 3827b411b363SPhilipp Reisner 3828b411b363SPhilipp Reisner /* make sure syncer is stopped and w_resume_next_sg queued */ 3829b411b363SPhilipp Reisner del_timer_sync(&mdev->resync_timer); 3830b411b363SPhilipp Reisner set_bit(STOP_SYNC_TIMER, &mdev->flags); 3831b411b363SPhilipp Reisner resync_timer_fn((unsigned long)mdev); 3832b411b363SPhilipp Reisner 3833b411b363SPhilipp Reisner /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier, 3834b411b363SPhilipp Reisner * w_make_resync_request etc. which may still be on the worker queue 3835b411b363SPhilipp Reisner * to be "canceled" */ 3836b411b363SPhilipp Reisner drbd_flush_workqueue(mdev); 3837b411b363SPhilipp Reisner 3838b411b363SPhilipp Reisner /* This also does reclaim_net_ee(). If we do this too early, we might 3839b411b363SPhilipp Reisner * miss some resync ee and pages.*/ 3840b411b363SPhilipp Reisner drbd_process_done_ee(mdev); 3841b411b363SPhilipp Reisner 3842b411b363SPhilipp Reisner kfree(mdev->p_uuid); 3843b411b363SPhilipp Reisner mdev->p_uuid = NULL; 3844b411b363SPhilipp Reisner 3845b411b363SPhilipp Reisner if (!mdev->state.susp) 3846b411b363SPhilipp Reisner tl_clear(mdev); 3847b411b363SPhilipp Reisner 3848b411b363SPhilipp Reisner drbd_fail_pending_reads(mdev); 3849b411b363SPhilipp Reisner 3850b411b363SPhilipp Reisner dev_info(DEV, "Connection closed\n"); 3851b411b363SPhilipp Reisner 3852b411b363SPhilipp Reisner drbd_md_sync(mdev); 3853b411b363SPhilipp Reisner 3854b411b363SPhilipp Reisner fp = FP_DONT_CARE; 3855b411b363SPhilipp Reisner if (get_ldev(mdev)) { 3856b411b363SPhilipp Reisner fp = mdev->ldev->dc.fencing; 3857b411b363SPhilipp Reisner put_ldev(mdev); 3858b411b363SPhilipp Reisner } 3859b411b363SPhilipp Reisner 3860b411b363SPhilipp Reisner if (mdev->state.role == R_PRIMARY) { 3861b411b363SPhilipp Reisner if (fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN) { 3862b411b363SPhilipp Reisner enum drbd_disk_state nps = drbd_try_outdate_peer(mdev); 3863b411b363SPhilipp Reisner drbd_request_state(mdev, NS(pdsk, nps)); 3864b411b363SPhilipp Reisner } 3865b411b363SPhilipp Reisner } 3866b411b363SPhilipp Reisner 3867b411b363SPhilipp Reisner spin_lock_irq(&mdev->req_lock); 3868b411b363SPhilipp Reisner os = mdev->state; 3869b411b363SPhilipp Reisner if (os.conn >= C_UNCONNECTED) { 3870b411b363SPhilipp Reisner /* Do not restart in case we are C_DISCONNECTING */ 3871b411b363SPhilipp Reisner ns = os; 3872b411b363SPhilipp Reisner ns.conn = C_UNCONNECTED; 3873b411b363SPhilipp Reisner rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL); 3874b411b363SPhilipp Reisner } 3875b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 3876b411b363SPhilipp Reisner 3877b411b363SPhilipp Reisner if (os.conn == C_DISCONNECTING) { 3878b411b363SPhilipp Reisner struct hlist_head *h; 3879b411b363SPhilipp Reisner wait_event(mdev->misc_wait, atomic_read(&mdev->net_cnt) == 0); 3880b411b363SPhilipp Reisner 3881b411b363SPhilipp Reisner /* we must not free the tl_hash 3882b411b363SPhilipp Reisner * while application io is still on the fly */ 3883b411b363SPhilipp Reisner wait_event(mdev->misc_wait, atomic_read(&mdev->ap_bio_cnt) == 0); 3884b411b363SPhilipp Reisner 3885b411b363SPhilipp Reisner spin_lock_irq(&mdev->req_lock); 3886b411b363SPhilipp Reisner /* paranoia code */ 
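	/* With no more net_conf references and no application I/O in flight
	 * (both waited for above), these hash tables should be empty;
	 * warn about any slot that is unexpectedly non-empty before the
	 * tables are freed below. */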
3887b411b363SPhilipp Reisner for (h = mdev->ee_hash; h < mdev->ee_hash + mdev->ee_hash_s; h++) 3888b411b363SPhilipp Reisner if (h->first) 3889b411b363SPhilipp Reisner dev_err(DEV, "ASSERT FAILED ee_hash[%u].first == %p, expected NULL\n", 3890b411b363SPhilipp Reisner (int)(h - mdev->ee_hash), h->first); 3891b411b363SPhilipp Reisner kfree(mdev->ee_hash); 3892b411b363SPhilipp Reisner mdev->ee_hash = NULL; 3893b411b363SPhilipp Reisner mdev->ee_hash_s = 0; 3894b411b363SPhilipp Reisner 3895b411b363SPhilipp Reisner /* paranoia code */ 3896b411b363SPhilipp Reisner for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++) 3897b411b363SPhilipp Reisner if (h->first) 3898b411b363SPhilipp Reisner dev_err(DEV, "ASSERT FAILED tl_hash[%u] == %p, expected NULL\n", 3899b411b363SPhilipp Reisner (int)(h - mdev->tl_hash), h->first); 3900b411b363SPhilipp Reisner kfree(mdev->tl_hash); 3901b411b363SPhilipp Reisner mdev->tl_hash = NULL; 3902b411b363SPhilipp Reisner mdev->tl_hash_s = 0; 3903b411b363SPhilipp Reisner spin_unlock_irq(&mdev->req_lock); 3904b411b363SPhilipp Reisner 3905b411b363SPhilipp Reisner crypto_free_hash(mdev->cram_hmac_tfm); 3906b411b363SPhilipp Reisner mdev->cram_hmac_tfm = NULL; 3907b411b363SPhilipp Reisner 3908b411b363SPhilipp Reisner kfree(mdev->net_conf); 3909b411b363SPhilipp Reisner mdev->net_conf = NULL; 3910b411b363SPhilipp Reisner drbd_request_state(mdev, NS(conn, C_STANDALONE)); 3911b411b363SPhilipp Reisner } 3912b411b363SPhilipp Reisner 3913b411b363SPhilipp Reisner /* tcp_close and release of sendpage pages can be deferred. I don't 3914b411b363SPhilipp Reisner * want to use SO_LINGER, because apparently it can be deferred for 3915b411b363SPhilipp Reisner * more than 20 seconds (longest time I checked). 3916b411b363SPhilipp Reisner * 3917b411b363SPhilipp Reisner * Actually we don't care for exactly when the network stack does its 3918b411b363SPhilipp Reisner * put_page(), but release our reference on these pages right here. 3919b411b363SPhilipp Reisner */ 3920b411b363SPhilipp Reisner i = drbd_release_ee(mdev, &mdev->net_ee); 3921b411b363SPhilipp Reisner if (i) 3922b411b363SPhilipp Reisner dev_info(DEV, "net_ee not empty, killed %u entries\n", i); 3923b411b363SPhilipp Reisner i = atomic_read(&mdev->pp_in_use); 3924b411b363SPhilipp Reisner if (i) 392545bb912bSLars Ellenberg dev_info(DEV, "pp_in_use = %d, expected 0\n", i); 3926b411b363SPhilipp Reisner 3927b411b363SPhilipp Reisner D_ASSERT(list_empty(&mdev->read_ee)); 3928b411b363SPhilipp Reisner D_ASSERT(list_empty(&mdev->active_ee)); 3929b411b363SPhilipp Reisner D_ASSERT(list_empty(&mdev->sync_ee)); 3930b411b363SPhilipp Reisner D_ASSERT(list_empty(&mdev->done_ee)); 3931b411b363SPhilipp Reisner 3932b411b363SPhilipp Reisner /* ok, no more ee's on the fly, it is safe to reset the epoch_size */ 3933b411b363SPhilipp Reisner atomic_set(&mdev->current_epoch->epoch_size, 0); 3934b411b363SPhilipp Reisner D_ASSERT(list_empty(&mdev->current_epoch->list)); 3935b411b363SPhilipp Reisner } 3936b411b363SPhilipp Reisner 3937b411b363SPhilipp Reisner /* 3938b411b363SPhilipp Reisner * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version 3939b411b363SPhilipp Reisner * we can agree on is stored in agreed_pro_version. 3940b411b363SPhilipp Reisner * 3941b411b363SPhilipp Reisner * feature flags and the reserved array should be enough room for future 3942b411b363SPhilipp Reisner * enhancements of the handshake protocol, and possible plugins... 
3943b411b363SPhilipp Reisner * 3944b411b363SPhilipp Reisner * for now, they are expected to be zero, but ignored. 3945b411b363SPhilipp Reisner */ 3946b411b363SPhilipp Reisner static int drbd_send_handshake(struct drbd_conf *mdev) 3947b411b363SPhilipp Reisner { 3948b411b363SPhilipp Reisner /* ASSERT current == mdev->receiver ... */ 3949b411b363SPhilipp Reisner struct p_handshake *p = &mdev->data.sbuf.handshake; 3950b411b363SPhilipp Reisner int ok; 3951b411b363SPhilipp Reisner 3952b411b363SPhilipp Reisner if (mutex_lock_interruptible(&mdev->data.mutex)) { 3953b411b363SPhilipp Reisner dev_err(DEV, "interrupted during initial handshake\n"); 3954b411b363SPhilipp Reisner return 0; /* interrupted. not ok. */ 3955b411b363SPhilipp Reisner } 3956b411b363SPhilipp Reisner 3957b411b363SPhilipp Reisner if (mdev->data.socket == NULL) { 3958b411b363SPhilipp Reisner mutex_unlock(&mdev->data.mutex); 3959b411b363SPhilipp Reisner return 0; 3960b411b363SPhilipp Reisner } 3961b411b363SPhilipp Reisner 3962b411b363SPhilipp Reisner memset(p, 0, sizeof(*p)); 3963b411b363SPhilipp Reisner p->protocol_min = cpu_to_be32(PRO_VERSION_MIN); 3964b411b363SPhilipp Reisner p->protocol_max = cpu_to_be32(PRO_VERSION_MAX); 3965b411b363SPhilipp Reisner ok = _drbd_send_cmd( mdev, mdev->data.socket, P_HAND_SHAKE, 3966b411b363SPhilipp Reisner (struct p_header *)p, sizeof(*p), 0 ); 3967b411b363SPhilipp Reisner mutex_unlock(&mdev->data.mutex); 3968b411b363SPhilipp Reisner return ok; 3969b411b363SPhilipp Reisner } 3970b411b363SPhilipp Reisner 3971b411b363SPhilipp Reisner /* 3972b411b363SPhilipp Reisner * return values: 3973b411b363SPhilipp Reisner * 1 yes, we have a valid connection 3974b411b363SPhilipp Reisner * 0 oops, did not work out, please try again 3975b411b363SPhilipp Reisner * -1 peer talks different language, 3976b411b363SPhilipp Reisner * no point in trying again, please go standalone. 3977b411b363SPhilipp Reisner */ 3978b411b363SPhilipp Reisner static int drbd_do_handshake(struct drbd_conf *mdev) 3979b411b363SPhilipp Reisner { 3980b411b363SPhilipp Reisner /* ASSERT current == mdev->receiver ... 
 */
	struct p_handshake *p = &mdev->data.rbuf.handshake;
	const int expect = sizeof(struct p_handshake)
			  -sizeof(struct p_header);
	int rv;

	rv = drbd_send_handshake(mdev);
	if (!rv)
		return 0;

	rv = drbd_recv_header(mdev, &p->head);
	if (!rv)
		return 0;

	if (p->head.command != P_HAND_SHAKE) {
		dev_err(DEV, "expected HandShake packet, received: %s (0x%04x)\n",
		     cmdname(p->head.command), p->head.command);
		return -1;
	}

	if (p->head.length != expect) {
		dev_err(DEV, "expected HandShake length: %u, received: %u\n",
		     expect, p->head.length);
		return -1;
	}

	rv = drbd_recv(mdev, &p->head.payload, expect);

	if (rv != expect) {
		dev_err(DEV, "short read receiving handshake packet: l=%u\n", rv);
		return 0;
	}

	p->protocol_min = be32_to_cpu(p->protocol_min);
	p->protocol_max = be32_to_cpu(p->protocol_max);
	if (p->protocol_max == 0)
		p->protocol_max = p->protocol_min;

	if (PRO_VERSION_MAX < p->protocol_min ||
	    PRO_VERSION_MIN > p->protocol_max)
		goto incompat;

	mdev->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);

	dev_info(DEV, "Handshake successful: "
	     "Agreed network protocol version %d\n", mdev->agreed_pro_version);

	return 1;

 incompat:
	dev_err(DEV, "incompatible DRBD dialects: "
	    "I support %d-%d, peer supports %d-%d\n",
	    PRO_VERSION_MIN, PRO_VERSION_MAX,
	    p->protocol_min, p->protocol_max);
	return -1;
}

#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
static int drbd_do_auth(struct drbd_conf *mdev)
{
	dev_err(DEV, "This kernel was built without CONFIG_CRYPTO_HMAC.\n");
	dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
	return -1;
}
#else
#define CHALLENGE_LEN	64

/* Return
value: 4048b10d96cbSJohannes Thoma 1 - auth succeeded, 4049b10d96cbSJohannes Thoma 0 - failed, try again (network error), 4050b10d96cbSJohannes Thoma -1 - auth failed, don't try again. 4051b10d96cbSJohannes Thoma */ 4052b10d96cbSJohannes Thoma 4053b411b363SPhilipp Reisner static int drbd_do_auth(struct drbd_conf *mdev) 4054b411b363SPhilipp Reisner { 4055b411b363SPhilipp Reisner char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */ 4056b411b363SPhilipp Reisner struct scatterlist sg; 4057b411b363SPhilipp Reisner char *response = NULL; 4058b411b363SPhilipp Reisner char *right_response = NULL; 4059b411b363SPhilipp Reisner char *peers_ch = NULL; 4060b411b363SPhilipp Reisner struct p_header p; 4061b411b363SPhilipp Reisner unsigned int key_len = strlen(mdev->net_conf->shared_secret); 4062b411b363SPhilipp Reisner unsigned int resp_size; 4063b411b363SPhilipp Reisner struct hash_desc desc; 4064b411b363SPhilipp Reisner int rv; 4065b411b363SPhilipp Reisner 4066b411b363SPhilipp Reisner desc.tfm = mdev->cram_hmac_tfm; 4067b411b363SPhilipp Reisner desc.flags = 0; 4068b411b363SPhilipp Reisner 4069b411b363SPhilipp Reisner rv = crypto_hash_setkey(mdev->cram_hmac_tfm, 4070b411b363SPhilipp Reisner (u8 *)mdev->net_conf->shared_secret, key_len); 4071b411b363SPhilipp Reisner if (rv) { 4072b411b363SPhilipp Reisner dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv); 4073b10d96cbSJohannes Thoma rv = -1; 4074b411b363SPhilipp Reisner goto fail; 4075b411b363SPhilipp Reisner } 4076b411b363SPhilipp Reisner 4077b411b363SPhilipp Reisner get_random_bytes(my_challenge, CHALLENGE_LEN); 4078b411b363SPhilipp Reisner 4079b411b363SPhilipp Reisner rv = drbd_send_cmd2(mdev, P_AUTH_CHALLENGE, my_challenge, CHALLENGE_LEN); 4080b411b363SPhilipp Reisner if (!rv) 4081b411b363SPhilipp Reisner goto fail; 4082b411b363SPhilipp Reisner 4083b411b363SPhilipp Reisner rv = drbd_recv_header(mdev, &p); 4084b411b363SPhilipp Reisner if (!rv) 4085b411b363SPhilipp Reisner goto fail; 4086b411b363SPhilipp Reisner 4087b411b363SPhilipp Reisner if (p.command != P_AUTH_CHALLENGE) { 4088b411b363SPhilipp Reisner dev_err(DEV, "expected AuthChallenge packet, received: %s (0x%04x)\n", 4089b411b363SPhilipp Reisner cmdname(p.command), p.command); 4090b411b363SPhilipp Reisner rv = 0; 4091b411b363SPhilipp Reisner goto fail; 4092b411b363SPhilipp Reisner } 4093b411b363SPhilipp Reisner 4094b411b363SPhilipp Reisner if (p.length > CHALLENGE_LEN*2) { 4095b411b363SPhilipp Reisner dev_err(DEV, "expected AuthChallenge payload too big.\n"); 4096b10d96cbSJohannes Thoma rv = -1; 4097b411b363SPhilipp Reisner goto fail; 4098b411b363SPhilipp Reisner } 4099b411b363SPhilipp Reisner 4100b411b363SPhilipp Reisner peers_ch = kmalloc(p.length, GFP_NOIO); 4101b411b363SPhilipp Reisner if (peers_ch == NULL) { 4102b411b363SPhilipp Reisner dev_err(DEV, "kmalloc of peers_ch failed\n"); 4103b10d96cbSJohannes Thoma rv = -1; 4104b411b363SPhilipp Reisner goto fail; 4105b411b363SPhilipp Reisner } 4106b411b363SPhilipp Reisner 4107b411b363SPhilipp Reisner rv = drbd_recv(mdev, peers_ch, p.length); 4108b411b363SPhilipp Reisner 4109b411b363SPhilipp Reisner if (rv != p.length) { 4110b411b363SPhilipp Reisner dev_err(DEV, "short read AuthChallenge: l=%u\n", rv); 4111b411b363SPhilipp Reisner rv = 0; 4112b411b363SPhilipp Reisner goto fail; 4113b411b363SPhilipp Reisner } 4114b411b363SPhilipp Reisner 4115b411b363SPhilipp Reisner resp_size = crypto_hash_digestsize(mdev->cram_hmac_tfm); 4116b411b363SPhilipp Reisner response = kmalloc(resp_size, GFP_NOIO); 4117b411b363SPhilipp Reisner if (response == NULL) 
{ 4118b411b363SPhilipp Reisner dev_err(DEV, "kmalloc of response failed\n"); 4119b10d96cbSJohannes Thoma rv = -1; 4120b411b363SPhilipp Reisner goto fail; 4121b411b363SPhilipp Reisner } 4122b411b363SPhilipp Reisner 4123b411b363SPhilipp Reisner sg_init_table(&sg, 1); 4124b411b363SPhilipp Reisner sg_set_buf(&sg, peers_ch, p.length); 4125b411b363SPhilipp Reisner 4126b411b363SPhilipp Reisner rv = crypto_hash_digest(&desc, &sg, sg.length, response); 4127b411b363SPhilipp Reisner if (rv) { 4128b411b363SPhilipp Reisner dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv); 4129b10d96cbSJohannes Thoma rv = -1; 4130b411b363SPhilipp Reisner goto fail; 4131b411b363SPhilipp Reisner } 4132b411b363SPhilipp Reisner 4133b411b363SPhilipp Reisner rv = drbd_send_cmd2(mdev, P_AUTH_RESPONSE, response, resp_size); 4134b411b363SPhilipp Reisner if (!rv) 4135b411b363SPhilipp Reisner goto fail; 4136b411b363SPhilipp Reisner 4137b411b363SPhilipp Reisner rv = drbd_recv_header(mdev, &p); 4138b411b363SPhilipp Reisner if (!rv) 4139b411b363SPhilipp Reisner goto fail; 4140b411b363SPhilipp Reisner 4141b411b363SPhilipp Reisner if (p.command != P_AUTH_RESPONSE) { 4142b411b363SPhilipp Reisner dev_err(DEV, "expected AuthResponse packet, received: %s (0x%04x)\n", 4143b411b363SPhilipp Reisner cmdname(p.command), p.command); 4144b411b363SPhilipp Reisner rv = 0; 4145b411b363SPhilipp Reisner goto fail; 4146b411b363SPhilipp Reisner } 4147b411b363SPhilipp Reisner 4148b411b363SPhilipp Reisner if (p.length != resp_size) { 4149b411b363SPhilipp Reisner dev_err(DEV, "expected AuthResponse payload of wrong size\n"); 4150b411b363SPhilipp Reisner rv = 0; 4151b411b363SPhilipp Reisner goto fail; 4152b411b363SPhilipp Reisner } 4153b411b363SPhilipp Reisner 4154b411b363SPhilipp Reisner rv = drbd_recv(mdev, response , resp_size); 4155b411b363SPhilipp Reisner 4156b411b363SPhilipp Reisner if (rv != resp_size) { 4157b411b363SPhilipp Reisner dev_err(DEV, "short read receiving AuthResponse: l=%u\n", rv); 4158b411b363SPhilipp Reisner rv = 0; 4159b411b363SPhilipp Reisner goto fail; 4160b411b363SPhilipp Reisner } 4161b411b363SPhilipp Reisner 4162b411b363SPhilipp Reisner right_response = kmalloc(resp_size, GFP_NOIO); 41632d1ee87dSJulia Lawall if (right_response == NULL) { 4164b411b363SPhilipp Reisner dev_err(DEV, "kmalloc of right_response failed\n"); 4165b10d96cbSJohannes Thoma rv = -1; 4166b411b363SPhilipp Reisner goto fail; 4167b411b363SPhilipp Reisner } 4168b411b363SPhilipp Reisner 4169b411b363SPhilipp Reisner sg_set_buf(&sg, my_challenge, CHALLENGE_LEN); 4170b411b363SPhilipp Reisner 4171b411b363SPhilipp Reisner rv = crypto_hash_digest(&desc, &sg, sg.length, right_response); 4172b411b363SPhilipp Reisner if (rv) { 4173b411b363SPhilipp Reisner dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv); 4174b10d96cbSJohannes Thoma rv = -1; 4175b411b363SPhilipp Reisner goto fail; 4176b411b363SPhilipp Reisner } 4177b411b363SPhilipp Reisner 4178b411b363SPhilipp Reisner rv = !memcmp(response, right_response, resp_size); 4179b411b363SPhilipp Reisner 4180b411b363SPhilipp Reisner if (rv) 4181b411b363SPhilipp Reisner dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n", 4182b411b363SPhilipp Reisner resp_size, mdev->net_conf->cram_hmac_alg); 4183b10d96cbSJohannes Thoma else 4184b10d96cbSJohannes Thoma rv = -1; 4185b411b363SPhilipp Reisner 4186b411b363SPhilipp Reisner fail: 4187b411b363SPhilipp Reisner kfree(peers_ch); 4188b411b363SPhilipp Reisner kfree(response); 4189b411b363SPhilipp Reisner kfree(right_response); 4190b411b363SPhilipp Reisner 
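	/* rv follows the convention documented above drbd_do_auth():
	 * 1 = peer authenticated, 0 = failed, try again (network error),
	 * -1 = auth failed, do not try again. */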
4191b411b363SPhilipp Reisner return rv; 4192b411b363SPhilipp Reisner } 4193b411b363SPhilipp Reisner #endif 4194b411b363SPhilipp Reisner 4195b411b363SPhilipp Reisner int drbdd_init(struct drbd_thread *thi) 4196b411b363SPhilipp Reisner { 4197b411b363SPhilipp Reisner struct drbd_conf *mdev = thi->mdev; 4198b411b363SPhilipp Reisner unsigned int minor = mdev_to_minor(mdev); 4199b411b363SPhilipp Reisner int h; 4200b411b363SPhilipp Reisner 4201b411b363SPhilipp Reisner sprintf(current->comm, "drbd%d_receiver", minor); 4202b411b363SPhilipp Reisner 4203b411b363SPhilipp Reisner dev_info(DEV, "receiver (re)started\n"); 4204b411b363SPhilipp Reisner 4205b411b363SPhilipp Reisner do { 4206b411b363SPhilipp Reisner h = drbd_connect(mdev); 4207b411b363SPhilipp Reisner if (h == 0) { 4208b411b363SPhilipp Reisner drbd_disconnect(mdev); 4209b411b363SPhilipp Reisner __set_current_state(TASK_INTERRUPTIBLE); 4210b411b363SPhilipp Reisner schedule_timeout(HZ); 4211b411b363SPhilipp Reisner } 4212b411b363SPhilipp Reisner if (h == -1) { 4213b411b363SPhilipp Reisner dev_warn(DEV, "Discarding network configuration.\n"); 4214b411b363SPhilipp Reisner drbd_force_state(mdev, NS(conn, C_DISCONNECTING)); 4215b411b363SPhilipp Reisner } 4216b411b363SPhilipp Reisner } while (h == 0); 4217b411b363SPhilipp Reisner 4218b411b363SPhilipp Reisner if (h > 0) { 4219b411b363SPhilipp Reisner if (get_net_conf(mdev)) { 4220b411b363SPhilipp Reisner drbdd(mdev); 4221b411b363SPhilipp Reisner put_net_conf(mdev); 4222b411b363SPhilipp Reisner } 4223b411b363SPhilipp Reisner } 4224b411b363SPhilipp Reisner 4225b411b363SPhilipp Reisner drbd_disconnect(mdev); 4226b411b363SPhilipp Reisner 4227b411b363SPhilipp Reisner dev_info(DEV, "receiver terminated\n"); 4228b411b363SPhilipp Reisner return 0; 4229b411b363SPhilipp Reisner } 4230b411b363SPhilipp Reisner 4231b411b363SPhilipp Reisner /* ********* acknowledge sender ******** */ 4232b411b363SPhilipp Reisner 4233b411b363SPhilipp Reisner static int got_RqSReply(struct drbd_conf *mdev, struct p_header *h) 4234b411b363SPhilipp Reisner { 4235b411b363SPhilipp Reisner struct p_req_state_reply *p = (struct p_req_state_reply *)h; 4236b411b363SPhilipp Reisner 4237b411b363SPhilipp Reisner int retcode = be32_to_cpu(p->retcode); 4238b411b363SPhilipp Reisner 4239b411b363SPhilipp Reisner if (retcode >= SS_SUCCESS) { 4240b411b363SPhilipp Reisner set_bit(CL_ST_CHG_SUCCESS, &mdev->flags); 4241b411b363SPhilipp Reisner } else { 4242b411b363SPhilipp Reisner set_bit(CL_ST_CHG_FAIL, &mdev->flags); 4243b411b363SPhilipp Reisner dev_err(DEV, "Requested state change failed by peer: %s (%d)\n", 4244b411b363SPhilipp Reisner drbd_set_st_err_str(retcode), retcode); 4245b411b363SPhilipp Reisner } 4246b411b363SPhilipp Reisner wake_up(&mdev->state_wait); 4247b411b363SPhilipp Reisner 4248b411b363SPhilipp Reisner return TRUE; 4249b411b363SPhilipp Reisner } 4250b411b363SPhilipp Reisner 4251b411b363SPhilipp Reisner static int got_Ping(struct drbd_conf *mdev, struct p_header *h) 4252b411b363SPhilipp Reisner { 4253b411b363SPhilipp Reisner return drbd_send_ping_ack(mdev); 4254b411b363SPhilipp Reisner 4255b411b363SPhilipp Reisner } 4256b411b363SPhilipp Reisner 4257b411b363SPhilipp Reisner static int got_PingAck(struct drbd_conf *mdev, struct p_header *h) 4258b411b363SPhilipp Reisner { 4259b411b363SPhilipp Reisner /* restore idle timeout */ 4260b411b363SPhilipp Reisner mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ; 4261309d1608SPhilipp Reisner if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags)) 4262309d1608SPhilipp Reisner 
static int got_PingAck(struct drbd_conf *mdev, struct p_header *h)
{
	/* restore idle timeout */
	mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
	if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags))
		wake_up(&mdev->misc_wait);

	return TRUE;
}

static int got_IsInSync(struct drbd_conf *mdev, struct p_header *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);
	int blksize = be32_to_cpu(p->blksize);

	D_ASSERT(mdev->agreed_pro_version >= 89);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	drbd_rs_complete_io(mdev, sector);
	drbd_set_in_sync(mdev, sector, blksize);
	/* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
	mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
	dec_rs_pending(mdev);

	return TRUE;
}

/* when we receive the ACK for a write request,
 * verify that we actually know about it */
static struct drbd_request *_ack_id_to_req(struct drbd_conf *mdev,
	u64 id, sector_t sector)
{
	struct hlist_head *slot = tl_hash_slot(mdev, sector);
	struct hlist_node *n;
	struct drbd_request *req;

	hlist_for_each_entry(req, n, slot, colision) {
		if ((unsigned long)req == (unsigned long)id) {
			if (req->sector != sector) {
				dev_err(DEV, "_ack_id_to_req: found req %p but it has "
				    "wrong sector (%llus versus %llus)\n", req,
				    (unsigned long long)req->sector,
				    (unsigned long long)sector);
				break;
			}
			return req;
		}
	}
	dev_err(DEV, "_ack_id_to_req: failed to find req %p, sector %llus in list\n",
		(void *)(unsigned long)id, (unsigned long long)sector);
	return NULL;
}

typedef struct drbd_request *(req_validator_fn)
	(struct drbd_conf *mdev, u64 id, sector_t sector);
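
/* Common tail of the ACK handlers: look up the request the peer's
 * block_id/sector pair refers to, apply the requested state transition
 * while holding req_lock, and complete the master bio outside the lock
 * if that transition finished it. */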
static int validate_req_change_req_state(struct drbd_conf *mdev,
	u64 id, sector_t sector, req_validator_fn validator,
	const char *func, enum drbd_req_event what)
{
	struct drbd_request *req;
	struct bio_and_error m;

	spin_lock_irq(&mdev->req_lock);
	req = validator(mdev, id, sector);
	if (unlikely(!req)) {
		spin_unlock_irq(&mdev->req_lock);
		dev_err(DEV, "%s: got a corrupt block_id/sector pair\n", func);
		return FALSE;
	}
	__req_mod(req, what, &m);
	spin_unlock_irq(&mdev->req_lock);

	if (m.bio)
		complete_master_bio(mdev, &m);
	return TRUE;
}

static int got_BlockAck(struct drbd_conf *mdev, struct p_header *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);
	int blksize = be32_to_cpu(p->blksize);
	enum drbd_req_event what;

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (is_syncer_block_id(p->block_id)) {
		drbd_set_in_sync(mdev, sector, blksize);
		dec_rs_pending(mdev);
		return TRUE;
	}
	switch (be16_to_cpu(h->command)) {
	case P_RS_WRITE_ACK:
		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		what = write_acked_by_peer_and_sis;
		break;
	case P_WRITE_ACK:
		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		what = write_acked_by_peer;
		break;
	case P_RECV_ACK:
		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_B);
		what = recv_acked_by_peer;
		break;
	case P_DISCARD_ACK:
		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		what = conflict_discarded_by_peer;
		break;
	default:
		D_ASSERT(0);
		return FALSE;
	}

	return validate_req_change_req_state(mdev, p->block_id, sector,
		_ack_id_to_req, __func__ , what);
}

static int got_NegAck(struct drbd_conf *mdev, struct p_header *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);

	if (__ratelimit(&drbd_ratelimit_state))
		dev_warn(DEV, "Got NegAck packet. Peer is in trouble?\n");

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (is_syncer_block_id(p->block_id)) {
		int size = be32_to_cpu(p->blksize);
		dec_rs_pending(mdev);
		drbd_rs_failed_io(mdev, sector, size);
		return TRUE;
	}
	return validate_req_change_req_state(mdev, p->block_id, sector,
		_ack_id_to_req, __func__ , neg_acked);
}

static int got_NegDReply(struct drbd_conf *mdev, struct p_header *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));
	dev_err(DEV, "Got NegDReply; Sector %llus, len %u; Fail original request.\n",
	    (unsigned long long)sector, be32_to_cpu(p->blksize));

	return validate_req_change_req_state(mdev, p->block_id, sector,
		_ar_id_to_req, __func__ , neg_acked);
}

static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header *h)
{
	sector_t sector;
	int size;
	struct p_block_ack *p = (struct p_block_ack *)h;

	sector = be64_to_cpu(p->sector);
	size = be32_to_cpu(p->blksize);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	dec_rs_pending(mdev);

	if (get_ldev_if_state(mdev, D_FAILED)) {
		drbd_rs_complete_io(mdev, sector);
		drbd_rs_failed_io(mdev, sector, size);
		put_ldev(mdev);
	}

	return TRUE;
}

static int got_BarrierAck(struct drbd_conf *mdev, struct p_header *h)
{
	struct p_barrier_ack *p = (struct p_barrier_ack *)h;

	tl_release(mdev, p->barrier, be32_to_cpu(p->set_size));

	return TRUE;
}
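
/* One online-verify reply: the peer flags the block either as in sync or
 * as ID_OUT_OF_SYNC. When the last outstanding reply arrives (ov_left
 * drops to zero), queue w_ov_finished; if even that small allocation
 * fails, report and finish the verify synchronously right here. */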
static int got_OVResult(struct drbd_conf *mdev, struct p_header *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	struct drbd_work *w;
	sector_t sector;
	int size;

	sector = be64_to_cpu(p->sector);
	size = be32_to_cpu(p->blksize);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
		drbd_ov_oos_found(mdev, sector, size);
	else
		ov_oos_print(mdev);

	drbd_rs_complete_io(mdev, sector);
	dec_rs_pending(mdev);

	if (--mdev->ov_left == 0) {
		w = kmalloc(sizeof(*w), GFP_NOIO);
		if (w) {
			w->cb = w_ov_finished;
			drbd_queue_work_front(&mdev->data.work, w);
		} else {
			dev_err(DEV, "kmalloc(w) failed.\n");
			ov_oos_print(mdev);
			drbd_resync_finished(mdev);
		}
	}
	return TRUE;
}

static int got_delay_probe_m(struct drbd_conf *mdev, struct p_header *h)
{
	struct p_delay_probe *p = (struct p_delay_probe *)h;

	got_delay_probe(mdev, USE_META_SOCKET, p);
	return TRUE;
}

struct asender_cmd {
	size_t pkt_size;
	int (*process)(struct drbd_conf *mdev, struct p_header *h);
};
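
/* Map an asender packet type to its handler. pkt_size is the full
 * on-wire size including the header; drbd_asender() reads exactly that
 * many bytes and cross-checks h->length against
 * pkt_size - sizeof(struct p_header). */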
static struct asender_cmd *get_asender_cmd(int cmd)
{
	static struct asender_cmd asender_tbl[] = {
		/* anything missing from this table is in
		 * the drbd_cmd_handler (drbd_default_handler) table,
		 * see the beginning of drbdd() */
	[P_PING]	    = { sizeof(struct p_header), got_Ping },
	[P_PING_ACK]	    = { sizeof(struct p_header), got_PingAck },
	[P_RECV_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_WRITE_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_RS_WRITE_ACK]    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_DISCARD_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_NEG_ACK]	    = { sizeof(struct p_block_ack), got_NegAck },
	[P_NEG_DREPLY]	    = { sizeof(struct p_block_ack), got_NegDReply },
	[P_NEG_RS_DREPLY]   = { sizeof(struct p_block_ack), got_NegRSDReply },
	[P_OV_RESULT]	    = { sizeof(struct p_block_ack), got_OVResult },
	[P_BARRIER_ACK]	    = { sizeof(struct p_barrier_ack), got_BarrierAck },
	[P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
	[P_RS_IS_IN_SYNC]   = { sizeof(struct p_block_ack), got_IsInSync },
	[P_DELAY_PROBE]     = { sizeof(struct p_delay_probe), got_delay_probe_m },
	[P_MAX_CMD]	    = { 0, NULL },
	};
	if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
		return NULL;
	return &asender_tbl[cmd];
}

int drbd_asender(struct drbd_thread *thi)
{
	struct drbd_conf *mdev = thi->mdev;
	struct p_header *h = &mdev->meta.rbuf.header;
	struct asender_cmd *cmd = NULL;

	int rv, len;
	void *buf    = h;
	int received = 0;
	int expect   = sizeof(struct p_header);
	int empty;

	sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev));

	current->policy = SCHED_RR;  /* Make this a realtime task! */
	current->rt_priority = 2;    /* more important than all other tasks */

	while (get_t_state(thi) == Running) {
		drbd_thread_current_set_cpu(mdev);
		if (test_and_clear_bit(SEND_PING, &mdev->flags)) {
			ERR_IF(!drbd_send_ping(mdev)) goto reconnect;
			mdev->meta.socket->sk->sk_rcvtimeo =
				mdev->net_conf->ping_timeo*HZ/10;
		}

		/* conditionally cork;
		 * it may hurt latency if we cork without much to send */
		if (!mdev->net_conf->no_cork &&
		    3 < atomic_read(&mdev->unacked_cnt))
			drbd_tcp_cork(mdev->meta.socket);
		while (1) {
			clear_bit(SIGNAL_ASENDER, &mdev->flags);
			flush_signals(current);
			if (!drbd_process_done_ee(mdev)) {
				dev_err(DEV, "process_done_ee() = NOT_OK\n");
				goto reconnect;
			}
			/* to avoid race with newly queued ACKs */
			set_bit(SIGNAL_ASENDER, &mdev->flags);
			spin_lock_irq(&mdev->req_lock);
			empty = list_empty(&mdev->done_ee);
			spin_unlock_irq(&mdev->req_lock);
			/* new ack may have been queued right here,
			 * but then there is also a signal pending,
			 * and we start over... */
			if (empty)
				break;
		}
		/* but unconditionally uncork unless disabled */
		if (!mdev->net_conf->no_cork)
			drbd_tcp_uncork(mdev->meta.socket);

		/* short circuit, recv_msg would return EINTR anyways. */
		if (signal_pending(current))
			continue;

		rv = drbd_recv_short(mdev, mdev->meta.socket,
				     buf, expect-received, 0);
		clear_bit(SIGNAL_ASENDER, &mdev->flags);

		flush_signals(current);

		/* Note:
		 * -EINTR	 (on meta) we got a signal
		 * -EAGAIN	 (on meta) rcvtimeo expired
		 * -ECONNRESET	 other side closed the connection
		 * -ERESTARTSYS	 (on data) we got a signal
		 * rv <  0	 other than above: unexpected error!
		 * rv == expected: full header or command
		 * rv <  expected: "woken" by signal during receive
		 * rv == 0	 : "connection shut down by peer"
		 */
		if (likely(rv > 0)) {
			received += rv;
			buf	 += rv;
		} else if (rv == 0) {
			dev_err(DEV, "meta connection shut down by peer.\n");
			goto reconnect;
		} else if (rv == -EAGAIN) {
			if (mdev->meta.socket->sk->sk_rcvtimeo ==
			    mdev->net_conf->ping_timeo*HZ/10) {
				dev_err(DEV, "PingAck did not arrive in time.\n");
				goto reconnect;
			}
			set_bit(SEND_PING, &mdev->flags);
			continue;
		} else if (rv == -EINTR) {
			continue;
		} else {
			dev_err(DEV, "sock_recvmsg returned %d\n", rv);
			goto reconnect;
		}

		if (received == expect && cmd == NULL) {
			if (unlikely(h->magic != BE_DRBD_MAGIC)) {
				dev_err(DEV, "magic?? on meta m: 0x%lx c: %d l: %d\n",
				    (long)be32_to_cpu(h->magic),
				    h->command, h->length);
				goto reconnect;
			}
			cmd = get_asender_cmd(be16_to_cpu(h->command));
			len = be16_to_cpu(h->length);
			if (unlikely(cmd == NULL)) {
				dev_err(DEV, "unknown command?? on meta m: 0x%lx c: %d l: %d\n",
				    (long)be32_to_cpu(h->magic),
				    h->command, h->length);
				goto disconnect;
			}
			expect = cmd->pkt_size;
			ERR_IF(len != expect-sizeof(struct p_header))
				goto reconnect;
		}
		if (received == expect) {
			D_ASSERT(cmd != NULL);
			if (!cmd->process(mdev, h))
				goto reconnect;

			buf	 = h;
			received = 0;
			expect	 = sizeof(struct p_header);
			cmd	 = NULL;
		}
	}

	if (0) {
reconnect:
		drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
	}
	if (0) {
disconnect:
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
	}
	clear_bit(SIGNAL_ASENDER, &mdev->flags);

	D_ASSERT(mdev->state.conn < C_CONNECTED);
	dev_info(DEV, "asender terminated\n");

	return 0;
}