1 /*
2    drbd_receiver.c
3 
4    This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5 
6    Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7    Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8    Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9 
10    drbd is free software; you can redistribute it and/or modify
11    it under the terms of the GNU General Public License as published by
12    the Free Software Foundation; either version 2, or (at your option)
13    any later version.
14 
15    drbd is distributed in the hope that it will be useful,
16    but WITHOUT ANY WARRANTY; without even the implied warranty of
17    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18    GNU General Public License for more details.
19 
20    You should have received a copy of the GNU General Public License
21    along with drbd; see the file COPYING.  If not, write to
22    the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23  */
24 
25 
26 #include <linux/module.h>
27 
28 #include <asm/uaccess.h>
29 #include <net/sock.h>
30 
31 #include <linux/drbd.h>
32 #include <linux/fs.h>
33 #include <linux/file.h>
34 #include <linux/in.h>
35 #include <linux/mm.h>
36 #include <linux/memcontrol.h>
37 #include <linux/mm_inline.h>
38 #include <linux/slab.h>
39 #include <linux/pkt_sched.h>
40 #define __KERNEL_SYSCALLS__
41 #include <linux/unistd.h>
42 #include <linux/vmalloc.h>
43 #include <linux/random.h>
44 #include <linux/string.h>
45 #include <linux/scatterlist.h>
46 #include "drbd_int.h"
47 #include "drbd_req.h"
48 
49 #include "drbd_vli.h"
50 
51 struct packet_info {
52 	enum drbd_packet cmd;
53 	unsigned int size;
54 	unsigned int vnr;
55 	void *data;
56 };
57 
58 enum finish_epoch {
59 	FE_STILL_LIVE,
60 	FE_DESTROYED,
61 	FE_RECYCLED,
62 };
63 
64 static int drbd_do_features(struct drbd_tconn *tconn);
65 static int drbd_do_auth(struct drbd_tconn *tconn);
66 static int drbd_disconnected(struct drbd_conf *mdev);
67 
68 static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *, struct drbd_epoch *, enum epoch_event);
69 static int e_end_block(struct drbd_work *, int);
70 
71 
72 #define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)
73 
74 /*
75  * some helper functions to deal with single linked page lists,
76  * page->private being our "next" pointer.
77  */
78 
79 /* If at least n pages are linked at head, get n pages off.
80  * Otherwise, don't modify head, and return NULL.
81  * Locking is the responsibility of the caller.
82  */
83 static struct page *page_chain_del(struct page **head, int n)
84 {
85 	struct page *page;
86 	struct page *tmp;
87 
88 	BUG_ON(!n);
89 	BUG_ON(!head);
90 
91 	page = *head;
92 
93 	if (!page)
94 		return NULL;
95 
96 	while (page) {
97 		tmp = page_chain_next(page);
98 		if (--n == 0)
99 			break; /* found sufficient pages */
100 		if (tmp == NULL)
101 			/* insufficient pages, don't use any of them. */
102 			return NULL;
103 		page = tmp;
104 	}
105 
106 	/* add end of list marker for the returned list */
107 	set_page_private(page, 0);
108 	/* actual return value, and adjustment of head */
109 	page = *head;
110 	*head = tmp;
111 	return page;
112 }
113 
114 /* may be used outside of locks to find the tail of a (usually short)
115  * "private" page chain, before adding it back to a global chain head
116  * with page_chain_add() under a spinlock. */
117 static struct page *page_chain_tail(struct page *page, int *len)
118 {
119 	struct page *tmp;
120 	int i = 1;
121 	while ((tmp = page_chain_next(page)))
122 		++i, page = tmp;
123 	if (len)
124 		*len = i;
125 	return page;
126 }
127 
128 static int page_chain_free(struct page *page)
129 {
130 	struct page *tmp;
131 	int i = 0;
132 	page_chain_for_each_safe(page, tmp) {
133 		put_page(page);
134 		++i;
135 	}
136 	return i;
137 }
138 
139 static void page_chain_add(struct page **head,
140 		struct page *chain_first, struct page *chain_last)
141 {
142 #if 1
143 	struct page *tmp;
144 	tmp = page_chain_tail(chain_first, NULL);
145 	BUG_ON(tmp != chain_last);
146 #endif
147 
148 	/* add chain to head */
149 	set_page_private(chain_last, (unsigned long)*head);
150 	*head = chain_first;
151 }
152 
153 static struct page *__drbd_alloc_pages(struct drbd_conf *mdev,
154 				       unsigned int number)
155 {
156 	struct page *page = NULL;
157 	struct page *tmp = NULL;
158 	unsigned int i = 0;
159 
160 	/* Yes, testing drbd_pp_vacant outside the lock is racy.
161 	 * So what. It saves a spin_lock. */
162 	if (drbd_pp_vacant >= number) {
163 		spin_lock(&drbd_pp_lock);
164 		page = page_chain_del(&drbd_pp_pool, number);
165 		if (page)
166 			drbd_pp_vacant -= number;
167 		spin_unlock(&drbd_pp_lock);
168 		if (page)
169 			return page;
170 	}
171 
172 	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
173 	 * "criss-cross" setup, that might cause write-out on some other DRBD,
174 	 * which in turn might block on the other node at this very place.  */
175 	for (i = 0; i < number; i++) {
176 		tmp = alloc_page(GFP_TRY);
177 		if (!tmp)
178 			break;
179 		set_page_private(tmp, (unsigned long)page);
180 		page = tmp;
181 	}
182 
183 	if (i == number)
184 		return page;
185 
186 	/* Not enough pages immediately available this time.
187 	 * No need to jump around here, drbd_alloc_pages will retry this
188 	 * function "soon". */
189 	if (page) {
190 		tmp = page_chain_tail(page, NULL);
191 		spin_lock(&drbd_pp_lock);
192 		page_chain_add(&drbd_pp_pool, page, tmp);
193 		drbd_pp_vacant += i;
194 		spin_unlock(&drbd_pp_lock);
195 	}
196 	return NULL;
197 }
198 
199 static void reclaim_finished_net_peer_reqs(struct drbd_conf *mdev,
200 					   struct list_head *to_be_freed)
201 {
202 	struct drbd_peer_request *peer_req;
203 	struct list_head *le, *tle;
204 
205 	/* The EEs are always appended to the end of the list. Since
206 	   they are sent in order over the wire, they have to finish
207 	   in order. As soon as we see the first not finished we can
208 	   stop to examine the list... */
209 
210 	list_for_each_safe(le, tle, &mdev->net_ee) {
211 		peer_req = list_entry(le, struct drbd_peer_request, w.list);
212 		if (drbd_peer_req_has_active_page(peer_req))
213 			break;
214 		list_move(le, to_be_freed);
215 	}
216 }
217 
218 static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
219 {
220 	LIST_HEAD(reclaimed);
221 	struct drbd_peer_request *peer_req, *t;
222 
223 	spin_lock_irq(&mdev->tconn->req_lock);
224 	reclaim_finished_net_peer_reqs(mdev, &reclaimed);
225 	spin_unlock_irq(&mdev->tconn->req_lock);
226 
227 	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
228 		drbd_free_net_peer_req(mdev, peer_req);
229 }
230 
231 /**
232  * drbd_alloc_pages() - Returns @number pages, retries forever (or until signalled)
233  * @mdev:	DRBD device.
234  * @number:	number of pages requested
235  * @retry:	whether to retry, if not enough pages are available right now
236  *
237  * Tries to allocate number pages, first from our own page pool, then from
238  * the kernel, unless this allocation would exceed the max_buffers setting.
239  * Possibly retry until DRBD frees sufficient pages somewhere else.
240  *
241  * Returns a page chain linked via page->private.
242  */
243 struct page *drbd_alloc_pages(struct drbd_conf *mdev, unsigned int number,
244 			      bool retry)
245 {
246 	struct page *page = NULL;
247 	struct net_conf *nc;
248 	DEFINE_WAIT(wait);
249 	int mxb;
250 
251 	/* Yes, we may run up to @number over max_buffers. If we
252 	 * follow it strictly, the admin will get it wrong anyways. */
253 	rcu_read_lock();
254 	nc = rcu_dereference(mdev->tconn->net_conf);
255 	mxb = nc ? nc->max_buffers : 1000000;
256 	rcu_read_unlock();
257 
258 	if (atomic_read(&mdev->pp_in_use) < mxb)
259 		page = __drbd_alloc_pages(mdev, number);
260 
261 	while (page == NULL) {
262 		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);
263 
264 		drbd_kick_lo_and_reclaim_net(mdev);
265 
266 		if (atomic_read(&mdev->pp_in_use) < mxb) {
267 			page = __drbd_alloc_pages(mdev, number);
268 			if (page)
269 				break;
270 		}
271 
272 		if (!retry)
273 			break;
274 
275 		if (signal_pending(current)) {
276 			dev_warn(DEV, "drbd_alloc_pages interrupted!\n");
277 			break;
278 		}
279 
280 		schedule();
281 	}
282 	finish_wait(&drbd_pp_wait, &wait);
283 
284 	if (page)
285 		atomic_add(number, &mdev->pp_in_use);
286 	return page;
287 }
288 
289 /* Must not be used from irq, as that may deadlock: see drbd_alloc_pages.
290  * Is also used from inside an other spin_lock_irq(&mdev->tconn->req_lock);
291  * Either links the page chain back to the global pool,
292  * or returns all pages to the system. */
293 static void drbd_free_pages(struct drbd_conf *mdev, struct page *page, int is_net)
294 {
295 	atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
296 	int i;
297 
298 	if (page == NULL)
299 		return;
300 
301 	if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count)
302 		i = page_chain_free(page);
303 	else {
304 		struct page *tmp;
305 		tmp = page_chain_tail(page, &i);
306 		spin_lock(&drbd_pp_lock);
307 		page_chain_add(&drbd_pp_pool, page, tmp);
308 		drbd_pp_vacant += i;
309 		spin_unlock(&drbd_pp_lock);
310 	}
311 	i = atomic_sub_return(i, a);
312 	if (i < 0)
313 		dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
314 			is_net ? "pp_in_use_by_net" : "pp_in_use", i);
315 	wake_up(&drbd_pp_wait);
316 }
317 
318 /*
319 You need to hold the req_lock:
320  _drbd_wait_ee_list_empty()
321 
322 You must not have the req_lock:
323  drbd_free_peer_req()
324  drbd_alloc_peer_req()
325  drbd_free_peer_reqs()
326  drbd_ee_fix_bhs()
327  drbd_finish_peer_reqs()
328  drbd_clear_done_ee()
329  drbd_wait_ee_list_empty()
330 */
331 
332 struct drbd_peer_request *
333 drbd_alloc_peer_req(struct drbd_conf *mdev, u64 id, sector_t sector,
334 		    unsigned int data_size, gfp_t gfp_mask) __must_hold(local)
335 {
336 	struct drbd_peer_request *peer_req;
337 	struct page *page = NULL;
338 	unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;
339 
340 	if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
341 		return NULL;
342 
343 	peer_req = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
344 	if (!peer_req) {
345 		if (!(gfp_mask & __GFP_NOWARN))
346 			dev_err(DEV, "%s: allocation failed\n", __func__);
347 		return NULL;
348 	}
349 
350 	if (data_size) {
351 		page = drbd_alloc_pages(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
352 		if (!page)
353 			goto fail;
354 	}
355 
356 	drbd_clear_interval(&peer_req->i);
357 	peer_req->i.size = data_size;
358 	peer_req->i.sector = sector;
359 	peer_req->i.local = false;
360 	peer_req->i.waiting = false;
361 
362 	peer_req->epoch = NULL;
363 	peer_req->w.mdev = mdev;
364 	peer_req->pages = page;
365 	atomic_set(&peer_req->pending_bios, 0);
366 	peer_req->flags = 0;
367 	/*
368 	 * The block_id is opaque to the receiver.  It is not endianness
369 	 * converted, and sent back to the sender unchanged.
370 	 */
371 	peer_req->block_id = id;
372 
373 	return peer_req;
374 
375  fail:
376 	mempool_free(peer_req, drbd_ee_mempool);
377 	return NULL;
378 }
379 
380 void __drbd_free_peer_req(struct drbd_conf *mdev, struct drbd_peer_request *peer_req,
381 		       int is_net)
382 {
383 	if (peer_req->flags & EE_HAS_DIGEST)
384 		kfree(peer_req->digest);
385 	drbd_free_pages(mdev, peer_req->pages, is_net);
386 	D_ASSERT(atomic_read(&peer_req->pending_bios) == 0);
387 	D_ASSERT(drbd_interval_empty(&peer_req->i));
388 	mempool_free(peer_req, drbd_ee_mempool);
389 }
390 
391 int drbd_free_peer_reqs(struct drbd_conf *mdev, struct list_head *list)
392 {
393 	LIST_HEAD(work_list);
394 	struct drbd_peer_request *peer_req, *t;
395 	int count = 0;
396 	int is_net = list == &mdev->net_ee;
397 
398 	spin_lock_irq(&mdev->tconn->req_lock);
399 	list_splice_init(list, &work_list);
400 	spin_unlock_irq(&mdev->tconn->req_lock);
401 
402 	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
403 		__drbd_free_peer_req(mdev, peer_req, is_net);
404 		count++;
405 	}
406 	return count;
407 }
408 
409 /*
410  * See also comments in _req_mod(,BARRIER_ACKED) and receive_Barrier.
411  */
412 static int drbd_finish_peer_reqs(struct drbd_conf *mdev)
413 {
414 	LIST_HEAD(work_list);
415 	LIST_HEAD(reclaimed);
416 	struct drbd_peer_request *peer_req, *t;
417 	int err = 0;
418 
419 	spin_lock_irq(&mdev->tconn->req_lock);
420 	reclaim_finished_net_peer_reqs(mdev, &reclaimed);
421 	list_splice_init(&mdev->done_ee, &work_list);
422 	spin_unlock_irq(&mdev->tconn->req_lock);
423 
424 	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
425 		drbd_free_net_peer_req(mdev, peer_req);
426 
427 	/* possible callbacks here:
428 	 * e_end_block, and e_end_resync_block, e_send_superseded.
429 	 * all ignore the last argument.
430 	 */
431 	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
432 		int err2;
433 
434 		/* list_del not necessary, next/prev members not touched */
435 		err2 = peer_req->w.cb(&peer_req->w, !!err);
436 		if (!err)
437 			err = err2;
438 		drbd_free_peer_req(mdev, peer_req);
439 	}
440 	wake_up(&mdev->ee_wait);
441 
442 	return err;
443 }
444 
445 static void _drbd_wait_ee_list_empty(struct drbd_conf *mdev,
446 				     struct list_head *head)
447 {
448 	DEFINE_WAIT(wait);
449 
450 	/* avoids spin_lock/unlock
451 	 * and calling prepare_to_wait in the fast path */
452 	while (!list_empty(head)) {
453 		prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
454 		spin_unlock_irq(&mdev->tconn->req_lock);
455 		io_schedule();
456 		finish_wait(&mdev->ee_wait, &wait);
457 		spin_lock_irq(&mdev->tconn->req_lock);
458 	}
459 }
460 
461 static void drbd_wait_ee_list_empty(struct drbd_conf *mdev,
462 				    struct list_head *head)
463 {
464 	spin_lock_irq(&mdev->tconn->req_lock);
465 	_drbd_wait_ee_list_empty(mdev, head);
466 	spin_unlock_irq(&mdev->tconn->req_lock);
467 }
468 
469 static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags)
470 {
471 	mm_segment_t oldfs;
472 	struct kvec iov = {
473 		.iov_base = buf,
474 		.iov_len = size,
475 	};
476 	struct msghdr msg = {
477 		.msg_iovlen = 1,
478 		.msg_iov = (struct iovec *)&iov,
479 		.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
480 	};
481 	int rv;
482 
483 	oldfs = get_fs();
484 	set_fs(KERNEL_DS);
485 	rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
486 	set_fs(oldfs);
487 
488 	return rv;
489 }
490 
491 static int drbd_recv(struct drbd_tconn *tconn, void *buf, size_t size)
492 {
493 	int rv;
494 
495 	rv = drbd_recv_short(tconn->data.socket, buf, size, 0);
496 
497 	if (rv < 0) {
498 		if (rv == -ECONNRESET)
499 			conn_info(tconn, "sock was reset by peer\n");
500 		else if (rv != -ERESTARTSYS)
501 			conn_err(tconn, "sock_recvmsg returned %d\n", rv);
502 	} else if (rv == 0) {
503 		if (test_bit(DISCONNECT_SENT, &tconn->flags)) {
504 			long t;
505 			rcu_read_lock();
506 			t = rcu_dereference(tconn->net_conf)->ping_timeo * HZ/10;
507 			rcu_read_unlock();
508 
509 			t = wait_event_timeout(tconn->ping_wait, tconn->cstate < C_WF_REPORT_PARAMS, t);
510 
511 			if (t)
512 				goto out;
513 		}
514 		conn_info(tconn, "sock was shut down by peer\n");
515 	}
516 
517 	if (rv != size)
518 		conn_request_state(tconn, NS(conn, C_BROKEN_PIPE), CS_HARD);
519 
520 out:
521 	return rv;
522 }
523 
524 static int drbd_recv_all(struct drbd_tconn *tconn, void *buf, size_t size)
525 {
526 	int err;
527 
528 	err = drbd_recv(tconn, buf, size);
529 	if (err != size) {
530 		if (err >= 0)
531 			err = -EIO;
532 	} else
533 		err = 0;
534 	return err;
535 }
536 
537 static int drbd_recv_all_warn(struct drbd_tconn *tconn, void *buf, size_t size)
538 {
539 	int err;
540 
541 	err = drbd_recv_all(tconn, buf, size);
542 	if (err && !signal_pending(current))
543 		conn_warn(tconn, "short read (expected size %d)\n", (int)size);
544 	return err;
545 }
546 
547 /* quoting tcp(7):
548  *   On individual connections, the socket buffer size must be set prior to the
549  *   listen(2) or connect(2) calls in order to have it take effect.
550  * This is our wrapper to do so.
551  */
552 static void drbd_setbufsize(struct socket *sock, unsigned int snd,
553 		unsigned int rcv)
554 {
555 	/* open coded SO_SNDBUF, SO_RCVBUF */
556 	if (snd) {
557 		sock->sk->sk_sndbuf = snd;
558 		sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
559 	}
560 	if (rcv) {
561 		sock->sk->sk_rcvbuf = rcv;
562 		sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
563 	}
564 }
565 
566 static struct socket *drbd_try_connect(struct drbd_tconn *tconn)
567 {
568 	const char *what;
569 	struct socket *sock;
570 	struct sockaddr_in6 src_in6;
571 	struct sockaddr_in6 peer_in6;
572 	struct net_conf *nc;
573 	int err, peer_addr_len, my_addr_len;
574 	int sndbuf_size, rcvbuf_size, connect_int;
575 	int disconnect_on_error = 1;
576 
577 	rcu_read_lock();
578 	nc = rcu_dereference(tconn->net_conf);
579 	if (!nc) {
580 		rcu_read_unlock();
581 		return NULL;
582 	}
583 	sndbuf_size = nc->sndbuf_size;
584 	rcvbuf_size = nc->rcvbuf_size;
585 	connect_int = nc->connect_int;
586 	rcu_read_unlock();
587 
588 	my_addr_len = min_t(int, tconn->my_addr_len, sizeof(src_in6));
589 	memcpy(&src_in6, &tconn->my_addr, my_addr_len);
590 
591 	if (((struct sockaddr *)&tconn->my_addr)->sa_family == AF_INET6)
592 		src_in6.sin6_port = 0;
593 	else
594 		((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */
595 
596 	peer_addr_len = min_t(int, tconn->peer_addr_len, sizeof(src_in6));
597 	memcpy(&peer_in6, &tconn->peer_addr, peer_addr_len);
598 
599 	what = "sock_create_kern";
600 	err = sock_create_kern(((struct sockaddr *)&src_in6)->sa_family,
601 			       SOCK_STREAM, IPPROTO_TCP, &sock);
602 	if (err < 0) {
603 		sock = NULL;
604 		goto out;
605 	}
606 
607 	sock->sk->sk_rcvtimeo =
608 	sock->sk->sk_sndtimeo = connect_int * HZ;
609 	drbd_setbufsize(sock, sndbuf_size, rcvbuf_size);
610 
611        /* explicitly bind to the configured IP as source IP
612 	*  for the outgoing connections.
613 	*  This is needed for multihomed hosts and to be
614 	*  able to use lo: interfaces for drbd.
615 	* Make sure to use 0 as port number, so linux selects
616 	*  a free one dynamically.
617 	*/
618 	what = "bind before connect";
619 	err = sock->ops->bind(sock, (struct sockaddr *) &src_in6, my_addr_len);
620 	if (err < 0)
621 		goto out;
622 
623 	/* connect may fail, peer not yet available.
624 	 * stay C_WF_CONNECTION, don't go Disconnecting! */
625 	disconnect_on_error = 0;
626 	what = "connect";
627 	err = sock->ops->connect(sock, (struct sockaddr *) &peer_in6, peer_addr_len, 0);
628 
629 out:
630 	if (err < 0) {
631 		if (sock) {
632 			sock_release(sock);
633 			sock = NULL;
634 		}
635 		switch (-err) {
636 			/* timeout, busy, signal pending */
637 		case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
638 		case EINTR: case ERESTARTSYS:
639 			/* peer not (yet) available, network problem */
640 		case ECONNREFUSED: case ENETUNREACH:
641 		case EHOSTDOWN:    case EHOSTUNREACH:
642 			disconnect_on_error = 0;
643 			break;
644 		default:
645 			conn_err(tconn, "%s failed, err = %d\n", what, err);
646 		}
647 		if (disconnect_on_error)
648 			conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
649 	}
650 
651 	return sock;
652 }
653 
654 struct accept_wait_data {
655 	struct drbd_tconn *tconn;
656 	struct socket *s_listen;
657 	struct completion door_bell;
658 	void (*original_sk_state_change)(struct sock *sk);
659 
660 };
661 
662 static void drbd_incoming_connection(struct sock *sk)
663 {
664 	struct accept_wait_data *ad = sk->sk_user_data;
665 	void (*state_change)(struct sock *sk);
666 
667 	state_change = ad->original_sk_state_change;
668 	if (sk->sk_state == TCP_ESTABLISHED)
669 		complete(&ad->door_bell);
670 	state_change(sk);
671 }
672 
673 static int prepare_listen_socket(struct drbd_tconn *tconn, struct accept_wait_data *ad)
674 {
675 	int err, sndbuf_size, rcvbuf_size, my_addr_len;
676 	struct sockaddr_in6 my_addr;
677 	struct socket *s_listen;
678 	struct net_conf *nc;
679 	const char *what;
680 
681 	rcu_read_lock();
682 	nc = rcu_dereference(tconn->net_conf);
683 	if (!nc) {
684 		rcu_read_unlock();
685 		return -EIO;
686 	}
687 	sndbuf_size = nc->sndbuf_size;
688 	rcvbuf_size = nc->rcvbuf_size;
689 	rcu_read_unlock();
690 
691 	my_addr_len = min_t(int, tconn->my_addr_len, sizeof(struct sockaddr_in6));
692 	memcpy(&my_addr, &tconn->my_addr, my_addr_len);
693 
694 	what = "sock_create_kern";
695 	err = sock_create_kern(((struct sockaddr *)&my_addr)->sa_family,
696 			       SOCK_STREAM, IPPROTO_TCP, &s_listen);
697 	if (err) {
698 		s_listen = NULL;
699 		goto out;
700 	}
701 
702 	s_listen->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
703 	drbd_setbufsize(s_listen, sndbuf_size, rcvbuf_size);
704 
705 	what = "bind before listen";
706 	err = s_listen->ops->bind(s_listen, (struct sockaddr *)&my_addr, my_addr_len);
707 	if (err < 0)
708 		goto out;
709 
710 	ad->s_listen = s_listen;
711 	write_lock_bh(&s_listen->sk->sk_callback_lock);
712 	ad->original_sk_state_change = s_listen->sk->sk_state_change;
713 	s_listen->sk->sk_state_change = drbd_incoming_connection;
714 	s_listen->sk->sk_user_data = ad;
715 	write_unlock_bh(&s_listen->sk->sk_callback_lock);
716 
717 	what = "listen";
718 	err = s_listen->ops->listen(s_listen, 5);
719 	if (err < 0)
720 		goto out;
721 
722 	return 0;
723 out:
724 	if (s_listen)
725 		sock_release(s_listen);
726 	if (err < 0) {
727 		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
728 			conn_err(tconn, "%s failed, err = %d\n", what, err);
729 			conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
730 		}
731 	}
732 
733 	return -EIO;
734 }
735 
736 static void unregister_state_change(struct sock *sk, struct accept_wait_data *ad)
737 {
738 	write_lock_bh(&sk->sk_callback_lock);
739 	sk->sk_state_change = ad->original_sk_state_change;
740 	sk->sk_user_data = NULL;
741 	write_unlock_bh(&sk->sk_callback_lock);
742 }
743 
744 static struct socket *drbd_wait_for_connect(struct drbd_tconn *tconn, struct accept_wait_data *ad)
745 {
746 	int timeo, connect_int, err = 0;
747 	struct socket *s_estab = NULL;
748 	struct net_conf *nc;
749 
750 	rcu_read_lock();
751 	nc = rcu_dereference(tconn->net_conf);
752 	if (!nc) {
753 		rcu_read_unlock();
754 		return NULL;
755 	}
756 	connect_int = nc->connect_int;
757 	rcu_read_unlock();
758 
759 	timeo = connect_int * HZ;
760 	/* 28.5% random jitter */
761 	timeo += (prandom_u32() & 1) ? timeo / 7 : -timeo / 7;
762 
763 	err = wait_for_completion_interruptible_timeout(&ad->door_bell, timeo);
764 	if (err <= 0)
765 		return NULL;
766 
767 	err = kernel_accept(ad->s_listen, &s_estab, 0);
768 	if (err < 0) {
769 		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
770 			conn_err(tconn, "accept failed, err = %d\n", err);
771 			conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
772 		}
773 	}
774 
775 	if (s_estab)
776 		unregister_state_change(s_estab->sk, ad);
777 
778 	return s_estab;
779 }
780 
781 static int decode_header(struct drbd_tconn *, void *, struct packet_info *);
782 
783 static int send_first_packet(struct drbd_tconn *tconn, struct drbd_socket *sock,
784 			     enum drbd_packet cmd)
785 {
786 	if (!conn_prepare_command(tconn, sock))
787 		return -EIO;
788 	return conn_send_command(tconn, sock, cmd, 0, NULL, 0);
789 }
790 
791 static int receive_first_packet(struct drbd_tconn *tconn, struct socket *sock)
792 {
793 	unsigned int header_size = drbd_header_size(tconn);
794 	struct packet_info pi;
795 	int err;
796 
797 	err = drbd_recv_short(sock, tconn->data.rbuf, header_size, 0);
798 	if (err != header_size) {
799 		if (err >= 0)
800 			err = -EIO;
801 		return err;
802 	}
803 	err = decode_header(tconn, tconn->data.rbuf, &pi);
804 	if (err)
805 		return err;
806 	return pi.cmd;
807 }
808 
809 /**
810  * drbd_socket_okay() - Free the socket if its connection is not okay
811  * @sock:	pointer to the pointer to the socket.
812  */
813 static int drbd_socket_okay(struct socket **sock)
814 {
815 	int rr;
816 	char tb[4];
817 
818 	if (!*sock)
819 		return false;
820 
821 	rr = drbd_recv_short(*sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);
822 
823 	if (rr > 0 || rr == -EAGAIN) {
824 		return true;
825 	} else {
826 		sock_release(*sock);
827 		*sock = NULL;
828 		return false;
829 	}
830 }
831 /* Gets called if a connection is established, or if a new minor gets created
832    in a connection */
833 int drbd_connected(struct drbd_conf *mdev)
834 {
835 	int err;
836 
837 	atomic_set(&mdev->packet_seq, 0);
838 	mdev->peer_seq = 0;
839 
840 	mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
841 		&mdev->tconn->cstate_mutex :
842 		&mdev->own_state_mutex;
843 
844 	err = drbd_send_sync_param(mdev);
845 	if (!err)
846 		err = drbd_send_sizes(mdev, 0, 0);
847 	if (!err)
848 		err = drbd_send_uuids(mdev);
849 	if (!err)
850 		err = drbd_send_current_state(mdev);
851 	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
852 	clear_bit(RESIZE_PENDING, &mdev->flags);
853 	atomic_set(&mdev->ap_in_flight, 0);
854 	mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */
855 	return err;
856 }
857 
858 /*
859  * return values:
860  *   1 yes, we have a valid connection
861  *   0 oops, did not work out, please try again
862  *  -1 peer talks different language,
863  *     no point in trying again, please go standalone.
864  *  -2 We do not have a network config...
865  */
866 static int conn_connect(struct drbd_tconn *tconn)
867 {
868 	struct drbd_socket sock, msock;
869 	struct drbd_conf *mdev;
870 	struct net_conf *nc;
871 	int vnr, timeout, h, ok;
872 	bool discard_my_data;
873 	enum drbd_state_rv rv;
874 	struct accept_wait_data ad = {
875 		.tconn = tconn,
876 		.door_bell = COMPLETION_INITIALIZER_ONSTACK(ad.door_bell),
877 	};
878 
879 	clear_bit(DISCONNECT_SENT, &tconn->flags);
880 	if (conn_request_state(tconn, NS(conn, C_WF_CONNECTION), CS_VERBOSE) < SS_SUCCESS)
881 		return -2;
882 
883 	mutex_init(&sock.mutex);
884 	sock.sbuf = tconn->data.sbuf;
885 	sock.rbuf = tconn->data.rbuf;
886 	sock.socket = NULL;
887 	mutex_init(&msock.mutex);
888 	msock.sbuf = tconn->meta.sbuf;
889 	msock.rbuf = tconn->meta.rbuf;
890 	msock.socket = NULL;
891 
892 	/* Assume that the peer only understands protocol 80 until we know better.  */
893 	tconn->agreed_pro_version = 80;
894 
895 	if (prepare_listen_socket(tconn, &ad))
896 		return 0;
897 
898 	do {
899 		struct socket *s;
900 
901 		s = drbd_try_connect(tconn);
902 		if (s) {
903 			if (!sock.socket) {
904 				sock.socket = s;
905 				send_first_packet(tconn, &sock, P_INITIAL_DATA);
906 			} else if (!msock.socket) {
907 				clear_bit(RESOLVE_CONFLICTS, &tconn->flags);
908 				msock.socket = s;
909 				send_first_packet(tconn, &msock, P_INITIAL_META);
910 			} else {
911 				conn_err(tconn, "Logic error in conn_connect()\n");
912 				goto out_release_sockets;
913 			}
914 		}
915 
916 		if (sock.socket && msock.socket) {
917 			rcu_read_lock();
918 			nc = rcu_dereference(tconn->net_conf);
919 			timeout = nc->ping_timeo * HZ / 10;
920 			rcu_read_unlock();
921 			schedule_timeout_interruptible(timeout);
922 			ok = drbd_socket_okay(&sock.socket);
923 			ok = drbd_socket_okay(&msock.socket) && ok;
924 			if (ok)
925 				break;
926 		}
927 
928 retry:
929 		s = drbd_wait_for_connect(tconn, &ad);
930 		if (s) {
931 			int fp = receive_first_packet(tconn, s);
932 			drbd_socket_okay(&sock.socket);
933 			drbd_socket_okay(&msock.socket);
934 			switch (fp) {
935 			case P_INITIAL_DATA:
936 				if (sock.socket) {
937 					conn_warn(tconn, "initial packet S crossed\n");
938 					sock_release(sock.socket);
939 					sock.socket = s;
940 					goto randomize;
941 				}
942 				sock.socket = s;
943 				break;
944 			case P_INITIAL_META:
945 				set_bit(RESOLVE_CONFLICTS, &tconn->flags);
946 				if (msock.socket) {
947 					conn_warn(tconn, "initial packet M crossed\n");
948 					sock_release(msock.socket);
949 					msock.socket = s;
950 					goto randomize;
951 				}
952 				msock.socket = s;
953 				break;
954 			default:
955 				conn_warn(tconn, "Error receiving initial packet\n");
956 				sock_release(s);
957 randomize:
958 				if (prandom_u32() & 1)
959 					goto retry;
960 			}
961 		}
962 
963 		if (tconn->cstate <= C_DISCONNECTING)
964 			goto out_release_sockets;
965 		if (signal_pending(current)) {
966 			flush_signals(current);
967 			smp_rmb();
968 			if (get_t_state(&tconn->receiver) == EXITING)
969 				goto out_release_sockets;
970 		}
971 
972 		ok = drbd_socket_okay(&sock.socket);
973 		ok = drbd_socket_okay(&msock.socket) && ok;
974 	} while (!ok);
975 
976 	if (ad.s_listen)
977 		sock_release(ad.s_listen);
978 
979 	sock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
980 	msock.socket->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
981 
982 	sock.socket->sk->sk_allocation = GFP_NOIO;
983 	msock.socket->sk->sk_allocation = GFP_NOIO;
984 
985 	sock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
986 	msock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE;
987 
988 	/* NOT YET ...
989 	 * sock.socket->sk->sk_sndtimeo = tconn->net_conf->timeout*HZ/10;
990 	 * sock.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
991 	 * first set it to the P_CONNECTION_FEATURES timeout,
992 	 * which we set to 4x the configured ping_timeout. */
993 	rcu_read_lock();
994 	nc = rcu_dereference(tconn->net_conf);
995 
996 	sock.socket->sk->sk_sndtimeo =
997 	sock.socket->sk->sk_rcvtimeo = nc->ping_timeo*4*HZ/10;
998 
999 	msock.socket->sk->sk_rcvtimeo = nc->ping_int*HZ;
1000 	timeout = nc->timeout * HZ / 10;
1001 	discard_my_data = nc->discard_my_data;
1002 	rcu_read_unlock();
1003 
1004 	msock.socket->sk->sk_sndtimeo = timeout;
1005 
1006 	/* we don't want delays.
1007 	 * we use TCP_CORK where appropriate, though */
1008 	drbd_tcp_nodelay(sock.socket);
1009 	drbd_tcp_nodelay(msock.socket);
1010 
1011 	tconn->data.socket = sock.socket;
1012 	tconn->meta.socket = msock.socket;
1013 	tconn->last_received = jiffies;
1014 
1015 	h = drbd_do_features(tconn);
1016 	if (h <= 0)
1017 		return h;
1018 
1019 	if (tconn->cram_hmac_tfm) {
1020 		/* drbd_request_state(mdev, NS(conn, WFAuth)); */
1021 		switch (drbd_do_auth(tconn)) {
1022 		case -1:
1023 			conn_err(tconn, "Authentication of peer failed\n");
1024 			return -1;
1025 		case 0:
1026 			conn_err(tconn, "Authentication of peer failed, trying again.\n");
1027 			return 0;
1028 		}
1029 	}
1030 
1031 	tconn->data.socket->sk->sk_sndtimeo = timeout;
1032 	tconn->data.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
1033 
1034 	if (drbd_send_protocol(tconn) == -EOPNOTSUPP)
1035 		return -1;
1036 
1037 	set_bit(STATE_SENT, &tconn->flags);
1038 
1039 	rcu_read_lock();
1040 	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1041 		kref_get(&mdev->kref);
1042 		rcu_read_unlock();
1043 
1044 		/* Prevent a race between resync-handshake and
1045 		 * being promoted to Primary.
1046 		 *
1047 		 * Grab and release the state mutex, so we know that any current
1048 		 * drbd_set_role() is finished, and any incoming drbd_set_role
1049 		 * will see the STATE_SENT flag, and wait for it to be cleared.
1050 		 */
1051 		mutex_lock(mdev->state_mutex);
1052 		mutex_unlock(mdev->state_mutex);
1053 
1054 		if (discard_my_data)
1055 			set_bit(DISCARD_MY_DATA, &mdev->flags);
1056 		else
1057 			clear_bit(DISCARD_MY_DATA, &mdev->flags);
1058 
1059 		drbd_connected(mdev);
1060 		kref_put(&mdev->kref, &drbd_minor_destroy);
1061 		rcu_read_lock();
1062 	}
1063 	rcu_read_unlock();
1064 
1065 	rv = conn_request_state(tconn, NS(conn, C_WF_REPORT_PARAMS), CS_VERBOSE);
1066 	if (rv < SS_SUCCESS || tconn->cstate != C_WF_REPORT_PARAMS) {
1067 		clear_bit(STATE_SENT, &tconn->flags);
1068 		return 0;
1069 	}
1070 
1071 	drbd_thread_start(&tconn->asender);
1072 
1073 	mutex_lock(&tconn->conf_update);
1074 	/* The discard_my_data flag is a single-shot modifier to the next
1075 	 * connection attempt, the handshake of which is now well underway.
1076 	 * No need for rcu style copying of the whole struct
1077 	 * just to clear a single value. */
1078 	tconn->net_conf->discard_my_data = 0;
1079 	mutex_unlock(&tconn->conf_update);
1080 
1081 	return h;
1082 
1083 out_release_sockets:
1084 	if (ad.s_listen)
1085 		sock_release(ad.s_listen);
1086 	if (sock.socket)
1087 		sock_release(sock.socket);
1088 	if (msock.socket)
1089 		sock_release(msock.socket);
1090 	return -1;
1091 }
1092 
1093 static int decode_header(struct drbd_tconn *tconn, void *header, struct packet_info *pi)
1094 {
1095 	unsigned int header_size = drbd_header_size(tconn);
1096 
1097 	if (header_size == sizeof(struct p_header100) &&
1098 	    *(__be32 *)header == cpu_to_be32(DRBD_MAGIC_100)) {
1099 		struct p_header100 *h = header;
1100 		if (h->pad != 0) {
1101 			conn_err(tconn, "Header padding is not zero\n");
1102 			return -EINVAL;
1103 		}
1104 		pi->vnr = be16_to_cpu(h->volume);
1105 		pi->cmd = be16_to_cpu(h->command);
1106 		pi->size = be32_to_cpu(h->length);
1107 	} else if (header_size == sizeof(struct p_header95) &&
1108 		   *(__be16 *)header == cpu_to_be16(DRBD_MAGIC_BIG)) {
1109 		struct p_header95 *h = header;
1110 		pi->cmd = be16_to_cpu(h->command);
1111 		pi->size = be32_to_cpu(h->length);
1112 		pi->vnr = 0;
1113 	} else if (header_size == sizeof(struct p_header80) &&
1114 		   *(__be32 *)header == cpu_to_be32(DRBD_MAGIC)) {
1115 		struct p_header80 *h = header;
1116 		pi->cmd = be16_to_cpu(h->command);
1117 		pi->size = be16_to_cpu(h->length);
1118 		pi->vnr = 0;
1119 	} else {
1120 		conn_err(tconn, "Wrong magic value 0x%08x in protocol version %d\n",
1121 			 be32_to_cpu(*(__be32 *)header),
1122 			 tconn->agreed_pro_version);
1123 		return -EINVAL;
1124 	}
1125 	pi->data = header + header_size;
1126 	return 0;
1127 }
1128 
1129 static int drbd_recv_header(struct drbd_tconn *tconn, struct packet_info *pi)
1130 {
1131 	void *buffer = tconn->data.rbuf;
1132 	int err;
1133 
1134 	err = drbd_recv_all_warn(tconn, buffer, drbd_header_size(tconn));
1135 	if (err)
1136 		return err;
1137 
1138 	err = decode_header(tconn, buffer, pi);
1139 	tconn->last_received = jiffies;
1140 
1141 	return err;
1142 }
1143 
1144 static void drbd_flush(struct drbd_tconn *tconn)
1145 {
1146 	int rv;
1147 	struct drbd_conf *mdev;
1148 	int vnr;
1149 
1150 	if (tconn->write_ordering >= WO_bdev_flush) {
1151 		rcu_read_lock();
1152 		idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1153 			if (!get_ldev(mdev))
1154 				continue;
1155 			kref_get(&mdev->kref);
1156 			rcu_read_unlock();
1157 
1158 			rv = blkdev_issue_flush(mdev->ldev->backing_bdev,
1159 					GFP_NOIO, NULL);
1160 			if (rv) {
1161 				dev_info(DEV, "local disk flush failed with status %d\n", rv);
1162 				/* would rather check on EOPNOTSUPP, but that is not reliable.
1163 				 * don't try again for ANY return value != 0
1164 				 * if (rv == -EOPNOTSUPP) */
1165 				drbd_bump_write_ordering(tconn, WO_drain_io);
1166 			}
1167 			put_ldev(mdev);
1168 			kref_put(&mdev->kref, &drbd_minor_destroy);
1169 
1170 			rcu_read_lock();
1171 			if (rv)
1172 				break;
1173 		}
1174 		rcu_read_unlock();
1175 	}
1176 }
1177 
1178 /**
1179  * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
1180  * @mdev:	DRBD device.
1181  * @epoch:	Epoch object.
1182  * @ev:		Epoch event.
1183  */
1184 static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
1185 					       struct drbd_epoch *epoch,
1186 					       enum epoch_event ev)
1187 {
1188 	int epoch_size;
1189 	struct drbd_epoch *next_epoch;
1190 	enum finish_epoch rv = FE_STILL_LIVE;
1191 
1192 	spin_lock(&tconn->epoch_lock);
1193 	do {
1194 		next_epoch = NULL;
1195 
1196 		epoch_size = atomic_read(&epoch->epoch_size);
1197 
1198 		switch (ev & ~EV_CLEANUP) {
1199 		case EV_PUT:
1200 			atomic_dec(&epoch->active);
1201 			break;
1202 		case EV_GOT_BARRIER_NR:
1203 			set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
1204 			break;
1205 		case EV_BECAME_LAST:
1206 			/* nothing to do*/
1207 			break;
1208 		}
1209 
1210 		if (epoch_size != 0 &&
1211 		    atomic_read(&epoch->active) == 0 &&
1212 		    (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
1213 			if (!(ev & EV_CLEANUP)) {
1214 				spin_unlock(&tconn->epoch_lock);
1215 				drbd_send_b_ack(epoch->tconn, epoch->barrier_nr, epoch_size);
1216 				spin_lock(&tconn->epoch_lock);
1217 			}
1218 #if 0
1219 			/* FIXME: dec unacked on connection, once we have
1220 			 * something to count pending connection packets in. */
1221 			if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))
1222 				dec_unacked(epoch->tconn);
1223 #endif
1224 
1225 			if (tconn->current_epoch != epoch) {
1226 				next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
1227 				list_del(&epoch->list);
1228 				ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
1229 				tconn->epochs--;
1230 				kfree(epoch);
1231 
1232 				if (rv == FE_STILL_LIVE)
1233 					rv = FE_DESTROYED;
1234 			} else {
1235 				epoch->flags = 0;
1236 				atomic_set(&epoch->epoch_size, 0);
1237 				/* atomic_set(&epoch->active, 0); is already zero */
1238 				if (rv == FE_STILL_LIVE)
1239 					rv = FE_RECYCLED;
1240 			}
1241 		}
1242 
1243 		if (!next_epoch)
1244 			break;
1245 
1246 		epoch = next_epoch;
1247 	} while (1);
1248 
1249 	spin_unlock(&tconn->epoch_lock);
1250 
1251 	return rv;
1252 }
1253 
1254 /**
1255  * drbd_bump_write_ordering() - Fall back to an other write ordering method
1256  * @tconn:	DRBD connection.
1257  * @wo:		Write ordering method to try.
1258  */
1259 void drbd_bump_write_ordering(struct drbd_tconn *tconn, enum write_ordering_e wo)
1260 {
1261 	struct disk_conf *dc;
1262 	struct drbd_conf *mdev;
1263 	enum write_ordering_e pwo;
1264 	int vnr;
1265 	static char *write_ordering_str[] = {
1266 		[WO_none] = "none",
1267 		[WO_drain_io] = "drain",
1268 		[WO_bdev_flush] = "flush",
1269 	};
1270 
1271 	pwo = tconn->write_ordering;
1272 	wo = min(pwo, wo);
1273 	rcu_read_lock();
1274 	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1275 		if (!get_ldev_if_state(mdev, D_ATTACHING))
1276 			continue;
1277 		dc = rcu_dereference(mdev->ldev->disk_conf);
1278 
1279 		if (wo == WO_bdev_flush && !dc->disk_flushes)
1280 			wo = WO_drain_io;
1281 		if (wo == WO_drain_io && !dc->disk_drain)
1282 			wo = WO_none;
1283 		put_ldev(mdev);
1284 	}
1285 	rcu_read_unlock();
1286 	tconn->write_ordering = wo;
1287 	if (pwo != tconn->write_ordering || wo == WO_bdev_flush)
1288 		conn_info(tconn, "Method to ensure write ordering: %s\n", write_ordering_str[tconn->write_ordering]);
1289 }
1290 
1291 /**
1292  * drbd_submit_peer_request()
1293  * @mdev:	DRBD device.
1294  * @peer_req:	peer request
1295  * @rw:		flag field, see bio->bi_rw
1296  *
1297  * May spread the pages to multiple bios,
1298  * depending on bio_add_page restrictions.
1299  *
1300  * Returns 0 if all bios have been submitted,
1301  * -ENOMEM if we could not allocate enough bios,
1302  * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a
1303  *  single page to an empty bio (which should never happen and likely indicates
1304  *  that the lower level IO stack is in some way broken). This has been observed
1305  *  on certain Xen deployments.
1306  */
1307 /* TODO allocate from our own bio_set. */
1308 int drbd_submit_peer_request(struct drbd_conf *mdev,
1309 			     struct drbd_peer_request *peer_req,
1310 			     const unsigned rw, const int fault_type)
1311 {
1312 	struct bio *bios = NULL;
1313 	struct bio *bio;
1314 	struct page *page = peer_req->pages;
1315 	sector_t sector = peer_req->i.sector;
1316 	unsigned ds = peer_req->i.size;
1317 	unsigned n_bios = 0;
1318 	unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT;
1319 	int err = -ENOMEM;
1320 
1321 	/* In most cases, we will only need one bio.  But in case the lower
1322 	 * level restrictions happen to be different at this offset on this
1323 	 * side than those of the sending peer, we may need to submit the
1324 	 * request in more than one bio.
1325 	 *
1326 	 * Plain bio_alloc is good enough here, this is no DRBD internally
1327 	 * generated bio, but a bio allocated on behalf of the peer.
1328 	 */
1329 next_bio:
1330 	bio = bio_alloc(GFP_NOIO, nr_pages);
1331 	if (!bio) {
1332 		dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
1333 		goto fail;
1334 	}
1335 	/* > peer_req->i.sector, unless this is the first bio */
1336 	bio->bi_iter.bi_sector = sector;
1337 	bio->bi_bdev = mdev->ldev->backing_bdev;
1338 	bio->bi_rw = rw;
1339 	bio->bi_private = peer_req;
1340 	bio->bi_end_io = drbd_peer_request_endio;
1341 
1342 	bio->bi_next = bios;
1343 	bios = bio;
1344 	++n_bios;
1345 
1346 	page_chain_for_each(page) {
1347 		unsigned len = min_t(unsigned, ds, PAGE_SIZE);
1348 		if (!bio_add_page(bio, page, len, 0)) {
1349 			/* A single page must always be possible!
1350 			 * But in case it fails anyways,
1351 			 * we deal with it, and complain (below). */
1352 			if (bio->bi_vcnt == 0) {
1353 				dev_err(DEV,
1354 					"bio_add_page failed for len=%u, "
1355 					"bi_vcnt=0 (bi_sector=%llu)\n",
1356 					len, (uint64_t)bio->bi_iter.bi_sector);
1357 				err = -ENOSPC;
1358 				goto fail;
1359 			}
1360 			goto next_bio;
1361 		}
1362 		ds -= len;
1363 		sector += len >> 9;
1364 		--nr_pages;
1365 	}
1366 	D_ASSERT(page == NULL);
1367 	D_ASSERT(ds == 0);
1368 
1369 	atomic_set(&peer_req->pending_bios, n_bios);
1370 	do {
1371 		bio = bios;
1372 		bios = bios->bi_next;
1373 		bio->bi_next = NULL;
1374 
1375 		drbd_generic_make_request(mdev, fault_type, bio);
1376 	} while (bios);
1377 	return 0;
1378 
1379 fail:
1380 	while (bios) {
1381 		bio = bios;
1382 		bios = bios->bi_next;
1383 		bio_put(bio);
1384 	}
1385 	return err;
1386 }
1387 
1388 static void drbd_remove_epoch_entry_interval(struct drbd_conf *mdev,
1389 					     struct drbd_peer_request *peer_req)
1390 {
1391 	struct drbd_interval *i = &peer_req->i;
1392 
1393 	drbd_remove_interval(&mdev->write_requests, i);
1394 	drbd_clear_interval(i);
1395 
1396 	/* Wake up any processes waiting for this peer request to complete.  */
1397 	if (i->waiting)
1398 		wake_up(&mdev->misc_wait);
1399 }
1400 
1401 void conn_wait_active_ee_empty(struct drbd_tconn *tconn)
1402 {
1403 	struct drbd_conf *mdev;
1404 	int vnr;
1405 
1406 	rcu_read_lock();
1407 	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1408 		kref_get(&mdev->kref);
1409 		rcu_read_unlock();
1410 		drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
1411 		kref_put(&mdev->kref, &drbd_minor_destroy);
1412 		rcu_read_lock();
1413 	}
1414 	rcu_read_unlock();
1415 }
1416 
1417 static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
1418 {
1419 	int rv;
1420 	struct p_barrier *p = pi->data;
1421 	struct drbd_epoch *epoch;
1422 
1423 	/* FIXME these are unacked on connection,
1424 	 * not a specific (peer)device.
1425 	 */
1426 	tconn->current_epoch->barrier_nr = p->barrier;
1427 	tconn->current_epoch->tconn = tconn;
1428 	rv = drbd_may_finish_epoch(tconn, tconn->current_epoch, EV_GOT_BARRIER_NR);
1429 
1430 	/* P_BARRIER_ACK may imply that the corresponding extent is dropped from
1431 	 * the activity log, which means it would not be resynced in case the
1432 	 * R_PRIMARY crashes now.
1433 	 * Therefore we must send the barrier_ack after the barrier request was
1434 	 * completed. */
1435 	switch (tconn->write_ordering) {
1436 	case WO_none:
1437 		if (rv == FE_RECYCLED)
1438 			return 0;
1439 
1440 		/* receiver context, in the writeout path of the other node.
1441 		 * avoid potential distributed deadlock */
1442 		epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1443 		if (epoch)
1444 			break;
1445 		else
1446 			conn_warn(tconn, "Allocation of an epoch failed, slowing down\n");
1447 			/* Fall through */
1448 
1449 	case WO_bdev_flush:
1450 	case WO_drain_io:
1451 		conn_wait_active_ee_empty(tconn);
1452 		drbd_flush(tconn);
1453 
1454 		if (atomic_read(&tconn->current_epoch->epoch_size)) {
1455 			epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
1456 			if (epoch)
1457 				break;
1458 		}
1459 
1460 		return 0;
1461 	default:
1462 		conn_err(tconn, "Strangeness in tconn->write_ordering %d\n", tconn->write_ordering);
1463 		return -EIO;
1464 	}
1465 
1466 	epoch->flags = 0;
1467 	atomic_set(&epoch->epoch_size, 0);
1468 	atomic_set(&epoch->active, 0);
1469 
1470 	spin_lock(&tconn->epoch_lock);
1471 	if (atomic_read(&tconn->current_epoch->epoch_size)) {
1472 		list_add(&epoch->list, &tconn->current_epoch->list);
1473 		tconn->current_epoch = epoch;
1474 		tconn->epochs++;
1475 	} else {
1476 		/* The current_epoch got recycled while we allocated this one... */
1477 		kfree(epoch);
1478 	}
1479 	spin_unlock(&tconn->epoch_lock);
1480 
1481 	return 0;
1482 }
1483 
1484 /* used from receive_RSDataReply (recv_resync_read)
1485  * and from receive_Data */
1486 static struct drbd_peer_request *
1487 read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
1488 	      int data_size) __must_hold(local)
1489 {
1490 	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
1491 	struct drbd_peer_request *peer_req;
1492 	struct page *page;
1493 	int dgs, ds, err;
1494 	void *dig_in = mdev->tconn->int_dig_in;
1495 	void *dig_vv = mdev->tconn->int_dig_vv;
1496 	unsigned long *data;
1497 
1498 	dgs = 0;
1499 	if (mdev->tconn->peer_integrity_tfm) {
1500 		dgs = crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
1501 		/*
1502 		 * FIXME: Receive the incoming digest into the receive buffer
1503 		 *	  here, together with its struct p_data?
1504 		 */
1505 		err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);
1506 		if (err)
1507 			return NULL;
1508 		data_size -= dgs;
1509 	}
1510 
1511 	if (!expect(IS_ALIGNED(data_size, 512)))
1512 		return NULL;
1513 	if (!expect(data_size <= DRBD_MAX_BIO_SIZE))
1514 		return NULL;
1515 
1516 	/* even though we trust out peer,
1517 	 * we sometimes have to double check. */
1518 	if (sector + (data_size>>9) > capacity) {
1519 		dev_err(DEV, "request from peer beyond end of local disk: "
1520 			"capacity: %llus < sector: %llus + size: %u\n",
1521 			(unsigned long long)capacity,
1522 			(unsigned long long)sector, data_size);
1523 		return NULL;
1524 	}
1525 
1526 	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
1527 	 * "criss-cross" setup, that might cause write-out on some other DRBD,
1528 	 * which in turn might block on the other node at this very place.  */
1529 	peer_req = drbd_alloc_peer_req(mdev, id, sector, data_size, GFP_NOIO);
1530 	if (!peer_req)
1531 		return NULL;
1532 
1533 	if (!data_size)
1534 		return peer_req;
1535 
1536 	ds = data_size;
1537 	page = peer_req->pages;
1538 	page_chain_for_each(page) {
1539 		unsigned len = min_t(int, ds, PAGE_SIZE);
1540 		data = kmap(page);
1541 		err = drbd_recv_all_warn(mdev->tconn, data, len);
1542 		if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) {
1543 			dev_err(DEV, "Fault injection: Corrupting data on receive\n");
1544 			data[0] = data[0] ^ (unsigned long)-1;
1545 		}
1546 		kunmap(page);
1547 		if (err) {
1548 			drbd_free_peer_req(mdev, peer_req);
1549 			return NULL;
1550 		}
1551 		ds -= len;
1552 	}
1553 
1554 	if (dgs) {
1555 		drbd_csum_ee(mdev, mdev->tconn->peer_integrity_tfm, peer_req, dig_vv);
1556 		if (memcmp(dig_in, dig_vv, dgs)) {
1557 			dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
1558 				(unsigned long long)sector, data_size);
1559 			drbd_free_peer_req(mdev, peer_req);
1560 			return NULL;
1561 		}
1562 	}
1563 	mdev->recv_cnt += data_size>>9;
1564 	return peer_req;
1565 }
1566 
1567 /* drbd_drain_block() just takes a data block
1568  * out of the socket input buffer, and discards it.
1569  */
1570 static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
1571 {
1572 	struct page *page;
1573 	int err = 0;
1574 	void *data;
1575 
1576 	if (!data_size)
1577 		return 0;
1578 
1579 	page = drbd_alloc_pages(mdev, 1, 1);
1580 
1581 	data = kmap(page);
1582 	while (data_size) {
1583 		unsigned int len = min_t(int, data_size, PAGE_SIZE);
1584 
1585 		err = drbd_recv_all_warn(mdev->tconn, data, len);
1586 		if (err)
1587 			break;
1588 		data_size -= len;
1589 	}
1590 	kunmap(page);
1591 	drbd_free_pages(mdev, page, 0);
1592 	return err;
1593 }
1594 
1595 static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
1596 			   sector_t sector, int data_size)
1597 {
1598 	struct bio_vec bvec;
1599 	struct bvec_iter iter;
1600 	struct bio *bio;
1601 	int dgs, err, expect;
1602 	void *dig_in = mdev->tconn->int_dig_in;
1603 	void *dig_vv = mdev->tconn->int_dig_vv;
1604 
1605 	dgs = 0;
1606 	if (mdev->tconn->peer_integrity_tfm) {
1607 		dgs = crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
1608 		err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);
1609 		if (err)
1610 			return err;
1611 		data_size -= dgs;
1612 	}
1613 
1614 	/* optimistically update recv_cnt.  if receiving fails below,
1615 	 * we disconnect anyways, and counters will be reset. */
1616 	mdev->recv_cnt += data_size>>9;
1617 
1618 	bio = req->master_bio;
1619 	D_ASSERT(sector == bio->bi_iter.bi_sector);
1620 
1621 	bio_for_each_segment(bvec, bio, iter) {
1622 		void *mapped = kmap(bvec.bv_page) + bvec.bv_offset;
1623 		expect = min_t(int, data_size, bvec.bv_len);
1624 		err = drbd_recv_all_warn(mdev->tconn, mapped, expect);
1625 		kunmap(bvec.bv_page);
1626 		if (err)
1627 			return err;
1628 		data_size -= expect;
1629 	}
1630 
1631 	if (dgs) {
1632 		drbd_csum_bio(mdev, mdev->tconn->peer_integrity_tfm, bio, dig_vv);
1633 		if (memcmp(dig_in, dig_vv, dgs)) {
1634 			dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");
1635 			return -EINVAL;
1636 		}
1637 	}
1638 
1639 	D_ASSERT(data_size == 0);
1640 	return 0;
1641 }
1642 
1643 /*
1644  * e_end_resync_block() is called in asender context via
1645  * drbd_finish_peer_reqs().
1646  */
1647 static int e_end_resync_block(struct drbd_work *w, int unused)
1648 {
1649 	struct drbd_peer_request *peer_req =
1650 		container_of(w, struct drbd_peer_request, w);
1651 	struct drbd_conf *mdev = w->mdev;
1652 	sector_t sector = peer_req->i.sector;
1653 	int err;
1654 
1655 	D_ASSERT(drbd_interval_empty(&peer_req->i));
1656 
1657 	if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
1658 		drbd_set_in_sync(mdev, sector, peer_req->i.size);
1659 		err = drbd_send_ack(mdev, P_RS_WRITE_ACK, peer_req);
1660 	} else {
1661 		/* Record failure to sync */
1662 		drbd_rs_failed_io(mdev, sector, peer_req->i.size);
1663 
1664 		err  = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
1665 	}
1666 	dec_unacked(mdev);
1667 
1668 	return err;
1669 }
1670 
1671 static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
1672 {
1673 	struct drbd_peer_request *peer_req;
1674 
1675 	peer_req = read_in_block(mdev, ID_SYNCER, sector, data_size);
1676 	if (!peer_req)
1677 		goto fail;
1678 
1679 	dec_rs_pending(mdev);
1680 
1681 	inc_unacked(mdev);
1682 	/* corresponding dec_unacked() in e_end_resync_block()
1683 	 * respective _drbd_clear_done_ee */
1684 
1685 	peer_req->w.cb = e_end_resync_block;
1686 
1687 	spin_lock_irq(&mdev->tconn->req_lock);
1688 	list_add(&peer_req->w.list, &mdev->sync_ee);
1689 	spin_unlock_irq(&mdev->tconn->req_lock);
1690 
1691 	atomic_add(data_size >> 9, &mdev->rs_sect_ev);
1692 	if (drbd_submit_peer_request(mdev, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)
1693 		return 0;
1694 
1695 	/* don't care for the reason here */
1696 	dev_err(DEV, "submit failed, triggering re-connect\n");
1697 	spin_lock_irq(&mdev->tconn->req_lock);
1698 	list_del(&peer_req->w.list);
1699 	spin_unlock_irq(&mdev->tconn->req_lock);
1700 
1701 	drbd_free_peer_req(mdev, peer_req);
1702 fail:
1703 	put_ldev(mdev);
1704 	return -EIO;
1705 }
1706 
1707 static struct drbd_request *
1708 find_request(struct drbd_conf *mdev, struct rb_root *root, u64 id,
1709 	     sector_t sector, bool missing_ok, const char *func)
1710 {
1711 	struct drbd_request *req;
1712 
1713 	/* Request object according to our peer */
1714 	req = (struct drbd_request *)(unsigned long)id;
1715 	if (drbd_contains_interval(root, sector, &req->i) && req->i.local)
1716 		return req;
1717 	if (!missing_ok) {
1718 		dev_err(DEV, "%s: failed to find request 0x%lx, sector %llus\n", func,
1719 			(unsigned long)id, (unsigned long long)sector);
1720 	}
1721 	return NULL;
1722 }
1723 
1724 static int receive_DataReply(struct drbd_tconn *tconn, struct packet_info *pi)
1725 {
1726 	struct drbd_conf *mdev;
1727 	struct drbd_request *req;
1728 	sector_t sector;
1729 	int err;
1730 	struct p_data *p = pi->data;
1731 
1732 	mdev = vnr_to_mdev(tconn, pi->vnr);
1733 	if (!mdev)
1734 		return -EIO;
1735 
1736 	sector = be64_to_cpu(p->sector);
1737 
1738 	spin_lock_irq(&mdev->tconn->req_lock);
1739 	req = find_request(mdev, &mdev->read_requests, p->block_id, sector, false, __func__);
1740 	spin_unlock_irq(&mdev->tconn->req_lock);
1741 	if (unlikely(!req))
1742 		return -EIO;
1743 
1744 	/* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
1745 	 * special casing it there for the various failure cases.
1746 	 * still no race with drbd_fail_pending_reads */
1747 	err = recv_dless_read(mdev, req, sector, pi->size);
1748 	if (!err)
1749 		req_mod(req, DATA_RECEIVED);
1750 	/* else: nothing. handled from drbd_disconnect...
1751 	 * I don't think we may complete this just yet
1752 	 * in case we are "on-disconnect: freeze" */
1753 
1754 	return err;
1755 }
1756 
1757 static int receive_RSDataReply(struct drbd_tconn *tconn, struct packet_info *pi)
1758 {
1759 	struct drbd_conf *mdev;
1760 	sector_t sector;
1761 	int err;
1762 	struct p_data *p = pi->data;
1763 
1764 	mdev = vnr_to_mdev(tconn, pi->vnr);
1765 	if (!mdev)
1766 		return -EIO;
1767 
1768 	sector = be64_to_cpu(p->sector);
1769 	D_ASSERT(p->block_id == ID_SYNCER);
1770 
1771 	if (get_ldev(mdev)) {
1772 		/* data is submitted to disk within recv_resync_read.
1773 		 * corresponding put_ldev done below on error,
1774 		 * or in drbd_peer_request_endio. */
1775 		err = recv_resync_read(mdev, sector, pi->size);
1776 	} else {
1777 		if (__ratelimit(&drbd_ratelimit_state))
1778 			dev_err(DEV, "Can not write resync data to local disk.\n");
1779 
1780 		err = drbd_drain_block(mdev, pi->size);
1781 
1782 		drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
1783 	}
1784 
1785 	atomic_add(pi->size >> 9, &mdev->rs_sect_in);
1786 
1787 	return err;
1788 }
1789 
1790 static void restart_conflicting_writes(struct drbd_conf *mdev,
1791 				       sector_t sector, int size)
1792 {
1793 	struct drbd_interval *i;
1794 	struct drbd_request *req;
1795 
1796 	drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
1797 		if (!i->local)
1798 			continue;
1799 		req = container_of(i, struct drbd_request, i);
1800 		if (req->rq_state & RQ_LOCAL_PENDING ||
1801 		    !(req->rq_state & RQ_POSTPONED))
1802 			continue;
1803 		/* as it is RQ_POSTPONED, this will cause it to
1804 		 * be queued on the retry workqueue. */
1805 		__req_mod(req, CONFLICT_RESOLVED, NULL);
1806 	}
1807 }
1808 
1809 /*
1810  * e_end_block() is called in asender context via drbd_finish_peer_reqs().
1811  */
1812 static int e_end_block(struct drbd_work *w, int cancel)
1813 {
1814 	struct drbd_peer_request *peer_req =
1815 		container_of(w, struct drbd_peer_request, w);
1816 	struct drbd_conf *mdev = w->mdev;
1817 	sector_t sector = peer_req->i.sector;
1818 	int err = 0, pcmd;
1819 
1820 	if (peer_req->flags & EE_SEND_WRITE_ACK) {
1821 		if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
1822 			pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
1823 				mdev->state.conn <= C_PAUSED_SYNC_T &&
1824 				peer_req->flags & EE_MAY_SET_IN_SYNC) ?
1825 				P_RS_WRITE_ACK : P_WRITE_ACK;
1826 			err = drbd_send_ack(mdev, pcmd, peer_req);
1827 			if (pcmd == P_RS_WRITE_ACK)
1828 				drbd_set_in_sync(mdev, sector, peer_req->i.size);
1829 		} else {
1830 			err = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
1831 			/* we expect it to be marked out of sync anyways...
1832 			 * maybe assert this?  */
1833 		}
1834 		dec_unacked(mdev);
1835 	}
1836 	/* we delete from the conflict detection hash _after_ we sent out the
1837 	 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right.  */
1838 	if (peer_req->flags & EE_IN_INTERVAL_TREE) {
1839 		spin_lock_irq(&mdev->tconn->req_lock);
1840 		D_ASSERT(!drbd_interval_empty(&peer_req->i));
1841 		drbd_remove_epoch_entry_interval(mdev, peer_req);
1842 		if (peer_req->flags & EE_RESTART_REQUESTS)
1843 			restart_conflicting_writes(mdev, sector, peer_req->i.size);
1844 		spin_unlock_irq(&mdev->tconn->req_lock);
1845 	} else
1846 		D_ASSERT(drbd_interval_empty(&peer_req->i));
1847 
1848 	drbd_may_finish_epoch(mdev->tconn, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
1849 
1850 	return err;
1851 }
1852 
1853 static int e_send_ack(struct drbd_work *w, enum drbd_packet ack)
1854 {
1855 	struct drbd_conf *mdev = w->mdev;
1856 	struct drbd_peer_request *peer_req =
1857 		container_of(w, struct drbd_peer_request, w);
1858 	int err;
1859 
1860 	err = drbd_send_ack(mdev, ack, peer_req);
1861 	dec_unacked(mdev);
1862 
1863 	return err;
1864 }
1865 
1866 static int e_send_superseded(struct drbd_work *w, int unused)
1867 {
1868 	return e_send_ack(w, P_SUPERSEDED);
1869 }
1870 
1871 static int e_send_retry_write(struct drbd_work *w, int unused)
1872 {
1873 	struct drbd_tconn *tconn = w->mdev->tconn;
1874 
1875 	return e_send_ack(w, tconn->agreed_pro_version >= 100 ?
1876 			     P_RETRY_WRITE : P_SUPERSEDED);
1877 }
1878 
1879 static bool seq_greater(u32 a, u32 b)
1880 {
1881 	/*
1882 	 * We assume 32-bit wrap-around here.
1883 	 * For 24-bit wrap-around, we would have to shift:
1884 	 *  a <<= 8; b <<= 8;
1885 	 */
1886 	return (s32)a - (s32)b > 0;
1887 }
1888 
1889 static u32 seq_max(u32 a, u32 b)
1890 {
1891 	return seq_greater(a, b) ? a : b;
1892 }
1893 
1894 static void update_peer_seq(struct drbd_conf *mdev, unsigned int peer_seq)
1895 {
1896 	unsigned int newest_peer_seq;
1897 
1898 	if (test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)) {
1899 		spin_lock(&mdev->peer_seq_lock);
1900 		newest_peer_seq = seq_max(mdev->peer_seq, peer_seq);
1901 		mdev->peer_seq = newest_peer_seq;
1902 		spin_unlock(&mdev->peer_seq_lock);
1903 		/* wake up only if we actually changed mdev->peer_seq */
1904 		if (peer_seq == newest_peer_seq)
1905 			wake_up(&mdev->seq_wait);
1906 	}
1907 }
1908 
1909 static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2)
1910 {
1911 	return !((s1 + (l1>>9) <= s2) || (s1 >= s2 + (l2>>9)));
1912 }
1913 
1914 /* maybe change sync_ee into interval trees as well? */
1915 static bool overlapping_resync_write(struct drbd_conf *mdev, struct drbd_peer_request *peer_req)
1916 {
1917 	struct drbd_peer_request *rs_req;
1918 	bool rv = 0;
1919 
1920 	spin_lock_irq(&mdev->tconn->req_lock);
1921 	list_for_each_entry(rs_req, &mdev->sync_ee, w.list) {
1922 		if (overlaps(peer_req->i.sector, peer_req->i.size,
1923 			     rs_req->i.sector, rs_req->i.size)) {
1924 			rv = 1;
1925 			break;
1926 		}
1927 	}
1928 	spin_unlock_irq(&mdev->tconn->req_lock);
1929 
1930 	return rv;
1931 }
1932 
1933 /* Called from receive_Data.
1934  * Synchronize packets on sock with packets on msock.
1935  *
1936  * This is here so even when a P_DATA packet traveling via sock overtook an Ack
1937  * packet traveling on msock, they are still processed in the order they have
1938  * been sent.
1939  *
1940  * Note: we don't care for Ack packets overtaking P_DATA packets.
1941  *
1942  * In case packet_seq is larger than mdev->peer_seq number, there are
1943  * outstanding packets on the msock. We wait for them to arrive.
1944  * In case we are the logically next packet, we update mdev->peer_seq
1945  * ourselves. Correctly handles 32bit wrap around.
1946  *
1947  * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
1948  * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
1949  * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
1950  * 1<<9 == 512 seconds aka ages for the 32bit wrap around...
1951  *
1952  * returns 0 if we may process the packet,
1953  * -ERESTARTSYS if we were interrupted (by disconnect signal). */
1954 static int wait_for_and_update_peer_seq(struct drbd_conf *mdev, const u32 peer_seq)
1955 {
1956 	DEFINE_WAIT(wait);
1957 	long timeout;
1958 	int ret = 0, tp;
1959 
1960 	if (!test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags))
1961 		return 0;
1962 
1963 	spin_lock(&mdev->peer_seq_lock);
1964 	for (;;) {
1965 		if (!seq_greater(peer_seq - 1, mdev->peer_seq)) {
1966 			mdev->peer_seq = seq_max(mdev->peer_seq, peer_seq);
1967 			break;
1968 		}
1969 
1970 		if (signal_pending(current)) {
1971 			ret = -ERESTARTSYS;
1972 			break;
1973 		}
1974 
1975 		rcu_read_lock();
1976 		tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
1977 		rcu_read_unlock();
1978 
1979 		if (!tp)
1980 			break;
1981 
1982 		/* Only need to wait if two_primaries is enabled */
1983 		prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
1984 		spin_unlock(&mdev->peer_seq_lock);
1985 		rcu_read_lock();
1986 		timeout = rcu_dereference(mdev->tconn->net_conf)->ping_timeo*HZ/10;
1987 		rcu_read_unlock();
1988 		timeout = schedule_timeout(timeout);
1989 		spin_lock(&mdev->peer_seq_lock);
1990 		if (!timeout) {
1991 			ret = -ETIMEDOUT;
1992 			dev_err(DEV, "Timed out waiting for missing ack packets; disconnecting\n");
1993 			break;
1994 		}
1995 	}
1996 	spin_unlock(&mdev->peer_seq_lock);
1997 	finish_wait(&mdev->seq_wait, &wait);
1998 	return ret;
1999 }
2000 
2001 /* see also bio_flags_to_wire()
2002  * DRBD_REQ_*, because we need to semantically map the flags to data packet
2003  * flags and back. We may replicate to other kernel versions. */
2004 static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
2005 {
2006 	return  (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
2007 		(dpf & DP_FUA ? REQ_FUA : 0) |
2008 		(dpf & DP_FLUSH ? REQ_FLUSH : 0) |
2009 		(dpf & DP_DISCARD ? REQ_DISCARD : 0);
2010 }
2011 
2012 static void fail_postponed_requests(struct drbd_conf *mdev, sector_t sector,
2013 				    unsigned int size)
2014 {
2015 	struct drbd_interval *i;
2016 
2017     repeat:
2018 	drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
2019 		struct drbd_request *req;
2020 		struct bio_and_error m;
2021 
2022 		if (!i->local)
2023 			continue;
2024 		req = container_of(i, struct drbd_request, i);
2025 		if (!(req->rq_state & RQ_POSTPONED))
2026 			continue;
2027 		req->rq_state &= ~RQ_POSTPONED;
2028 		__req_mod(req, NEG_ACKED, &m);
2029 		spin_unlock_irq(&mdev->tconn->req_lock);
2030 		if (m.bio)
2031 			complete_master_bio(mdev, &m);
2032 		spin_lock_irq(&mdev->tconn->req_lock);
2033 		goto repeat;
2034 	}
2035 }
2036 
2037 static int handle_write_conflicts(struct drbd_conf *mdev,
2038 				  struct drbd_peer_request *peer_req)
2039 {
2040 	struct drbd_tconn *tconn = mdev->tconn;
2041 	bool resolve_conflicts = test_bit(RESOLVE_CONFLICTS, &tconn->flags);
2042 	sector_t sector = peer_req->i.sector;
2043 	const unsigned int size = peer_req->i.size;
2044 	struct drbd_interval *i;
2045 	bool equal;
2046 	int err;
2047 
2048 	/*
2049 	 * Inserting the peer request into the write_requests tree will prevent
2050 	 * new conflicting local requests from being added.
2051 	 */
2052 	drbd_insert_interval(&mdev->write_requests, &peer_req->i);
2053 
2054     repeat:
2055 	drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
2056 		if (i == &peer_req->i)
2057 			continue;
2058 
2059 		if (!i->local) {
2060 			/*
2061 			 * Our peer has sent a conflicting remote request; this
2062 			 * should not happen in a two-node setup.  Wait for the
2063 			 * earlier peer request to complete.
2064 			 */
2065 			err = drbd_wait_misc(mdev, i);
2066 			if (err)
2067 				goto out;
2068 			goto repeat;
2069 		}
2070 
2071 		equal = i->sector == sector && i->size == size;
2072 		if (resolve_conflicts) {
2073 			/*
2074 			 * If the peer request is fully contained within the
2075 			 * overlapping request, it can be considered overwritten
2076 			 * and thus superseded; otherwise, it will be retried
2077 			 * once all overlapping requests have completed.
2078 			 */
2079 			bool superseded = i->sector <= sector && i->sector +
2080 				       (i->size >> 9) >= sector + (size >> 9);
2081 
2082 			if (!equal)
2083 				dev_alert(DEV, "Concurrent writes detected: "
2084 					       "local=%llus +%u, remote=%llus +%u, "
2085 					       "assuming %s came first\n",
2086 					  (unsigned long long)i->sector, i->size,
2087 					  (unsigned long long)sector, size,
2088 					  superseded ? "local" : "remote");
2089 
2090 			inc_unacked(mdev);
2091 			peer_req->w.cb = superseded ? e_send_superseded :
2092 						   e_send_retry_write;
2093 			list_add_tail(&peer_req->w.list, &mdev->done_ee);
2094 			wake_asender(mdev->tconn);
2095 
2096 			err = -ENOENT;
2097 			goto out;
2098 		} else {
2099 			struct drbd_request *req =
2100 				container_of(i, struct drbd_request, i);
2101 
2102 			if (!equal)
2103 				dev_alert(DEV, "Concurrent writes detected: "
2104 					       "local=%llus +%u, remote=%llus +%u\n",
2105 					  (unsigned long long)i->sector, i->size,
2106 					  (unsigned long long)sector, size);
2107 
2108 			if (req->rq_state & RQ_LOCAL_PENDING ||
2109 			    !(req->rq_state & RQ_POSTPONED)) {
2110 				/*
2111 				 * Wait for the node with the discard flag to
2112 				 * decide if this request has been superseded
2113 				 * or needs to be retried.
2114 				 * Requests that have been superseded will
2115 				 * disappear from the write_requests tree.
2116 				 *
2117 				 * In addition, wait for the conflicting
2118 				 * request to finish locally before submitting
2119 				 * the conflicting peer request.
2120 				 */
2121 				err = drbd_wait_misc(mdev, &req->i);
2122 				if (err) {
2123 					_conn_request_state(mdev->tconn,
2124 							    NS(conn, C_TIMEOUT),
2125 							    CS_HARD);
2126 					fail_postponed_requests(mdev, sector, size);
2127 					goto out;
2128 				}
2129 				goto repeat;
2130 			}
2131 			/*
2132 			 * Remember to restart the conflicting requests after
2133 			 * the new peer request has completed.
2134 			 */
2135 			peer_req->flags |= EE_RESTART_REQUESTS;
2136 		}
2137 	}
2138 	err = 0;
2139 
2140     out:
2141 	if (err)
2142 		drbd_remove_epoch_entry_interval(mdev, peer_req);
2143 	return err;
2144 }
2145 
2146 /* mirrored write */
2147 static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
2148 {
2149 	struct drbd_conf *mdev;
2150 	sector_t sector;
2151 	struct drbd_peer_request *peer_req;
2152 	struct p_data *p = pi->data;
2153 	u32 peer_seq = be32_to_cpu(p->seq_num);
2154 	int rw = WRITE;
2155 	u32 dp_flags;
2156 	int err, tp;
2157 
2158 	mdev = vnr_to_mdev(tconn, pi->vnr);
2159 	if (!mdev)
2160 		return -EIO;
2161 
2162 	if (!get_ldev(mdev)) {
2163 		int err2;
2164 
2165 		err = wait_for_and_update_peer_seq(mdev, peer_seq);
2166 		drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
2167 		atomic_inc(&tconn->current_epoch->epoch_size);
2168 		err2 = drbd_drain_block(mdev, pi->size);
2169 		if (!err)
2170 			err = err2;
2171 		return err;
2172 	}
2173 
2174 	/*
2175 	 * Corresponding put_ldev done either below (on various errors), or in
2176 	 * drbd_peer_request_endio, if we successfully submit the data at the
2177 	 * end of this function.
2178 	 */
2179 
2180 	sector = be64_to_cpu(p->sector);
2181 	peer_req = read_in_block(mdev, p->block_id, sector, pi->size);
2182 	if (!peer_req) {
2183 		put_ldev(mdev);
2184 		return -EIO;
2185 	}
2186 
2187 	peer_req->w.cb = e_end_block;
2188 
2189 	dp_flags = be32_to_cpu(p->dp_flags);
2190 	rw |= wire_flags_to_bio(mdev, dp_flags);
2191 	if (peer_req->pages == NULL) {
2192 		D_ASSERT(peer_req->i.size == 0);
2193 		D_ASSERT(dp_flags & DP_FLUSH);
2194 	}
2195 
2196 	if (dp_flags & DP_MAY_SET_IN_SYNC)
2197 		peer_req->flags |= EE_MAY_SET_IN_SYNC;
2198 
2199 	spin_lock(&tconn->epoch_lock);
2200 	peer_req->epoch = tconn->current_epoch;
2201 	atomic_inc(&peer_req->epoch->epoch_size);
2202 	atomic_inc(&peer_req->epoch->active);
2203 	spin_unlock(&tconn->epoch_lock);
2204 
2205 	rcu_read_lock();
2206 	tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
2207 	rcu_read_unlock();
2208 	if (tp) {
2209 		peer_req->flags |= EE_IN_INTERVAL_TREE;
2210 		err = wait_for_and_update_peer_seq(mdev, peer_seq);
2211 		if (err)
2212 			goto out_interrupted;
2213 		spin_lock_irq(&mdev->tconn->req_lock);
2214 		err = handle_write_conflicts(mdev, peer_req);
2215 		if (err) {
2216 			spin_unlock_irq(&mdev->tconn->req_lock);
2217 			if (err == -ENOENT) {
2218 				put_ldev(mdev);
2219 				return 0;
2220 			}
2221 			goto out_interrupted;
2222 		}
2223 	} else {
2224 		update_peer_seq(mdev, peer_seq);
2225 		spin_lock_irq(&mdev->tconn->req_lock);
2226 	}
2227 	list_add(&peer_req->w.list, &mdev->active_ee);
2228 	spin_unlock_irq(&mdev->tconn->req_lock);
2229 
2230 	if (mdev->state.conn == C_SYNC_TARGET)
2231 		wait_event(mdev->ee_wait, !overlapping_resync_write(mdev, peer_req));
2232 
2233 	if (mdev->tconn->agreed_pro_version < 100) {
2234 		rcu_read_lock();
2235 		switch (rcu_dereference(mdev->tconn->net_conf)->wire_protocol) {
2236 		case DRBD_PROT_C:
2237 			dp_flags |= DP_SEND_WRITE_ACK;
2238 			break;
2239 		case DRBD_PROT_B:
2240 			dp_flags |= DP_SEND_RECEIVE_ACK;
2241 			break;
2242 		}
2243 		rcu_read_unlock();
2244 	}
2245 
2246 	if (dp_flags & DP_SEND_WRITE_ACK) {
2247 		peer_req->flags |= EE_SEND_WRITE_ACK;
2248 		inc_unacked(mdev);
2249 		/* corresponding dec_unacked() in e_end_block()
2250 		 * respective _drbd_clear_done_ee */
2251 	}
2252 
2253 	if (dp_flags & DP_SEND_RECEIVE_ACK) {
2254 		/* I really don't like it that the receiver thread
2255 		 * sends on the msock, but anyways */
2256 		drbd_send_ack(mdev, P_RECV_ACK, peer_req);
2257 	}
2258 
2259 	if (mdev->state.pdsk < D_INCONSISTENT) {
2260 		/* In case we have the only disk of the cluster, */
2261 		drbd_set_out_of_sync(mdev, peer_req->i.sector, peer_req->i.size);
2262 		peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
2263 		peer_req->flags &= ~EE_MAY_SET_IN_SYNC;
2264 		drbd_al_begin_io(mdev, &peer_req->i, true);
2265 	}
2266 
2267 	err = drbd_submit_peer_request(mdev, peer_req, rw, DRBD_FAULT_DT_WR);
2268 	if (!err)
2269 		return 0;
2270 
2271 	/* don't care for the reason here */
2272 	dev_err(DEV, "submit failed, triggering re-connect\n");
2273 	spin_lock_irq(&mdev->tconn->req_lock);
2274 	list_del(&peer_req->w.list);
2275 	drbd_remove_epoch_entry_interval(mdev, peer_req);
2276 	spin_unlock_irq(&mdev->tconn->req_lock);
2277 	if (peer_req->flags & EE_CALL_AL_COMPLETE_IO)
2278 		drbd_al_complete_io(mdev, &peer_req->i);
2279 
2280 out_interrupted:
2281 	drbd_may_finish_epoch(tconn, peer_req->epoch, EV_PUT + EV_CLEANUP);
2282 	put_ldev(mdev);
2283 	drbd_free_peer_req(mdev, peer_req);
2284 	return err;
2285 }
2286 
2287 /* We may throttle resync, if the lower device seems to be busy,
2288  * and current sync rate is above c_min_rate.
2289  *
2290  * To decide whether or not the lower device is busy, we use a scheme similar
2291  * to MD RAID is_mddev_idle(): if the partition stats reveal "significant"
2292  * (more than 64 sectors) of activity we cannot account for with our own resync
2293  * activity, it obviously is "busy".
2294  *
2295  * The current sync rate used here uses only the most recent two step marks,
2296  * to have a short time average so we can react faster.
2297  */
2298 int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
2299 {
2300 	struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
2301 	unsigned long db, dt, dbdt;
2302 	struct lc_element *tmp;
2303 	int curr_events;
2304 	int throttle = 0;
2305 	unsigned int c_min_rate;
2306 
2307 	rcu_read_lock();
2308 	c_min_rate = rcu_dereference(mdev->ldev->disk_conf)->c_min_rate;
2309 	rcu_read_unlock();
2310 
2311 	/* feature disabled? */
2312 	if (c_min_rate == 0)
2313 		return 0;
2314 
2315 	spin_lock_irq(&mdev->al_lock);
2316 	tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector));
2317 	if (tmp) {
2318 		struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
2319 		if (test_bit(BME_PRIORITY, &bm_ext->flags)) {
2320 			spin_unlock_irq(&mdev->al_lock);
2321 			return 0;
2322 		}
2323 		/* Do not slow down if app IO is already waiting for this extent */
2324 	}
2325 	spin_unlock_irq(&mdev->al_lock);
2326 
2327 	curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
2328 		      (int)part_stat_read(&disk->part0, sectors[1]) -
2329 			atomic_read(&mdev->rs_sect_ev);
2330 
2331 	if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
2332 		unsigned long rs_left;
2333 		int i;
2334 
2335 		mdev->rs_last_events = curr_events;
2336 
2337 		/* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
2338 		 * approx. */
2339 		i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
2340 
2341 		if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
2342 			rs_left = mdev->ov_left;
2343 		else
2344 			rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
2345 
2346 		dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
2347 		if (!dt)
2348 			dt++;
2349 		db = mdev->rs_mark_left[i] - rs_left;
2350 		dbdt = Bit2KB(db/dt);
2351 
2352 		if (dbdt > c_min_rate)
2353 			throttle = 1;
2354 	}
2355 	return throttle;
2356 }
2357 
2358 
2359 static int receive_DataRequest(struct drbd_tconn *tconn, struct packet_info *pi)
2360 {
2361 	struct drbd_conf *mdev;
2362 	sector_t sector;
2363 	sector_t capacity;
2364 	struct drbd_peer_request *peer_req;
2365 	struct digest_info *di = NULL;
2366 	int size, verb;
2367 	unsigned int fault_type;
2368 	struct p_block_req *p =	pi->data;
2369 
2370 	mdev = vnr_to_mdev(tconn, pi->vnr);
2371 	if (!mdev)
2372 		return -EIO;
2373 	capacity = drbd_get_capacity(mdev->this_bdev);
2374 
2375 	sector = be64_to_cpu(p->sector);
2376 	size   = be32_to_cpu(p->blksize);
2377 
2378 	if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
2379 		dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2380 				(unsigned long long)sector, size);
2381 		return -EINVAL;
2382 	}
2383 	if (sector + (size>>9) > capacity) {
2384 		dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2385 				(unsigned long long)sector, size);
2386 		return -EINVAL;
2387 	}
2388 
2389 	if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
2390 		verb = 1;
2391 		switch (pi->cmd) {
2392 		case P_DATA_REQUEST:
2393 			drbd_send_ack_rp(mdev, P_NEG_DREPLY, p);
2394 			break;
2395 		case P_RS_DATA_REQUEST:
2396 		case P_CSUM_RS_REQUEST:
2397 		case P_OV_REQUEST:
2398 			drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY , p);
2399 			break;
2400 		case P_OV_REPLY:
2401 			verb = 0;
2402 			dec_rs_pending(mdev);
2403 			drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC);
2404 			break;
2405 		default:
2406 			BUG();
2407 		}
2408 		if (verb && __ratelimit(&drbd_ratelimit_state))
2409 			dev_err(DEV, "Can not satisfy peer's read request, "
2410 			    "no local data.\n");
2411 
2412 		/* drain possibly payload */
2413 		return drbd_drain_block(mdev, pi->size);
2414 	}
2415 
2416 	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
2417 	 * "criss-cross" setup, that might cause write-out on some other DRBD,
2418 	 * which in turn might block on the other node at this very place.  */
2419 	peer_req = drbd_alloc_peer_req(mdev, p->block_id, sector, size, GFP_NOIO);
2420 	if (!peer_req) {
2421 		put_ldev(mdev);
2422 		return -ENOMEM;
2423 	}
2424 
2425 	switch (pi->cmd) {
2426 	case P_DATA_REQUEST:
2427 		peer_req->w.cb = w_e_end_data_req;
2428 		fault_type = DRBD_FAULT_DT_RD;
2429 		/* application IO, don't drbd_rs_begin_io */
2430 		goto submit;
2431 
2432 	case P_RS_DATA_REQUEST:
2433 		peer_req->w.cb = w_e_end_rsdata_req;
2434 		fault_type = DRBD_FAULT_RS_RD;
2435 		/* used in the sector offset progress display */
2436 		mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
2437 		break;
2438 
2439 	case P_OV_REPLY:
2440 	case P_CSUM_RS_REQUEST:
2441 		fault_type = DRBD_FAULT_RS_RD;
2442 		di = kmalloc(sizeof(*di) + pi->size, GFP_NOIO);
2443 		if (!di)
2444 			goto out_free_e;
2445 
2446 		di->digest_size = pi->size;
2447 		di->digest = (((char *)di)+sizeof(struct digest_info));
2448 
2449 		peer_req->digest = di;
2450 		peer_req->flags |= EE_HAS_DIGEST;
2451 
2452 		if (drbd_recv_all(mdev->tconn, di->digest, pi->size))
2453 			goto out_free_e;
2454 
2455 		if (pi->cmd == P_CSUM_RS_REQUEST) {
2456 			D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
2457 			peer_req->w.cb = w_e_end_csum_rs_req;
2458 			/* used in the sector offset progress display */
2459 			mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
2460 		} else if (pi->cmd == P_OV_REPLY) {
2461 			/* track progress, we may need to throttle */
2462 			atomic_add(size >> 9, &mdev->rs_sect_in);
2463 			peer_req->w.cb = w_e_end_ov_reply;
2464 			dec_rs_pending(mdev);
2465 			/* drbd_rs_begin_io done when we sent this request,
2466 			 * but accounting still needs to be done. */
2467 			goto submit_for_resync;
2468 		}
2469 		break;
2470 
2471 	case P_OV_REQUEST:
2472 		if (mdev->ov_start_sector == ~(sector_t)0 &&
2473 		    mdev->tconn->agreed_pro_version >= 90) {
2474 			unsigned long now = jiffies;
2475 			int i;
2476 			mdev->ov_start_sector = sector;
2477 			mdev->ov_position = sector;
2478 			mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector);
2479 			mdev->rs_total = mdev->ov_left;
2480 			for (i = 0; i < DRBD_SYNC_MARKS; i++) {
2481 				mdev->rs_mark_left[i] = mdev->ov_left;
2482 				mdev->rs_mark_time[i] = now;
2483 			}
2484 			dev_info(DEV, "Online Verify start sector: %llu\n",
2485 					(unsigned long long)sector);
2486 		}
2487 		peer_req->w.cb = w_e_end_ov_req;
2488 		fault_type = DRBD_FAULT_RS_RD;
2489 		break;
2490 
2491 	default:
2492 		BUG();
2493 	}
2494 
2495 	/* Throttle, drbd_rs_begin_io and submit should become asynchronous
2496 	 * wrt the receiver, but it is not as straightforward as it may seem.
2497 	 * Various places in the resync start and stop logic assume resync
2498 	 * requests are processed in order, requeuing this on the worker thread
2499 	 * introduces a bunch of new code for synchronization between threads.
2500 	 *
2501 	 * Unlimited throttling before drbd_rs_begin_io may stall the resync
2502 	 * "forever", throttling after drbd_rs_begin_io will lock that extent
2503 	 * for application writes for the same time.  For now, just throttle
2504 	 * here, where the rest of the code expects the receiver to sleep for
2505 	 * a while, anyways.
2506 	 */
2507 
2508 	/* Throttle before drbd_rs_begin_io, as that locks out application IO;
2509 	 * this defers syncer requests for some time, before letting at least
2510 	 * on request through.  The resync controller on the receiving side
2511 	 * will adapt to the incoming rate accordingly.
2512 	 *
2513 	 * We cannot throttle here if remote is Primary/SyncTarget:
2514 	 * we would also throttle its application reads.
2515 	 * In that case, throttling is done on the SyncTarget only.
2516 	 */
2517 	if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector))
2518 		schedule_timeout_uninterruptible(HZ/10);
2519 	if (drbd_rs_begin_io(mdev, sector))
2520 		goto out_free_e;
2521 
2522 submit_for_resync:
2523 	atomic_add(size >> 9, &mdev->rs_sect_ev);
2524 
2525 submit:
2526 	inc_unacked(mdev);
2527 	spin_lock_irq(&mdev->tconn->req_lock);
2528 	list_add_tail(&peer_req->w.list, &mdev->read_ee);
2529 	spin_unlock_irq(&mdev->tconn->req_lock);
2530 
2531 	if (drbd_submit_peer_request(mdev, peer_req, READ, fault_type) == 0)
2532 		return 0;
2533 
2534 	/* don't care for the reason here */
2535 	dev_err(DEV, "submit failed, triggering re-connect\n");
2536 	spin_lock_irq(&mdev->tconn->req_lock);
2537 	list_del(&peer_req->w.list);
2538 	spin_unlock_irq(&mdev->tconn->req_lock);
2539 	/* no drbd_rs_complete_io(), we are dropping the connection anyways */
2540 
2541 out_free_e:
2542 	put_ldev(mdev);
2543 	drbd_free_peer_req(mdev, peer_req);
2544 	return -EIO;
2545 }
2546 
2547 static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
2548 {
2549 	int self, peer, rv = -100;
2550 	unsigned long ch_self, ch_peer;
2551 	enum drbd_after_sb_p after_sb_0p;
2552 
2553 	self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2554 	peer = mdev->p_uuid[UI_BITMAP] & 1;
2555 
2556 	ch_peer = mdev->p_uuid[UI_SIZE];
2557 	ch_self = mdev->comm_bm_set;
2558 
2559 	rcu_read_lock();
2560 	after_sb_0p = rcu_dereference(mdev->tconn->net_conf)->after_sb_0p;
2561 	rcu_read_unlock();
2562 	switch (after_sb_0p) {
2563 	case ASB_CONSENSUS:
2564 	case ASB_DISCARD_SECONDARY:
2565 	case ASB_CALL_HELPER:
2566 	case ASB_VIOLENTLY:
2567 		dev_err(DEV, "Configuration error.\n");
2568 		break;
2569 	case ASB_DISCONNECT:
2570 		break;
2571 	case ASB_DISCARD_YOUNGER_PRI:
2572 		if (self == 0 && peer == 1) {
2573 			rv = -1;
2574 			break;
2575 		}
2576 		if (self == 1 && peer == 0) {
2577 			rv =  1;
2578 			break;
2579 		}
2580 		/* Else fall through to one of the other strategies... */
2581 	case ASB_DISCARD_OLDER_PRI:
2582 		if (self == 0 && peer == 1) {
2583 			rv = 1;
2584 			break;
2585 		}
2586 		if (self == 1 && peer == 0) {
2587 			rv = -1;
2588 			break;
2589 		}
2590 		/* Else fall through to one of the other strategies... */
2591 		dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
2592 		     "Using discard-least-changes instead\n");
2593 	case ASB_DISCARD_ZERO_CHG:
2594 		if (ch_peer == 0 && ch_self == 0) {
2595 			rv = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)
2596 				? -1 : 1;
2597 			break;
2598 		} else {
2599 			if (ch_peer == 0) { rv =  1; break; }
2600 			if (ch_self == 0) { rv = -1; break; }
2601 		}
2602 		if (after_sb_0p == ASB_DISCARD_ZERO_CHG)
2603 			break;
2604 	case ASB_DISCARD_LEAST_CHG:
2605 		if	(ch_self < ch_peer)
2606 			rv = -1;
2607 		else if (ch_self > ch_peer)
2608 			rv =  1;
2609 		else /* ( ch_self == ch_peer ) */
2610 		     /* Well, then use something else. */
2611 			rv = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)
2612 				? -1 : 1;
2613 		break;
2614 	case ASB_DISCARD_LOCAL:
2615 		rv = -1;
2616 		break;
2617 	case ASB_DISCARD_REMOTE:
2618 		rv =  1;
2619 	}
2620 
2621 	return rv;
2622 }
2623 
2624 static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
2625 {
2626 	int hg, rv = -100;
2627 	enum drbd_after_sb_p after_sb_1p;
2628 
2629 	rcu_read_lock();
2630 	after_sb_1p = rcu_dereference(mdev->tconn->net_conf)->after_sb_1p;
2631 	rcu_read_unlock();
2632 	switch (after_sb_1p) {
2633 	case ASB_DISCARD_YOUNGER_PRI:
2634 	case ASB_DISCARD_OLDER_PRI:
2635 	case ASB_DISCARD_LEAST_CHG:
2636 	case ASB_DISCARD_LOCAL:
2637 	case ASB_DISCARD_REMOTE:
2638 	case ASB_DISCARD_ZERO_CHG:
2639 		dev_err(DEV, "Configuration error.\n");
2640 		break;
2641 	case ASB_DISCONNECT:
2642 		break;
2643 	case ASB_CONSENSUS:
2644 		hg = drbd_asb_recover_0p(mdev);
2645 		if (hg == -1 && mdev->state.role == R_SECONDARY)
2646 			rv = hg;
2647 		if (hg == 1  && mdev->state.role == R_PRIMARY)
2648 			rv = hg;
2649 		break;
2650 	case ASB_VIOLENTLY:
2651 		rv = drbd_asb_recover_0p(mdev);
2652 		break;
2653 	case ASB_DISCARD_SECONDARY:
2654 		return mdev->state.role == R_PRIMARY ? 1 : -1;
2655 	case ASB_CALL_HELPER:
2656 		hg = drbd_asb_recover_0p(mdev);
2657 		if (hg == -1 && mdev->state.role == R_PRIMARY) {
2658 			enum drbd_state_rv rv2;
2659 
2660 			 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2661 			  * we might be here in C_WF_REPORT_PARAMS which is transient.
2662 			  * we do not need to wait for the after state change work either. */
2663 			rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2664 			if (rv2 != SS_SUCCESS) {
2665 				drbd_khelper(mdev, "pri-lost-after-sb");
2666 			} else {
2667 				dev_warn(DEV, "Successfully gave up primary role.\n");
2668 				rv = hg;
2669 			}
2670 		} else
2671 			rv = hg;
2672 	}
2673 
2674 	return rv;
2675 }
2676 
2677 static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
2678 {
2679 	int hg, rv = -100;
2680 	enum drbd_after_sb_p after_sb_2p;
2681 
2682 	rcu_read_lock();
2683 	after_sb_2p = rcu_dereference(mdev->tconn->net_conf)->after_sb_2p;
2684 	rcu_read_unlock();
2685 	switch (after_sb_2p) {
2686 	case ASB_DISCARD_YOUNGER_PRI:
2687 	case ASB_DISCARD_OLDER_PRI:
2688 	case ASB_DISCARD_LEAST_CHG:
2689 	case ASB_DISCARD_LOCAL:
2690 	case ASB_DISCARD_REMOTE:
2691 	case ASB_CONSENSUS:
2692 	case ASB_DISCARD_SECONDARY:
2693 	case ASB_DISCARD_ZERO_CHG:
2694 		dev_err(DEV, "Configuration error.\n");
2695 		break;
2696 	case ASB_VIOLENTLY:
2697 		rv = drbd_asb_recover_0p(mdev);
2698 		break;
2699 	case ASB_DISCONNECT:
2700 		break;
2701 	case ASB_CALL_HELPER:
2702 		hg = drbd_asb_recover_0p(mdev);
2703 		if (hg == -1) {
2704 			enum drbd_state_rv rv2;
2705 
2706 			 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2707 			  * we might be here in C_WF_REPORT_PARAMS which is transient.
2708 			  * we do not need to wait for the after state change work either. */
2709 			rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2710 			if (rv2 != SS_SUCCESS) {
2711 				drbd_khelper(mdev, "pri-lost-after-sb");
2712 			} else {
2713 				dev_warn(DEV, "Successfully gave up primary role.\n");
2714 				rv = hg;
2715 			}
2716 		} else
2717 			rv = hg;
2718 	}
2719 
2720 	return rv;
2721 }
2722 
2723 static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
2724 			   u64 bits, u64 flags)
2725 {
2726 	if (!uuid) {
2727 		dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
2728 		return;
2729 	}
2730 	dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
2731 	     text,
2732 	     (unsigned long long)uuid[UI_CURRENT],
2733 	     (unsigned long long)uuid[UI_BITMAP],
2734 	     (unsigned long long)uuid[UI_HISTORY_START],
2735 	     (unsigned long long)uuid[UI_HISTORY_END],
2736 	     (unsigned long long)bits,
2737 	     (unsigned long long)flags);
2738 }
2739 
2740 /*
2741   100	after split brain try auto recover
2742     2	C_SYNC_SOURCE set BitMap
2743     1	C_SYNC_SOURCE use BitMap
2744     0	no Sync
2745    -1	C_SYNC_TARGET use BitMap
2746    -2	C_SYNC_TARGET set BitMap
2747  -100	after split brain, disconnect
2748 -1000	unrelated data
2749 -1091   requires proto 91
2750 -1096   requires proto 96
2751  */
2752 static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
2753 {
2754 	u64 self, peer;
2755 	int i, j;
2756 
2757 	self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2758 	peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2759 
2760 	*rule_nr = 10;
2761 	if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
2762 		return 0;
2763 
2764 	*rule_nr = 20;
2765 	if ((self == UUID_JUST_CREATED || self == (u64)0) &&
2766 	     peer != UUID_JUST_CREATED)
2767 		return -2;
2768 
2769 	*rule_nr = 30;
2770 	if (self != UUID_JUST_CREATED &&
2771 	    (peer == UUID_JUST_CREATED || peer == (u64)0))
2772 		return 2;
2773 
2774 	if (self == peer) {
2775 		int rct, dc; /* roles at crash time */
2776 
2777 		if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {
2778 
2779 			if (mdev->tconn->agreed_pro_version < 91)
2780 				return -1091;
2781 
2782 			if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
2783 			    (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
2784 				dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
2785 				drbd_uuid_move_history(mdev);
2786 				mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
2787 				mdev->ldev->md.uuid[UI_BITMAP] = 0;
2788 
2789 				drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2790 					       mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2791 				*rule_nr = 34;
2792 			} else {
2793 				dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
2794 				*rule_nr = 36;
2795 			}
2796 
2797 			return 1;
2798 		}
2799 
2800 		if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {
2801 
2802 			if (mdev->tconn->agreed_pro_version < 91)
2803 				return -1091;
2804 
2805 			if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
2806 			    (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
2807 				dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
2808 
2809 				mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
2810 				mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
2811 				mdev->p_uuid[UI_BITMAP] = 0UL;
2812 
2813 				drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2814 				*rule_nr = 35;
2815 			} else {
2816 				dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
2817 				*rule_nr = 37;
2818 			}
2819 
2820 			return -1;
2821 		}
2822 
2823 		/* Common power [off|failure] */
2824 		rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
2825 			(mdev->p_uuid[UI_FLAGS] & 2);
2826 		/* lowest bit is set when we were primary,
2827 		 * next bit (weight 2) is set when peer was primary */
2828 		*rule_nr = 40;
2829 
2830 		switch (rct) {
2831 		case 0: /* !self_pri && !peer_pri */ return 0;
2832 		case 1: /*  self_pri && !peer_pri */ return 1;
2833 		case 2: /* !self_pri &&  peer_pri */ return -1;
2834 		case 3: /*  self_pri &&  peer_pri */
2835 			dc = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags);
2836 			return dc ? -1 : 1;
2837 		}
2838 	}
2839 
2840 	*rule_nr = 50;
2841 	peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2842 	if (self == peer)
2843 		return -1;
2844 
2845 	*rule_nr = 51;
2846 	peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2847 	if (self == peer) {
2848 		if (mdev->tconn->agreed_pro_version < 96 ?
2849 		    (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
2850 		    (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
2851 		    peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) {
2852 			/* The last P_SYNC_UUID did not get though. Undo the last start of
2853 			   resync as sync source modifications of the peer's UUIDs. */
2854 
2855 			if (mdev->tconn->agreed_pro_version < 91)
2856 				return -1091;
2857 
2858 			mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
2859 			mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
2860 
2861 			dev_info(DEV, "Lost last syncUUID packet, corrected:\n");
2862 			drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2863 
2864 			return -1;
2865 		}
2866 	}
2867 
2868 	*rule_nr = 60;
2869 	self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2870 	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2871 		peer = mdev->p_uuid[i] & ~((u64)1);
2872 		if (self == peer)
2873 			return -2;
2874 	}
2875 
2876 	*rule_nr = 70;
2877 	self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2878 	peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2879 	if (self == peer)
2880 		return 1;
2881 
2882 	*rule_nr = 71;
2883 	self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2884 	if (self == peer) {
2885 		if (mdev->tconn->agreed_pro_version < 96 ?
2886 		    (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
2887 		    (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
2888 		    self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
2889 			/* The last P_SYNC_UUID did not get though. Undo the last start of
2890 			   resync as sync source modifications of our UUIDs. */
2891 
2892 			if (mdev->tconn->agreed_pro_version < 91)
2893 				return -1091;
2894 
2895 			__drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
2896 			__drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
2897 
2898 			dev_info(DEV, "Last syncUUID did not get through, corrected:\n");
2899 			drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2900 				       mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2901 
2902 			return 1;
2903 		}
2904 	}
2905 
2906 
2907 	*rule_nr = 80;
2908 	peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2909 	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2910 		self = mdev->ldev->md.uuid[i] & ~((u64)1);
2911 		if (self == peer)
2912 			return 2;
2913 	}
2914 
2915 	*rule_nr = 90;
2916 	self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2917 	peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2918 	if (self == peer && self != ((u64)0))
2919 		return 100;
2920 
2921 	*rule_nr = 100;
2922 	for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2923 		self = mdev->ldev->md.uuid[i] & ~((u64)1);
2924 		for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
2925 			peer = mdev->p_uuid[j] & ~((u64)1);
2926 			if (self == peer)
2927 				return -100;
2928 		}
2929 	}
2930 
2931 	return -1000;
2932 }
2933 
2934 /* drbd_sync_handshake() returns the new conn state on success, or
2935    CONN_MASK (-1) on failure.
2936  */
2937 static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
2938 					   enum drbd_disk_state peer_disk) __must_hold(local)
2939 {
2940 	enum drbd_conns rv = C_MASK;
2941 	enum drbd_disk_state mydisk;
2942 	struct net_conf *nc;
2943 	int hg, rule_nr, rr_conflict, tentative;
2944 
2945 	mydisk = mdev->state.disk;
2946 	if (mydisk == D_NEGOTIATING)
2947 		mydisk = mdev->new_state_tmp.disk;
2948 
2949 	dev_info(DEV, "drbd_sync_handshake:\n");
2950 
2951 	spin_lock_irq(&mdev->ldev->md.uuid_lock);
2952 	drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
2953 	drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
2954 		       mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2955 
2956 	hg = drbd_uuid_compare(mdev, &rule_nr);
2957 	spin_unlock_irq(&mdev->ldev->md.uuid_lock);
2958 
2959 	dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
2960 
2961 	if (hg == -1000) {
2962 		dev_alert(DEV, "Unrelated data, aborting!\n");
2963 		return C_MASK;
2964 	}
2965 	if (hg < -1000) {
2966 		dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
2967 		return C_MASK;
2968 	}
2969 
2970 	if    ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
2971 	    (peer_disk == D_INCONSISTENT && mydisk    > D_INCONSISTENT)) {
2972 		int f = (hg == -100) || abs(hg) == 2;
2973 		hg = mydisk > D_INCONSISTENT ? 1 : -1;
2974 		if (f)
2975 			hg = hg*2;
2976 		dev_info(DEV, "Becoming sync %s due to disk states.\n",
2977 		     hg > 0 ? "source" : "target");
2978 	}
2979 
2980 	if (abs(hg) == 100)
2981 		drbd_khelper(mdev, "initial-split-brain");
2982 
2983 	rcu_read_lock();
2984 	nc = rcu_dereference(mdev->tconn->net_conf);
2985 
2986 	if (hg == 100 || (hg == -100 && nc->always_asbp)) {
2987 		int pcount = (mdev->state.role == R_PRIMARY)
2988 			   + (peer_role == R_PRIMARY);
2989 		int forced = (hg == -100);
2990 
2991 		switch (pcount) {
2992 		case 0:
2993 			hg = drbd_asb_recover_0p(mdev);
2994 			break;
2995 		case 1:
2996 			hg = drbd_asb_recover_1p(mdev);
2997 			break;
2998 		case 2:
2999 			hg = drbd_asb_recover_2p(mdev);
3000 			break;
3001 		}
3002 		if (abs(hg) < 100) {
3003 			dev_warn(DEV, "Split-Brain detected, %d primaries, "
3004 			     "automatically solved. Sync from %s node\n",
3005 			     pcount, (hg < 0) ? "peer" : "this");
3006 			if (forced) {
3007 				dev_warn(DEV, "Doing a full sync, since"
3008 				     " UUIDs where ambiguous.\n");
3009 				hg = hg*2;
3010 			}
3011 		}
3012 	}
3013 
3014 	if (hg == -100) {
3015 		if (test_bit(DISCARD_MY_DATA, &mdev->flags) && !(mdev->p_uuid[UI_FLAGS]&1))
3016 			hg = -1;
3017 		if (!test_bit(DISCARD_MY_DATA, &mdev->flags) && (mdev->p_uuid[UI_FLAGS]&1))
3018 			hg = 1;
3019 
3020 		if (abs(hg) < 100)
3021 			dev_warn(DEV, "Split-Brain detected, manually solved. "
3022 			     "Sync from %s node\n",
3023 			     (hg < 0) ? "peer" : "this");
3024 	}
3025 	rr_conflict = nc->rr_conflict;
3026 	tentative = nc->tentative;
3027 	rcu_read_unlock();
3028 
3029 	if (hg == -100) {
3030 		/* FIXME this log message is not correct if we end up here
3031 		 * after an attempted attach on a diskless node.
3032 		 * We just refuse to attach -- well, we drop the "connection"
3033 		 * to that disk, in a way... */
3034 		dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
3035 		drbd_khelper(mdev, "split-brain");
3036 		return C_MASK;
3037 	}
3038 
3039 	if (hg > 0 && mydisk <= D_INCONSISTENT) {
3040 		dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
3041 		return C_MASK;
3042 	}
3043 
3044 	if (hg < 0 && /* by intention we do not use mydisk here. */
3045 	    mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
3046 		switch (rr_conflict) {
3047 		case ASB_CALL_HELPER:
3048 			drbd_khelper(mdev, "pri-lost");
3049 			/* fall through */
3050 		case ASB_DISCONNECT:
3051 			dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
3052 			return C_MASK;
3053 		case ASB_VIOLENTLY:
3054 			dev_warn(DEV, "Becoming SyncTarget, violating the stable-data"
3055 			     "assumption\n");
3056 		}
3057 	}
3058 
3059 	if (tentative || test_bit(CONN_DRY_RUN, &mdev->tconn->flags)) {
3060 		if (hg == 0)
3061 			dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
3062 		else
3063 			dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.",
3064 				 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
3065 				 abs(hg) >= 2 ? "full" : "bit-map based");
3066 		return C_MASK;
3067 	}
3068 
3069 	if (abs(hg) >= 2) {
3070 		dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
3071 		if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
3072 					BM_LOCKED_SET_ALLOWED))
3073 			return C_MASK;
3074 	}
3075 
3076 	if (hg > 0) { /* become sync source. */
3077 		rv = C_WF_BITMAP_S;
3078 	} else if (hg < 0) { /* become sync target */
3079 		rv = C_WF_BITMAP_T;
3080 	} else {
3081 		rv = C_CONNECTED;
3082 		if (drbd_bm_total_weight(mdev)) {
3083 			dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
3084 			     drbd_bm_total_weight(mdev));
3085 		}
3086 	}
3087 
3088 	return rv;
3089 }
3090 
3091 static enum drbd_after_sb_p convert_after_sb(enum drbd_after_sb_p peer)
3092 {
3093 	/* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
3094 	if (peer == ASB_DISCARD_REMOTE)
3095 		return ASB_DISCARD_LOCAL;
3096 
3097 	/* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
3098 	if (peer == ASB_DISCARD_LOCAL)
3099 		return ASB_DISCARD_REMOTE;
3100 
3101 	/* everything else is valid if they are equal on both sides. */
3102 	return peer;
3103 }
3104 
3105 static int receive_protocol(struct drbd_tconn *tconn, struct packet_info *pi)
3106 {
3107 	struct p_protocol *p = pi->data;
3108 	enum drbd_after_sb_p p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
3109 	int p_proto, p_discard_my_data, p_two_primaries, cf;
3110 	struct net_conf *nc, *old_net_conf, *new_net_conf = NULL;
3111 	char integrity_alg[SHARED_SECRET_MAX] = "";
3112 	struct crypto_hash *peer_integrity_tfm = NULL;
3113 	void *int_dig_in = NULL, *int_dig_vv = NULL;
3114 
3115 	p_proto		= be32_to_cpu(p->protocol);
3116 	p_after_sb_0p	= be32_to_cpu(p->after_sb_0p);
3117 	p_after_sb_1p	= be32_to_cpu(p->after_sb_1p);
3118 	p_after_sb_2p	= be32_to_cpu(p->after_sb_2p);
3119 	p_two_primaries = be32_to_cpu(p->two_primaries);
3120 	cf		= be32_to_cpu(p->conn_flags);
3121 	p_discard_my_data = cf & CF_DISCARD_MY_DATA;
3122 
3123 	if (tconn->agreed_pro_version >= 87) {
3124 		int err;
3125 
3126 		if (pi->size > sizeof(integrity_alg))
3127 			return -EIO;
3128 		err = drbd_recv_all(tconn, integrity_alg, pi->size);
3129 		if (err)
3130 			return err;
3131 		integrity_alg[SHARED_SECRET_MAX - 1] = 0;
3132 	}
3133 
3134 	if (pi->cmd != P_PROTOCOL_UPDATE) {
3135 		clear_bit(CONN_DRY_RUN, &tconn->flags);
3136 
3137 		if (cf & CF_DRY_RUN)
3138 			set_bit(CONN_DRY_RUN, &tconn->flags);
3139 
3140 		rcu_read_lock();
3141 		nc = rcu_dereference(tconn->net_conf);
3142 
3143 		if (p_proto != nc->wire_protocol) {
3144 			conn_err(tconn, "incompatible %s settings\n", "protocol");
3145 			goto disconnect_rcu_unlock;
3146 		}
3147 
3148 		if (convert_after_sb(p_after_sb_0p) != nc->after_sb_0p) {
3149 			conn_err(tconn, "incompatible %s settings\n", "after-sb-0pri");
3150 			goto disconnect_rcu_unlock;
3151 		}
3152 
3153 		if (convert_after_sb(p_after_sb_1p) != nc->after_sb_1p) {
3154 			conn_err(tconn, "incompatible %s settings\n", "after-sb-1pri");
3155 			goto disconnect_rcu_unlock;
3156 		}
3157 
3158 		if (convert_after_sb(p_after_sb_2p) != nc->after_sb_2p) {
3159 			conn_err(tconn, "incompatible %s settings\n", "after-sb-2pri");
3160 			goto disconnect_rcu_unlock;
3161 		}
3162 
3163 		if (p_discard_my_data && nc->discard_my_data) {
3164 			conn_err(tconn, "incompatible %s settings\n", "discard-my-data");
3165 			goto disconnect_rcu_unlock;
3166 		}
3167 
3168 		if (p_two_primaries != nc->two_primaries) {
3169 			conn_err(tconn, "incompatible %s settings\n", "allow-two-primaries");
3170 			goto disconnect_rcu_unlock;
3171 		}
3172 
3173 		if (strcmp(integrity_alg, nc->integrity_alg)) {
3174 			conn_err(tconn, "incompatible %s settings\n", "data-integrity-alg");
3175 			goto disconnect_rcu_unlock;
3176 		}
3177 
3178 		rcu_read_unlock();
3179 	}
3180 
3181 	if (integrity_alg[0]) {
3182 		int hash_size;
3183 
3184 		/*
3185 		 * We can only change the peer data integrity algorithm
3186 		 * here.  Changing our own data integrity algorithm
3187 		 * requires that we send a P_PROTOCOL_UPDATE packet at
3188 		 * the same time; otherwise, the peer has no way to
3189 		 * tell between which packets the algorithm should
3190 		 * change.
3191 		 */
3192 
3193 		peer_integrity_tfm = crypto_alloc_hash(integrity_alg, 0, CRYPTO_ALG_ASYNC);
3194 		if (!peer_integrity_tfm) {
3195 			conn_err(tconn, "peer data-integrity-alg %s not supported\n",
3196 				 integrity_alg);
3197 			goto disconnect;
3198 		}
3199 
3200 		hash_size = crypto_hash_digestsize(peer_integrity_tfm);
3201 		int_dig_in = kmalloc(hash_size, GFP_KERNEL);
3202 		int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
3203 		if (!(int_dig_in && int_dig_vv)) {
3204 			conn_err(tconn, "Allocation of buffers for data integrity checking failed\n");
3205 			goto disconnect;
3206 		}
3207 	}
3208 
3209 	new_net_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
3210 	if (!new_net_conf) {
3211 		conn_err(tconn, "Allocation of new net_conf failed\n");
3212 		goto disconnect;
3213 	}
3214 
3215 	mutex_lock(&tconn->data.mutex);
3216 	mutex_lock(&tconn->conf_update);
3217 	old_net_conf = tconn->net_conf;
3218 	*new_net_conf = *old_net_conf;
3219 
3220 	new_net_conf->wire_protocol = p_proto;
3221 	new_net_conf->after_sb_0p = convert_after_sb(p_after_sb_0p);
3222 	new_net_conf->after_sb_1p = convert_after_sb(p_after_sb_1p);
3223 	new_net_conf->after_sb_2p = convert_after_sb(p_after_sb_2p);
3224 	new_net_conf->two_primaries = p_two_primaries;
3225 
3226 	rcu_assign_pointer(tconn->net_conf, new_net_conf);
3227 	mutex_unlock(&tconn->conf_update);
3228 	mutex_unlock(&tconn->data.mutex);
3229 
3230 	crypto_free_hash(tconn->peer_integrity_tfm);
3231 	kfree(tconn->int_dig_in);
3232 	kfree(tconn->int_dig_vv);
3233 	tconn->peer_integrity_tfm = peer_integrity_tfm;
3234 	tconn->int_dig_in = int_dig_in;
3235 	tconn->int_dig_vv = int_dig_vv;
3236 
3237 	if (strcmp(old_net_conf->integrity_alg, integrity_alg))
3238 		conn_info(tconn, "peer data-integrity-alg: %s\n",
3239 			  integrity_alg[0] ? integrity_alg : "(none)");
3240 
3241 	synchronize_rcu();
3242 	kfree(old_net_conf);
3243 	return 0;
3244 
3245 disconnect_rcu_unlock:
3246 	rcu_read_unlock();
3247 disconnect:
3248 	crypto_free_hash(peer_integrity_tfm);
3249 	kfree(int_dig_in);
3250 	kfree(int_dig_vv);
3251 	conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
3252 	return -EIO;
3253 }
3254 
3255 /* helper function
3256  * input: alg name, feature name
3257  * return: NULL (alg name was "")
3258  *         ERR_PTR(error) if something goes wrong
3259  *         or the crypto hash ptr, if it worked out ok. */
3260 struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
3261 		const char *alg, const char *name)
3262 {
3263 	struct crypto_hash *tfm;
3264 
3265 	if (!alg[0])
3266 		return NULL;
3267 
3268 	tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
3269 	if (IS_ERR(tfm)) {
3270 		dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
3271 			alg, name, PTR_ERR(tfm));
3272 		return tfm;
3273 	}
3274 	return tfm;
3275 }
3276 
3277 static int ignore_remaining_packet(struct drbd_tconn *tconn, struct packet_info *pi)
3278 {
3279 	void *buffer = tconn->data.rbuf;
3280 	int size = pi->size;
3281 
3282 	while (size) {
3283 		int s = min_t(int, size, DRBD_SOCKET_BUFFER_SIZE);
3284 		s = drbd_recv(tconn, buffer, s);
3285 		if (s <= 0) {
3286 			if (s < 0)
3287 				return s;
3288 			break;
3289 		}
3290 		size -= s;
3291 	}
3292 	if (size)
3293 		return -EIO;
3294 	return 0;
3295 }
3296 
3297 /*
3298  * config_unknown_volume  -  device configuration command for unknown volume
3299  *
3300  * When a device is added to an existing connection, the node on which the
3301  * device is added first will send configuration commands to its peer but the
3302  * peer will not know about the device yet.  It will warn and ignore these
3303  * commands.  Once the device is added on the second node, the second node will
3304  * send the same device configuration commands, but in the other direction.
3305  *
3306  * (We can also end up here if drbd is misconfigured.)
3307  */
3308 static int config_unknown_volume(struct drbd_tconn *tconn, struct packet_info *pi)
3309 {
3310 	conn_warn(tconn, "%s packet received for volume %u, which is not configured locally\n",
3311 		  cmdname(pi->cmd), pi->vnr);
3312 	return ignore_remaining_packet(tconn, pi);
3313 }
3314 
3315 static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi)
3316 {
3317 	struct drbd_conf *mdev;
3318 	struct p_rs_param_95 *p;
3319 	unsigned int header_size, data_size, exp_max_sz;
3320 	struct crypto_hash *verify_tfm = NULL;
3321 	struct crypto_hash *csums_tfm = NULL;
3322 	struct net_conf *old_net_conf, *new_net_conf = NULL;
3323 	struct disk_conf *old_disk_conf = NULL, *new_disk_conf = NULL;
3324 	const int apv = tconn->agreed_pro_version;
3325 	struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
3326 	int fifo_size = 0;
3327 	int err;
3328 
3329 	mdev = vnr_to_mdev(tconn, pi->vnr);
3330 	if (!mdev)
3331 		return config_unknown_volume(tconn, pi);
3332 
3333 	exp_max_sz  = apv <= 87 ? sizeof(struct p_rs_param)
3334 		    : apv == 88 ? sizeof(struct p_rs_param)
3335 					+ SHARED_SECRET_MAX
3336 		    : apv <= 94 ? sizeof(struct p_rs_param_89)
3337 		    : /* apv >= 95 */ sizeof(struct p_rs_param_95);
3338 
3339 	if (pi->size > exp_max_sz) {
3340 		dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
3341 		    pi->size, exp_max_sz);
3342 		return -EIO;
3343 	}
3344 
3345 	if (apv <= 88) {
3346 		header_size = sizeof(struct p_rs_param);
3347 		data_size = pi->size - header_size;
3348 	} else if (apv <= 94) {
3349 		header_size = sizeof(struct p_rs_param_89);
3350 		data_size = pi->size - header_size;
3351 		D_ASSERT(data_size == 0);
3352 	} else {
3353 		header_size = sizeof(struct p_rs_param_95);
3354 		data_size = pi->size - header_size;
3355 		D_ASSERT(data_size == 0);
3356 	}
3357 
3358 	/* initialize verify_alg and csums_alg */
3359 	p = pi->data;
3360 	memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
3361 
3362 	err = drbd_recv_all(mdev->tconn, p, header_size);
3363 	if (err)
3364 		return err;
3365 
3366 	mutex_lock(&mdev->tconn->conf_update);
3367 	old_net_conf = mdev->tconn->net_conf;
3368 	if (get_ldev(mdev)) {
3369 		new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
3370 		if (!new_disk_conf) {
3371 			put_ldev(mdev);
3372 			mutex_unlock(&mdev->tconn->conf_update);
3373 			dev_err(DEV, "Allocation of new disk_conf failed\n");
3374 			return -ENOMEM;
3375 		}
3376 
3377 		old_disk_conf = mdev->ldev->disk_conf;
3378 		*new_disk_conf = *old_disk_conf;
3379 
3380 		new_disk_conf->resync_rate = be32_to_cpu(p->resync_rate);
3381 	}
3382 
3383 	if (apv >= 88) {
3384 		if (apv == 88) {
3385 			if (data_size > SHARED_SECRET_MAX || data_size == 0) {
3386 				dev_err(DEV, "verify-alg of wrong size, "
3387 					"peer wants %u, accepting only up to %u byte\n",
3388 					data_size, SHARED_SECRET_MAX);
3389 				err = -EIO;
3390 				goto reconnect;
3391 			}
3392 
3393 			err = drbd_recv_all(mdev->tconn, p->verify_alg, data_size);
3394 			if (err)
3395 				goto reconnect;
3396 			/* we expect NUL terminated string */
3397 			/* but just in case someone tries to be evil */
3398 			D_ASSERT(p->verify_alg[data_size-1] == 0);
3399 			p->verify_alg[data_size-1] = 0;
3400 
3401 		} else /* apv >= 89 */ {
3402 			/* we still expect NUL terminated strings */
3403 			/* but just in case someone tries to be evil */
3404 			D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
3405 			D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
3406 			p->verify_alg[SHARED_SECRET_MAX-1] = 0;
3407 			p->csums_alg[SHARED_SECRET_MAX-1] = 0;
3408 		}
3409 
3410 		if (strcmp(old_net_conf->verify_alg, p->verify_alg)) {
3411 			if (mdev->state.conn == C_WF_REPORT_PARAMS) {
3412 				dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
3413 				    old_net_conf->verify_alg, p->verify_alg);
3414 				goto disconnect;
3415 			}
3416 			verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
3417 					p->verify_alg, "verify-alg");
3418 			if (IS_ERR(verify_tfm)) {
3419 				verify_tfm = NULL;
3420 				goto disconnect;
3421 			}
3422 		}
3423 
3424 		if (apv >= 89 && strcmp(old_net_conf->csums_alg, p->csums_alg)) {
3425 			if (mdev->state.conn == C_WF_REPORT_PARAMS) {
3426 				dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
3427 				    old_net_conf->csums_alg, p->csums_alg);
3428 				goto disconnect;
3429 			}
3430 			csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
3431 					p->csums_alg, "csums-alg");
3432 			if (IS_ERR(csums_tfm)) {
3433 				csums_tfm = NULL;
3434 				goto disconnect;
3435 			}
3436 		}
3437 
3438 		if (apv > 94 && new_disk_conf) {
3439 			new_disk_conf->c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
3440 			new_disk_conf->c_delay_target = be32_to_cpu(p->c_delay_target);
3441 			new_disk_conf->c_fill_target = be32_to_cpu(p->c_fill_target);
3442 			new_disk_conf->c_max_rate = be32_to_cpu(p->c_max_rate);
3443 
3444 			fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
3445 			if (fifo_size != mdev->rs_plan_s->size) {
3446 				new_plan = fifo_alloc(fifo_size);
3447 				if (!new_plan) {
3448 					dev_err(DEV, "kmalloc of fifo_buffer failed");
3449 					put_ldev(mdev);
3450 					goto disconnect;
3451 				}
3452 			}
3453 		}
3454 
3455 		if (verify_tfm || csums_tfm) {
3456 			new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
3457 			if (!new_net_conf) {
3458 				dev_err(DEV, "Allocation of new net_conf failed\n");
3459 				goto disconnect;
3460 			}
3461 
3462 			*new_net_conf = *old_net_conf;
3463 
3464 			if (verify_tfm) {
3465 				strcpy(new_net_conf->verify_alg, p->verify_alg);
3466 				new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1;
3467 				crypto_free_hash(mdev->tconn->verify_tfm);
3468 				mdev->tconn->verify_tfm = verify_tfm;
3469 				dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
3470 			}
3471 			if (csums_tfm) {
3472 				strcpy(new_net_conf->csums_alg, p->csums_alg);
3473 				new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1;
3474 				crypto_free_hash(mdev->tconn->csums_tfm);
3475 				mdev->tconn->csums_tfm = csums_tfm;
3476 				dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
3477 			}
3478 			rcu_assign_pointer(tconn->net_conf, new_net_conf);
3479 		}
3480 	}
3481 
3482 	if (new_disk_conf) {
3483 		rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
3484 		put_ldev(mdev);
3485 	}
3486 
3487 	if (new_plan) {
3488 		old_plan = mdev->rs_plan_s;
3489 		rcu_assign_pointer(mdev->rs_plan_s, new_plan);
3490 	}
3491 
3492 	mutex_unlock(&mdev->tconn->conf_update);
3493 	synchronize_rcu();
3494 	if (new_net_conf)
3495 		kfree(old_net_conf);
3496 	kfree(old_disk_conf);
3497 	kfree(old_plan);
3498 
3499 	return 0;
3500 
3501 reconnect:
3502 	if (new_disk_conf) {
3503 		put_ldev(mdev);
3504 		kfree(new_disk_conf);
3505 	}
3506 	mutex_unlock(&mdev->tconn->conf_update);
3507 	return -EIO;
3508 
3509 disconnect:
3510 	kfree(new_plan);
3511 	if (new_disk_conf) {
3512 		put_ldev(mdev);
3513 		kfree(new_disk_conf);
3514 	}
3515 	mutex_unlock(&mdev->tconn->conf_update);
3516 	/* just for completeness: actually not needed,
3517 	 * as this is not reached if csums_tfm was ok. */
3518 	crypto_free_hash(csums_tfm);
3519 	/* but free the verify_tfm again, if csums_tfm did not work out */
3520 	crypto_free_hash(verify_tfm);
3521 	conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
3522 	return -EIO;
3523 }
3524 
3525 /* warn if the arguments differ by more than 12.5% */
3526 static void warn_if_differ_considerably(struct drbd_conf *mdev,
3527 	const char *s, sector_t a, sector_t b)
3528 {
3529 	sector_t d;
3530 	if (a == 0 || b == 0)
3531 		return;
3532 	d = (a > b) ? (a - b) : (b - a);
3533 	if (d > (a>>3) || d > (b>>3))
3534 		dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
3535 		     (unsigned long long)a, (unsigned long long)b);
3536 }
3537 
3538 static int receive_sizes(struct drbd_tconn *tconn, struct packet_info *pi)
3539 {
3540 	struct drbd_conf *mdev;
3541 	struct p_sizes *p = pi->data;
3542 	enum determine_dev_size dd = DS_UNCHANGED;
3543 	sector_t p_size, p_usize, my_usize;
3544 	int ldsc = 0; /* local disk size changed */
3545 	enum dds_flags ddsf;
3546 
3547 	mdev = vnr_to_mdev(tconn, pi->vnr);
3548 	if (!mdev)
3549 		return config_unknown_volume(tconn, pi);
3550 
3551 	p_size = be64_to_cpu(p->d_size);
3552 	p_usize = be64_to_cpu(p->u_size);
3553 
3554 	/* just store the peer's disk size for now.
3555 	 * we still need to figure out whether we accept that. */
3556 	mdev->p_size = p_size;
3557 
3558 	if (get_ldev(mdev)) {
3559 		rcu_read_lock();
3560 		my_usize = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
3561 		rcu_read_unlock();
3562 
3563 		warn_if_differ_considerably(mdev, "lower level device sizes",
3564 			   p_size, drbd_get_max_capacity(mdev->ldev));
3565 		warn_if_differ_considerably(mdev, "user requested size",
3566 					    p_usize, my_usize);
3567 
3568 		/* if this is the first connect, or an otherwise expected
3569 		 * param exchange, choose the minimum */
3570 		if (mdev->state.conn == C_WF_REPORT_PARAMS)
3571 			p_usize = min_not_zero(my_usize, p_usize);
3572 
3573 		/* Never shrink a device with usable data during connect.
3574 		   But allow online shrinking if we are connected. */
3575 		if (drbd_new_dev_size(mdev, mdev->ldev, p_usize, 0) <
3576 		    drbd_get_capacity(mdev->this_bdev) &&
3577 		    mdev->state.disk >= D_OUTDATED &&
3578 		    mdev->state.conn < C_CONNECTED) {
3579 			dev_err(DEV, "The peer's disk size is too small!\n");
3580 			conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
3581 			put_ldev(mdev);
3582 			return -EIO;
3583 		}
3584 
3585 		if (my_usize != p_usize) {
3586 			struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
3587 
3588 			new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
3589 			if (!new_disk_conf) {
3590 				dev_err(DEV, "Allocation of new disk_conf failed\n");
3591 				put_ldev(mdev);
3592 				return -ENOMEM;
3593 			}
3594 
3595 			mutex_lock(&mdev->tconn->conf_update);
3596 			old_disk_conf = mdev->ldev->disk_conf;
3597 			*new_disk_conf = *old_disk_conf;
3598 			new_disk_conf->disk_size = p_usize;
3599 
3600 			rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
3601 			mutex_unlock(&mdev->tconn->conf_update);
3602 			synchronize_rcu();
3603 			kfree(old_disk_conf);
3604 
3605 			dev_info(DEV, "Peer sets u_size to %lu sectors\n",
3606 				 (unsigned long)my_usize);
3607 		}
3608 
3609 		put_ldev(mdev);
3610 	}
3611 
3612 	ddsf = be16_to_cpu(p->dds_flags);
3613 	if (get_ldev(mdev)) {
3614 		dd = drbd_determine_dev_size(mdev, ddsf, NULL);
3615 		put_ldev(mdev);
3616 		if (dd == DS_ERROR)
3617 			return -EIO;
3618 		drbd_md_sync(mdev);
3619 	} else {
3620 		/* I am diskless, need to accept the peer's size. */
3621 		drbd_set_my_capacity(mdev, p_size);
3622 	}
3623 
3624 	mdev->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
3625 	drbd_reconsider_max_bio_size(mdev);
3626 
3627 	if (get_ldev(mdev)) {
3628 		if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
3629 			mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
3630 			ldsc = 1;
3631 		}
3632 
3633 		put_ldev(mdev);
3634 	}
3635 
3636 	if (mdev->state.conn > C_WF_REPORT_PARAMS) {
3637 		if (be64_to_cpu(p->c_size) !=
3638 		    drbd_get_capacity(mdev->this_bdev) || ldsc) {
3639 			/* we have different sizes, probably peer
3640 			 * needs to know my new size... */
3641 			drbd_send_sizes(mdev, 0, ddsf);
3642 		}
3643 		if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
3644 		    (dd == DS_GREW && mdev->state.conn == C_CONNECTED)) {
3645 			if (mdev->state.pdsk >= D_INCONSISTENT &&
3646 			    mdev->state.disk >= D_INCONSISTENT) {
3647 				if (ddsf & DDSF_NO_RESYNC)
3648 					dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
3649 				else
3650 					resync_after_online_grow(mdev);
3651 			} else
3652 				set_bit(RESYNC_AFTER_NEG, &mdev->flags);
3653 		}
3654 	}
3655 
3656 	return 0;
3657 }
3658 
3659 static int receive_uuids(struct drbd_tconn *tconn, struct packet_info *pi)
3660 {
3661 	struct drbd_conf *mdev;
3662 	struct p_uuids *p = pi->data;
3663 	u64 *p_uuid;
3664 	int i, updated_uuids = 0;
3665 
3666 	mdev = vnr_to_mdev(tconn, pi->vnr);
3667 	if (!mdev)
3668 		return config_unknown_volume(tconn, pi);
3669 
3670 	p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
3671 	if (!p_uuid) {
3672 		dev_err(DEV, "kmalloc of p_uuid failed\n");
3673 		return false;
3674 	}
3675 
3676 	for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
3677 		p_uuid[i] = be64_to_cpu(p->uuid[i]);
3678 
3679 	kfree(mdev->p_uuid);
3680 	mdev->p_uuid = p_uuid;
3681 
3682 	if (mdev->state.conn < C_CONNECTED &&
3683 	    mdev->state.disk < D_INCONSISTENT &&
3684 	    mdev->state.role == R_PRIMARY &&
3685 	    (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
3686 		dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
3687 		    (unsigned long long)mdev->ed_uuid);
3688 		conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
3689 		return -EIO;
3690 	}
3691 
3692 	if (get_ldev(mdev)) {
3693 		int skip_initial_sync =
3694 			mdev->state.conn == C_CONNECTED &&
3695 			mdev->tconn->agreed_pro_version >= 90 &&
3696 			mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
3697 			(p_uuid[UI_FLAGS] & 8);
3698 		if (skip_initial_sync) {
3699 			dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
3700 			drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
3701 					"clear_n_write from receive_uuids",
3702 					BM_LOCKED_TEST_ALLOWED);
3703 			_drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
3704 			_drbd_uuid_set(mdev, UI_BITMAP, 0);
3705 			_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3706 					CS_VERBOSE, NULL);
3707 			drbd_md_sync(mdev);
3708 			updated_uuids = 1;
3709 		}
3710 		put_ldev(mdev);
3711 	} else if (mdev->state.disk < D_INCONSISTENT &&
3712 		   mdev->state.role == R_PRIMARY) {
3713 		/* I am a diskless primary, the peer just created a new current UUID
3714 		   for me. */
3715 		updated_uuids = drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3716 	}
3717 
3718 	/* Before we test for the disk state, we should wait until an eventually
3719 	   ongoing cluster wide state change is finished. That is important if
3720 	   we are primary and are detaching from our disk. We need to see the
3721 	   new disk state... */
3722 	mutex_lock(mdev->state_mutex);
3723 	mutex_unlock(mdev->state_mutex);
3724 	if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
3725 		updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3726 
3727 	if (updated_uuids)
3728 		drbd_print_uuids(mdev, "receiver updated UUIDs to");
3729 
3730 	return 0;
3731 }
3732 
3733 /**
3734  * convert_state() - Converts the peer's view of the cluster state to our point of view
3735  * @ps:		The state as seen by the peer.
3736  */
3737 static union drbd_state convert_state(union drbd_state ps)
3738 {
3739 	union drbd_state ms;
3740 
3741 	static enum drbd_conns c_tab[] = {
3742 		[C_WF_REPORT_PARAMS] = C_WF_REPORT_PARAMS,
3743 		[C_CONNECTED] = C_CONNECTED,
3744 
3745 		[C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
3746 		[C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
3747 		[C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
3748 		[C_VERIFY_S]       = C_VERIFY_T,
3749 		[C_MASK]   = C_MASK,
3750 	};
3751 
3752 	ms.i = ps.i;
3753 
3754 	ms.conn = c_tab[ps.conn];
3755 	ms.peer = ps.role;
3756 	ms.role = ps.peer;
3757 	ms.pdsk = ps.disk;
3758 	ms.disk = ps.pdsk;
3759 	ms.peer_isp = (ps.aftr_isp | ps.user_isp);
3760 
3761 	return ms;
3762 }
3763 
3764 static int receive_req_state(struct drbd_tconn *tconn, struct packet_info *pi)
3765 {
3766 	struct drbd_conf *mdev;
3767 	struct p_req_state *p = pi->data;
3768 	union drbd_state mask, val;
3769 	enum drbd_state_rv rv;
3770 
3771 	mdev = vnr_to_mdev(tconn, pi->vnr);
3772 	if (!mdev)
3773 		return -EIO;
3774 
3775 	mask.i = be32_to_cpu(p->mask);
3776 	val.i = be32_to_cpu(p->val);
3777 
3778 	if (test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags) &&
3779 	    mutex_is_locked(mdev->state_mutex)) {
3780 		drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
3781 		return 0;
3782 	}
3783 
3784 	mask = convert_state(mask);
3785 	val = convert_state(val);
3786 
3787 	rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
3788 	drbd_send_sr_reply(mdev, rv);
3789 
3790 	drbd_md_sync(mdev);
3791 
3792 	return 0;
3793 }
3794 
3795 static int receive_req_conn_state(struct drbd_tconn *tconn, struct packet_info *pi)
3796 {
3797 	struct p_req_state *p = pi->data;
3798 	union drbd_state mask, val;
3799 	enum drbd_state_rv rv;
3800 
3801 	mask.i = be32_to_cpu(p->mask);
3802 	val.i = be32_to_cpu(p->val);
3803 
3804 	if (test_bit(RESOLVE_CONFLICTS, &tconn->flags) &&
3805 	    mutex_is_locked(&tconn->cstate_mutex)) {
3806 		conn_send_sr_reply(tconn, SS_CONCURRENT_ST_CHG);
3807 		return 0;
3808 	}
3809 
3810 	mask = convert_state(mask);
3811 	val = convert_state(val);
3812 
3813 	rv = conn_request_state(tconn, mask, val, CS_VERBOSE | CS_LOCAL_ONLY | CS_IGN_OUTD_FAIL);
3814 	conn_send_sr_reply(tconn, rv);
3815 
3816 	return 0;
3817 }
3818 
3819 static int receive_state(struct drbd_tconn *tconn, struct packet_info *pi)
3820 {
3821 	struct drbd_conf *mdev;
3822 	struct p_state *p = pi->data;
3823 	union drbd_state os, ns, peer_state;
3824 	enum drbd_disk_state real_peer_disk;
3825 	enum chg_state_flags cs_flags;
3826 	int rv;
3827 
3828 	mdev = vnr_to_mdev(tconn, pi->vnr);
3829 	if (!mdev)
3830 		return config_unknown_volume(tconn, pi);
3831 
3832 	peer_state.i = be32_to_cpu(p->state);
3833 
3834 	real_peer_disk = peer_state.disk;
3835 	if (peer_state.disk == D_NEGOTIATING) {
3836 		real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
3837 		dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
3838 	}
3839 
3840 	spin_lock_irq(&mdev->tconn->req_lock);
3841  retry:
3842 	os = ns = drbd_read_state(mdev);
3843 	spin_unlock_irq(&mdev->tconn->req_lock);
3844 
3845 	/* If some other part of the code (asender thread, timeout)
3846 	 * already decided to close the connection again,
3847 	 * we must not "re-establish" it here. */
3848 	if (os.conn <= C_TEAR_DOWN)
3849 		return -ECONNRESET;
3850 
3851 	/* If this is the "end of sync" confirmation, usually the peer disk
3852 	 * transitions from D_INCONSISTENT to D_UP_TO_DATE. For empty (0 bits
3853 	 * set) resync started in PausedSyncT, or if the timing of pause-/
3854 	 * unpause-sync events has been "just right", the peer disk may
3855 	 * transition from D_CONSISTENT to D_UP_TO_DATE as well.
3856 	 */
3857 	if ((os.pdsk == D_INCONSISTENT || os.pdsk == D_CONSISTENT) &&
3858 	    real_peer_disk == D_UP_TO_DATE &&
3859 	    os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
3860 		/* If we are (becoming) SyncSource, but peer is still in sync
3861 		 * preparation, ignore its uptodate-ness to avoid flapping, it
3862 		 * will change to inconsistent once the peer reaches active
3863 		 * syncing states.
3864 		 * It may have changed syncer-paused flags, however, so we
3865 		 * cannot ignore this completely. */
3866 		if (peer_state.conn > C_CONNECTED &&
3867 		    peer_state.conn < C_SYNC_SOURCE)
3868 			real_peer_disk = D_INCONSISTENT;
3869 
3870 		/* if peer_state changes to connected at the same time,
3871 		 * it explicitly notifies us that it finished resync.
3872 		 * Maybe we should finish it up, too? */
3873 		else if (os.conn >= C_SYNC_SOURCE &&
3874 			 peer_state.conn == C_CONNECTED) {
3875 			if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
3876 				drbd_resync_finished(mdev);
3877 			return 0;
3878 		}
3879 	}
3880 
3881 	/* explicit verify finished notification, stop sector reached. */
3882 	if (os.conn == C_VERIFY_T && os.disk == D_UP_TO_DATE &&
3883 	    peer_state.conn == C_CONNECTED && real_peer_disk == D_UP_TO_DATE) {
3884 		ov_out_of_sync_print(mdev);
3885 		drbd_resync_finished(mdev);
3886 		return 0;
3887 	}
3888 
3889 	/* peer says his disk is inconsistent, while we think it is uptodate,
3890 	 * and this happens while the peer still thinks we have a sync going on,
3891 	 * but we think we are already done with the sync.
3892 	 * We ignore this to avoid flapping pdsk.
3893 	 * This should not happen, if the peer is a recent version of drbd. */
3894 	if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
3895 	    os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
3896 		real_peer_disk = D_UP_TO_DATE;
3897 
3898 	if (ns.conn == C_WF_REPORT_PARAMS)
3899 		ns.conn = C_CONNECTED;
3900 
3901 	if (peer_state.conn == C_AHEAD)
3902 		ns.conn = C_BEHIND;
3903 
3904 	if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
3905 	    get_ldev_if_state(mdev, D_NEGOTIATING)) {
3906 		int cr; /* consider resync */
3907 
3908 		/* if we established a new connection */
3909 		cr  = (os.conn < C_CONNECTED);
3910 		/* if we had an established connection
3911 		 * and one of the nodes newly attaches a disk */
3912 		cr |= (os.conn == C_CONNECTED &&
3913 		       (peer_state.disk == D_NEGOTIATING ||
3914 			os.disk == D_NEGOTIATING));
3915 		/* if we have both been inconsistent, and the peer has been
3916 		 * forced to be UpToDate with --overwrite-data */
3917 		cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
3918 		/* if we had been plain connected, and the admin requested to
3919 		 * start a sync by "invalidate" or "invalidate-remote" */
3920 		cr |= (os.conn == C_CONNECTED &&
3921 				(peer_state.conn >= C_STARTING_SYNC_S &&
3922 				 peer_state.conn <= C_WF_BITMAP_T));
3923 
3924 		if (cr)
3925 			ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);
3926 
3927 		put_ldev(mdev);
3928 		if (ns.conn == C_MASK) {
3929 			ns.conn = C_CONNECTED;
3930 			if (mdev->state.disk == D_NEGOTIATING) {
3931 				drbd_force_state(mdev, NS(disk, D_FAILED));
3932 			} else if (peer_state.disk == D_NEGOTIATING) {
3933 				dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
3934 				peer_state.disk = D_DISKLESS;
3935 				real_peer_disk = D_DISKLESS;
3936 			} else {
3937 				if (test_and_clear_bit(CONN_DRY_RUN, &mdev->tconn->flags))
3938 					return -EIO;
3939 				D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
3940 				conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
3941 				return -EIO;
3942 			}
3943 		}
3944 	}
3945 
3946 	spin_lock_irq(&mdev->tconn->req_lock);
3947 	if (os.i != drbd_read_state(mdev).i)
3948 		goto retry;
3949 	clear_bit(CONSIDER_RESYNC, &mdev->flags);
3950 	ns.peer = peer_state.role;
3951 	ns.pdsk = real_peer_disk;
3952 	ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
3953 	if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
3954 		ns.disk = mdev->new_state_tmp.disk;
3955 	cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
3956 	if (ns.pdsk == D_CONSISTENT && drbd_suspended(mdev) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
3957 	    test_bit(NEW_CUR_UUID, &mdev->flags)) {
3958 		/* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this
3959 		   for temporal network outages! */
3960 		spin_unlock_irq(&mdev->tconn->req_lock);
3961 		dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
3962 		tl_clear(mdev->tconn);
3963 		drbd_uuid_new_current(mdev);
3964 		clear_bit(NEW_CUR_UUID, &mdev->flags);
3965 		conn_request_state(mdev->tconn, NS2(conn, C_PROTOCOL_ERROR, susp, 0), CS_HARD);
3966 		return -EIO;
3967 	}
3968 	rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
3969 	ns = drbd_read_state(mdev);
3970 	spin_unlock_irq(&mdev->tconn->req_lock);
3971 
3972 	if (rv < SS_SUCCESS) {
3973 		conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
3974 		return -EIO;
3975 	}
3976 
3977 	if (os.conn > C_WF_REPORT_PARAMS) {
3978 		if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
3979 		    peer_state.disk != D_NEGOTIATING ) {
3980 			/* we want resync, peer has not yet decided to sync... */
3981 			/* Nowadays only used when forcing a node into primary role and
3982 			   setting its disk to UpToDate with that */
3983 			drbd_send_uuids(mdev);
3984 			drbd_send_current_state(mdev);
3985 		}
3986 	}
3987 
3988 	clear_bit(DISCARD_MY_DATA, &mdev->flags);
3989 
3990 	drbd_md_sync(mdev); /* update connected indicator, la_size_sect, ... */
3991 
3992 	return 0;
3993 }
3994 
3995 static int receive_sync_uuid(struct drbd_tconn *tconn, struct packet_info *pi)
3996 {
3997 	struct drbd_conf *mdev;
3998 	struct p_rs_uuid *p = pi->data;
3999 
4000 	mdev = vnr_to_mdev(tconn, pi->vnr);
4001 	if (!mdev)
4002 		return -EIO;
4003 
4004 	wait_event(mdev->misc_wait,
4005 		   mdev->state.conn == C_WF_SYNC_UUID ||
4006 		   mdev->state.conn == C_BEHIND ||
4007 		   mdev->state.conn < C_CONNECTED ||
4008 		   mdev->state.disk < D_NEGOTIATING);
4009 
4010 	/* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */
4011 
4012 	/* Here the _drbd_uuid_ functions are right, current should
4013 	   _not_ be rotated into the history */
4014 	if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
4015 		_drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
4016 		_drbd_uuid_set(mdev, UI_BITMAP, 0UL);
4017 
4018 		drbd_print_uuids(mdev, "updated sync uuid");
4019 		drbd_start_resync(mdev, C_SYNC_TARGET);
4020 
4021 		put_ldev(mdev);
4022 	} else
4023 		dev_err(DEV, "Ignoring SyncUUID packet!\n");
4024 
4025 	return 0;
4026 }
4027 
4028 /**
4029  * receive_bitmap_plain
4030  *
4031  * Return 0 when done, 1 when another iteration is needed, and a negative error
4032  * code upon failure.
4033  */
4034 static int
4035 receive_bitmap_plain(struct drbd_conf *mdev, unsigned int size,
4036 		     unsigned long *p, struct bm_xfer_ctx *c)
4037 {
4038 	unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE -
4039 				 drbd_header_size(mdev->tconn);
4040 	unsigned int num_words = min_t(size_t, data_size / sizeof(*p),
4041 				       c->bm_words - c->word_offset);
4042 	unsigned int want = num_words * sizeof(*p);
4043 	int err;
4044 
4045 	if (want != size) {
4046 		dev_err(DEV, "%s:want (%u) != size (%u)\n", __func__, want, size);
4047 		return -EIO;
4048 	}
4049 	if (want == 0)
4050 		return 0;
4051 	err = drbd_recv_all(mdev->tconn, p, want);
4052 	if (err)
4053 		return err;
4054 
4055 	drbd_bm_merge_lel(mdev, c->word_offset, num_words, p);
4056 
4057 	c->word_offset += num_words;
4058 	c->bit_offset = c->word_offset * BITS_PER_LONG;
4059 	if (c->bit_offset > c->bm_bits)
4060 		c->bit_offset = c->bm_bits;
4061 
4062 	return 1;
4063 }
4064 
4065 static enum drbd_bitmap_code dcbp_get_code(struct p_compressed_bm *p)
4066 {
4067 	return (enum drbd_bitmap_code)(p->encoding & 0x0f);
4068 }
4069 
4070 static int dcbp_get_start(struct p_compressed_bm *p)
4071 {
4072 	return (p->encoding & 0x80) != 0;
4073 }
4074 
4075 static int dcbp_get_pad_bits(struct p_compressed_bm *p)
4076 {
4077 	return (p->encoding >> 4) & 0x7;
4078 }
4079 
4080 /**
4081  * recv_bm_rle_bits
4082  *
4083  * Return 0 when done, 1 when another iteration is needed, and a negative error
4084  * code upon failure.
4085  */
4086 static int
4087 recv_bm_rle_bits(struct drbd_conf *mdev,
4088 		struct p_compressed_bm *p,
4089 		 struct bm_xfer_ctx *c,
4090 		 unsigned int len)
4091 {
4092 	struct bitstream bs;
4093 	u64 look_ahead;
4094 	u64 rl;
4095 	u64 tmp;
4096 	unsigned long s = c->bit_offset;
4097 	unsigned long e;
4098 	int toggle = dcbp_get_start(p);
4099 	int have;
4100 	int bits;
4101 
4102 	bitstream_init(&bs, p->code, len, dcbp_get_pad_bits(p));
4103 
4104 	bits = bitstream_get_bits(&bs, &look_ahead, 64);
4105 	if (bits < 0)
4106 		return -EIO;
4107 
4108 	for (have = bits; have > 0; s += rl, toggle = !toggle) {
4109 		bits = vli_decode_bits(&rl, look_ahead);
4110 		if (bits <= 0)
4111 			return -EIO;
4112 
4113 		if (toggle) {
4114 			e = s + rl -1;
4115 			if (e >= c->bm_bits) {
4116 				dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
4117 				return -EIO;
4118 			}
4119 			_drbd_bm_set_bits(mdev, s, e);
4120 		}
4121 
4122 		if (have < bits) {
4123 			dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
4124 				have, bits, look_ahead,
4125 				(unsigned int)(bs.cur.b - p->code),
4126 				(unsigned int)bs.buf_len);
4127 			return -EIO;
4128 		}
4129 		/* if we consumed all 64 bits, assign 0; >> 64 is "undefined"; */
4130 		if (likely(bits < 64))
4131 			look_ahead >>= bits;
4132 		else
4133 			look_ahead = 0;
4134 		have -= bits;
4135 
4136 		bits = bitstream_get_bits(&bs, &tmp, 64 - have);
4137 		if (bits < 0)
4138 			return -EIO;
4139 		look_ahead |= tmp << have;
4140 		have += bits;
4141 	}
4142 
4143 	c->bit_offset = s;
4144 	bm_xfer_ctx_bit_to_word_offset(c);
4145 
4146 	return (s != c->bm_bits);
4147 }
4148 
4149 /**
4150  * decode_bitmap_c
4151  *
4152  * Return 0 when done, 1 when another iteration is needed, and a negative error
4153  * code upon failure.
4154  */
4155 static int
4156 decode_bitmap_c(struct drbd_conf *mdev,
4157 		struct p_compressed_bm *p,
4158 		struct bm_xfer_ctx *c,
4159 		unsigned int len)
4160 {
4161 	if (dcbp_get_code(p) == RLE_VLI_Bits)
4162 		return recv_bm_rle_bits(mdev, p, c, len - sizeof(*p));
4163 
4164 	/* other variants had been implemented for evaluation,
4165 	 * but have been dropped as this one turned out to be "best"
4166 	 * during all our tests. */
4167 
4168 	dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
4169 	conn_request_state(mdev->tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
4170 	return -EIO;
4171 }
4172 
4173 void INFO_bm_xfer_stats(struct drbd_conf *mdev,
4174 		const char *direction, struct bm_xfer_ctx *c)
4175 {
4176 	/* what would it take to transfer it "plaintext" */
4177 	unsigned int header_size = drbd_header_size(mdev->tconn);
4178 	unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
4179 	unsigned int plain =
4180 		header_size * (DIV_ROUND_UP(c->bm_words, data_size) + 1) +
4181 		c->bm_words * sizeof(unsigned long);
4182 	unsigned int total = c->bytes[0] + c->bytes[1];
4183 	unsigned int r;
4184 
4185 	/* total can not be zero. but just in case: */
4186 	if (total == 0)
4187 		return;
4188 
4189 	/* don't report if not compressed */
4190 	if (total >= plain)
4191 		return;
4192 
4193 	/* total < plain. check for overflow, still */
4194 	r = (total > UINT_MAX/1000) ? (total / (plain/1000))
4195 		                    : (1000 * total / plain);
4196 
4197 	if (r > 1000)
4198 		r = 1000;
4199 
4200 	r = 1000 - r;
4201 	dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
4202 	     "total %u; compression: %u.%u%%\n",
4203 			direction,
4204 			c->bytes[1], c->packets[1],
4205 			c->bytes[0], c->packets[0],
4206 			total, r/10, r % 10);
4207 }
4208 
4209 /* Since we are processing the bitfield from lower addresses to higher,
4210    it does not matter if the process it in 32 bit chunks or 64 bit
4211    chunks as long as it is little endian. (Understand it as byte stream,
4212    beginning with the lowest byte...) If we would use big endian
4213    we would need to process it from the highest address to the lowest,
4214    in order to be agnostic to the 32 vs 64 bits issue.
4215 
4216    returns 0 on failure, 1 if we successfully received it. */
4217 static int receive_bitmap(struct drbd_tconn *tconn, struct packet_info *pi)
4218 {
4219 	struct drbd_conf *mdev;
4220 	struct bm_xfer_ctx c;
4221 	int err;
4222 
4223 	mdev = vnr_to_mdev(tconn, pi->vnr);
4224 	if (!mdev)
4225 		return -EIO;
4226 
4227 	drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED);
4228 	/* you are supposed to send additional out-of-sync information
4229 	 * if you actually set bits during this phase */
4230 
4231 	c = (struct bm_xfer_ctx) {
4232 		.bm_bits = drbd_bm_bits(mdev),
4233 		.bm_words = drbd_bm_words(mdev),
4234 	};
4235 
4236 	for(;;) {
4237 		if (pi->cmd == P_BITMAP)
4238 			err = receive_bitmap_plain(mdev, pi->size, pi->data, &c);
4239 		else if (pi->cmd == P_COMPRESSED_BITMAP) {
4240 			/* MAYBE: sanity check that we speak proto >= 90,
4241 			 * and the feature is enabled! */
4242 			struct p_compressed_bm *p = pi->data;
4243 
4244 			if (pi->size > DRBD_SOCKET_BUFFER_SIZE - drbd_header_size(tconn)) {
4245 				dev_err(DEV, "ReportCBitmap packet too large\n");
4246 				err = -EIO;
4247 				goto out;
4248 			}
4249 			if (pi->size <= sizeof(*p)) {
4250 				dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", pi->size);
4251 				err = -EIO;
4252 				goto out;
4253 			}
4254 			err = drbd_recv_all(mdev->tconn, p, pi->size);
4255 			if (err)
4256 			       goto out;
4257 			err = decode_bitmap_c(mdev, p, &c, pi->size);
4258 		} else {
4259 			dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", pi->cmd);
4260 			err = -EIO;
4261 			goto out;
4262 		}
4263 
4264 		c.packets[pi->cmd == P_BITMAP]++;
4265 		c.bytes[pi->cmd == P_BITMAP] += drbd_header_size(tconn) + pi->size;
4266 
4267 		if (err <= 0) {
4268 			if (err < 0)
4269 				goto out;
4270 			break;
4271 		}
4272 		err = drbd_recv_header(mdev->tconn, pi);
4273 		if (err)
4274 			goto out;
4275 	}
4276 
4277 	INFO_bm_xfer_stats(mdev, "receive", &c);
4278 
4279 	if (mdev->state.conn == C_WF_BITMAP_T) {
4280 		enum drbd_state_rv rv;
4281 
4282 		err = drbd_send_bitmap(mdev);
4283 		if (err)
4284 			goto out;
4285 		/* Omit CS_ORDERED with this state transition to avoid deadlocks. */
4286 		rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
4287 		D_ASSERT(rv == SS_SUCCESS);
4288 	} else if (mdev->state.conn != C_WF_BITMAP_S) {
4289 		/* admin may have requested C_DISCONNECTING,
4290 		 * other threads may have noticed network errors */
4291 		dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
4292 		    drbd_conn_str(mdev->state.conn));
4293 	}
4294 	err = 0;
4295 
4296  out:
4297 	drbd_bm_unlock(mdev);
4298 	if (!err && mdev->state.conn == C_WF_BITMAP_S)
4299 		drbd_start_resync(mdev, C_SYNC_SOURCE);
4300 	return err;
4301 }
4302 
4303 static int receive_skip(struct drbd_tconn *tconn, struct packet_info *pi)
4304 {
4305 	conn_warn(tconn, "skipping unknown optional packet type %d, l: %d!\n",
4306 		 pi->cmd, pi->size);
4307 
4308 	return ignore_remaining_packet(tconn, pi);
4309 }
4310 
4311 static int receive_UnplugRemote(struct drbd_tconn *tconn, struct packet_info *pi)
4312 {
4313 	/* Make sure we've acked all the TCP data associated
4314 	 * with the data requests being unplugged */
4315 	drbd_tcp_quickack(tconn->data.socket);
4316 
4317 	return 0;
4318 }
4319 
4320 static int receive_out_of_sync(struct drbd_tconn *tconn, struct packet_info *pi)
4321 {
4322 	struct drbd_conf *mdev;
4323 	struct p_block_desc *p = pi->data;
4324 
4325 	mdev = vnr_to_mdev(tconn, pi->vnr);
4326 	if (!mdev)
4327 		return -EIO;
4328 
4329 	switch (mdev->state.conn) {
4330 	case C_WF_SYNC_UUID:
4331 	case C_WF_BITMAP_T:
4332 	case C_BEHIND:
4333 			break;
4334 	default:
4335 		dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
4336 				drbd_conn_str(mdev->state.conn));
4337 	}
4338 
4339 	drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
4340 
4341 	return 0;
4342 }
4343 
4344 struct data_cmd {
4345 	int expect_payload;
4346 	size_t pkt_size;
4347 	int (*fn)(struct drbd_tconn *, struct packet_info *);
4348 };
4349 
4350 static struct data_cmd drbd_cmd_handler[] = {
4351 	[P_DATA]	    = { 1, sizeof(struct p_data), receive_Data },
4352 	[P_DATA_REPLY]	    = { 1, sizeof(struct p_data), receive_DataReply },
4353 	[P_RS_DATA_REPLY]   = { 1, sizeof(struct p_data), receive_RSDataReply } ,
4354 	[P_BARRIER]	    = { 0, sizeof(struct p_barrier), receive_Barrier } ,
4355 	[P_BITMAP]	    = { 1, 0, receive_bitmap } ,
4356 	[P_COMPRESSED_BITMAP] = { 1, 0, receive_bitmap } ,
4357 	[P_UNPLUG_REMOTE]   = { 0, 0, receive_UnplugRemote },
4358 	[P_DATA_REQUEST]    = { 0, sizeof(struct p_block_req), receive_DataRequest },
4359 	[P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
4360 	[P_SYNC_PARAM]	    = { 1, 0, receive_SyncParam },
4361 	[P_SYNC_PARAM89]    = { 1, 0, receive_SyncParam },
4362 	[P_PROTOCOL]        = { 1, sizeof(struct p_protocol), receive_protocol },
4363 	[P_UUIDS]	    = { 0, sizeof(struct p_uuids), receive_uuids },
4364 	[P_SIZES]	    = { 0, sizeof(struct p_sizes), receive_sizes },
4365 	[P_STATE]	    = { 0, sizeof(struct p_state), receive_state },
4366 	[P_STATE_CHG_REQ]   = { 0, sizeof(struct p_req_state), receive_req_state },
4367 	[P_SYNC_UUID]       = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
4368 	[P_OV_REQUEST]      = { 0, sizeof(struct p_block_req), receive_DataRequest },
4369 	[P_OV_REPLY]        = { 1, sizeof(struct p_block_req), receive_DataRequest },
4370 	[P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
4371 	[P_DELAY_PROBE]     = { 0, sizeof(struct p_delay_probe93), receive_skip },
4372 	[P_OUT_OF_SYNC]     = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
4373 	[P_CONN_ST_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_conn_state },
4374 	[P_PROTOCOL_UPDATE] = { 1, sizeof(struct p_protocol), receive_protocol },
4375 };
4376 
4377 static void drbdd(struct drbd_tconn *tconn)
4378 {
4379 	struct packet_info pi;
4380 	size_t shs; /* sub header size */
4381 	int err;
4382 
4383 	while (get_t_state(&tconn->receiver) == RUNNING) {
4384 		struct data_cmd *cmd;
4385 
4386 		drbd_thread_current_set_cpu(&tconn->receiver);
4387 		if (drbd_recv_header(tconn, &pi))
4388 			goto err_out;
4389 
4390 		cmd = &drbd_cmd_handler[pi.cmd];
4391 		if (unlikely(pi.cmd >= ARRAY_SIZE(drbd_cmd_handler) || !cmd->fn)) {
4392 			conn_err(tconn, "Unexpected data packet %s (0x%04x)",
4393 				 cmdname(pi.cmd), pi.cmd);
4394 			goto err_out;
4395 		}
4396 
4397 		shs = cmd->pkt_size;
4398 		if (pi.size > shs && !cmd->expect_payload) {
4399 			conn_err(tconn, "No payload expected %s l:%d\n",
4400 				 cmdname(pi.cmd), pi.size);
4401 			goto err_out;
4402 		}
4403 
4404 		if (shs) {
4405 			err = drbd_recv_all_warn(tconn, pi.data, shs);
4406 			if (err)
4407 				goto err_out;
4408 			pi.size -= shs;
4409 		}
4410 
4411 		err = cmd->fn(tconn, &pi);
4412 		if (err) {
4413 			conn_err(tconn, "error receiving %s, e: %d l: %d!\n",
4414 				 cmdname(pi.cmd), err, pi.size);
4415 			goto err_out;
4416 		}
4417 	}
4418 	return;
4419 
4420     err_out:
4421 	conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
4422 }
4423 
4424 void conn_flush_workqueue(struct drbd_tconn *tconn)
4425 {
4426 	struct drbd_wq_barrier barr;
4427 
4428 	barr.w.cb = w_prev_work_done;
4429 	barr.w.tconn = tconn;
4430 	init_completion(&barr.done);
4431 	drbd_queue_work(&tconn->sender_work, &barr.w);
4432 	wait_for_completion(&barr.done);
4433 }
4434 
4435 static void conn_disconnect(struct drbd_tconn *tconn)
4436 {
4437 	struct drbd_conf *mdev;
4438 	enum drbd_conns oc;
4439 	int vnr;
4440 
4441 	if (tconn->cstate == C_STANDALONE)
4442 		return;
4443 
4444 	/* We are about to start the cleanup after connection loss.
4445 	 * Make sure drbd_make_request knows about that.
4446 	 * Usually we should be in some network failure state already,
4447 	 * but just in case we are not, we fix it up here.
4448 	 */
4449 	conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
4450 
4451 	/* asender does not clean up anything. it must not interfere, either */
4452 	drbd_thread_stop(&tconn->asender);
4453 	drbd_free_sock(tconn);
4454 
4455 	rcu_read_lock();
4456 	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
4457 		kref_get(&mdev->kref);
4458 		rcu_read_unlock();
4459 		drbd_disconnected(mdev);
4460 		kref_put(&mdev->kref, &drbd_minor_destroy);
4461 		rcu_read_lock();
4462 	}
4463 	rcu_read_unlock();
4464 
4465 	if (!list_empty(&tconn->current_epoch->list))
4466 		conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n");
4467 	/* ok, no more ee's on the fly, it is safe to reset the epoch_size */
4468 	atomic_set(&tconn->current_epoch->epoch_size, 0);
4469 	tconn->send.seen_any_write_yet = false;
4470 
4471 	conn_info(tconn, "Connection closed\n");
4472 
4473 	if (conn_highest_role(tconn) == R_PRIMARY && conn_highest_pdsk(tconn) >= D_UNKNOWN)
4474 		conn_try_outdate_peer_async(tconn);
4475 
4476 	spin_lock_irq(&tconn->req_lock);
4477 	oc = tconn->cstate;
4478 	if (oc >= C_UNCONNECTED)
4479 		_conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
4480 
4481 	spin_unlock_irq(&tconn->req_lock);
4482 
4483 	if (oc == C_DISCONNECTING)
4484 		conn_request_state(tconn, NS(conn, C_STANDALONE), CS_VERBOSE | CS_HARD);
4485 }
4486 
4487 static int drbd_disconnected(struct drbd_conf *mdev)
4488 {
4489 	unsigned int i;
4490 
4491 	/* wait for current activity to cease. */
4492 	spin_lock_irq(&mdev->tconn->req_lock);
4493 	_drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
4494 	_drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
4495 	_drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
4496 	spin_unlock_irq(&mdev->tconn->req_lock);
4497 
4498 	/* We do not have data structures that would allow us to
4499 	 * get the rs_pending_cnt down to 0 again.
4500 	 *  * On C_SYNC_TARGET we do not have any data structures describing
4501 	 *    the pending RSDataRequest's we have sent.
4502 	 *  * On C_SYNC_SOURCE there is no data structure that tracks
4503 	 *    the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
4504 	 *  And no, it is not the sum of the reference counts in the
4505 	 *  resync_LRU. The resync_LRU tracks the whole operation including
4506 	 *  the disk-IO, while the rs_pending_cnt only tracks the blocks
4507 	 *  on the fly. */
4508 	drbd_rs_cancel_all(mdev);
4509 	mdev->rs_total = 0;
4510 	mdev->rs_failed = 0;
4511 	atomic_set(&mdev->rs_pending_cnt, 0);
4512 	wake_up(&mdev->misc_wait);
4513 
4514 	del_timer_sync(&mdev->resync_timer);
4515 	resync_timer_fn((unsigned long)mdev);
4516 
4517 	/* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
4518 	 * w_make_resync_request etc. which may still be on the worker queue
4519 	 * to be "canceled" */
4520 	drbd_flush_workqueue(mdev);
4521 
4522 	drbd_finish_peer_reqs(mdev);
4523 
4524 	/* This second workqueue flush is necessary, since drbd_finish_peer_reqs()
4525 	   might have issued a work again. The one before drbd_finish_peer_reqs() is
4526 	   necessary to reclain net_ee in drbd_finish_peer_reqs(). */
4527 	drbd_flush_workqueue(mdev);
4528 
4529 	/* need to do it again, drbd_finish_peer_reqs() may have populated it
4530 	 * again via drbd_try_clear_on_disk_bm(). */
4531 	drbd_rs_cancel_all(mdev);
4532 
4533 	kfree(mdev->p_uuid);
4534 	mdev->p_uuid = NULL;
4535 
4536 	if (!drbd_suspended(mdev))
4537 		tl_clear(mdev->tconn);
4538 
4539 	drbd_md_sync(mdev);
4540 
4541 	/* serialize with bitmap writeout triggered by the state change,
4542 	 * if any. */
4543 	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
4544 
4545 	/* tcp_close and release of sendpage pages can be deferred.  I don't
4546 	 * want to use SO_LINGER, because apparently it can be deferred for
4547 	 * more than 20 seconds (longest time I checked).
4548 	 *
4549 	 * Actually we don't care for exactly when the network stack does its
4550 	 * put_page(), but release our reference on these pages right here.
4551 	 */
4552 	i = drbd_free_peer_reqs(mdev, &mdev->net_ee);
4553 	if (i)
4554 		dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
4555 	i = atomic_read(&mdev->pp_in_use_by_net);
4556 	if (i)
4557 		dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
4558 	i = atomic_read(&mdev->pp_in_use);
4559 	if (i)
4560 		dev_info(DEV, "pp_in_use = %d, expected 0\n", i);
4561 
4562 	D_ASSERT(list_empty(&mdev->read_ee));
4563 	D_ASSERT(list_empty(&mdev->active_ee));
4564 	D_ASSERT(list_empty(&mdev->sync_ee));
4565 	D_ASSERT(list_empty(&mdev->done_ee));
4566 
4567 	return 0;
4568 }
4569 
4570 /*
4571  * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
4572  * we can agree on is stored in agreed_pro_version.
4573  *
4574  * feature flags and the reserved array should be enough room for future
4575  * enhancements of the handshake protocol, and possible plugins...
4576  *
4577  * for now, they are expected to be zero, but ignored.
4578  */
4579 static int drbd_send_features(struct drbd_tconn *tconn)
4580 {
4581 	struct drbd_socket *sock;
4582 	struct p_connection_features *p;
4583 
4584 	sock = &tconn->data;
4585 	p = conn_prepare_command(tconn, sock);
4586 	if (!p)
4587 		return -EIO;
4588 	memset(p, 0, sizeof(*p));
4589 	p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
4590 	p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
4591 	return conn_send_command(tconn, sock, P_CONNECTION_FEATURES, sizeof(*p), NULL, 0);
4592 }
4593 
4594 /*
4595  * return values:
4596  *   1 yes, we have a valid connection
4597  *   0 oops, did not work out, please try again
4598  *  -1 peer talks different language,
4599  *     no point in trying again, please go standalone.
4600  */
4601 static int drbd_do_features(struct drbd_tconn *tconn)
4602 {
4603 	/* ASSERT current == tconn->receiver ... */
4604 	struct p_connection_features *p;
4605 	const int expect = sizeof(struct p_connection_features);
4606 	struct packet_info pi;
4607 	int err;
4608 
4609 	err = drbd_send_features(tconn);
4610 	if (err)
4611 		return 0;
4612 
4613 	err = drbd_recv_header(tconn, &pi);
4614 	if (err)
4615 		return 0;
4616 
4617 	if (pi.cmd != P_CONNECTION_FEATURES) {
4618 		conn_err(tconn, "expected ConnectionFeatures packet, received: %s (0x%04x)\n",
4619 			 cmdname(pi.cmd), pi.cmd);
4620 		return -1;
4621 	}
4622 
4623 	if (pi.size != expect) {
4624 		conn_err(tconn, "expected ConnectionFeatures length: %u, received: %u\n",
4625 		     expect, pi.size);
4626 		return -1;
4627 	}
4628 
4629 	p = pi.data;
4630 	err = drbd_recv_all_warn(tconn, p, expect);
4631 	if (err)
4632 		return 0;
4633 
4634 	p->protocol_min = be32_to_cpu(p->protocol_min);
4635 	p->protocol_max = be32_to_cpu(p->protocol_max);
4636 	if (p->protocol_max == 0)
4637 		p->protocol_max = p->protocol_min;
4638 
4639 	if (PRO_VERSION_MAX < p->protocol_min ||
4640 	    PRO_VERSION_MIN > p->protocol_max)
4641 		goto incompat;
4642 
4643 	tconn->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
4644 
4645 	conn_info(tconn, "Handshake successful: "
4646 	     "Agreed network protocol version %d\n", tconn->agreed_pro_version);
4647 
4648 	return 1;
4649 
4650  incompat:
4651 	conn_err(tconn, "incompatible DRBD dialects: "
4652 	    "I support %d-%d, peer supports %d-%d\n",
4653 	    PRO_VERSION_MIN, PRO_VERSION_MAX,
4654 	    p->protocol_min, p->protocol_max);
4655 	return -1;
4656 }
4657 
4658 #if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
4659 static int drbd_do_auth(struct drbd_tconn *tconn)
4660 {
4661 	conn_err(tconn, "This kernel was build without CONFIG_CRYPTO_HMAC.\n");
4662 	conn_err(tconn, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
4663 	return -1;
4664 }
4665 #else
4666 #define CHALLENGE_LEN 64
4667 
4668 /* Return value:
4669 	1 - auth succeeded,
4670 	0 - failed, try again (network error),
4671 	-1 - auth failed, don't try again.
4672 */
4673 
4674 static int drbd_do_auth(struct drbd_tconn *tconn)
4675 {
4676 	struct drbd_socket *sock;
4677 	char my_challenge[CHALLENGE_LEN];  /* 64 Bytes... */
4678 	struct scatterlist sg;
4679 	char *response = NULL;
4680 	char *right_response = NULL;
4681 	char *peers_ch = NULL;
4682 	unsigned int key_len;
4683 	char secret[SHARED_SECRET_MAX]; /* 64 byte */
4684 	unsigned int resp_size;
4685 	struct hash_desc desc;
4686 	struct packet_info pi;
4687 	struct net_conf *nc;
4688 	int err, rv;
4689 
4690 	/* FIXME: Put the challenge/response into the preallocated socket buffer.  */
4691 
4692 	rcu_read_lock();
4693 	nc = rcu_dereference(tconn->net_conf);
4694 	key_len = strlen(nc->shared_secret);
4695 	memcpy(secret, nc->shared_secret, key_len);
4696 	rcu_read_unlock();
4697 
4698 	desc.tfm = tconn->cram_hmac_tfm;
4699 	desc.flags = 0;
4700 
4701 	rv = crypto_hash_setkey(tconn->cram_hmac_tfm, (u8 *)secret, key_len);
4702 	if (rv) {
4703 		conn_err(tconn, "crypto_hash_setkey() failed with %d\n", rv);
4704 		rv = -1;
4705 		goto fail;
4706 	}
4707 
4708 	get_random_bytes(my_challenge, CHALLENGE_LEN);
4709 
4710 	sock = &tconn->data;
4711 	if (!conn_prepare_command(tconn, sock)) {
4712 		rv = 0;
4713 		goto fail;
4714 	}
4715 	rv = !conn_send_command(tconn, sock, P_AUTH_CHALLENGE, 0,
4716 				my_challenge, CHALLENGE_LEN);
4717 	if (!rv)
4718 		goto fail;
4719 
4720 	err = drbd_recv_header(tconn, &pi);
4721 	if (err) {
4722 		rv = 0;
4723 		goto fail;
4724 	}
4725 
4726 	if (pi.cmd != P_AUTH_CHALLENGE) {
4727 		conn_err(tconn, "expected AuthChallenge packet, received: %s (0x%04x)\n",
4728 			 cmdname(pi.cmd), pi.cmd);
4729 		rv = 0;
4730 		goto fail;
4731 	}
4732 
4733 	if (pi.size > CHALLENGE_LEN * 2) {
4734 		conn_err(tconn, "expected AuthChallenge payload too big.\n");
4735 		rv = -1;
4736 		goto fail;
4737 	}
4738 
4739 	peers_ch = kmalloc(pi.size, GFP_NOIO);
4740 	if (peers_ch == NULL) {
4741 		conn_err(tconn, "kmalloc of peers_ch failed\n");
4742 		rv = -1;
4743 		goto fail;
4744 	}
4745 
4746 	err = drbd_recv_all_warn(tconn, peers_ch, pi.size);
4747 	if (err) {
4748 		rv = 0;
4749 		goto fail;
4750 	}
4751 
4752 	resp_size = crypto_hash_digestsize(tconn->cram_hmac_tfm);
4753 	response = kmalloc(resp_size, GFP_NOIO);
4754 	if (response == NULL) {
4755 		conn_err(tconn, "kmalloc of response failed\n");
4756 		rv = -1;
4757 		goto fail;
4758 	}
4759 
4760 	sg_init_table(&sg, 1);
4761 	sg_set_buf(&sg, peers_ch, pi.size);
4762 
4763 	rv = crypto_hash_digest(&desc, &sg, sg.length, response);
4764 	if (rv) {
4765 		conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
4766 		rv = -1;
4767 		goto fail;
4768 	}
4769 
4770 	if (!conn_prepare_command(tconn, sock)) {
4771 		rv = 0;
4772 		goto fail;
4773 	}
4774 	rv = !conn_send_command(tconn, sock, P_AUTH_RESPONSE, 0,
4775 				response, resp_size);
4776 	if (!rv)
4777 		goto fail;
4778 
4779 	err = drbd_recv_header(tconn, &pi);
4780 	if (err) {
4781 		rv = 0;
4782 		goto fail;
4783 	}
4784 
4785 	if (pi.cmd != P_AUTH_RESPONSE) {
4786 		conn_err(tconn, "expected AuthResponse packet, received: %s (0x%04x)\n",
4787 			 cmdname(pi.cmd), pi.cmd);
4788 		rv = 0;
4789 		goto fail;
4790 	}
4791 
4792 	if (pi.size != resp_size) {
4793 		conn_err(tconn, "expected AuthResponse payload of wrong size\n");
4794 		rv = 0;
4795 		goto fail;
4796 	}
4797 
4798 	err = drbd_recv_all_warn(tconn, response , resp_size);
4799 	if (err) {
4800 		rv = 0;
4801 		goto fail;
4802 	}
4803 
4804 	right_response = kmalloc(resp_size, GFP_NOIO);
4805 	if (right_response == NULL) {
4806 		conn_err(tconn, "kmalloc of right_response failed\n");
4807 		rv = -1;
4808 		goto fail;
4809 	}
4810 
4811 	sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);
4812 
4813 	rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
4814 	if (rv) {
4815 		conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
4816 		rv = -1;
4817 		goto fail;
4818 	}
4819 
4820 	rv = !memcmp(response, right_response, resp_size);
4821 
4822 	if (rv)
4823 		conn_info(tconn, "Peer authenticated using %d bytes HMAC\n",
4824 		     resp_size);
4825 	else
4826 		rv = -1;
4827 
4828  fail:
4829 	kfree(peers_ch);
4830 	kfree(response);
4831 	kfree(right_response);
4832 
4833 	return rv;
4834 }
4835 #endif
4836 
4837 int drbdd_init(struct drbd_thread *thi)
4838 {
4839 	struct drbd_tconn *tconn = thi->tconn;
4840 	int h;
4841 
4842 	conn_info(tconn, "receiver (re)started\n");
4843 
4844 	do {
4845 		h = conn_connect(tconn);
4846 		if (h == 0) {
4847 			conn_disconnect(tconn);
4848 			schedule_timeout_interruptible(HZ);
4849 		}
4850 		if (h == -1) {
4851 			conn_warn(tconn, "Discarding network configuration.\n");
4852 			conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
4853 		}
4854 	} while (h == 0);
4855 
4856 	if (h > 0)
4857 		drbdd(tconn);
4858 
4859 	conn_disconnect(tconn);
4860 
4861 	conn_info(tconn, "receiver terminated\n");
4862 	return 0;
4863 }
4864 
4865 /* ********* acknowledge sender ******** */
4866 
4867 static int got_conn_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi)
4868 {
4869 	struct p_req_state_reply *p = pi->data;
4870 	int retcode = be32_to_cpu(p->retcode);
4871 
4872 	if (retcode >= SS_SUCCESS) {
4873 		set_bit(CONN_WD_ST_CHG_OKAY, &tconn->flags);
4874 	} else {
4875 		set_bit(CONN_WD_ST_CHG_FAIL, &tconn->flags);
4876 		conn_err(tconn, "Requested state change failed by peer: %s (%d)\n",
4877 			 drbd_set_st_err_str(retcode), retcode);
4878 	}
4879 	wake_up(&tconn->ping_wait);
4880 
4881 	return 0;
4882 }
4883 
4884 static int got_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi)
4885 {
4886 	struct drbd_conf *mdev;
4887 	struct p_req_state_reply *p = pi->data;
4888 	int retcode = be32_to_cpu(p->retcode);
4889 
4890 	mdev = vnr_to_mdev(tconn, pi->vnr);
4891 	if (!mdev)
4892 		return -EIO;
4893 
4894 	if (test_bit(CONN_WD_ST_CHG_REQ, &tconn->flags)) {
4895 		D_ASSERT(tconn->agreed_pro_version < 100);
4896 		return got_conn_RqSReply(tconn, pi);
4897 	}
4898 
4899 	if (retcode >= SS_SUCCESS) {
4900 		set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
4901 	} else {
4902 		set_bit(CL_ST_CHG_FAIL, &mdev->flags);
4903 		dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
4904 			drbd_set_st_err_str(retcode), retcode);
4905 	}
4906 	wake_up(&mdev->state_wait);
4907 
4908 	return 0;
4909 }
4910 
4911 static int got_Ping(struct drbd_tconn *tconn, struct packet_info *pi)
4912 {
4913 	return drbd_send_ping_ack(tconn);
4914 
4915 }
4916 
4917 static int got_PingAck(struct drbd_tconn *tconn, struct packet_info *pi)
4918 {
4919 	/* restore idle timeout */
4920 	tconn->meta.socket->sk->sk_rcvtimeo = tconn->net_conf->ping_int*HZ;
4921 	if (!test_and_set_bit(GOT_PING_ACK, &tconn->flags))
4922 		wake_up(&tconn->ping_wait);
4923 
4924 	return 0;
4925 }
4926 
4927 static int got_IsInSync(struct drbd_tconn *tconn, struct packet_info *pi)
4928 {
4929 	struct drbd_conf *mdev;
4930 	struct p_block_ack *p = pi->data;
4931 	sector_t sector = be64_to_cpu(p->sector);
4932 	int blksize = be32_to_cpu(p->blksize);
4933 
4934 	mdev = vnr_to_mdev(tconn, pi->vnr);
4935 	if (!mdev)
4936 		return -EIO;
4937 
4938 	D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
4939 
4940 	update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4941 
4942 	if (get_ldev(mdev)) {
4943 		drbd_rs_complete_io(mdev, sector);
4944 		drbd_set_in_sync(mdev, sector, blksize);
4945 		/* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
4946 		mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
4947 		put_ldev(mdev);
4948 	}
4949 	dec_rs_pending(mdev);
4950 	atomic_add(blksize >> 9, &mdev->rs_sect_in);
4951 
4952 	return 0;
4953 }
4954 
4955 static int
4956 validate_req_change_req_state(struct drbd_conf *mdev, u64 id, sector_t sector,
4957 			      struct rb_root *root, const char *func,
4958 			      enum drbd_req_event what, bool missing_ok)
4959 {
4960 	struct drbd_request *req;
4961 	struct bio_and_error m;
4962 
4963 	spin_lock_irq(&mdev->tconn->req_lock);
4964 	req = find_request(mdev, root, id, sector, missing_ok, func);
4965 	if (unlikely(!req)) {
4966 		spin_unlock_irq(&mdev->tconn->req_lock);
4967 		return -EIO;
4968 	}
4969 	__req_mod(req, what, &m);
4970 	spin_unlock_irq(&mdev->tconn->req_lock);
4971 
4972 	if (m.bio)
4973 		complete_master_bio(mdev, &m);
4974 	return 0;
4975 }
4976 
4977 static int got_BlockAck(struct drbd_tconn *tconn, struct packet_info *pi)
4978 {
4979 	struct drbd_conf *mdev;
4980 	struct p_block_ack *p = pi->data;
4981 	sector_t sector = be64_to_cpu(p->sector);
4982 	int blksize = be32_to_cpu(p->blksize);
4983 	enum drbd_req_event what;
4984 
4985 	mdev = vnr_to_mdev(tconn, pi->vnr);
4986 	if (!mdev)
4987 		return -EIO;
4988 
4989 	update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4990 
4991 	if (p->block_id == ID_SYNCER) {
4992 		drbd_set_in_sync(mdev, sector, blksize);
4993 		dec_rs_pending(mdev);
4994 		return 0;
4995 	}
4996 	switch (pi->cmd) {
4997 	case P_RS_WRITE_ACK:
4998 		what = WRITE_ACKED_BY_PEER_AND_SIS;
4999 		break;
5000 	case P_WRITE_ACK:
5001 		what = WRITE_ACKED_BY_PEER;
5002 		break;
5003 	case P_RECV_ACK:
5004 		what = RECV_ACKED_BY_PEER;
5005 		break;
5006 	case P_SUPERSEDED:
5007 		what = CONFLICT_RESOLVED;
5008 		break;
5009 	case P_RETRY_WRITE:
5010 		what = POSTPONE_WRITE;
5011 		break;
5012 	default:
5013 		BUG();
5014 	}
5015 
5016 	return validate_req_change_req_state(mdev, p->block_id, sector,
5017 					     &mdev->write_requests, __func__,
5018 					     what, false);
5019 }
5020 
5021 static int got_NegAck(struct drbd_tconn *tconn, struct packet_info *pi)
5022 {
5023 	struct drbd_conf *mdev;
5024 	struct p_block_ack *p = pi->data;
5025 	sector_t sector = be64_to_cpu(p->sector);
5026 	int size = be32_to_cpu(p->blksize);
5027 	int err;
5028 
5029 	mdev = vnr_to_mdev(tconn, pi->vnr);
5030 	if (!mdev)
5031 		return -EIO;
5032 
5033 	update_peer_seq(mdev, be32_to_cpu(p->seq_num));
5034 
5035 	if (p->block_id == ID_SYNCER) {
5036 		dec_rs_pending(mdev);
5037 		drbd_rs_failed_io(mdev, sector, size);
5038 		return 0;
5039 	}
5040 
5041 	err = validate_req_change_req_state(mdev, p->block_id, sector,
5042 					    &mdev->write_requests, __func__,
5043 					    NEG_ACKED, true);
5044 	if (err) {
5045 		/* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
5046 		   The master bio might already be completed, therefore the
5047 		   request is no longer in the collision hash. */
5048 		/* In Protocol B we might already have got a P_RECV_ACK
5049 		   but then get a P_NEG_ACK afterwards. */
5050 		drbd_set_out_of_sync(mdev, sector, size);
5051 	}
5052 	return 0;
5053 }
5054 
5055 static int got_NegDReply(struct drbd_tconn *tconn, struct packet_info *pi)
5056 {
5057 	struct drbd_conf *mdev;
5058 	struct p_block_ack *p = pi->data;
5059 	sector_t sector = be64_to_cpu(p->sector);
5060 
5061 	mdev = vnr_to_mdev(tconn, pi->vnr);
5062 	if (!mdev)
5063 		return -EIO;
5064 
5065 	update_peer_seq(mdev, be32_to_cpu(p->seq_num));
5066 
5067 	dev_err(DEV, "Got NegDReply; Sector %llus, len %u.\n",
5068 	    (unsigned long long)sector, be32_to_cpu(p->blksize));
5069 
5070 	return validate_req_change_req_state(mdev, p->block_id, sector,
5071 					     &mdev->read_requests, __func__,
5072 					     NEG_ACKED, false);
5073 }
5074 
5075 static int got_NegRSDReply(struct drbd_tconn *tconn, struct packet_info *pi)
5076 {
5077 	struct drbd_conf *mdev;
5078 	sector_t sector;
5079 	int size;
5080 	struct p_block_ack *p = pi->data;
5081 
5082 	mdev = vnr_to_mdev(tconn, pi->vnr);
5083 	if (!mdev)
5084 		return -EIO;
5085 
5086 	sector = be64_to_cpu(p->sector);
5087 	size = be32_to_cpu(p->blksize);
5088 
5089 	update_peer_seq(mdev, be32_to_cpu(p->seq_num));
5090 
5091 	dec_rs_pending(mdev);
5092 
5093 	if (get_ldev_if_state(mdev, D_FAILED)) {
5094 		drbd_rs_complete_io(mdev, sector);
5095 		switch (pi->cmd) {
5096 		case P_NEG_RS_DREPLY:
5097 			drbd_rs_failed_io(mdev, sector, size);
5098 		case P_RS_CANCEL:
5099 			break;
5100 		default:
5101 			BUG();
5102 		}
5103 		put_ldev(mdev);
5104 	}
5105 
5106 	return 0;
5107 }
5108 
5109 static int got_BarrierAck(struct drbd_tconn *tconn, struct packet_info *pi)
5110 {
5111 	struct p_barrier_ack *p = pi->data;
5112 	struct drbd_conf *mdev;
5113 	int vnr;
5114 
5115 	tl_release(tconn, p->barrier, be32_to_cpu(p->set_size));
5116 
5117 	rcu_read_lock();
5118 	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
5119 		if (mdev->state.conn == C_AHEAD &&
5120 		    atomic_read(&mdev->ap_in_flight) == 0 &&
5121 		    !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags)) {
5122 			mdev->start_resync_timer.expires = jiffies + HZ;
5123 			add_timer(&mdev->start_resync_timer);
5124 		}
5125 	}
5126 	rcu_read_unlock();
5127 
5128 	return 0;
5129 }
5130 
5131 static int got_OVResult(struct drbd_tconn *tconn, struct packet_info *pi)
5132 {
5133 	struct drbd_conf *mdev;
5134 	struct p_block_ack *p = pi->data;
5135 	struct drbd_work *w;
5136 	sector_t sector;
5137 	int size;
5138 
5139 	mdev = vnr_to_mdev(tconn, pi->vnr);
5140 	if (!mdev)
5141 		return -EIO;
5142 
5143 	sector = be64_to_cpu(p->sector);
5144 	size = be32_to_cpu(p->blksize);
5145 
5146 	update_peer_seq(mdev, be32_to_cpu(p->seq_num));
5147 
5148 	if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
5149 		drbd_ov_out_of_sync_found(mdev, sector, size);
5150 	else
5151 		ov_out_of_sync_print(mdev);
5152 
5153 	if (!get_ldev(mdev))
5154 		return 0;
5155 
5156 	drbd_rs_complete_io(mdev, sector);
5157 	dec_rs_pending(mdev);
5158 
5159 	--mdev->ov_left;
5160 
5161 	/* let's advance progress step marks only for every other megabyte */
5162 	if ((mdev->ov_left & 0x200) == 0x200)
5163 		drbd_advance_rs_marks(mdev, mdev->ov_left);
5164 
5165 	if (mdev->ov_left == 0) {
5166 		w = kmalloc(sizeof(*w), GFP_NOIO);
5167 		if (w) {
5168 			w->cb = w_ov_finished;
5169 			w->mdev = mdev;
5170 			drbd_queue_work(&mdev->tconn->sender_work, w);
5171 		} else {
5172 			dev_err(DEV, "kmalloc(w) failed.");
5173 			ov_out_of_sync_print(mdev);
5174 			drbd_resync_finished(mdev);
5175 		}
5176 	}
5177 	put_ldev(mdev);
5178 	return 0;
5179 }
5180 
5181 static int got_skip(struct drbd_tconn *tconn, struct packet_info *pi)
5182 {
5183 	return 0;
5184 }
5185 
5186 static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
5187 {
5188 	struct drbd_conf *mdev;
5189 	int vnr, not_empty = 0;
5190 
5191 	do {
5192 		clear_bit(SIGNAL_ASENDER, &tconn->flags);
5193 		flush_signals(current);
5194 
5195 		rcu_read_lock();
5196 		idr_for_each_entry(&tconn->volumes, mdev, vnr) {
5197 			kref_get(&mdev->kref);
5198 			rcu_read_unlock();
5199 			if (drbd_finish_peer_reqs(mdev)) {
5200 				kref_put(&mdev->kref, &drbd_minor_destroy);
5201 				return 1;
5202 			}
5203 			kref_put(&mdev->kref, &drbd_minor_destroy);
5204 			rcu_read_lock();
5205 		}
5206 		set_bit(SIGNAL_ASENDER, &tconn->flags);
5207 
5208 		spin_lock_irq(&tconn->req_lock);
5209 		idr_for_each_entry(&tconn->volumes, mdev, vnr) {
5210 			not_empty = !list_empty(&mdev->done_ee);
5211 			if (not_empty)
5212 				break;
5213 		}
5214 		spin_unlock_irq(&tconn->req_lock);
5215 		rcu_read_unlock();
5216 	} while (not_empty);
5217 
5218 	return 0;
5219 }
5220 
5221 struct asender_cmd {
5222 	size_t pkt_size;
5223 	int (*fn)(struct drbd_tconn *tconn, struct packet_info *);
5224 };
5225 
5226 static struct asender_cmd asender_tbl[] = {
5227 	[P_PING]	    = { 0, got_Ping },
5228 	[P_PING_ACK]	    = { 0, got_PingAck },
5229 	[P_RECV_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
5230 	[P_WRITE_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
5231 	[P_RS_WRITE_ACK]    = { sizeof(struct p_block_ack), got_BlockAck },
5232 	[P_SUPERSEDED]   = { sizeof(struct p_block_ack), got_BlockAck },
5233 	[P_NEG_ACK]	    = { sizeof(struct p_block_ack), got_NegAck },
5234 	[P_NEG_DREPLY]	    = { sizeof(struct p_block_ack), got_NegDReply },
5235 	[P_NEG_RS_DREPLY]   = { sizeof(struct p_block_ack), got_NegRSDReply },
5236 	[P_OV_RESULT]	    = { sizeof(struct p_block_ack), got_OVResult },
5237 	[P_BARRIER_ACK]	    = { sizeof(struct p_barrier_ack), got_BarrierAck },
5238 	[P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
5239 	[P_RS_IS_IN_SYNC]   = { sizeof(struct p_block_ack), got_IsInSync },
5240 	[P_DELAY_PROBE]     = { sizeof(struct p_delay_probe93), got_skip },
5241 	[P_RS_CANCEL]       = { sizeof(struct p_block_ack), got_NegRSDReply },
5242 	[P_CONN_ST_CHG_REPLY]={ sizeof(struct p_req_state_reply), got_conn_RqSReply },
5243 	[P_RETRY_WRITE]	    = { sizeof(struct p_block_ack), got_BlockAck },
5244 };
5245 
5246 int drbd_asender(struct drbd_thread *thi)
5247 {
5248 	struct drbd_tconn *tconn = thi->tconn;
5249 	struct asender_cmd *cmd = NULL;
5250 	struct packet_info pi;
5251 	int rv;
5252 	void *buf    = tconn->meta.rbuf;
5253 	int received = 0;
5254 	unsigned int header_size = drbd_header_size(tconn);
5255 	int expect   = header_size;
5256 	bool ping_timeout_active = false;
5257 	struct net_conf *nc;
5258 	int ping_timeo, tcp_cork, ping_int;
5259 	struct sched_param param = { .sched_priority = 2 };
5260 
5261 	rv = sched_setscheduler(current, SCHED_RR, &param);
5262 	if (rv < 0)
5263 		conn_err(tconn, "drbd_asender: ERROR set priority, ret=%d\n", rv);
5264 
5265 	while (get_t_state(thi) == RUNNING) {
5266 		drbd_thread_current_set_cpu(thi);
5267 
5268 		rcu_read_lock();
5269 		nc = rcu_dereference(tconn->net_conf);
5270 		ping_timeo = nc->ping_timeo;
5271 		tcp_cork = nc->tcp_cork;
5272 		ping_int = nc->ping_int;
5273 		rcu_read_unlock();
5274 
5275 		if (test_and_clear_bit(SEND_PING, &tconn->flags)) {
5276 			if (drbd_send_ping(tconn)) {
5277 				conn_err(tconn, "drbd_send_ping has failed\n");
5278 				goto reconnect;
5279 			}
5280 			tconn->meta.socket->sk->sk_rcvtimeo = ping_timeo * HZ / 10;
5281 			ping_timeout_active = true;
5282 		}
5283 
5284 		/* TODO: conditionally cork; it may hurt latency if we cork without
5285 		   much to send */
5286 		if (tcp_cork)
5287 			drbd_tcp_cork(tconn->meta.socket);
5288 		if (tconn_finish_peer_reqs(tconn)) {
5289 			conn_err(tconn, "tconn_finish_peer_reqs() failed\n");
5290 			goto reconnect;
5291 		}
5292 		/* but unconditionally uncork unless disabled */
5293 		if (tcp_cork)
5294 			drbd_tcp_uncork(tconn->meta.socket);
5295 
5296 		/* short circuit, recv_msg would return EINTR anyways. */
5297 		if (signal_pending(current))
5298 			continue;
5299 
5300 		rv = drbd_recv_short(tconn->meta.socket, buf, expect-received, 0);
5301 		clear_bit(SIGNAL_ASENDER, &tconn->flags);
5302 
5303 		flush_signals(current);
5304 
5305 		/* Note:
5306 		 * -EINTR	 (on meta) we got a signal
5307 		 * -EAGAIN	 (on meta) rcvtimeo expired
5308 		 * -ECONNRESET	 other side closed the connection
5309 		 * -ERESTARTSYS  (on data) we got a signal
5310 		 * rv <  0	 other than above: unexpected error!
5311 		 * rv == expected: full header or command
5312 		 * rv <  expected: "woken" by signal during receive
5313 		 * rv == 0	 : "connection shut down by peer"
5314 		 */
5315 		if (likely(rv > 0)) {
5316 			received += rv;
5317 			buf	 += rv;
5318 		} else if (rv == 0) {
5319 			if (test_bit(DISCONNECT_SENT, &tconn->flags)) {
5320 				long t;
5321 				rcu_read_lock();
5322 				t = rcu_dereference(tconn->net_conf)->ping_timeo * HZ/10;
5323 				rcu_read_unlock();
5324 
5325 				t = wait_event_timeout(tconn->ping_wait,
5326 						       tconn->cstate < C_WF_REPORT_PARAMS,
5327 						       t);
5328 				if (t)
5329 					break;
5330 			}
5331 			conn_err(tconn, "meta connection shut down by peer.\n");
5332 			goto reconnect;
5333 		} else if (rv == -EAGAIN) {
5334 			/* If the data socket received something meanwhile,
5335 			 * that is good enough: peer is still alive. */
5336 			if (time_after(tconn->last_received,
5337 				jiffies - tconn->meta.socket->sk->sk_rcvtimeo))
5338 				continue;
5339 			if (ping_timeout_active) {
5340 				conn_err(tconn, "PingAck did not arrive in time.\n");
5341 				goto reconnect;
5342 			}
5343 			set_bit(SEND_PING, &tconn->flags);
5344 			continue;
5345 		} else if (rv == -EINTR) {
5346 			continue;
5347 		} else {
5348 			conn_err(tconn, "sock_recvmsg returned %d\n", rv);
5349 			goto reconnect;
5350 		}
5351 
5352 		if (received == expect && cmd == NULL) {
5353 			if (decode_header(tconn, tconn->meta.rbuf, &pi))
5354 				goto reconnect;
5355 			cmd = &asender_tbl[pi.cmd];
5356 			if (pi.cmd >= ARRAY_SIZE(asender_tbl) || !cmd->fn) {
5357 				conn_err(tconn, "Unexpected meta packet %s (0x%04x)\n",
5358 					 cmdname(pi.cmd), pi.cmd);
5359 				goto disconnect;
5360 			}
5361 			expect = header_size + cmd->pkt_size;
5362 			if (pi.size != expect - header_size) {
5363 				conn_err(tconn, "Wrong packet size on meta (c: %d, l: %d)\n",
5364 					pi.cmd, pi.size);
5365 				goto reconnect;
5366 			}
5367 		}
5368 		if (received == expect) {
5369 			bool err;
5370 
5371 			err = cmd->fn(tconn, &pi);
5372 			if (err) {
5373 				conn_err(tconn, "%pf failed\n", cmd->fn);
5374 				goto reconnect;
5375 			}
5376 
5377 			tconn->last_received = jiffies;
5378 
5379 			if (cmd == &asender_tbl[P_PING_ACK]) {
5380 				/* restore idle timeout */
5381 				tconn->meta.socket->sk->sk_rcvtimeo = ping_int * HZ;
5382 				ping_timeout_active = false;
5383 			}
5384 
5385 			buf	 = tconn->meta.rbuf;
5386 			received = 0;
5387 			expect	 = header_size;
5388 			cmd	 = NULL;
5389 		}
5390 	}
5391 
5392 	if (0) {
5393 reconnect:
5394 		conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
5395 		conn_md_sync(tconn);
5396 	}
5397 	if (0) {
5398 disconnect:
5399 		conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
5400 	}
5401 	clear_bit(SIGNAL_ASENDER, &tconn->flags);
5402 
5403 	conn_info(tconn, "asender terminated\n");
5404 
5405 	return 0;
5406 }
5407