xref: /openbmc/linux/drivers/block/drbd/drbd_main.c (revision e23feb16)
1 /*
2    drbd.c
3 
4    This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5 
6    Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7    Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8    Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9 
10    Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
11    from Logicworks, Inc. for making SDP replication support possible.
12 
13    drbd is free software; you can redistribute it and/or modify
14    it under the terms of the GNU General Public License as published by
15    the Free Software Foundation; either version 2, or (at your option)
16    any later version.
17 
18    drbd is distributed in the hope that it will be useful,
19    but WITHOUT ANY WARRANTY; without even the implied warranty of
20    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21    GNU General Public License for more details.
22 
23    You should have received a copy of the GNU General Public License
24    along with drbd; see the file COPYING.  If not, write to
25    the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
26 
27  */
28 
29 #include <linux/module.h>
30 #include <linux/drbd.h>
31 #include <asm/uaccess.h>
32 #include <asm/types.h>
33 #include <net/sock.h>
34 #include <linux/ctype.h>
35 #include <linux/mutex.h>
36 #include <linux/fs.h>
37 #include <linux/file.h>
38 #include <linux/proc_fs.h>
39 #include <linux/init.h>
40 #include <linux/mm.h>
41 #include <linux/memcontrol.h>
42 #include <linux/mm_inline.h>
43 #include <linux/slab.h>
44 #include <linux/random.h>
45 #include <linux/reboot.h>
46 #include <linux/notifier.h>
47 #include <linux/kthread.h>
48 #include <linux/workqueue.h>
49 #define __KERNEL_SYSCALLS__
50 #include <linux/unistd.h>
51 #include <linux/vmalloc.h>
52 
53 #include <linux/drbd_limits.h>
54 #include "drbd_int.h"
55 #include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */
56 
57 #include "drbd_vli.h"
58 
59 static DEFINE_MUTEX(drbd_main_mutex);
60 int drbdd_init(struct drbd_thread *);
61 int drbd_worker(struct drbd_thread *);
62 int drbd_asender(struct drbd_thread *);
63 
64 int drbd_init(void);
65 static int drbd_open(struct block_device *bdev, fmode_t mode);
66 static void drbd_release(struct gendisk *gd, fmode_t mode);
67 static int w_md_sync(struct drbd_work *w, int unused);
68 static void md_sync_timer_fn(unsigned long data);
69 static int w_bitmap_io(struct drbd_work *w, int unused);
70 static int w_go_diskless(struct drbd_work *w, int unused);
71 
72 MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
73 	      "Lars Ellenberg <lars@linbit.com>");
74 MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
75 MODULE_VERSION(REL_VERSION);
76 MODULE_LICENSE("GPL");
77 MODULE_PARM_DESC(minor_count, "Approximate number of drbd devices ("
78 		 __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
79 MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);
80 
81 #include <linux/moduleparam.h>
82 /* allow_open_on_secondary */
83 MODULE_PARM_DESC(allow_oos, "DONT USE!");
84 /* thanks to these macros, if compiled into the kernel (not as a module),
85  * this becomes the boot parameter drbd.minor_count */
86 module_param(minor_count, uint, 0444);
87 module_param(disable_sendpage, bool, 0644);
88 module_param(allow_oos, bool, 0);
89 module_param(proc_details, int, 0644);
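/* Illustrative usage of the parameters above (not from this file): with DRBD
 * built into the kernel, "drbd.minor_count=16" on the kernel command line is
 * the equivalent of "modprobe drbd minor_count=16" for the modular build. */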
90 
91 #ifdef CONFIG_DRBD_FAULT_INJECTION
92 int enable_faults;
93 int fault_rate;
94 static int fault_count;
95 int fault_devs;
96 /* bitmap of enabled faults */
97 module_param(enable_faults, int, 0664);
98 /* fault rate % value - applies to all enabled faults */
99 module_param(fault_rate, int, 0664);
100 /* count of faults inserted */
101 module_param(fault_count, int, 0664);
102 /* bitmap of devices to insert faults on */
103 module_param(fault_devs, int, 0644);
104 #endif
105 
106 /* module parameter, defined */
107 unsigned int minor_count = DRBD_MINOR_COUNT_DEF;
108 bool disable_sendpage;
109 bool allow_oos;
110 int proc_details;       /* Detail level in proc drbd */
111 
112 /* Module parameter for setting the user mode helper program
113  * to run. Default is /sbin/drbdadm */
114 char usermode_helper[80] = "/sbin/drbdadm";
115 
116 module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);
117 
118 /* in 2.6.x, our device mapping and config info contains our virtual gendisks
119  * as member "struct gendisk *vdisk;"
120  */
121 struct idr minors;
122 struct list_head drbd_tconns;  /* list of struct drbd_tconn */
123 
124 struct kmem_cache *drbd_request_cache;
125 struct kmem_cache *drbd_ee_cache;	/* peer requests */
126 struct kmem_cache *drbd_bm_ext_cache;	/* bitmap extents */
127 struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
128 mempool_t *drbd_request_mempool;
129 mempool_t *drbd_ee_mempool;
130 mempool_t *drbd_md_io_page_pool;
131 struct bio_set *drbd_md_io_bio_set;
132 
133 /* I do not use a standard mempool, because:
134    1) I want to hand out the pre-allocated objects first.
135    2) I want to be able to interrupt sleeping allocation with a signal.
136    Note: This is a singly linked list; the next pointer is the private
137 	 member of struct page.
138  */
139 struct page *drbd_pp_pool;
140 spinlock_t   drbd_pp_lock;
141 int          drbd_pp_vacant;
142 wait_queue_head_t drbd_pp_wait;
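/* A minimal sketch of the page-chain convention described above (illustrative
 * only, using generic kernel helpers rather than code from this file):
 *
 *	set_page_private(page, (unsigned long)drbd_pp_pool);
 *	drbd_pp_pool = page;
 *
 * pushes a page onto the pool; popping reverses the two steps.  Both are done
 * under drbd_pp_lock. */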
143 
144 DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);
145 
146 static const struct block_device_operations drbd_ops = {
147 	.owner =   THIS_MODULE,
148 	.open =    drbd_open,
149 	.release = drbd_release,
150 };
151 
152 struct bio *bio_alloc_drbd(gfp_t gfp_mask)
153 {
154 	struct bio *bio;
155 
156 	if (!drbd_md_io_bio_set)
157 		return bio_alloc(gfp_mask, 1);
158 
159 	bio = bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set);
160 	if (!bio)
161 		return NULL;
162 	return bio;
163 }
164 
165 #ifdef __CHECKER__
166 /* When checking with sparse, and this is an inline function, sparse will
167    give tons of false positives. When this is a real function, sparse works.
168  */
169 int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
170 {
171 	int io_allowed;
172 
173 	atomic_inc(&mdev->local_cnt);
174 	io_allowed = (mdev->state.disk >= mins);
175 	if (!io_allowed) {
176 		if (atomic_dec_and_test(&mdev->local_cnt))
177 			wake_up(&mdev->misc_wait);
178 	}
179 	return io_allowed;
180 }
181 
182 #endif
183 
184 /**
185  * tl_release() - mark as BARRIER_ACKED all requests in the corresponding transfer log epoch
186  * @tconn:	DRBD connection.
187  * @barrier_nr:	Expected identifier of the DRBD write barrier packet.
188  * @set_size:	Expected number of requests before that barrier.
189  *
190  * In case the passed barrier_nr or set_size does not match the oldest
191  * epoch of not yet barrier-acked requests, this function will cause a
192  * termination of the connection.
193  */
194 void tl_release(struct drbd_tconn *tconn, unsigned int barrier_nr,
195 		unsigned int set_size)
196 {
197 	struct drbd_request *r;
198 	struct drbd_request *req = NULL;
199 	int expect_epoch = 0;
200 	int expect_size = 0;
201 
202 	spin_lock_irq(&tconn->req_lock);
203 
204 	/* find oldest not yet barrier-acked write request,
205 	 * count writes in its epoch. */
206 	list_for_each_entry(r, &tconn->transfer_log, tl_requests) {
207 		const unsigned s = r->rq_state;
208 		if (!req) {
209 			if (!(s & RQ_WRITE))
210 				continue;
211 			if (!(s & RQ_NET_MASK))
212 				continue;
213 			if (s & RQ_NET_DONE)
214 				continue;
215 			req = r;
216 			expect_epoch = req->epoch;
217 			expect_size++;
218 		} else {
219 			if (r->epoch != expect_epoch)
220 				break;
221 			if (!(s & RQ_WRITE))
222 				continue;
223 			/* if (s & RQ_DONE): not expected */
224 			/* if (!(s & RQ_NET_MASK)): not expected */
225 			expect_size++;
226 		}
227 	}
228 
229 	/* first some paranoia code */
230 	if (req == NULL) {
231 		conn_err(tconn, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
232 			 barrier_nr);
233 		goto bail;
234 	}
235 	if (expect_epoch != barrier_nr) {
236 		conn_err(tconn, "BAD! BarrierAck #%u received, expected #%u!\n",
237 			 barrier_nr, expect_epoch);
238 		goto bail;
239 	}
240 
241 	if (expect_size != set_size) {
242 		conn_err(tconn, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
243 			 barrier_nr, set_size, expect_size);
244 		goto bail;
245 	}
246 
247 	/* Clean up list of requests processed during current epoch. */
248 	/* this extra list walk restart is paranoia,
249 	 * to catch requests being barrier-acked "unexpectedly".
250 	 * It usually should find the same req again, or some READ preceding it. */
251 	list_for_each_entry(req, &tconn->transfer_log, tl_requests)
252 		if (req->epoch == expect_epoch)
253 			break;
254 	list_for_each_entry_safe_from(req, r, &tconn->transfer_log, tl_requests) {
255 		if (req->epoch != expect_epoch)
256 			break;
257 		_req_mod(req, BARRIER_ACKED);
258 	}
259 	spin_unlock_irq(&tconn->req_lock);
260 
261 	return;
262 
263 bail:
264 	spin_unlock_irq(&tconn->req_lock);
265 	conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
266 }
267 
268 
269 /**
270  * _tl_restart() - Walks the transfer log, and applies an action to all requests
271  * @tconn:	DRBD connection.
272  * @what:       The action/event to perform with all request objects
273  *
274  * @what might be one of CONNECTION_LOST_WHILE_PENDING, RESEND, FAIL_FROZEN_DISK_IO,
275  * RESTART_FROZEN_DISK_IO.
276  */
277 /* must hold resource->req_lock */
278 void _tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
279 {
280 	struct drbd_request *req, *r;
281 
282 	list_for_each_entry_safe(req, r, &tconn->transfer_log, tl_requests)
283 		_req_mod(req, what);
284 }
285 
286 void tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
287 {
288 	spin_lock_irq(&tconn->req_lock);
289 	_tl_restart(tconn, what);
290 	spin_unlock_irq(&tconn->req_lock);
291 }
292 
293 /**
294  * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
295  * @tconn:	DRBD connection.
296  *
297  * This is called after the connection to the peer was lost. The storage covered
298  * by the requests on the transfer log gets marked as out of sync. Called from the
299  * receiver thread and the worker thread.
300  */
301 void tl_clear(struct drbd_tconn *tconn)
302 {
303 	tl_restart(tconn, CONNECTION_LOST_WHILE_PENDING);
304 }
305 
306 /**
307  * tl_abort_disk_io() - Abort disk I/O for all requests for a certain mdev in the TL
308  * @mdev:	DRBD device.
309  */
310 void tl_abort_disk_io(struct drbd_conf *mdev)
311 {
312 	struct drbd_tconn *tconn = mdev->tconn;
313 	struct drbd_request *req, *r;
314 
315 	spin_lock_irq(&tconn->req_lock);
316 	list_for_each_entry_safe(req, r, &tconn->transfer_log, tl_requests) {
317 		if (!(req->rq_state & RQ_LOCAL_PENDING))
318 			continue;
319 		if (req->w.mdev != mdev)
320 			continue;
321 		_req_mod(req, ABORT_DISK_IO);
322 	}
323 	spin_unlock_irq(&tconn->req_lock);
324 }
325 
326 static int drbd_thread_setup(void *arg)
327 {
328 	struct drbd_thread *thi = (struct drbd_thread *) arg;
329 	struct drbd_tconn *tconn = thi->tconn;
330 	unsigned long flags;
331 	int retval;
332 
333 	snprintf(current->comm, sizeof(current->comm), "drbd_%c_%s",
334 		 thi->name[0], thi->tconn->name);
335 
336 restart:
337 	retval = thi->function(thi);
338 
339 	spin_lock_irqsave(&thi->t_lock, flags);
340 
341 	/* if the receiver has been "EXITING", the last thing it did
342 	 * was set the conn state to "StandAlone",
343 	 * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
344 	 * and receiver thread will be "started".
345 	 * drbd_thread_start needs to set "RESTARTING" in that case.
346 	 * t_state check and assignment needs to be within the same spinlock,
347 	 * so either thread_start sees EXITING, and can remap to RESTARTING,
348  * or thread_start sees NONE, and can proceed as normal.
349 	 */
350 
351 	if (thi->t_state == RESTARTING) {
352 		conn_info(tconn, "Restarting %s thread\n", thi->name);
353 		thi->t_state = RUNNING;
354 		spin_unlock_irqrestore(&thi->t_lock, flags);
355 		goto restart;
356 	}
357 
358 	thi->task = NULL;
359 	thi->t_state = NONE;
360 	smp_mb();
361 	complete_all(&thi->stop);
362 	spin_unlock_irqrestore(&thi->t_lock, flags);
363 
364 	conn_info(tconn, "Terminating %s\n", current->comm);
365 
366 	/* Release mod reference taken when thread was started */
367 
368 	kref_put(&tconn->kref, &conn_destroy);
369 	module_put(THIS_MODULE);
370 	return retval;
371 }
372 
373 static void drbd_thread_init(struct drbd_tconn *tconn, struct drbd_thread *thi,
374 			     int (*func) (struct drbd_thread *), char *name)
375 {
376 	spin_lock_init(&thi->t_lock);
377 	thi->task    = NULL;
378 	thi->t_state = NONE;
379 	thi->function = func;
380 	thi->tconn = tconn;
381 	strncpy(thi->name, name, ARRAY_SIZE(thi->name));
382 }
383 
384 int drbd_thread_start(struct drbd_thread *thi)
385 {
386 	struct drbd_tconn *tconn = thi->tconn;
387 	struct task_struct *nt;
388 	unsigned long flags;
389 
390 	/* is used from state engine doing drbd_thread_stop_nowait,
391 	 * while holding the req lock irqsave */
392 	spin_lock_irqsave(&thi->t_lock, flags);
393 
394 	switch (thi->t_state) {
395 	case NONE:
396 		conn_info(tconn, "Starting %s thread (from %s [%d])\n",
397 			 thi->name, current->comm, current->pid);
398 
399 		/* Get ref on module for thread - this is released when thread exits */
400 		if (!try_module_get(THIS_MODULE)) {
401 			conn_err(tconn, "Failed to get module reference in drbd_thread_start\n");
402 			spin_unlock_irqrestore(&thi->t_lock, flags);
403 			return false;
404 		}
405 
406 		kref_get(&thi->tconn->kref);
407 
408 		init_completion(&thi->stop);
409 		thi->reset_cpu_mask = 1;
410 		thi->t_state = RUNNING;
411 		spin_unlock_irqrestore(&thi->t_lock, flags);
412 		flush_signals(current); /* otherw. may get -ERESTARTNOINTR */
413 
414 		nt = kthread_create(drbd_thread_setup, (void *) thi,
415 				    "drbd_%c_%s", thi->name[0], thi->tconn->name);
416 
417 		if (IS_ERR(nt)) {
418 			conn_err(tconn, "Couldn't start thread\n");
419 
420 			kref_put(&tconn->kref, &conn_destroy);
421 			module_put(THIS_MODULE);
422 			return false;
423 		}
424 		spin_lock_irqsave(&thi->t_lock, flags);
425 		thi->task = nt;
426 		thi->t_state = RUNNING;
427 		spin_unlock_irqrestore(&thi->t_lock, flags);
428 		wake_up_process(nt);
429 		break;
430 	case EXITING:
431 		thi->t_state = RESTARTING;
432 		conn_info(tconn, "Restarting %s thread (from %s [%d])\n",
433 				thi->name, current->comm, current->pid);
434 		/* fall through */
435 	case RUNNING:
436 	case RESTARTING:
437 	default:
438 		spin_unlock_irqrestore(&thi->t_lock, flags);
439 		break;
440 	}
441 
442 	return true;
443 }
444 
445 
446 void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
447 {
448 	unsigned long flags;
449 
450 	enum drbd_thread_state ns = restart ? RESTARTING : EXITING;
451 
452 	/* may be called from state engine, holding the req lock irqsave */
453 	spin_lock_irqsave(&thi->t_lock, flags);
454 
455 	if (thi->t_state == NONE) {
456 		spin_unlock_irqrestore(&thi->t_lock, flags);
457 		if (restart)
458 			drbd_thread_start(thi);
459 		return;
460 	}
461 
462 	if (thi->t_state != ns) {
463 		if (thi->task == NULL) {
464 			spin_unlock_irqrestore(&thi->t_lock, flags);
465 			return;
466 		}
467 
468 		thi->t_state = ns;
469 		smp_mb();
470 		init_completion(&thi->stop);
471 		if (thi->task != current)
472 			force_sig(DRBD_SIGKILL, thi->task);
473 	}
474 
475 	spin_unlock_irqrestore(&thi->t_lock, flags);
476 
477 	if (wait)
478 		wait_for_completion(&thi->stop);
479 }
480 
481 static struct drbd_thread *drbd_task_to_thread(struct drbd_tconn *tconn, struct task_struct *task)
482 {
483 	struct drbd_thread *thi =
484 		task == tconn->receiver.task ? &tconn->receiver :
485 		task == tconn->asender.task  ? &tconn->asender :
486 		task == tconn->worker.task   ? &tconn->worker : NULL;
487 
488 	return thi;
489 }
490 
491 char *drbd_task_to_thread_name(struct drbd_tconn *tconn, struct task_struct *task)
492 {
493 	struct drbd_thread *thi = drbd_task_to_thread(tconn, task);
494 	return thi ? thi->name : task->comm;
495 }
496 
497 int conn_lowest_minor(struct drbd_tconn *tconn)
498 {
499 	struct drbd_conf *mdev;
500 	int vnr = 0, m;
501 
502 	rcu_read_lock();
503 	mdev = idr_get_next(&tconn->volumes, &vnr);
504 	m = mdev ? mdev_to_minor(mdev) : -1;
505 	rcu_read_unlock();
506 
507 	return m;
508 }
509 
510 #ifdef CONFIG_SMP
511 /**
512  * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
513  * @tconn:	DRBD connection.
514  *
515  * Forces all threads of a connection onto the same CPU. This is beneficial for
516  * DRBD's performance. May be overwritten by user's configuration.
517  */
518 void drbd_calc_cpu_mask(struct drbd_tconn *tconn)
519 {
520 	int ord, cpu;
521 
522 	/* user override. */
523 	if (cpumask_weight(tconn->cpu_mask))
524 		return;
525 
526 	ord = conn_lowest_minor(tconn) % cpumask_weight(cpu_online_mask);
527 	for_each_online_cpu(cpu) {
528 		if (ord-- == 0) {
529 			cpumask_set_cpu(cpu, tconn->cpu_mask);
530 			return;
531 		}
532 	}
533 	/* should not be reached */
534 	cpumask_setall(tconn->cpu_mask);
535 }
536 
537 /**
538  * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
539  *
540  * @thi:	drbd_thread object
541  *
542  * call in the "main loop" of _all_ threads, no need for any mutex, current won't die
543  * prematurely.
544  */
545 void drbd_thread_current_set_cpu(struct drbd_thread *thi)
546 {
547 	struct task_struct *p = current;
548 
549 	if (!thi->reset_cpu_mask)
550 		return;
551 	thi->reset_cpu_mask = 0;
552 	set_cpus_allowed_ptr(p, thi->tconn->cpu_mask);
553 }
554 #endif
555 
556 /**
557  * drbd_header_size  -  size of a packet header
558  *
559  * The header size is a multiple of 8, so any payload following the header is
560  * word aligned on 64-bit architectures.  (The bitmap send and receive code
561  * relies on this.)
562  */
563 unsigned int drbd_header_size(struct drbd_tconn *tconn)
564 {
565 	if (tconn->agreed_pro_version >= 100) {
566 		BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header100), 8));
567 		return sizeof(struct p_header100);
568 	} else {
569 		BUILD_BUG_ON(sizeof(struct p_header80) !=
570 			     sizeof(struct p_header95));
571 		BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header80), 8));
572 		return sizeof(struct p_header80);
573 	}
574 }
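/* For reference (assuming the usual struct layouts in drbd_int.h): the 80- and
 * 95-style headers are 8 bytes, the 100-style header is 16 bytes, so all three
 * satisfy the multiple-of-8 requirement checked by the BUILD_BUG_ONs above. */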
575 
576 static unsigned int prepare_header80(struct p_header80 *h, enum drbd_packet cmd, int size)
577 {
578 	h->magic   = cpu_to_be32(DRBD_MAGIC);
579 	h->command = cpu_to_be16(cmd);
580 	h->length  = cpu_to_be16(size);
581 	return sizeof(struct p_header80);
582 }
583 
584 static unsigned int prepare_header95(struct p_header95 *h, enum drbd_packet cmd, int size)
585 {
586 	h->magic   = cpu_to_be16(DRBD_MAGIC_BIG);
587 	h->command = cpu_to_be16(cmd);
588 	h->length = cpu_to_be32(size);
589 	return sizeof(struct p_header95);
590 }
591 
592 static unsigned int prepare_header100(struct p_header100 *h, enum drbd_packet cmd,
593 				      int size, int vnr)
594 {
595 	h->magic = cpu_to_be32(DRBD_MAGIC_100);
596 	h->volume = cpu_to_be16(vnr);
597 	h->command = cpu_to_be16(cmd);
598 	h->length = cpu_to_be32(size);
599 	h->pad = 0;
600 	return sizeof(struct p_header100);
601 }
602 
603 static unsigned int prepare_header(struct drbd_tconn *tconn, int vnr,
604 				   void *buffer, enum drbd_packet cmd, int size)
605 {
606 	if (tconn->agreed_pro_version >= 100)
607 		return prepare_header100(buffer, cmd, size, vnr);
608 	else if (tconn->agreed_pro_version >= 95 &&
609 		 size > DRBD_MAX_SIZE_H80_PACKET)
610 		return prepare_header95(buffer, cmd, size);
611 	else
612 		return prepare_header80(buffer, cmd, size);
613 }
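/* Example of the dispatch above: on a protocol 95-99 connection a large
 * P_DATA payload that exceeds DRBD_MAX_SIZE_H80_PACKET is sent with the
 * 95-style header (32-bit length field), while small packets such as P_PING
 * keep the compact 80-style header.  Protocol 100 peers always get the
 * 100-style header, which also carries the volume number. */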
614 
615 static void *__conn_prepare_command(struct drbd_tconn *tconn,
616 				    struct drbd_socket *sock)
617 {
618 	if (!sock->socket)
619 		return NULL;
620 	return sock->sbuf + drbd_header_size(tconn);
621 }
622 
623 void *conn_prepare_command(struct drbd_tconn *tconn, struct drbd_socket *sock)
624 {
625 	void *p;
626 
627 	mutex_lock(&sock->mutex);
628 	p = __conn_prepare_command(tconn, sock);
629 	if (!p)
630 		mutex_unlock(&sock->mutex);
631 
632 	return p;
633 }
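/* Locking convention worth spelling out: on success the socket mutex taken in
 * conn_prepare_command() stays held; it is released by the matching
 * conn_send_command()/drbd_send_command(), or immediately above on failure. */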
634 
635 void *drbd_prepare_command(struct drbd_conf *mdev, struct drbd_socket *sock)
636 {
637 	return conn_prepare_command(mdev->tconn, sock);
638 }
639 
640 static int __send_command(struct drbd_tconn *tconn, int vnr,
641 			  struct drbd_socket *sock, enum drbd_packet cmd,
642 			  unsigned int header_size, void *data,
643 			  unsigned int size)
644 {
645 	int msg_flags;
646 	int err;
647 
648 	/*
649 	 * Called with @data == NULL and the size of the data blocks in @size
650 	 * for commands that send data blocks.  For those commands, omit the
651 	 * MSG_MORE flag: this will increase the likelihood that data blocks
652 	 * which are page aligned on the sender will end up page aligned on the
653 	 * receiver.
654 	 */
655 	msg_flags = data ? MSG_MORE : 0;
656 
657 	header_size += prepare_header(tconn, vnr, sock->sbuf, cmd,
658 				      header_size + size);
659 	err = drbd_send_all(tconn, sock->socket, sock->sbuf, header_size,
660 			    msg_flags);
661 	if (data && !err)
662 		err = drbd_send_all(tconn, sock->socket, data, size, 0);
663 	return err;
664 }
665 
666 static int __conn_send_command(struct drbd_tconn *tconn, struct drbd_socket *sock,
667 			       enum drbd_packet cmd, unsigned int header_size,
668 			       void *data, unsigned int size)
669 {
670 	return __send_command(tconn, 0, sock, cmd, header_size, data, size);
671 }
672 
673 int conn_send_command(struct drbd_tconn *tconn, struct drbd_socket *sock,
674 		      enum drbd_packet cmd, unsigned int header_size,
675 		      void *data, unsigned int size)
676 {
677 	int err;
678 
679 	err = __conn_send_command(tconn, sock, cmd, header_size, data, size);
680 	mutex_unlock(&sock->mutex);
681 	return err;
682 }
683 
684 int drbd_send_command(struct drbd_conf *mdev, struct drbd_socket *sock,
685 		      enum drbd_packet cmd, unsigned int header_size,
686 		      void *data, unsigned int size)
687 {
688 	int err;
689 
690 	err = __send_command(mdev->tconn, mdev->vnr, sock, cmd, header_size,
691 			     data, size);
692 	mutex_unlock(&sock->mutex);
693 	return err;
694 }
695 
696 int drbd_send_ping(struct drbd_tconn *tconn)
697 {
698 	struct drbd_socket *sock;
699 
700 	sock = &tconn->meta;
701 	if (!conn_prepare_command(tconn, sock))
702 		return -EIO;
703 	return conn_send_command(tconn, sock, P_PING, 0, NULL, 0);
704 }
705 
706 int drbd_send_ping_ack(struct drbd_tconn *tconn)
707 {
708 	struct drbd_socket *sock;
709 
710 	sock = &tconn->meta;
711 	if (!conn_prepare_command(tconn, sock))
712 		return -EIO;
713 	return conn_send_command(tconn, sock, P_PING_ACK, 0, NULL, 0);
714 }
715 
716 int drbd_send_sync_param(struct drbd_conf *mdev)
717 {
718 	struct drbd_socket *sock;
719 	struct p_rs_param_95 *p;
720 	int size;
721 	const int apv = mdev->tconn->agreed_pro_version;
722 	enum drbd_packet cmd;
723 	struct net_conf *nc;
724 	struct disk_conf *dc;
725 
726 	sock = &mdev->tconn->data;
727 	p = drbd_prepare_command(mdev, sock);
728 	if (!p)
729 		return -EIO;
730 
731 	rcu_read_lock();
732 	nc = rcu_dereference(mdev->tconn->net_conf);
733 
734 	size = apv <= 87 ? sizeof(struct p_rs_param)
735 		: apv == 88 ? sizeof(struct p_rs_param)
736 			+ strlen(nc->verify_alg) + 1
737 		: apv <= 94 ? sizeof(struct p_rs_param_89)
738 		: /* apv >= 95 */ sizeof(struct p_rs_param_95);
739 
740 	cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;
741 
742 	/* initialize verify_alg and csums_alg */
743 	memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
744 
745 	if (get_ldev(mdev)) {
746 		dc = rcu_dereference(mdev->ldev->disk_conf);
747 		p->resync_rate = cpu_to_be32(dc->resync_rate);
748 		p->c_plan_ahead = cpu_to_be32(dc->c_plan_ahead);
749 		p->c_delay_target = cpu_to_be32(dc->c_delay_target);
750 		p->c_fill_target = cpu_to_be32(dc->c_fill_target);
751 		p->c_max_rate = cpu_to_be32(dc->c_max_rate);
752 		put_ldev(mdev);
753 	} else {
754 		p->resync_rate = cpu_to_be32(DRBD_RESYNC_RATE_DEF);
755 		p->c_plan_ahead = cpu_to_be32(DRBD_C_PLAN_AHEAD_DEF);
756 		p->c_delay_target = cpu_to_be32(DRBD_C_DELAY_TARGET_DEF);
757 		p->c_fill_target = cpu_to_be32(DRBD_C_FILL_TARGET_DEF);
758 		p->c_max_rate = cpu_to_be32(DRBD_C_MAX_RATE_DEF);
759 	}
760 
761 	if (apv >= 88)
762 		strcpy(p->verify_alg, nc->verify_alg);
763 	if (apv >= 89)
764 		strcpy(p->csums_alg, nc->csums_alg);
765 	rcu_read_unlock();
766 
767 	return drbd_send_command(mdev, sock, cmd, size, NULL, 0);
768 }
769 
770 int __drbd_send_protocol(struct drbd_tconn *tconn, enum drbd_packet cmd)
771 {
772 	struct drbd_socket *sock;
773 	struct p_protocol *p;
774 	struct net_conf *nc;
775 	int size, cf;
776 
777 	sock = &tconn->data;
778 	p = __conn_prepare_command(tconn, sock);
779 	if (!p)
780 		return -EIO;
781 
782 	rcu_read_lock();
783 	nc = rcu_dereference(tconn->net_conf);
784 
785 	if (nc->tentative && tconn->agreed_pro_version < 92) {
786 		rcu_read_unlock();
787 		mutex_unlock(&sock->mutex);
788 		conn_err(tconn, "--dry-run is not supported by peer");
789 		return -EOPNOTSUPP;
790 	}
791 
792 	size = sizeof(*p);
793 	if (tconn->agreed_pro_version >= 87)
794 		size += strlen(nc->integrity_alg) + 1;
795 
796 	p->protocol      = cpu_to_be32(nc->wire_protocol);
797 	p->after_sb_0p   = cpu_to_be32(nc->after_sb_0p);
798 	p->after_sb_1p   = cpu_to_be32(nc->after_sb_1p);
799 	p->after_sb_2p   = cpu_to_be32(nc->after_sb_2p);
800 	p->two_primaries = cpu_to_be32(nc->two_primaries);
801 	cf = 0;
802 	if (nc->discard_my_data)
803 		cf |= CF_DISCARD_MY_DATA;
804 	if (nc->tentative)
805 		cf |= CF_DRY_RUN;
806 	p->conn_flags    = cpu_to_be32(cf);
807 
808 	if (tconn->agreed_pro_version >= 87)
809 		strcpy(p->integrity_alg, nc->integrity_alg);
810 	rcu_read_unlock();
811 
812 	return __conn_send_command(tconn, sock, cmd, size, NULL, 0);
813 }
814 
815 int drbd_send_protocol(struct drbd_tconn *tconn)
816 {
817 	int err;
818 
819 	mutex_lock(&tconn->data.mutex);
820 	err = __drbd_send_protocol(tconn, P_PROTOCOL);
821 	mutex_unlock(&tconn->data.mutex);
822 
823 	return err;
824 }
825 
826 int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
827 {
828 	struct drbd_socket *sock;
829 	struct p_uuids *p;
830 	int i;
831 
832 	if (!get_ldev_if_state(mdev, D_NEGOTIATING))
833 		return 0;
834 
835 	sock = &mdev->tconn->data;
836 	p = drbd_prepare_command(mdev, sock);
837 	if (!p) {
838 		put_ldev(mdev);
839 		return -EIO;
840 	}
841 	spin_lock_irq(&mdev->ldev->md.uuid_lock);
842 	for (i = UI_CURRENT; i < UI_SIZE; i++)
843 		p->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
844 	spin_unlock_irq(&mdev->ldev->md.uuid_lock);
845 
846 	mdev->comm_bm_set = drbd_bm_total_weight(mdev);
847 	p->uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
848 	rcu_read_lock();
849 	uuid_flags |= rcu_dereference(mdev->tconn->net_conf)->discard_my_data ? 1 : 0;
850 	rcu_read_unlock();
851 	uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
852 	uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
853 	p->uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);
854 
855 	put_ldev(mdev);
856 	return drbd_send_command(mdev, sock, P_UUIDS, sizeof(*p), NULL, 0);
857 }
858 
859 int drbd_send_uuids(struct drbd_conf *mdev)
860 {
861 	return _drbd_send_uuids(mdev, 0);
862 }
863 
864 int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev)
865 {
866 	return _drbd_send_uuids(mdev, 8);
867 }
868 
869 void drbd_print_uuids(struct drbd_conf *mdev, const char *text)
870 {
871 	if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
872 		u64 *uuid = mdev->ldev->md.uuid;
873 		dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX\n",
874 		     text,
875 		     (unsigned long long)uuid[UI_CURRENT],
876 		     (unsigned long long)uuid[UI_BITMAP],
877 		     (unsigned long long)uuid[UI_HISTORY_START],
878 		     (unsigned long long)uuid[UI_HISTORY_END]);
879 		put_ldev(mdev);
880 	} else {
881 		dev_info(DEV, "%s effective data uuid: %016llX\n",
882 				text,
883 				(unsigned long long)mdev->ed_uuid);
884 	}
885 }
886 
887 void drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
888 {
889 	struct drbd_socket *sock;
890 	struct p_rs_uuid *p;
891 	u64 uuid;
892 
893 	D_ASSERT(mdev->state.disk == D_UP_TO_DATE);
894 
895 	uuid = mdev->ldev->md.uuid[UI_BITMAP];
896 	if (uuid && uuid != UUID_JUST_CREATED)
897 		uuid = uuid + UUID_NEW_BM_OFFSET;
898 	else
899 		get_random_bytes(&uuid, sizeof(u64));
900 	drbd_uuid_set(mdev, UI_BITMAP, uuid);
901 	drbd_print_uuids(mdev, "updated sync UUID");
902 	drbd_md_sync(mdev);
903 
904 	sock = &mdev->tconn->data;
905 	p = drbd_prepare_command(mdev, sock);
906 	if (p) {
907 		p->uuid = cpu_to_be64(uuid);
908 		drbd_send_command(mdev, sock, P_SYNC_UUID, sizeof(*p), NULL, 0);
909 	}
910 }
911 
912 int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
913 {
914 	struct drbd_socket *sock;
915 	struct p_sizes *p;
916 	sector_t d_size, u_size;
917 	int q_order_type;
918 	unsigned int max_bio_size;
919 
920 	if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
921 		D_ASSERT(mdev->ldev->backing_bdev);
922 		d_size = drbd_get_max_capacity(mdev->ldev);
923 		rcu_read_lock();
924 		u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
925 		rcu_read_unlock();
926 		q_order_type = drbd_queue_order_type(mdev);
927 		max_bio_size = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
928 		max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE);
929 		put_ldev(mdev);
930 	} else {
931 		d_size = 0;
932 		u_size = 0;
933 		q_order_type = QUEUE_ORDERED_NONE;
934 		max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
935 	}
936 
937 	sock = &mdev->tconn->data;
938 	p = drbd_prepare_command(mdev, sock);
939 	if (!p)
940 		return -EIO;
941 
942 	if (mdev->tconn->agreed_pro_version <= 94)
943 		max_bio_size = min(max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
944 	else if (mdev->tconn->agreed_pro_version < 100)
945 		max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE_P95);
946 
947 	p->d_size = cpu_to_be64(d_size);
948 	p->u_size = cpu_to_be64(u_size);
949 	p->c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
950 	p->max_bio_size = cpu_to_be32(max_bio_size);
951 	p->queue_order_type = cpu_to_be16(q_order_type);
952 	p->dds_flags = cpu_to_be16(flags);
953 	return drbd_send_command(mdev, sock, P_SIZES, sizeof(*p), NULL, 0);
954 }
955 
956 /**
957  * drbd_send_current_state() - Sends the drbd state to the peer
958  * @mdev:	DRBD device.
959  */
960 int drbd_send_current_state(struct drbd_conf *mdev)
961 {
962 	struct drbd_socket *sock;
963 	struct p_state *p;
964 
965 	sock = &mdev->tconn->data;
966 	p = drbd_prepare_command(mdev, sock);
967 	if (!p)
968 		return -EIO;
969 	p->state = cpu_to_be32(mdev->state.i); /* Within the send mutex */
970 	return drbd_send_command(mdev, sock, P_STATE, sizeof(*p), NULL, 0);
971 }
972 
973 /**
974  * drbd_send_state() - After a state change, sends the new state to the peer
975  * @mdev:      DRBD device.
976  * @state:     the state to send, not necessarily the current state.
977  *
978  * Each state change queues an "after_state_ch" work, which will eventually
979  * send the resulting new state to the peer. If more state changes happen
980  * between queuing and processing of the after_state_ch work, we still
981  * want to send each intermediary state in the order it occurred.
982  */
983 int drbd_send_state(struct drbd_conf *mdev, union drbd_state state)
984 {
985 	struct drbd_socket *sock;
986 	struct p_state *p;
987 
988 	sock = &mdev->tconn->data;
989 	p = drbd_prepare_command(mdev, sock);
990 	if (!p)
991 		return -EIO;
992 	p->state = cpu_to_be32(state.i); /* Within the send mutex */
993 	return drbd_send_command(mdev, sock, P_STATE, sizeof(*p), NULL, 0);
994 }
995 
996 int drbd_send_state_req(struct drbd_conf *mdev, union drbd_state mask, union drbd_state val)
997 {
998 	struct drbd_socket *sock;
999 	struct p_req_state *p;
1000 
1001 	sock = &mdev->tconn->data;
1002 	p = drbd_prepare_command(mdev, sock);
1003 	if (!p)
1004 		return -EIO;
1005 	p->mask = cpu_to_be32(mask.i);
1006 	p->val = cpu_to_be32(val.i);
1007 	return drbd_send_command(mdev, sock, P_STATE_CHG_REQ, sizeof(*p), NULL, 0);
1008 }
1009 
1010 int conn_send_state_req(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val)
1011 {
1012 	enum drbd_packet cmd;
1013 	struct drbd_socket *sock;
1014 	struct p_req_state *p;
1015 
1016 	cmd = tconn->agreed_pro_version < 100 ? P_STATE_CHG_REQ : P_CONN_ST_CHG_REQ;
1017 	sock = &tconn->data;
1018 	p = conn_prepare_command(tconn, sock);
1019 	if (!p)
1020 		return -EIO;
1021 	p->mask = cpu_to_be32(mask.i);
1022 	p->val = cpu_to_be32(val.i);
1023 	return conn_send_command(tconn, sock, cmd, sizeof(*p), NULL, 0);
1024 }
1025 
1026 void drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode)
1027 {
1028 	struct drbd_socket *sock;
1029 	struct p_req_state_reply *p;
1030 
1031 	sock = &mdev->tconn->meta;
1032 	p = drbd_prepare_command(mdev, sock);
1033 	if (p) {
1034 		p->retcode = cpu_to_be32(retcode);
1035 		drbd_send_command(mdev, sock, P_STATE_CHG_REPLY, sizeof(*p), NULL, 0);
1036 	}
1037 }
1038 
1039 void conn_send_sr_reply(struct drbd_tconn *tconn, enum drbd_state_rv retcode)
1040 {
1041 	struct drbd_socket *sock;
1042 	struct p_req_state_reply *p;
1043 	enum drbd_packet cmd = tconn->agreed_pro_version < 100 ? P_STATE_CHG_REPLY : P_CONN_ST_CHG_REPLY;
1044 
1045 	sock = &tconn->meta;
1046 	p = conn_prepare_command(tconn, sock);
1047 	if (p) {
1048 		p->retcode = cpu_to_be32(retcode);
1049 		conn_send_command(tconn, sock, cmd, sizeof(*p), NULL, 0);
1050 	}
1051 }
1052 
1053 static void dcbp_set_code(struct p_compressed_bm *p, enum drbd_bitmap_code code)
1054 {
1055 	BUG_ON(code & ~0xf);
1056 	p->encoding = (p->encoding & ~0xf) | code;
1057 }
1058 
1059 static void dcbp_set_start(struct p_compressed_bm *p, int set)
1060 {
1061 	p->encoding = (p->encoding & ~0x80) | (set ? 0x80 : 0);
1062 }
1063 
1064 static void dcbp_set_pad_bits(struct p_compressed_bm *p, int n)
1065 {
1066 	BUG_ON(n & ~0x7);
1067 	p->encoding = (p->encoding & (~0x7 << 4)) | (n << 4);
1068 }
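/* Putting the three helpers above together, p->encoding is laid out as:
 *   bits 0-3: bitmap encoding (enum drbd_bitmap_code),
 *   bits 4-6: number of pad bits at the end of the VLI stream,
 *   bit 7:    whether the first run length describes set bits. */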
1069 
1070 int fill_bitmap_rle_bits(struct drbd_conf *mdev,
1071 			 struct p_compressed_bm *p,
1072 			 unsigned int size,
1073 			 struct bm_xfer_ctx *c)
1074 {
1075 	struct bitstream bs;
1076 	unsigned long plain_bits;
1077 	unsigned long tmp;
1078 	unsigned long rl;
1079 	unsigned len;
1080 	unsigned toggle;
1081 	int bits, use_rle;
1082 
1083 	/* may we use this feature? */
1084 	rcu_read_lock();
1085 	use_rle = rcu_dereference(mdev->tconn->net_conf)->use_rle;
1086 	rcu_read_unlock();
1087 	if (!use_rle || mdev->tconn->agreed_pro_version < 90)
1088 		return 0;
1089 
1090 	if (c->bit_offset >= c->bm_bits)
1091 		return 0; /* nothing to do. */
1092 
1093 	/* use at most this many bytes */
1094 	bitstream_init(&bs, p->code, size, 0);
1095 	memset(p->code, 0, size);
1096 	/* plain bits covered in this code string */
1097 	plain_bits = 0;
1098 
1099 	/* p->encoding & 0x80 stores whether the first run length is set.
1100 	 * bit offset is implicit.
1101 	 * start with toggle == 2 to be able to tell the first iteration */
1102 	toggle = 2;
1103 
1104 	/* see how many plain bits we can stuff into one packet
1105 	 * using RLE and VLI. */
1106 	do {
1107 		tmp = (toggle == 0) ? _drbd_bm_find_next_zero(mdev, c->bit_offset)
1108 				    : _drbd_bm_find_next(mdev, c->bit_offset);
1109 		if (tmp == -1UL)
1110 			tmp = c->bm_bits;
1111 		rl = tmp - c->bit_offset;
1112 
1113 		if (toggle == 2) { /* first iteration */
1114 			if (rl == 0) {
1115 				/* the first checked bit was set,
1116 				 * store start value, */
1117 				dcbp_set_start(p, 1);
1118 				/* but skip encoding of zero run length */
1119 				toggle = !toggle;
1120 				continue;
1121 			}
1122 			dcbp_set_start(p, 0);
1123 		}
1124 
1125 		/* paranoia: catch zero runlength.
1126 		 * can only happen if bitmap is modified while we scan it. */
1127 		if (rl == 0) {
1128 			dev_err(DEV, "unexpected zero runlength while encoding bitmap "
1129 			    "t:%u bo:%lu\n", toggle, c->bit_offset);
1130 			return -1;
1131 		}
1132 
1133 		bits = vli_encode_bits(&bs, rl);
1134 		if (bits == -ENOBUFS) /* buffer full */
1135 			break;
1136 		if (bits <= 0) {
1137 			dev_err(DEV, "error while encoding bitmap: %d\n", bits);
1138 			return 0;
1139 		}
1140 
1141 		toggle = !toggle;
1142 		plain_bits += rl;
1143 		c->bit_offset = tmp;
1144 	} while (c->bit_offset < c->bm_bits);
1145 
1146 	len = bs.cur.b - p->code + !!bs.cur.bit;
1147 
1148 	if (plain_bits < (len << 3)) {
1149 		/* incompressible with this method.
1150 		 * we need to rewind both word and bit position. */
1151 		c->bit_offset -= plain_bits;
1152 		bm_xfer_ctx_bit_to_word_offset(c);
1153 		c->bit_offset = c->word_offset * BITS_PER_LONG;
1154 		return 0;
1155 	}
1156 
1157 	/* RLE + VLI was able to compress it just fine.
1158 	 * update c->word_offset. */
1159 	bm_xfer_ctx_bit_to_word_offset(c);
1160 
1161 	/* store pad_bits */
1162 	dcbp_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);
1163 
1164 	return len;
1165 }
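/* Illustrative example of the encoding built above: a bitmap chunk starting
 * 0000 1111 11... yields run lengths 4, 6, ... with the start flag cleared
 * (the first run is of clear bits); each VLI-encoded run length toggles
 * between clear and set runs, and the resulting code is only used if it ends
 * up smaller than the plain bits it covers. */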
1166 
1167 /**
1168  * send_bitmap_rle_or_plain
1169  *
1170  * Return 0 when done, 1 when another iteration is needed, and a negative error
1171  * code upon failure.
1172  */
1173 static int
1174 send_bitmap_rle_or_plain(struct drbd_conf *mdev, struct bm_xfer_ctx *c)
1175 {
1176 	struct drbd_socket *sock = &mdev->tconn->data;
1177 	unsigned int header_size = drbd_header_size(mdev->tconn);
1178 	struct p_compressed_bm *p = sock->sbuf + header_size;
1179 	int len, err;
1180 
1181 	len = fill_bitmap_rle_bits(mdev, p,
1182 			DRBD_SOCKET_BUFFER_SIZE - header_size - sizeof(*p), c);
1183 	if (len < 0)
1184 		return -EIO;
1185 
1186 	if (len) {
1187 		dcbp_set_code(p, RLE_VLI_Bits);
1188 		err = __send_command(mdev->tconn, mdev->vnr, sock,
1189 				     P_COMPRESSED_BITMAP, sizeof(*p) + len,
1190 				     NULL, 0);
1191 		c->packets[0]++;
1192 		c->bytes[0] += header_size + sizeof(*p) + len;
1193 
1194 		if (c->bit_offset >= c->bm_bits)
1195 			len = 0; /* DONE */
1196 	} else {
1197 		/* was not compressible.
1198 		 * send a buffer full of plain text bits instead. */
1199 		unsigned int data_size;
1200 		unsigned long num_words;
1201 		unsigned long *p = sock->sbuf + header_size;
1202 
1203 		data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
1204 		num_words = min_t(size_t, data_size / sizeof(*p),
1205 				  c->bm_words - c->word_offset);
1206 		len = num_words * sizeof(*p);
1207 		if (len)
1208 			drbd_bm_get_lel(mdev, c->word_offset, num_words, p);
1209 		err = __send_command(mdev->tconn, mdev->vnr, sock, P_BITMAP, len, NULL, 0);
1210 		c->word_offset += num_words;
1211 		c->bit_offset = c->word_offset * BITS_PER_LONG;
1212 
1213 		c->packets[1]++;
1214 		c->bytes[1] += header_size + len;
1215 
1216 		if (c->bit_offset > c->bm_bits)
1217 			c->bit_offset = c->bm_bits;
1218 	}
1219 	if (!err) {
1220 		if (len == 0) {
1221 			INFO_bm_xfer_stats(mdev, "send", c);
1222 			return 0;
1223 		} else
1224 			return 1;
1225 	}
1226 	return -EIO;
1227 }
1228 
1229 /* See the comment at receive_bitmap() */
1230 static int _drbd_send_bitmap(struct drbd_conf *mdev)
1231 {
1232 	struct bm_xfer_ctx c;
1233 	int err;
1234 
1235 	if (!expect(mdev->bitmap))
1236 		return false;
1237 
1238 	if (get_ldev(mdev)) {
1239 		if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
1240 			dev_info(DEV, "Writing the whole bitmap, MDF_FullSync was set.\n");
1241 			drbd_bm_set_all(mdev);
1242 			if (drbd_bm_write(mdev)) {
1243 				/* write_bm did fail! Leave full sync flag set in Meta P_DATA
1244 				 * but otherwise process as per normal - need to tell other
1245 				 * side that a full resync is required! */
1246 				dev_err(DEV, "Failed to write bitmap to disk!\n");
1247 			} else {
1248 				drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
1249 				drbd_md_sync(mdev);
1250 			}
1251 		}
1252 		put_ldev(mdev);
1253 	}
1254 
1255 	c = (struct bm_xfer_ctx) {
1256 		.bm_bits = drbd_bm_bits(mdev),
1257 		.bm_words = drbd_bm_words(mdev),
1258 	};
1259 
1260 	do {
1261 		err = send_bitmap_rle_or_plain(mdev, &c);
1262 	} while (err > 0);
1263 
1264 	return err == 0;
1265 }
1266 
1267 int drbd_send_bitmap(struct drbd_conf *mdev)
1268 {
1269 	struct drbd_socket *sock = &mdev->tconn->data;
1270 	int err = -1;
1271 
1272 	mutex_lock(&sock->mutex);
1273 	if (sock->socket)
1274 		err = !_drbd_send_bitmap(mdev);
1275 	mutex_unlock(&sock->mutex);
1276 	return err;
1277 }
1278 
1279 void drbd_send_b_ack(struct drbd_tconn *tconn, u32 barrier_nr, u32 set_size)
1280 {
1281 	struct drbd_socket *sock;
1282 	struct p_barrier_ack *p;
1283 
1284 	if (tconn->cstate < C_WF_REPORT_PARAMS)
1285 		return;
1286 
1287 	sock = &tconn->meta;
1288 	p = conn_prepare_command(tconn, sock);
1289 	if (!p)
1290 		return;
1291 	p->barrier = barrier_nr;
1292 	p->set_size = cpu_to_be32(set_size);
1293 	conn_send_command(tconn, sock, P_BARRIER_ACK, sizeof(*p), NULL, 0);
1294 }
1295 
1296 /**
1297  * _drbd_send_ack() - Sends an ack packet
1298  * @mdev:	DRBD device.
1299  * @cmd:	Packet command code.
1300  * @sector:	sector, needs to be in big endian byte order
1301  * @blksize:	size in byte, needs to be in big endian byte order
1302  * @block_id:	Id, big endian byte order
1303  */
1304 static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
1305 			  u64 sector, u32 blksize, u64 block_id)
1306 {
1307 	struct drbd_socket *sock;
1308 	struct p_block_ack *p;
1309 
1310 	if (mdev->state.conn < C_CONNECTED)
1311 		return -EIO;
1312 
1313 	sock = &mdev->tconn->meta;
1314 	p = drbd_prepare_command(mdev, sock);
1315 	if (!p)
1316 		return -EIO;
1317 	p->sector = sector;
1318 	p->block_id = block_id;
1319 	p->blksize = blksize;
1320 	p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
1321 	return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
1322 }
1323 
1324 /* dp->sector and dp->block_id already/still in network byte order,
1325  * data_size is payload size according to dp->head,
1326  * and may need to be corrected for digest size. */
1327 void drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packet cmd,
1328 		      struct p_data *dp, int data_size)
1329 {
1330 	if (mdev->tconn->peer_integrity_tfm)
1331 		data_size -= crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
1332 	_drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
1333 		       dp->block_id);
1334 }
1335 
1336 void drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packet cmd,
1337 		      struct p_block_req *rp)
1338 {
1339 	_drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
1340 }
1341 
1342 /**
1343  * drbd_send_ack() - Sends an ack packet
1344  * @mdev:	DRBD device
1345  * @cmd:	packet command code
1346  * @peer_req:	peer request
1347  */
1348 int drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
1349 		  struct drbd_peer_request *peer_req)
1350 {
1351 	return _drbd_send_ack(mdev, cmd,
1352 			      cpu_to_be64(peer_req->i.sector),
1353 			      cpu_to_be32(peer_req->i.size),
1354 			      peer_req->block_id);
1355 }
1356 
1357 /* This function misuses the block_id field to signal if the blocks
1358  * are in sync or not. */
1359 int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packet cmd,
1360 		     sector_t sector, int blksize, u64 block_id)
1361 {
1362 	return _drbd_send_ack(mdev, cmd,
1363 			      cpu_to_be64(sector),
1364 			      cpu_to_be32(blksize),
1365 			      cpu_to_be64(block_id));
1366 }
1367 
1368 int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
1369 		       sector_t sector, int size, u64 block_id)
1370 {
1371 	struct drbd_socket *sock;
1372 	struct p_block_req *p;
1373 
1374 	sock = &mdev->tconn->data;
1375 	p = drbd_prepare_command(mdev, sock);
1376 	if (!p)
1377 		return -EIO;
1378 	p->sector = cpu_to_be64(sector);
1379 	p->block_id = block_id;
1380 	p->blksize = cpu_to_be32(size);
1381 	return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
1382 }
1383 
1384 int drbd_send_drequest_csum(struct drbd_conf *mdev, sector_t sector, int size,
1385 			    void *digest, int digest_size, enum drbd_packet cmd)
1386 {
1387 	struct drbd_socket *sock;
1388 	struct p_block_req *p;
1389 
1390 	/* FIXME: Put the digest into the preallocated socket buffer.  */
1391 
1392 	sock = &mdev->tconn->data;
1393 	p = drbd_prepare_command(mdev, sock);
1394 	if (!p)
1395 		return -EIO;
1396 	p->sector = cpu_to_be64(sector);
1397 	p->block_id = ID_SYNCER /* unused */;
1398 	p->blksize = cpu_to_be32(size);
1399 	return drbd_send_command(mdev, sock, cmd, sizeof(*p),
1400 				 digest, digest_size);
1401 }
1402 
1403 int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
1404 {
1405 	struct drbd_socket *sock;
1406 	struct p_block_req *p;
1407 
1408 	sock = &mdev->tconn->data;
1409 	p = drbd_prepare_command(mdev, sock);
1410 	if (!p)
1411 		return -EIO;
1412 	p->sector = cpu_to_be64(sector);
1413 	p->block_id = ID_SYNCER /* unused */;
1414 	p->blksize = cpu_to_be32(size);
1415 	return drbd_send_command(mdev, sock, P_OV_REQUEST, sizeof(*p), NULL, 0);
1416 }
1417 
1418 /* called on sndtimeo
1419  * returns false if we should retry,
1420  * true if we think connection is dead
1421  */
1422 static int we_should_drop_the_connection(struct drbd_tconn *tconn, struct socket *sock)
1423 {
1424 	int drop_it;
1425 	/* long elapsed = (long)(jiffies - mdev->last_received); */
1426 
1427 	drop_it =   tconn->meta.socket == sock
1428 		|| !tconn->asender.task
1429 		|| get_t_state(&tconn->asender) != RUNNING
1430 		|| tconn->cstate < C_WF_REPORT_PARAMS;
1431 
1432 	if (drop_it)
1433 		return true;
1434 
1435 	drop_it = !--tconn->ko_count;
1436 	if (!drop_it) {
1437 		conn_err(tconn, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
1438 			 current->comm, current->pid, tconn->ko_count);
1439 		request_ping(tconn);
1440 	}
1441 
1442 	return drop_it; /* && (mdev->state == R_PRIMARY) */;
1443 }
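/* Note on ko_count: it is re-armed from net_conf each time drbd_send() starts
 * writing on the data socket (see drbd_send() below).  Every send timeout that
 * lands here decrements it; as long as it stays non-zero we log, ask for a
 * ping and retry, and once it reaches zero the connection is considered dead. */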
1444 
1445 static void drbd_update_congested(struct drbd_tconn *tconn)
1446 {
1447 	struct sock *sk = tconn->data.socket->sk;
1448 	if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
1449 		set_bit(NET_CONGESTED, &tconn->flags);
1450 }
1451 
1452 /* The idea of sendpage seems to be to put some kind of reference
1453  * to the page into the skb, and to hand it over to the NIC. In
1454  * this process get_page() gets called.
1455  *
1456  * As soon as the page was really sent over the network put_page()
1457  * gets called by some part of the network layer. [ NIC driver? ]
1458  *
1459  * [ get_page() / put_page() increment/decrement the count. If count
1460  *   reaches 0 the page will be freed. ]
1461  *
1462  * This works nicely with pages from FSs.
1463  * But this means that in protocol A we might signal IO completion too early!
1464  *
1465  * In order not to corrupt data during a resync we must make sure
1466  * that we do not reuse our own buffer pages (EEs) too early, therefore
1467  * we have the net_ee list.
1468  *
1469  * XFS seems to have problems, still, it submits pages with page_count == 0!
1470  * As a workaround, we disable sendpage on pages
1471  * with page_count == 0 or PageSlab.
1472  */
1473 static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
1474 			      int offset, size_t size, unsigned msg_flags)
1475 {
1476 	struct socket *socket;
1477 	void *addr;
1478 	int err;
1479 
1480 	socket = mdev->tconn->data.socket;
1481 	addr = kmap(page) + offset;
1482 	err = drbd_send_all(mdev->tconn, socket, addr, size, msg_flags);
1483 	kunmap(page);
1484 	if (!err)
1485 		mdev->send_cnt += size >> 9;
1486 	return err;
1487 }
1488 
1489 static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
1490 		    int offset, size_t size, unsigned msg_flags)
1491 {
1492 	struct socket *socket = mdev->tconn->data.socket;
1493 	mm_segment_t oldfs = get_fs();
1494 	int len = size;
1495 	int err = -EIO;
1496 
1497 	/* e.g. XFS meta- & log-data is in slab pages, which have a
1498 	 * page_count of 0 and/or have PageSlab() set.
1499 	 * we cannot use send_page for those, as that does get_page();
1500 	 * put_page(); and would cause either a VM_BUG directly, or
1501 	 * __page_cache_release a page that would actually still be referenced
1502 	 * by someone, leading to some obscure delayed Oops somewhere else. */
1503 	if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
1504 		return _drbd_no_send_page(mdev, page, offset, size, msg_flags);
1505 
1506 	msg_flags |= MSG_NOSIGNAL;
1507 	drbd_update_congested(mdev->tconn);
1508 	set_fs(KERNEL_DS);
1509 	do {
1510 		int sent;
1511 
1512 		sent = socket->ops->sendpage(socket, page, offset, len, msg_flags);
1513 		if (sent <= 0) {
1514 			if (sent == -EAGAIN) {
1515 				if (we_should_drop_the_connection(mdev->tconn, socket))
1516 					break;
1517 				continue;
1518 			}
1519 			dev_warn(DEV, "%s: size=%d len=%d sent=%d\n",
1520 			     __func__, (int)size, len, sent);
1521 			if (sent < 0)
1522 				err = sent;
1523 			break;
1524 		}
1525 		len    -= sent;
1526 		offset += sent;
1527 	} while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED*/);
1528 	set_fs(oldfs);
1529 	clear_bit(NET_CONGESTED, &mdev->tconn->flags);
1530 
1531 	if (len == 0) {
1532 		err = 0;
1533 		mdev->send_cnt += size >> 9;
1534 	}
1535 	return err;
1536 }
1537 
1538 static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
1539 {
1540 	struct bio_vec *bvec;
1541 	int i;
1542 	/* hint all but last page with MSG_MORE */
1543 	bio_for_each_segment(bvec, bio, i) {
1544 		int err;
1545 
1546 		err = _drbd_no_send_page(mdev, bvec->bv_page,
1547 					 bvec->bv_offset, bvec->bv_len,
1548 					 i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
1549 		if (err)
1550 			return err;
1551 	}
1552 	return 0;
1553 }
1554 
1555 static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
1556 {
1557 	struct bio_vec *bvec;
1558 	int i;
1559 	/* hint all but last page with MSG_MORE */
1560 	bio_for_each_segment(bvec, bio, i) {
1561 		int err;
1562 
1563 		err = _drbd_send_page(mdev, bvec->bv_page,
1564 				      bvec->bv_offset, bvec->bv_len,
1565 				      i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
1566 		if (err)
1567 			return err;
1568 	}
1569 	return 0;
1570 }
1571 
1572 static int _drbd_send_zc_ee(struct drbd_conf *mdev,
1573 			    struct drbd_peer_request *peer_req)
1574 {
1575 	struct page *page = peer_req->pages;
1576 	unsigned len = peer_req->i.size;
1577 	int err;
1578 
1579 	/* hint all but last page with MSG_MORE */
1580 	page_chain_for_each(page) {
1581 		unsigned l = min_t(unsigned, len, PAGE_SIZE);
1582 
1583 		err = _drbd_send_page(mdev, page, 0, l,
1584 				      page_chain_next(page) ? MSG_MORE : 0);
1585 		if (err)
1586 			return err;
1587 		len -= l;
1588 	}
1589 	return 0;
1590 }
1591 
1592 static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
1593 {
1594 	if (mdev->tconn->agreed_pro_version >= 95)
1595 		return  (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
1596 			(bi_rw & REQ_FUA ? DP_FUA : 0) |
1597 			(bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
1598 			(bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
1599 	else
1600 		return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
1601 }
1602 
1603 /* Used to send write requests
1604  * R_PRIMARY -> Peer	(P_DATA)
1605  */
1606 int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
1607 {
1608 	struct drbd_socket *sock;
1609 	struct p_data *p;
1610 	unsigned int dp_flags = 0;
1611 	int dgs;
1612 	int err;
1613 
1614 	sock = &mdev->tconn->data;
1615 	p = drbd_prepare_command(mdev, sock);
1616 	dgs = mdev->tconn->integrity_tfm ? crypto_hash_digestsize(mdev->tconn->integrity_tfm) : 0;
1617 
1618 	if (!p)
1619 		return -EIO;
1620 	p->sector = cpu_to_be64(req->i.sector);
1621 	p->block_id = (unsigned long)req;
1622 	p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
1623 	dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
1624 	if (mdev->state.conn >= C_SYNC_SOURCE &&
1625 	    mdev->state.conn <= C_PAUSED_SYNC_T)
1626 		dp_flags |= DP_MAY_SET_IN_SYNC;
1627 	if (mdev->tconn->agreed_pro_version >= 100) {
1628 		if (req->rq_state & RQ_EXP_RECEIVE_ACK)
1629 			dp_flags |= DP_SEND_RECEIVE_ACK;
1630 		if (req->rq_state & RQ_EXP_WRITE_ACK)
1631 			dp_flags |= DP_SEND_WRITE_ACK;
1632 	}
1633 	p->dp_flags = cpu_to_be32(dp_flags);
1634 	if (dgs)
1635 		drbd_csum_bio(mdev, mdev->tconn->integrity_tfm, req->master_bio, p + 1);
1636 	err = __send_command(mdev->tconn, mdev->vnr, sock, P_DATA, sizeof(*p) + dgs, NULL, req->i.size);
1637 	if (!err) {
1638 		/* For protocol A, we have to memcpy the payload into
1639 		 * socket buffers, as we may complete right away
1640 		 * as soon as we handed it over to tcp, at which point the data
1641 		 * pages may become invalid.
1642 		 *
1643 		 * For data-integrity enabled, we copy it as well, so we can be
1644 		 * sure that even if the bio pages may still be modified, it
1645 		 * won't change the data on the wire, thus if the digest checks
1646 		 * out ok after sending on this side, but does not fit on the
1647 		 * receiving side, we sure have detected corruption elsewhere.
1648 		 */
1649 		if (!(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK)) || dgs)
1650 			err = _drbd_send_bio(mdev, req->master_bio);
1651 		else
1652 			err = _drbd_send_zc_bio(mdev, req->master_bio);
1653 
1654 		/* double check digest, sometimes buffers have been modified in flight. */
1655 		if (dgs > 0 && dgs <= 64) {
1656 			/* 64 byte, 512 bit, is the largest digest size
1657 			 * currently supported in kernel crypto. */
1658 			unsigned char digest[64];
1659 			drbd_csum_bio(mdev, mdev->tconn->integrity_tfm, req->master_bio, digest);
1660 			if (memcmp(p + 1, digest, dgs)) {
1661 				dev_warn(DEV,
1662 					"Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
1663 					(unsigned long long)req->i.sector, req->i.size);
1664 			}
1665 		} /* else if (dgs > 64) {
1666 		     ... Be noisy about digest too large ...
1667 		} */
1668 	}
1669 	mutex_unlock(&sock->mutex);  /* locked by drbd_prepare_command() */
1670 
1671 	return err;
1672 }
1673 
1674 /* answer packet, used to send data back for read requests:
1675  *  Peer       -> (diskless) R_PRIMARY   (P_DATA_REPLY)
1676  *  C_SYNC_SOURCE -> C_SYNC_TARGET         (P_RS_DATA_REPLY)
1677  */
1678 int drbd_send_block(struct drbd_conf *mdev, enum drbd_packet cmd,
1679 		    struct drbd_peer_request *peer_req)
1680 {
1681 	struct drbd_socket *sock;
1682 	struct p_data *p;
1683 	int err;
1684 	int dgs;
1685 
1686 	sock = &mdev->tconn->data;
1687 	p = drbd_prepare_command(mdev, sock);
1688 
1689 	dgs = mdev->tconn->integrity_tfm ? crypto_hash_digestsize(mdev->tconn->integrity_tfm) : 0;
1690 
1691 	if (!p)
1692 		return -EIO;
1693 	p->sector = cpu_to_be64(peer_req->i.sector);
1694 	p->block_id = peer_req->block_id;
1695 	p->seq_num = 0;  /* unused */
1696 	p->dp_flags = 0;
1697 	if (dgs)
1698 		drbd_csum_ee(mdev, mdev->tconn->integrity_tfm, peer_req, p + 1);
1699 	err = __send_command(mdev->tconn, mdev->vnr, sock, cmd, sizeof(*p) + dgs, NULL, peer_req->i.size);
1700 	if (!err)
1701 		err = _drbd_send_zc_ee(mdev, peer_req);
1702 	mutex_unlock(&sock->mutex);  /* locked by drbd_prepare_command() */
1703 
1704 	return err;
1705 }
1706 
1707 int drbd_send_out_of_sync(struct drbd_conf *mdev, struct drbd_request *req)
1708 {
1709 	struct drbd_socket *sock;
1710 	struct p_block_desc *p;
1711 
1712 	sock = &mdev->tconn->data;
1713 	p = drbd_prepare_command(mdev, sock);
1714 	if (!p)
1715 		return -EIO;
1716 	p->sector = cpu_to_be64(req->i.sector);
1717 	p->blksize = cpu_to_be32(req->i.size);
1718 	return drbd_send_command(mdev, sock, P_OUT_OF_SYNC, sizeof(*p), NULL, 0);
1719 }
1720 
1721 /*
1722   drbd_send distinguishes two cases:
1723 
1724   Packets sent via the data socket "sock"
1725   and packets sent via the meta data socket "msock"
1726 
1727 		    sock                      msock
1728   -----------------+-------------------------+------------------------------
1729   timeout           conf.timeout / 2          conf.timeout / 2
1730   timeout action    send a ping via msock     Abort communication
1731 					      and close all sockets
1732 */
1733 
1734 /*
1735  * you must have down()ed the appropriate [m]sock_mutex elsewhere!
1736  */
1737 int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
1738 	      void *buf, size_t size, unsigned msg_flags)
1739 {
1740 	struct kvec iov;
1741 	struct msghdr msg;
1742 	int rv, sent = 0;
1743 
1744 	if (!sock)
1745 		return -EBADR;
1746 
1747 	/* THINK  if (signal_pending) return ... ? */
1748 
1749 	iov.iov_base = buf;
1750 	iov.iov_len  = size;
1751 
1752 	msg.msg_name       = NULL;
1753 	msg.msg_namelen    = 0;
1754 	msg.msg_control    = NULL;
1755 	msg.msg_controllen = 0;
1756 	msg.msg_flags      = msg_flags | MSG_NOSIGNAL;
1757 
1758 	if (sock == tconn->data.socket) {
1759 		rcu_read_lock();
1760 		tconn->ko_count = rcu_dereference(tconn->net_conf)->ko_count;
1761 		rcu_read_unlock();
1762 		drbd_update_congested(tconn);
1763 	}
1764 	do {
1765 		/* STRANGE
1766 		 * tcp_sendmsg does _not_ use its size parameter at all ?
1767 		 *
1768 		 * -EAGAIN on timeout, -EINTR on signal.
1769 		 */
1770 /* THINK
1771  * do we need to block DRBD_SIG if sock == &meta.socket ??
1772  * otherwise wake_asender() might interrupt some send_*Ack !
1773  */
1774 		rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
1775 		if (rv == -EAGAIN) {
1776 			if (we_should_drop_the_connection(tconn, sock))
1777 				break;
1778 			else
1779 				continue;
1780 		}
1781 		if (rv == -EINTR) {
1782 			flush_signals(current);
1783 			rv = 0;
1784 		}
1785 		if (rv < 0)
1786 			break;
1787 		sent += rv;
1788 		iov.iov_base += rv;
1789 		iov.iov_len  -= rv;
1790 	} while (sent < size);
1791 
1792 	if (sock == tconn->data.socket)
1793 		clear_bit(NET_CONGESTED, &tconn->flags);
1794 
1795 	if (rv <= 0) {
1796 		if (rv != -EAGAIN) {
1797 			conn_err(tconn, "%s_sendmsg returned %d\n",
1798 				 sock == tconn->meta.socket ? "msock" : "sock",
1799 				 rv);
1800 			conn_request_state(tconn, NS(conn, C_BROKEN_PIPE), CS_HARD);
1801 		} else
1802 			conn_request_state(tconn, NS(conn, C_TIMEOUT), CS_HARD);
1803 	}
1804 
1805 	return sent;
1806 }
1807 
1808 /**
1809  * drbd_send_all  -  Send an entire buffer
1810  *
1811  * Returns 0 upon success and a negative error value otherwise.
1812  */
1813 int drbd_send_all(struct drbd_tconn *tconn, struct socket *sock, void *buffer,
1814 		  size_t size, unsigned msg_flags)
1815 {
1816 	int err;
1817 
1818 	err = drbd_send(tconn, sock, buffer, size, msg_flags);
1819 	if (err < 0)
1820 		return err;
1821 	if (err != size)
1822 		return -EIO;
1823 	return 0;
1824 }
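/* Minimal usage sketch (illustrative only; "buf" and "len" are made up, and
 * the caller must hold the corresponding socket mutex as noted above):
 *
 *	mutex_lock(&tconn->data.mutex);
 *	err = drbd_send_all(tconn, tconn->data.socket, buf, len, 0);
 *	mutex_unlock(&tconn->data.mutex);
 */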
1825 
1826 static int drbd_open(struct block_device *bdev, fmode_t mode)
1827 {
1828 	struct drbd_conf *mdev = bdev->bd_disk->private_data;
1829 	unsigned long flags;
1830 	int rv = 0;
1831 
1832 	mutex_lock(&drbd_main_mutex);
1833 	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
1834 	/* to have a stable mdev->state.role
1835 	 * and no race with updating open_cnt */
1836 
1837 	if (mdev->state.role != R_PRIMARY) {
1838 		if (mode & FMODE_WRITE)
1839 			rv = -EROFS;
1840 		else if (!allow_oos)
1841 			rv = -EMEDIUMTYPE;
1842 	}
1843 
1844 	if (!rv)
1845 		mdev->open_cnt++;
1846 	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
1847 	mutex_unlock(&drbd_main_mutex);
1848 
1849 	return rv;
1850 }
1851 
1852 static void drbd_release(struct gendisk *gd, fmode_t mode)
1853 {
1854 	struct drbd_conf *mdev = gd->private_data;
1855 	mutex_lock(&drbd_main_mutex);
1856 	mdev->open_cnt--;
1857 	mutex_unlock(&drbd_main_mutex);
1858 }
1859 
1860 static void drbd_set_defaults(struct drbd_conf *mdev)
1861 {
1862 	/* Beware! The actual layout differs
1863 	 * between big endian and little endian */
1864 	mdev->state = (union drbd_dev_state) {
1865 		{ .role = R_SECONDARY,
1866 		  .peer = R_UNKNOWN,
1867 		  .conn = C_STANDALONE,
1868 		  .disk = D_DISKLESS,
1869 		  .pdsk = D_UNKNOWN,
1870 		} };
1871 }
1872 
1873 void drbd_init_set_defaults(struct drbd_conf *mdev)
1874 {
1875 	/* the memset(,0,) did most of this.
1876 	 * note: only assignments, no allocation in here */
1877 
1878 	drbd_set_defaults(mdev);
1879 
1880 	atomic_set(&mdev->ap_bio_cnt, 0);
1881 	atomic_set(&mdev->ap_pending_cnt, 0);
1882 	atomic_set(&mdev->rs_pending_cnt, 0);
1883 	atomic_set(&mdev->unacked_cnt, 0);
1884 	atomic_set(&mdev->local_cnt, 0);
1885 	atomic_set(&mdev->pp_in_use_by_net, 0);
1886 	atomic_set(&mdev->rs_sect_in, 0);
1887 	atomic_set(&mdev->rs_sect_ev, 0);
1888 	atomic_set(&mdev->ap_in_flight, 0);
1889 	atomic_set(&mdev->md_io_in_use, 0);
1890 
1891 	mutex_init(&mdev->own_state_mutex);
1892 	mdev->state_mutex = &mdev->own_state_mutex;
1893 
1894 	spin_lock_init(&mdev->al_lock);
1895 	spin_lock_init(&mdev->peer_seq_lock);
1896 
1897 	INIT_LIST_HEAD(&mdev->active_ee);
1898 	INIT_LIST_HEAD(&mdev->sync_ee);
1899 	INIT_LIST_HEAD(&mdev->done_ee);
1900 	INIT_LIST_HEAD(&mdev->read_ee);
1901 	INIT_LIST_HEAD(&mdev->net_ee);
1902 	INIT_LIST_HEAD(&mdev->resync_reads);
1903 	INIT_LIST_HEAD(&mdev->resync_work.list);
1904 	INIT_LIST_HEAD(&mdev->unplug_work.list);
1905 	INIT_LIST_HEAD(&mdev->go_diskless.list);
1906 	INIT_LIST_HEAD(&mdev->md_sync_work.list);
1907 	INIT_LIST_HEAD(&mdev->start_resync_work.list);
1908 	INIT_LIST_HEAD(&mdev->bm_io_work.w.list);
1909 
1910 	mdev->resync_work.cb  = w_resync_timer;
1911 	mdev->unplug_work.cb  = w_send_write_hint;
1912 	mdev->go_diskless.cb  = w_go_diskless;
1913 	mdev->md_sync_work.cb = w_md_sync;
1914 	mdev->bm_io_work.w.cb = w_bitmap_io;
1915 	mdev->start_resync_work.cb = w_start_resync;
1916 
1917 	mdev->resync_work.mdev  = mdev;
1918 	mdev->unplug_work.mdev  = mdev;
1919 	mdev->go_diskless.mdev  = mdev;
1920 	mdev->md_sync_work.mdev = mdev;
1921 	mdev->bm_io_work.w.mdev = mdev;
1922 	mdev->start_resync_work.mdev = mdev;
1923 
1924 	init_timer(&mdev->resync_timer);
1925 	init_timer(&mdev->md_sync_timer);
1926 	init_timer(&mdev->start_resync_timer);
1927 	init_timer(&mdev->request_timer);
1928 	mdev->resync_timer.function = resync_timer_fn;
1929 	mdev->resync_timer.data = (unsigned long) mdev;
1930 	mdev->md_sync_timer.function = md_sync_timer_fn;
1931 	mdev->md_sync_timer.data = (unsigned long) mdev;
1932 	mdev->start_resync_timer.function = start_resync_timer_fn;
1933 	mdev->start_resync_timer.data = (unsigned long) mdev;
1934 	mdev->request_timer.function = request_timer_fn;
1935 	mdev->request_timer.data = (unsigned long) mdev;
1936 
1937 	init_waitqueue_head(&mdev->misc_wait);
1938 	init_waitqueue_head(&mdev->state_wait);
1939 	init_waitqueue_head(&mdev->ee_wait);
1940 	init_waitqueue_head(&mdev->al_wait);
1941 	init_waitqueue_head(&mdev->seq_wait);
1942 
1943 	mdev->resync_wenr = LC_FREE;
1944 	mdev->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
1945 	mdev->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
1946 }
1947 
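/* Reset the per-device counters and state back to their defaults and release
 * the bitmap and backing device resources.  Expects the receiver thread to be
 * stopped and the net_conf to be gone already (see the asserts below). */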
1948 void drbd_mdev_cleanup(struct drbd_conf *mdev)
1949 {
1950 	int i;
1951 	if (mdev->tconn->receiver.t_state != NONE)
1952 		dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
1953 				mdev->tconn->receiver.t_state);
1954 
1955 	mdev->al_writ_cnt  =
1956 	mdev->bm_writ_cnt  =
1957 	mdev->read_cnt     =
1958 	mdev->recv_cnt     =
1959 	mdev->send_cnt     =
1960 	mdev->writ_cnt     =
1961 	mdev->p_size       =
1962 	mdev->rs_start     =
1963 	mdev->rs_total     =
1964 	mdev->rs_failed    = 0;
1965 	mdev->rs_last_events = 0;
1966 	mdev->rs_last_sect_ev = 0;
1967 	for (i = 0; i < DRBD_SYNC_MARKS; i++) {
1968 		mdev->rs_mark_left[i] = 0;
1969 		mdev->rs_mark_time[i] = 0;
1970 	}
1971 	D_ASSERT(mdev->tconn->net_conf == NULL);
1972 
1973 	drbd_set_my_capacity(mdev, 0);
1974 	if (mdev->bitmap) {
1975 		/* maybe never allocated. */
1976 		drbd_bm_resize(mdev, 0, 1);
1977 		drbd_bm_cleanup(mdev);
1978 	}
1979 
1980 	drbd_free_bc(mdev->ldev);
1981 	mdev->ldev = NULL;
1982 
1983 	clear_bit(AL_SUSPENDED, &mdev->flags);
1984 
1985 	D_ASSERT(list_empty(&mdev->active_ee));
1986 	D_ASSERT(list_empty(&mdev->sync_ee));
1987 	D_ASSERT(list_empty(&mdev->done_ee));
1988 	D_ASSERT(list_empty(&mdev->read_ee));
1989 	D_ASSERT(list_empty(&mdev->net_ee));
1990 	D_ASSERT(list_empty(&mdev->resync_reads));
1991 	D_ASSERT(list_empty(&mdev->tconn->sender_work.q));
1992 	D_ASSERT(list_empty(&mdev->resync_work.list));
1993 	D_ASSERT(list_empty(&mdev->unplug_work.list));
1994 	D_ASSERT(list_empty(&mdev->go_diskless.list));
1995 
1996 	drbd_set_defaults(mdev);
1997 }
1998 
1999 
2000 static void drbd_destroy_mempools(void)
2001 {
2002 	struct page *page;
2003 
2004 	while (drbd_pp_pool) {
2005 		page = drbd_pp_pool;
2006 		drbd_pp_pool = (struct page *)page_private(page);
2007 		__free_page(page);
2008 		drbd_pp_vacant--;
2009 	}
2010 
2011 	/* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */
2012 
2013 	if (drbd_md_io_bio_set)
2014 		bioset_free(drbd_md_io_bio_set);
2015 	if (drbd_md_io_page_pool)
2016 		mempool_destroy(drbd_md_io_page_pool);
2017 	if (drbd_ee_mempool)
2018 		mempool_destroy(drbd_ee_mempool);
2019 	if (drbd_request_mempool)
2020 		mempool_destroy(drbd_request_mempool);
2021 	if (drbd_ee_cache)
2022 		kmem_cache_destroy(drbd_ee_cache);
2023 	if (drbd_request_cache)
2024 		kmem_cache_destroy(drbd_request_cache);
2025 	if (drbd_bm_ext_cache)
2026 		kmem_cache_destroy(drbd_bm_ext_cache);
2027 	if (drbd_al_ext_cache)
2028 		kmem_cache_destroy(drbd_al_ext_cache);
2029 
2030 	drbd_md_io_bio_set   = NULL;
2031 	drbd_md_io_page_pool = NULL;
2032 	drbd_ee_mempool      = NULL;
2033 	drbd_request_mempool = NULL;
2034 	drbd_ee_cache        = NULL;
2035 	drbd_request_cache   = NULL;
2036 	drbd_bm_ext_cache    = NULL;
2037 	drbd_al_ext_cache    = NULL;
2038 
2039 	return;
2040 }
2041 
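/* Allocate the slab caches, mempools, the md-io bio_set and the private page
 * pool.  The page pool is sized to hold one maximally sized bio worth of
 * pages per allowed minor ("number" below).  Undone by drbd_destroy_mempools(). */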
2042 static int drbd_create_mempools(void)
2043 {
2044 	struct page *page;
2045 	const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count;
2046 	int i;
2047 
2048 	/* prepare our caches and mempools */
2049 	drbd_request_mempool = NULL;
2050 	drbd_ee_cache        = NULL;
2051 	drbd_request_cache   = NULL;
2052 	drbd_bm_ext_cache    = NULL;
2053 	drbd_al_ext_cache    = NULL;
2054 	drbd_pp_pool         = NULL;
2055 	drbd_md_io_page_pool = NULL;
2056 	drbd_md_io_bio_set   = NULL;
2057 
2058 	/* caches */
2059 	drbd_request_cache = kmem_cache_create(
2060 		"drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
2061 	if (drbd_request_cache == NULL)
2062 		goto Enomem;
2063 
2064 	drbd_ee_cache = kmem_cache_create(
2065 		"drbd_ee", sizeof(struct drbd_peer_request), 0, 0, NULL);
2066 	if (drbd_ee_cache == NULL)
2067 		goto Enomem;
2068 
2069 	drbd_bm_ext_cache = kmem_cache_create(
2070 		"drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
2071 	if (drbd_bm_ext_cache == NULL)
2072 		goto Enomem;
2073 
2074 	drbd_al_ext_cache = kmem_cache_create(
2075 		"drbd_al", sizeof(struct lc_element), 0, 0, NULL);
2076 	if (drbd_al_ext_cache == NULL)
2077 		goto Enomem;
2078 
2079 	/* mempools */
2080 	drbd_md_io_bio_set = bioset_create(DRBD_MIN_POOL_PAGES, 0);
2081 	if (drbd_md_io_bio_set == NULL)
2082 		goto Enomem;
2083 
2084 	drbd_md_io_page_pool = mempool_create_page_pool(DRBD_MIN_POOL_PAGES, 0);
2085 	if (drbd_md_io_page_pool == NULL)
2086 		goto Enomem;
2087 
2088 	drbd_request_mempool = mempool_create(number,
2089 		mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
2090 	if (drbd_request_mempool == NULL)
2091 		goto Enomem;
2092 
2093 	drbd_ee_mempool = mempool_create(number,
2094 		mempool_alloc_slab, mempool_free_slab, drbd_ee_cache);
2095 	if (drbd_ee_mempool == NULL)
2096 		goto Enomem;
2097 
2098 	/* drbd's page pool */
2099 	spin_lock_init(&drbd_pp_lock);
2100 
2101 	for (i = 0; i < number; i++) {
2102 		page = alloc_page(GFP_HIGHUSER);
2103 		if (!page)
2104 			goto Enomem;
2105 		set_page_private(page, (unsigned long)drbd_pp_pool);
2106 		drbd_pp_pool = page;
2107 	}
2108 	drbd_pp_vacant = number;
2109 
2110 	return 0;
2111 
2112 Enomem:
2113 	drbd_destroy_mempools(); /* in case we allocated some */
2114 	return -ENOMEM;
2115 }
2116 
2117 static int drbd_notify_sys(struct notifier_block *this, unsigned long code,
2118 	void *unused)
2119 {
2120 	/* just so we have it.  you never know what interesting things we
2121 	 * might want to do here some day...
2122 	 */
2123 
2124 	return NOTIFY_DONE;
2125 }
2126 
2127 static struct notifier_block drbd_notifier = {
2128 	.notifier_call = drbd_notify_sys,
2129 };
2130 
2131 static void drbd_release_all_peer_reqs(struct drbd_conf *mdev)
2132 {
2133 	int rr;
2134 
2135 	rr = drbd_free_peer_reqs(mdev, &mdev->active_ee);
2136 	if (rr)
2137 		dev_err(DEV, "%d EEs in active list found!\n", rr);
2138 
2139 	rr = drbd_free_peer_reqs(mdev, &mdev->sync_ee);
2140 	if (rr)
2141 		dev_err(DEV, "%d EEs in sync list found!\n", rr);
2142 
2143 	rr = drbd_free_peer_reqs(mdev, &mdev->read_ee);
2144 	if (rr)
2145 		dev_err(DEV, "%d EEs in read list found!\n", rr);
2146 
2147 	rr = drbd_free_peer_reqs(mdev, &mdev->done_ee);
2148 	if (rr)
2149 		dev_err(DEV, "%d EEs in done list found!\n", rr);
2150 
2151 	rr = drbd_free_peer_reqs(mdev, &mdev->net_ee);
2152 	if (rr)
2153 		dev_err(DEV, "%d EEs in net list found!\n", rr);
2154 }
2155 
2156 /* caution. no locking. */
2157 void drbd_minor_destroy(struct kref *kref)
2158 {
2159 	struct drbd_conf *mdev = container_of(kref, struct drbd_conf, kref);
2160 	struct drbd_tconn *tconn = mdev->tconn;
2161 
2162 	del_timer_sync(&mdev->request_timer);
2163 
2164 	/* paranoia asserts */
2165 	D_ASSERT(mdev->open_cnt == 0);
2166 	/* end paranoia asserts */
2167 
2168 	/* cleanup stuff that may have been allocated during
2169 	 * device (re-)configuration or state changes */
2170 
2171 	if (mdev->this_bdev)
2172 		bdput(mdev->this_bdev);
2173 
2174 	drbd_free_bc(mdev->ldev);
2175 	mdev->ldev = NULL;
2176 
2177 	drbd_release_all_peer_reqs(mdev);
2178 
2179 	lc_destroy(mdev->act_log);
2180 	lc_destroy(mdev->resync);
2181 
2182 	kfree(mdev->p_uuid);
2183 	/* mdev->p_uuid = NULL; */
2184 
2185 	if (mdev->bitmap) /* should no longer be there. */
2186 		drbd_bm_cleanup(mdev);
2187 	__free_page(mdev->md_io_page);
2188 	put_disk(mdev->vdisk);
2189 	blk_cleanup_queue(mdev->rq_queue);
2190 	kfree(mdev->rs_plan_s);
2191 	kfree(mdev);
2192 
2193 	kref_put(&tconn->kref, &conn_destroy);
2194 }
2195 
2196 /* One global retry thread, used when we need to push back some bio and have it
2197  * reinserted through our make_request function.
2198  */
2199 static struct retry_worker {
2200 	struct workqueue_struct *wq;
2201 	struct work_struct worker;
2202 
2203 	spinlock_t lock;
2204 	struct list_head writes;
2205 } retry;
2206 
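/* Work function of the retry workqueue: take all requests queued by
 * drbd_restart_request() off the list and resubmit their master bios,
 * keeping the original start_time. */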
2207 static void do_retry(struct work_struct *ws)
2208 {
2209 	struct retry_worker *retry = container_of(ws, struct retry_worker, worker);
2210 	LIST_HEAD(writes);
2211 	struct drbd_request *req, *tmp;
2212 
2213 	spin_lock_irq(&retry->lock);
2214 	list_splice_init(&retry->writes, &writes);
2215 	spin_unlock_irq(&retry->lock);
2216 
2217 	list_for_each_entry_safe(req, tmp, &writes, tl_requests) {
2218 		struct drbd_conf *mdev = req->w.mdev;
2219 		struct bio *bio = req->master_bio;
2220 		unsigned long start_time = req->start_time;
2221 		bool expected;
2222 
2223 		expected =
2224 			expect(atomic_read(&req->completion_ref) == 0) &&
2225 			expect(req->rq_state & RQ_POSTPONED) &&
2226 			expect((req->rq_state & RQ_LOCAL_PENDING) == 0 ||
2227 				(req->rq_state & RQ_LOCAL_ABORTED) != 0);
2228 
2229 		if (!expected)
2230 			dev_err(DEV, "req=%p completion_ref=%d rq_state=%x\n",
2231 				req, atomic_read(&req->completion_ref),
2232 				req->rq_state);
2233 
2234 		/* We still need to put one kref associated with the
2235 		 * "completion_ref" going zero in the code path that queued it
2236 		 * here.  The request object may still be referenced by a
2237 		 * frozen local req->private_bio, in case we force-detached.
2238 		 */
2239 		kref_put(&req->kref, drbd_req_destroy);
2240 
2241 		/* A single suspended or otherwise blocking device may stall
2242 		 * all others as well.  Fortunately, this code path is to
2243 		 * recover from a situation that "should not happen":
2244 		 * concurrent writes in multi-primary setup.
2245 		 * In a "normal" lifecycle, this workqueue is supposed to be
2246 		 * destroyed without ever doing anything.
2247 		 * If it turns out to be an issue anyways, we can do per
2248 		 * resource (replication group) or per device (minor) retry
2249 		 * workqueues instead.
2250 		 */
2251 
2252 		/* We are not just doing generic_make_request(),
2253 		 * as we want to keep the start_time information. */
2254 		inc_ap_bio(mdev);
2255 		__drbd_make_request(mdev, bio, start_time);
2256 	}
2257 }
2258 
2259 void drbd_restart_request(struct drbd_request *req)
2260 {
2261 	unsigned long flags;
2262 	spin_lock_irqsave(&retry.lock, flags);
2263 	list_move_tail(&req->tl_requests, &retry.writes);
2264 	spin_unlock_irqrestore(&retry.lock, flags);
2265 
2266 	/* Drop the extra reference that would otherwise
2267 	 * have been dropped by complete_master_bio.
2268 	 * do_retry() needs to grab a new one. */
2269 	dec_ap_bio(req->w.mdev);
2270 
2271 	queue_work(retry.wq, &retry.worker);
2272 }
2273 
2274 
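/* Undo everything drbd_init() set up: remove /proc/drbd first (drbdsetup
 * probes its presence), then tear down the retry workqueue, generic netlink,
 * all minors and connections, the mempools and the block device major. */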
2275 static void drbd_cleanup(void)
2276 {
2277 	unsigned int i;
2278 	struct drbd_conf *mdev;
2279 	struct drbd_tconn *tconn, *tmp;
2280 
2281 	unregister_reboot_notifier(&drbd_notifier);
2282 
2283 	/* first remove proc,
2284 	 * drbdsetup uses its presence to detect
2285 	 * whether DRBD is loaded.
2286 	 * If we were to get stuck in proc removal,
2287 	 * but have netlink already deregistered,
2288 	 * some drbdsetup commands may wait forever
2289 	 * for an answer.
2290 	 */
2291 	if (drbd_proc)
2292 		remove_proc_entry("drbd", NULL);
2293 
2294 	if (retry.wq)
2295 		destroy_workqueue(retry.wq);
2296 
2297 	drbd_genl_unregister();
2298 
2299 	idr_for_each_entry(&minors, mdev, i) {
2300 		idr_remove(&minors, mdev_to_minor(mdev));
2301 		idr_remove(&mdev->tconn->volumes, mdev->vnr);
2302 		destroy_workqueue(mdev->submit.wq);
2303 		del_gendisk(mdev->vdisk);
2304 		/* synchronize_rcu(); No other threads running at this point */
2305 		kref_put(&mdev->kref, &drbd_minor_destroy);
2306 	}
2307 
2308 	/* not _rcu since there is no other updater anymore; genl is already unregistered */
2309 	list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
2310 		list_del(&tconn->all_tconn); /* not _rcu: no proc, no other threads */
2311 		/* synchronize_rcu(); */
2312 		kref_put(&tconn->kref, &conn_destroy);
2313 	}
2314 
2315 	drbd_destroy_mempools();
2316 	unregister_blkdev(DRBD_MAJOR, "drbd");
2317 
2318 	idr_destroy(&minors);
2319 
2320 	printk(KERN_INFO "drbd: module cleanup done.\n");
2321 }
2322 
2323 /**
2324  * drbd_congested() - Callback for the flusher thread
2325  * @congested_data:	User data
2326  * @bdi_bits:		Bits the BDI flusher thread is currently interested in
2327  *
2328  * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested.
2329  */
2330 static int drbd_congested(void *congested_data, int bdi_bits)
2331 {
2332 	struct drbd_conf *mdev = congested_data;
2333 	struct request_queue *q;
2334 	char reason = '-';
2335 	int r = 0;
2336 
2337 	if (!may_inc_ap_bio(mdev)) {
2338 		/* DRBD has frozen IO */
2339 		r = bdi_bits;
2340 		reason = 'd';
2341 		goto out;
2342 	}
2343 
2344 	if (test_bit(CALLBACK_PENDING, &mdev->tconn->flags)) {
2345 		r |= (1 << BDI_async_congested);
2346 		/* Without good local data, we would need to read from remote,
2347 		 * and that would need the worker thread as well, which is
2348 		 * currently blocked waiting for that usermode helper to
2349 		 * finish.
2350 		 */
2351 		if (!get_ldev_if_state(mdev, D_UP_TO_DATE))
2352 			r |= (1 << BDI_sync_congested);
2353 		else
2354 			put_ldev(mdev);
2355 		r &= bdi_bits;
2356 		reason = 'c';
2357 		goto out;
2358 	}
2359 
2360 	if (get_ldev(mdev)) {
2361 		q = bdev_get_queue(mdev->ldev->backing_bdev);
2362 		r = bdi_congested(&q->backing_dev_info, bdi_bits);
2363 		put_ldev(mdev);
2364 		if (r)
2365 			reason = 'b';
2366 	}
2367 
2368 	if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->tconn->flags)) {
2369 		r |= (1 << BDI_async_congested);
2370 		reason = reason == 'b' ? 'a' : 'n';
2371 	}
2372 
2373 out:
2374 	mdev->congestion_reason = reason;
2375 	return r;
2376 }
2377 
2378 static void drbd_init_workqueue(struct drbd_work_queue* wq)
2379 {
2380 	spin_lock_init(&wq->q_lock);
2381 	INIT_LIST_HEAD(&wq->q);
2382 	init_waitqueue_head(&wq->q_wait);
2383 }
2384 
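/* Look up a connection (resource) by name.  On success a reference is taken;
 * the caller must drop it again with kref_put(&tconn->kref, &conn_destroy). */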
2385 struct drbd_tconn *conn_get_by_name(const char *name)
2386 {
2387 	struct drbd_tconn *tconn;
2388 
2389 	if (!name || !name[0])
2390 		return NULL;
2391 
2392 	rcu_read_lock();
2393 	list_for_each_entry_rcu(tconn, &drbd_tconns, all_tconn) {
2394 		if (!strcmp(tconn->name, name)) {
2395 			kref_get(&tconn->kref);
2396 			goto found;
2397 		}
2398 	}
2399 	tconn = NULL;
2400 found:
2401 	rcu_read_unlock();
2402 	return tconn;
2403 }
2404 
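/* Like conn_get_by_name(), but match on the local and peer addresses.
 * Also returns with an extra reference held on success. */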
2405 struct drbd_tconn *conn_get_by_addrs(void *my_addr, int my_addr_len,
2406 				     void *peer_addr, int peer_addr_len)
2407 {
2408 	struct drbd_tconn *tconn;
2409 
2410 	rcu_read_lock();
2411 	list_for_each_entry_rcu(tconn, &drbd_tconns, all_tconn) {
2412 		if (tconn->my_addr_len == my_addr_len &&
2413 		    tconn->peer_addr_len == peer_addr_len &&
2414 		    !memcmp(&tconn->my_addr, my_addr, my_addr_len) &&
2415 		    !memcmp(&tconn->peer_addr, peer_addr, peer_addr_len)) {
2416 			kref_get(&tconn->kref);
2417 			goto found;
2418 		}
2419 	}
2420 	tconn = NULL;
2421 found:
2422 	rcu_read_unlock();
2423 	return tconn;
2424 }
2425 
2426 static int drbd_alloc_socket(struct drbd_socket *socket)
2427 {
2428 	socket->rbuf = (void *) __get_free_page(GFP_KERNEL);
2429 	if (!socket->rbuf)
2430 		return -ENOMEM;
2431 	socket->sbuf = (void *) __get_free_page(GFP_KERNEL);
2432 	if (!socket->sbuf)
2433 		return -ENOMEM;
2434 	return 0;
2435 }
2436 
2437 static void drbd_free_socket(struct drbd_socket *socket)
2438 {
2439 	free_page((unsigned long) socket->sbuf);
2440 	free_page((unsigned long) socket->rbuf);
2441 }
2442 
2443 void conn_free_crypto(struct drbd_tconn *tconn)
2444 {
2445 	drbd_free_sock(tconn);
2446 
2447 	crypto_free_hash(tconn->csums_tfm);
2448 	crypto_free_hash(tconn->verify_tfm);
2449 	crypto_free_hash(tconn->cram_hmac_tfm);
2450 	crypto_free_hash(tconn->integrity_tfm);
2451 	crypto_free_hash(tconn->peer_integrity_tfm);
2452 	kfree(tconn->int_dig_in);
2453 	kfree(tconn->int_dig_vv);
2454 
2455 	tconn->csums_tfm = NULL;
2456 	tconn->verify_tfm = NULL;
2457 	tconn->cram_hmac_tfm = NULL;
2458 	tconn->integrity_tfm = NULL;
2459 	tconn->peer_integrity_tfm = NULL;
2460 	tconn->int_dig_in = NULL;
2461 	tconn->int_dig_vv = NULL;
2462 }
2463 
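/* Apply @res_opts to @tconn.  Parses the CPU mask, if one is configured, and
 * asks the receiver, asender and worker threads to re-set their CPU affinity
 * when the mask changed. */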
2464 int set_resource_options(struct drbd_tconn *tconn, struct res_opts *res_opts)
2465 {
2466 	cpumask_var_t new_cpu_mask;
2467 	int err;
2468 
2469 	if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL))
2470 		return -ENOMEM;
2471 		/*
2472 		retcode = ERR_NOMEM;
2473 		drbd_msg_put_info("unable to allocate cpumask");
2474 		*/
2475 
2476 	/* silently ignore cpu mask on UP kernel */
2477 	if (nr_cpu_ids > 1 && res_opts->cpu_mask[0] != 0) {
2478 		/* FIXME: Get rid of constant 32 here */
2479 		err = bitmap_parse(res_opts->cpu_mask, 32,
2480 				   cpumask_bits(new_cpu_mask), nr_cpu_ids);
2481 		if (err) {
2482 			conn_warn(tconn, "bitmap_parse() failed with %d\n", err);
2483 			/* retcode = ERR_CPU_MASK_PARSE; */
2484 			goto fail;
2485 		}
2486 	}
2487 	tconn->res_opts = *res_opts;
2488 	if (!cpumask_equal(tconn->cpu_mask, new_cpu_mask)) {
2489 		cpumask_copy(tconn->cpu_mask, new_cpu_mask);
2490 		drbd_calc_cpu_mask(tconn);
2491 		tconn->receiver.reset_cpu_mask = 1;
2492 		tconn->asender.reset_cpu_mask = 1;
2493 		tconn->worker.reset_cpu_mask = 1;
2494 	}
2495 	err = 0;
2496 
2497 fail:
2498 	free_cpumask_var(new_cpu_mask);
2499 	return err;
2500 
2501 }
2502 
2503 /* caller must be under genl_lock() */
2504 struct drbd_tconn *conn_create(const char *name, struct res_opts *res_opts)
2505 {
2506 	struct drbd_tconn *tconn;
2507 
2508 	tconn = kzalloc(sizeof(struct drbd_tconn), GFP_KERNEL);
2509 	if (!tconn)
2510 		return NULL;
2511 
2512 	tconn->name = kstrdup(name, GFP_KERNEL);
2513 	if (!tconn->name)
2514 		goto fail;
2515 
2516 	if (drbd_alloc_socket(&tconn->data))
2517 		goto fail;
2518 	if (drbd_alloc_socket(&tconn->meta))
2519 		goto fail;
2520 
2521 	if (!zalloc_cpumask_var(&tconn->cpu_mask, GFP_KERNEL))
2522 		goto fail;
2523 
2524 	if (set_resource_options(tconn, res_opts))
2525 		goto fail;
2526 
2527 	tconn->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
2528 	if (!tconn->current_epoch)
2529 		goto fail;
2530 
2531 	INIT_LIST_HEAD(&tconn->transfer_log);
2532 
2533 	INIT_LIST_HEAD(&tconn->current_epoch->list);
2534 	tconn->epochs = 1;
2535 	spin_lock_init(&tconn->epoch_lock);
2536 	tconn->write_ordering = WO_bdev_flush;
2537 
2538 	tconn->send.seen_any_write_yet = false;
2539 	tconn->send.current_epoch_nr = 0;
2540 	tconn->send.current_epoch_writes = 0;
2541 
2542 	tconn->cstate = C_STANDALONE;
2543 	mutex_init(&tconn->cstate_mutex);
2544 	spin_lock_init(&tconn->req_lock);
2545 	mutex_init(&tconn->conf_update);
2546 	init_waitqueue_head(&tconn->ping_wait);
2547 	idr_init(&tconn->volumes);
2548 
2549 	drbd_init_workqueue(&tconn->sender_work);
2550 	mutex_init(&tconn->data.mutex);
2551 	mutex_init(&tconn->meta.mutex);
2552 
2553 	drbd_thread_init(tconn, &tconn->receiver, drbdd_init, "receiver");
2554 	drbd_thread_init(tconn, &tconn->worker, drbd_worker, "worker");
2555 	drbd_thread_init(tconn, &tconn->asender, drbd_asender, "asender");
2556 
2557 	kref_init(&tconn->kref);
2558 	list_add_tail_rcu(&tconn->all_tconn, &drbd_tconns);
2559 
2560 	return tconn;
2561 
2562 fail:
2563 	kfree(tconn->current_epoch);
2564 	free_cpumask_var(tconn->cpu_mask);
2565 	drbd_free_socket(&tconn->meta);
2566 	drbd_free_socket(&tconn->data);
2567 	kfree(tconn->name);
2568 	kfree(tconn);
2569 
2570 	return NULL;
2571 }
2572 
2573 void conn_destroy(struct kref *kref)
2574 {
2575 	struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref);
2576 
2577 	if (atomic_read(&tconn->current_epoch->epoch_size) !=  0)
2578 		conn_err(tconn, "epoch_size:%d\n", atomic_read(&tconn->current_epoch->epoch_size));
2579 	kfree(tconn->current_epoch);
2580 
2581 	idr_destroy(&tconn->volumes);
2582 
2583 	free_cpumask_var(tconn->cpu_mask);
2584 	drbd_free_socket(&tconn->meta);
2585 	drbd_free_socket(&tconn->data);
2586 	kfree(tconn->name);
2587 	kfree(tconn->int_dig_in);
2588 	kfree(tconn->int_dig_vv);
2589 	kfree(tconn);
2590 }
2591 
2592 int init_submitter(struct drbd_conf *mdev)
2593 {
2594 	/* opencoded create_singlethread_workqueue(),
2595 	 * to be able to say "drbd%d", ..., minor */
2596 	mdev->submit.wq = alloc_workqueue("drbd%u_submit",
2597 			WQ_UNBOUND | WQ_MEM_RECLAIM, 1, mdev->minor);
2598 	if (!mdev->submit.wq)
2599 		return -ENOMEM;
2600 
2601 	INIT_WORK(&mdev->submit.worker, do_submit);
2602 	spin_lock_init(&mdev->submit.lock);
2603 	INIT_LIST_HEAD(&mdev->submit.writes);
2604 	return 0;
2605 }
2606 
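/* Create a new volume (minor device) below @tconn: allocate the mdev, request
 * queue, gendisk, metadata page, bitmap and submit workqueue, and register it
 * in both the global minors idr and the connection's volumes idr. */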
2607 enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor, int vnr)
2608 {
2609 	struct drbd_conf *mdev;
2610 	struct gendisk *disk;
2611 	struct request_queue *q;
2612 	int vnr_got = vnr;
2613 	int minor_got = minor;
2614 	enum drbd_ret_code err = ERR_NOMEM;
2615 
2616 	mdev = minor_to_mdev(minor);
2617 	if (mdev)
2618 		return ERR_MINOR_EXISTS;
2619 
2620 	/* GFP_KERNEL, we are outside of all write-out paths */
2621 	mdev = kzalloc(sizeof(struct drbd_conf), GFP_KERNEL);
2622 	if (!mdev)
2623 		return ERR_NOMEM;
2624 
2625 	kref_get(&tconn->kref);
2626 	mdev->tconn = tconn;
2627 
2628 	mdev->minor = minor;
2629 	mdev->vnr = vnr;
2630 
2631 	drbd_init_set_defaults(mdev);
2632 
2633 	q = blk_alloc_queue(GFP_KERNEL);
2634 	if (!q)
2635 		goto out_no_q;
2636 	mdev->rq_queue = q;
2637 	q->queuedata   = mdev;
2638 
2639 	disk = alloc_disk(1);
2640 	if (!disk)
2641 		goto out_no_disk;
2642 	mdev->vdisk = disk;
2643 
2644 	set_disk_ro(disk, true);
2645 
2646 	disk->queue = q;
2647 	disk->major = DRBD_MAJOR;
2648 	disk->first_minor = minor;
2649 	disk->fops = &drbd_ops;
2650 	sprintf(disk->disk_name, "drbd%d", minor);
2651 	disk->private_data = mdev;
2652 
2653 	mdev->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
2654 	/* we have no partitions. we contain only ourselves. */
2655 	mdev->this_bdev->bd_contains = mdev->this_bdev;
2656 
2657 	q->backing_dev_info.congested_fn = drbd_congested;
2658 	q->backing_dev_info.congested_data = mdev;
2659 
2660 	blk_queue_make_request(q, drbd_make_request);
2661 	blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
2662 	/* Setting the max_hw_sectors to an odd value of 8 KiB here
2663 	   triggers a max_bio_size message upon first attach or connect. */
2664 	blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
2665 	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
2666 	blk_queue_merge_bvec(q, drbd_merge_bvec);
2667 	q->queue_lock = &mdev->tconn->req_lock; /* needed since we use */
2668 
2669 	mdev->md_io_page = alloc_page(GFP_KERNEL);
2670 	if (!mdev->md_io_page)
2671 		goto out_no_io_page;
2672 
2673 	if (drbd_bm_init(mdev))
2674 		goto out_no_bitmap;
2675 	mdev->read_requests = RB_ROOT;
2676 	mdev->write_requests = RB_ROOT;
2677 
2678 	minor_got = idr_alloc(&minors, mdev, minor, minor + 1, GFP_KERNEL);
2679 	if (minor_got < 0) {
2680 		if (minor_got == -ENOSPC) {
2681 			err = ERR_MINOR_EXISTS;
2682 			drbd_msg_put_info("requested minor exists already");
2683 		}
2684 		goto out_no_minor_idr;
2685 	}
2686 
2687 	vnr_got = idr_alloc(&tconn->volumes, mdev, vnr, vnr + 1, GFP_KERNEL);
2688 	if (vnr_got < 0) {
2689 		if (vnr_got == -ENOSPC) {
2690 			err = ERR_INVALID_REQUEST;
2691 			drbd_msg_put_info("requested volume exists already");
2692 		}
2693 		goto out_idr_remove_minor;
2694 	}
2695 
2696 	if (init_submitter(mdev)) {
2697 		err = ERR_NOMEM;
2698 		drbd_msg_put_info("unable to create submit workqueue");
2699 		goto out_idr_remove_vol;
2700 	}
2701 
2702 	add_disk(disk);
2703 	kref_init(&mdev->kref); /* one ref for both idrs and the add_disk */
2704 
2705 	/* inherit the connection state */
2706 	mdev->state.conn = tconn->cstate;
2707 	if (mdev->state.conn == C_WF_REPORT_PARAMS)
2708 		drbd_connected(mdev);
2709 
2710 	return NO_ERROR;
2711 
2712 out_idr_remove_vol:
2713 	idr_remove(&tconn->volumes, vnr_got);
2714 out_idr_remove_minor:
2715 	idr_remove(&minors, minor_got);
2716 	synchronize_rcu();
2717 out_no_minor_idr:
2718 	drbd_bm_cleanup(mdev);
2719 out_no_bitmap:
2720 	__free_page(mdev->md_io_page);
2721 out_no_io_page:
2722 	put_disk(disk);
2723 out_no_disk:
2724 	blk_cleanup_queue(q);
2725 out_no_q:
2726 	kfree(mdev);
2727 	kref_put(&tconn->kref, &conn_destroy);
2728 	return err;
2729 }
2730 
2731 int __init drbd_init(void)
2732 {
2733 	int err;
2734 
2735 	if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) {
2736 		printk(KERN_ERR
2737 		       "drbd: invalid minor_count (%d)\n", minor_count);
2738 #ifdef MODULE
2739 		return -EINVAL;
2740 #else
2741 		minor_count = DRBD_MINOR_COUNT_DEF;
2742 #endif
2743 	}
2744 
2745 	err = register_blkdev(DRBD_MAJOR, "drbd");
2746 	if (err) {
2747 		printk(KERN_ERR
2748 		       "drbd: unable to register block device major %d\n",
2749 		       DRBD_MAJOR);
2750 		return err;
2751 	}
2752 
2753 	err = drbd_genl_register();
2754 	if (err) {
2755 		printk(KERN_ERR "drbd: unable to register generic netlink family\n");
2756 		goto fail;
2757 	}
2758 
2759 
2760 	register_reboot_notifier(&drbd_notifier);
2761 
2762 	/*
2763 	 * allocate all necessary structs
2764 	 */
2765 	init_waitqueue_head(&drbd_pp_wait);
2766 
2767 	drbd_proc = NULL; /* play safe for drbd_cleanup */
2768 	idr_init(&minors);
2769 
2770 	err = drbd_create_mempools();
2771 	if (err)
2772 		goto fail;
2773 
2774 	err = -ENOMEM;
2775 	drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL);
2776 	if (!drbd_proc)	{
2777 		printk(KERN_ERR "drbd: unable to register proc file\n");
2778 		goto fail;
2779 	}
2780 
2781 	rwlock_init(&global_state_lock);
2782 	INIT_LIST_HEAD(&drbd_tconns);
2783 
2784 	retry.wq = create_singlethread_workqueue("drbd-reissue");
2785 	if (!retry.wq) {
2786 		printk(KERN_ERR "drbd: unable to create retry workqueue\n");
2787 		goto fail;
2788 	}
2789 	INIT_WORK(&retry.worker, do_retry);
2790 	spin_lock_init(&retry.lock);
2791 	INIT_LIST_HEAD(&retry.writes);
2792 
2793 	printk(KERN_INFO "drbd: initialized. "
2794 	       "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
2795 	       API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
2796 	printk(KERN_INFO "drbd: %s\n", drbd_buildtag());
2797 	printk(KERN_INFO "drbd: registered as block device major %d\n",
2798 		DRBD_MAJOR);
2799 
2800 	return 0; /* Success! */
2801 
2802 fail:
2803 	drbd_cleanup();
2804 	if (err == -ENOMEM)
2805 		printk(KERN_ERR "drbd: ran out of memory\n");
2806 	else
2807 		printk(KERN_ERR "drbd: initialization failure\n");
2808 	return err;
2809 }
2810 
2811 void drbd_free_bc(struct drbd_backing_dev *ldev)
2812 {
2813 	if (ldev == NULL)
2814 		return;
2815 
2816 	blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
2817 	blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
2818 
2819 	kfree(ldev->disk_conf);
2820 	kfree(ldev);
2821 }
2822 
2823 void drbd_free_sock(struct drbd_tconn *tconn)
2824 {
2825 	if (tconn->data.socket) {
2826 		mutex_lock(&tconn->data.mutex);
2827 		kernel_sock_shutdown(tconn->data.socket, SHUT_RDWR);
2828 		sock_release(tconn->data.socket);
2829 		tconn->data.socket = NULL;
2830 		mutex_unlock(&tconn->data.mutex);
2831 	}
2832 	if (tconn->meta.socket) {
2833 		mutex_lock(&tconn->meta.mutex);
2834 		kernel_sock_shutdown(tconn->meta.socket, SHUT_RDWR);
2835 		sock_release(tconn->meta.socket);
2836 		tconn->meta.socket = NULL;
2837 		mutex_unlock(&tconn->meta.mutex);
2838 	}
2839 }
2840 
2841 /* meta data management */
2842 
2843 void conn_md_sync(struct drbd_tconn *tconn)
2844 {
2845 	struct drbd_conf *mdev;
2846 	int vnr;
2847 
2848 	rcu_read_lock();
2849 	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
2850 		kref_get(&mdev->kref);
2851 		rcu_read_unlock();
2852 		drbd_md_sync(mdev);
2853 		kref_put(&mdev->kref, &drbd_minor_destroy);
2854 		rcu_read_lock();
2855 	}
2856 	rcu_read_unlock();
2857 }
2858 
2859 /* aligned 4kByte */
2860 struct meta_data_on_disk {
2861 	u64 la_size_sect;      /* last agreed size. */
2862 	u64 uuid[UI_SIZE];   /* UUIDs. */
2863 	u64 device_uuid;
2864 	u64 reserved_u64_1;
2865 	u32 flags;             /* MDF */
2866 	u32 magic;
2867 	u32 md_size_sect;
2868 	u32 al_offset;         /* offset to this block */
2869 	u32 al_nr_extents;     /* important for restoring the AL (userspace) */
2870 	      /* `-- act_log->nr_elements <-- ldev->dc.al_extents */
2871 	u32 bm_offset;         /* offset to the bitmap, from here */
2872 	u32 bm_bytes_per_bit;  /* BM_BLOCK_SIZE */
2873 	u32 la_peer_max_bio_size;   /* last peer max_bio_size */
2874 
2875 	/* see al_tr_number_to_on_disk_sector() */
2876 	u32 al_stripes;
2877 	u32 al_stripe_size_4k;
2878 
2879 	u8 reserved_u8[4096 - (7*8 + 10*4)];
2880 } __packed;
2881 
2882 
2883 
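/* Fill @b (a meta data buffer, typically obtained via drbd_md_get_buffer())
 * with the current on-disk representation and write it out to the meta data
 * superblock sector. */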
2884 void drbd_md_write(struct drbd_conf *mdev, void *b)
2885 {
2886 	struct meta_data_on_disk *buffer = b;
2887 	sector_t sector;
2888 	int i;
2889 
2890 	memset(buffer, 0, sizeof(*buffer));
2891 
2892 	buffer->la_size_sect = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
2893 	for (i = UI_CURRENT; i < UI_SIZE; i++)
2894 		buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
2895 	buffer->flags = cpu_to_be32(mdev->ldev->md.flags);
2896 	buffer->magic = cpu_to_be32(DRBD_MD_MAGIC_84_UNCLEAN);
2897 
2898 	buffer->md_size_sect  = cpu_to_be32(mdev->ldev->md.md_size_sect);
2899 	buffer->al_offset     = cpu_to_be32(mdev->ldev->md.al_offset);
2900 	buffer->al_nr_extents = cpu_to_be32(mdev->act_log->nr_elements);
2901 	buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
2902 	buffer->device_uuid = cpu_to_be64(mdev->ldev->md.device_uuid);
2903 
2904 	buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset);
2905 	buffer->la_peer_max_bio_size = cpu_to_be32(mdev->peer_max_bio_size);
2906 
2907 	buffer->al_stripes = cpu_to_be32(mdev->ldev->md.al_stripes);
2908 	buffer->al_stripe_size_4k = cpu_to_be32(mdev->ldev->md.al_stripe_size_4k);
2909 
2910 	D_ASSERT(drbd_md_ss(mdev->ldev) == mdev->ldev->md.md_offset);
2911 	sector = mdev->ldev->md.md_offset;
2912 
2913 	if (drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
2914 		/* this was a try anyways ... */
2915 		dev_err(DEV, "meta data update failed!\n");
2916 		drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
2917 	}
2918 }
2919 
2920 /**
2921  * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
2922  * @mdev:	DRBD device.
2923  */
2924 void drbd_md_sync(struct drbd_conf *mdev)
2925 {
2926 	struct meta_data_on_disk *buffer;
2927 
2928 	/* Don't accidentally change the DRBD meta data layout. */
2929 	BUILD_BUG_ON(UI_SIZE != 4);
2930 	BUILD_BUG_ON(sizeof(struct meta_data_on_disk) != 4096);
2931 
2932 	del_timer(&mdev->md_sync_timer);
2933 	/* timer may be rearmed by drbd_md_mark_dirty() now. */
2934 	if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
2935 		return;
2936 
2937 	/* We use D_FAILED here and not D_ATTACHING because we try to write
2938 	 * metadata even if we detach due to a disk failure! */
2939 	if (!get_ldev_if_state(mdev, D_FAILED))
2940 		return;
2941 
2942 	buffer = drbd_md_get_buffer(mdev);
2943 	if (!buffer)
2944 		goto out;
2945 
2946 	drbd_md_write(mdev, buffer);
2947 
2948 	/* Update mdev->ldev->md.la_size_sect,
2949 	 * since we just wrote it to the on-disk metadata. */
2950 	mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev);
2951 
2952 	drbd_md_put_buffer(mdev);
2953 out:
2954 	put_ldev(mdev);
2955 }
2956 
2957 static int check_activity_log_stripe_size(struct drbd_conf *mdev,
2958 		struct meta_data_on_disk *on_disk,
2959 		struct drbd_md *in_core)
2960 {
2961 	u32 al_stripes = be32_to_cpu(on_disk->al_stripes);
2962 	u32 al_stripe_size_4k = be32_to_cpu(on_disk->al_stripe_size_4k);
2963 	u64 al_size_4k;
2964 
2965 	/* both not set: default to old fixed size activity log */
2966 	if (al_stripes == 0 && al_stripe_size_4k == 0) {
2967 		al_stripes = 1;
2968 		al_stripe_size_4k = MD_32kB_SECT/8;
2969 	}
2970 
2971 	/* some paranoia plausibility checks */
2972 
2973 	/* we need both values to be set */
2974 	if (al_stripes == 0 || al_stripe_size_4k == 0)
2975 		goto err;
2976 
2977 	al_size_4k = (u64)al_stripes * al_stripe_size_4k;
2978 
2979 	/* Upper limit of activity log area, to avoid potential overflow
2980 	 * problems in al_tr_number_to_on_disk_sector(). As right now, more
2981 	 * than 72 * 4k blocks total only increases the amount of history,
2982 	 * limiting this arbitrarily to 16 GB is not a real limitation ;-)  */
2983 	if (al_size_4k > (16 * 1024 * 1024/4))
2984 		goto err;
2985 
2986 	/* Lower limit: we need at least 8 transaction slots (32kB)
2987 	 * to not break existing setups */
2988 	if (al_size_4k < MD_32kB_SECT/8)
2989 		goto err;
2990 
2991 	in_core->al_stripe_size_4k = al_stripe_size_4k;
2992 	in_core->al_stripes = al_stripes;
2993 	in_core->al_size_4k = al_size_4k;
2994 
2995 	return 0;
2996 err:
2997 	dev_err(DEV, "invalid activity log striping: al_stripes=%u, al_stripe_size_4k=%u\n",
2998 			al_stripes, al_stripe_size_4k);
2999 	return -EINVAL;
3000 }
3001 
3002 static int check_offsets_and_sizes(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
3003 {
3004 	sector_t capacity = drbd_get_capacity(bdev->md_bdev);
3005 	struct drbd_md *in_core = &bdev->md;
3006 	s32 on_disk_al_sect;
3007 	s32 on_disk_bm_sect;
3008 
3009 	/* The on-disk size of the activity log, calculated from offsets, and
3010 	 * the size of the activity log calculated from the stripe settings,
3011 	 * should match.
3012 	 * Though we could relax this a bit: it is ok, if the striped activity log
3013 	 * fits in the available on-disk activity log size.
3014 	 * Right now, that would break how resize is implemented.
3015 	 * TODO: make drbd_determine_dev_size() (and the drbdmeta tool) aware
3016 	 * of possible unused padding space in the on disk layout. */
3017 	if (in_core->al_offset < 0) {
3018 		if (in_core->bm_offset > in_core->al_offset)
3019 			goto err;
3020 		on_disk_al_sect = -in_core->al_offset;
3021 		on_disk_bm_sect = in_core->al_offset - in_core->bm_offset;
3022 	} else {
3023 		if (in_core->al_offset != MD_4kB_SECT)
3024 			goto err;
3025 		if (in_core->bm_offset < in_core->al_offset + in_core->al_size_4k * MD_4kB_SECT)
3026 			goto err;
3027 
3028 		on_disk_al_sect = in_core->bm_offset - MD_4kB_SECT;
3029 		on_disk_bm_sect = in_core->md_size_sect - in_core->bm_offset;
3030 	}
3031 
3032 	/* old fixed size meta data is exactly that: fixed. */
3033 	if (in_core->meta_dev_idx >= 0) {
3034 		if (in_core->md_size_sect != MD_128MB_SECT
3035 		||  in_core->al_offset != MD_4kB_SECT
3036 		||  in_core->bm_offset != MD_4kB_SECT + MD_32kB_SECT
3037 		||  in_core->al_stripes != 1
3038 		||  in_core->al_stripe_size_4k != MD_32kB_SECT/8)
3039 			goto err;
3040 	}
3041 
3042 	if (capacity < in_core->md_size_sect)
3043 		goto err;
3044 	if (capacity - in_core->md_size_sect < drbd_md_first_sector(bdev))
3045 		goto err;
3046 
3047 	/* should be aligned, and at least 32k */
3048 	if ((on_disk_al_sect & 7) || (on_disk_al_sect < MD_32kB_SECT))
3049 		goto err;
3050 
3051 	/* should fit (for now: exactly) into the available on-disk space;
3052 	 * overflow prevention is in check_activity_log_stripe_size() above. */
3053 	if (on_disk_al_sect != in_core->al_size_4k * MD_4kB_SECT)
3054 		goto err;
3055 
3056 	/* again, should be aligned */
3057 	if (in_core->bm_offset & 7)
3058 		goto err;
3059 
3060 	/* FIXME check for device grow with flex external meta data? */
3061 
3062 	/* can the available bitmap space cover the last agreed device size? */
3063 	if (on_disk_bm_sect < (in_core->la_size_sect+7)/MD_4kB_SECT/8/512)
3064 		goto err;
3065 
3066 	return 0;
3067 
3068 err:
3069 	dev_err(DEV, "meta data offsets don't make sense: idx=%d "
3070 			"al_s=%u, al_sz4k=%u, al_offset=%d, bm_offset=%d, "
3071 			"md_size_sect=%u, la_size=%llu, md_capacity=%llu\n",
3072 			in_core->meta_dev_idx,
3073 			in_core->al_stripes, in_core->al_stripe_size_4k,
3074 			in_core->al_offset, in_core->bm_offset, in_core->md_size_sect,
3075 			(unsigned long long)in_core->la_size_sect,
3076 			(unsigned long long)capacity);
3077 
3078 	return -EINVAL;
3079 }
3080 
3081 
3082 /**
3083  * drbd_md_read() - Reads in the meta data super block
3084  * @mdev:	DRBD device.
3085  * @bdev:	Device from which the meta data should be read in.
3086  *
3087  * Returns NO_ERROR on success, or an enum drbd_ret_code error code in case
3088  * something goes wrong.
3089  *
3090  * Called exactly once during drbd_adm_attach(), while still being D_DISKLESS,
3091  * even before @bdev is assigned to @mdev->ldev.
3092  */
3093 int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
3094 {
3095 	struct meta_data_on_disk *buffer;
3096 	u32 magic, flags;
3097 	int i, rv = NO_ERROR;
3098 
3099 	if (mdev->state.disk != D_DISKLESS)
3100 		return ERR_DISK_CONFIGURED;
3101 
3102 	buffer = drbd_md_get_buffer(mdev);
3103 	if (!buffer)
3104 		return ERR_NOMEM;
3105 
3106 	/* First, figure out where our meta data superblock is located,
3107 	 * and read it. */
3108 	bdev->md.meta_dev_idx = bdev->disk_conf->meta_dev_idx;
3109 	bdev->md.md_offset = drbd_md_ss(bdev);
3110 
3111 	if (drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
3112 		/* NOTE: can't do normal error processing here as this is
3113 		   called BEFORE disk is attached */
3114 		dev_err(DEV, "Error while reading metadata.\n");
3115 		rv = ERR_IO_MD_DISK;
3116 		goto err;
3117 	}
3118 
3119 	magic = be32_to_cpu(buffer->magic);
3120 	flags = be32_to_cpu(buffer->flags);
3121 	if (magic == DRBD_MD_MAGIC_84_UNCLEAN ||
3122 	    (magic == DRBD_MD_MAGIC_08 && !(flags & MDF_AL_CLEAN))) {
3123 			/* btw: that's Activity Log clean, not "all" clean. */
3124 		dev_err(DEV, "Found unclean meta data. Did you \"drbdadm apply-al\"?\n");
3125 		rv = ERR_MD_UNCLEAN;
3126 		goto err;
3127 	}
3128 
3129 	rv = ERR_MD_INVALID;
3130 	if (magic != DRBD_MD_MAGIC_08) {
3131 		if (magic == DRBD_MD_MAGIC_07)
3132 			dev_err(DEV, "Found old (0.7) meta data magic. Did you \"drbdadm create-md\"?\n");
3133 		else
3134 			dev_err(DEV, "Meta data magic not found. Did you \"drbdadm create-md\"?\n");
3135 		goto err;
3136 	}
3137 
3138 	if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
3139 		dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
3140 		    be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
3141 		goto err;
3142 	}
3143 
3144 
3145 	/* convert to in_core endian */
3146 	bdev->md.la_size_sect = be64_to_cpu(buffer->la_size_sect);
3147 	for (i = UI_CURRENT; i < UI_SIZE; i++)
3148 		bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
3149 	bdev->md.flags = be32_to_cpu(buffer->flags);
3150 	bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
3151 
3152 	bdev->md.md_size_sect = be32_to_cpu(buffer->md_size_sect);
3153 	bdev->md.al_offset = be32_to_cpu(buffer->al_offset);
3154 	bdev->md.bm_offset = be32_to_cpu(buffer->bm_offset);
3155 
3156 	if (check_activity_log_stripe_size(mdev, buffer, &bdev->md))
3157 		goto err;
3158 	if (check_offsets_and_sizes(mdev, bdev))
3159 		goto err;
3160 
3161 	if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
3162 		dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n",
3163 		    be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
3164 		goto err;
3165 	}
3166 	if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
3167 		dev_err(DEV, "unexpected md_size: %u (expected %u)\n",
3168 		    be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
3169 		goto err;
3170 	}
3171 
3172 	rv = NO_ERROR;
3173 
3174 	spin_lock_irq(&mdev->tconn->req_lock);
3175 	if (mdev->state.conn < C_CONNECTED) {
3176 		unsigned int peer;
3177 		peer = be32_to_cpu(buffer->la_peer_max_bio_size);
3178 		peer = max(peer, DRBD_MAX_BIO_SIZE_SAFE);
3179 		mdev->peer_max_bio_size = peer;
3180 	}
3181 	spin_unlock_irq(&mdev->tconn->req_lock);
3182 
3183  err:
3184 	drbd_md_put_buffer(mdev);
3185 
3186 	return rv;
3187 }
3188 
3189 /**
3190  * drbd_md_mark_dirty() - Mark meta data super block as dirty
3191  * @mdev:	DRBD device.
3192  *
3193  * Call this function if you change anything that should be written to
3194  * the meta-data super block. This function sets MD_DIRTY, and starts a
3195  * timer that ensures drbd_md_sync() gets called within five seconds.
3196  */
3197 #ifdef DEBUG
3198 void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *func)
3199 {
3200 	if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) {
3201 		mod_timer(&mdev->md_sync_timer, jiffies + HZ);
3202 		mdev->last_md_mark_dirty.line = line;
3203 		mdev->last_md_mark_dirty.func = func;
3204 	}
3205 }
3206 #else
3207 void drbd_md_mark_dirty(struct drbd_conf *mdev)
3208 {
3209 	if (!test_and_set_bit(MD_DIRTY, &mdev->flags))
3210 		mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ);
3211 }
3212 #endif
3213 
3214 void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
3215 {
3216 	int i;
3217 
3218 	for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++)
3219 		mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
3220 }
3221 
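/* Set UUID slot @idx to @val.  For UI_CURRENT the lowest bit encodes whether
 * this node is currently Primary.  The caller must hold md.uuid_lock; the
 * _drbd_uuid_set()/drbd_uuid_set() wrappers below take it themselves. */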
3222 void __drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
3223 {
3224 	if (idx == UI_CURRENT) {
3225 		if (mdev->state.role == R_PRIMARY)
3226 			val |= 1;
3227 		else
3228 			val &= ~((u64)1);
3229 
3230 		drbd_set_ed_uuid(mdev, val);
3231 	}
3232 
3233 	mdev->ldev->md.uuid[idx] = val;
3234 	drbd_md_mark_dirty(mdev);
3235 }
3236 
3237 void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
3238 {
3239 	unsigned long flags;
3240 	spin_lock_irqsave(&mdev->ldev->md.uuid_lock, flags);
3241 	__drbd_uuid_set(mdev, idx, val);
3242 	spin_unlock_irqrestore(&mdev->ldev->md.uuid_lock, flags);
3243 }
3244 
3245 void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
3246 {
3247 	unsigned long flags;
3248 	spin_lock_irqsave(&mdev->ldev->md.uuid_lock, flags);
3249 	if (mdev->ldev->md.uuid[idx]) {
3250 		drbd_uuid_move_history(mdev);
3251 		mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
3252 	}
3253 	__drbd_uuid_set(mdev, idx, val);
3254 	spin_unlock_irqrestore(&mdev->ldev->md.uuid_lock, flags);
3255 }
3256 
3257 /**
3258  * drbd_uuid_new_current() - Creates a new current UUID
3259  * @mdev:	DRBD device.
3260  *
3261  * Creates a new current UUID, and rotates the old current UUID into
3262  * the bitmap slot. Causes an incremental resync upon next connect.
3263  */
3264 void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
3265 {
3266 	u64 val;
3267 	unsigned long long bm_uuid;
3268 
3269 	get_random_bytes(&val, sizeof(u64));
3270 
3271 	spin_lock_irq(&mdev->ldev->md.uuid_lock);
3272 	bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
3273 
3274 	if (bm_uuid)
3275 		dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);
3276 
3277 	mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT];
3278 	__drbd_uuid_set(mdev, UI_CURRENT, val);
3279 	spin_unlock_irq(&mdev->ldev->md.uuid_lock);
3280 
3281 	drbd_print_uuids(mdev, "new current UUID");
3282 	/* get it to stable storage _now_ */
3283 	drbd_md_sync(mdev);
3284 }
3285 
3286 void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
3287 {
3288 	unsigned long flags;
3289 	if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
3290 		return;
3291 
3292 	spin_lock_irqsave(&mdev->ldev->md.uuid_lock, flags);
3293 	if (val == 0) {
3294 		drbd_uuid_move_history(mdev);
3295 		mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
3296 		mdev->ldev->md.uuid[UI_BITMAP] = 0;
3297 	} else {
3298 		unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
3299 		if (bm_uuid)
3300 			dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);
3301 
3302 		mdev->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
3303 	}
3304 	spin_unlock_irqrestore(&mdev->ldev->md.uuid_lock, flags);
3305 
3306 	drbd_md_mark_dirty(mdev);
3307 }
3308 
3309 /**
3310  * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3311  * @mdev:	DRBD device.
3312  *
3313  * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
3314  */
3315 int drbd_bmio_set_n_write(struct drbd_conf *mdev)
3316 {
3317 	int rv = -EIO;
3318 
3319 	if (get_ldev_if_state(mdev, D_ATTACHING)) {
3320 		drbd_md_set_flag(mdev, MDF_FULL_SYNC);
3321 		drbd_md_sync(mdev);
3322 		drbd_bm_set_all(mdev);
3323 
3324 		rv = drbd_bm_write(mdev);
3325 
3326 		if (!rv) {
3327 			drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
3328 			drbd_md_sync(mdev);
3329 		}
3330 
3331 		put_ldev(mdev);
3332 	}
3333 
3334 	return rv;
3335 }
3336 
3337 /**
3338  * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3339  * @mdev:	DRBD device.
3340  *
3341  * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
3342  */
3343 int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
3344 {
3345 	int rv = -EIO;
3346 
3347 	drbd_resume_al(mdev);
3348 	if (get_ldev_if_state(mdev, D_ATTACHING)) {
3349 		drbd_bm_clear_all(mdev);
3350 		rv = drbd_bm_write(mdev);
3351 		put_ldev(mdev);
3352 	}
3353 
3354 	return rv;
3355 }
3356 
3357 static int w_bitmap_io(struct drbd_work *w, int unused)
3358 {
3359 	struct bm_io_work *work = container_of(w, struct bm_io_work, w);
3360 	struct drbd_conf *mdev = w->mdev;
3361 	int rv = -EIO;
3362 
3363 	D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);
3364 
3365 	if (get_ldev(mdev)) {
3366 		drbd_bm_lock(mdev, work->why, work->flags);
3367 		rv = work->io_fn(mdev);
3368 		drbd_bm_unlock(mdev);
3369 		put_ldev(mdev);
3370 	}
3371 
3372 	clear_bit_unlock(BITMAP_IO, &mdev->flags);
3373 	wake_up(&mdev->misc_wait);
3374 
3375 	if (work->done)
3376 		work->done(mdev, rv);
3377 
3378 	clear_bit(BITMAP_IO_QUEUED, &mdev->flags);
3379 	work->why = NULL;
3380 	work->flags = 0;
3381 
3382 	return 0;
3383 }
3384 
3385 void drbd_ldev_destroy(struct drbd_conf *mdev)
3386 {
3387 	lc_destroy(mdev->resync);
3388 	mdev->resync = NULL;
3389 	lc_destroy(mdev->act_log);
3390 	mdev->act_log = NULL;
3391 	__no_warn(local,
3392 		drbd_free_bc(mdev->ldev);
3393 		mdev->ldev = NULL;);
3394 
3395 	clear_bit(GO_DISKLESS, &mdev->flags);
3396 }
3397 
3398 static int w_go_diskless(struct drbd_work *w, int unused)
3399 {
3400 	struct drbd_conf *mdev = w->mdev;
3401 
3402 	D_ASSERT(mdev->state.disk == D_FAILED);
3403 	/* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
3404 	 * inc/dec it frequently. Once we are D_DISKLESS, no one will touch
3405 	 * the protected members anymore, though, so once the count dropped by
3406 	 * put_ldev reaches zero again, it will be safe to free them. */
3407 
3408 	/* Try to write changed bitmap pages, read errors may have just
3409 	 * set some bits outside the area covered by the activity log.
3410 	 *
3411 	 * If we have an IO error during the bitmap writeout,
3412 	 * we will want a full sync next time, just in case.
3413 	 * (Do we want a specific meta data flag for this?)
3414 	 *
3415 	 * If that does not make it to stable storage either,
3416 	 * we cannot do anything about that anymore.
3417 	 *
3418 	 * We still need to check if both bitmap and ldev are present, we may
3419 	 * end up here after a failed attach, before ldev was even assigned.
3420 	 */
3421 	if (mdev->bitmap && mdev->ldev) {
3422 		/* An interrupted resync or similar is allowed to recount bits
3423 		 * while we detach.
3424 		 * Any modifications would not be expected anymore, though.
3425 		 */
3426 		if (drbd_bitmap_io_from_worker(mdev, drbd_bm_write,
3427 					"detach", BM_LOCKED_TEST_ALLOWED)) {
3428 			if (test_bit(WAS_READ_ERROR, &mdev->flags)) {
3429 				drbd_md_set_flag(mdev, MDF_FULL_SYNC);
3430 				drbd_md_sync(mdev);
3431 			}
3432 		}
3433 	}
3434 
3435 	drbd_force_state(mdev, NS(disk, D_DISKLESS));
3436 	return 0;
3437 }
3438 
3439 /**
3440  * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
3441  * @mdev:	DRBD device.
3442  * @io_fn:	IO callback to be called when bitmap IO is possible
3443  * @done:	callback to be called after the bitmap IO was performed
3444  * @why:	Descriptive text of the reason for doing the IO
3445  *
3446  * While IO on the bitmap happens we freeze application IO, thus ensuring
3447  * that drbd_set_out_of_sync() cannot be called. This function MAY ONLY be
3448  * called from worker context. It MUST NOT be used while a previous such
3449  * work is still pending!
3450  */
3451 void drbd_queue_bitmap_io(struct drbd_conf *mdev,
3452 			  int (*io_fn)(struct drbd_conf *),
3453 			  void (*done)(struct drbd_conf *, int),
3454 			  char *why, enum bm_flag flags)
3455 {
3456 	D_ASSERT(current == mdev->tconn->worker.task);
3457 
3458 	D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags));
3459 	D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags));
3460 	D_ASSERT(list_empty(&mdev->bm_io_work.w.list));
3461 	if (mdev->bm_io_work.why)
3462 		dev_err(DEV, "FIXME going to queue '%s' but '%s' still pending?\n",
3463 			why, mdev->bm_io_work.why);
3464 
3465 	mdev->bm_io_work.io_fn = io_fn;
3466 	mdev->bm_io_work.done = done;
3467 	mdev->bm_io_work.why = why;
3468 	mdev->bm_io_work.flags = flags;
3469 
3470 	spin_lock_irq(&mdev->tconn->req_lock);
3471 	set_bit(BITMAP_IO, &mdev->flags);
3472 	if (atomic_read(&mdev->ap_bio_cnt) == 0) {
3473 		if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
3474 			drbd_queue_work(&mdev->tconn->sender_work, &mdev->bm_io_work.w);
3475 	}
3476 	spin_unlock_irq(&mdev->tconn->req_lock);
3477 }
3478 
3479 /**
3480  * drbd_bitmap_io() -  Does an IO operation on the whole bitmap
3481  * @mdev:	DRBD device.
3482  * @io_fn:	IO callback to be called when bitmap IO is possible
3483  * @why:	Descriptive text of the reason for doing the IO
3484  *
3485  * Freezes application IO while the actual IO operation runs. This
3486  * function MAY NOT be called from worker context.
3487  */
3488 int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *),
3489 		char *why, enum bm_flag flags)
3490 {
3491 	int rv;
3492 
3493 	D_ASSERT(current != mdev->tconn->worker.task);
3494 
3495 	if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
3496 		drbd_suspend_io(mdev);
3497 
3498 	drbd_bm_lock(mdev, why, flags);
3499 	rv = io_fn(mdev);
3500 	drbd_bm_unlock(mdev);
3501 
3502 	if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
3503 		drbd_resume_io(mdev);
3504 
3505 	return rv;
3506 }
3507 
3508 void drbd_md_set_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
3509 {
3510 	if ((mdev->ldev->md.flags & flag) != flag) {
3511 		drbd_md_mark_dirty(mdev);
3512 		mdev->ldev->md.flags |= flag;
3513 	}
3514 }
3515 
3516 void drbd_md_clear_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
3517 {
3518 	if ((mdev->ldev->md.flags & flag) != 0) {
3519 		drbd_md_mark_dirty(mdev);
3520 		mdev->ldev->md.flags &= ~flag;
3521 	}
3522 }
3523 int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
3524 {
3525 	return (bdev->md.flags & flag) != 0;
3526 }
3527 
3528 static void md_sync_timer_fn(unsigned long data)
3529 {
3530 	struct drbd_conf *mdev = (struct drbd_conf *) data;
3531 
3532 	/* must not double-queue! */
3533 	if (list_empty(&mdev->md_sync_work.list))
3534 		drbd_queue_work_front(&mdev->tconn->sender_work, &mdev->md_sync_work);
3535 }
3536 
3537 static int w_md_sync(struct drbd_work *w, int unused)
3538 {
3539 	struct drbd_conf *mdev = w->mdev;
3540 
3541 	dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
3542 #ifdef DEBUG
3543 	dev_warn(DEV, "last md_mark_dirty: %s:%u\n",
3544 		mdev->last_md_mark_dirty.func, mdev->last_md_mark_dirty.line);
3545 #endif
3546 	drbd_md_sync(mdev);
3547 	return 0;
3548 }
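
/*
 * Note (not from the original source): the md-sync fallback path above
 * works in two steps - md_sync_timer_fn() runs in timer (softirq) context
 * and only queues md_sync_work, while w_md_sync() then performs the
 * actual drbd_md_sync() from worker context, where sleeping and issuing
 * meta-data IO are allowed.
 */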
3549 
3550 const char *cmdname(enum drbd_packet cmd)
3551 {
3552 	/* THINK may need to become several global tables
3553 	 * when we want to support more than
3554 	 * one PRO_VERSION */
3555 	static const char *cmdnames[] = {
3556 		[P_DATA]	        = "Data",
3557 		[P_DATA_REPLY]	        = "DataReply",
3558 		[P_RS_DATA_REPLY]	= "RSDataReply",
3559 		[P_BARRIER]	        = "Barrier",
3560 		[P_BITMAP]	        = "ReportBitMap",
3561 		[P_BECOME_SYNC_TARGET]  = "BecomeSyncTarget",
3562 		[P_BECOME_SYNC_SOURCE]  = "BecomeSyncSource",
3563 		[P_UNPLUG_REMOTE]	= "UnplugRemote",
3564 		[P_DATA_REQUEST]	= "DataRequest",
3565 		[P_RS_DATA_REQUEST]     = "RSDataRequest",
3566 		[P_SYNC_PARAM]	        = "SyncParam",
3567 		[P_SYNC_PARAM89]	= "SyncParam89",
3568 		[P_PROTOCOL]            = "ReportProtocol",
3569 		[P_UUIDS]	        = "ReportUUIDs",
3570 		[P_SIZES]	        = "ReportSizes",
3571 		[P_STATE]	        = "ReportState",
3572 		[P_SYNC_UUID]           = "ReportSyncUUID",
3573 		[P_AUTH_CHALLENGE]      = "AuthChallenge",
3574 		[P_AUTH_RESPONSE]	= "AuthResponse",
3575 		[P_PING]		= "Ping",
3576 		[P_PING_ACK]	        = "PingAck",
3577 		[P_RECV_ACK]	        = "RecvAck",
3578 		[P_WRITE_ACK]	        = "WriteAck",
3579 		[P_RS_WRITE_ACK]	= "RSWriteAck",
3580 		[P_SUPERSEDED]          = "Superseded",
3581 		[P_NEG_ACK]	        = "NegAck",
3582 		[P_NEG_DREPLY]	        = "NegDReply",
3583 		[P_NEG_RS_DREPLY]	= "NegRSDReply",
3584 		[P_BARRIER_ACK]	        = "BarrierAck",
3585 		[P_STATE_CHG_REQ]       = "StateChgRequest",
3586 		[P_STATE_CHG_REPLY]     = "StateChgReply",
3587 		[P_OV_REQUEST]          = "OVRequest",
3588 		[P_OV_REPLY]            = "OVReply",
3589 		[P_OV_RESULT]           = "OVResult",
3590 		[P_CSUM_RS_REQUEST]     = "CsumRSRequest",
3591 		[P_RS_IS_IN_SYNC]	= "CsumRSIsInSync",
3592 		[P_COMPRESSED_BITMAP]   = "CBitmap",
3593 		[P_DELAY_PROBE]         = "DelayProbe",
3594 		[P_OUT_OF_SYNC]		= "OutOfSync",
3595 		[P_RETRY_WRITE]		= "RetryWrite",
3596 		[P_RS_CANCEL]		= "RSCancel",
3597 		[P_CONN_ST_CHG_REQ]	= "conn_st_chg_req",
3598 		[P_CONN_ST_CHG_REPLY]	= "conn_st_chg_reply",
3599 		[P_RETRY_WRITE]		= "retry_write", /* duplicate designator; overrides "RetryWrite" above */
3600 		[P_PROTOCOL_UPDATE]	= "protocol_update",
3601 
3602 		/* enum drbd_packet, but not commands - obsoleted flags:
3603 		 *	P_MAY_IGNORE
3604 		 *	P_MAX_OPT_CMD
3605 		 */
3606 	};
3607 
3608 	/* too big for the array: 0xfffX */
3609 	if (cmd == P_INITIAL_META)
3610 		return "InitialMeta";
3611 	if (cmd == P_INITIAL_DATA)
3612 		return "InitialData";
3613 	if (cmd == P_CONNECTION_FEATURES)
3614 		return "ConnectionFeatures";
3615 	if (cmd >= ARRAY_SIZE(cmdnames))
3616 		return "Unknown";
3617 	return cmdnames[cmd];
3618 }
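
/*
 * Illustrative sketch (not from the original source): cmdname() is meant
 * for log messages, e.g. something along the lines of
 *
 *	dev_err(DEV, "unexpected packet %s\n", cmdname(cmd));
 */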
3619 
3620 /**
3621  * drbd_wait_misc  -  wait for a request to make progress
3622  * @mdev:	device associated with the request
3623  * @i:		the struct drbd_interval embedded in struct drbd_request or
3624  *		struct drbd_peer_request
3625  */
3626 int drbd_wait_misc(struct drbd_conf *mdev, struct drbd_interval *i)
3627 {
3628 	struct net_conf *nc;
3629 	DEFINE_WAIT(wait);
3630 	long timeout;
3631 
3632 	rcu_read_lock();
3633 	nc = rcu_dereference(mdev->tconn->net_conf);
3634 	if (!nc) {
3635 		rcu_read_unlock();
3636 		return -ETIMEDOUT;
3637 	}
3638 	timeout = nc->ko_count ? nc->timeout * HZ / 10 * nc->ko_count : MAX_SCHEDULE_TIMEOUT;
3639 	rcu_read_unlock();
3640 
3641 	/* Indicate to wake up mdev->misc_wait on progress.  */
3642 	i->waiting = true;
3643 	prepare_to_wait(&mdev->misc_wait, &wait, TASK_INTERRUPTIBLE);
3644 	spin_unlock_irq(&mdev->tconn->req_lock);
3645 	timeout = schedule_timeout(timeout);
3646 	finish_wait(&mdev->misc_wait, &wait);
3647 	spin_lock_irq(&mdev->tconn->req_lock);
3648 	if (!timeout || mdev->state.conn < C_CONNECTED)
3649 		return -ETIMEDOUT;
3650 	if (signal_pending(current))
3651 		return -ERESTARTSYS;
3652 	return 0;
3653 }
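
/*
 * Note (not from the original source): judging from the unlock/lock pair
 * above, drbd_wait_misc() expects to be entered with mdev->tconn->req_lock
 * held and returns with it re-acquired; a caller would typically retry on
 * 0 and give up on -ETIMEDOUT or -ERESTARTSYS.
 */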
3654 
3655 #ifdef CONFIG_DRBD_FAULT_INJECTION
3656 /* Fault insertion support including random number generator shamelessly
3657  * stolen from kernel/rcutorture.c */
3658 struct fault_random_state {
3659 	unsigned long state;
3660 	unsigned long count;
3661 };
3662 
3663 #define FAULT_RANDOM_MULT 39916801  /* prime */
3664 #define FAULT_RANDOM_ADD	479001701 /* prime */
3665 #define FAULT_RANDOM_REFRESH 10000
3666 
3667 /*
3668  * Crude but fast random-number generator.  Uses a linear congruential
3669  * generator, with occasional help from get_random_bytes().
3670  */
3671 static unsigned long
3672 _drbd_fault_random(struct fault_random_state *rsp)
3673 {
3674 	long refresh;
3675 
3676 	if (!rsp->count--) {
3677 		get_random_bytes(&refresh, sizeof(refresh));
3678 		rsp->state += refresh;
3679 		rsp->count = FAULT_RANDOM_REFRESH;
3680 	}
3681 	rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
3682 	return swahw32(rsp->state);
3683 }
3684 
3685 static char *
3686 _drbd_fault_str(unsigned int type) {
3687 	static char *_faults[] = {
3688 		[DRBD_FAULT_MD_WR] = "Meta-data write",
3689 		[DRBD_FAULT_MD_RD] = "Meta-data read",
3690 		[DRBD_FAULT_RS_WR] = "Resync write",
3691 		[DRBD_FAULT_RS_RD] = "Resync read",
3692 		[DRBD_FAULT_DT_WR] = "Data write",
3693 		[DRBD_FAULT_DT_RD] = "Data read",
3694 		[DRBD_FAULT_DT_RA] = "Data read ahead",
3695 		[DRBD_FAULT_BM_ALLOC] = "BM allocation",
3696 		[DRBD_FAULT_AL_EE] = "EE allocation",
3697 		[DRBD_FAULT_RECEIVE] = "receive data corruption",
3698 	};
3699 
3700 	return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
3701 }
3702 
3703 unsigned int
3704 _drbd_insert_fault(struct drbd_conf *mdev, unsigned int type)
3705 {
3706 	static struct fault_random_state rrs = {0, 0};
3707 
3708 	unsigned int ret = (
3709 		(fault_devs == 0 ||
3710 			((1 << mdev_to_minor(mdev)) & fault_devs) != 0) &&
3711 		(((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));
3712 
3713 	if (ret) {
3714 		fault_count++;
3715 
3716 		if (__ratelimit(&drbd_ratelimit_state))
3717 			dev_warn(DEV, "***Simulating %s failure\n",
3718 				_drbd_fault_str(type));
3719 	}
3720 
3721 	return ret;
3722 }
3723 #endif
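
/*
 * Illustrative sketch (not from the original source): _drbd_insert_fault()
 * is normally reached through a drbd_insert_fault(mdev, type) wrapper
 * (assumed to live in drbd_int.h) that also checks fault_rate and
 * enable_faults, so an IO path can do something like
 *
 *	if (drbd_insert_fault(mdev, DRBD_FAULT_DT_WR))
 *		bio_io_error(bio);
 *
 * and compile to a no-op when CONFIG_DRBD_FAULT_INJECTION is off.
 */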
3724 
3725 const char *drbd_buildtag(void)
3726 {
3727 	/* When DRBD is built from external sources, this holds a reference
3728 	   to the git hash of the source code. */
3729 
3730 	static char buildtag[38] = "\0uilt-in";
3731 
3732 	if (buildtag[0] == 0) {
3733 #ifdef MODULE
3734 		sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
3735 #else
3736 		buildtag[0] = 'b';
3737 #endif
3738 	}
3739 
3740 	return buildtag;
3741 }
3742 
3743 module_init(drbd_init)
3744 module_exit(drbd_cleanup)
3745 
3746 EXPORT_SYMBOL(drbd_conn_str);
3747 EXPORT_SYMBOL(drbd_role_str);
3748 EXPORT_SYMBOL(drbd_disk_str);
3749 EXPORT_SYMBOL(drbd_set_st_err_str);
3750