xref: /openbmc/linux/drivers/block/drbd/drbd_main.c (revision 4f6cce39)
1 /*
2    drbd.c
3 
4    This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5 
6    Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7    Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8    Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9 
10    Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
11    from Logicworks, Inc. for making SDP replication support possible.
12 
13    drbd is free software; you can redistribute it and/or modify
14    it under the terms of the GNU General Public License as published by
15    the Free Software Foundation; either version 2, or (at your option)
16    any later version.
17 
18    drbd is distributed in the hope that it will be useful,
19    but WITHOUT ANY WARRANTY; without even the implied warranty of
20    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21    GNU General Public License for more details.
22 
23    You should have received a copy of the GNU General Public License
24    along with drbd; see the file COPYING.  If not, write to
25    the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
26 
27  */
28 
29 #define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
30 
31 #include <linux/module.h>
32 #include <linux/jiffies.h>
33 #include <linux/drbd.h>
34 #include <linux/uaccess.h>
35 #include <asm/types.h>
36 #include <net/sock.h>
37 #include <linux/ctype.h>
38 #include <linux/mutex.h>
39 #include <linux/fs.h>
40 #include <linux/file.h>
41 #include <linux/proc_fs.h>
42 #include <linux/init.h>
43 #include <linux/mm.h>
44 #include <linux/memcontrol.h>
45 #include <linux/mm_inline.h>
46 #include <linux/slab.h>
47 #include <linux/random.h>
48 #include <linux/reboot.h>
49 #include <linux/notifier.h>
50 #include <linux/kthread.h>
51 #include <linux/workqueue.h>
52 #define __KERNEL_SYSCALLS__
53 #include <linux/unistd.h>
54 #include <linux/vmalloc.h>
55 #include <linux/sched/signal.h>
56 
57 #include <linux/drbd_limits.h>
58 #include "drbd_int.h"
59 #include "drbd_protocol.h"
60 #include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */
61 #include "drbd_vli.h"
62 #include "drbd_debugfs.h"
63 
64 static DEFINE_MUTEX(drbd_main_mutex);
65 static int drbd_open(struct block_device *bdev, fmode_t mode);
66 static void drbd_release(struct gendisk *gd, fmode_t mode);
67 static void md_sync_timer_fn(unsigned long data);
68 static int w_bitmap_io(struct drbd_work *w, int unused);
69 
70 MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
71 	      "Lars Ellenberg <lars@linbit.com>");
72 MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
73 MODULE_VERSION(REL_VERSION);
74 MODULE_LICENSE("GPL");
75 MODULE_PARM_DESC(minor_count, "Approximate number of drbd devices ("
76 		 __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
77 MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);
78 
79 #include <linux/moduleparam.h>
80 /* allow_open_on_secondary */
81 MODULE_PARM_DESC(allow_oos, "DONT USE!");
82 /* thanks to these macros, if compiled into the kernel (not-module),
83  * this becomes the boot parameter drbd.minor_count */
84 module_param(minor_count, uint, 0444);
85 module_param(disable_sendpage, bool, 0644);
86 module_param(allow_oos, bool, 0);
87 module_param(proc_details, int, 0644);
88 
89 #ifdef CONFIG_DRBD_FAULT_INJECTION
90 int enable_faults;
91 int fault_rate;
92 static int fault_count;
93 int fault_devs;
94 /* bitmap of enabled faults */
95 module_param(enable_faults, int, 0664);
96 /* fault rate % value - applies to all enabled faults */
97 module_param(fault_rate, int, 0664);
98 /* count of faults inserted */
99 module_param(fault_count, int, 0664);
100 /* bitmap of devices to insert faults on */
101 module_param(fault_devs, int, 0644);
102 #endif
103 
104 /* module parameter, defined */
105 unsigned int minor_count = DRBD_MINOR_COUNT_DEF;
106 bool disable_sendpage;
107 bool allow_oos;
108 int proc_details;       /* Detail level in proc drbd*/
109 
110 /* Module parameter for setting the user mode helper program
111  * to run. Default is /sbin/drbdadm */
112 char usermode_helper[80] = "/sbin/drbdadm";
113 
114 module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);
115 
116 /* in 2.6.x, our device mapping and config info contains our virtual gendisks
117  * as member "struct gendisk *vdisk;"
118  */
119 struct idr drbd_devices;
120 struct list_head drbd_resources;
121 struct mutex resources_mutex;
122 
123 struct kmem_cache *drbd_request_cache;
124 struct kmem_cache *drbd_ee_cache;	/* peer requests */
125 struct kmem_cache *drbd_bm_ext_cache;	/* bitmap extents */
126 struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
127 mempool_t *drbd_request_mempool;
128 mempool_t *drbd_ee_mempool;
129 mempool_t *drbd_md_io_page_pool;
130 struct bio_set *drbd_md_io_bio_set;
131 
132 /* I do not use a standard mempool, because:
133    1) I want to hand out the pre-allocated objects first.
134    2) I want to be able to interrupt sleeping allocation with a signal.
135    Note: This is a single linked list, the next pointer is the private
136 	 member of struct page.
137  */
138 struct page *drbd_pp_pool;
139 spinlock_t   drbd_pp_lock;
140 int          drbd_pp_vacant;
141 wait_queue_head_t drbd_pp_wait;
142 
143 DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);
144 
145 static const struct block_device_operations drbd_ops = {
146 	.owner =   THIS_MODULE,
147 	.open =    drbd_open,
148 	.release = drbd_release,
149 };
150 
151 struct bio *bio_alloc_drbd(gfp_t gfp_mask)
152 {
153 	struct bio *bio;
154 
155 	if (!drbd_md_io_bio_set)
156 		return bio_alloc(gfp_mask, 1);
157 
158 	bio = bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set);
159 	if (!bio)
160 		return NULL;
161 	return bio;
162 }
163 
164 #ifdef __CHECKER__
165 /* When checking with sparse, and this is an inline function, sparse will
166    give tons of false positives. When this is a real functions sparse works.
167  */
168 int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
169 {
170 	int io_allowed;
171 
172 	atomic_inc(&device->local_cnt);
173 	io_allowed = (device->state.disk >= mins);
174 	if (!io_allowed) {
175 		if (atomic_dec_and_test(&device->local_cnt))
176 			wake_up(&device->misc_wait);
177 	}
178 	return io_allowed;
179 }
180 
181 #endif
182 
183 /**
184  * tl_release() - mark as BARRIER_ACKED all requests in the corresponding transfer log epoch
185  * @connection:	DRBD connection.
186  * @barrier_nr:	Expected identifier of the DRBD write barrier packet.
187  * @set_size:	Expected number of requests before that barrier.
188  *
189  * In case the passed barrier_nr or set_size does not match the oldest
190  * epoch of not yet barrier-acked requests, this function will cause a
191  * termination of the connection.
192  */
193 void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
194 		unsigned int set_size)
195 {
196 	struct drbd_request *r;
197 	struct drbd_request *req = NULL;
198 	int expect_epoch = 0;
199 	int expect_size = 0;
200 
201 	spin_lock_irq(&connection->resource->req_lock);
202 
203 	/* find oldest not yet barrier-acked write request,
204 	 * count writes in its epoch. */
205 	list_for_each_entry(r, &connection->transfer_log, tl_requests) {
206 		const unsigned s = r->rq_state;
207 		if (!req) {
208 			if (!(s & RQ_WRITE))
209 				continue;
210 			if (!(s & RQ_NET_MASK))
211 				continue;
212 			if (s & RQ_NET_DONE)
213 				continue;
214 			req = r;
215 			expect_epoch = req->epoch;
216 			expect_size ++;
217 		} else {
218 			if (r->epoch != expect_epoch)
219 				break;
220 			if (!(s & RQ_WRITE))
221 				continue;
222 			/* if (s & RQ_DONE): not expected */
223 			/* if (!(s & RQ_NET_MASK)): not expected */
224 			expect_size++;
225 		}
226 	}
227 
228 	/* first some paranoia code */
229 	if (req == NULL) {
230 		drbd_err(connection, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
231 			 barrier_nr);
232 		goto bail;
233 	}
234 	if (expect_epoch != barrier_nr) {
235 		drbd_err(connection, "BAD! BarrierAck #%u received, expected #%u!\n",
236 			 barrier_nr, expect_epoch);
237 		goto bail;
238 	}
239 
240 	if (expect_size != set_size) {
241 		drbd_err(connection, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
242 			 barrier_nr, set_size, expect_size);
243 		goto bail;
244 	}
245 
246 	/* Clean up list of requests processed during current epoch. */
247 	/* this extra list walk restart is paranoia,
248 	 * to catch requests being barrier-acked "unexpectedly".
249 	 * It usually should find the same req again, or some READ preceding it. */
250 	list_for_each_entry(req, &connection->transfer_log, tl_requests)
251 		if (req->epoch == expect_epoch)
252 			break;
253 	list_for_each_entry_safe_from(req, r, &connection->transfer_log, tl_requests) {
254 		if (req->epoch != expect_epoch)
255 			break;
256 		_req_mod(req, BARRIER_ACKED);
257 	}
258 	spin_unlock_irq(&connection->resource->req_lock);
259 
260 	return;
261 
262 bail:
263 	spin_unlock_irq(&connection->resource->req_lock);
264 	conn_request_state(connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
265 }
266 
267 
268 /**
269  * _tl_restart() - Walks the transfer log, and applies an action to all requests
270  * @connection:	DRBD connection to operate on.
271  * @what:       The action/event to perform with all request objects
272  *
273  * @what might be one of CONNECTION_LOST_WHILE_PENDING, RESEND, FAIL_FROZEN_DISK_IO,
274  * RESTART_FROZEN_DISK_IO.
275  */
276 /* must hold resource->req_lock */
277 void _tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
278 {
279 	struct drbd_request *req, *r;
280 
281 	list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests)
282 		_req_mod(req, what);
283 }
284 
285 void tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
286 {
287 	spin_lock_irq(&connection->resource->req_lock);
288 	_tl_restart(connection, what);
289 	spin_unlock_irq(&connection->resource->req_lock);
290 }
291 
292 /**
293  * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
294  * @device:	DRBD device.
295  *
296  * This is called after the connection to the peer was lost. The storage covered
297  * by the requests on the transfer gets marked as our of sync. Called from the
298  * receiver thread and the worker thread.
299  */
300 void tl_clear(struct drbd_connection *connection)
301 {
302 	tl_restart(connection, CONNECTION_LOST_WHILE_PENDING);
303 }
304 
305 /**
306  * tl_abort_disk_io() - Abort disk I/O for all requests for a certain device in the TL
307  * @device:	DRBD device.
308  */
309 void tl_abort_disk_io(struct drbd_device *device)
310 {
311 	struct drbd_connection *connection = first_peer_device(device)->connection;
312 	struct drbd_request *req, *r;
313 
314 	spin_lock_irq(&connection->resource->req_lock);
315 	list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests) {
316 		if (!(req->rq_state & RQ_LOCAL_PENDING))
317 			continue;
318 		if (req->device != device)
319 			continue;
320 		_req_mod(req, ABORT_DISK_IO);
321 	}
322 	spin_unlock_irq(&connection->resource->req_lock);
323 }
324 
325 static int drbd_thread_setup(void *arg)
326 {
327 	struct drbd_thread *thi = (struct drbd_thread *) arg;
328 	struct drbd_resource *resource = thi->resource;
329 	unsigned long flags;
330 	int retval;
331 
332 	snprintf(current->comm, sizeof(current->comm), "drbd_%c_%s",
333 		 thi->name[0],
334 		 resource->name);
335 
336 restart:
337 	retval = thi->function(thi);
338 
339 	spin_lock_irqsave(&thi->t_lock, flags);
340 
341 	/* if the receiver has been "EXITING", the last thing it did
342 	 * was set the conn state to "StandAlone",
343 	 * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
344 	 * and receiver thread will be "started".
345 	 * drbd_thread_start needs to set "RESTARTING" in that case.
346 	 * t_state check and assignment needs to be within the same spinlock,
347 	 * so either thread_start sees EXITING, and can remap to RESTARTING,
348 	 * or thread_start see NONE, and can proceed as normal.
349 	 */
350 
351 	if (thi->t_state == RESTARTING) {
352 		drbd_info(resource, "Restarting %s thread\n", thi->name);
353 		thi->t_state = RUNNING;
354 		spin_unlock_irqrestore(&thi->t_lock, flags);
355 		goto restart;
356 	}
357 
358 	thi->task = NULL;
359 	thi->t_state = NONE;
360 	smp_mb();
361 	complete_all(&thi->stop);
362 	spin_unlock_irqrestore(&thi->t_lock, flags);
363 
364 	drbd_info(resource, "Terminating %s\n", current->comm);
365 
366 	/* Release mod reference taken when thread was started */
367 
368 	if (thi->connection)
369 		kref_put(&thi->connection->kref, drbd_destroy_connection);
370 	kref_put(&resource->kref, drbd_destroy_resource);
371 	module_put(THIS_MODULE);
372 	return retval;
373 }
374 
375 static void drbd_thread_init(struct drbd_resource *resource, struct drbd_thread *thi,
376 			     int (*func) (struct drbd_thread *), const char *name)
377 {
378 	spin_lock_init(&thi->t_lock);
379 	thi->task    = NULL;
380 	thi->t_state = NONE;
381 	thi->function = func;
382 	thi->resource = resource;
383 	thi->connection = NULL;
384 	thi->name = name;
385 }
386 
387 int drbd_thread_start(struct drbd_thread *thi)
388 {
389 	struct drbd_resource *resource = thi->resource;
390 	struct task_struct *nt;
391 	unsigned long flags;
392 
393 	/* is used from state engine doing drbd_thread_stop_nowait,
394 	 * while holding the req lock irqsave */
395 	spin_lock_irqsave(&thi->t_lock, flags);
396 
397 	switch (thi->t_state) {
398 	case NONE:
399 		drbd_info(resource, "Starting %s thread (from %s [%d])\n",
400 			 thi->name, current->comm, current->pid);
401 
402 		/* Get ref on module for thread - this is released when thread exits */
403 		if (!try_module_get(THIS_MODULE)) {
404 			drbd_err(resource, "Failed to get module reference in drbd_thread_start\n");
405 			spin_unlock_irqrestore(&thi->t_lock, flags);
406 			return false;
407 		}
408 
409 		kref_get(&resource->kref);
410 		if (thi->connection)
411 			kref_get(&thi->connection->kref);
412 
413 		init_completion(&thi->stop);
414 		thi->reset_cpu_mask = 1;
415 		thi->t_state = RUNNING;
416 		spin_unlock_irqrestore(&thi->t_lock, flags);
417 		flush_signals(current); /* otherw. may get -ERESTARTNOINTR */
418 
419 		nt = kthread_create(drbd_thread_setup, (void *) thi,
420 				    "drbd_%c_%s", thi->name[0], thi->resource->name);
421 
422 		if (IS_ERR(nt)) {
423 			drbd_err(resource, "Couldn't start thread\n");
424 
425 			if (thi->connection)
426 				kref_put(&thi->connection->kref, drbd_destroy_connection);
427 			kref_put(&resource->kref, drbd_destroy_resource);
428 			module_put(THIS_MODULE);
429 			return false;
430 		}
431 		spin_lock_irqsave(&thi->t_lock, flags);
432 		thi->task = nt;
433 		thi->t_state = RUNNING;
434 		spin_unlock_irqrestore(&thi->t_lock, flags);
435 		wake_up_process(nt);
436 		break;
437 	case EXITING:
438 		thi->t_state = RESTARTING;
439 		drbd_info(resource, "Restarting %s thread (from %s [%d])\n",
440 				thi->name, current->comm, current->pid);
441 		/* fall through */
442 	case RUNNING:
443 	case RESTARTING:
444 	default:
445 		spin_unlock_irqrestore(&thi->t_lock, flags);
446 		break;
447 	}
448 
449 	return true;
450 }
451 
452 
453 void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
454 {
455 	unsigned long flags;
456 
457 	enum drbd_thread_state ns = restart ? RESTARTING : EXITING;
458 
459 	/* may be called from state engine, holding the req lock irqsave */
460 	spin_lock_irqsave(&thi->t_lock, flags);
461 
462 	if (thi->t_state == NONE) {
463 		spin_unlock_irqrestore(&thi->t_lock, flags);
464 		if (restart)
465 			drbd_thread_start(thi);
466 		return;
467 	}
468 
469 	if (thi->t_state != ns) {
470 		if (thi->task == NULL) {
471 			spin_unlock_irqrestore(&thi->t_lock, flags);
472 			return;
473 		}
474 
475 		thi->t_state = ns;
476 		smp_mb();
477 		init_completion(&thi->stop);
478 		if (thi->task != current)
479 			force_sig(DRBD_SIGKILL, thi->task);
480 	}
481 
482 	spin_unlock_irqrestore(&thi->t_lock, flags);
483 
484 	if (wait)
485 		wait_for_completion(&thi->stop);
486 }
487 
488 int conn_lowest_minor(struct drbd_connection *connection)
489 {
490 	struct drbd_peer_device *peer_device;
491 	int vnr = 0, minor = -1;
492 
493 	rcu_read_lock();
494 	peer_device = idr_get_next(&connection->peer_devices, &vnr);
495 	if (peer_device)
496 		minor = device_to_minor(peer_device->device);
497 	rcu_read_unlock();
498 
499 	return minor;
500 }
501 
502 #ifdef CONFIG_SMP
503 /**
504  * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
505  *
506  * Forces all threads of a resource onto the same CPU. This is beneficial for
507  * DRBD's performance. May be overwritten by user's configuration.
508  */
509 static void drbd_calc_cpu_mask(cpumask_var_t *cpu_mask)
510 {
511 	unsigned int *resources_per_cpu, min_index = ~0;
512 
513 	resources_per_cpu = kzalloc(nr_cpu_ids * sizeof(*resources_per_cpu), GFP_KERNEL);
514 	if (resources_per_cpu) {
515 		struct drbd_resource *resource;
516 		unsigned int cpu, min = ~0;
517 
518 		rcu_read_lock();
519 		for_each_resource_rcu(resource, &drbd_resources) {
520 			for_each_cpu(cpu, resource->cpu_mask)
521 				resources_per_cpu[cpu]++;
522 		}
523 		rcu_read_unlock();
524 		for_each_online_cpu(cpu) {
525 			if (resources_per_cpu[cpu] < min) {
526 				min = resources_per_cpu[cpu];
527 				min_index = cpu;
528 			}
529 		}
530 		kfree(resources_per_cpu);
531 	}
532 	if (min_index == ~0) {
533 		cpumask_setall(*cpu_mask);
534 		return;
535 	}
536 	cpumask_set_cpu(min_index, *cpu_mask);
537 }
538 
539 /**
540  * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
541  * @device:	DRBD device.
542  * @thi:	drbd_thread object
543  *
544  * call in the "main loop" of _all_ threads, no need for any mutex, current won't die
545  * prematurely.
546  */
547 void drbd_thread_current_set_cpu(struct drbd_thread *thi)
548 {
549 	struct drbd_resource *resource = thi->resource;
550 	struct task_struct *p = current;
551 
552 	if (!thi->reset_cpu_mask)
553 		return;
554 	thi->reset_cpu_mask = 0;
555 	set_cpus_allowed_ptr(p, resource->cpu_mask);
556 }
557 #else
558 #define drbd_calc_cpu_mask(A) ({})
559 #endif
560 
561 /**
562  * drbd_header_size  -  size of a packet header
563  *
564  * The header size is a multiple of 8, so any payload following the header is
565  * word aligned on 64-bit architectures.  (The bitmap send and receive code
566  * relies on this.)
567  */
568 unsigned int drbd_header_size(struct drbd_connection *connection)
569 {
570 	if (connection->agreed_pro_version >= 100) {
571 		BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header100), 8));
572 		return sizeof(struct p_header100);
573 	} else {
574 		BUILD_BUG_ON(sizeof(struct p_header80) !=
575 			     sizeof(struct p_header95));
576 		BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header80), 8));
577 		return sizeof(struct p_header80);
578 	}
579 }
580 
581 static unsigned int prepare_header80(struct p_header80 *h, enum drbd_packet cmd, int size)
582 {
583 	h->magic   = cpu_to_be32(DRBD_MAGIC);
584 	h->command = cpu_to_be16(cmd);
585 	h->length  = cpu_to_be16(size);
586 	return sizeof(struct p_header80);
587 }
588 
589 static unsigned int prepare_header95(struct p_header95 *h, enum drbd_packet cmd, int size)
590 {
591 	h->magic   = cpu_to_be16(DRBD_MAGIC_BIG);
592 	h->command = cpu_to_be16(cmd);
593 	h->length = cpu_to_be32(size);
594 	return sizeof(struct p_header95);
595 }
596 
597 static unsigned int prepare_header100(struct p_header100 *h, enum drbd_packet cmd,
598 				      int size, int vnr)
599 {
600 	h->magic = cpu_to_be32(DRBD_MAGIC_100);
601 	h->volume = cpu_to_be16(vnr);
602 	h->command = cpu_to_be16(cmd);
603 	h->length = cpu_to_be32(size);
604 	h->pad = 0;
605 	return sizeof(struct p_header100);
606 }
607 
608 static unsigned int prepare_header(struct drbd_connection *connection, int vnr,
609 				   void *buffer, enum drbd_packet cmd, int size)
610 {
611 	if (connection->agreed_pro_version >= 100)
612 		return prepare_header100(buffer, cmd, size, vnr);
613 	else if (connection->agreed_pro_version >= 95 &&
614 		 size > DRBD_MAX_SIZE_H80_PACKET)
615 		return prepare_header95(buffer, cmd, size);
616 	else
617 		return prepare_header80(buffer, cmd, size);
618 }
619 
620 static void *__conn_prepare_command(struct drbd_connection *connection,
621 				    struct drbd_socket *sock)
622 {
623 	if (!sock->socket)
624 		return NULL;
625 	return sock->sbuf + drbd_header_size(connection);
626 }
627 
628 void *conn_prepare_command(struct drbd_connection *connection, struct drbd_socket *sock)
629 {
630 	void *p;
631 
632 	mutex_lock(&sock->mutex);
633 	p = __conn_prepare_command(connection, sock);
634 	if (!p)
635 		mutex_unlock(&sock->mutex);
636 
637 	return p;
638 }
639 
640 void *drbd_prepare_command(struct drbd_peer_device *peer_device, struct drbd_socket *sock)
641 {
642 	return conn_prepare_command(peer_device->connection, sock);
643 }
644 
645 static int __send_command(struct drbd_connection *connection, int vnr,
646 			  struct drbd_socket *sock, enum drbd_packet cmd,
647 			  unsigned int header_size, void *data,
648 			  unsigned int size)
649 {
650 	int msg_flags;
651 	int err;
652 
653 	/*
654 	 * Called with @data == NULL and the size of the data blocks in @size
655 	 * for commands that send data blocks.  For those commands, omit the
656 	 * MSG_MORE flag: this will increase the likelihood that data blocks
657 	 * which are page aligned on the sender will end up page aligned on the
658 	 * receiver.
659 	 */
660 	msg_flags = data ? MSG_MORE : 0;
661 
662 	header_size += prepare_header(connection, vnr, sock->sbuf, cmd,
663 				      header_size + size);
664 	err = drbd_send_all(connection, sock->socket, sock->sbuf, header_size,
665 			    msg_flags);
666 	if (data && !err)
667 		err = drbd_send_all(connection, sock->socket, data, size, 0);
668 	/* DRBD protocol "pings" are latency critical.
669 	 * This is supposed to trigger tcp_push_pending_frames() */
670 	if (!err && (cmd == P_PING || cmd == P_PING_ACK))
671 		drbd_tcp_nodelay(sock->socket);
672 
673 	return err;
674 }
675 
676 static int __conn_send_command(struct drbd_connection *connection, struct drbd_socket *sock,
677 			       enum drbd_packet cmd, unsigned int header_size,
678 			       void *data, unsigned int size)
679 {
680 	return __send_command(connection, 0, sock, cmd, header_size, data, size);
681 }
682 
683 int conn_send_command(struct drbd_connection *connection, struct drbd_socket *sock,
684 		      enum drbd_packet cmd, unsigned int header_size,
685 		      void *data, unsigned int size)
686 {
687 	int err;
688 
689 	err = __conn_send_command(connection, sock, cmd, header_size, data, size);
690 	mutex_unlock(&sock->mutex);
691 	return err;
692 }
693 
694 int drbd_send_command(struct drbd_peer_device *peer_device, struct drbd_socket *sock,
695 		      enum drbd_packet cmd, unsigned int header_size,
696 		      void *data, unsigned int size)
697 {
698 	int err;
699 
700 	err = __send_command(peer_device->connection, peer_device->device->vnr,
701 			     sock, cmd, header_size, data, size);
702 	mutex_unlock(&sock->mutex);
703 	return err;
704 }
705 
706 int drbd_send_ping(struct drbd_connection *connection)
707 {
708 	struct drbd_socket *sock;
709 
710 	sock = &connection->meta;
711 	if (!conn_prepare_command(connection, sock))
712 		return -EIO;
713 	return conn_send_command(connection, sock, P_PING, 0, NULL, 0);
714 }
715 
716 int drbd_send_ping_ack(struct drbd_connection *connection)
717 {
718 	struct drbd_socket *sock;
719 
720 	sock = &connection->meta;
721 	if (!conn_prepare_command(connection, sock))
722 		return -EIO;
723 	return conn_send_command(connection, sock, P_PING_ACK, 0, NULL, 0);
724 }
725 
726 int drbd_send_sync_param(struct drbd_peer_device *peer_device)
727 {
728 	struct drbd_socket *sock;
729 	struct p_rs_param_95 *p;
730 	int size;
731 	const int apv = peer_device->connection->agreed_pro_version;
732 	enum drbd_packet cmd;
733 	struct net_conf *nc;
734 	struct disk_conf *dc;
735 
736 	sock = &peer_device->connection->data;
737 	p = drbd_prepare_command(peer_device, sock);
738 	if (!p)
739 		return -EIO;
740 
741 	rcu_read_lock();
742 	nc = rcu_dereference(peer_device->connection->net_conf);
743 
744 	size = apv <= 87 ? sizeof(struct p_rs_param)
745 		: apv == 88 ? sizeof(struct p_rs_param)
746 			+ strlen(nc->verify_alg) + 1
747 		: apv <= 94 ? sizeof(struct p_rs_param_89)
748 		: /* apv >= 95 */ sizeof(struct p_rs_param_95);
749 
750 	cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;
751 
752 	/* initialize verify_alg and csums_alg */
753 	memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
754 
755 	if (get_ldev(peer_device->device)) {
756 		dc = rcu_dereference(peer_device->device->ldev->disk_conf);
757 		p->resync_rate = cpu_to_be32(dc->resync_rate);
758 		p->c_plan_ahead = cpu_to_be32(dc->c_plan_ahead);
759 		p->c_delay_target = cpu_to_be32(dc->c_delay_target);
760 		p->c_fill_target = cpu_to_be32(dc->c_fill_target);
761 		p->c_max_rate = cpu_to_be32(dc->c_max_rate);
762 		put_ldev(peer_device->device);
763 	} else {
764 		p->resync_rate = cpu_to_be32(DRBD_RESYNC_RATE_DEF);
765 		p->c_plan_ahead = cpu_to_be32(DRBD_C_PLAN_AHEAD_DEF);
766 		p->c_delay_target = cpu_to_be32(DRBD_C_DELAY_TARGET_DEF);
767 		p->c_fill_target = cpu_to_be32(DRBD_C_FILL_TARGET_DEF);
768 		p->c_max_rate = cpu_to_be32(DRBD_C_MAX_RATE_DEF);
769 	}
770 
771 	if (apv >= 88)
772 		strcpy(p->verify_alg, nc->verify_alg);
773 	if (apv >= 89)
774 		strcpy(p->csums_alg, nc->csums_alg);
775 	rcu_read_unlock();
776 
777 	return drbd_send_command(peer_device, sock, cmd, size, NULL, 0);
778 }
779 
780 int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cmd)
781 {
782 	struct drbd_socket *sock;
783 	struct p_protocol *p;
784 	struct net_conf *nc;
785 	int size, cf;
786 
787 	sock = &connection->data;
788 	p = __conn_prepare_command(connection, sock);
789 	if (!p)
790 		return -EIO;
791 
792 	rcu_read_lock();
793 	nc = rcu_dereference(connection->net_conf);
794 
795 	if (nc->tentative && connection->agreed_pro_version < 92) {
796 		rcu_read_unlock();
797 		mutex_unlock(&sock->mutex);
798 		drbd_err(connection, "--dry-run is not supported by peer");
799 		return -EOPNOTSUPP;
800 	}
801 
802 	size = sizeof(*p);
803 	if (connection->agreed_pro_version >= 87)
804 		size += strlen(nc->integrity_alg) + 1;
805 
806 	p->protocol      = cpu_to_be32(nc->wire_protocol);
807 	p->after_sb_0p   = cpu_to_be32(nc->after_sb_0p);
808 	p->after_sb_1p   = cpu_to_be32(nc->after_sb_1p);
809 	p->after_sb_2p   = cpu_to_be32(nc->after_sb_2p);
810 	p->two_primaries = cpu_to_be32(nc->two_primaries);
811 	cf = 0;
812 	if (nc->discard_my_data)
813 		cf |= CF_DISCARD_MY_DATA;
814 	if (nc->tentative)
815 		cf |= CF_DRY_RUN;
816 	p->conn_flags    = cpu_to_be32(cf);
817 
818 	if (connection->agreed_pro_version >= 87)
819 		strcpy(p->integrity_alg, nc->integrity_alg);
820 	rcu_read_unlock();
821 
822 	return __conn_send_command(connection, sock, cmd, size, NULL, 0);
823 }
824 
825 int drbd_send_protocol(struct drbd_connection *connection)
826 {
827 	int err;
828 
829 	mutex_lock(&connection->data.mutex);
830 	err = __drbd_send_protocol(connection, P_PROTOCOL);
831 	mutex_unlock(&connection->data.mutex);
832 
833 	return err;
834 }
835 
836 static int _drbd_send_uuids(struct drbd_peer_device *peer_device, u64 uuid_flags)
837 {
838 	struct drbd_device *device = peer_device->device;
839 	struct drbd_socket *sock;
840 	struct p_uuids *p;
841 	int i;
842 
843 	if (!get_ldev_if_state(device, D_NEGOTIATING))
844 		return 0;
845 
846 	sock = &peer_device->connection->data;
847 	p = drbd_prepare_command(peer_device, sock);
848 	if (!p) {
849 		put_ldev(device);
850 		return -EIO;
851 	}
852 	spin_lock_irq(&device->ldev->md.uuid_lock);
853 	for (i = UI_CURRENT; i < UI_SIZE; i++)
854 		p->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]);
855 	spin_unlock_irq(&device->ldev->md.uuid_lock);
856 
857 	device->comm_bm_set = drbd_bm_total_weight(device);
858 	p->uuid[UI_SIZE] = cpu_to_be64(device->comm_bm_set);
859 	rcu_read_lock();
860 	uuid_flags |= rcu_dereference(peer_device->connection->net_conf)->discard_my_data ? 1 : 0;
861 	rcu_read_unlock();
862 	uuid_flags |= test_bit(CRASHED_PRIMARY, &device->flags) ? 2 : 0;
863 	uuid_flags |= device->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
864 	p->uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);
865 
866 	put_ldev(device);
867 	return drbd_send_command(peer_device, sock, P_UUIDS, sizeof(*p), NULL, 0);
868 }
869 
870 int drbd_send_uuids(struct drbd_peer_device *peer_device)
871 {
872 	return _drbd_send_uuids(peer_device, 0);
873 }
874 
875 int drbd_send_uuids_skip_initial_sync(struct drbd_peer_device *peer_device)
876 {
877 	return _drbd_send_uuids(peer_device, 8);
878 }
879 
880 void drbd_print_uuids(struct drbd_device *device, const char *text)
881 {
882 	if (get_ldev_if_state(device, D_NEGOTIATING)) {
883 		u64 *uuid = device->ldev->md.uuid;
884 		drbd_info(device, "%s %016llX:%016llX:%016llX:%016llX\n",
885 		     text,
886 		     (unsigned long long)uuid[UI_CURRENT],
887 		     (unsigned long long)uuid[UI_BITMAP],
888 		     (unsigned long long)uuid[UI_HISTORY_START],
889 		     (unsigned long long)uuid[UI_HISTORY_END]);
890 		put_ldev(device);
891 	} else {
892 		drbd_info(device, "%s effective data uuid: %016llX\n",
893 				text,
894 				(unsigned long long)device->ed_uuid);
895 	}
896 }
897 
898 void drbd_gen_and_send_sync_uuid(struct drbd_peer_device *peer_device)
899 {
900 	struct drbd_device *device = peer_device->device;
901 	struct drbd_socket *sock;
902 	struct p_rs_uuid *p;
903 	u64 uuid;
904 
905 	D_ASSERT(device, device->state.disk == D_UP_TO_DATE);
906 
907 	uuid = device->ldev->md.uuid[UI_BITMAP];
908 	if (uuid && uuid != UUID_JUST_CREATED)
909 		uuid = uuid + UUID_NEW_BM_OFFSET;
910 	else
911 		get_random_bytes(&uuid, sizeof(u64));
912 	drbd_uuid_set(device, UI_BITMAP, uuid);
913 	drbd_print_uuids(device, "updated sync UUID");
914 	drbd_md_sync(device);
915 
916 	sock = &peer_device->connection->data;
917 	p = drbd_prepare_command(peer_device, sock);
918 	if (p) {
919 		p->uuid = cpu_to_be64(uuid);
920 		drbd_send_command(peer_device, sock, P_SYNC_UUID, sizeof(*p), NULL, 0);
921 	}
922 }
923 
924 /* communicated if (agreed_features & DRBD_FF_WSAME) */
925 void assign_p_sizes_qlim(struct drbd_device *device, struct p_sizes *p, struct request_queue *q)
926 {
927 	if (q) {
928 		p->qlim->physical_block_size = cpu_to_be32(queue_physical_block_size(q));
929 		p->qlim->logical_block_size = cpu_to_be32(queue_logical_block_size(q));
930 		p->qlim->alignment_offset = cpu_to_be32(queue_alignment_offset(q));
931 		p->qlim->io_min = cpu_to_be32(queue_io_min(q));
932 		p->qlim->io_opt = cpu_to_be32(queue_io_opt(q));
933 		p->qlim->discard_enabled = blk_queue_discard(q);
934 		p->qlim->discard_zeroes_data = queue_discard_zeroes_data(q);
935 		p->qlim->write_same_capable = !!q->limits.max_write_same_sectors;
936 	} else {
937 		q = device->rq_queue;
938 		p->qlim->physical_block_size = cpu_to_be32(queue_physical_block_size(q));
939 		p->qlim->logical_block_size = cpu_to_be32(queue_logical_block_size(q));
940 		p->qlim->alignment_offset = 0;
941 		p->qlim->io_min = cpu_to_be32(queue_io_min(q));
942 		p->qlim->io_opt = cpu_to_be32(queue_io_opt(q));
943 		p->qlim->discard_enabled = 0;
944 		p->qlim->discard_zeroes_data = 0;
945 		p->qlim->write_same_capable = 0;
946 	}
947 }
948 
949 int drbd_send_sizes(struct drbd_peer_device *peer_device, int trigger_reply, enum dds_flags flags)
950 {
951 	struct drbd_device *device = peer_device->device;
952 	struct drbd_socket *sock;
953 	struct p_sizes *p;
954 	sector_t d_size, u_size;
955 	int q_order_type;
956 	unsigned int max_bio_size;
957 	unsigned int packet_size;
958 
959 	sock = &peer_device->connection->data;
960 	p = drbd_prepare_command(peer_device, sock);
961 	if (!p)
962 		return -EIO;
963 
964 	packet_size = sizeof(*p);
965 	if (peer_device->connection->agreed_features & DRBD_FF_WSAME)
966 		packet_size += sizeof(p->qlim[0]);
967 
968 	memset(p, 0, packet_size);
969 	if (get_ldev_if_state(device, D_NEGOTIATING)) {
970 		struct request_queue *q = bdev_get_queue(device->ldev->backing_bdev);
971 		d_size = drbd_get_max_capacity(device->ldev);
972 		rcu_read_lock();
973 		u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
974 		rcu_read_unlock();
975 		q_order_type = drbd_queue_order_type(device);
976 		max_bio_size = queue_max_hw_sectors(q) << 9;
977 		max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE);
978 		assign_p_sizes_qlim(device, p, q);
979 		put_ldev(device);
980 	} else {
981 		d_size = 0;
982 		u_size = 0;
983 		q_order_type = QUEUE_ORDERED_NONE;
984 		max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
985 		assign_p_sizes_qlim(device, p, NULL);
986 	}
987 
988 	if (peer_device->connection->agreed_pro_version <= 94)
989 		max_bio_size = min(max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
990 	else if (peer_device->connection->agreed_pro_version < 100)
991 		max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE_P95);
992 
993 	p->d_size = cpu_to_be64(d_size);
994 	p->u_size = cpu_to_be64(u_size);
995 	p->c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(device->this_bdev));
996 	p->max_bio_size = cpu_to_be32(max_bio_size);
997 	p->queue_order_type = cpu_to_be16(q_order_type);
998 	p->dds_flags = cpu_to_be16(flags);
999 
1000 	return drbd_send_command(peer_device, sock, P_SIZES, packet_size, NULL, 0);
1001 }
1002 
1003 /**
1004  * drbd_send_current_state() - Sends the drbd state to the peer
1005  * @peer_device:	DRBD peer device.
1006  */
1007 int drbd_send_current_state(struct drbd_peer_device *peer_device)
1008 {
1009 	struct drbd_socket *sock;
1010 	struct p_state *p;
1011 
1012 	sock = &peer_device->connection->data;
1013 	p = drbd_prepare_command(peer_device, sock);
1014 	if (!p)
1015 		return -EIO;
1016 	p->state = cpu_to_be32(peer_device->device->state.i); /* Within the send mutex */
1017 	return drbd_send_command(peer_device, sock, P_STATE, sizeof(*p), NULL, 0);
1018 }
1019 
1020 /**
1021  * drbd_send_state() - After a state change, sends the new state to the peer
1022  * @peer_device:      DRBD peer device.
1023  * @state:     the state to send, not necessarily the current state.
1024  *
1025  * Each state change queues an "after_state_ch" work, which will eventually
1026  * send the resulting new state to the peer. If more state changes happen
1027  * between queuing and processing of the after_state_ch work, we still
1028  * want to send each intermediary state in the order it occurred.
1029  */
1030 int drbd_send_state(struct drbd_peer_device *peer_device, union drbd_state state)
1031 {
1032 	struct drbd_socket *sock;
1033 	struct p_state *p;
1034 
1035 	sock = &peer_device->connection->data;
1036 	p = drbd_prepare_command(peer_device, sock);
1037 	if (!p)
1038 		return -EIO;
1039 	p->state = cpu_to_be32(state.i); /* Within the send mutex */
1040 	return drbd_send_command(peer_device, sock, P_STATE, sizeof(*p), NULL, 0);
1041 }
1042 
1043 int drbd_send_state_req(struct drbd_peer_device *peer_device, union drbd_state mask, union drbd_state val)
1044 {
1045 	struct drbd_socket *sock;
1046 	struct p_req_state *p;
1047 
1048 	sock = &peer_device->connection->data;
1049 	p = drbd_prepare_command(peer_device, sock);
1050 	if (!p)
1051 		return -EIO;
1052 	p->mask = cpu_to_be32(mask.i);
1053 	p->val = cpu_to_be32(val.i);
1054 	return drbd_send_command(peer_device, sock, P_STATE_CHG_REQ, sizeof(*p), NULL, 0);
1055 }
1056 
1057 int conn_send_state_req(struct drbd_connection *connection, union drbd_state mask, union drbd_state val)
1058 {
1059 	enum drbd_packet cmd;
1060 	struct drbd_socket *sock;
1061 	struct p_req_state *p;
1062 
1063 	cmd = connection->agreed_pro_version < 100 ? P_STATE_CHG_REQ : P_CONN_ST_CHG_REQ;
1064 	sock = &connection->data;
1065 	p = conn_prepare_command(connection, sock);
1066 	if (!p)
1067 		return -EIO;
1068 	p->mask = cpu_to_be32(mask.i);
1069 	p->val = cpu_to_be32(val.i);
1070 	return conn_send_command(connection, sock, cmd, sizeof(*p), NULL, 0);
1071 }
1072 
1073 void drbd_send_sr_reply(struct drbd_peer_device *peer_device, enum drbd_state_rv retcode)
1074 {
1075 	struct drbd_socket *sock;
1076 	struct p_req_state_reply *p;
1077 
1078 	sock = &peer_device->connection->meta;
1079 	p = drbd_prepare_command(peer_device, sock);
1080 	if (p) {
1081 		p->retcode = cpu_to_be32(retcode);
1082 		drbd_send_command(peer_device, sock, P_STATE_CHG_REPLY, sizeof(*p), NULL, 0);
1083 	}
1084 }
1085 
1086 void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode)
1087 {
1088 	struct drbd_socket *sock;
1089 	struct p_req_state_reply *p;
1090 	enum drbd_packet cmd = connection->agreed_pro_version < 100 ? P_STATE_CHG_REPLY : P_CONN_ST_CHG_REPLY;
1091 
1092 	sock = &connection->meta;
1093 	p = conn_prepare_command(connection, sock);
1094 	if (p) {
1095 		p->retcode = cpu_to_be32(retcode);
1096 		conn_send_command(connection, sock, cmd, sizeof(*p), NULL, 0);
1097 	}
1098 }
1099 
1100 static void dcbp_set_code(struct p_compressed_bm *p, enum drbd_bitmap_code code)
1101 {
1102 	BUG_ON(code & ~0xf);
1103 	p->encoding = (p->encoding & ~0xf) | code;
1104 }
1105 
1106 static void dcbp_set_start(struct p_compressed_bm *p, int set)
1107 {
1108 	p->encoding = (p->encoding & ~0x80) | (set ? 0x80 : 0);
1109 }
1110 
1111 static void dcbp_set_pad_bits(struct p_compressed_bm *p, int n)
1112 {
1113 	BUG_ON(n & ~0x7);
1114 	p->encoding = (p->encoding & (~0x7 << 4)) | (n << 4);
1115 }
1116 
1117 static int fill_bitmap_rle_bits(struct drbd_device *device,
1118 			 struct p_compressed_bm *p,
1119 			 unsigned int size,
1120 			 struct bm_xfer_ctx *c)
1121 {
1122 	struct bitstream bs;
1123 	unsigned long plain_bits;
1124 	unsigned long tmp;
1125 	unsigned long rl;
1126 	unsigned len;
1127 	unsigned toggle;
1128 	int bits, use_rle;
1129 
1130 	/* may we use this feature? */
1131 	rcu_read_lock();
1132 	use_rle = rcu_dereference(first_peer_device(device)->connection->net_conf)->use_rle;
1133 	rcu_read_unlock();
1134 	if (!use_rle || first_peer_device(device)->connection->agreed_pro_version < 90)
1135 		return 0;
1136 
1137 	if (c->bit_offset >= c->bm_bits)
1138 		return 0; /* nothing to do. */
1139 
1140 	/* use at most thus many bytes */
1141 	bitstream_init(&bs, p->code, size, 0);
1142 	memset(p->code, 0, size);
1143 	/* plain bits covered in this code string */
1144 	plain_bits = 0;
1145 
1146 	/* p->encoding & 0x80 stores whether the first run length is set.
1147 	 * bit offset is implicit.
1148 	 * start with toggle == 2 to be able to tell the first iteration */
1149 	toggle = 2;
1150 
1151 	/* see how much plain bits we can stuff into one packet
1152 	 * using RLE and VLI. */
1153 	do {
1154 		tmp = (toggle == 0) ? _drbd_bm_find_next_zero(device, c->bit_offset)
1155 				    : _drbd_bm_find_next(device, c->bit_offset);
1156 		if (tmp == -1UL)
1157 			tmp = c->bm_bits;
1158 		rl = tmp - c->bit_offset;
1159 
1160 		if (toggle == 2) { /* first iteration */
1161 			if (rl == 0) {
1162 				/* the first checked bit was set,
1163 				 * store start value, */
1164 				dcbp_set_start(p, 1);
1165 				/* but skip encoding of zero run length */
1166 				toggle = !toggle;
1167 				continue;
1168 			}
1169 			dcbp_set_start(p, 0);
1170 		}
1171 
1172 		/* paranoia: catch zero runlength.
1173 		 * can only happen if bitmap is modified while we scan it. */
1174 		if (rl == 0) {
1175 			drbd_err(device, "unexpected zero runlength while encoding bitmap "
1176 			    "t:%u bo:%lu\n", toggle, c->bit_offset);
1177 			return -1;
1178 		}
1179 
1180 		bits = vli_encode_bits(&bs, rl);
1181 		if (bits == -ENOBUFS) /* buffer full */
1182 			break;
1183 		if (bits <= 0) {
1184 			drbd_err(device, "error while encoding bitmap: %d\n", bits);
1185 			return 0;
1186 		}
1187 
1188 		toggle = !toggle;
1189 		plain_bits += rl;
1190 		c->bit_offset = tmp;
1191 	} while (c->bit_offset < c->bm_bits);
1192 
1193 	len = bs.cur.b - p->code + !!bs.cur.bit;
1194 
1195 	if (plain_bits < (len << 3)) {
1196 		/* incompressible with this method.
1197 		 * we need to rewind both word and bit position. */
1198 		c->bit_offset -= plain_bits;
1199 		bm_xfer_ctx_bit_to_word_offset(c);
1200 		c->bit_offset = c->word_offset * BITS_PER_LONG;
1201 		return 0;
1202 	}
1203 
1204 	/* RLE + VLI was able to compress it just fine.
1205 	 * update c->word_offset. */
1206 	bm_xfer_ctx_bit_to_word_offset(c);
1207 
1208 	/* store pad_bits */
1209 	dcbp_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);
1210 
1211 	return len;
1212 }
1213 
1214 /**
1215  * send_bitmap_rle_or_plain
1216  *
1217  * Return 0 when done, 1 when another iteration is needed, and a negative error
1218  * code upon failure.
1219  */
1220 static int
1221 send_bitmap_rle_or_plain(struct drbd_device *device, struct bm_xfer_ctx *c)
1222 {
1223 	struct drbd_socket *sock = &first_peer_device(device)->connection->data;
1224 	unsigned int header_size = drbd_header_size(first_peer_device(device)->connection);
1225 	struct p_compressed_bm *p = sock->sbuf + header_size;
1226 	int len, err;
1227 
1228 	len = fill_bitmap_rle_bits(device, p,
1229 			DRBD_SOCKET_BUFFER_SIZE - header_size - sizeof(*p), c);
1230 	if (len < 0)
1231 		return -EIO;
1232 
1233 	if (len) {
1234 		dcbp_set_code(p, RLE_VLI_Bits);
1235 		err = __send_command(first_peer_device(device)->connection, device->vnr, sock,
1236 				     P_COMPRESSED_BITMAP, sizeof(*p) + len,
1237 				     NULL, 0);
1238 		c->packets[0]++;
1239 		c->bytes[0] += header_size + sizeof(*p) + len;
1240 
1241 		if (c->bit_offset >= c->bm_bits)
1242 			len = 0; /* DONE */
1243 	} else {
1244 		/* was not compressible.
1245 		 * send a buffer full of plain text bits instead. */
1246 		unsigned int data_size;
1247 		unsigned long num_words;
1248 		unsigned long *p = sock->sbuf + header_size;
1249 
1250 		data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
1251 		num_words = min_t(size_t, data_size / sizeof(*p),
1252 				  c->bm_words - c->word_offset);
1253 		len = num_words * sizeof(*p);
1254 		if (len)
1255 			drbd_bm_get_lel(device, c->word_offset, num_words, p);
1256 		err = __send_command(first_peer_device(device)->connection, device->vnr, sock, P_BITMAP, len, NULL, 0);
1257 		c->word_offset += num_words;
1258 		c->bit_offset = c->word_offset * BITS_PER_LONG;
1259 
1260 		c->packets[1]++;
1261 		c->bytes[1] += header_size + len;
1262 
1263 		if (c->bit_offset > c->bm_bits)
1264 			c->bit_offset = c->bm_bits;
1265 	}
1266 	if (!err) {
1267 		if (len == 0) {
1268 			INFO_bm_xfer_stats(device, "send", c);
1269 			return 0;
1270 		} else
1271 			return 1;
1272 	}
1273 	return -EIO;
1274 }
1275 
1276 /* See the comment at receive_bitmap() */
1277 static int _drbd_send_bitmap(struct drbd_device *device)
1278 {
1279 	struct bm_xfer_ctx c;
1280 	int err;
1281 
1282 	if (!expect(device->bitmap))
1283 		return false;
1284 
1285 	if (get_ldev(device)) {
1286 		if (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC)) {
1287 			drbd_info(device, "Writing the whole bitmap, MDF_FullSync was set.\n");
1288 			drbd_bm_set_all(device);
1289 			if (drbd_bm_write(device)) {
1290 				/* write_bm did fail! Leave full sync flag set in Meta P_DATA
1291 				 * but otherwise process as per normal - need to tell other
1292 				 * side that a full resync is required! */
1293 				drbd_err(device, "Failed to write bitmap to disk!\n");
1294 			} else {
1295 				drbd_md_clear_flag(device, MDF_FULL_SYNC);
1296 				drbd_md_sync(device);
1297 			}
1298 		}
1299 		put_ldev(device);
1300 	}
1301 
1302 	c = (struct bm_xfer_ctx) {
1303 		.bm_bits = drbd_bm_bits(device),
1304 		.bm_words = drbd_bm_words(device),
1305 	};
1306 
1307 	do {
1308 		err = send_bitmap_rle_or_plain(device, &c);
1309 	} while (err > 0);
1310 
1311 	return err == 0;
1312 }
1313 
1314 int drbd_send_bitmap(struct drbd_device *device)
1315 {
1316 	struct drbd_socket *sock = &first_peer_device(device)->connection->data;
1317 	int err = -1;
1318 
1319 	mutex_lock(&sock->mutex);
1320 	if (sock->socket)
1321 		err = !_drbd_send_bitmap(device);
1322 	mutex_unlock(&sock->mutex);
1323 	return err;
1324 }
1325 
1326 void drbd_send_b_ack(struct drbd_connection *connection, u32 barrier_nr, u32 set_size)
1327 {
1328 	struct drbd_socket *sock;
1329 	struct p_barrier_ack *p;
1330 
1331 	if (connection->cstate < C_WF_REPORT_PARAMS)
1332 		return;
1333 
1334 	sock = &connection->meta;
1335 	p = conn_prepare_command(connection, sock);
1336 	if (!p)
1337 		return;
1338 	p->barrier = barrier_nr;
1339 	p->set_size = cpu_to_be32(set_size);
1340 	conn_send_command(connection, sock, P_BARRIER_ACK, sizeof(*p), NULL, 0);
1341 }
1342 
1343 /**
1344  * _drbd_send_ack() - Sends an ack packet
1345  * @device:	DRBD device.
1346  * @cmd:	Packet command code.
1347  * @sector:	sector, needs to be in big endian byte order
1348  * @blksize:	size in byte, needs to be in big endian byte order
1349  * @block_id:	Id, big endian byte order
1350  */
1351 static int _drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
1352 			  u64 sector, u32 blksize, u64 block_id)
1353 {
1354 	struct drbd_socket *sock;
1355 	struct p_block_ack *p;
1356 
1357 	if (peer_device->device->state.conn < C_CONNECTED)
1358 		return -EIO;
1359 
1360 	sock = &peer_device->connection->meta;
1361 	p = drbd_prepare_command(peer_device, sock);
1362 	if (!p)
1363 		return -EIO;
1364 	p->sector = sector;
1365 	p->block_id = block_id;
1366 	p->blksize = blksize;
1367 	p->seq_num = cpu_to_be32(atomic_inc_return(&peer_device->device->packet_seq));
1368 	return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
1369 }
1370 
1371 /* dp->sector and dp->block_id already/still in network byte order,
1372  * data_size is payload size according to dp->head,
1373  * and may need to be corrected for digest size. */
1374 void drbd_send_ack_dp(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
1375 		      struct p_data *dp, int data_size)
1376 {
1377 	if (peer_device->connection->peer_integrity_tfm)
1378 		data_size -= crypto_ahash_digestsize(peer_device->connection->peer_integrity_tfm);
1379 	_drbd_send_ack(peer_device, cmd, dp->sector, cpu_to_be32(data_size),
1380 		       dp->block_id);
1381 }
1382 
1383 void drbd_send_ack_rp(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
1384 		      struct p_block_req *rp)
1385 {
1386 	_drbd_send_ack(peer_device, cmd, rp->sector, rp->blksize, rp->block_id);
1387 }
1388 
1389 /**
1390  * drbd_send_ack() - Sends an ack packet
1391  * @device:	DRBD device
1392  * @cmd:	packet command code
1393  * @peer_req:	peer request
1394  */
1395 int drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
1396 		  struct drbd_peer_request *peer_req)
1397 {
1398 	return _drbd_send_ack(peer_device, cmd,
1399 			      cpu_to_be64(peer_req->i.sector),
1400 			      cpu_to_be32(peer_req->i.size),
1401 			      peer_req->block_id);
1402 }
1403 
1404 /* This function misuses the block_id field to signal if the blocks
1405  * are is sync or not. */
1406 int drbd_send_ack_ex(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
1407 		     sector_t sector, int blksize, u64 block_id)
1408 {
1409 	return _drbd_send_ack(peer_device, cmd,
1410 			      cpu_to_be64(sector),
1411 			      cpu_to_be32(blksize),
1412 			      cpu_to_be64(block_id));
1413 }
1414 
1415 int drbd_send_rs_deallocated(struct drbd_peer_device *peer_device,
1416 			     struct drbd_peer_request *peer_req)
1417 {
1418 	struct drbd_socket *sock;
1419 	struct p_block_desc *p;
1420 
1421 	sock = &peer_device->connection->data;
1422 	p = drbd_prepare_command(peer_device, sock);
1423 	if (!p)
1424 		return -EIO;
1425 	p->sector = cpu_to_be64(peer_req->i.sector);
1426 	p->blksize = cpu_to_be32(peer_req->i.size);
1427 	p->pad = 0;
1428 	return drbd_send_command(peer_device, sock, P_RS_DEALLOCATED, sizeof(*p), NULL, 0);
1429 }
1430 
1431 int drbd_send_drequest(struct drbd_peer_device *peer_device, int cmd,
1432 		       sector_t sector, int size, u64 block_id)
1433 {
1434 	struct drbd_socket *sock;
1435 	struct p_block_req *p;
1436 
1437 	sock = &peer_device->connection->data;
1438 	p = drbd_prepare_command(peer_device, sock);
1439 	if (!p)
1440 		return -EIO;
1441 	p->sector = cpu_to_be64(sector);
1442 	p->block_id = block_id;
1443 	p->blksize = cpu_to_be32(size);
1444 	return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
1445 }
1446 
1447 int drbd_send_drequest_csum(struct drbd_peer_device *peer_device, sector_t sector, int size,
1448 			    void *digest, int digest_size, enum drbd_packet cmd)
1449 {
1450 	struct drbd_socket *sock;
1451 	struct p_block_req *p;
1452 
1453 	/* FIXME: Put the digest into the preallocated socket buffer.  */
1454 
1455 	sock = &peer_device->connection->data;
1456 	p = drbd_prepare_command(peer_device, sock);
1457 	if (!p)
1458 		return -EIO;
1459 	p->sector = cpu_to_be64(sector);
1460 	p->block_id = ID_SYNCER /* unused */;
1461 	p->blksize = cpu_to_be32(size);
1462 	return drbd_send_command(peer_device, sock, cmd, sizeof(*p), digest, digest_size);
1463 }
1464 
1465 int drbd_send_ov_request(struct drbd_peer_device *peer_device, sector_t sector, int size)
1466 {
1467 	struct drbd_socket *sock;
1468 	struct p_block_req *p;
1469 
1470 	sock = &peer_device->connection->data;
1471 	p = drbd_prepare_command(peer_device, sock);
1472 	if (!p)
1473 		return -EIO;
1474 	p->sector = cpu_to_be64(sector);
1475 	p->block_id = ID_SYNCER /* unused */;
1476 	p->blksize = cpu_to_be32(size);
1477 	return drbd_send_command(peer_device, sock, P_OV_REQUEST, sizeof(*p), NULL, 0);
1478 }
1479 
1480 /* called on sndtimeo
1481  * returns false if we should retry,
1482  * true if we think connection is dead
1483  */
1484 static int we_should_drop_the_connection(struct drbd_connection *connection, struct socket *sock)
1485 {
1486 	int drop_it;
1487 	/* long elapsed = (long)(jiffies - device->last_received); */
1488 
1489 	drop_it =   connection->meta.socket == sock
1490 		|| !connection->ack_receiver.task
1491 		|| get_t_state(&connection->ack_receiver) != RUNNING
1492 		|| connection->cstate < C_WF_REPORT_PARAMS;
1493 
1494 	if (drop_it)
1495 		return true;
1496 
1497 	drop_it = !--connection->ko_count;
1498 	if (!drop_it) {
1499 		drbd_err(connection, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
1500 			 current->comm, current->pid, connection->ko_count);
1501 		request_ping(connection);
1502 	}
1503 
1504 	return drop_it; /* && (device->state == R_PRIMARY) */;
1505 }
1506 
1507 static void drbd_update_congested(struct drbd_connection *connection)
1508 {
1509 	struct sock *sk = connection->data.socket->sk;
1510 	if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
1511 		set_bit(NET_CONGESTED, &connection->flags);
1512 }
1513 
1514 /* The idea of sendpage seems to be to put some kind of reference
1515  * to the page into the skb, and to hand it over to the NIC. In
1516  * this process get_page() gets called.
1517  *
1518  * As soon as the page was really sent over the network put_page()
1519  * gets called by some part of the network layer. [ NIC driver? ]
1520  *
1521  * [ get_page() / put_page() increment/decrement the count. If count
1522  *   reaches 0 the page will be freed. ]
1523  *
1524  * This works nicely with pages from FSs.
1525  * But this means that in protocol A we might signal IO completion too early!
1526  *
1527  * In order not to corrupt data during a resync we must make sure
1528  * that we do not reuse our own buffer pages (EEs) to early, therefore
1529  * we have the net_ee list.
1530  *
1531  * XFS seems to have problems, still, it submits pages with page_count == 0!
1532  * As a workaround, we disable sendpage on pages
1533  * with page_count == 0 or PageSlab.
1534  */
1535 static int _drbd_no_send_page(struct drbd_peer_device *peer_device, struct page *page,
1536 			      int offset, size_t size, unsigned msg_flags)
1537 {
1538 	struct socket *socket;
1539 	void *addr;
1540 	int err;
1541 
1542 	socket = peer_device->connection->data.socket;
1543 	addr = kmap(page) + offset;
1544 	err = drbd_send_all(peer_device->connection, socket, addr, size, msg_flags);
1545 	kunmap(page);
1546 	if (!err)
1547 		peer_device->device->send_cnt += size >> 9;
1548 	return err;
1549 }
1550 
1551 static int _drbd_send_page(struct drbd_peer_device *peer_device, struct page *page,
1552 		    int offset, size_t size, unsigned msg_flags)
1553 {
1554 	struct socket *socket = peer_device->connection->data.socket;
1555 	mm_segment_t oldfs = get_fs();
1556 	int len = size;
1557 	int err = -EIO;
1558 
1559 	/* e.g. XFS meta- & log-data is in slab pages, which have a
1560 	 * page_count of 0 and/or have PageSlab() set.
1561 	 * we cannot use send_page for those, as that does get_page();
1562 	 * put_page(); and would cause either a VM_BUG directly, or
1563 	 * __page_cache_release a page that would actually still be referenced
1564 	 * by someone, leading to some obscure delayed Oops somewhere else. */
1565 	if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
1566 		return _drbd_no_send_page(peer_device, page, offset, size, msg_flags);
1567 
1568 	msg_flags |= MSG_NOSIGNAL;
1569 	drbd_update_congested(peer_device->connection);
1570 	set_fs(KERNEL_DS);
1571 	do {
1572 		int sent;
1573 
1574 		sent = socket->ops->sendpage(socket, page, offset, len, msg_flags);
1575 		if (sent <= 0) {
1576 			if (sent == -EAGAIN) {
1577 				if (we_should_drop_the_connection(peer_device->connection, socket))
1578 					break;
1579 				continue;
1580 			}
1581 			drbd_warn(peer_device->device, "%s: size=%d len=%d sent=%d\n",
1582 			     __func__, (int)size, len, sent);
1583 			if (sent < 0)
1584 				err = sent;
1585 			break;
1586 		}
1587 		len    -= sent;
1588 		offset += sent;
1589 	} while (len > 0 /* THINK && device->cstate >= C_CONNECTED*/);
1590 	set_fs(oldfs);
1591 	clear_bit(NET_CONGESTED, &peer_device->connection->flags);
1592 
1593 	if (len == 0) {
1594 		err = 0;
1595 		peer_device->device->send_cnt += size >> 9;
1596 	}
1597 	return err;
1598 }
1599 
1600 static int _drbd_send_bio(struct drbd_peer_device *peer_device, struct bio *bio)
1601 {
1602 	struct bio_vec bvec;
1603 	struct bvec_iter iter;
1604 
1605 	/* hint all but last page with MSG_MORE */
1606 	bio_for_each_segment(bvec, bio, iter) {
1607 		int err;
1608 
1609 		err = _drbd_no_send_page(peer_device, bvec.bv_page,
1610 					 bvec.bv_offset, bvec.bv_len,
1611 					 bio_iter_last(bvec, iter)
1612 					 ? 0 : MSG_MORE);
1613 		if (err)
1614 			return err;
1615 		/* REQ_OP_WRITE_SAME has only one segment */
1616 		if (bio_op(bio) == REQ_OP_WRITE_SAME)
1617 			break;
1618 	}
1619 	return 0;
1620 }
1621 
1622 static int _drbd_send_zc_bio(struct drbd_peer_device *peer_device, struct bio *bio)
1623 {
1624 	struct bio_vec bvec;
1625 	struct bvec_iter iter;
1626 
1627 	/* hint all but last page with MSG_MORE */
1628 	bio_for_each_segment(bvec, bio, iter) {
1629 		int err;
1630 
1631 		err = _drbd_send_page(peer_device, bvec.bv_page,
1632 				      bvec.bv_offset, bvec.bv_len,
1633 				      bio_iter_last(bvec, iter) ? 0 : MSG_MORE);
1634 		if (err)
1635 			return err;
1636 		/* REQ_OP_WRITE_SAME has only one segment */
1637 		if (bio_op(bio) == REQ_OP_WRITE_SAME)
1638 			break;
1639 	}
1640 	return 0;
1641 }
1642 
1643 static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device,
1644 			    struct drbd_peer_request *peer_req)
1645 {
1646 	struct page *page = peer_req->pages;
1647 	unsigned len = peer_req->i.size;
1648 	int err;
1649 
1650 	/* hint all but last page with MSG_MORE */
1651 	page_chain_for_each(page) {
1652 		unsigned l = min_t(unsigned, len, PAGE_SIZE);
1653 
1654 		err = _drbd_send_page(peer_device, page, 0, l,
1655 				      page_chain_next(page) ? MSG_MORE : 0);
1656 		if (err)
1657 			return err;
1658 		len -= l;
1659 	}
1660 	return 0;
1661 }
1662 
1663 static u32 bio_flags_to_wire(struct drbd_connection *connection,
1664 			     struct bio *bio)
1665 {
1666 	if (connection->agreed_pro_version >= 95)
1667 		return  (bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0) |
1668 			(bio->bi_opf & REQ_FUA ? DP_FUA : 0) |
1669 			(bio->bi_opf & REQ_PREFLUSH ? DP_FLUSH : 0) |
1670 			(bio_op(bio) == REQ_OP_WRITE_SAME ? DP_WSAME : 0) |
1671 			(bio_op(bio) == REQ_OP_DISCARD ? DP_DISCARD : 0);
1672 	else
1673 		return bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0;
1674 }
1675 
1676 /* Used to send write or TRIM aka REQ_DISCARD requests
1677  * R_PRIMARY -> Peer	(P_DATA, P_TRIM)
1678  */
1679 int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *req)
1680 {
1681 	struct drbd_device *device = peer_device->device;
1682 	struct drbd_socket *sock;
1683 	struct p_data *p;
1684 	struct p_wsame *wsame = NULL;
1685 	void *digest_out;
1686 	unsigned int dp_flags = 0;
1687 	int digest_size;
1688 	int err;
1689 
1690 	sock = &peer_device->connection->data;
1691 	p = drbd_prepare_command(peer_device, sock);
1692 	digest_size = peer_device->connection->integrity_tfm ?
1693 		      crypto_ahash_digestsize(peer_device->connection->integrity_tfm) : 0;
1694 
1695 	if (!p)
1696 		return -EIO;
1697 	p->sector = cpu_to_be64(req->i.sector);
1698 	p->block_id = (unsigned long)req;
1699 	p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
1700 	dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio);
1701 	if (device->state.conn >= C_SYNC_SOURCE &&
1702 	    device->state.conn <= C_PAUSED_SYNC_T)
1703 		dp_flags |= DP_MAY_SET_IN_SYNC;
1704 	if (peer_device->connection->agreed_pro_version >= 100) {
1705 		if (req->rq_state & RQ_EXP_RECEIVE_ACK)
1706 			dp_flags |= DP_SEND_RECEIVE_ACK;
1707 		/* During resync, request an explicit write ack,
1708 		 * even in protocol != C */
1709 		if (req->rq_state & RQ_EXP_WRITE_ACK
1710 		|| (dp_flags & DP_MAY_SET_IN_SYNC))
1711 			dp_flags |= DP_SEND_WRITE_ACK;
1712 	}
1713 	p->dp_flags = cpu_to_be32(dp_flags);
1714 
1715 	if (dp_flags & DP_DISCARD) {
1716 		struct p_trim *t = (struct p_trim*)p;
1717 		t->size = cpu_to_be32(req->i.size);
1718 		err = __send_command(peer_device->connection, device->vnr, sock, P_TRIM, sizeof(*t), NULL, 0);
1719 		goto out;
1720 	}
1721 	if (dp_flags & DP_WSAME) {
1722 		/* this will only work if DRBD_FF_WSAME is set AND the
1723 		 * handshake agreed that all nodes and backend devices are
1724 		 * WRITE_SAME capable and agree on logical_block_size */
1725 		wsame = (struct p_wsame*)p;
1726 		digest_out = wsame + 1;
1727 		wsame->size = cpu_to_be32(req->i.size);
1728 	} else
1729 		digest_out = p + 1;
1730 
1731 	/* our digest is still only over the payload.
1732 	 * TRIM does not carry any payload. */
1733 	if (digest_size)
1734 		drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, digest_out);
1735 	if (wsame) {
1736 		err =
1737 		    __send_command(peer_device->connection, device->vnr, sock, P_WSAME,
1738 				   sizeof(*wsame) + digest_size, NULL,
1739 				   bio_iovec(req->master_bio).bv_len);
1740 	} else
1741 		err =
1742 		    __send_command(peer_device->connection, device->vnr, sock, P_DATA,
1743 				   sizeof(*p) + digest_size, NULL, req->i.size);
1744 	if (!err) {
1745 		/* For protocol A, we have to memcpy the payload into
1746 		 * socket buffers, as we may complete right away
1747 		 * as soon as we handed it over to tcp, at which point the data
1748 		 * pages may become invalid.
1749 		 *
1750 		 * For data-integrity enabled, we copy it as well, so we can be
1751 		 * sure that even if the bio pages may still be modified, it
1752 		 * won't change the data on the wire, thus if the digest checks
1753 		 * out ok after sending on this side, but does not fit on the
1754 		 * receiving side, we sure have detected corruption elsewhere.
1755 		 */
1756 		if (!(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK)) || digest_size)
1757 			err = _drbd_send_bio(peer_device, req->master_bio);
1758 		else
1759 			err = _drbd_send_zc_bio(peer_device, req->master_bio);
1760 
1761 		/* double check digest, sometimes buffers have been modified in flight. */
1762 		if (digest_size > 0 && digest_size <= 64) {
1763 			/* 64 byte, 512 bit, is the largest digest size
1764 			 * currently supported in kernel crypto. */
1765 			unsigned char digest[64];
1766 			drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, digest);
1767 			if (memcmp(p + 1, digest, digest_size)) {
1768 				drbd_warn(device,
1769 					"Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
1770 					(unsigned long long)req->i.sector, req->i.size);
1771 			}
1772 		} /* else if (digest_size > 64) {
1773 		     ... Be noisy about digest too large ...
1774 		} */
1775 	}
1776 out:
1777 	mutex_unlock(&sock->mutex);  /* locked by drbd_prepare_command() */
1778 
1779 	return err;
1780 }
1781 
1782 /* answer packet, used to send data back for read requests:
1783  *  Peer       -> (diskless) R_PRIMARY   (P_DATA_REPLY)
1784  *  C_SYNC_SOURCE -> C_SYNC_TARGET         (P_RS_DATA_REPLY)
1785  */
1786 int drbd_send_block(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
1787 		    struct drbd_peer_request *peer_req)
1788 {
1789 	struct drbd_device *device = peer_device->device;
1790 	struct drbd_socket *sock;
1791 	struct p_data *p;
1792 	int err;
1793 	int digest_size;
1794 
1795 	sock = &peer_device->connection->data;
1796 	p = drbd_prepare_command(peer_device, sock);
1797 
1798 	digest_size = peer_device->connection->integrity_tfm ?
1799 		      crypto_ahash_digestsize(peer_device->connection->integrity_tfm) : 0;
1800 
1801 	if (!p)
1802 		return -EIO;
1803 	p->sector = cpu_to_be64(peer_req->i.sector);
1804 	p->block_id = peer_req->block_id;
1805 	p->seq_num = 0;  /* unused */
1806 	p->dp_flags = 0;
1807 	if (digest_size)
1808 		drbd_csum_ee(peer_device->connection->integrity_tfm, peer_req, p + 1);
1809 	err = __send_command(peer_device->connection, device->vnr, sock, cmd, sizeof(*p) + digest_size, NULL, peer_req->i.size);
1810 	if (!err)
1811 		err = _drbd_send_zc_ee(peer_device, peer_req);
1812 	mutex_unlock(&sock->mutex);  /* locked by drbd_prepare_command() */
1813 
1814 	return err;
1815 }
1816 
1817 int drbd_send_out_of_sync(struct drbd_peer_device *peer_device, struct drbd_request *req)
1818 {
1819 	struct drbd_socket *sock;
1820 	struct p_block_desc *p;
1821 
1822 	sock = &peer_device->connection->data;
1823 	p = drbd_prepare_command(peer_device, sock);
1824 	if (!p)
1825 		return -EIO;
1826 	p->sector = cpu_to_be64(req->i.sector);
1827 	p->blksize = cpu_to_be32(req->i.size);
1828 	return drbd_send_command(peer_device, sock, P_OUT_OF_SYNC, sizeof(*p), NULL, 0);
1829 }
1830 
1831 /*
1832   drbd_send distinguishes two cases:
1833 
1834   Packets sent via the data socket "sock"
1835   and packets sent via the meta data socket "msock"
1836 
1837 		    sock                      msock
1838   -----------------+-------------------------+------------------------------
1839   timeout           conf.timeout / 2          conf.timeout / 2
1840   timeout action    send a ping via msock     Abort communication
1841 					      and close all sockets
1842 */
1843 
1844 /*
1845  * you must have down()ed the appropriate [m]sock_mutex elsewhere!
1846  */
1847 int drbd_send(struct drbd_connection *connection, struct socket *sock,
1848 	      void *buf, size_t size, unsigned msg_flags)
1849 {
1850 	struct kvec iov = {.iov_base = buf, .iov_len = size};
1851 	struct msghdr msg;
1852 	int rv, sent = 0;
1853 
1854 	if (!sock)
1855 		return -EBADR;
1856 
1857 	/* THINK  if (signal_pending) return ... ? */
1858 
1859 	msg.msg_name       = NULL;
1860 	msg.msg_namelen    = 0;
1861 	msg.msg_control    = NULL;
1862 	msg.msg_controllen = 0;
1863 	msg.msg_flags      = msg_flags | MSG_NOSIGNAL;
1864 
1865 	iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1, size);
1866 
1867 	if (sock == connection->data.socket) {
1868 		rcu_read_lock();
1869 		connection->ko_count = rcu_dereference(connection->net_conf)->ko_count;
1870 		rcu_read_unlock();
1871 		drbd_update_congested(connection);
1872 	}
1873 	do {
1874 		rv = sock_sendmsg(sock, &msg);
1875 		if (rv == -EAGAIN) {
1876 			if (we_should_drop_the_connection(connection, sock))
1877 				break;
1878 			else
1879 				continue;
1880 		}
1881 		if (rv == -EINTR) {
1882 			flush_signals(current);
1883 			rv = 0;
1884 		}
1885 		if (rv < 0)
1886 			break;
1887 		sent += rv;
1888 	} while (sent < size);
1889 
1890 	if (sock == connection->data.socket)
1891 		clear_bit(NET_CONGESTED, &connection->flags);
1892 
1893 	if (rv <= 0) {
1894 		if (rv != -EAGAIN) {
1895 			drbd_err(connection, "%s_sendmsg returned %d\n",
1896 				 sock == connection->meta.socket ? "msock" : "sock",
1897 				 rv);
1898 			conn_request_state(connection, NS(conn, C_BROKEN_PIPE), CS_HARD);
1899 		} else
1900 			conn_request_state(connection, NS(conn, C_TIMEOUT), CS_HARD);
1901 	}
1902 
1903 	return sent;
1904 }
1905 
1906 /**
1907  * drbd_send_all  -  Send an entire buffer
1908  *
1909  * Returns 0 upon success and a negative error value otherwise.
1910  */
1911 int drbd_send_all(struct drbd_connection *connection, struct socket *sock, void *buffer,
1912 		  size_t size, unsigned msg_flags)
1913 {
1914 	int err;
1915 
1916 	err = drbd_send(connection, sock, buffer, size, msg_flags);
1917 	if (err < 0)
1918 		return err;
1919 	if (err != size)
1920 		return -EIO;
1921 	return 0;
1922 }
1923 
1924 static int drbd_open(struct block_device *bdev, fmode_t mode)
1925 {
1926 	struct drbd_device *device = bdev->bd_disk->private_data;
1927 	unsigned long flags;
1928 	int rv = 0;
1929 
1930 	mutex_lock(&drbd_main_mutex);
1931 	spin_lock_irqsave(&device->resource->req_lock, flags);
1932 	/* to have a stable device->state.role
1933 	 * and no race with updating open_cnt */
1934 
1935 	if (device->state.role != R_PRIMARY) {
1936 		if (mode & FMODE_WRITE)
1937 			rv = -EROFS;
1938 		else if (!allow_oos)
1939 			rv = -EMEDIUMTYPE;
1940 	}
1941 
1942 	if (!rv)
1943 		device->open_cnt++;
1944 	spin_unlock_irqrestore(&device->resource->req_lock, flags);
1945 	mutex_unlock(&drbd_main_mutex);
1946 
1947 	return rv;
1948 }
1949 
1950 static void drbd_release(struct gendisk *gd, fmode_t mode)
1951 {
1952 	struct drbd_device *device = gd->private_data;
1953 	mutex_lock(&drbd_main_mutex);
1954 	device->open_cnt--;
1955 	mutex_unlock(&drbd_main_mutex);
1956 }
1957 
1958 static void drbd_set_defaults(struct drbd_device *device)
1959 {
1960 	/* Beware! The actual layout differs
1961 	 * between big endian and little endian */
1962 	device->state = (union drbd_dev_state) {
1963 		{ .role = R_SECONDARY,
1964 		  .peer = R_UNKNOWN,
1965 		  .conn = C_STANDALONE,
1966 		  .disk = D_DISKLESS,
1967 		  .pdsk = D_UNKNOWN,
1968 		} };
1969 }
1970 
1971 void drbd_init_set_defaults(struct drbd_device *device)
1972 {
1973 	/* the memset(,0,) did most of this.
1974 	 * note: only assignments, no allocation in here */
1975 
1976 	drbd_set_defaults(device);
1977 
1978 	atomic_set(&device->ap_bio_cnt, 0);
1979 	atomic_set(&device->ap_actlog_cnt, 0);
1980 	atomic_set(&device->ap_pending_cnt, 0);
1981 	atomic_set(&device->rs_pending_cnt, 0);
1982 	atomic_set(&device->unacked_cnt, 0);
1983 	atomic_set(&device->local_cnt, 0);
1984 	atomic_set(&device->pp_in_use_by_net, 0);
1985 	atomic_set(&device->rs_sect_in, 0);
1986 	atomic_set(&device->rs_sect_ev, 0);
1987 	atomic_set(&device->ap_in_flight, 0);
1988 	atomic_set(&device->md_io.in_use, 0);
1989 
1990 	mutex_init(&device->own_state_mutex);
1991 	device->state_mutex = &device->own_state_mutex;
1992 
1993 	spin_lock_init(&device->al_lock);
1994 	spin_lock_init(&device->peer_seq_lock);
1995 
1996 	INIT_LIST_HEAD(&device->active_ee);
1997 	INIT_LIST_HEAD(&device->sync_ee);
1998 	INIT_LIST_HEAD(&device->done_ee);
1999 	INIT_LIST_HEAD(&device->read_ee);
2000 	INIT_LIST_HEAD(&device->net_ee);
2001 	INIT_LIST_HEAD(&device->resync_reads);
2002 	INIT_LIST_HEAD(&device->resync_work.list);
2003 	INIT_LIST_HEAD(&device->unplug_work.list);
2004 	INIT_LIST_HEAD(&device->bm_io_work.w.list);
2005 	INIT_LIST_HEAD(&device->pending_master_completion[0]);
2006 	INIT_LIST_HEAD(&device->pending_master_completion[1]);
2007 	INIT_LIST_HEAD(&device->pending_completion[0]);
2008 	INIT_LIST_HEAD(&device->pending_completion[1]);
2009 
2010 	device->resync_work.cb  = w_resync_timer;
2011 	device->unplug_work.cb  = w_send_write_hint;
2012 	device->bm_io_work.w.cb = w_bitmap_io;
2013 
2014 	init_timer(&device->resync_timer);
2015 	init_timer(&device->md_sync_timer);
2016 	init_timer(&device->start_resync_timer);
2017 	init_timer(&device->request_timer);
2018 	device->resync_timer.function = resync_timer_fn;
2019 	device->resync_timer.data = (unsigned long) device;
2020 	device->md_sync_timer.function = md_sync_timer_fn;
2021 	device->md_sync_timer.data = (unsigned long) device;
2022 	device->start_resync_timer.function = start_resync_timer_fn;
2023 	device->start_resync_timer.data = (unsigned long) device;
2024 	device->request_timer.function = request_timer_fn;
2025 	device->request_timer.data = (unsigned long) device;
2026 
2027 	init_waitqueue_head(&device->misc_wait);
2028 	init_waitqueue_head(&device->state_wait);
2029 	init_waitqueue_head(&device->ee_wait);
2030 	init_waitqueue_head(&device->al_wait);
2031 	init_waitqueue_head(&device->seq_wait);
2032 
2033 	device->resync_wenr = LC_FREE;
2034 	device->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
2035 	device->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
2036 }
2037 
2038 void drbd_device_cleanup(struct drbd_device *device)
2039 {
2040 	int i;
2041 	if (first_peer_device(device)->connection->receiver.t_state != NONE)
2042 		drbd_err(device, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
2043 				first_peer_device(device)->connection->receiver.t_state);
2044 
2045 	device->al_writ_cnt  =
2046 	device->bm_writ_cnt  =
2047 	device->read_cnt     =
2048 	device->recv_cnt     =
2049 	device->send_cnt     =
2050 	device->writ_cnt     =
2051 	device->p_size       =
2052 	device->rs_start     =
2053 	device->rs_total     =
2054 	device->rs_failed    = 0;
2055 	device->rs_last_events = 0;
2056 	device->rs_last_sect_ev = 0;
2057 	for (i = 0; i < DRBD_SYNC_MARKS; i++) {
2058 		device->rs_mark_left[i] = 0;
2059 		device->rs_mark_time[i] = 0;
2060 	}
2061 	D_ASSERT(device, first_peer_device(device)->connection->net_conf == NULL);
2062 
2063 	drbd_set_my_capacity(device, 0);
2064 	if (device->bitmap) {
2065 		/* maybe never allocated. */
2066 		drbd_bm_resize(device, 0, 1);
2067 		drbd_bm_cleanup(device);
2068 	}
2069 
2070 	drbd_backing_dev_free(device, device->ldev);
2071 	device->ldev = NULL;
2072 
2073 	clear_bit(AL_SUSPENDED, &device->flags);
2074 
2075 	D_ASSERT(device, list_empty(&device->active_ee));
2076 	D_ASSERT(device, list_empty(&device->sync_ee));
2077 	D_ASSERT(device, list_empty(&device->done_ee));
2078 	D_ASSERT(device, list_empty(&device->read_ee));
2079 	D_ASSERT(device, list_empty(&device->net_ee));
2080 	D_ASSERT(device, list_empty(&device->resync_reads));
2081 	D_ASSERT(device, list_empty(&first_peer_device(device)->connection->sender_work.q));
2082 	D_ASSERT(device, list_empty(&device->resync_work.list));
2083 	D_ASSERT(device, list_empty(&device->unplug_work.list));
2084 
2085 	drbd_set_defaults(device);
2086 }
2087 
2088 
2089 static void drbd_destroy_mempools(void)
2090 {
2091 	struct page *page;
2092 
2093 	while (drbd_pp_pool) {
2094 		page = drbd_pp_pool;
2095 		drbd_pp_pool = (struct page *)page_private(page);
2096 		__free_page(page);
2097 		drbd_pp_vacant--;
2098 	}
2099 
2100 	/* D_ASSERT(device, atomic_read(&drbd_pp_vacant)==0); */
2101 
2102 	if (drbd_md_io_bio_set)
2103 		bioset_free(drbd_md_io_bio_set);
2104 	if (drbd_md_io_page_pool)
2105 		mempool_destroy(drbd_md_io_page_pool);
2106 	if (drbd_ee_mempool)
2107 		mempool_destroy(drbd_ee_mempool);
2108 	if (drbd_request_mempool)
2109 		mempool_destroy(drbd_request_mempool);
2110 	if (drbd_ee_cache)
2111 		kmem_cache_destroy(drbd_ee_cache);
2112 	if (drbd_request_cache)
2113 		kmem_cache_destroy(drbd_request_cache);
2114 	if (drbd_bm_ext_cache)
2115 		kmem_cache_destroy(drbd_bm_ext_cache);
2116 	if (drbd_al_ext_cache)
2117 		kmem_cache_destroy(drbd_al_ext_cache);
2118 
2119 	drbd_md_io_bio_set   = NULL;
2120 	drbd_md_io_page_pool = NULL;
2121 	drbd_ee_mempool      = NULL;
2122 	drbd_request_mempool = NULL;
2123 	drbd_ee_cache        = NULL;
2124 	drbd_request_cache   = NULL;
2125 	drbd_bm_ext_cache    = NULL;
2126 	drbd_al_ext_cache    = NULL;
2127 
2128 	return;
2129 }
2130 
2131 static int drbd_create_mempools(void)
2132 {
2133 	struct page *page;
2134 	const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count;
2135 	int i;
2136 
2137 	/* prepare our caches and mempools */
2138 	drbd_request_mempool = NULL;
2139 	drbd_ee_cache        = NULL;
2140 	drbd_request_cache   = NULL;
2141 	drbd_bm_ext_cache    = NULL;
2142 	drbd_al_ext_cache    = NULL;
2143 	drbd_pp_pool         = NULL;
2144 	drbd_md_io_page_pool = NULL;
2145 	drbd_md_io_bio_set   = NULL;
2146 
2147 	/* caches */
2148 	drbd_request_cache = kmem_cache_create(
2149 		"drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
2150 	if (drbd_request_cache == NULL)
2151 		goto Enomem;
2152 
2153 	drbd_ee_cache = kmem_cache_create(
2154 		"drbd_ee", sizeof(struct drbd_peer_request), 0, 0, NULL);
2155 	if (drbd_ee_cache == NULL)
2156 		goto Enomem;
2157 
2158 	drbd_bm_ext_cache = kmem_cache_create(
2159 		"drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
2160 	if (drbd_bm_ext_cache == NULL)
2161 		goto Enomem;
2162 
2163 	drbd_al_ext_cache = kmem_cache_create(
2164 		"drbd_al", sizeof(struct lc_element), 0, 0, NULL);
2165 	if (drbd_al_ext_cache == NULL)
2166 		goto Enomem;
2167 
2168 	/* mempools */
2169 	drbd_md_io_bio_set = bioset_create(DRBD_MIN_POOL_PAGES, 0);
2170 	if (drbd_md_io_bio_set == NULL)
2171 		goto Enomem;
2172 
2173 	drbd_md_io_page_pool = mempool_create_page_pool(DRBD_MIN_POOL_PAGES, 0);
2174 	if (drbd_md_io_page_pool == NULL)
2175 		goto Enomem;
2176 
2177 	drbd_request_mempool = mempool_create_slab_pool(number,
2178 		drbd_request_cache);
2179 	if (drbd_request_mempool == NULL)
2180 		goto Enomem;
2181 
2182 	drbd_ee_mempool = mempool_create_slab_pool(number, drbd_ee_cache);
2183 	if (drbd_ee_mempool == NULL)
2184 		goto Enomem;
2185 
2186 	/* drbd's page pool */
2187 	spin_lock_init(&drbd_pp_lock);
2188 
2189 	for (i = 0; i < number; i++) {
2190 		page = alloc_page(GFP_HIGHUSER);
2191 		if (!page)
2192 			goto Enomem;
2193 		set_page_private(page, (unsigned long)drbd_pp_pool);
2194 		drbd_pp_pool = page;
2195 	}
2196 	drbd_pp_vacant = number;
2197 
2198 	return 0;
2199 
2200 Enomem:
2201 	drbd_destroy_mempools(); /* in case we allocated some */
2202 	return -ENOMEM;
2203 }
2204 
2205 static void drbd_release_all_peer_reqs(struct drbd_device *device)
2206 {
2207 	int rr;
2208 
2209 	rr = drbd_free_peer_reqs(device, &device->active_ee);
2210 	if (rr)
2211 		drbd_err(device, "%d EEs in active list found!\n", rr);
2212 
2213 	rr = drbd_free_peer_reqs(device, &device->sync_ee);
2214 	if (rr)
2215 		drbd_err(device, "%d EEs in sync list found!\n", rr);
2216 
2217 	rr = drbd_free_peer_reqs(device, &device->read_ee);
2218 	if (rr)
2219 		drbd_err(device, "%d EEs in read list found!\n", rr);
2220 
2221 	rr = drbd_free_peer_reqs(device, &device->done_ee);
2222 	if (rr)
2223 		drbd_err(device, "%d EEs in done list found!\n", rr);
2224 
2225 	rr = drbd_free_peer_reqs(device, &device->net_ee);
2226 	if (rr)
2227 		drbd_err(device, "%d EEs in net list found!\n", rr);
2228 }
2229 
2230 /* caution. no locking. */
2231 void drbd_destroy_device(struct kref *kref)
2232 {
2233 	struct drbd_device *device = container_of(kref, struct drbd_device, kref);
2234 	struct drbd_resource *resource = device->resource;
2235 	struct drbd_peer_device *peer_device, *tmp_peer_device;
2236 
2237 	del_timer_sync(&device->request_timer);
2238 
2239 	/* paranoia asserts */
2240 	D_ASSERT(device, device->open_cnt == 0);
2241 	/* end paranoia asserts */
2242 
2243 	/* cleanup stuff that may have been allocated during
2244 	 * device (re-)configuration or state changes */
2245 
2246 	if (device->this_bdev)
2247 		bdput(device->this_bdev);
2248 
2249 	drbd_backing_dev_free(device, device->ldev);
2250 	device->ldev = NULL;
2251 
2252 	drbd_release_all_peer_reqs(device);
2253 
2254 	lc_destroy(device->act_log);
2255 	lc_destroy(device->resync);
2256 
2257 	kfree(device->p_uuid);
2258 	/* device->p_uuid = NULL; */
2259 
2260 	if (device->bitmap) /* should no longer be there. */
2261 		drbd_bm_cleanup(device);
2262 	__free_page(device->md_io.page);
2263 	put_disk(device->vdisk);
2264 	blk_cleanup_queue(device->rq_queue);
2265 	kfree(device->rs_plan_s);
2266 
2267 	/* not for_each_connection(connection, resource):
2268 	 * those may have been cleaned up and disassociated already.
2269 	 */
2270 	for_each_peer_device_safe(peer_device, tmp_peer_device, device) {
2271 		kref_put(&peer_device->connection->kref, drbd_destroy_connection);
2272 		kfree(peer_device);
2273 	}
2274 	memset(device, 0xfd, sizeof(*device));
2275 	kfree(device);
2276 	kref_put(&resource->kref, drbd_destroy_resource);
2277 }
2278 
2279 /* One global retry thread, if we need to push back some bio and have it
2280  * reinserted through our make request function.
2281  */
2282 static struct retry_worker {
2283 	struct workqueue_struct *wq;
2284 	struct work_struct worker;
2285 
2286 	spinlock_t lock;
2287 	struct list_head writes;
2288 } retry;
2289 
2290 static void do_retry(struct work_struct *ws)
2291 {
2292 	struct retry_worker *retry = container_of(ws, struct retry_worker, worker);
2293 	LIST_HEAD(writes);
2294 	struct drbd_request *req, *tmp;
2295 
2296 	spin_lock_irq(&retry->lock);
2297 	list_splice_init(&retry->writes, &writes);
2298 	spin_unlock_irq(&retry->lock);
2299 
2300 	list_for_each_entry_safe(req, tmp, &writes, tl_requests) {
2301 		struct drbd_device *device = req->device;
2302 		struct bio *bio = req->master_bio;
2303 		unsigned long start_jif = req->start_jif;
2304 		bool expected;
2305 
2306 		expected =
2307 			expect(atomic_read(&req->completion_ref) == 0) &&
2308 			expect(req->rq_state & RQ_POSTPONED) &&
2309 			expect((req->rq_state & RQ_LOCAL_PENDING) == 0 ||
2310 				(req->rq_state & RQ_LOCAL_ABORTED) != 0);
2311 
2312 		if (!expected)
2313 			drbd_err(device, "req=%p completion_ref=%d rq_state=%x\n",
2314 				req, atomic_read(&req->completion_ref),
2315 				req->rq_state);
2316 
2317 		/* We still need to put one kref associated with the
2318 		 * "completion_ref" going zero in the code path that queued it
2319 		 * here.  The request object may still be referenced by a
2320 		 * frozen local req->private_bio, in case we force-detached.
2321 		 */
2322 		kref_put(&req->kref, drbd_req_destroy);
2323 
2324 		/* A single suspended or otherwise blocking device may stall
2325 		 * all others as well.  Fortunately, this code path is to
2326 		 * recover from a situation that "should not happen":
2327 		 * concurrent writes in multi-primary setup.
2328 		 * In a "normal" lifecycle, this workqueue is supposed to be
2329 		 * destroyed without ever doing anything.
2330 		 * If it turns out to be an issue anyways, we can do per
2331 		 * resource (replication group) or per device (minor) retry
2332 		 * workqueues instead.
2333 		 */
2334 
2335 		/* We are not just doing generic_make_request(),
2336 		 * as we want to keep the start_time information. */
2337 		inc_ap_bio(device);
2338 		__drbd_make_request(device, bio, start_jif);
2339 	}
2340 }
2341 
2342 /* called via drbd_req_put_completion_ref(),
2343  * holds resource->req_lock */
2344 void drbd_restart_request(struct drbd_request *req)
2345 {
2346 	unsigned long flags;
2347 	spin_lock_irqsave(&retry.lock, flags);
2348 	list_move_tail(&req->tl_requests, &retry.writes);
2349 	spin_unlock_irqrestore(&retry.lock, flags);
2350 
2351 	/* Drop the extra reference that would otherwise
2352 	 * have been dropped by complete_master_bio.
2353 	 * do_retry() needs to grab a new one. */
2354 	dec_ap_bio(req->device);
2355 
2356 	queue_work(retry.wq, &retry.worker);
2357 }
2358 
2359 void drbd_destroy_resource(struct kref *kref)
2360 {
2361 	struct drbd_resource *resource =
2362 		container_of(kref, struct drbd_resource, kref);
2363 
2364 	idr_destroy(&resource->devices);
2365 	free_cpumask_var(resource->cpu_mask);
2366 	kfree(resource->name);
2367 	memset(resource, 0xf2, sizeof(*resource));
2368 	kfree(resource);
2369 }
2370 
2371 void drbd_free_resource(struct drbd_resource *resource)
2372 {
2373 	struct drbd_connection *connection, *tmp;
2374 
2375 	for_each_connection_safe(connection, tmp, resource) {
2376 		list_del(&connection->connections);
2377 		drbd_debugfs_connection_cleanup(connection);
2378 		kref_put(&connection->kref, drbd_destroy_connection);
2379 	}
2380 	drbd_debugfs_resource_cleanup(resource);
2381 	kref_put(&resource->kref, drbd_destroy_resource);
2382 }
2383 
2384 static void drbd_cleanup(void)
2385 {
2386 	unsigned int i;
2387 	struct drbd_device *device;
2388 	struct drbd_resource *resource, *tmp;
2389 
2390 	/* first remove proc,
2391 	 * drbdsetup uses it's presence to detect
2392 	 * whether DRBD is loaded.
2393 	 * If we would get stuck in proc removal,
2394 	 * but have netlink already deregistered,
2395 	 * some drbdsetup commands may wait forever
2396 	 * for an answer.
2397 	 */
2398 	if (drbd_proc)
2399 		remove_proc_entry("drbd", NULL);
2400 
2401 	if (retry.wq)
2402 		destroy_workqueue(retry.wq);
2403 
2404 	drbd_genl_unregister();
2405 	drbd_debugfs_cleanup();
2406 
2407 	idr_for_each_entry(&drbd_devices, device, i)
2408 		drbd_delete_device(device);
2409 
2410 	/* not _rcu since, no other updater anymore. Genl already unregistered */
2411 	for_each_resource_safe(resource, tmp, &drbd_resources) {
2412 		list_del(&resource->resources);
2413 		drbd_free_resource(resource);
2414 	}
2415 
2416 	drbd_destroy_mempools();
2417 	unregister_blkdev(DRBD_MAJOR, "drbd");
2418 
2419 	idr_destroy(&drbd_devices);
2420 
2421 	pr_info("module cleanup done.\n");
2422 }
2423 
2424 /**
2425  * drbd_congested() - Callback for the flusher thread
2426  * @congested_data:	User data
2427  * @bdi_bits:		Bits the BDI flusher thread is currently interested in
2428  *
2429  * Returns 1<<WB_async_congested and/or 1<<WB_sync_congested if we are congested.
2430  */
2431 static int drbd_congested(void *congested_data, int bdi_bits)
2432 {
2433 	struct drbd_device *device = congested_data;
2434 	struct request_queue *q;
2435 	char reason = '-';
2436 	int r = 0;
2437 
2438 	if (!may_inc_ap_bio(device)) {
2439 		/* DRBD has frozen IO */
2440 		r = bdi_bits;
2441 		reason = 'd';
2442 		goto out;
2443 	}
2444 
2445 	if (test_bit(CALLBACK_PENDING, &first_peer_device(device)->connection->flags)) {
2446 		r |= (1 << WB_async_congested);
2447 		/* Without good local data, we would need to read from remote,
2448 		 * and that would need the worker thread as well, which is
2449 		 * currently blocked waiting for that usermode helper to
2450 		 * finish.
2451 		 */
2452 		if (!get_ldev_if_state(device, D_UP_TO_DATE))
2453 			r |= (1 << WB_sync_congested);
2454 		else
2455 			put_ldev(device);
2456 		r &= bdi_bits;
2457 		reason = 'c';
2458 		goto out;
2459 	}
2460 
2461 	if (get_ldev(device)) {
2462 		q = bdev_get_queue(device->ldev->backing_bdev);
2463 		r = bdi_congested(q->backing_dev_info, bdi_bits);
2464 		put_ldev(device);
2465 		if (r)
2466 			reason = 'b';
2467 	}
2468 
2469 	if (bdi_bits & (1 << WB_async_congested) &&
2470 	    test_bit(NET_CONGESTED, &first_peer_device(device)->connection->flags)) {
2471 		r |= (1 << WB_async_congested);
2472 		reason = reason == 'b' ? 'a' : 'n';
2473 	}
2474 
2475 out:
2476 	device->congestion_reason = reason;
2477 	return r;
2478 }
2479 
2480 static void drbd_init_workqueue(struct drbd_work_queue* wq)
2481 {
2482 	spin_lock_init(&wq->q_lock);
2483 	INIT_LIST_HEAD(&wq->q);
2484 	init_waitqueue_head(&wq->q_wait);
2485 }
2486 
2487 struct completion_work {
2488 	struct drbd_work w;
2489 	struct completion done;
2490 };
2491 
2492 static int w_complete(struct drbd_work *w, int cancel)
2493 {
2494 	struct completion_work *completion_work =
2495 		container_of(w, struct completion_work, w);
2496 
2497 	complete(&completion_work->done);
2498 	return 0;
2499 }
2500 
2501 void drbd_flush_workqueue(struct drbd_work_queue *work_queue)
2502 {
2503 	struct completion_work completion_work;
2504 
2505 	completion_work.w.cb = w_complete;
2506 	init_completion(&completion_work.done);
2507 	drbd_queue_work(work_queue, &completion_work.w);
2508 	wait_for_completion(&completion_work.done);
2509 }
2510 
2511 struct drbd_resource *drbd_find_resource(const char *name)
2512 {
2513 	struct drbd_resource *resource;
2514 
2515 	if (!name || !name[0])
2516 		return NULL;
2517 
2518 	rcu_read_lock();
2519 	for_each_resource_rcu(resource, &drbd_resources) {
2520 		if (!strcmp(resource->name, name)) {
2521 			kref_get(&resource->kref);
2522 			goto found;
2523 		}
2524 	}
2525 	resource = NULL;
2526 found:
2527 	rcu_read_unlock();
2528 	return resource;
2529 }
2530 
2531 struct drbd_connection *conn_get_by_addrs(void *my_addr, int my_addr_len,
2532 				     void *peer_addr, int peer_addr_len)
2533 {
2534 	struct drbd_resource *resource;
2535 	struct drbd_connection *connection;
2536 
2537 	rcu_read_lock();
2538 	for_each_resource_rcu(resource, &drbd_resources) {
2539 		for_each_connection_rcu(connection, resource) {
2540 			if (connection->my_addr_len == my_addr_len &&
2541 			    connection->peer_addr_len == peer_addr_len &&
2542 			    !memcmp(&connection->my_addr, my_addr, my_addr_len) &&
2543 			    !memcmp(&connection->peer_addr, peer_addr, peer_addr_len)) {
2544 				kref_get(&connection->kref);
2545 				goto found;
2546 			}
2547 		}
2548 	}
2549 	connection = NULL;
2550 found:
2551 	rcu_read_unlock();
2552 	return connection;
2553 }
2554 
2555 static int drbd_alloc_socket(struct drbd_socket *socket)
2556 {
2557 	socket->rbuf = (void *) __get_free_page(GFP_KERNEL);
2558 	if (!socket->rbuf)
2559 		return -ENOMEM;
2560 	socket->sbuf = (void *) __get_free_page(GFP_KERNEL);
2561 	if (!socket->sbuf)
2562 		return -ENOMEM;
2563 	return 0;
2564 }
2565 
2566 static void drbd_free_socket(struct drbd_socket *socket)
2567 {
2568 	free_page((unsigned long) socket->sbuf);
2569 	free_page((unsigned long) socket->rbuf);
2570 }
2571 
2572 void conn_free_crypto(struct drbd_connection *connection)
2573 {
2574 	drbd_free_sock(connection);
2575 
2576 	crypto_free_ahash(connection->csums_tfm);
2577 	crypto_free_ahash(connection->verify_tfm);
2578 	crypto_free_shash(connection->cram_hmac_tfm);
2579 	crypto_free_ahash(connection->integrity_tfm);
2580 	crypto_free_ahash(connection->peer_integrity_tfm);
2581 	kfree(connection->int_dig_in);
2582 	kfree(connection->int_dig_vv);
2583 
2584 	connection->csums_tfm = NULL;
2585 	connection->verify_tfm = NULL;
2586 	connection->cram_hmac_tfm = NULL;
2587 	connection->integrity_tfm = NULL;
2588 	connection->peer_integrity_tfm = NULL;
2589 	connection->int_dig_in = NULL;
2590 	connection->int_dig_vv = NULL;
2591 }
2592 
2593 int set_resource_options(struct drbd_resource *resource, struct res_opts *res_opts)
2594 {
2595 	struct drbd_connection *connection;
2596 	cpumask_var_t new_cpu_mask;
2597 	int err;
2598 
2599 	if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL))
2600 		return -ENOMEM;
2601 
2602 	/* silently ignore cpu mask on UP kernel */
2603 	if (nr_cpu_ids > 1 && res_opts->cpu_mask[0] != 0) {
2604 		err = bitmap_parse(res_opts->cpu_mask, DRBD_CPU_MASK_SIZE,
2605 				   cpumask_bits(new_cpu_mask), nr_cpu_ids);
2606 		if (err == -EOVERFLOW) {
2607 			/* So what. mask it out. */
2608 			cpumask_var_t tmp_cpu_mask;
2609 			if (zalloc_cpumask_var(&tmp_cpu_mask, GFP_KERNEL)) {
2610 				cpumask_setall(tmp_cpu_mask);
2611 				cpumask_and(new_cpu_mask, new_cpu_mask, tmp_cpu_mask);
2612 				drbd_warn(resource, "Overflow in bitmap_parse(%.12s%s), truncating to %u bits\n",
2613 					res_opts->cpu_mask,
2614 					strlen(res_opts->cpu_mask) > 12 ? "..." : "",
2615 					nr_cpu_ids);
2616 				free_cpumask_var(tmp_cpu_mask);
2617 				err = 0;
2618 			}
2619 		}
2620 		if (err) {
2621 			drbd_warn(resource, "bitmap_parse() failed with %d\n", err);
2622 			/* retcode = ERR_CPU_MASK_PARSE; */
2623 			goto fail;
2624 		}
2625 	}
2626 	resource->res_opts = *res_opts;
2627 	if (cpumask_empty(new_cpu_mask))
2628 		drbd_calc_cpu_mask(&new_cpu_mask);
2629 	if (!cpumask_equal(resource->cpu_mask, new_cpu_mask)) {
2630 		cpumask_copy(resource->cpu_mask, new_cpu_mask);
2631 		for_each_connection_rcu(connection, resource) {
2632 			connection->receiver.reset_cpu_mask = 1;
2633 			connection->ack_receiver.reset_cpu_mask = 1;
2634 			connection->worker.reset_cpu_mask = 1;
2635 		}
2636 	}
2637 	err = 0;
2638 
2639 fail:
2640 	free_cpumask_var(new_cpu_mask);
2641 	return err;
2642 
2643 }
2644 
2645 struct drbd_resource *drbd_create_resource(const char *name)
2646 {
2647 	struct drbd_resource *resource;
2648 
2649 	resource = kzalloc(sizeof(struct drbd_resource), GFP_KERNEL);
2650 	if (!resource)
2651 		goto fail;
2652 	resource->name = kstrdup(name, GFP_KERNEL);
2653 	if (!resource->name)
2654 		goto fail_free_resource;
2655 	if (!zalloc_cpumask_var(&resource->cpu_mask, GFP_KERNEL))
2656 		goto fail_free_name;
2657 	kref_init(&resource->kref);
2658 	idr_init(&resource->devices);
2659 	INIT_LIST_HEAD(&resource->connections);
2660 	resource->write_ordering = WO_BDEV_FLUSH;
2661 	list_add_tail_rcu(&resource->resources, &drbd_resources);
2662 	mutex_init(&resource->conf_update);
2663 	mutex_init(&resource->adm_mutex);
2664 	spin_lock_init(&resource->req_lock);
2665 	drbd_debugfs_resource_add(resource);
2666 	return resource;
2667 
2668 fail_free_name:
2669 	kfree(resource->name);
2670 fail_free_resource:
2671 	kfree(resource);
2672 fail:
2673 	return NULL;
2674 }
2675 
2676 /* caller must be under adm_mutex */
2677 struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts)
2678 {
2679 	struct drbd_resource *resource;
2680 	struct drbd_connection *connection;
2681 
2682 	connection = kzalloc(sizeof(struct drbd_connection), GFP_KERNEL);
2683 	if (!connection)
2684 		return NULL;
2685 
2686 	if (drbd_alloc_socket(&connection->data))
2687 		goto fail;
2688 	if (drbd_alloc_socket(&connection->meta))
2689 		goto fail;
2690 
2691 	connection->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
2692 	if (!connection->current_epoch)
2693 		goto fail;
2694 
2695 	INIT_LIST_HEAD(&connection->transfer_log);
2696 
2697 	INIT_LIST_HEAD(&connection->current_epoch->list);
2698 	connection->epochs = 1;
2699 	spin_lock_init(&connection->epoch_lock);
2700 
2701 	connection->send.seen_any_write_yet = false;
2702 	connection->send.current_epoch_nr = 0;
2703 	connection->send.current_epoch_writes = 0;
2704 
2705 	resource = drbd_create_resource(name);
2706 	if (!resource)
2707 		goto fail;
2708 
2709 	connection->cstate = C_STANDALONE;
2710 	mutex_init(&connection->cstate_mutex);
2711 	init_waitqueue_head(&connection->ping_wait);
2712 	idr_init(&connection->peer_devices);
2713 
2714 	drbd_init_workqueue(&connection->sender_work);
2715 	mutex_init(&connection->data.mutex);
2716 	mutex_init(&connection->meta.mutex);
2717 
2718 	drbd_thread_init(resource, &connection->receiver, drbd_receiver, "receiver");
2719 	connection->receiver.connection = connection;
2720 	drbd_thread_init(resource, &connection->worker, drbd_worker, "worker");
2721 	connection->worker.connection = connection;
2722 	drbd_thread_init(resource, &connection->ack_receiver, drbd_ack_receiver, "ack_recv");
2723 	connection->ack_receiver.connection = connection;
2724 
2725 	kref_init(&connection->kref);
2726 
2727 	connection->resource = resource;
2728 
2729 	if (set_resource_options(resource, res_opts))
2730 		goto fail_resource;
2731 
2732 	kref_get(&resource->kref);
2733 	list_add_tail_rcu(&connection->connections, &resource->connections);
2734 	drbd_debugfs_connection_add(connection);
2735 	return connection;
2736 
2737 fail_resource:
2738 	list_del(&resource->resources);
2739 	drbd_free_resource(resource);
2740 fail:
2741 	kfree(connection->current_epoch);
2742 	drbd_free_socket(&connection->meta);
2743 	drbd_free_socket(&connection->data);
2744 	kfree(connection);
2745 	return NULL;
2746 }
2747 
2748 void drbd_destroy_connection(struct kref *kref)
2749 {
2750 	struct drbd_connection *connection = container_of(kref, struct drbd_connection, kref);
2751 	struct drbd_resource *resource = connection->resource;
2752 
2753 	if (atomic_read(&connection->current_epoch->epoch_size) !=  0)
2754 		drbd_err(connection, "epoch_size:%d\n", atomic_read(&connection->current_epoch->epoch_size));
2755 	kfree(connection->current_epoch);
2756 
2757 	idr_destroy(&connection->peer_devices);
2758 
2759 	drbd_free_socket(&connection->meta);
2760 	drbd_free_socket(&connection->data);
2761 	kfree(connection->int_dig_in);
2762 	kfree(connection->int_dig_vv);
2763 	memset(connection, 0xfc, sizeof(*connection));
2764 	kfree(connection);
2765 	kref_put(&resource->kref, drbd_destroy_resource);
2766 }
2767 
2768 static int init_submitter(struct drbd_device *device)
2769 {
2770 	/* opencoded create_singlethread_workqueue(),
2771 	 * to be able to say "drbd%d", ..., minor */
2772 	device->submit.wq =
2773 		alloc_ordered_workqueue("drbd%u_submit", WQ_MEM_RECLAIM, device->minor);
2774 	if (!device->submit.wq)
2775 		return -ENOMEM;
2776 
2777 	INIT_WORK(&device->submit.worker, do_submit);
2778 	INIT_LIST_HEAD(&device->submit.writes);
2779 	return 0;
2780 }
2781 
2782 enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor)
2783 {
2784 	struct drbd_resource *resource = adm_ctx->resource;
2785 	struct drbd_connection *connection;
2786 	struct drbd_device *device;
2787 	struct drbd_peer_device *peer_device, *tmp_peer_device;
2788 	struct gendisk *disk;
2789 	struct request_queue *q;
2790 	int id;
2791 	int vnr = adm_ctx->volume;
2792 	enum drbd_ret_code err = ERR_NOMEM;
2793 
2794 	device = minor_to_device(minor);
2795 	if (device)
2796 		return ERR_MINOR_OR_VOLUME_EXISTS;
2797 
2798 	/* GFP_KERNEL, we are outside of all write-out paths */
2799 	device = kzalloc(sizeof(struct drbd_device), GFP_KERNEL);
2800 	if (!device)
2801 		return ERR_NOMEM;
2802 	kref_init(&device->kref);
2803 
2804 	kref_get(&resource->kref);
2805 	device->resource = resource;
2806 	device->minor = minor;
2807 	device->vnr = vnr;
2808 
2809 	drbd_init_set_defaults(device);
2810 
2811 	q = blk_alloc_queue(GFP_KERNEL);
2812 	if (!q)
2813 		goto out_no_q;
2814 	device->rq_queue = q;
2815 	q->queuedata   = device;
2816 
2817 	disk = alloc_disk(1);
2818 	if (!disk)
2819 		goto out_no_disk;
2820 	device->vdisk = disk;
2821 
2822 	set_disk_ro(disk, true);
2823 
2824 	disk->queue = q;
2825 	disk->major = DRBD_MAJOR;
2826 	disk->first_minor = minor;
2827 	disk->fops = &drbd_ops;
2828 	sprintf(disk->disk_name, "drbd%d", minor);
2829 	disk->private_data = device;
2830 
2831 	device->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
2832 	/* we have no partitions. we contain only ourselves. */
2833 	device->this_bdev->bd_contains = device->this_bdev;
2834 
2835 	q->backing_dev_info->congested_fn = drbd_congested;
2836 	q->backing_dev_info->congested_data = device;
2837 
2838 	blk_queue_make_request(q, drbd_make_request);
2839 	blk_queue_write_cache(q, true, true);
2840 	/* Setting the max_hw_sectors to an odd value of 8kibyte here
2841 	   This triggers a max_bio_size message upon first attach or connect */
2842 	blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
2843 	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
2844 	q->queue_lock = &resource->req_lock;
2845 
2846 	device->md_io.page = alloc_page(GFP_KERNEL);
2847 	if (!device->md_io.page)
2848 		goto out_no_io_page;
2849 
2850 	if (drbd_bm_init(device))
2851 		goto out_no_bitmap;
2852 	device->read_requests = RB_ROOT;
2853 	device->write_requests = RB_ROOT;
2854 
2855 	id = idr_alloc(&drbd_devices, device, minor, minor + 1, GFP_KERNEL);
2856 	if (id < 0) {
2857 		if (id == -ENOSPC)
2858 			err = ERR_MINOR_OR_VOLUME_EXISTS;
2859 		goto out_no_minor_idr;
2860 	}
2861 	kref_get(&device->kref);
2862 
2863 	id = idr_alloc(&resource->devices, device, vnr, vnr + 1, GFP_KERNEL);
2864 	if (id < 0) {
2865 		if (id == -ENOSPC)
2866 			err = ERR_MINOR_OR_VOLUME_EXISTS;
2867 		goto out_idr_remove_minor;
2868 	}
2869 	kref_get(&device->kref);
2870 
2871 	INIT_LIST_HEAD(&device->peer_devices);
2872 	INIT_LIST_HEAD(&device->pending_bitmap_io);
2873 	for_each_connection(connection, resource) {
2874 		peer_device = kzalloc(sizeof(struct drbd_peer_device), GFP_KERNEL);
2875 		if (!peer_device)
2876 			goto out_idr_remove_from_resource;
2877 		peer_device->connection = connection;
2878 		peer_device->device = device;
2879 
2880 		list_add(&peer_device->peer_devices, &device->peer_devices);
2881 		kref_get(&device->kref);
2882 
2883 		id = idr_alloc(&connection->peer_devices, peer_device, vnr, vnr + 1, GFP_KERNEL);
2884 		if (id < 0) {
2885 			if (id == -ENOSPC)
2886 				err = ERR_INVALID_REQUEST;
2887 			goto out_idr_remove_from_resource;
2888 		}
2889 		kref_get(&connection->kref);
2890 		INIT_WORK(&peer_device->send_acks_work, drbd_send_acks_wf);
2891 	}
2892 
2893 	if (init_submitter(device)) {
2894 		err = ERR_NOMEM;
2895 		goto out_idr_remove_vol;
2896 	}
2897 
2898 	add_disk(disk);
2899 
2900 	/* inherit the connection state */
2901 	device->state.conn = first_connection(resource)->cstate;
2902 	if (device->state.conn == C_WF_REPORT_PARAMS) {
2903 		for_each_peer_device(peer_device, device)
2904 			drbd_connected(peer_device);
2905 	}
2906 	/* move to create_peer_device() */
2907 	for_each_peer_device(peer_device, device)
2908 		drbd_debugfs_peer_device_add(peer_device);
2909 	drbd_debugfs_device_add(device);
2910 	return NO_ERROR;
2911 
2912 out_idr_remove_vol:
2913 	idr_remove(&connection->peer_devices, vnr);
2914 out_idr_remove_from_resource:
2915 	for_each_connection(connection, resource) {
2916 		peer_device = idr_remove(&connection->peer_devices, vnr);
2917 		if (peer_device)
2918 			kref_put(&connection->kref, drbd_destroy_connection);
2919 	}
2920 	for_each_peer_device_safe(peer_device, tmp_peer_device, device) {
2921 		list_del(&peer_device->peer_devices);
2922 		kfree(peer_device);
2923 	}
2924 	idr_remove(&resource->devices, vnr);
2925 out_idr_remove_minor:
2926 	idr_remove(&drbd_devices, minor);
2927 	synchronize_rcu();
2928 out_no_minor_idr:
2929 	drbd_bm_cleanup(device);
2930 out_no_bitmap:
2931 	__free_page(device->md_io.page);
2932 out_no_io_page:
2933 	put_disk(disk);
2934 out_no_disk:
2935 	blk_cleanup_queue(q);
2936 out_no_q:
2937 	kref_put(&resource->kref, drbd_destroy_resource);
2938 	kfree(device);
2939 	return err;
2940 }
2941 
2942 void drbd_delete_device(struct drbd_device *device)
2943 {
2944 	struct drbd_resource *resource = device->resource;
2945 	struct drbd_connection *connection;
2946 	struct drbd_peer_device *peer_device;
2947 
2948 	/* move to free_peer_device() */
2949 	for_each_peer_device(peer_device, device)
2950 		drbd_debugfs_peer_device_cleanup(peer_device);
2951 	drbd_debugfs_device_cleanup(device);
2952 	for_each_connection(connection, resource) {
2953 		idr_remove(&connection->peer_devices, device->vnr);
2954 		kref_put(&device->kref, drbd_destroy_device);
2955 	}
2956 	idr_remove(&resource->devices, device->vnr);
2957 	kref_put(&device->kref, drbd_destroy_device);
2958 	idr_remove(&drbd_devices, device_to_minor(device));
2959 	kref_put(&device->kref, drbd_destroy_device);
2960 	del_gendisk(device->vdisk);
2961 	synchronize_rcu();
2962 	kref_put(&device->kref, drbd_destroy_device);
2963 }
2964 
2965 static int __init drbd_init(void)
2966 {
2967 	int err;
2968 
2969 	if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) {
2970 		pr_err("invalid minor_count (%d)\n", minor_count);
2971 #ifdef MODULE
2972 		return -EINVAL;
2973 #else
2974 		minor_count = DRBD_MINOR_COUNT_DEF;
2975 #endif
2976 	}
2977 
2978 	err = register_blkdev(DRBD_MAJOR, "drbd");
2979 	if (err) {
2980 		pr_err("unable to register block device major %d\n",
2981 		       DRBD_MAJOR);
2982 		return err;
2983 	}
2984 
2985 	/*
2986 	 * allocate all necessary structs
2987 	 */
2988 	init_waitqueue_head(&drbd_pp_wait);
2989 
2990 	drbd_proc = NULL; /* play safe for drbd_cleanup */
2991 	idr_init(&drbd_devices);
2992 
2993 	mutex_init(&resources_mutex);
2994 	INIT_LIST_HEAD(&drbd_resources);
2995 
2996 	err = drbd_genl_register();
2997 	if (err) {
2998 		pr_err("unable to register generic netlink family\n");
2999 		goto fail;
3000 	}
3001 
3002 	err = drbd_create_mempools();
3003 	if (err)
3004 		goto fail;
3005 
3006 	err = -ENOMEM;
3007 	drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL);
3008 	if (!drbd_proc)	{
3009 		pr_err("unable to register proc file\n");
3010 		goto fail;
3011 	}
3012 
3013 	retry.wq = create_singlethread_workqueue("drbd-reissue");
3014 	if (!retry.wq) {
3015 		pr_err("unable to create retry workqueue\n");
3016 		goto fail;
3017 	}
3018 	INIT_WORK(&retry.worker, do_retry);
3019 	spin_lock_init(&retry.lock);
3020 	INIT_LIST_HEAD(&retry.writes);
3021 
3022 	if (drbd_debugfs_init())
3023 		pr_notice("failed to initialize debugfs -- will not be available\n");
3024 
3025 	pr_info("initialized. "
3026 	       "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
3027 	       API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
3028 	pr_info("%s\n", drbd_buildtag());
3029 	pr_info("registered as block device major %d\n", DRBD_MAJOR);
3030 	return 0; /* Success! */
3031 
3032 fail:
3033 	drbd_cleanup();
3034 	if (err == -ENOMEM)
3035 		pr_err("ran out of memory\n");
3036 	else
3037 		pr_err("initialization failure\n");
3038 	return err;
3039 }
3040 
3041 static void drbd_free_one_sock(struct drbd_socket *ds)
3042 {
3043 	struct socket *s;
3044 	mutex_lock(&ds->mutex);
3045 	s = ds->socket;
3046 	ds->socket = NULL;
3047 	mutex_unlock(&ds->mutex);
3048 	if (s) {
3049 		/* so debugfs does not need to mutex_lock() */
3050 		synchronize_rcu();
3051 		kernel_sock_shutdown(s, SHUT_RDWR);
3052 		sock_release(s);
3053 	}
3054 }
3055 
3056 void drbd_free_sock(struct drbd_connection *connection)
3057 {
3058 	if (connection->data.socket)
3059 		drbd_free_one_sock(&connection->data);
3060 	if (connection->meta.socket)
3061 		drbd_free_one_sock(&connection->meta);
3062 }
3063 
3064 /* meta data management */
3065 
3066 void conn_md_sync(struct drbd_connection *connection)
3067 {
3068 	struct drbd_peer_device *peer_device;
3069 	int vnr;
3070 
3071 	rcu_read_lock();
3072 	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
3073 		struct drbd_device *device = peer_device->device;
3074 
3075 		kref_get(&device->kref);
3076 		rcu_read_unlock();
3077 		drbd_md_sync(device);
3078 		kref_put(&device->kref, drbd_destroy_device);
3079 		rcu_read_lock();
3080 	}
3081 	rcu_read_unlock();
3082 }
3083 
3084 /* aligned 4kByte */
3085 struct meta_data_on_disk {
3086 	u64 la_size_sect;      /* last agreed size. */
3087 	u64 uuid[UI_SIZE];   /* UUIDs. */
3088 	u64 device_uuid;
3089 	u64 reserved_u64_1;
3090 	u32 flags;             /* MDF */
3091 	u32 magic;
3092 	u32 md_size_sect;
3093 	u32 al_offset;         /* offset to this block */
3094 	u32 al_nr_extents;     /* important for restoring the AL (userspace) */
3095 	      /* `-- act_log->nr_elements <-- ldev->dc.al_extents */
3096 	u32 bm_offset;         /* offset to the bitmap, from here */
3097 	u32 bm_bytes_per_bit;  /* BM_BLOCK_SIZE */
3098 	u32 la_peer_max_bio_size;   /* last peer max_bio_size */
3099 
3100 	/* see al_tr_number_to_on_disk_sector() */
3101 	u32 al_stripes;
3102 	u32 al_stripe_size_4k;
3103 
3104 	u8 reserved_u8[4096 - (7*8 + 10*4)];
3105 } __packed;
3106 
3107 
3108 
3109 void drbd_md_write(struct drbd_device *device, void *b)
3110 {
3111 	struct meta_data_on_disk *buffer = b;
3112 	sector_t sector;
3113 	int i;
3114 
3115 	memset(buffer, 0, sizeof(*buffer));
3116 
3117 	buffer->la_size_sect = cpu_to_be64(drbd_get_capacity(device->this_bdev));
3118 	for (i = UI_CURRENT; i < UI_SIZE; i++)
3119 		buffer->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]);
3120 	buffer->flags = cpu_to_be32(device->ldev->md.flags);
3121 	buffer->magic = cpu_to_be32(DRBD_MD_MAGIC_84_UNCLEAN);
3122 
3123 	buffer->md_size_sect  = cpu_to_be32(device->ldev->md.md_size_sect);
3124 	buffer->al_offset     = cpu_to_be32(device->ldev->md.al_offset);
3125 	buffer->al_nr_extents = cpu_to_be32(device->act_log->nr_elements);
3126 	buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
3127 	buffer->device_uuid = cpu_to_be64(device->ldev->md.device_uuid);
3128 
3129 	buffer->bm_offset = cpu_to_be32(device->ldev->md.bm_offset);
3130 	buffer->la_peer_max_bio_size = cpu_to_be32(device->peer_max_bio_size);
3131 
3132 	buffer->al_stripes = cpu_to_be32(device->ldev->md.al_stripes);
3133 	buffer->al_stripe_size_4k = cpu_to_be32(device->ldev->md.al_stripe_size_4k);
3134 
3135 	D_ASSERT(device, drbd_md_ss(device->ldev) == device->ldev->md.md_offset);
3136 	sector = device->ldev->md.md_offset;
3137 
3138 	if (drbd_md_sync_page_io(device, device->ldev, sector, REQ_OP_WRITE)) {
3139 		/* this was a try anyways ... */
3140 		drbd_err(device, "meta data update failed!\n");
3141 		drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
3142 	}
3143 }
3144 
3145 /**
3146  * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
3147  * @device:	DRBD device.
3148  */
3149 void drbd_md_sync(struct drbd_device *device)
3150 {
3151 	struct meta_data_on_disk *buffer;
3152 
3153 	/* Don't accidentally change the DRBD meta data layout. */
3154 	BUILD_BUG_ON(UI_SIZE != 4);
3155 	BUILD_BUG_ON(sizeof(struct meta_data_on_disk) != 4096);
3156 
3157 	del_timer(&device->md_sync_timer);
3158 	/* timer may be rearmed by drbd_md_mark_dirty() now. */
3159 	if (!test_and_clear_bit(MD_DIRTY, &device->flags))
3160 		return;
3161 
3162 	/* We use here D_FAILED and not D_ATTACHING because we try to write
3163 	 * metadata even if we detach due to a disk failure! */
3164 	if (!get_ldev_if_state(device, D_FAILED))
3165 		return;
3166 
3167 	buffer = drbd_md_get_buffer(device, __func__);
3168 	if (!buffer)
3169 		goto out;
3170 
3171 	drbd_md_write(device, buffer);
3172 
3173 	/* Update device->ldev->md.la_size_sect,
3174 	 * since we updated it on metadata. */
3175 	device->ldev->md.la_size_sect = drbd_get_capacity(device->this_bdev);
3176 
3177 	drbd_md_put_buffer(device);
3178 out:
3179 	put_ldev(device);
3180 }
3181 
3182 static int check_activity_log_stripe_size(struct drbd_device *device,
3183 		struct meta_data_on_disk *on_disk,
3184 		struct drbd_md *in_core)
3185 {
3186 	u32 al_stripes = be32_to_cpu(on_disk->al_stripes);
3187 	u32 al_stripe_size_4k = be32_to_cpu(on_disk->al_stripe_size_4k);
3188 	u64 al_size_4k;
3189 
3190 	/* both not set: default to old fixed size activity log */
3191 	if (al_stripes == 0 && al_stripe_size_4k == 0) {
3192 		al_stripes = 1;
3193 		al_stripe_size_4k = MD_32kB_SECT/8;
3194 	}
3195 
3196 	/* some paranoia plausibility checks */
3197 
3198 	/* we need both values to be set */
3199 	if (al_stripes == 0 || al_stripe_size_4k == 0)
3200 		goto err;
3201 
3202 	al_size_4k = (u64)al_stripes * al_stripe_size_4k;
3203 
3204 	/* Upper limit of activity log area, to avoid potential overflow
3205 	 * problems in al_tr_number_to_on_disk_sector(). As right now, more
3206 	 * than 72 * 4k blocks total only increases the amount of history,
3207 	 * limiting this arbitrarily to 16 GB is not a real limitation ;-)  */
3208 	if (al_size_4k > (16 * 1024 * 1024/4))
3209 		goto err;
3210 
3211 	/* Lower limit: we need at least 8 transaction slots (32kB)
3212 	 * to not break existing setups */
3213 	if (al_size_4k < MD_32kB_SECT/8)
3214 		goto err;
3215 
3216 	in_core->al_stripe_size_4k = al_stripe_size_4k;
3217 	in_core->al_stripes = al_stripes;
3218 	in_core->al_size_4k = al_size_4k;
3219 
3220 	return 0;
3221 err:
3222 	drbd_err(device, "invalid activity log striping: al_stripes=%u, al_stripe_size_4k=%u\n",
3223 			al_stripes, al_stripe_size_4k);
3224 	return -EINVAL;
3225 }
3226 
3227 static int check_offsets_and_sizes(struct drbd_device *device, struct drbd_backing_dev *bdev)
3228 {
3229 	sector_t capacity = drbd_get_capacity(bdev->md_bdev);
3230 	struct drbd_md *in_core = &bdev->md;
3231 	s32 on_disk_al_sect;
3232 	s32 on_disk_bm_sect;
3233 
3234 	/* The on-disk size of the activity log, calculated from offsets, and
3235 	 * the size of the activity log calculated from the stripe settings,
3236 	 * should match.
3237 	 * Though we could relax this a bit: it is ok, if the striped activity log
3238 	 * fits in the available on-disk activity log size.
3239 	 * Right now, that would break how resize is implemented.
3240 	 * TODO: make drbd_determine_dev_size() (and the drbdmeta tool) aware
3241 	 * of possible unused padding space in the on disk layout. */
3242 	if (in_core->al_offset < 0) {
3243 		if (in_core->bm_offset > in_core->al_offset)
3244 			goto err;
3245 		on_disk_al_sect = -in_core->al_offset;
3246 		on_disk_bm_sect = in_core->al_offset - in_core->bm_offset;
3247 	} else {
3248 		if (in_core->al_offset != MD_4kB_SECT)
3249 			goto err;
3250 		if (in_core->bm_offset < in_core->al_offset + in_core->al_size_4k * MD_4kB_SECT)
3251 			goto err;
3252 
3253 		on_disk_al_sect = in_core->bm_offset - MD_4kB_SECT;
3254 		on_disk_bm_sect = in_core->md_size_sect - in_core->bm_offset;
3255 	}
3256 
3257 	/* old fixed size meta data is exactly that: fixed. */
3258 	if (in_core->meta_dev_idx >= 0) {
3259 		if (in_core->md_size_sect != MD_128MB_SECT
3260 		||  in_core->al_offset != MD_4kB_SECT
3261 		||  in_core->bm_offset != MD_4kB_SECT + MD_32kB_SECT
3262 		||  in_core->al_stripes != 1
3263 		||  in_core->al_stripe_size_4k != MD_32kB_SECT/8)
3264 			goto err;
3265 	}
3266 
3267 	if (capacity < in_core->md_size_sect)
3268 		goto err;
3269 	if (capacity - in_core->md_size_sect < drbd_md_first_sector(bdev))
3270 		goto err;
3271 
3272 	/* should be aligned, and at least 32k */
3273 	if ((on_disk_al_sect & 7) || (on_disk_al_sect < MD_32kB_SECT))
3274 		goto err;
3275 
3276 	/* should fit (for now: exactly) into the available on-disk space;
3277 	 * overflow prevention is in check_activity_log_stripe_size() above. */
3278 	if (on_disk_al_sect != in_core->al_size_4k * MD_4kB_SECT)
3279 		goto err;
3280 
3281 	/* again, should be aligned */
3282 	if (in_core->bm_offset & 7)
3283 		goto err;
3284 
3285 	/* FIXME check for device grow with flex external meta data? */
3286 
3287 	/* can the available bitmap space cover the last agreed device size? */
3288 	if (on_disk_bm_sect < (in_core->la_size_sect+7)/MD_4kB_SECT/8/512)
3289 		goto err;
3290 
3291 	return 0;
3292 
3293 err:
3294 	drbd_err(device, "meta data offsets don't make sense: idx=%d "
3295 			"al_s=%u, al_sz4k=%u, al_offset=%d, bm_offset=%d, "
3296 			"md_size_sect=%u, la_size=%llu, md_capacity=%llu\n",
3297 			in_core->meta_dev_idx,
3298 			in_core->al_stripes, in_core->al_stripe_size_4k,
3299 			in_core->al_offset, in_core->bm_offset, in_core->md_size_sect,
3300 			(unsigned long long)in_core->la_size_sect,
3301 			(unsigned long long)capacity);
3302 
3303 	return -EINVAL;
3304 }
3305 
3306 
3307 /**
3308  * drbd_md_read() - Reads in the meta data super block
3309  * @device:	DRBD device.
3310  * @bdev:	Device from which the meta data should be read in.
3311  *
3312  * Return NO_ERROR on success, and an enum drbd_ret_code in case
3313  * something goes wrong.
3314  *
3315  * Called exactly once during drbd_adm_attach(), while still being D_DISKLESS,
3316  * even before @bdev is assigned to @device->ldev.
3317  */
3318 int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev)
3319 {
3320 	struct meta_data_on_disk *buffer;
3321 	u32 magic, flags;
3322 	int i, rv = NO_ERROR;
3323 
3324 	if (device->state.disk != D_DISKLESS)
3325 		return ERR_DISK_CONFIGURED;
3326 
3327 	buffer = drbd_md_get_buffer(device, __func__);
3328 	if (!buffer)
3329 		return ERR_NOMEM;
3330 
3331 	/* First, figure out where our meta data superblock is located,
3332 	 * and read it. */
3333 	bdev->md.meta_dev_idx = bdev->disk_conf->meta_dev_idx;
3334 	bdev->md.md_offset = drbd_md_ss(bdev);
3335 	/* Even for (flexible or indexed) external meta data,
3336 	 * initially restrict us to the 4k superblock for now.
3337 	 * Affects the paranoia out-of-range access check in drbd_md_sync_page_io(). */
3338 	bdev->md.md_size_sect = 8;
3339 
3340 	if (drbd_md_sync_page_io(device, bdev, bdev->md.md_offset,
3341 				 REQ_OP_READ)) {
3342 		/* NOTE: can't do normal error processing here as this is
3343 		   called BEFORE disk is attached */
3344 		drbd_err(device, "Error while reading metadata.\n");
3345 		rv = ERR_IO_MD_DISK;
3346 		goto err;
3347 	}
3348 
3349 	magic = be32_to_cpu(buffer->magic);
3350 	flags = be32_to_cpu(buffer->flags);
3351 	if (magic == DRBD_MD_MAGIC_84_UNCLEAN ||
3352 	    (magic == DRBD_MD_MAGIC_08 && !(flags & MDF_AL_CLEAN))) {
3353 			/* btw: that's Activity Log clean, not "all" clean. */
3354 		drbd_err(device, "Found unclean meta data. Did you \"drbdadm apply-al\"?\n");
3355 		rv = ERR_MD_UNCLEAN;
3356 		goto err;
3357 	}
3358 
3359 	rv = ERR_MD_INVALID;
3360 	if (magic != DRBD_MD_MAGIC_08) {
3361 		if (magic == DRBD_MD_MAGIC_07)
3362 			drbd_err(device, "Found old (0.7) meta data magic. Did you \"drbdadm create-md\"?\n");
3363 		else
3364 			drbd_err(device, "Meta data magic not found. Did you \"drbdadm create-md\"?\n");
3365 		goto err;
3366 	}
3367 
3368 	if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
3369 		drbd_err(device, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
3370 		    be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
3371 		goto err;
3372 	}
3373 
3374 
3375 	/* convert to in_core endian */
3376 	bdev->md.la_size_sect = be64_to_cpu(buffer->la_size_sect);
3377 	for (i = UI_CURRENT; i < UI_SIZE; i++)
3378 		bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
3379 	bdev->md.flags = be32_to_cpu(buffer->flags);
3380 	bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
3381 
3382 	bdev->md.md_size_sect = be32_to_cpu(buffer->md_size_sect);
3383 	bdev->md.al_offset = be32_to_cpu(buffer->al_offset);
3384 	bdev->md.bm_offset = be32_to_cpu(buffer->bm_offset);
3385 
3386 	if (check_activity_log_stripe_size(device, buffer, &bdev->md))
3387 		goto err;
3388 	if (check_offsets_and_sizes(device, bdev))
3389 		goto err;
3390 
3391 	if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
3392 		drbd_err(device, "unexpected bm_offset: %d (expected %d)\n",
3393 		    be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
3394 		goto err;
3395 	}
3396 	if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
3397 		drbd_err(device, "unexpected md_size: %u (expected %u)\n",
3398 		    be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
3399 		goto err;
3400 	}
3401 
3402 	rv = NO_ERROR;
3403 
3404 	spin_lock_irq(&device->resource->req_lock);
3405 	if (device->state.conn < C_CONNECTED) {
3406 		unsigned int peer;
3407 		peer = be32_to_cpu(buffer->la_peer_max_bio_size);
3408 		peer = max(peer, DRBD_MAX_BIO_SIZE_SAFE);
3409 		device->peer_max_bio_size = peer;
3410 	}
3411 	spin_unlock_irq(&device->resource->req_lock);
3412 
3413  err:
3414 	drbd_md_put_buffer(device);
3415 
3416 	return rv;
3417 }
3418 
3419 /**
3420  * drbd_md_mark_dirty() - Mark meta data super block as dirty
3421  * @device:	DRBD device.
3422  *
3423  * Call this function if you change anything that should be written to
3424  * the meta-data super block. This function sets MD_DIRTY, and starts a
3425  * timer that ensures that within five seconds you have to call drbd_md_sync().
3426  */
3427 #ifdef DEBUG
3428 void drbd_md_mark_dirty_(struct drbd_device *device, unsigned int line, const char *func)
3429 {
3430 	if (!test_and_set_bit(MD_DIRTY, &device->flags)) {
3431 		mod_timer(&device->md_sync_timer, jiffies + HZ);
3432 		device->last_md_mark_dirty.line = line;
3433 		device->last_md_mark_dirty.func = func;
3434 	}
3435 }
3436 #else
3437 void drbd_md_mark_dirty(struct drbd_device *device)
3438 {
3439 	if (!test_and_set_bit(MD_DIRTY, &device->flags))
3440 		mod_timer(&device->md_sync_timer, jiffies + 5*HZ);
3441 }
3442 #endif
3443 
3444 void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local)
3445 {
3446 	int i;
3447 
3448 	for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++)
3449 		device->ldev->md.uuid[i+1] = device->ldev->md.uuid[i];
3450 }
3451 
3452 void __drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
3453 {
3454 	if (idx == UI_CURRENT) {
3455 		if (device->state.role == R_PRIMARY)
3456 			val |= 1;
3457 		else
3458 			val &= ~((u64)1);
3459 
3460 		drbd_set_ed_uuid(device, val);
3461 	}
3462 
3463 	device->ldev->md.uuid[idx] = val;
3464 	drbd_md_mark_dirty(device);
3465 }
3466 
3467 void _drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
3468 {
3469 	unsigned long flags;
3470 	spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
3471 	__drbd_uuid_set(device, idx, val);
3472 	spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
3473 }
3474 
3475 void drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
3476 {
3477 	unsigned long flags;
3478 	spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
3479 	if (device->ldev->md.uuid[idx]) {
3480 		drbd_uuid_move_history(device);
3481 		device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[idx];
3482 	}
3483 	__drbd_uuid_set(device, idx, val);
3484 	spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
3485 }
3486 
3487 /**
3488  * drbd_uuid_new_current() - Creates a new current UUID
3489  * @device:	DRBD device.
3490  *
3491  * Creates a new current UUID, and rotates the old current UUID into
3492  * the bitmap slot. Causes an incremental resync upon next connect.
3493  */
3494 void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local)
3495 {
3496 	u64 val;
3497 	unsigned long long bm_uuid;
3498 
3499 	get_random_bytes(&val, sizeof(u64));
3500 
3501 	spin_lock_irq(&device->ldev->md.uuid_lock);
3502 	bm_uuid = device->ldev->md.uuid[UI_BITMAP];
3503 
3504 	if (bm_uuid)
3505 		drbd_warn(device, "bm UUID was already set: %llX\n", bm_uuid);
3506 
3507 	device->ldev->md.uuid[UI_BITMAP] = device->ldev->md.uuid[UI_CURRENT];
3508 	__drbd_uuid_set(device, UI_CURRENT, val);
3509 	spin_unlock_irq(&device->ldev->md.uuid_lock);
3510 
3511 	drbd_print_uuids(device, "new current UUID");
3512 	/* get it to stable storage _now_ */
3513 	drbd_md_sync(device);
3514 }
3515 
3516 void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local)
3517 {
3518 	unsigned long flags;
3519 	if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
3520 		return;
3521 
3522 	spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
3523 	if (val == 0) {
3524 		drbd_uuid_move_history(device);
3525 		device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP];
3526 		device->ldev->md.uuid[UI_BITMAP] = 0;
3527 	} else {
3528 		unsigned long long bm_uuid = device->ldev->md.uuid[UI_BITMAP];
3529 		if (bm_uuid)
3530 			drbd_warn(device, "bm UUID was already set: %llX\n", bm_uuid);
3531 
3532 		device->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
3533 	}
3534 	spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
3535 
3536 	drbd_md_mark_dirty(device);
3537 }
3538 
3539 /**
3540  * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3541  * @device:	DRBD device.
3542  *
3543  * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
3544  */
3545 int drbd_bmio_set_n_write(struct drbd_device *device) __must_hold(local)
3546 {
3547 	int rv = -EIO;
3548 
3549 	drbd_md_set_flag(device, MDF_FULL_SYNC);
3550 	drbd_md_sync(device);
3551 	drbd_bm_set_all(device);
3552 
3553 	rv = drbd_bm_write(device);
3554 
3555 	if (!rv) {
3556 		drbd_md_clear_flag(device, MDF_FULL_SYNC);
3557 		drbd_md_sync(device);
3558 	}
3559 
3560 	return rv;
3561 }
3562 
3563 /**
3564  * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3565  * @device:	DRBD device.
3566  *
3567  * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
3568  */
3569 int drbd_bmio_clear_n_write(struct drbd_device *device) __must_hold(local)
3570 {
3571 	drbd_resume_al(device);
3572 	drbd_bm_clear_all(device);
3573 	return drbd_bm_write(device);
3574 }
3575 
3576 static int w_bitmap_io(struct drbd_work *w, int unused)
3577 {
3578 	struct drbd_device *device =
3579 		container_of(w, struct drbd_device, bm_io_work.w);
3580 	struct bm_io_work *work = &device->bm_io_work;
3581 	int rv = -EIO;
3582 
3583 	if (work->flags != BM_LOCKED_CHANGE_ALLOWED) {
3584 		int cnt = atomic_read(&device->ap_bio_cnt);
3585 		if (cnt)
3586 			drbd_err(device, "FIXME: ap_bio_cnt %d, expected 0; queued for '%s'\n",
3587 					cnt, work->why);
3588 	}
3589 
3590 	if (get_ldev(device)) {
3591 		drbd_bm_lock(device, work->why, work->flags);
3592 		rv = work->io_fn(device);
3593 		drbd_bm_unlock(device);
3594 		put_ldev(device);
3595 	}
3596 
3597 	clear_bit_unlock(BITMAP_IO, &device->flags);
3598 	wake_up(&device->misc_wait);
3599 
3600 	if (work->done)
3601 		work->done(device, rv);
3602 
3603 	clear_bit(BITMAP_IO_QUEUED, &device->flags);
3604 	work->why = NULL;
3605 	work->flags = 0;
3606 
3607 	return 0;
3608 }
3609 
3610 /**
3611  * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
3612  * @device:	DRBD device.
3613  * @io_fn:	IO callback to be called when bitmap IO is possible
3614  * @done:	callback to be called after the bitmap IO was performed
3615  * @why:	Descriptive text of the reason for doing the IO
3616  *
3617  * While IO on the bitmap happens we freeze application IO thus we ensure
3618  * that drbd_set_out_of_sync() can not be called. This function MAY ONLY be
3619  * called from worker context. It MUST NOT be used while a previous such
3620  * work is still pending!
3621  *
3622  * Its worker function encloses the call of io_fn() by get_ldev() and
3623  * put_ldev().
3624  */
3625 void drbd_queue_bitmap_io(struct drbd_device *device,
3626 			  int (*io_fn)(struct drbd_device *),
3627 			  void (*done)(struct drbd_device *, int),
3628 			  char *why, enum bm_flag flags)
3629 {
3630 	D_ASSERT(device, current == first_peer_device(device)->connection->worker.task);
3631 
3632 	D_ASSERT(device, !test_bit(BITMAP_IO_QUEUED, &device->flags));
3633 	D_ASSERT(device, !test_bit(BITMAP_IO, &device->flags));
3634 	D_ASSERT(device, list_empty(&device->bm_io_work.w.list));
3635 	if (device->bm_io_work.why)
3636 		drbd_err(device, "FIXME going to queue '%s' but '%s' still pending?\n",
3637 			why, device->bm_io_work.why);
3638 
3639 	device->bm_io_work.io_fn = io_fn;
3640 	device->bm_io_work.done = done;
3641 	device->bm_io_work.why = why;
3642 	device->bm_io_work.flags = flags;
3643 
3644 	spin_lock_irq(&device->resource->req_lock);
3645 	set_bit(BITMAP_IO, &device->flags);
3646 	/* don't wait for pending application IO if the caller indicates that
3647 	 * application IO does not conflict anyways. */
3648 	if (flags == BM_LOCKED_CHANGE_ALLOWED || atomic_read(&device->ap_bio_cnt) == 0) {
3649 		if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
3650 			drbd_queue_work(&first_peer_device(device)->connection->sender_work,
3651 					&device->bm_io_work.w);
3652 	}
3653 	spin_unlock_irq(&device->resource->req_lock);
3654 }
3655 
3656 /**
3657  * drbd_bitmap_io() -  Does an IO operation on the whole bitmap
3658  * @device:	DRBD device.
3659  * @io_fn:	IO callback to be called when bitmap IO is possible
3660  * @why:	Descriptive text of the reason for doing the IO
3661  *
3662  * freezes application IO while that the actual IO operations runs. This
3663  * functions MAY NOT be called from worker context.
3664  */
3665 int drbd_bitmap_io(struct drbd_device *device, int (*io_fn)(struct drbd_device *),
3666 		char *why, enum bm_flag flags)
3667 {
3668 	/* Only suspend io, if some operation is supposed to be locked out */
3669 	const bool do_suspend_io = flags & (BM_DONT_CLEAR|BM_DONT_SET|BM_DONT_TEST);
3670 	int rv;
3671 
3672 	D_ASSERT(device, current != first_peer_device(device)->connection->worker.task);
3673 
3674 	if (do_suspend_io)
3675 		drbd_suspend_io(device);
3676 
3677 	drbd_bm_lock(device, why, flags);
3678 	rv = io_fn(device);
3679 	drbd_bm_unlock(device);
3680 
3681 	if (do_suspend_io)
3682 		drbd_resume_io(device);
3683 
3684 	return rv;
3685 }
3686 
3687 void drbd_md_set_flag(struct drbd_device *device, int flag) __must_hold(local)
3688 {
3689 	if ((device->ldev->md.flags & flag) != flag) {
3690 		drbd_md_mark_dirty(device);
3691 		device->ldev->md.flags |= flag;
3692 	}
3693 }
3694 
3695 void drbd_md_clear_flag(struct drbd_device *device, int flag) __must_hold(local)
3696 {
3697 	if ((device->ldev->md.flags & flag) != 0) {
3698 		drbd_md_mark_dirty(device);
3699 		device->ldev->md.flags &= ~flag;
3700 	}
3701 }
3702 int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
3703 {
3704 	return (bdev->md.flags & flag) != 0;
3705 }
3706 
3707 static void md_sync_timer_fn(unsigned long data)
3708 {
3709 	struct drbd_device *device = (struct drbd_device *) data;
3710 	drbd_device_post_work(device, MD_SYNC);
3711 }
3712 
3713 const char *cmdname(enum drbd_packet cmd)
3714 {
3715 	/* THINK may need to become several global tables
3716 	 * when we want to support more than
3717 	 * one PRO_VERSION */
3718 	static const char *cmdnames[] = {
3719 		[P_DATA]	        = "Data",
3720 		[P_WSAME]	        = "WriteSame",
3721 		[P_TRIM]	        = "Trim",
3722 		[P_DATA_REPLY]	        = "DataReply",
3723 		[P_RS_DATA_REPLY]	= "RSDataReply",
3724 		[P_BARRIER]	        = "Barrier",
3725 		[P_BITMAP]	        = "ReportBitMap",
3726 		[P_BECOME_SYNC_TARGET]  = "BecomeSyncTarget",
3727 		[P_BECOME_SYNC_SOURCE]  = "BecomeSyncSource",
3728 		[P_UNPLUG_REMOTE]	= "UnplugRemote",
3729 		[P_DATA_REQUEST]	= "DataRequest",
3730 		[P_RS_DATA_REQUEST]     = "RSDataRequest",
3731 		[P_SYNC_PARAM]	        = "SyncParam",
3732 		[P_SYNC_PARAM89]	= "SyncParam89",
3733 		[P_PROTOCOL]            = "ReportProtocol",
3734 		[P_UUIDS]	        = "ReportUUIDs",
3735 		[P_SIZES]	        = "ReportSizes",
3736 		[P_STATE]	        = "ReportState",
3737 		[P_SYNC_UUID]           = "ReportSyncUUID",
3738 		[P_AUTH_CHALLENGE]      = "AuthChallenge",
3739 		[P_AUTH_RESPONSE]	= "AuthResponse",
3740 		[P_PING]		= "Ping",
3741 		[P_PING_ACK]	        = "PingAck",
3742 		[P_RECV_ACK]	        = "RecvAck",
3743 		[P_WRITE_ACK]	        = "WriteAck",
3744 		[P_RS_WRITE_ACK]	= "RSWriteAck",
3745 		[P_SUPERSEDED]          = "Superseded",
3746 		[P_NEG_ACK]	        = "NegAck",
3747 		[P_NEG_DREPLY]	        = "NegDReply",
3748 		[P_NEG_RS_DREPLY]	= "NegRSDReply",
3749 		[P_BARRIER_ACK]	        = "BarrierAck",
3750 		[P_STATE_CHG_REQ]       = "StateChgRequest",
3751 		[P_STATE_CHG_REPLY]     = "StateChgReply",
3752 		[P_OV_REQUEST]          = "OVRequest",
3753 		[P_OV_REPLY]            = "OVReply",
3754 		[P_OV_RESULT]           = "OVResult",
3755 		[P_CSUM_RS_REQUEST]     = "CsumRSRequest",
3756 		[P_RS_IS_IN_SYNC]	= "CsumRSIsInSync",
3757 		[P_COMPRESSED_BITMAP]   = "CBitmap",
3758 		[P_DELAY_PROBE]         = "DelayProbe",
3759 		[P_OUT_OF_SYNC]		= "OutOfSync",
3760 		[P_RETRY_WRITE]		= "RetryWrite",
3761 		[P_RS_CANCEL]		= "RSCancel",
3762 		[P_CONN_ST_CHG_REQ]	= "conn_st_chg_req",
3763 		[P_CONN_ST_CHG_REPLY]	= "conn_st_chg_reply",
3764 		[P_RETRY_WRITE]		= "retry_write",
3765 		[P_PROTOCOL_UPDATE]	= "protocol_update",
3766 		[P_RS_THIN_REQ]         = "rs_thin_req",
3767 		[P_RS_DEALLOCATED]      = "rs_deallocated",
3768 
3769 		/* enum drbd_packet, but not commands - obsoleted flags:
3770 		 *	P_MAY_IGNORE
3771 		 *	P_MAX_OPT_CMD
3772 		 */
3773 	};
3774 
3775 	/* too big for the array: 0xfffX */
3776 	if (cmd == P_INITIAL_META)
3777 		return "InitialMeta";
3778 	if (cmd == P_INITIAL_DATA)
3779 		return "InitialData";
3780 	if (cmd == P_CONNECTION_FEATURES)
3781 		return "ConnectionFeatures";
3782 	if (cmd >= ARRAY_SIZE(cmdnames))
3783 		return "Unknown";
3784 	return cmdnames[cmd];
3785 }
3786 
3787 /**
3788  * drbd_wait_misc  -  wait for a request to make progress
3789  * @device:	device associated with the request
3790  * @i:		the struct drbd_interval embedded in struct drbd_request or
3791  *		struct drbd_peer_request
3792  */
3793 int drbd_wait_misc(struct drbd_device *device, struct drbd_interval *i)
3794 {
3795 	struct net_conf *nc;
3796 	DEFINE_WAIT(wait);
3797 	long timeout;
3798 
3799 	rcu_read_lock();
3800 	nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
3801 	if (!nc) {
3802 		rcu_read_unlock();
3803 		return -ETIMEDOUT;
3804 	}
3805 	timeout = nc->ko_count ? nc->timeout * HZ / 10 * nc->ko_count : MAX_SCHEDULE_TIMEOUT;
3806 	rcu_read_unlock();
3807 
3808 	/* Indicate to wake up device->misc_wait on progress.  */
3809 	i->waiting = true;
3810 	prepare_to_wait(&device->misc_wait, &wait, TASK_INTERRUPTIBLE);
3811 	spin_unlock_irq(&device->resource->req_lock);
3812 	timeout = schedule_timeout(timeout);
3813 	finish_wait(&device->misc_wait, &wait);
3814 	spin_lock_irq(&device->resource->req_lock);
3815 	if (!timeout || device->state.conn < C_CONNECTED)
3816 		return -ETIMEDOUT;
3817 	if (signal_pending(current))
3818 		return -ERESTARTSYS;
3819 	return 0;
3820 }
3821 
3822 void lock_all_resources(void)
3823 {
3824 	struct drbd_resource *resource;
3825 	int __maybe_unused i = 0;
3826 
3827 	mutex_lock(&resources_mutex);
3828 	local_irq_disable();
3829 	for_each_resource(resource, &drbd_resources)
3830 		spin_lock_nested(&resource->req_lock, i++);
3831 }
3832 
3833 void unlock_all_resources(void)
3834 {
3835 	struct drbd_resource *resource;
3836 
3837 	for_each_resource(resource, &drbd_resources)
3838 		spin_unlock(&resource->req_lock);
3839 	local_irq_enable();
3840 	mutex_unlock(&resources_mutex);
3841 }
3842 
3843 #ifdef CONFIG_DRBD_FAULT_INJECTION
3844 /* Fault insertion support including random number generator shamelessly
3845  * stolen from kernel/rcutorture.c */
3846 struct fault_random_state {
3847 	unsigned long state;
3848 	unsigned long count;
3849 };
3850 
3851 #define FAULT_RANDOM_MULT 39916801  /* prime */
3852 #define FAULT_RANDOM_ADD	479001701 /* prime */
3853 #define FAULT_RANDOM_REFRESH 10000
3854 
3855 /*
3856  * Crude but fast random-number generator.  Uses a linear congruential
3857  * generator, with occasional help from get_random_bytes().
3858  */
3859 static unsigned long
3860 _drbd_fault_random(struct fault_random_state *rsp)
3861 {
3862 	long refresh;
3863 
3864 	if (!rsp->count--) {
3865 		get_random_bytes(&refresh, sizeof(refresh));
3866 		rsp->state += refresh;
3867 		rsp->count = FAULT_RANDOM_REFRESH;
3868 	}
3869 	rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
3870 	return swahw32(rsp->state);
3871 }
3872 
3873 static char *
3874 _drbd_fault_str(unsigned int type) {
3875 	static char *_faults[] = {
3876 		[DRBD_FAULT_MD_WR] = "Meta-data write",
3877 		[DRBD_FAULT_MD_RD] = "Meta-data read",
3878 		[DRBD_FAULT_RS_WR] = "Resync write",
3879 		[DRBD_FAULT_RS_RD] = "Resync read",
3880 		[DRBD_FAULT_DT_WR] = "Data write",
3881 		[DRBD_FAULT_DT_RD] = "Data read",
3882 		[DRBD_FAULT_DT_RA] = "Data read ahead",
3883 		[DRBD_FAULT_BM_ALLOC] = "BM allocation",
3884 		[DRBD_FAULT_AL_EE] = "EE allocation",
3885 		[DRBD_FAULT_RECEIVE] = "receive data corruption",
3886 	};
3887 
3888 	return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
3889 }
3890 
3891 unsigned int
3892 _drbd_insert_fault(struct drbd_device *device, unsigned int type)
3893 {
3894 	static struct fault_random_state rrs = {0, 0};
3895 
3896 	unsigned int ret = (
3897 		(fault_devs == 0 ||
3898 			((1 << device_to_minor(device)) & fault_devs) != 0) &&
3899 		(((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));
3900 
3901 	if (ret) {
3902 		fault_count++;
3903 
3904 		if (__ratelimit(&drbd_ratelimit_state))
3905 			drbd_warn(device, "***Simulating %s failure\n",
3906 				_drbd_fault_str(type));
3907 	}
3908 
3909 	return ret;
3910 }
3911 #endif
3912 
3913 const char *drbd_buildtag(void)
3914 {
3915 	/* DRBD built from external sources has here a reference to the
3916 	   git hash of the source code. */
3917 
3918 	static char buildtag[38] = "\0uilt-in";
3919 
3920 	if (buildtag[0] == 0) {
3921 #ifdef MODULE
3922 		sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
3923 #else
3924 		buildtag[0] = 'b';
3925 #endif
3926 	}
3927 
3928 	return buildtag;
3929 }
3930 
3931 module_init(drbd_init)
3932 module_exit(drbd_cleanup)
3933 
3934 EXPORT_SYMBOL(drbd_conn_str);
3935 EXPORT_SYMBOL(drbd_role_str);
3936 EXPORT_SYMBOL(drbd_disk_str);
3937 EXPORT_SYMBOL(drbd_set_st_err_str);
3938