// SPDX-License-Identifier: GPL-2.0-or-later
/*
   drbd_nl.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.


 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h"
#include "drbd_state_change.h"
#include <asm/unaligned.h>
#include <linux/drbd_limits.h>
#include <linux/kthread.h>

#include <net/genetlink.h>

/* .doit */
// int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
// int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);
/* .dumpit */
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_resources(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_devices(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_devices_done(struct netlink_callback *cb);
int drbd_adm_dump_connections(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_connections_done(struct netlink_callback *cb);
int drbd_adm_dump_peer_devices(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_peer_devices_done(struct netlink_callback *cb);
int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb);

#include <linux/drbd_genl_api.h>
#include "drbd_nla.h"
#include <linux/genl_magic_func.h>

static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
static atomic_t notify_genl_seq = ATOMIC_INIT(2); /* two. */

DEFINE_MUTEX(notification_mutex);

/* used with blkdev_get_by_path, to claim our meta data device(s) */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";

static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
{
	genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
	if (genlmsg_reply(skb, info))
		pr_err("error sending genl reply\n");
}

/* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: the only
 * reason it could fail would be lack of space in the skb, and there are 4k
 * available. */
static int drbd_msg_put_info(struct sk_buff *skb, const char *info)
{
	struct nlattr *nla;
	int err = -EMSGSIZE;

	if (!info || !info[0])
		return 0;

	nla = nla_nest_start_noflag(skb, DRBD_NLA_CFG_REPLY);
	if (!nla)
		return err;

	err = nla_put_string(skb, T_info_text, info);
	if (err) {
		nla_nest_cancel(skb, nla);
		return err;
	} else
		nla_nest_end(skb, nla);
	return 0;
}

__printf(2, 3)
static int drbd_msg_sprintf_info(struct sk_buff *skb, const char *fmt, ...)
{
	va_list args;
	struct nlattr *nla, *txt;
	int err = -EMSGSIZE;
	int len;

	nla = nla_nest_start_noflag(skb, DRBD_NLA_CFG_REPLY);
	if (!nla)
		return err;

	txt = nla_reserve(skb, T_info_text, 256);
	if (!txt) {
		nla_nest_cancel(skb, nla);
		return err;
	}
	va_start(args, fmt);
	len = vscnprintf(nla_data(txt), 256, fmt, args);
	va_end(args);

	/* maybe: retry with larger reserve, if truncated */
	txt->nla_len = nla_attr_size(len+1);
	nlmsg_trim(skb, (char *)txt + NLA_ALIGN(txt->nla_len));
	nla_nest_end(skb, nla);

	return 0;
}
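
/*
 * Example (editor's sketch, not part of the original source): a doit
 * handler that has a prepared reply_skb can attach a human readable
 * note to the netlink reply before finishing, e.g.:
 *
 *	drbd_msg_put_info(adm_ctx.reply_skb, "unknown minor");
 *	drbd_msg_sprintf_info(adm_ctx.reply_skb,
 *			      "minor %u already exists", minor);
 *
 * (the second message is hypothetical). drbd_msg_put_info() is a no-op
 * for NULL/empty strings; drbd_msg_sprintf_info() formats into a 256
 * byte reservation and then trims the attribute to the actual length.
 */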

/* This would be a good candidate for a "pre_doit" hook,
 * and per-family private info->pointers.
 * But we need to stay compatible with older kernels.
 * If it returns successfully, adm_ctx members are valid.
 *
 * At this point, we still rely on the global genl_lock().
 * If we want to avoid that, and allow "genl_family.parallel_ops", we may need
 * to add additional synchronization against object destruction/modification.
 */
#define DRBD_ADM_NEED_MINOR	1
#define DRBD_ADM_NEED_RESOURCE	2
#define DRBD_ADM_NEED_CONNECTION 4
static int drbd_adm_prepare(struct drbd_config_context *adm_ctx,
	struct sk_buff *skb, struct genl_info *info, unsigned flags)
{
	struct drbd_genlmsghdr *d_in = info->userhdr;
	const u8 cmd = info->genlhdr->cmd;
	int err;

	memset(adm_ctx, 0, sizeof(*adm_ctx));

	/* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
	if (cmd != DRBD_ADM_GET_STATUS && !capable(CAP_NET_ADMIN))
		return -EPERM;

	adm_ctx->reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!adm_ctx->reply_skb) {
		err = -ENOMEM;
		goto fail;
	}

	adm_ctx->reply_dh = genlmsg_put_reply(adm_ctx->reply_skb,
					info, &drbd_genl_family, 0, cmd);
	/* putting a few bytes into a fresh skb of >= 4k will always succeed.
	 * but anyway */
	if (!adm_ctx->reply_dh) {
		err = -ENOMEM;
		goto fail;
	}

	adm_ctx->reply_dh->minor = d_in->minor;
	adm_ctx->reply_dh->ret_code = NO_ERROR;

	adm_ctx->volume = VOLUME_UNSPECIFIED;
	if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
		struct nlattr *nla;
		/* parse and validate only */
		err = drbd_cfg_context_from_attrs(NULL, info);
		if (err)
			goto fail;

		/* It was present, and valid,
		 * copy it over to the reply skb. */
		err = nla_put_nohdr(adm_ctx->reply_skb,
				info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
				info->attrs[DRBD_NLA_CFG_CONTEXT]);
		if (err)
			goto fail;

		/* and assign stuff to the adm_ctx */
		nla = nested_attr_tb[__nla_type(T_ctx_volume)];
		if (nla)
			adm_ctx->volume = nla_get_u32(nla);
		nla = nested_attr_tb[__nla_type(T_ctx_resource_name)];
		if (nla)
			adm_ctx->resource_name = nla_data(nla);
		adm_ctx->my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)];
		adm_ctx->peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)];
		if ((adm_ctx->my_addr &&
		     nla_len(adm_ctx->my_addr) > sizeof(adm_ctx->connection->my_addr)) ||
		    (adm_ctx->peer_addr &&
		     nla_len(adm_ctx->peer_addr) > sizeof(adm_ctx->connection->peer_addr))) {
			err = -EINVAL;
			goto fail;
		}
	}

	adm_ctx->minor = d_in->minor;
	adm_ctx->device = minor_to_device(d_in->minor);

	/* We are protected by the global genl_lock().
	 * But we may explicitly drop it/retake it in drbd_adm_set_role(),
	 * so make sure this object stays around. */
	if (adm_ctx->device)
		kref_get(&adm_ctx->device->kref);

	if (adm_ctx->resource_name)
		adm_ctx->resource = drbd_find_resource(adm_ctx->resource_name);

	if (!adm_ctx->device && (flags & DRBD_ADM_NEED_MINOR)) {
		drbd_msg_put_info(adm_ctx->reply_skb, "unknown minor");
		return ERR_MINOR_INVALID;
	}
	if (!adm_ctx->resource && (flags & DRBD_ADM_NEED_RESOURCE)) {
		drbd_msg_put_info(adm_ctx->reply_skb, "unknown resource");
		if (adm_ctx->resource_name)
			return ERR_RES_NOT_KNOWN;
		return ERR_INVALID_REQUEST;
	}

	if (flags & DRBD_ADM_NEED_CONNECTION) {
		if (adm_ctx->resource) {
			drbd_msg_put_info(adm_ctx->reply_skb, "no resource name expected");
			return ERR_INVALID_REQUEST;
		}
		if (adm_ctx->device) {
			drbd_msg_put_info(adm_ctx->reply_skb, "no minor number expected");
			return ERR_INVALID_REQUEST;
		}
		if (adm_ctx->my_addr && adm_ctx->peer_addr)
			adm_ctx->connection = conn_get_by_addrs(nla_data(adm_ctx->my_addr),
							  nla_len(adm_ctx->my_addr),
							  nla_data(adm_ctx->peer_addr),
							  nla_len(adm_ctx->peer_addr));
		if (!adm_ctx->connection) {
			drbd_msg_put_info(adm_ctx->reply_skb, "unknown connection");
			return ERR_INVALID_REQUEST;
		}
	}

	/* some more paranoia, if the request was over-determined */
	if (adm_ctx->device && adm_ctx->resource &&
	    adm_ctx->device->resource != adm_ctx->resource) {
		pr_warn("request: minor=%u, resource=%s; but that minor belongs to resource %s\n",
			adm_ctx->minor, adm_ctx->resource->name,
			adm_ctx->device->resource->name);
		drbd_msg_put_info(adm_ctx->reply_skb, "minor exists in different resource");
		return ERR_INVALID_REQUEST;
	}
	if (adm_ctx->device &&
	    adm_ctx->volume != VOLUME_UNSPECIFIED &&
	    adm_ctx->volume != adm_ctx->device->vnr) {
		pr_warn("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
			adm_ctx->minor, adm_ctx->volume,
			adm_ctx->device->vnr, adm_ctx->device->resource->name);
		drbd_msg_put_info(adm_ctx->reply_skb, "minor exists as different volume");
		return ERR_INVALID_REQUEST;
	}

	/* still, provide adm_ctx->resource always, if possible. */
	if (!adm_ctx->resource) {
		adm_ctx->resource = adm_ctx->device ? adm_ctx->device->resource
			: adm_ctx->connection ? adm_ctx->connection->resource : NULL;
		if (adm_ctx->resource)
			kref_get(&adm_ctx->resource->kref);
	}

	return NO_ERROR;

fail:
	nlmsg_free(adm_ctx->reply_skb);
	adm_ctx->reply_skb = NULL;
	return err;
}

static int drbd_adm_finish(struct drbd_config_context *adm_ctx,
	struct genl_info *info, int retcode)
{
	if (adm_ctx->device) {
		kref_put(&adm_ctx->device->kref, drbd_destroy_device);
		adm_ctx->device = NULL;
	}
	if (adm_ctx->connection) {
		kref_put(&adm_ctx->connection->kref, drbd_destroy_connection);
		adm_ctx->connection = NULL;
	}
	if (adm_ctx->resource) {
		kref_put(&adm_ctx->resource->kref, drbd_destroy_resource);
		adm_ctx->resource = NULL;
	}

	if (!adm_ctx->reply_skb)
		return -ENOMEM;

	adm_ctx->reply_dh->ret_code = retcode;
	drbd_adm_send_reply(adm_ctx->reply_skb, info);
	return 0;
}
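
/*
 * Editor's note: the usual shape of a .doit handler built on the two
 * helpers above is (sketch only; see drbd_adm_set_role() below for a
 * real instance):
 *
 *	struct drbd_config_context adm_ctx;
 *	enum drbd_ret_code retcode;
 *
 *	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
 *	if (!adm_ctx.reply_skb)
 *		return retcode;
 *	if (retcode != NO_ERROR)
 *		goto out;
 *	... do the actual work, possibly under the resource's adm_mutex ...
 * out:
 *	drbd_adm_finish(&adm_ctx, info, retcode);
 *	return 0;
 */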

static void setup_khelper_env(struct drbd_connection *connection, char **envp)
{
	char *afs;

	/* FIXME: A future version will not allow this case. */
	if (connection->my_addr_len == 0 || connection->peer_addr_len == 0)
		return;

	switch (((struct sockaddr *)&connection->peer_addr)->sa_family) {
	case AF_INET6:
		afs = "ipv6";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
			 &((struct sockaddr_in6 *)&connection->peer_addr)->sin6_addr);
		break;
	case AF_INET:
		afs = "ipv4";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
			 &((struct sockaddr_in *)&connection->peer_addr)->sin_addr);
		break;
	default:
		afs = "ssocks";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
			 &((struct sockaddr_in *)&connection->peer_addr)->sin_addr);
	}
	snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
}
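
/*
 * Editor's sketch: for an IPv4 peer at 192.0.2.1 (example address), the
 * two scratch buffers set up above end up as
 *
 *	envp[3] = "DRBD_PEER_AF=ipv4";
 *	envp[4] = "DRBD_PEER_ADDRESS=192.0.2.1";
 *
 * so the user mode helper can inspect the peer address via its
 * environment. If no network addresses are configured, both buffers
 * stay empty strings.
 */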

int drbd_khelper(struct drbd_device *device, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			NULL };
	char mb[14];
	char *argv[] = {drbd_usermode_helper, cmd, mb, NULL };
	struct drbd_connection *connection = first_peer_device(device)->connection;
	struct sib_info sib;
	int ret;

	if (current == connection->worker.task)
		set_bit(CALLBACK_PENDING, &connection->flags);

	snprintf(mb, 14, "minor-%d", device_to_minor(device));
	setup_khelper_env(connection, envp);

	/* The helper may take some time;
	 * write out any unsynced meta data changes now. */
	drbd_md_sync(device);

	drbd_info(device, "helper command: %s %s %s\n", drbd_usermode_helper, cmd, mb);
	sib.sib_reason = SIB_HELPER_PRE;
	sib.helper_name = cmd;
	drbd_bcast_event(device, &sib);
	notify_helper(NOTIFY_CALL, device, connection, cmd, 0);
	ret = call_usermodehelper(drbd_usermode_helper, argv, envp, UMH_WAIT_PROC);
	if (ret)
		drbd_warn(device, "helper command: %s %s %s exit code %u (0x%x)\n",
				drbd_usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	else
		drbd_info(device, "helper command: %s %s %s exit code %u (0x%x)\n",
				drbd_usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	sib.sib_reason = SIB_HELPER_POST;
	sib.helper_exit_code = ret;
	drbd_bcast_event(device, &sib);
	notify_helper(NOTIFY_RESPONSE, device, connection, cmd, ret);

	if (current == connection->worker.task)
		clear_bit(CALLBACK_PENDING, &connection->flags);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}
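
/*
 * Editor's sketch: drbd_khelper(device, "fence-peer") on minor 0 runs
 * (with the default drbd_usermode_helper, typically "/sbin/drbdadm")
 *
 *	/sbin/drbdadm fence-peer minor-0
 *
 * and waits for it. call_usermodehelper() with UMH_WAIT_PROC returns
 * the wait(2)-style status, which is why the exit code is extracted as
 * (ret >> 8) & 0xff in the log messages above.
 */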

enum drbd_peer_state conn_khelper(struct drbd_connection *connection, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			NULL };
	char *resource_name = connection->resource->name;
	char *argv[] = {drbd_usermode_helper, cmd, resource_name, NULL };
	int ret;

	setup_khelper_env(connection, envp);
	conn_md_sync(connection);

	drbd_info(connection, "helper command: %s %s %s\n", drbd_usermode_helper, cmd, resource_name);
	/* TODO: conn_bcast_event() ?? */
	notify_helper(NOTIFY_CALL, NULL, connection, cmd, 0);

	ret = call_usermodehelper(drbd_usermode_helper, argv, envp, UMH_WAIT_PROC);
	if (ret)
		drbd_warn(connection, "helper command: %s %s %s exit code %u (0x%x)\n",
			  drbd_usermode_helper, cmd, resource_name,
			  (ret >> 8) & 0xff, ret);
	else
		drbd_info(connection, "helper command: %s %s %s exit code %u (0x%x)\n",
			  drbd_usermode_helper, cmd, resource_name,
			  (ret >> 8) & 0xff, ret);
	/* TODO: conn_bcast_event() ?? */
	notify_helper(NOTIFY_RESPONSE, NULL, connection, cmd, ret);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}

static enum drbd_fencing_p highest_fencing_policy(struct drbd_connection *connection)
{
	enum drbd_fencing_p fp = FP_NOT_AVAIL;
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		if (get_ldev_if_state(device, D_CONSISTENT)) {
			struct disk_conf *disk_conf =
				rcu_dereference(peer_device->device->ldev->disk_conf);
			fp = max_t(enum drbd_fencing_p, fp, disk_conf->fencing);
			put_ldev(device);
		}
	}
	rcu_read_unlock();

	return fp;
}

static bool resource_is_suspended(struct drbd_resource *resource)
{
	return resource->susp || resource->susp_fen || resource->susp_nod;
}

bool conn_try_outdate_peer(struct drbd_connection *connection)
{
	struct drbd_resource * const resource = connection->resource;
	unsigned int connect_cnt;
	union drbd_state mask = { };
	union drbd_state val = { };
	enum drbd_fencing_p fp;
	char *ex_to_string;
	int r;

	spin_lock_irq(&resource->req_lock);
	if (connection->cstate >= C_WF_REPORT_PARAMS) {
		drbd_err(connection, "Expected cstate < C_WF_REPORT_PARAMS\n");
		spin_unlock_irq(&resource->req_lock);
		return false;
	}

	connect_cnt = connection->connect_cnt;
	spin_unlock_irq(&resource->req_lock);

	fp = highest_fencing_policy(connection);
	switch (fp) {
	case FP_NOT_AVAIL:
		drbd_warn(connection, "Not fencing peer, I'm not even Consistent myself.\n");
		spin_lock_irq(&resource->req_lock);
		if (connection->cstate < C_WF_REPORT_PARAMS) {
			_conn_request_state(connection,
					    (union drbd_state) { { .susp_fen = 1 } },
					    (union drbd_state) { { .susp_fen = 0 } },
					    CS_VERBOSE | CS_HARD | CS_DC_SUSP);
			/* We are no longer suspended due to the fencing policy.
			 * We may still be suspended due to the on-no-data-accessible policy.
			 * If that was OND_IO_ERROR, fail pending requests. */
			if (!resource_is_suspended(resource))
				_tl_restart(connection, CONNECTION_LOST_WHILE_PENDING);
		}
		/* Else: in case we raced with a connection handshake,
		 * let the handshake figure out if we maybe can RESEND,
		 * and do not resume/fail pending requests here.
		 * Worst case is we stay suspended for now, which may be
		 * resolved by either re-establishing the replication link, or
		 * the next link failure, or eventually the administrator.  */
		spin_unlock_irq(&resource->req_lock);
		return false;

	case FP_DONT_CARE:
		return true;
	default: ;
	}

	r = conn_khelper(connection, "fence-peer");

	switch ((r>>8) & 0xff) {
	case P_INCONSISTENT: /* peer is inconsistent */
		ex_to_string = "peer is inconsistent or worse";
		mask.pdsk = D_MASK;
		val.pdsk = D_INCONSISTENT;
		break;
	case P_OUTDATED: /* peer got outdated, or was already outdated */
		ex_to_string = "peer was fenced";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	case P_DOWN: /* peer was down */
		if (conn_highest_disk(connection) == D_UP_TO_DATE) {
			/* we will(have) create(d) a new UUID anyway... */
			ex_to_string = "peer is unreachable, assumed to be dead";
			mask.pdsk = D_MASK;
			val.pdsk = D_OUTDATED;
		} else {
			ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
		}
		break;
	case P_PRIMARY: /* Peer is primary, voluntarily outdate myself.
		 * This is useful when an unconnected R_SECONDARY is asked to
		 * become R_PRIMARY, but finds the other peer being active. */
		ex_to_string = "peer is active";
		drbd_warn(connection, "Peer is primary, outdating myself.\n");
		mask.disk = D_MASK;
		val.disk = D_OUTDATED;
		break;
	case P_FENCING:
		/* THINK: do we need to handle this
		 * like case 4, or more like case 5? */
		if (fp != FP_STONITH)
			drbd_err(connection, "fence-peer() = 7 && fencing != Stonith !!!\n");
		ex_to_string = "peer was stonithed";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	default:
		/* The script is broken ... */
		drbd_err(connection, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
		return false; /* Eventually leave IO frozen */
	}

	drbd_info(connection, "fence-peer helper returned %d (%s)\n",
		  (r>>8) & 0xff, ex_to_string);

	/* Not using
	   conn_request_state(connection, mask, val, CS_VERBOSE);
	   here, because we might have been able to re-establish the
	   connection in the meantime. */
	spin_lock_irq(&resource->req_lock);
	if (connection->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &connection->flags)) {
		if (connection->connect_cnt != connect_cnt)
			/* In case the connection was established and dropped
			   while the fence-peer handler was running, ignore it */
			drbd_info(connection, "Ignoring fence-peer exit code\n");
		else
			_conn_request_state(connection, mask, val, CS_VERBOSE);
	}
	spin_unlock_irq(&resource->req_lock);

	return conn_highest_pdsk(connection) <= D_OUTDATED;
}

static int _try_outdate_peer_async(void *data)
{
	struct drbd_connection *connection = (struct drbd_connection *)data;

	conn_try_outdate_peer(connection);

	kref_put(&connection->kref, drbd_destroy_connection);
	return 0;
}

void conn_try_outdate_peer_async(struct drbd_connection *connection)
{
	struct task_struct *opa;

	kref_get(&connection->kref);
	/* We may have just sent a signal to this thread
	 * to get it out of some blocking network function.
	 * Clear signals; otherwise kthread_run(), which internally uses
	 * wait_on_completion_killable(), will mistake our pending signal
	 * for a new fatal signal and fail. */
	flush_signals(current);
	opa = kthread_run(_try_outdate_peer_async, connection, "drbd_async_h");
	if (IS_ERR(opa)) {
		drbd_err(connection, "out of mem, failed to invoke fence-peer helper\n");
		kref_put(&connection->kref, drbd_destroy_connection);
	}
}

enum drbd_state_rv
drbd_set_role(struct drbd_device *const device, enum drbd_role new_role, int force)
{
	struct drbd_peer_device *const peer_device = first_peer_device(device);
	struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
	const int max_tries = 4;
	enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
	struct net_conf *nc;
	int try = 0;
	int forced = 0;
	union drbd_state mask, val;

	if (new_role == R_PRIMARY) {
		struct drbd_connection *connection;

		/* Detect dead peers as soon as possible.  */

		rcu_read_lock();
		for_each_connection(connection, device->resource)
			request_ping(connection);
		rcu_read_unlock();
	}

	mutex_lock(device->state_mutex);

	mask.i = 0; mask.role = R_MASK;
	val.i  = 0; val.role  = new_role;

	while (try++ < max_tries) {
		rv = _drbd_request_state_holding_state_mutex(device, mask, val, CS_WAIT_COMPLETE);

		/* in case we first succeeded to outdate,
		 * but now suddenly could establish a connection */
		if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
			val.pdsk = 0;
			mask.pdsk = 0;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK && force &&
		    (device->state.disk < D_UP_TO_DATE &&
		     device->state.disk >= D_INCONSISTENT)) {
			mask.disk = D_MASK;
			val.disk  = D_UP_TO_DATE;
			forced = 1;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK &&
		    device->state.disk == D_CONSISTENT && mask.pdsk == 0) {
			D_ASSERT(device, device->state.pdsk == D_UNKNOWN);

			if (conn_try_outdate_peer(connection)) {
				val.disk = D_UP_TO_DATE;
				mask.disk = D_MASK;
			}
			continue;
		}

		if (rv == SS_NOTHING_TO_DO)
			goto out;
		if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
			if (!conn_try_outdate_peer(connection) && force) {
				drbd_warn(device, "Forced into split brain situation!\n");
				mask.pdsk = D_MASK;
				val.pdsk  = D_OUTDATED;

			}
			continue;
		}
		if (rv == SS_TWO_PRIMARIES) {
			/* Maybe the peer is detected as dead very soon...
			   retry at most once more in this case. */
			if (try < max_tries) {
				int timeo;
				try = max_tries - 1;
				rcu_read_lock();
				nc = rcu_dereference(connection->net_conf);
				timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
				rcu_read_unlock();
				schedule_timeout_interruptible(timeo);
			}
			continue;
		}
		if (rv < SS_SUCCESS) {
			rv = _drbd_request_state(device, mask, val,
						CS_VERBOSE + CS_WAIT_COMPLETE);
			if (rv < SS_SUCCESS)
				goto out;
		}
		break;
	}

	if (rv < SS_SUCCESS)
		goto out;

	if (forced)
		drbd_warn(device, "Forced to consider local data as UpToDate!\n");

	/* Wait until nothing is on the fly :) */
	wait_event(device->misc_wait, atomic_read(&device->ap_pending_cnt) == 0);

	/* FIXME also wait for all pending P_BARRIER_ACK? */

	if (new_role == R_SECONDARY) {
		if (get_ldev(device)) {
			device->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
			put_ldev(device);
		}
	} else {
		mutex_lock(&device->resource->conf_update);
		nc = connection->net_conf;
		if (nc)
			nc->discard_my_data = 0; /* without copy; single bit op is atomic */
		mutex_unlock(&device->resource->conf_update);

		if (get_ldev(device)) {
			if (((device->state.conn < C_CONNECTED ||
			       device->state.pdsk <= D_FAILED)
			      && device->ldev->md.uuid[UI_BITMAP] == 0) || forced)
				drbd_uuid_new_current(device);

			device->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
			put_ldev(device);
		}
	}

	/* write-out of bitmap areas covered by the activity log to stable
	 * storage is already done in the after-state-change work */

	if (device->state.conn >= C_WF_REPORT_PARAMS) {
		/* if this was forced, we should consider sync */
		if (forced)
			drbd_send_uuids(peer_device);
		drbd_send_current_state(peer_device);
	}

	drbd_md_sync(device);
	set_disk_ro(device->vdisk, new_role == R_SECONDARY);
	kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE);
out:
	mutex_unlock(device->state_mutex);
	return rv;
}

static const char *from_attrs_err_to_txt(int err)
{
	return	err == -ENOMSG ? "required attribute missing" :
		err == -EOPNOTSUPP ? "unknown mandatory attribute" :
		err == -EEXIST ? "can not change invariant setting" :
		"invalid attribute value";
}

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	struct set_role_parms parms;
	int err;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	memset(&parms, 0, sizeof(parms));
	if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
		err = set_role_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
			goto out;
		}
	}
	genl_unlock();
	mutex_lock(&adm_ctx.resource->adm_mutex);

	if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
		retcode = drbd_set_role(adm_ctx.device, R_PRIMARY, parms.assume_uptodate);
	else
		retcode = drbd_set_role(adm_ctx.device, R_SECONDARY, 0);

	mutex_unlock(&adm_ctx.resource->adm_mutex);
	genl_lock();
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}

/* Initializes the md.*_offset members, so we are able to find
 * the on disk meta data.
 *
 * We currently have two possible layouts:
 * external:
 *   |----------- md_size_sect ------------------|
 *   [ 4k superblock ][ activity log ][  Bitmap  ]
 *   | al_offset == 8 |
 *   | bm_offset = al_offset + X      |
 *  ==> bitmap sectors = md_size_sect - bm_offset
 *
 * internal:
 *            |----------- md_size_sect ------------------|
 * [data.....][  Bitmap  ][ activity log ][ 4k superblock ]
 *                        | al_offset < 0 |
 *            | bm_offset = al_offset - Y |
 *  ==> bitmap sectors = Y = al_offset - bm_offset
 *
 *  Activity log size used to be fixed 32kB,
 *  but is about to become configurable.
 */
static void drbd_md_set_sector_offsets(struct drbd_device *device,
				       struct drbd_backing_dev *bdev)
{
	sector_t md_size_sect = 0;
	unsigned int al_size_sect = bdev->md.al_size_4k * 8;

	bdev->md.md_offset = drbd_md_ss(bdev);

	switch (bdev->md.meta_dev_idx) {
	default:
		/* v07 style fixed size indexed meta data */
		bdev->md.md_size_sect = MD_128MB_SECT;
		bdev->md.al_offset = MD_4kB_SECT;
		bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		/* just occupy the full device; unit: sectors */
		bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
		bdev->md.al_offset = MD_4kB_SECT;
		bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
		break;
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		/* al size is still fixed */
		bdev->md.al_offset = -al_size_sect;
		/* we need (slightly less than) ~ this many bitmap sectors: */
		md_size_sect = drbd_get_capacity(bdev->backing_bdev);
		md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
		md_size_sect = BM_SECT_TO_EXT(md_size_sect);
		md_size_sect = ALIGN(md_size_sect, 8);

		/* plus the "drbd meta data super block",
		 * and the activity log; */
		md_size_sect += MD_4kB_SECT + al_size_sect;

		bdev->md.md_size_sect = md_size_sect;
		/* bitmap offset is adjusted by 'super' block size */
		bdev->md.bm_offset   = -md_size_sect + MD_4kB_SECT;
		break;
	}
}
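
/*
 * Editor's sketch of the internal layout above, assuming the historical
 * fixed 32kB activity log (al_size_4k == 8, hence al_size_sect == 64)
 * and MD_4kB_SECT == 8 (one 4kB block in 512 byte sectors):
 *
 *	md.al_offset = -64;               // AL fills the 64 sectors before the superblock
 *	md.bm_offset = -md_size_sect + 8; // bitmap fills everything in front of the AL
 *
 * where md_size_sect is the (aligned) bitmap size plus those 8 + 64
 * sectors; offsets are relative to md_offset, which points at the
 * superblock in the last 4kB of the backing device.
 */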

/* input size is expected to be in KB */
char *ppsize(char *buf, unsigned long long size)
{
	/* Needs 9 bytes at max including trailing NUL:
	 * -1ULL ==> "16384 EB" */
	static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
	int base = 0;
	while (size >= 10000 && base < sizeof(units)-1) {
		/* shift + round */
		size = (size >> 10) + !!(size & (1<<9));
		base++;
	}
	sprintf(buf, "%u %cB", (unsigned)size, units[base]);

	return buf;
}
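
/*
 * Editor's sketch: ppsize() keeps dividing by 1024 (rounding on the
 * shifted-out half bit) until the value drops below 10000, e.g.
 *
 *	char buf[10];
 *	ppsize(buf, 20480);    // "20 MB"
 *	ppsize(buf, 10752);    // "11 MB" (10752 >> 10 == 10, rounded up)
 *	ppsize(buf, 1048576);  // "1024 MB", not "1 GB": 1024 < 10000
 */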

/* there is still a theoretical deadlock when called from receiver
 * on a D_INCONSISTENT R_PRIMARY:
 *  remote READ does inc_ap_bio, receiver would need to receive answer
 *  packet from remote to dec_ap_bio again.
 *  receiver receive_sizes(), comes here,
 *  waits for ap_bio_cnt == 0. -> deadlock.
 * but this cannot happen, actually, because:
 *  R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 *  (not connected, or bad/no disk on peer):
 *  see drbd_fail_request_early, ap_bio_cnt is zero.
 *  R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 *  peer may not initiate a resize.
 */
/* Note these are not to be confused with
 * drbd_adm_suspend_io/drbd_adm_resume_io,
 * which are (sub) state changes triggered by admin (drbdsetup),
 * and can be long lived.
 * This changes a device flag, is triggered by drbd internals,
 * and should be short-lived. */
/* It needs to be a counter, since multiple threads might
   independently suspend and resume IO. */
void drbd_suspend_io(struct drbd_device *device)
{
	atomic_inc(&device->suspend_cnt);
	if (drbd_suspended(device))
		return;
	wait_event(device->misc_wait, !atomic_read(&device->ap_bio_cnt));
}

void drbd_resume_io(struct drbd_device *device)
{
	if (atomic_dec_and_test(&device->suspend_cnt))
		wake_up(&device->misc_wait);
}

/**
 * drbd_determine_dev_size() -  Sets the right device size obeying all constraints
 * @device:	DRBD device.
 *
 * Returns 0 on success, negative return values indicate errors.
 * You should call drbd_md_sync() after calling this function.
 */
enum determine_dev_size
drbd_determine_dev_size(struct drbd_device *device, enum dds_flags flags, struct resize_parms *rs) __must_hold(local)
{
	struct md_offsets_and_sizes {
		u64 last_agreed_sect;
		u64 md_offset;
		s32 al_offset;
		s32 bm_offset;
		u32 md_size_sect;

		u32 al_stripes;
		u32 al_stripe_size_4k;
	} prev;
	sector_t u_size, size;
	struct drbd_md *md = &device->ldev->md;
	void *buffer;

	int md_moved, la_size_changed;
	enum determine_dev_size rv = DS_UNCHANGED;

	/* We may change the on-disk offsets of our meta data below.  Lock out
	 * anything that may cause meta data IO, to avoid acting on incomplete
	 * layout changes or scribbling over meta data that is in the process
	 * of being moved.
	 *
	 * Move is not exactly correct, btw, currently we have all our meta
	 * data in core memory, to "move" it we just write it all out, there
	 * are no reads. */
	drbd_suspend_io(device);
	buffer = drbd_md_get_buffer(device, __func__); /* Lock meta-data IO */
	if (!buffer) {
		drbd_resume_io(device);
		return DS_ERROR;
	}

	/* remember current offset and sizes */
	prev.last_agreed_sect = md->la_size_sect;
	prev.md_offset = md->md_offset;
	prev.al_offset = md->al_offset;
	prev.bm_offset = md->bm_offset;
	prev.md_size_sect = md->md_size_sect;
	prev.al_stripes = md->al_stripes;
	prev.al_stripe_size_4k = md->al_stripe_size_4k;

	if (rs) {
		/* rs is non NULL if we should change the AL layout only */
		md->al_stripes = rs->al_stripes;
		md->al_stripe_size_4k = rs->al_stripe_size / 4;
		md->al_size_4k = (u64)rs->al_stripes * rs->al_stripe_size / 4;
	}

	drbd_md_set_sector_offsets(device, device->ldev);

	rcu_read_lock();
	u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
	rcu_read_unlock();
	size = drbd_new_dev_size(device, device->ldev, u_size, flags & DDSF_FORCED);

	if (size < prev.last_agreed_sect) {
		if (rs && u_size == 0) {
			/* Remove "rs &&" later. This check should always be active, but
			   right now the receiver expects the permissive behavior */
			drbd_warn(device, "Implicit shrink not allowed. "
				 "Use --size=%llus for explicit shrink.\n",
				 (unsigned long long)size);
			rv = DS_ERROR_SHRINK;
		}
		if (u_size > size)
			rv = DS_ERROR_SPACE_MD;
		if (rv != DS_UNCHANGED)
			goto err_out;
	}

	if (drbd_get_capacity(device->this_bdev) != size ||
	    drbd_bm_capacity(device) != size) {
		int err;
		err = drbd_bm_resize(device, size, !(flags & DDSF_NO_RESYNC));
		if (unlikely(err)) {
			/* currently there is only one error: ENOMEM! */
			size = drbd_bm_capacity(device);
			if (size == 0) {
				drbd_err(device, "OUT OF MEMORY! "
				    "Could not allocate bitmap!\n");
			} else {
				drbd_err(device, "BM resizing failed. "
				    "Leaving size unchanged\n");
			}
			rv = DS_ERROR;
		}
		/* racy, see comments above. */
		drbd_set_my_capacity(device, size);
		md->la_size_sect = size;
	}
	if (rv <= DS_ERROR)
		goto err_out;

	la_size_changed = (prev.last_agreed_sect != md->la_size_sect);

	md_moved = prev.md_offset    != md->md_offset
		|| prev.md_size_sect != md->md_size_sect;

	if (la_size_changed || md_moved || rs) {
		u32 prev_flags;

		/* We do some synchronous IO below, which may take some time.
		 * Clear the timer, to avoid scary "timer expired!" messages,
		 * "Superblock" is written out at least twice below, anyway. */
		del_timer(&device->md_sync_timer);

		/* We won't change the "al-extents" setting, we just may need
		 * to move the on-disk location of the activity log ringbuffer.
		 * Lock for transaction is good enough, it may well be "dirty"
		 * or even "starving". */
		wait_event(device->al_wait, lc_try_lock_for_transaction(device->act_log));

		/* mark current on-disk bitmap and activity log as unreliable */
		prev_flags = md->flags;
		md->flags |= MDF_FULL_SYNC | MDF_AL_DISABLED;
		drbd_md_write(device, buffer);

		drbd_al_initialize(device, buffer);

		drbd_info(device, "Writing the whole bitmap, %s\n",
			 la_size_changed && md_moved ? "size changed and md moved" :
			 la_size_changed ? "size changed" : "md moved");
		/* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
		drbd_bitmap_io(device, md_moved ? &drbd_bm_write_all : &drbd_bm_write,
			       "size changed", BM_LOCKED_MASK);

		/* on-disk bitmap and activity log is authoritative again
		 * (unless there was an IO error meanwhile...) */
		md->flags = prev_flags;
		drbd_md_write(device, buffer);

		if (rs)
			drbd_info(device, "Changed AL layout to al-stripes = %d, al-stripe-size-kB = %d\n",
				  md->al_stripes, md->al_stripe_size_4k * 4);
	}

	if (size > prev.last_agreed_sect)
		rv = prev.last_agreed_sect ? DS_GREW : DS_GREW_FROM_ZERO;
	if (size < prev.last_agreed_sect)
		rv = DS_SHRUNK;

	if (0) {
	err_out:
		/* restore previous offset and sizes */
		md->la_size_sect = prev.last_agreed_sect;
		md->md_offset = prev.md_offset;
		md->al_offset = prev.al_offset;
		md->bm_offset = prev.bm_offset;
		md->md_size_sect = prev.md_size_sect;
		md->al_stripes = prev.al_stripes;
		md->al_stripe_size_4k = prev.al_stripe_size_4k;
		md->al_size_4k = (u64)prev.al_stripes * prev.al_stripe_size_4k;
	}
	lc_unlock(device->act_log);
	wake_up(&device->al_wait);
	drbd_md_put_buffer(device);
	drbd_resume_io(device);

	return rv;
}

sector_t
drbd_new_dev_size(struct drbd_device *device, struct drbd_backing_dev *bdev,
		  sector_t u_size, int assume_peer_has_space)
{
	sector_t p_size = device->p_size;   /* partner's disk size. */
	sector_t la_size_sect = bdev->md.la_size_sect; /* last agreed size. */
	sector_t m_size; /* my size */
	sector_t size = 0;

	m_size = drbd_get_max_capacity(bdev);

	if (device->state.conn < C_CONNECTED && assume_peer_has_space) {
		drbd_warn(device, "Resize while not connected was forced by the user!\n");
		p_size = m_size;
	}

	if (p_size && m_size) {
		size = min_t(sector_t, p_size, m_size);
	} else {
		if (la_size_sect) {
			size = la_size_sect;
			if (m_size && m_size < size)
				size = m_size;
			if (p_size && p_size < size)
				size = p_size;
		} else {
			if (m_size)
				size = m_size;
			if (p_size)
				size = p_size;
		}
	}

	if (size == 0)
		drbd_err(device, "Both nodes diskless!\n");

	if (u_size) {
		if (u_size > size)
			drbd_err(device, "Requested disk size is too big (%lu > %lu)\n",
			    (unsigned long)u_size>>1, (unsigned long)size>>1);
		else
			size = u_size;
	}

	return size;
}
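
/*
 * Editor's sketch of the sizing rules above: with both sizes known the
 * minimum wins, otherwise the last agreed size caps whatever is known
 * (all values in sectors):
 *
 *	p_size = 2000, m_size = 1000                   -> size = 1000
 *	p_size = 0, m_size = 1000, la_size_sect = 800  -> size = 800
 *	p_size = 0, m_size = 1000, la_size_sect = 0    -> size = 1000
 *
 * A configured u_size (the "disk_size" option) may only shrink the
 * result, never grow it beyond what both nodes can provide.
 */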

/**
 * drbd_check_al_size() - Ensures that the AL is of the right size
 * @device:	DRBD device.
 *
 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
 * failed, and 0 on success. You should call drbd_md_sync() after you called
 * this function.
 */
static int drbd_check_al_size(struct drbd_device *device, struct disk_conf *dc)
{
	struct lru_cache *n, *t;
	struct lc_element *e;
	unsigned int in_use;
	int i;

	if (device->act_log &&
	    device->act_log->nr_elements == dc->al_extents)
		return 0;

	in_use = 0;
	t = device->act_log;
	n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
		dc->al_extents, sizeof(struct lc_element), 0);

	if (n == NULL) {
		drbd_err(device, "Cannot allocate act_log lru!\n");
		return -ENOMEM;
	}
	spin_lock_irq(&device->al_lock);
	if (t) {
		for (i = 0; i < t->nr_elements; i++) {
			e = lc_element_by_index(t, i);
			if (e->refcnt)
				drbd_err(device, "refcnt(%d)==%d\n",
				    e->lc_number, e->refcnt);
			in_use += e->refcnt;
		}
	}
	if (!in_use)
		device->act_log = n;
	spin_unlock_irq(&device->al_lock);
	if (in_use) {
		drbd_err(device, "Activity log still in use!\n");
		lc_destroy(n);
		return -EBUSY;
	} else {
		lc_destroy(t);
	}
	drbd_md_mark_dirty(device); /* we changed device->act_log->nr_elements */
	return 0;
}

static void blk_queue_discard_granularity(struct request_queue *q, unsigned int granularity)
{
	q->limits.discard_granularity = granularity;
}

static unsigned int drbd_max_discard_sectors(struct drbd_connection *connection)
{
	/* when we introduced REQ_WRITE_SAME support, we also bumped
	 * our maximum supported batch bio size used for discards. */
	if (connection->agreed_features & DRBD_FF_WSAME)
		return DRBD_MAX_BBIO_SECTORS;
	/* before, with DRBD <= 8.4.6, we only allowed up to one AL_EXTENT_SIZE. */
	return AL_EXTENT_SIZE >> 9;
}

static void decide_on_discard_support(struct drbd_device *device,
			struct request_queue *q,
			struct request_queue *b,
			bool discard_zeroes_if_aligned)
{
	/* q = drbd device queue (device->rq_queue)
	 * b = backing device queue (device->ldev->backing_bdev->bd_disk->queue),
	 *     or NULL if diskless
	 */
	struct drbd_connection *connection = first_peer_device(device)->connection;
	bool can_do = b ? blk_queue_discard(b) : true;

	if (can_do && connection->cstate >= C_CONNECTED && !(connection->agreed_features & DRBD_FF_TRIM)) {
		can_do = false;
		drbd_info(connection, "peer DRBD too old, does not support TRIM: disabling discards\n");
	}
	if (can_do) {
		/* We don't care for the granularity, really.
		 * Stacking limits below should fix it for the local
		 * device.  Whether or not it is a suitable granularity
		 * on the remote device is not our problem, really. If
		 * you care, you need to use devices with similar
		 * topology on all peers. */
		blk_queue_discard_granularity(q, 512);
		q->limits.max_discard_sectors = drbd_max_discard_sectors(connection);
		blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
		q->limits.max_write_zeroes_sectors = drbd_max_discard_sectors(connection);
	} else {
		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
		blk_queue_discard_granularity(q, 0);
		q->limits.max_discard_sectors = 0;
		q->limits.max_write_zeroes_sectors = 0;
	}
}

static void fixup_discard_if_not_supported(struct request_queue *q)
{
	/* To avoid confusion, if this queue does not support discard, clear
	 * max_discard_sectors, which is what lsblk -D reports to the user.
	 * Older kernels got this wrong in "stack limits".
	 */
	if (!blk_queue_discard(q)) {
		blk_queue_max_discard_sectors(q, 0);
		blk_queue_discard_granularity(q, 0);
	}
}

static void fixup_write_zeroes(struct drbd_device *device, struct request_queue *q)
{
	/* Fixup max_write_zeroes_sectors after blk_queue_stack_limits():
	 * if we can handle "zeroes" efficiently on the protocol,
	 * we want to do that, even if our backend does not announce
	 * max_write_zeroes_sectors itself. */
	struct drbd_connection *connection = first_peer_device(device)->connection;
	/* If the peer announces WZEROES support, use it.  Otherwise, rather
	 * send explicit zeroes than rely on some discard-zeroes-data magic. */
	if (connection->agreed_features & DRBD_FF_WZEROES)
		q->limits.max_write_zeroes_sectors = DRBD_MAX_BBIO_SECTORS;
	else
		q->limits.max_write_zeroes_sectors = 0;
}

static void decide_on_write_same_support(struct drbd_device *device,
			struct request_queue *q,
			struct request_queue *b, struct o_qlim *o,
			bool disable_write_same)
{
	struct drbd_peer_device *peer_device = first_peer_device(device);
	struct drbd_connection *connection = peer_device->connection;
	bool can_do = b ? b->limits.max_write_same_sectors : true;

	if (can_do && disable_write_same) {
		can_do = false;
		drbd_info(peer_device, "WRITE_SAME disabled by config\n");
	}

	if (can_do && connection->cstate >= C_CONNECTED && !(connection->agreed_features & DRBD_FF_WSAME)) {
		can_do = false;
		drbd_info(peer_device, "peer does not support WRITE_SAME\n");
	}

	if (o) {
		/* logical block size; queue_logical_block_size(NULL) is 512 */
		unsigned int peer_lbs = be32_to_cpu(o->logical_block_size);
		unsigned int me_lbs_b = queue_logical_block_size(b);
		unsigned int me_lbs = queue_logical_block_size(q);

		if (me_lbs_b != me_lbs) {
			drbd_warn(device,
				"logical block size of local backend does not match (drbd:%u, backend:%u); was this a late attach?\n",
				me_lbs, me_lbs_b);
			/* rather disable write same than trigger some BUG_ON later in the scsi layer. */
			can_do = false;
		}
		if (me_lbs_b != peer_lbs) {
			drbd_warn(peer_device, "logical block sizes do not match (me:%u, peer:%u); this may cause problems.\n",
				me_lbs, peer_lbs);
			if (can_do) {
				drbd_dbg(peer_device, "logical block size mismatch: WRITE_SAME disabled.\n");
				can_do = false;
			}
			me_lbs = max(me_lbs, me_lbs_b);
			/* We cannot change the logical block size of an in-use queue.
			 * We can only hope that access happens to be properly aligned.
			 * If not, the peer will likely produce an IO error, and detach. */
			if (peer_lbs > me_lbs) {
				if (device->state.role != R_PRIMARY) {
					blk_queue_logical_block_size(q, peer_lbs);
					drbd_warn(peer_device, "logical block size set to %u\n", peer_lbs);
				} else {
					drbd_warn(peer_device,
						"current Primary must NOT adjust logical block size (%u -> %u); hope for the best.\n",
						me_lbs, peer_lbs);
				}
			}
		}
		if (can_do && !o->write_same_capable) {
			/* If we introduce an open-coded write-same loop on the receiving side,
			 * the peer would present itself as "capable". */
			drbd_dbg(peer_device, "WRITE_SAME disabled (peer device not capable)\n");
			can_do = false;
		}
	}

	blk_queue_max_write_same_sectors(q, can_do ? DRBD_MAX_BBIO_SECTORS : 0);
}

static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backing_dev *bdev,
				   unsigned int max_bio_size, struct o_qlim *o)
{
	struct request_queue * const q = device->rq_queue;
	unsigned int max_hw_sectors = max_bio_size >> 9;
	unsigned int max_segments = 0;
	struct request_queue *b = NULL;
	struct disk_conf *dc;
	bool discard_zeroes_if_aligned = true;
	bool disable_write_same = false;

	if (bdev) {
		b = bdev->backing_bdev->bd_disk->queue;

		max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
		rcu_read_lock();
		dc = rcu_dereference(device->ldev->disk_conf);
		max_segments = dc->max_bio_bvecs;
		discard_zeroes_if_aligned = dc->discard_zeroes_if_aligned;
		disable_write_same = dc->disable_write_same;
		rcu_read_unlock();

		blk_set_stacking_limits(&q->limits);
	}

	blk_queue_max_hw_sectors(q, max_hw_sectors);
	/* This is the workaround for "bio would need to, but cannot, be split" */
	blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
	blk_queue_segment_boundary(q, PAGE_SIZE-1);
	decide_on_discard_support(device, q, b, discard_zeroes_if_aligned);
	decide_on_write_same_support(device, q, b, o, disable_write_same);

	if (b) {
		blk_queue_stack_limits(q, b);

		if (q->backing_dev_info->ra_pages !=
		    b->backing_dev_info->ra_pages) {
			drbd_info(device, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
				 q->backing_dev_info->ra_pages,
				 b->backing_dev_info->ra_pages);
			q->backing_dev_info->ra_pages =
						b->backing_dev_info->ra_pages;
		}
	}
	fixup_discard_if_not_supported(q);
	fixup_write_zeroes(device, q);
}

void drbd_reconsider_queue_parameters(struct drbd_device *device, struct drbd_backing_dev *bdev, struct o_qlim *o)
{
	unsigned int now, new, local, peer;

	now = queue_max_hw_sectors(device->rq_queue) << 9;
	local = device->local_max_bio_size; /* Eventually last known value, from volatile memory */
	peer = device->peer_max_bio_size; /* Eventually last known value, from meta data */

	if (bdev) {
		local = queue_max_hw_sectors(bdev->backing_bdev->bd_disk->queue) << 9;
		device->local_max_bio_size = local;
	}
	local = min(local, DRBD_MAX_BIO_SIZE);

	/* We may ignore peer limits if the peer is modern enough,
	   because from 8.3.8 onwards the peer can use multiple
	   BIOs for a single peer_request */
	if (device->state.conn >= C_WF_REPORT_PARAMS) {
		if (first_peer_device(device)->connection->agreed_pro_version < 94)
			peer = min(device->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
			/* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
		else if (first_peer_device(device)->connection->agreed_pro_version == 94)
			peer = DRBD_MAX_SIZE_H80_PACKET;
		else if (first_peer_device(device)->connection->agreed_pro_version < 100)
			peer = DRBD_MAX_BIO_SIZE_P95;  /* drbd 8.3.8 onwards, before 8.4.0 */
		else
			peer = DRBD_MAX_BIO_SIZE;

		/* We may later detach and re-attach on a disconnected Primary.
		 * Avoid this setting to jump back in that case.
		 * We want to store what we know the peer DRBD can handle,
		 * not what the peer IO backend can handle. */
		if (peer > device->peer_max_bio_size)
			device->peer_max_bio_size = peer;
	}
	new = min(local, peer);

	if (device->state.role == R_PRIMARY && new < now)
		drbd_err(device, "ASSERT FAILED new < now; (%u < %u)\n", new, now);

	if (new != now)
		drbd_info(device, "max BIO size = %u\n", new);

	drbd_setup_queue_param(device, bdev, new, o);
}
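
/*
 * Editor's summary of the peer limits applied above:
 *
 *	agreed_pro_version < 94   -> min(peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET)
 *	agreed_pro_version == 94  -> DRBD_MAX_SIZE_H80_PACKET (32KiB)
 *	94 < version < 100        -> DRBD_MAX_BIO_SIZE_P95 (drbd 8.3.8 .. 8.4.0)
 *	agreed_pro_version >= 100 -> DRBD_MAX_BIO_SIZE
 */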

/* Starts the worker thread */
static void conn_reconfig_start(struct drbd_connection *connection)
{
	drbd_thread_start(&connection->worker);
	drbd_flush_workqueue(&connection->sender_work);
}

/* if still unconfigured, stops worker again. */
static void conn_reconfig_done(struct drbd_connection *connection)
{
	bool stop_threads;
	spin_lock_irq(&connection->resource->req_lock);
	stop_threads = conn_all_vols_unconf(connection) &&
		connection->cstate == C_STANDALONE;
	spin_unlock_irq(&connection->resource->req_lock);
	if (stop_threads) {
		/* ack_receiver thread and ack_sender workqueue are implicitly
		 * stopped by receiver in conn_disconnect() */
		drbd_thread_stop(&connection->receiver);
		drbd_thread_stop(&connection->worker);
	}
}

/* Make sure IO is suspended before calling this function. */
static void drbd_suspend_al(struct drbd_device *device)
{
	int s = 0;

	if (!lc_try_lock(device->act_log)) {
		drbd_warn(device, "Failed to lock al in drbd_suspend_al()\n");
		return;
	}

	drbd_al_shrink(device);
	spin_lock_irq(&device->resource->req_lock);
	if (device->state.conn < C_CONNECTED)
		s = !test_and_set_bit(AL_SUSPENDED, &device->flags);
	spin_unlock_irq(&device->resource->req_lock);
	lc_unlock(device->act_log);

	if (s)
		drbd_info(device, "Suspended AL updates\n");
}


static bool should_set_defaults(struct genl_info *info)
{
	unsigned flags = ((struct drbd_genlmsghdr*)info->userhdr)->flags;
	return 0 != (flags & DRBD_GENL_F_SET_DEFAULTS);
}

static unsigned int drbd_al_extents_max(struct drbd_backing_dev *bdev)
{
	/* This is limited by 16 bit "slot" numbers,
	 * and by available on-disk context storage.
	 *
	 * Also (u16)~0 is special (denotes a "free" extent).
	 *
	 * One transaction occupies one 4kB on-disk block,
	 * we have n such blocks in the on disk ring buffer,
	 * the "current" transaction may fail (n-1),
	 * and there are 919 context slot numbers per transaction.
	 *
	 * 72 transaction blocks amounts to more than 2**16 context slots,
	 * so cap there first.
	 */
	const unsigned int max_al_nr = DRBD_AL_EXTENTS_MAX;
	const unsigned int sufficient_on_disk =
		(max_al_nr + AL_CONTEXT_PER_TRANSACTION -1)
		/AL_CONTEXT_PER_TRANSACTION;

	unsigned int al_size_4k = bdev->md.al_size_4k;

	if (al_size_4k > sufficient_on_disk)
		return max_al_nr;

	return (al_size_4k - 1) * AL_CONTEXT_PER_TRANSACTION;
}
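
/*
 * Editor's check of the arithmetic in the comment above: with 919
 * context slots per transaction, 72 on-disk transaction blocks already
 * describe 72 * 919 = 66168 slots, which exceeds the 2^16 == 65536
 * possible 16 bit slot numbers; hence the cap at DRBD_AL_EXTENTS_MAX.
 */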

static bool write_ordering_changed(struct disk_conf *a, struct disk_conf *b)
{
	return	a->disk_barrier != b->disk_barrier ||
		a->disk_flushes != b->disk_flushes ||
		a->disk_drain != b->disk_drain;
}

static void sanitize_disk_conf(struct drbd_device *device, struct disk_conf *disk_conf,
			       struct drbd_backing_dev *nbc)
{
	struct request_queue * const q = nbc->backing_bdev->bd_disk->queue;

	if (disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
		disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
	if (disk_conf->al_extents > drbd_al_extents_max(nbc))
		disk_conf->al_extents = drbd_al_extents_max(nbc);

	if (!blk_queue_discard(q)) {
		if (disk_conf->rs_discard_granularity) {
			disk_conf->rs_discard_granularity = 0; /* disable feature */
			drbd_info(device, "rs_discard_granularity feature disabled\n");
		}
	}

	if (disk_conf->rs_discard_granularity) {
		int orig_value = disk_conf->rs_discard_granularity;
		int remainder;

		if (q->limits.discard_granularity > disk_conf->rs_discard_granularity)
			disk_conf->rs_discard_granularity = q->limits.discard_granularity;

		remainder = disk_conf->rs_discard_granularity % q->limits.discard_granularity;
		disk_conf->rs_discard_granularity += remainder;

		if (disk_conf->rs_discard_granularity > q->limits.max_discard_sectors << 9)
			disk_conf->rs_discard_granularity = q->limits.max_discard_sectors << 9;

		if (disk_conf->rs_discard_granularity != orig_value)
			drbd_info(device, "rs_discard_granularity changed to %d\n",
				  disk_conf->rs_discard_granularity);
	}
}

static int disk_opts_check_al_size(struct drbd_device *device, struct disk_conf *dc)
{
	int err = -EBUSY;

	if (device->act_log &&
	    device->act_log->nr_elements == dc->al_extents)
		return 0;

	drbd_suspend_io(device);
	/* If IO completion is currently blocked, we would likely wait
	 * "forever" for the activity log to become unused. So we don't. */
	if (atomic_read(&device->ap_bio_cnt))
		goto out;

	wait_event(device->al_wait, lc_try_lock(device->act_log));
	drbd_al_shrink(device);
	err = drbd_check_al_size(device, dc);
	lc_unlock(device->act_log);
	wake_up(&device->al_wait);
out:
	drbd_resume_io(device);
	return err;
}
1570 
1571 int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
1572 {
1573 	struct drbd_config_context adm_ctx;
1574 	enum drbd_ret_code retcode;
1575 	struct drbd_device *device;
1576 	struct disk_conf *new_disk_conf, *old_disk_conf;
1577 	struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
1578 	int err;
1579 	unsigned int fifo_size;
1580 
1581 	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
1582 	if (!adm_ctx.reply_skb)
1583 		return retcode;
1584 	if (retcode != NO_ERROR)
1585 		goto finish;
1586 
1587 	device = adm_ctx.device;
1588 	mutex_lock(&adm_ctx.resource->adm_mutex);
1589 
1590 	/* we also need a disk
1591 	 * to change the options on */
1592 	if (!get_ldev(device)) {
1593 		retcode = ERR_NO_DISK;
1594 		goto out;
1595 	}
1596 
1597 	new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
1598 	if (!new_disk_conf) {
1599 		retcode = ERR_NOMEM;
1600 		goto fail;
1601 	}
1602 
1603 	mutex_lock(&device->resource->conf_update);
1604 	old_disk_conf = device->ldev->disk_conf;
1605 	*new_disk_conf = *old_disk_conf;
1606 	if (should_set_defaults(info))
1607 		set_disk_conf_defaults(new_disk_conf);
1608 
1609 	err = disk_conf_from_attrs_for_change(new_disk_conf, info);
1610 	if (err && err != -ENOMSG) {
1611 		retcode = ERR_MANDATORY_TAG;
1612 		drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
1613 		goto fail_unlock;
1614 	}
1615 
1616 	if (!expect(new_disk_conf->resync_rate >= 1))
1617 		new_disk_conf->resync_rate = 1;
1618 
1619 	sanitize_disk_conf(device, new_disk_conf, device->ldev);
1620 
1621 	if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
1622 		new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
1623 
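	/* c_plan_ahead is configured in 0.1 second units; the resync plan
	 * fifo keeps one slot per SLEEP_TIME tick (SLEEP_TIME is HZ/10
	 * elsewhere in drbd), i.e. one slot per 100ms of plan-ahead. */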
1624 	fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
1625 	if (fifo_size != device->rs_plan_s->size) {
1626 		new_plan = fifo_alloc(fifo_size);
1627 		if (!new_plan) {
1628 			drbd_err(device, "kmalloc of fifo_buffer failed\n");
1629 			retcode = ERR_NOMEM;
1630 			goto fail_unlock;
1631 		}
1632 	}
1633 
1634 	err = disk_opts_check_al_size(device, new_disk_conf);
1635 	if (err) {
1636 		/* Could be just "busy". Ignore?
1637 		 * Introduce dedicated error code? */
1638 		drbd_msg_put_info(adm_ctx.reply_skb,
1639 			"Try again without changing current al-extents setting");
1640 		retcode = ERR_NOMEM;
1641 		goto fail_unlock;
1642 	}
1643 
1644 	lock_all_resources();
1645 	retcode = drbd_resync_after_valid(device, new_disk_conf->resync_after);
1646 	if (retcode == NO_ERROR) {
1647 		rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
1648 		drbd_resync_after_changed(device);
1649 	}
1650 	unlock_all_resources();
1651 
1652 	if (retcode != NO_ERROR)
1653 		goto fail_unlock;
1654 
1655 	if (new_plan) {
1656 		old_plan = device->rs_plan_s;
1657 		rcu_assign_pointer(device->rs_plan_s, new_plan);
1658 	}
1659 
1660 	mutex_unlock(&device->resource->conf_update);
1661 
1662 	if (new_disk_conf->al_updates)
1663 		device->ldev->md.flags &= ~MDF_AL_DISABLED;
1664 	else
1665 		device->ldev->md.flags |= MDF_AL_DISABLED;
1666 
1667 	if (new_disk_conf->md_flushes)
1668 		clear_bit(MD_NO_FUA, &device->flags);
1669 	else
1670 		set_bit(MD_NO_FUA, &device->flags);
1671 
1672 	if (write_ordering_changed(old_disk_conf, new_disk_conf))
1673 		drbd_bump_write_ordering(device->resource, NULL, WO_BDEV_FLUSH);
1674 
1675 	if (old_disk_conf->discard_zeroes_if_aligned != new_disk_conf->discard_zeroes_if_aligned
1676 	||  old_disk_conf->disable_write_same != new_disk_conf->disable_write_same)
1677 		drbd_reconsider_queue_parameters(device, device->ldev, NULL);
1678 
1679 	drbd_md_sync(device);
1680 
1681 	if (device->state.conn >= C_CONNECTED) {
1682 		struct drbd_peer_device *peer_device;
1683 
1684 		for_each_peer_device(peer_device, device)
1685 			drbd_send_sync_param(peer_device);
1686 	}
1687 
1688 	synchronize_rcu();
1689 	kfree(old_disk_conf);
1690 	kfree(old_plan);
1691 	mod_timer(&device->request_timer, jiffies + HZ);
1692 	goto success;
1693 
1694 fail_unlock:
1695 	mutex_unlock(&device->resource->conf_update);
1696  fail:
1697 	kfree(new_disk_conf);
1698 	kfree(new_plan);
1699 success:
1700 	put_ldev(device);
1701  out:
1702 	mutex_unlock(&adm_ctx.resource->adm_mutex);
1703  finish:
1704 	drbd_adm_finish(&adm_ctx, info, retcode);
1705 	return 0;
1706 }
1707 
1708 static struct block_device *open_backing_dev(struct drbd_device *device,
1709 		const char *bdev_path, void *claim_ptr, bool do_bd_link)
1710 {
1711 	struct block_device *bdev;
1712 	int err = 0;
1713 
1714 	bdev = blkdev_get_by_path(bdev_path,
1715 				  FMODE_READ | FMODE_WRITE | FMODE_EXCL, claim_ptr);
1716 	if (IS_ERR(bdev)) {
1717 		drbd_err(device, "open(\"%s\") failed with %ld\n",
1718 				bdev_path, PTR_ERR(bdev));
1719 		return bdev;
1720 	}
1721 
1722 	if (!do_bd_link)
1723 		return bdev;
1724 
1725 	err = bd_link_disk_holder(bdev, device->vdisk);
1726 	if (err) {
1727 		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1728 		drbd_err(device, "bd_link_disk_holder(\"%s\", ...) failed with %d\n",
1729 				bdev_path, err);
1730 		bdev = ERR_PTR(err);
1731 	}
1732 	return bdev;
1733 }
1734 
1735 static int open_backing_devices(struct drbd_device *device,
1736 		struct disk_conf *new_disk_conf,
1737 		struct drbd_backing_dev *nbc)
1738 {
1739 	struct block_device *bdev;
1740 
1741 	bdev = open_backing_dev(device, new_disk_conf->backing_dev, device, true);
1742 	if (IS_ERR(bdev))
1743 		return ERR_OPEN_DISK;
1744 	nbc->backing_bdev = bdev;
1745 
1746 	/*
1747 	 * meta_dev_idx >= 0: external fixed size, possibly multiple
1748 	 * drbd sharing one meta device.  TODO in that case, paranoia
1749 	 * check that [md_bdev, meta_dev_idx] is not yet used by some
1750 	 * other drbd minor!  (if you use drbd.conf + drbdadm, that
1751 	 * should check it for you already; but if you don't, or
1752 	 * someone fooled it, we need to double check here)
1753 	 */
1754 	bdev = open_backing_dev(device, new_disk_conf->meta_dev,
1755 		/* claim ptr: device, if claimed exclusively; shared drbd_m_holder,
1756 		 * if potentially shared with other drbd minors */
1757 			(new_disk_conf->meta_dev_idx < 0) ? (void*)device : (void*)drbd_m_holder,
1758 		/* avoid double bd_claim_by_disk() for the same (source,target) tuple,
1759 		 * as would happen with internal metadata. */
1760 			(new_disk_conf->meta_dev_idx != DRBD_MD_INDEX_FLEX_INT &&
1761 			 new_disk_conf->meta_dev_idx != DRBD_MD_INDEX_INTERNAL));
1762 	if (IS_ERR(bdev))
1763 		return ERR_OPEN_MD_DISK;
1764 	nbc->md_bdev = bdev;
1765 	return NO_ERROR;
1766 }
1767 
1768 static void close_backing_dev(struct drbd_device *device, struct block_device *bdev,
1769 	bool do_bd_unlink)
1770 {
1771 	if (!bdev)
1772 		return;
1773 	if (do_bd_unlink)
1774 		bd_unlink_disk_holder(bdev, device->vdisk);
1775 	blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1776 }
1777 
1778 void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev)
1779 {
1780 	if (ldev == NULL)
1781 		return;
1782 
1783 	close_backing_dev(device, ldev->md_bdev, ldev->md_bdev != ldev->backing_bdev);
1784 	close_backing_dev(device, ldev->backing_bdev, true);
1785 
1786 	kfree(ldev->disk_conf);
1787 	kfree(ldev);
1788 }
1789 
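/* Attach a local backing device.  Roughly: allocate and parse the new
 * disk configuration, open the data and meta data block devices, read
 * the meta data super block, transition to D_ATTACHING, run sanity
 * checks against the on-disk state, and, past the "point of no return"
 * below, hand the backing device over to the drbd_device and determine
 * the resulting disk state. */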
1790 int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
1791 {
1792 	struct drbd_config_context adm_ctx;
1793 	struct drbd_device *device;
1794 	struct drbd_peer_device *peer_device;
1795 	struct drbd_connection *connection;
1796 	int err;
1797 	enum drbd_ret_code retcode;
1798 	enum determine_dev_size dd;
1799 	sector_t max_possible_sectors;
1800 	sector_t min_md_device_sectors;
1801 	struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
1802 	struct disk_conf *new_disk_conf = NULL;
1803 	struct lru_cache *resync_lru = NULL;
1804 	struct fifo_buffer *new_plan = NULL;
1805 	union drbd_state ns, os;
1806 	enum drbd_state_rv rv;
1807 	struct net_conf *nc;
1808 
1809 	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
1810 	if (!adm_ctx.reply_skb)
1811 		return retcode;
1812 	if (retcode != NO_ERROR)
1813 		goto finish;
1814 
1815 	device = adm_ctx.device;
1816 	mutex_lock(&adm_ctx.resource->adm_mutex);
1817 	peer_device = first_peer_device(device);
1818 	connection = peer_device->connection;
1819 	conn_reconfig_start(connection);
1820 
1821 	/* if you want to reconfigure, please tear down first */
1822 	if (device->state.disk > D_DISKLESS) {
1823 		retcode = ERR_DISK_CONFIGURED;
1824 		goto fail;
1825 	}
1826 	/* It may just now have detached because of IO error.  Make sure
1827 	 * drbd_ldev_destroy is done already, we may end up here very fast,
1828 	 * e.g. if someone calls attach from the on-io-error handler,
1829 	 * to realize a "hot spare" feature (not that I'd recommend that) */
1830 	wait_event(device->misc_wait, !test_bit(GOING_DISKLESS, &device->flags));
1831 
1832 	/* make sure there is no leftover from previous force-detach attempts */
1833 	clear_bit(FORCE_DETACH, &device->flags);
1834 	clear_bit(WAS_IO_ERROR, &device->flags);
1835 	clear_bit(WAS_READ_ERROR, &device->flags);
1836 
1837 	/* and no leftover from previously aborted resync or verify, either */
1838 	device->rs_total = 0;
1839 	device->rs_failed = 0;
1840 	atomic_set(&device->rs_pending_cnt, 0);
1841 
1842 	/* allocation not in the IO path, drbdsetup context */
1843 	nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
1844 	if (!nbc) {
1845 		retcode = ERR_NOMEM;
1846 		goto fail;
1847 	}
1848 	spin_lock_init(&nbc->md.uuid_lock);
1849 
1850 	new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
1851 	if (!new_disk_conf) {
1852 		retcode = ERR_NOMEM;
1853 		goto fail;
1854 	}
1855 	nbc->disk_conf = new_disk_conf;
1856 
1857 	set_disk_conf_defaults(new_disk_conf);
1858 	err = disk_conf_from_attrs(new_disk_conf, info);
1859 	if (err) {
1860 		retcode = ERR_MANDATORY_TAG;
1861 		drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
1862 		goto fail;
1863 	}
1864 
1865 	if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
1866 		new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
1867 
1868 	new_plan = fifo_alloc((new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ);
1869 	if (!new_plan) {
1870 		retcode = ERR_NOMEM;
1871 		goto fail;
1872 	}
1873 
1874 	if (new_disk_conf->meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
1875 		retcode = ERR_MD_IDX_INVALID;
1876 		goto fail;
1877 	}
1878 
1879 	rcu_read_lock();
1880 	nc = rcu_dereference(connection->net_conf);
1881 	if (nc) {
1882 		if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
1883 			rcu_read_unlock();
1884 			retcode = ERR_STONITH_AND_PROT_A;
1885 			goto fail;
1886 		}
1887 	}
1888 	rcu_read_unlock();
1889 
1890 	retcode = open_backing_devices(device, new_disk_conf, nbc);
1891 	if (retcode != NO_ERROR)
1892 		goto fail;
1893 
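	/* Internal meta data lives on the data device itself, so the two
	 * block devices must be identical exactly when an internal meta
	 * data index was requested. */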
1894 	if ((nbc->backing_bdev == nbc->md_bdev) !=
1895 	    (new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
1896 	     new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
1897 		retcode = ERR_MD_IDX_INVALID;
1898 		goto fail;
1899 	}
1900 
1901 	resync_lru = lc_create("resync", drbd_bm_ext_cache,
1902 			1, 61, sizeof(struct bm_extent),
1903 			offsetof(struct bm_extent, lce));
1904 	if (!resync_lru) {
1905 		retcode = ERR_NOMEM;
1906 		goto fail;
1907 	}
1908 
1909 	/* Read our meta data super block early.
1910 	 * This also sets other on-disk offsets. */
1911 	retcode = drbd_md_read(device, nbc);
1912 	if (retcode != NO_ERROR)
1913 		goto fail;
1914 
1915 	sanitize_disk_conf(device, new_disk_conf, nbc);
1916 
1917 	if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) {
1918 		drbd_err(device, "max capacity %llu smaller than disk size %llu\n",
1919 			(unsigned long long) drbd_get_max_capacity(nbc),
1920 			(unsigned long long) new_disk_conf->disk_size);
1921 		retcode = ERR_DISK_TOO_SMALL;
1922 		goto fail;
1923 	}
1924 
1925 	if (new_disk_conf->meta_dev_idx < 0) {
1926 		max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
1927 		/* at least one MB, otherwise it does not make sense */
1928 		min_md_device_sectors = (2<<10);
1929 	} else {
1930 		max_possible_sectors = DRBD_MAX_SECTORS;
1931 		min_md_device_sectors = MD_128MB_SECT * (new_disk_conf->meta_dev_idx + 1);
1932 	}
1933 
1934 	if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
1935 		retcode = ERR_MD_DISK_TOO_SMALL;
1936 		drbd_warn(device, "refusing attach: md-device too small, "
1937 		     "at least %llu sectors needed for this meta-disk type\n",
1938 		     (unsigned long long) min_md_device_sectors);
1939 		goto fail;
1940 	}
1941 
1942 	/* Make sure the new disk is big enough
1943 	 * (we may currently be R_PRIMARY with no local disk...) */
1944 	if (drbd_get_max_capacity(nbc) <
1945 	    drbd_get_capacity(device->this_bdev)) {
1946 		retcode = ERR_DISK_TOO_SMALL;
1947 		goto fail;
1948 	}
1949 
1950 	nbc->known_size = drbd_get_capacity(nbc->backing_bdev);
1951 
1952 	if (nbc->known_size > max_possible_sectors) {
1953 		drbd_warn(device, "==> truncating very big lower level device "
1954 			"to the current maximum of %llu sectors <==\n",
1955 			(unsigned long long) max_possible_sectors);
1956 		if (new_disk_conf->meta_dev_idx >= 0)
1957 			drbd_warn(device, "==>> using internal or flexible "
1958 				      "meta data may help <<==\n");
1959 	}
1960 
1961 	drbd_suspend_io(device);
1962 	/* also wait for the last barrier ack. */
1963 	/* FIXME see also https://daiquiri.linbit/cgi-bin/bugzilla/show_bug.cgi?id=171
1964 	 * We need a way to either ignore barrier acks for barriers sent before a device
1965 	 * was attached, or a way to wait for all pending barrier acks to come in.
1966 	 * As barriers are counted per resource,
1967 	 * we'd need to suspend io on all devices of a resource.
1968 	 */
1969 	wait_event(device->misc_wait, !atomic_read(&device->ap_pending_cnt) || drbd_suspended(device));
1970 	/* and for any other previously queued work */
1971 	drbd_flush_workqueue(&connection->sender_work);
1972 
1973 	rv = _drbd_request_state(device, NS(disk, D_ATTACHING), CS_VERBOSE);
1974 	retcode = rv;  /* FIXME: Type mismatch. */
1975 	drbd_resume_io(device);
1976 	if (rv < SS_SUCCESS)
1977 		goto fail;
1978 
1979 	if (!get_ldev_if_state(device, D_ATTACHING))
1980 		goto force_diskless;
1981 
1982 	if (!device->bitmap) {
1983 		if (drbd_bm_init(device)) {
1984 			retcode = ERR_NOMEM;
1985 			goto force_diskless_dec;
1986 		}
1987 	}
1988 
1989 	if (device->state.pdsk != D_UP_TO_DATE && device->ed_uuid &&
1990 	    (device->state.role == R_PRIMARY || device->state.peer == R_PRIMARY) &&
1991 	    (device->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
1992 		drbd_err(device, "Can only attach to data with current UUID=%016llX\n",
1993 		    (unsigned long long)device->ed_uuid);
1994 		retcode = ERR_DATA_NOT_CURRENT;
1995 		goto force_diskless_dec;
1996 	}
1997 
1998 	/* Since we are diskless, fix the activity log first... */
1999 	if (drbd_check_al_size(device, new_disk_conf)) {
2000 		retcode = ERR_NOMEM;
2001 		goto force_diskless_dec;
2002 	}
2003 
2004 	/* Prevent shrinking of consistent devices! */
2005 	{
2006 		unsigned long long nsz = drbd_new_dev_size(device, nbc, nbc->disk_conf->disk_size, 0);
2007 		unsigned long long eff = nbc->md.la_size_sect;
2008 		if (drbd_md_test_flag(nbc, MDF_CONSISTENT) && nsz < eff) {
2009 			if (nsz == nbc->disk_conf->disk_size) {
2010 				drbd_warn(device, "truncating a consistent device during attach (%llu < %llu)\n", nsz, eff);
2011 			} else {
2012 				drbd_warn(device, "refusing to truncate a consistent device (%llu < %llu)\n", nsz, eff);
2013 				drbd_msg_sprintf_info(adm_ctx.reply_skb,
2014 					"To-be-attached device has last effective > current size, and is consistent\n"
2015 					"(%llu > %llu sectors). Refusing to attach.", eff, nsz);
2016 				retcode = ERR_IMPLICIT_SHRINK;
2017 				goto force_diskless_dec;
2018 			}
2019 		}
2020 	}
2021 
2022 	lock_all_resources();
2023 	retcode = drbd_resync_after_valid(device, new_disk_conf->resync_after);
2024 	if (retcode != NO_ERROR) {
2025 		unlock_all_resources();
2026 		goto force_diskless_dec;
2027 	}
2028 
2029 	/* Reset the "barriers don't work" bits here, then force meta data to
2030 	 * be written, to ensure we determine if barriers are supported. */
2031 	if (new_disk_conf->md_flushes)
2032 		clear_bit(MD_NO_FUA, &device->flags);
2033 	else
2034 		set_bit(MD_NO_FUA, &device->flags);
2035 
2036 	/* Point of no return reached.
2037 	 * Devices and memory are no longer released by error cleanup below.
2038 	 * From now on the device takes over responsibility, and the state
2039 	 * engine should clean it up somewhere. */
2040 	D_ASSERT(device, device->ldev == NULL);
2041 	device->ldev = nbc;
2042 	device->resync = resync_lru;
2043 	device->rs_plan_s = new_plan;
2044 	nbc = NULL;
2045 	resync_lru = NULL;
2046 	new_disk_conf = NULL;
2047 	new_plan = NULL;
2048 
2049 	drbd_resync_after_changed(device);
2050 	drbd_bump_write_ordering(device->resource, device->ldev, WO_BDEV_FLUSH);
2051 	unlock_all_resources();
2052 
2053 	if (drbd_md_test_flag(device->ldev, MDF_CRASHED_PRIMARY))
2054 		set_bit(CRASHED_PRIMARY, &device->flags);
2055 	else
2056 		clear_bit(CRASHED_PRIMARY, &device->flags);
2057 
2058 	if (drbd_md_test_flag(device->ldev, MDF_PRIMARY_IND) &&
2059 	    !(device->state.role == R_PRIMARY && device->resource->susp_nod))
2060 		set_bit(CRASHED_PRIMARY, &device->flags);
2061 
2062 	device->send_cnt = 0;
2063 	device->recv_cnt = 0;
2064 	device->read_cnt = 0;
2065 	device->writ_cnt = 0;
2066 
2067 	drbd_reconsider_queue_parameters(device, device->ldev, NULL);
2068 
2069 	/* If I am currently not R_PRIMARY,
2070 	 * but meta data primary indicator is set,
2071 	 * I just now recover from a hard crash,
2072 	 * and have been R_PRIMARY before that crash.
2073 	 *
2074 	 * Now, if I had no connection before that crash
2075 	 * (have been degraded R_PRIMARY), chances are that
2076 	 * I won't find my peer now either.
2077 	 *
2078 	 * In that case, and _only_ in that case,
2079 	 * we use the degr-wfc-timeout instead of the default,
2080 	 * so we can automatically recover from a crash of a
2081 	 * degraded but active "cluster" after a certain timeout.
2082 	 */
2083 	clear_bit(USE_DEGR_WFC_T, &device->flags);
2084 	if (device->state.role != R_PRIMARY &&
2085 	     drbd_md_test_flag(device->ldev, MDF_PRIMARY_IND) &&
2086 	    !drbd_md_test_flag(device->ldev, MDF_CONNECTED_IND))
2087 		set_bit(USE_DEGR_WFC_T, &device->flags);
2088 
2089 	dd = drbd_determine_dev_size(device, 0, NULL);
2090 	if (dd <= DS_ERROR) {
2091 		retcode = ERR_NOMEM_BITMAP;
2092 		goto force_diskless_dec;
2093 	} else if (dd == DS_GREW)
2094 		set_bit(RESYNC_AFTER_NEG, &device->flags);
2095 
2096 	if (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC) ||
2097 	    (test_bit(CRASHED_PRIMARY, &device->flags) &&
2098 	     drbd_md_test_flag(device->ldev, MDF_AL_DISABLED))) {
2099 		drbd_info(device, "Assuming that all blocks are out of sync "
2100 		     "(aka FullSync)\n");
2101 		if (drbd_bitmap_io(device, &drbd_bmio_set_n_write,
2102 			"set_n_write from attaching", BM_LOCKED_MASK)) {
2103 			retcode = ERR_IO_MD_DISK;
2104 			goto force_diskless_dec;
2105 		}
2106 	} else {
2107 		if (drbd_bitmap_io(device, &drbd_bm_read,
2108 			"read from attaching", BM_LOCKED_MASK)) {
2109 			retcode = ERR_IO_MD_DISK;
2110 			goto force_diskless_dec;
2111 		}
2112 	}
2113 
2114 	if (_drbd_bm_total_weight(device) == drbd_bm_bits(device))
2115 		drbd_suspend_al(device); /* IO is still suspended here... */
2116 
2117 	spin_lock_irq(&device->resource->req_lock);
2118 	os = drbd_read_state(device);
2119 	ns = os;
2120 	/* If MDF_CONSISTENT is not set go into inconsistent state,
2121 	   otherwise investigate MDF_WAS_UP_TO_DATE...
2122 	   If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
2123 	   otherwise into D_CONSISTENT state.
2124 	*/
2125 	if (drbd_md_test_flag(device->ldev, MDF_CONSISTENT)) {
2126 		if (drbd_md_test_flag(device->ldev, MDF_WAS_UP_TO_DATE))
2127 			ns.disk = D_CONSISTENT;
2128 		else
2129 			ns.disk = D_OUTDATED;
2130 	} else {
2131 		ns.disk = D_INCONSISTENT;
2132 	}
2133 
2134 	if (drbd_md_test_flag(device->ldev, MDF_PEER_OUT_DATED))
2135 		ns.pdsk = D_OUTDATED;
2136 
2137 	rcu_read_lock();
2138 	if (ns.disk == D_CONSISTENT &&
2139 	    (ns.pdsk == D_OUTDATED || rcu_dereference(device->ldev->disk_conf)->fencing == FP_DONT_CARE))
2140 		ns.disk = D_UP_TO_DATE;
2141 
2142 	/* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
2143 	   MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
2144 	   this point, because drbd_request_state() modifies these
2145 	   flags. */
2146 
2147 	if (rcu_dereference(device->ldev->disk_conf)->al_updates)
2148 		device->ldev->md.flags &= ~MDF_AL_DISABLED;
2149 	else
2150 		device->ldev->md.flags |= MDF_AL_DISABLED;
2151 
2152 	rcu_read_unlock();
2153 
2154 	/* In case we are C_CONNECTED postpone any decision on the new disk
2155 	   state until after the negotiation phase. */
2156 	if (device->state.conn == C_CONNECTED) {
2157 		device->new_state_tmp.i = ns.i;
2158 		ns.i = os.i;
2159 		ns.disk = D_NEGOTIATING;
2160 
2161 		/* We expect to receive up-to-date UUIDs soon.
2162 		   To avoid a race in receive_state, free p_uuid while
2163 		   holding req_lock. I.e. atomic with the state change */
2164 		kfree(device->p_uuid);
2165 		device->p_uuid = NULL;
2166 	}
2167 
2168 	rv = _drbd_set_state(device, ns, CS_VERBOSE, NULL);
2169 	spin_unlock_irq(&device->resource->req_lock);
2170 
2171 	if (rv < SS_SUCCESS)
2172 		goto force_diskless_dec;
2173 
2174 	mod_timer(&device->request_timer, jiffies + HZ);
2175 
2176 	if (device->state.role == R_PRIMARY)
2177 		device->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
2178 	else
2179 		device->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
2180 
2181 	drbd_md_mark_dirty(device);
2182 	drbd_md_sync(device);
2183 
2184 	kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE);
2185 	put_ldev(device);
2186 	conn_reconfig_done(connection);
2187 	mutex_unlock(&adm_ctx.resource->adm_mutex);
2188 	drbd_adm_finish(&adm_ctx, info, retcode);
2189 	return 0;
2190 
2191  force_diskless_dec:
2192 	put_ldev(device);
2193  force_diskless:
2194 	drbd_force_state(device, NS(disk, D_DISKLESS));
2195 	drbd_md_sync(device);
2196  fail:
2197 	conn_reconfig_done(connection);
2198 	if (nbc) {
2199 		close_backing_dev(device, nbc->md_bdev, nbc->md_bdev != nbc->backing_bdev);
2200 		close_backing_dev(device, nbc->backing_bdev, true);
2201 		kfree(nbc);
2202 	}
2203 	kfree(new_disk_conf);
2204 	lc_destroy(resync_lru);
2205 	kfree(new_plan);
2206 	mutex_unlock(&adm_ctx.resource->adm_mutex);
2207  finish:
2208 	drbd_adm_finish(&adm_ctx, info, retcode);
2209 	return 0;
2210 }
2211 
2212 static int adm_detach(struct drbd_device *device, int force)
2213 {
2214 	if (force) {
2215 		set_bit(FORCE_DETACH, &device->flags);
2216 		drbd_force_state(device, NS(disk, D_FAILED));
2217 		return SS_SUCCESS;
2218 	}
2219 
2220 	return drbd_request_detach_interruptible(device);
2221 }
2222 
2223 /* Detaching the disk is a process in multiple stages.  First we need to lock
2224  * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
2225  * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
2226  * internal references as well.
2227  * Only then we have finally detached. */
2228 int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
2229 {
2230 	struct drbd_config_context adm_ctx;
2231 	enum drbd_ret_code retcode;
2232 	struct detach_parms parms = { };
2233 	int err;
2234 
2235 	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
2236 	if (!adm_ctx.reply_skb)
2237 		return retcode;
2238 	if (retcode != NO_ERROR)
2239 		goto out;
2240 
2241 	if (info->attrs[DRBD_NLA_DETACH_PARMS]) {
2242 		err = detach_parms_from_attrs(&parms, info);
2243 		if (err) {
2244 			retcode = ERR_MANDATORY_TAG;
2245 			drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2246 			goto out;
2247 		}
2248 	}
2249 
2250 	mutex_lock(&adm_ctx.resource->adm_mutex);
2251 	retcode = adm_detach(adm_ctx.device, parms.force_detach);
2252 	mutex_unlock(&adm_ctx.resource->adm_mutex);
2253 out:
2254 	drbd_adm_finish(&adm_ctx, info, retcode);
2255 	return 0;
2256 }
2257 
2258 static bool conn_resync_running(struct drbd_connection *connection)
2259 {
2260 	struct drbd_peer_device *peer_device;
2261 	bool rv = false;
2262 	int vnr;
2263 
2264 	rcu_read_lock();
2265 	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
2266 		struct drbd_device *device = peer_device->device;
2267 		if (device->state.conn == C_SYNC_SOURCE ||
2268 		    device->state.conn == C_SYNC_TARGET ||
2269 		    device->state.conn == C_PAUSED_SYNC_S ||
2270 		    device->state.conn == C_PAUSED_SYNC_T) {
2271 			rv = true;
2272 			break;
2273 		}
2274 	}
2275 	rcu_read_unlock();
2276 
2277 	return rv;
2278 }
2279 
2280 static bool conn_ov_running(struct drbd_connection *connection)
2281 {
2282 	struct drbd_peer_device *peer_device;
2283 	bool rv = false;
2284 	int vnr;
2285 
2286 	rcu_read_lock();
2287 	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
2288 		struct drbd_device *device = peer_device->device;
2289 		if (device->state.conn == C_VERIFY_S ||
2290 		    device->state.conn == C_VERIFY_T) {
2291 			rv = true;
2292 			break;
2293 		}
2294 	}
2295 	rcu_read_unlock();
2296 
2297 	return rv;
2298 }
2299 
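/* Validate a new net_conf against the current connection state: while
 * connected to a peer with an agreed protocol version below 100, the
 * wire protocol, the two-primaries setting and the integrity algorithm
 * must not change; dual-primary requires protocol C; STONITH fencing
 * conflicts with protocol A; congestion policies other than "block"
 * require protocol A; and discard-my-data cannot be set while we are
 * primary. */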
2300 static enum drbd_ret_code
2301 _check_net_options(struct drbd_connection *connection, struct net_conf *old_net_conf, struct net_conf *new_net_conf)
2302 {
2303 	struct drbd_peer_device *peer_device;
2304 	int i;
2305 
2306 	if (old_net_conf && connection->cstate == C_WF_REPORT_PARAMS && connection->agreed_pro_version < 100) {
2307 		if (new_net_conf->wire_protocol != old_net_conf->wire_protocol)
2308 			return ERR_NEED_APV_100;
2309 
2310 		if (new_net_conf->two_primaries != old_net_conf->two_primaries)
2311 			return ERR_NEED_APV_100;
2312 
2313 		if (strcmp(new_net_conf->integrity_alg, old_net_conf->integrity_alg))
2314 			return ERR_NEED_APV_100;
2315 	}
2316 
2317 	if (!new_net_conf->two_primaries &&
2318 	    conn_highest_role(connection) == R_PRIMARY &&
2319 	    conn_highest_peer(connection) == R_PRIMARY)
2320 		return ERR_NEED_ALLOW_TWO_PRI;
2321 
2322 	if (new_net_conf->two_primaries &&
2323 	    (new_net_conf->wire_protocol != DRBD_PROT_C))
2324 		return ERR_NOT_PROTO_C;
2325 
2326 	idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2327 		struct drbd_device *device = peer_device->device;
2328 		if (get_ldev(device)) {
2329 			enum drbd_fencing_p fp = rcu_dereference(device->ldev->disk_conf)->fencing;
2330 			put_ldev(device);
2331 			if (new_net_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
2332 				return ERR_STONITH_AND_PROT_A;
2333 		}
2334 		if (device->state.role == R_PRIMARY && new_net_conf->discard_my_data)
2335 			return ERR_DISCARD_IMPOSSIBLE;
2336 	}
2337 
2338 	if (new_net_conf->on_congestion != OC_BLOCK && new_net_conf->wire_protocol != DRBD_PROT_A)
2339 		return ERR_CONG_NOT_PROTO_A;
2340 
2341 	return NO_ERROR;
2342 }
2343 
2344 static enum drbd_ret_code
2345 check_net_options(struct drbd_connection *connection, struct net_conf *new_net_conf)
2346 {
2347 	enum drbd_ret_code rv;
2348 	struct drbd_peer_device *peer_device;
2349 	int i;
2350 
2351 	rcu_read_lock();
2352 	rv = _check_net_options(connection, rcu_dereference(connection->net_conf), new_net_conf);
2353 	rcu_read_unlock();
2354 
2355 	/* connection->peer_devices protected by genl_lock() here */
2356 	idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2357 		struct drbd_device *device = peer_device->device;
2358 		if (!device->bitmap) {
2359 			if (drbd_bm_init(device))
2360 				return ERR_NOMEM;
2361 		}
2362 	}
2363 
2364 	return rv;
2365 }
2366 
2367 struct crypto {
2368 	struct crypto_shash *verify_tfm;
2369 	struct crypto_shash *csums_tfm;
2370 	struct crypto_shash *cram_hmac_tfm;
2371 	struct crypto_shash *integrity_tfm;
2372 };
2373 
2374 static int
2375 alloc_shash(struct crypto_shash **tfm, char *tfm_name, int err_alg)
2376 {
2377 	if (!tfm_name[0])
2378 		return NO_ERROR;
2379 
2380 	*tfm = crypto_alloc_shash(tfm_name, 0, 0);
2381 	if (IS_ERR(*tfm)) {
2382 		*tfm = NULL;
2383 		return err_alg;
2384 	}
2385 
2386 	return NO_ERROR;
2387 }
2388 
2389 static enum drbd_ret_code
2390 alloc_crypto(struct crypto *crypto, struct net_conf *new_net_conf)
2391 {
2392 	char hmac_name[CRYPTO_MAX_ALG_NAME];
2393 	enum drbd_ret_code rv;
2394 
2395 	rv = alloc_shash(&crypto->csums_tfm, new_net_conf->csums_alg,
2396 			 ERR_CSUMS_ALG);
2397 	if (rv != NO_ERROR)
2398 		return rv;
2399 	rv = alloc_shash(&crypto->verify_tfm, new_net_conf->verify_alg,
2400 			 ERR_VERIFY_ALG);
2401 	if (rv != NO_ERROR)
2402 		return rv;
2403 	rv = alloc_shash(&crypto->integrity_tfm, new_net_conf->integrity_alg,
2404 			 ERR_INTEGRITY_ALG);
2405 	if (rv != NO_ERROR)
2406 		return rv;
2407 	if (new_net_conf->cram_hmac_alg[0] != 0) {
2408 		snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
2409 			 new_net_conf->cram_hmac_alg);
2410 
2411 		rv = alloc_shash(&crypto->cram_hmac_tfm, hmac_name,
2412 				 ERR_AUTH_ALG);
2413 	}
2414 
2415 	return rv;
2416 }
2417 
2418 static void free_crypto(struct crypto *crypto)
2419 {
2420 	crypto_free_shash(crypto->cram_hmac_tfm);
2421 	crypto_free_shash(crypto->integrity_tfm);
2422 	crypto_free_shash(crypto->csums_tfm);
2423 	crypto_free_shash(crypto->verify_tfm);
2424 }
2425 
2426 int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
2427 {
2428 	struct drbd_config_context adm_ctx;
2429 	enum drbd_ret_code retcode;
2430 	struct drbd_connection *connection;
2431 	struct net_conf *old_net_conf, *new_net_conf = NULL;
2432 	int err;
2433 	int ovr; /* online verify running */
2434 	int rsr; /* re-sync running */
2435 	struct crypto crypto = { };
2436 
2437 	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_CONNECTION);
2438 	if (!adm_ctx.reply_skb)
2439 		return retcode;
2440 	if (retcode != NO_ERROR)
2441 		goto finish;
2442 
2443 	connection = adm_ctx.connection;
2444 	mutex_lock(&adm_ctx.resource->adm_mutex);
2445 
2446 	new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
2447 	if (!new_net_conf) {
2448 		retcode = ERR_NOMEM;
2449 		goto out;
2450 	}
2451 
2452 	conn_reconfig_start(connection);
2453 
2454 	mutex_lock(&connection->data.mutex);
2455 	mutex_lock(&connection->resource->conf_update);
2456 	old_net_conf = connection->net_conf;
2457 
2458 	if (!old_net_conf) {
2459 		drbd_msg_put_info(adm_ctx.reply_skb, "net conf missing, try connect");
2460 		retcode = ERR_INVALID_REQUEST;
2461 		goto fail;
2462 	}
2463 
2464 	*new_net_conf = *old_net_conf;
2465 	if (should_set_defaults(info))
2466 		set_net_conf_defaults(new_net_conf);
2467 
2468 	err = net_conf_from_attrs_for_change(new_net_conf, info);
2469 	if (err && err != -ENOMSG) {
2470 		retcode = ERR_MANDATORY_TAG;
2471 		drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2472 		goto fail;
2473 	}
2474 
2475 	retcode = check_net_options(connection, new_net_conf);
2476 	if (retcode != NO_ERROR)
2477 		goto fail;
2478 
2479 	/* re-sync running */
2480 	rsr = conn_resync_running(connection);
2481 	if (rsr && strcmp(new_net_conf->csums_alg, old_net_conf->csums_alg)) {
2482 		retcode = ERR_CSUMS_RESYNC_RUNNING;
2483 		goto fail;
2484 	}
2485 
2486 	/* online verify running */
2487 	ovr = conn_ov_running(connection);
2488 	if (ovr && strcmp(new_net_conf->verify_alg, old_net_conf->verify_alg)) {
2489 		retcode = ERR_VERIFY_RUNNING;
2490 		goto fail;
2491 	}
2492 
2493 	retcode = alloc_crypto(&crypto, new_net_conf);
2494 	if (retcode != NO_ERROR)
2495 		goto fail;
2496 
2497 	rcu_assign_pointer(connection->net_conf, new_net_conf);
2498 
2499 	if (!rsr) {
2500 		crypto_free_shash(connection->csums_tfm);
2501 		connection->csums_tfm = crypto.csums_tfm;
2502 		crypto.csums_tfm = NULL;
2503 	}
2504 	if (!ovr) {
2505 		crypto_free_shash(connection->verify_tfm);
2506 		connection->verify_tfm = crypto.verify_tfm;
2507 		crypto.verify_tfm = NULL;
2508 	}
2509 
2510 	crypto_free_shash(connection->integrity_tfm);
2511 	connection->integrity_tfm = crypto.integrity_tfm;
2512 	if (connection->cstate >= C_WF_REPORT_PARAMS && connection->agreed_pro_version >= 100)
2513 		/* Do this without trying to take connection->data.mutex again.  */
2514 		__drbd_send_protocol(connection, P_PROTOCOL_UPDATE);
2515 
2516 	crypto_free_shash(connection->cram_hmac_tfm);
2517 	connection->cram_hmac_tfm = crypto.cram_hmac_tfm;
2518 
2519 	mutex_unlock(&connection->resource->conf_update);
2520 	mutex_unlock(&connection->data.mutex);
2521 	synchronize_rcu();
2522 	kfree(old_net_conf);
2523 
2524 	if (connection->cstate >= C_WF_REPORT_PARAMS) {
2525 		struct drbd_peer_device *peer_device;
2526 		int vnr;
2527 
2528 		idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
2529 			drbd_send_sync_param(peer_device);
2530 	}
2531 
2532 	goto done;
2533 
2534  fail:
2535 	mutex_unlock(&connection->resource->conf_update);
2536 	mutex_unlock(&connection->data.mutex);
2537 	free_crypto(&crypto);
2538 	kfree(new_net_conf);
2539  done:
2540 	conn_reconfig_done(connection);
2541  out:
2542 	mutex_unlock(&adm_ctx.resource->adm_mutex);
2543  finish:
2544 	drbd_adm_finish(&adm_ctx, info, retcode);
2545 	return 0;
2546 }
2547 
2548 static void connection_to_info(struct connection_info *info,
2549 			       struct drbd_connection *connection)
2550 {
2551 	info->conn_connection_state = connection->cstate;
2552 	info->conn_role = conn_highest_peer(connection);
2553 }
2554 
2555 static void peer_device_to_info(struct peer_device_info *info,
2556 				struct drbd_peer_device *peer_device)
2557 {
2558 	struct drbd_device *device = peer_device->device;
2559 
2560 	info->peer_repl_state =
2561 		max_t(enum drbd_conns, C_WF_REPORT_PARAMS, device->state.conn);
2562 	info->peer_disk_state = device->state.pdsk;
2563 	info->peer_resync_susp_user = device->state.user_isp;
2564 	info->peer_resync_susp_peer = device->state.peer_isp;
2565 	info->peer_resync_susp_dependency = device->state.aftr_isp;
2566 }
2567 
2568 int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
2569 {
2570 	struct connection_info connection_info;
2571 	enum drbd_notification_type flags;
2572 	unsigned int peer_devices = 0;
2573 	struct drbd_config_context adm_ctx;
2574 	struct drbd_peer_device *peer_device;
2575 	struct net_conf *old_net_conf, *new_net_conf = NULL;
2576 	struct crypto crypto = { };
2577 	struct drbd_resource *resource;
2578 	struct drbd_connection *connection;
2579 	enum drbd_ret_code retcode;
2580 	int i;
2581 	int err;
2582 
2583 	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
2584 
2585 	if (!adm_ctx.reply_skb)
2586 		return retcode;
2587 	if (retcode != NO_ERROR)
2588 		goto out;
2589 	if (!(adm_ctx.my_addr && adm_ctx.peer_addr)) {
2590 		drbd_msg_put_info(adm_ctx.reply_skb, "connection endpoint(s) missing");
2591 		retcode = ERR_INVALID_REQUEST;
2592 		goto out;
2593 	}
2594 
2595 	/* No need for _rcu here. All reconfiguration is
2596 	 * strictly serialized on genl_lock(). We are protected against
2597 	 * concurrent reconfiguration/addition/deletion */
2598 	for_each_resource(resource, &drbd_resources) {
2599 		for_each_connection(connection, resource) {
2600 			if (nla_len(adm_ctx.my_addr) == connection->my_addr_len &&
2601 			    !memcmp(nla_data(adm_ctx.my_addr), &connection->my_addr,
2602 				    connection->my_addr_len)) {
2603 				retcode = ERR_LOCAL_ADDR;
2604 				goto out;
2605 			}
2606 
2607 			if (nla_len(adm_ctx.peer_addr) == connection->peer_addr_len &&
2608 			    !memcmp(nla_data(adm_ctx.peer_addr), &connection->peer_addr,
2609 				    connection->peer_addr_len)) {
2610 				retcode = ERR_PEER_ADDR;
2611 				goto out;
2612 			}
2613 		}
2614 	}
2615 
2616 	mutex_lock(&adm_ctx.resource->adm_mutex);
2617 	connection = first_connection(adm_ctx.resource);
2618 	conn_reconfig_start(connection);
2619 
2620 	if (connection->cstate > C_STANDALONE) {
2621 		retcode = ERR_NET_CONFIGURED;
2622 		goto fail;
2623 	}
2624 
2625 	/* allocation not in the IO path, drbdsetup / netlink process context */
2626 	new_net_conf = kzalloc(sizeof(*new_net_conf), GFP_KERNEL);
2627 	if (!new_net_conf) {
2628 		retcode = ERR_NOMEM;
2629 		goto fail;
2630 	}
2631 
2632 	set_net_conf_defaults(new_net_conf);
2633 
2634 	err = net_conf_from_attrs(new_net_conf, info);
2635 	if (err && err != -ENOMSG) {
2636 		retcode = ERR_MANDATORY_TAG;
2637 		drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2638 		goto fail;
2639 	}
2640 
2641 	retcode = check_net_options(connection, new_net_conf);
2642 	if (retcode != NO_ERROR)
2643 		goto fail;
2644 
2645 	retcode = alloc_crypto(&crypto, new_net_conf);
2646 	if (retcode != NO_ERROR)
2647 		goto fail;
2648 
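	/* Force NUL termination of the user-supplied shared secret. */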
2649 	((char *)new_net_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;
2650 
2651 	drbd_flush_workqueue(&connection->sender_work);
2652 
2653 	mutex_lock(&adm_ctx.resource->conf_update);
2654 	old_net_conf = connection->net_conf;
2655 	if (old_net_conf) {
2656 		retcode = ERR_NET_CONFIGURED;
2657 		mutex_unlock(&adm_ctx.resource->conf_update);
2658 		goto fail;
2659 	}
2660 	rcu_assign_pointer(connection->net_conf, new_net_conf);
2661 
2662 	conn_free_crypto(connection);
2663 	connection->cram_hmac_tfm = crypto.cram_hmac_tfm;
2664 	connection->integrity_tfm = crypto.integrity_tfm;
2665 	connection->csums_tfm = crypto.csums_tfm;
2666 	connection->verify_tfm = crypto.verify_tfm;
2667 
2668 	connection->my_addr_len = nla_len(adm_ctx.my_addr);
2669 	memcpy(&connection->my_addr, nla_data(adm_ctx.my_addr), connection->my_addr_len);
2670 	connection->peer_addr_len = nla_len(adm_ctx.peer_addr);
2671 	memcpy(&connection->peer_addr, nla_data(adm_ctx.peer_addr), connection->peer_addr_len);
2672 
2673 	idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2674 		peer_devices++;
2675 	}
2676 
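	/* Emit one multicast notification for the connection, followed by
	 * one per peer device.  The post-decrement chains them: every event
	 * but the last carries NOTIFY_CONTINUES, so listeners can tell
	 * where this group of related events ends. */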
2677 	connection_to_info(&connection_info, connection);
2678 	flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
2679 	mutex_lock(&notification_mutex);
2680 	notify_connection_state(NULL, 0, connection, &connection_info, NOTIFY_CREATE | flags);
2681 	idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2682 		struct peer_device_info peer_device_info;
2683 
2684 		peer_device_to_info(&peer_device_info, peer_device);
2685 		flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
2686 		notify_peer_device_state(NULL, 0, peer_device, &peer_device_info, NOTIFY_CREATE | flags);
2687 	}
2688 	mutex_unlock(&notification_mutex);
2689 	mutex_unlock(&adm_ctx.resource->conf_update);
2690 
2691 	rcu_read_lock();
2692 	idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2693 		struct drbd_device *device = peer_device->device;
2694 		device->send_cnt = 0;
2695 		device->recv_cnt = 0;
2696 	}
2697 	rcu_read_unlock();
2698 
2699 	retcode = conn_request_state(connection, NS(conn, C_UNCONNECTED), CS_VERBOSE);
2700 
2701 	conn_reconfig_done(connection);
2702 	mutex_unlock(&adm_ctx.resource->adm_mutex);
2703 	drbd_adm_finish(&adm_ctx, info, retcode);
2704 	return 0;
2705 
2706 fail:
2707 	free_crypto(&crypto);
2708 	kfree(new_net_conf);
2709 
2710 	conn_reconfig_done(connection);
2711 	mutex_unlock(&adm_ctx.resource->adm_mutex);
2712 out:
2713 	drbd_adm_finish(&adm_ctx, info, retcode);
2714 	return 0;
2715 }
2716 
2717 static enum drbd_state_rv conn_try_disconnect(struct drbd_connection *connection, bool force)
2718 {
2719 	enum drbd_conns cstate;
2720 	enum drbd_state_rv rv;
2721 
2722 repeat:
2723 	rv = conn_request_state(connection, NS(conn, C_DISCONNECTING),
2724 			force ? CS_HARD : 0);
2725 
2726 	switch (rv) {
2727 	case SS_NOTHING_TO_DO:
2728 		break;
2729 	case SS_ALREADY_STANDALONE:
2730 		return SS_SUCCESS;
2731 	case SS_PRIMARY_NOP:
2732 		/* Our state checking code wants to see the peer outdated. */
2733 		rv = conn_request_state(connection, NS2(conn, C_DISCONNECTING, pdsk, D_OUTDATED), 0);
2734 
2735 		if (rv == SS_OUTDATE_WO_CONN) /* lost connection before graceful disconnect succeeded */
2736 			rv = conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_VERBOSE);
2737 
2738 		break;
2739 	case SS_CW_FAILED_BY_PEER:
2740 		spin_lock_irq(&connection->resource->req_lock);
2741 		cstate = connection->cstate;
2742 		spin_unlock_irq(&connection->resource->req_lock);
2743 		if (cstate <= C_WF_CONNECTION)
2744 			goto repeat;
2745 		/* The peer probably wants to see us outdated. */
2746 		rv = conn_request_state(connection, NS2(conn, C_DISCONNECTING,
2747 							disk, D_OUTDATED), 0);
2748 		if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
2749 			rv = conn_request_state(connection, NS(conn, C_DISCONNECTING),
2750 					CS_HARD);
2751 		}
2752 		break;
2753 	default:
2754 		break; /* no special handling necessary */
2755 	}
2756 
2757 	if (rv >= SS_SUCCESS) {
2758 		enum drbd_state_rv rv2;
2759 		/* No one else can reconfigure the network while I am here.
2760 		 * The state handling only uses drbd_thread_stop_nowait(),
2761 		 * we want to really wait here until the receiver is no more.
2762 		 */
2763 		drbd_thread_stop(&connection->receiver);
2764 
2765 		/* Race breaker.  This additional state change request may be
2766 		 * necessary, if this was a forced disconnect during a receiver
2767 		 * restart.  We may have "killed" the receiver thread just
2768 		 * after drbd_receiver() returned.  Typically, we should be
2769 		 * C_STANDALONE already, now, and this becomes a no-op.
2770 		 */
2771 		rv2 = conn_request_state(connection, NS(conn, C_STANDALONE),
2772 				CS_VERBOSE | CS_HARD);
2773 		if (rv2 < SS_SUCCESS)
2774 			drbd_err(connection,
2775 				"unexpected rv2=%d in conn_try_disconnect()\n",
2776 				rv2);
2777 		/* Unlike in DRBD 9, the state engine has generated
2778 		 * NOTIFY_DESTROY events before clearing connection->net_conf. */
2779 	}
2780 	return rv;
2781 }
2782 
2783 int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
2784 {
2785 	struct drbd_config_context adm_ctx;
2786 	struct disconnect_parms parms;
2787 	struct drbd_connection *connection;
2788 	enum drbd_state_rv rv;
2789 	enum drbd_ret_code retcode;
2790 	int err;
2791 
2792 	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_CONNECTION);
2793 	if (!adm_ctx.reply_skb)
2794 		return retcode;
2795 	if (retcode != NO_ERROR)
2796 		goto fail;
2797 
2798 	connection = adm_ctx.connection;
2799 	memset(&parms, 0, sizeof(parms));
2800 	if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
2801 		err = disconnect_parms_from_attrs(&parms, info);
2802 		if (err) {
2803 			retcode = ERR_MANDATORY_TAG;
2804 			drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2805 			goto fail;
2806 		}
2807 	}
2808 
2809 	mutex_lock(&adm_ctx.resource->adm_mutex);
2810 	rv = conn_try_disconnect(connection, parms.force_disconnect);
2811 	if (rv < SS_SUCCESS)
2812 		retcode = rv;  /* FIXME: Type mismatch. */
2813 	else
2814 		retcode = NO_ERROR;
2815 	mutex_unlock(&adm_ctx.resource->adm_mutex);
2816  fail:
2817 	drbd_adm_finish(&adm_ctx, info, retcode);
2818 	return 0;
2819 }
2820 
2821 void resync_after_online_grow(struct drbd_device *device)
2822 {
2823 	int iass; /* I am sync source */
2824 
2825 	drbd_info(device, "Resync of new storage after online grow\n");
2826 	if (device->state.role != device->state.peer)
2827 		iass = (device->state.role == R_PRIMARY);
2828 	else
2829 		iass = test_bit(RESOLVE_CONFLICTS, &first_peer_device(device)->connection->flags);
2830 
2831 	if (iass)
2832 		drbd_start_resync(device, C_SYNC_SOURCE);
2833 	else
2834 		_drbd_request_state(device, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
2835 }
2836 
2837 int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
2838 {
2839 	struct drbd_config_context adm_ctx;
2840 	struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
2841 	struct resize_parms rs;
2842 	struct drbd_device *device;
2843 	enum drbd_ret_code retcode;
2844 	enum determine_dev_size dd;
2845 	bool change_al_layout = false;
2846 	enum dds_flags ddsf;
2847 	sector_t u_size;
2848 	int err;
2849 
2850 	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
2851 	if (!adm_ctx.reply_skb)
2852 		return retcode;
2853 	if (retcode != NO_ERROR)
2854 		goto finish;
2855 
2856 	mutex_lock(&adm_ctx.resource->adm_mutex);
2857 	device = adm_ctx.device;
2858 	if (!get_ldev(device)) {
2859 		retcode = ERR_NO_DISK;
2860 		goto fail;
2861 	}
2862 
2863 	memset(&rs, 0, sizeof(struct resize_parms));
2864 	rs.al_stripes = device->ldev->md.al_stripes;
2865 	rs.al_stripe_size = device->ldev->md.al_stripe_size_4k * 4;
2866 	if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
2867 		err = resize_parms_from_attrs(&rs, info);
2868 		if (err) {
2869 			retcode = ERR_MANDATORY_TAG;
2870 			drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2871 			goto fail_ldev;
2872 		}
2873 	}
2874 
2875 	if (device->state.conn > C_CONNECTED) {
2876 		retcode = ERR_RESIZE_RESYNC;
2877 		goto fail_ldev;
2878 	}
2879 
2880 	if (device->state.role == R_SECONDARY &&
2881 	    device->state.peer == R_SECONDARY) {
2882 		retcode = ERR_NO_PRIMARY;
2883 		goto fail_ldev;
2884 	}
2885 
2886 	if (rs.no_resync && first_peer_device(device)->connection->agreed_pro_version < 93) {
2887 		retcode = ERR_NEED_APV_93;
2888 		goto fail_ldev;
2889 	}
2890 
2891 	rcu_read_lock();
2892 	u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
2893 	rcu_read_unlock();
2894 	if (u_size != (sector_t)rs.resize_size) {
2895 		new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
2896 		if (!new_disk_conf) {
2897 			retcode = ERR_NOMEM;
2898 			goto fail_ldev;
2899 		}
2900 	}
2901 
2902 	if (device->ldev->md.al_stripes != rs.al_stripes ||
2903 	    device->ldev->md.al_stripe_size_4k != rs.al_stripe_size / 4) {
2904 		u32 al_size_k = rs.al_stripes * rs.al_stripe_size;
2905 
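		/* al_stripe_size, and thus al_size_k, is in KiB; the on-disk
		 * activity log must stay between 32 KiB (MD_32kB_SECT/2)
		 * and 16 GiB. */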
2906 		if (al_size_k > (16 * 1024 * 1024)) {
2907 			retcode = ERR_MD_LAYOUT_TOO_BIG;
2908 			goto fail_ldev;
2909 		}
2910 
2911 		if (al_size_k < MD_32kB_SECT/2) {
2912 			retcode = ERR_MD_LAYOUT_TOO_SMALL;
2913 			goto fail_ldev;
2914 		}
2915 
2916 		if (device->state.conn != C_CONNECTED && !rs.resize_force) {
2917 			retcode = ERR_MD_LAYOUT_CONNECTED;
2918 			goto fail_ldev;
2919 		}
2920 
2921 		change_al_layout = true;
2922 	}
2923 
2924 	if (device->ldev->known_size != drbd_get_capacity(device->ldev->backing_bdev))
2925 		device->ldev->known_size = drbd_get_capacity(device->ldev->backing_bdev);
2926 
2927 	if (new_disk_conf) {
2928 		mutex_lock(&device->resource->conf_update);
2929 		old_disk_conf = device->ldev->disk_conf;
2930 		*new_disk_conf = *old_disk_conf;
2931 		new_disk_conf->disk_size = (sector_t)rs.resize_size;
2932 		rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
2933 		mutex_unlock(&device->resource->conf_update);
2934 		synchronize_rcu();
2935 		kfree(old_disk_conf);
2936 		new_disk_conf = NULL;
2937 	}
2938 
2939 	ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
2940 	dd = drbd_determine_dev_size(device, ddsf, change_al_layout ? &rs : NULL);
2941 	drbd_md_sync(device);
2942 	put_ldev(device);
2943 	if (dd == DS_ERROR) {
2944 		retcode = ERR_NOMEM_BITMAP;
2945 		goto fail;
2946 	} else if (dd == DS_ERROR_SPACE_MD) {
2947 		retcode = ERR_MD_LAYOUT_NO_FIT;
2948 		goto fail;
2949 	} else if (dd == DS_ERROR_SHRINK) {
2950 		retcode = ERR_IMPLICIT_SHRINK;
2951 		goto fail;
2952 	}
2953 
2954 	if (device->state.conn == C_CONNECTED) {
2955 		if (dd == DS_GREW)
2956 			set_bit(RESIZE_PENDING, &device->flags);
2957 
2958 		drbd_send_uuids(first_peer_device(device));
2959 		drbd_send_sizes(first_peer_device(device), 1, ddsf);
2960 	}
2961 
2962  fail:
2963 	mutex_unlock(&adm_ctx.resource->adm_mutex);
2964  finish:
2965 	drbd_adm_finish(&adm_ctx, info, retcode);
2966 	return 0;
2967 
2968  fail_ldev:
2969 	put_ldev(device);
2970 	kfree(new_disk_conf);
2971 	goto fail;
2972 }
2973 
2974 int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
2975 {
2976 	struct drbd_config_context adm_ctx;
2977 	enum drbd_ret_code retcode;
2978 	struct res_opts res_opts;
2979 	int err;
2980 
2981 	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
2982 	if (!adm_ctx.reply_skb)
2983 		return retcode;
2984 	if (retcode != NO_ERROR)
2985 		goto fail;
2986 
2987 	res_opts = adm_ctx.resource->res_opts;
2988 	if (should_set_defaults(info))
2989 		set_res_opts_defaults(&res_opts);
2990 
2991 	err = res_opts_from_attrs(&res_opts, info);
2992 	if (err && err != -ENOMSG) {
2993 		retcode = ERR_MANDATORY_TAG;
2994 		drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2995 		goto fail;
2996 	}
2997 
2998 	mutex_lock(&adm_ctx.resource->adm_mutex);
2999 	err = set_resource_options(adm_ctx.resource, &res_opts);
3000 	if (err) {
3001 		retcode = ERR_INVALID_REQUEST;
3002 		if (err == -ENOMEM)
3003 			retcode = ERR_NOMEM;
3004 	}
3005 	mutex_unlock(&adm_ctx.resource->adm_mutex);
3006 
3007 fail:
3008 	drbd_adm_finish(&adm_ctx, info, retcode);
3009 	return 0;
3010 }
3011 
3012 int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
3013 {
3014 	struct drbd_config_context adm_ctx;
3015 	struct drbd_device *device;
3016 	int retcode; /* enum drbd_ret_code or enum drbd_state_rv */
3017 
3018 	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3019 	if (!adm_ctx.reply_skb)
3020 		return retcode;
3021 	if (retcode != NO_ERROR)
3022 		goto out;
3023 
3024 	device = adm_ctx.device;
3025 	if (!get_ldev(device)) {
3026 		retcode = ERR_NO_DISK;
3027 		goto out;
3028 	}
3029 
3030 	mutex_lock(&adm_ctx.resource->adm_mutex);
3031 
3032 	/* If there is still bitmap IO pending, probably because of a previous
3033 	 * resync just being finished, wait for it before requesting a new resync.
3034 	 * Also wait for its after_state_ch(). */
3035 	drbd_suspend_io(device);
3036 	wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
3037 	drbd_flush_workqueue(&first_peer_device(device)->connection->sender_work);
3038 
3039 	/* If we happen to be C_STANDALONE R_SECONDARY, just change to
3040 	 * D_INCONSISTENT, and set all bits in the bitmap.  Otherwise,
3041 	 * try to start a resync handshake as sync target for full sync.
3042 	 */
3043 	if (device->state.conn == C_STANDALONE && device->state.role == R_SECONDARY) {
3044 		retcode = drbd_request_state(device, NS(disk, D_INCONSISTENT));
3045 		if (retcode >= SS_SUCCESS) {
3046 			if (drbd_bitmap_io(device, &drbd_bmio_set_n_write,
3047 				"set_n_write from invalidate", BM_LOCKED_MASK))
3048 				retcode = ERR_IO_MD_DISK;
3049 		}
3050 	} else
3051 		retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_T));
3052 	drbd_resume_io(device);
3053 	mutex_unlock(&adm_ctx.resource->adm_mutex);
3054 	put_ldev(device);
3055 out:
3056 	drbd_adm_finish(&adm_ctx, info, retcode);
3057 	return 0;
3058 }
3059 
3060 static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
3061 		union drbd_state mask, union drbd_state val)
3062 {
3063 	struct drbd_config_context adm_ctx;
3064 	enum drbd_ret_code retcode;
3065 
3066 	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3067 	if (!adm_ctx.reply_skb)
3068 		return retcode;
3069 	if (retcode != NO_ERROR)
3070 		goto out;
3071 
3072 	mutex_lock(&adm_ctx.resource->adm_mutex);
3073 	retcode = drbd_request_state(adm_ctx.device, mask, val);
3074 	mutex_unlock(&adm_ctx.resource->adm_mutex);
3075 out:
3076 	drbd_adm_finish(&adm_ctx, info, retcode);
3077 	return 0;
3078 }
3079 
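/* Bitmap IO operation for invalidate-peer below: set all bits in the
 * bitmap (full sync towards the peer) and suspend activity log updates
 * while at it. */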
3080 static int drbd_bmio_set_susp_al(struct drbd_device *device) __must_hold(local)
3081 {
3082 	int rv;
3083 
3084 	rv = drbd_bmio_set_n_write(device);
3085 	drbd_suspend_al(device);
3086 	return rv;
3087 }
3088 
3089 int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
3090 {
3091 	struct drbd_config_context adm_ctx;
3092 	int retcode; /* drbd_ret_code, drbd_state_rv */
3093 	struct drbd_device *device;
3094 
3095 	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3096 	if (!adm_ctx.reply_skb)
3097 		return retcode;
3098 	if (retcode != NO_ERROR)
3099 		goto out;
3100 
3101 	device = adm_ctx.device;
3102 	if (!get_ldev(device)) {
3103 		retcode = ERR_NO_DISK;
3104 		goto out;
3105 	}
3106 
3107 	mutex_lock(&adm_ctx.resource->adm_mutex);
3108 
3109 	/* If there is still bitmap IO pending, probably because of a previous
3110 	 * resync just being finished, wait for it before requesting a new resync.
3111 	 * Also wait for its after_state_ch(). */
3112 	drbd_suspend_io(device);
3113 	wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
3114 	drbd_flush_workqueue(&first_peer_device(device)->connection->sender_work);
3115 
3116 	/* If we happen to be C_STANDALONE R_PRIMARY, just set all bits
3117 	 * in the bitmap.  Otherwise, try to start a resync handshake
3118 	 * as sync source for full sync.
3119 	 */
3120 	if (device->state.conn == C_STANDALONE && device->state.role == R_PRIMARY) {
3121 		/* The peer will get a resync upon connect anyway. Just make that
3122 		   into a full resync. */
3123 		retcode = drbd_request_state(device, NS(pdsk, D_INCONSISTENT));
3124 		if (retcode >= SS_SUCCESS) {
3125 			if (drbd_bitmap_io(device, &drbd_bmio_set_susp_al,
3126 				"set_n_write from invalidate_peer",
3127 				BM_LOCKED_SET_ALLOWED))
3128 				retcode = ERR_IO_MD_DISK;
3129 		}
3130 	} else
3131 		retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_S));
3132 	drbd_resume_io(device);
3133 	mutex_unlock(&adm_ctx.resource->adm_mutex);
3134 	put_ldev(device);
3135 out:
3136 	drbd_adm_finish(&adm_ctx, info, retcode);
3137 	return 0;
3138 }
3139 
3140 int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
3141 {
3142 	struct drbd_config_context adm_ctx;
3143 	enum drbd_ret_code retcode;
3144 
3145 	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3146 	if (!adm_ctx.reply_skb)
3147 		return retcode;
3148 	if (retcode != NO_ERROR)
3149 		goto out;
3150 
3151 	mutex_lock(&adm_ctx.resource->adm_mutex);
3152 	if (drbd_request_state(adm_ctx.device, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
3153 		retcode = ERR_PAUSE_IS_SET;
3154 	mutex_unlock(&adm_ctx.resource->adm_mutex);
3155 out:
3156 	drbd_adm_finish(&adm_ctx, info, retcode);
3157 	return 0;
3158 }
3159 
3160 int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
3161 {
3162 	struct drbd_config_context adm_ctx;
3163 	union drbd_dev_state s;
3164 	enum drbd_ret_code retcode;
3165 
3166 	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3167 	if (!adm_ctx.reply_skb)
3168 		return retcode;
3169 	if (retcode != NO_ERROR)
3170 		goto out;
3171 
3172 	mutex_lock(&adm_ctx.resource->adm_mutex);
3173 	if (drbd_request_state(adm_ctx.device, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
3174 		s = adm_ctx.device->state;
3175 		if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
3176 			retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
3177 				  s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
3178 		} else {
3179 			retcode = ERR_PAUSE_IS_CLEAR;
3180 		}
3181 	}
3182 	mutex_unlock(&adm_ctx.resource->adm_mutex);
3183 out:
3184 	drbd_adm_finish(&adm_ctx, info, retcode);
3185 	return 0;
3186 }
3187 
3188 int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
3189 {
3190 	return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
3191 }
3192 
3193 int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
3194 {
3195 	struct drbd_config_context adm_ctx;
3196 	struct drbd_device *device;
3197 	int retcode; /* enum drbd_ret_code or enum drbd_state_rv */
3198 
3199 	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3200 	if (!adm_ctx.reply_skb)
3201 		return retcode;
3202 	if (retcode != NO_ERROR)
3203 		goto out;
3204 
3205 	mutex_lock(&adm_ctx.resource->adm_mutex);
3206 	device = adm_ctx.device;
3207 	if (test_bit(NEW_CUR_UUID, &device->flags)) {
3208 		if (get_ldev_if_state(device, D_ATTACHING)) {
3209 			drbd_uuid_new_current(device);
3210 			put_ldev(device);
3211 		} else {
3212 			/* This is effectively a multi-stage "forced down".
3213 			 * The NEW_CUR_UUID bit is supposedly only set if we
3214 			 * lost the replication connection, and are configured
3215 			 * to freeze IO and wait for some fence-peer handler.
3216 			 * So we still don't have a replication connection.
3217 			 * And now we don't have a local disk either.  After
3218 			 * resume, we will fail all pending and new IO, because
3219 			 * we don't have any data anymore.  Which means we will
3220 			 * eventually be able to terminate all users of this
3221 			 * device, and then take it down.  By bumping the
3222 			 * "effective" data uuid, we make sure that you really
3223 			 * need to tear down before you reconfigure: we will
3224 			 * then refuse to re-connect or re-attach (because no
3225 			 * matching real data uuid exists).
3226 			 */
3227 			u64 val;
3228 			get_random_bytes(&val, sizeof(u64));
3229 			drbd_set_ed_uuid(device, val);
3230 			drbd_warn(device, "Resumed without access to data; please tear down before attempting to re-configure.\n");
3231 		}
3232 		clear_bit(NEW_CUR_UUID, &device->flags);
3233 	}
3234 	drbd_suspend_io(device);
3235 	retcode = drbd_request_state(device, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
3236 	if (retcode == SS_SUCCESS) {
3237 		if (device->state.conn < C_CONNECTED)
3238 			tl_clear(first_peer_device(device)->connection);
3239 		if (device->state.disk == D_DISKLESS || device->state.disk == D_FAILED)
3240 			tl_restart(first_peer_device(device)->connection, FAIL_FROZEN_DISK_IO);
3241 	}
3242 	drbd_resume_io(device);
3243 	mutex_unlock(&adm_ctx.resource->adm_mutex);
3244 out:
3245 	drbd_adm_finish(&adm_ctx, info, retcode);
3246 	return 0;
3247 }
3248 
3249 int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
3250 {
3251 	return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
3252 }
3253 
3254 static int nla_put_drbd_cfg_context(struct sk_buff *skb,
3255 				    struct drbd_resource *resource,
3256 				    struct drbd_connection *connection,
3257 				    struct drbd_device *device)
3258 {
3259 	struct nlattr *nla;
3260 	nla = nla_nest_start_noflag(skb, DRBD_NLA_CFG_CONTEXT);
3261 	if (!nla)
3262 		goto nla_put_failure;
3263 	if (device &&
3264 	    nla_put_u32(skb, T_ctx_volume, device->vnr))
3265 		goto nla_put_failure;
3266 	if (nla_put_string(skb, T_ctx_resource_name, resource->name))
3267 		goto nla_put_failure;
3268 	if (connection) {
3269 		if (connection->my_addr_len &&
3270 		    nla_put(skb, T_ctx_my_addr, connection->my_addr_len, &connection->my_addr))
3271 			goto nla_put_failure;
3272 		if (connection->peer_addr_len &&
3273 		    nla_put(skb, T_ctx_peer_addr, connection->peer_addr_len, &connection->peer_addr))
3274 			goto nla_put_failure;
3275 	}
3276 	nla_nest_end(skb, nla);
3277 	return 0;
3278 
3279 nla_put_failure:
3280 	if (nla)
3281 		nla_nest_cancel(skb, nla);
3282 	return -EMSGSIZE;
3283 }
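/*
 * Sketch of the nested attribute layout the helper above produces
 * (illustrative only; the on-wire encoding is whatever the nla_put_*()
 * helpers emit for these types):
 *
 *   DRBD_NLA_CFG_CONTEXT (nested)
 *     T_ctx_volume          u32     only if a device was given
 *     T_ctx_resource_name   string  always
 *     T_ctx_my_addr         binary  only if the connection has one
 *     T_ctx_peer_addr       binary  only if the connection has one
 */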
3284 
3285 /*
3286  * The generic netlink dump callbacks are called outside the genl_lock(), so
3287  * they cannot use the simple attribute parsing code which uses global
3288  * attribute tables.
3289  */
3290 static struct nlattr *find_cfg_context_attr(const struct nlmsghdr *nlh, int attr)
3291 {
3292 	const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
3293 	const int maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
3294 	struct nlattr *nla;
3295 
3296 	nla = nla_find(nlmsg_attrdata(nlh, hdrlen), nlmsg_attrlen(nlh, hdrlen),
3297 		       DRBD_NLA_CFG_CONTEXT);
3298 	if (!nla)
3299 		return NULL;
3300 	return drbd_nla_find_nested(maxtype, nla, __nla_type(attr));
3301 }
3302 
3303 static void resource_to_info(struct resource_info *, struct drbd_resource *);
3304 
3305 int drbd_adm_dump_resources(struct sk_buff *skb, struct netlink_callback *cb)
3306 {
3307 	struct drbd_genlmsghdr *dh;
3308 	struct drbd_resource *resource;
3309 	struct resource_info resource_info;
3310 	struct resource_statistics resource_statistics;
3311 	int err;
3312 
3313 	rcu_read_lock();
3314 	if (cb->args[0]) {
3315 		for_each_resource_rcu(resource, &drbd_resources)
3316 			if (resource == (struct drbd_resource *)cb->args[0])
3317 				goto found_resource;
3318 		err = 0;  /* resource was probably deleted */
3319 		goto out;
3320 	}
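	/* Make resource point to the list head (not the first entry). */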
3321 	resource = list_entry(&drbd_resources,
3322 			      struct drbd_resource, resources);
3323 
3324 found_resource:
3325 	list_for_each_entry_continue_rcu(resource, &drbd_resources, resources) {
3326 		goto put_result;
3327 	}
3328 	err = 0;
3329 	goto out;
3330 
3331 put_result:
3332 	dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
3333 			cb->nlh->nlmsg_seq, &drbd_genl_family,
3334 			NLM_F_MULTI, DRBD_ADM_GET_RESOURCES);
3335 	err = -ENOMEM;
3336 	if (!dh)
3337 		goto out;
3338 	dh->minor = -1U;
3339 	dh->ret_code = NO_ERROR;
3340 	err = nla_put_drbd_cfg_context(skb, resource, NULL, NULL);
3341 	if (err)
3342 		goto out;
3343 	err = res_opts_to_skb(skb, &resource->res_opts, !capable(CAP_SYS_ADMIN));
3344 	if (err)
3345 		goto out;
3346 	resource_to_info(&resource_info, resource);
3347 	err = resource_info_to_skb(skb, &resource_info, !capable(CAP_SYS_ADMIN));
3348 	if (err)
3349 		goto out;
3350 	resource_statistics.res_stat_write_ordering = resource->write_ordering;
3351 	err = resource_statistics_to_skb(skb, &resource_statistics, !capable(CAP_SYS_ADMIN));
3352 	if (err)
3353 		goto out;
3354 	cb->args[0] = (long)resource;
3355 	genlmsg_end(skb, dh);
3356 	err = 0;
3357 
3358 out:
3359 	rcu_read_unlock();
3360 	if (err)
3361 		return err;
3362 	return skb->len;
3363 }
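/*
 * How the dump above resumes (a sketch of netlink dump semantics, not
 * verbatim core code): netlink_dump() invokes this callback repeatedly,
 * and cb->args[0] carries the last emitted resource as an opaque cursor
 * between the calls:
 *
 *   first call:     args[0] == 0  ->  start at the head of drbd_resources
 *   followup calls: args[0] != 0  ->  revalidate the pointer under RCU
 *   no entry left:  err = 0, nothing added; the empty skb ends the dump
 */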
3364 
3365 static void device_to_statistics(struct device_statistics *s,
3366 				 struct drbd_device *device)
3367 {
3368 	memset(s, 0, sizeof(*s));
3369 	s->dev_upper_blocked = !may_inc_ap_bio(device);
3370 	if (get_ldev(device)) {
3371 		struct drbd_md *md = &device->ldev->md;
3372 		u64 *history_uuids = (u64 *)s->history_uuids;
3373 		struct request_queue *q;
3374 		int n;
3375 
3376 		spin_lock_irq(&md->uuid_lock);
3377 		s->dev_current_uuid = md->uuid[UI_CURRENT];
3378 		BUILD_BUG_ON(sizeof(s->history_uuids) < UI_HISTORY_END - UI_HISTORY_START + 1);
3379 		for (n = 0; n < UI_HISTORY_END - UI_HISTORY_START + 1; n++)
3380 			history_uuids[n] = md->uuid[UI_HISTORY_START + n];
3381 		for (; n < HISTORY_UUIDS; n++)
3382 			history_uuids[n] = 0;
3383 		s->history_uuids_len = HISTORY_UUIDS;
3384 		spin_unlock_irq(&md->uuid_lock);
3385 
3386 		s->dev_disk_flags = md->flags;
3387 		q = bdev_get_queue(device->ldev->backing_bdev);
3388 		s->dev_lower_blocked =
3389 			bdi_congested(q->backing_dev_info,
3390 				      (1 << WB_async_congested) |
3391 				      (1 << WB_sync_congested));
3392 		put_ldev(device);
3393 	}
3394 	s->dev_size = drbd_get_capacity(device->this_bdev);
3395 	s->dev_read = device->read_cnt;
3396 	s->dev_write = device->writ_cnt;
3397 	s->dev_al_writes = device->al_writ_cnt;
3398 	s->dev_bm_writes = device->bm_writ_cnt;
3399 	s->dev_upper_pending = atomic_read(&device->ap_bio_cnt);
3400 	s->dev_lower_pending = atomic_read(&device->local_cnt);
3401 	s->dev_al_suspended = test_bit(AL_SUSPENDED, &device->flags);
3402 	s->dev_exposed_data_uuid = device->ed_uuid;
3403 }
3404 
3405 static int put_resource_in_arg0(struct netlink_callback *cb, int holder_nr)
3406 {
3407 	if (cb->args[0]) {
3408 		struct drbd_resource *resource =
3409 			(struct drbd_resource *)cb->args[0];
3410 		kref_put(&resource->kref, drbd_destroy_resource);
3411 	}
3412 
3413 	return 0;
3414 }
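/*
 * Note: holder_nr is unused above; the distinct values the .done
 * callbacks below pass (7, 6, 9) are presumably leftover kref-holder
 * debugging hints.  What matters is dropping the reference that the
 * corresponding dump callback took when it stored the resource pointer
 * in cb->args[0].
 */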
3415 
3416 int drbd_adm_dump_devices_done(struct netlink_callback *cb)
3416 {
3417 	return put_resource_in_arg0(cb, 7);
3418 }
3419 
3420 static void device_to_info(struct device_info *, struct drbd_device *);
3421 
3422 int drbd_adm_dump_devices(struct sk_buff *skb, struct netlink_callback *cb)
3423 {
3424 	struct nlattr *resource_filter;
3425 	struct drbd_resource *resource;
3426 	struct drbd_device *uninitialized_var(device);
3427 	int minor, err, retcode;
3428 	struct drbd_genlmsghdr *dh;
3429 	struct device_info device_info;
3430 	struct device_statistics device_statistics;
3431 	struct idr *idr_to_search;
3432 
3433 	resource = (struct drbd_resource *)cb->args[0];
3434 	if (!cb->args[0] && !cb->args[1]) {
3435 		resource_filter = find_cfg_context_attr(cb->nlh, T_ctx_resource_name);
3436 		if (resource_filter) {
3437 			retcode = ERR_RES_NOT_KNOWN;
3438 			resource = drbd_find_resource(nla_data(resource_filter));
3439 			if (!resource)
3440 				goto put_result;
3441 			cb->args[0] = (long)resource;
3442 		}
3443 	}
3444 
3445 	rcu_read_lock();
3446 	minor = cb->args[1];
3447 	idr_to_search = resource ? &resource->devices : &drbd_devices;
3448 	device = idr_get_next(idr_to_search, &minor);
3449 	if (!device) {
3450 		err = 0;
3451 		goto out;
3452 	}
3453 	idr_for_each_entry_continue(idr_to_search, device, minor) {
3454 		retcode = NO_ERROR;
3455 		goto put_result;  /* only one iteration */
3456 	}
3457 	err = 0;
3458 	goto out;  /* no more devices */
3459 
3460 put_result:
3461 	dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
3462 			cb->nlh->nlmsg_seq, &drbd_genl_family,
3463 			NLM_F_MULTI, DRBD_ADM_GET_DEVICES);
3464 	err = -ENOMEM;
3465 	if (!dh)
3466 		goto out;
3467 	dh->ret_code = retcode;
3468 	dh->minor = -1U;
3469 	if (retcode == NO_ERROR) {
3470 		dh->minor = device->minor;
3471 		err = nla_put_drbd_cfg_context(skb, device->resource, NULL, device);
3472 		if (err)
3473 			goto out;
3474 		if (get_ldev(device)) {
3475 			struct disk_conf *disk_conf =
3476 				rcu_dereference(device->ldev->disk_conf);
3477 
3478 			err = disk_conf_to_skb(skb, disk_conf, !capable(CAP_SYS_ADMIN));
3479 			put_ldev(device);
3480 			if (err)
3481 				goto out;
3482 		}
3483 		device_to_info(&device_info, device);
3484 		err = device_info_to_skb(skb, &device_info, !capable(CAP_SYS_ADMIN));
3485 		if (err)
3486 			goto out;
3487 
3488 		device_to_statistics(&device_statistics, device);
3489 		err = device_statistics_to_skb(skb, &device_statistics, !capable(CAP_SYS_ADMIN));
3490 		if (err)
3491 			goto out;
3492 		cb->args[1] = minor + 1;
3493 	}
3494 	genlmsg_end(skb, dh);
3495 	err = 0;
3496 
3497 out:
3498 	rcu_read_unlock();
3499 	if (err)
3500 		return err;
3501 	return skb->len;
3502 }
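/*
 * Device dump cursor, as used above: cb->args[0] caches the optional
 * resource filter (the reference is dropped in the .done callback), and
 * cb->args[1] is the next minor to visit; storing minor + 1 makes the
 * followup call resume just behind the device that was emitted.
 */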
3503 
3504 int drbd_adm_dump_connections_done(struct netlink_callback *cb)
3505 {
3506 	return put_resource_in_arg0(cb, 6);
3507 }
3508 
3509 enum { SINGLE_RESOURCE, ITERATE_RESOURCES };
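/*
 * Cursor layout of drbd_adm_dump_connections(), derived from the code
 * below: cb->args[0] holds the current resource (with a kref held),
 * cb->args[1] holds SINGLE_RESOURCE or ITERATE_RESOURCES, and
 * cb->args[2] holds the connection that was emitted last.
 */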
3510 
3511 int drbd_adm_dump_connections(struct sk_buff *skb, struct netlink_callback *cb)
3512 {
3513 	struct nlattr *resource_filter;
3514 	struct drbd_resource *resource = NULL, *next_resource;
3515 	struct drbd_connection *uninitialized_var(connection);
3516 	int err = 0, retcode;
3517 	struct drbd_genlmsghdr *dh;
3518 	struct connection_info connection_info;
3519 	struct connection_statistics connection_statistics;
3520 
3521 	rcu_read_lock();
3522 	resource = (struct drbd_resource *)cb->args[0];
3523 	if (!cb->args[0]) {
3524 		resource_filter = find_cfg_context_attr(cb->nlh, T_ctx_resource_name);
3525 		if (resource_filter) {
3526 			retcode = ERR_RES_NOT_KNOWN;
3527 			resource = drbd_find_resource(nla_data(resource_filter));
3528 			if (!resource)
3529 				goto put_result;
3530 			cb->args[0] = (long)resource;
3531 			cb->args[1] = SINGLE_RESOURCE;
3532 		}
3533 	}
3534 	if (!resource) {
3535 		if (list_empty(&drbd_resources))
3536 			goto out;
3537 		resource = list_first_entry(&drbd_resources, struct drbd_resource, resources);
3538 		kref_get(&resource->kref);
3539 		cb->args[0] = (long)resource;
3540 		cb->args[1] = ITERATE_RESOURCES;
3541 	}
3542 
3543 next_resource:
3544 	rcu_read_unlock();
3545 	mutex_lock(&resource->conf_update);
3546 	rcu_read_lock();
3547 	if (cb->args[2]) {
3548 		for_each_connection_rcu(connection, resource)
3549 			if (connection == (struct drbd_connection *)cb->args[2])
3550 				goto found_connection;
3551 		/* connection was probably deleted */
3552 		goto no_more_connections;
3553 	}
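	/* Make connection point to the list head (not the first entry). */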
3554 	connection = list_entry(&resource->connections, struct drbd_connection, connections);
3555 
3556 found_connection:
3557 	list_for_each_entry_continue_rcu(connection, &resource->connections, connections) {
3558 		if (!has_net_conf(connection))
3559 			continue;
3560 		retcode = NO_ERROR;
3561 		goto put_result;  /* only one iteration */
3562 	}
3563 
3564 no_more_connections:
3565 	if (cb->args[1] == ITERATE_RESOURCES) {
3566 		for_each_resource_rcu(next_resource, &drbd_resources) {
3567 			if (next_resource == resource)
3568 				goto found_resource;
3569 		}
3570 		/* resource was probably deleted */
3571 	}
3572 	goto out;
3573 
3574 found_resource:
3575 	list_for_each_entry_continue_rcu(next_resource, &drbd_resources, resources) {
3576 		mutex_unlock(&resource->conf_update);
3577 		kref_put(&resource->kref, drbd_destroy_resource);
3578 		resource = next_resource;
3579 		kref_get(&resource->kref);
3580 		cb->args[0] = (long)resource;
3581 		cb->args[2] = 0;
3582 		goto next_resource;
3583 	}
3584 	goto out;  /* no more resources */
3585 
3586 put_result:
3587 	dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
3588 			cb->nlh->nlmsg_seq, &drbd_genl_family,
3589 			NLM_F_MULTI, DRBD_ADM_GET_CONNECTIONS);
3590 	err = -ENOMEM;
3591 	if (!dh)
3592 		goto out;
3593 	dh->ret_code = retcode;
3594 	dh->minor = -1U;
3595 	if (retcode == NO_ERROR) {
3596 		struct net_conf *net_conf;
3597 
3598 		err = nla_put_drbd_cfg_context(skb, resource, connection, NULL);
3599 		if (err)
3600 			goto out;
3601 		net_conf = rcu_dereference(connection->net_conf);
3602 		if (net_conf) {
3603 			err = net_conf_to_skb(skb, net_conf, !capable(CAP_SYS_ADMIN));
3604 			if (err)
3605 				goto out;
3606 		}
3607 		connection_to_info(&connection_info, connection);
3608 		err = connection_info_to_skb(skb, &connection_info, !capable(CAP_SYS_ADMIN));
3609 		if (err)
3610 			goto out;
3611 		connection_statistics.conn_congested = test_bit(NET_CONGESTED, &connection->flags);
3612 		err = connection_statistics_to_skb(skb, &connection_statistics, !capable(CAP_SYS_ADMIN));
3613 		if (err)
3614 			goto out;
3615 		cb->args[2] = (long)connection;
3616 	}
3617 	genlmsg_end(skb, dh);
3618 	err = 0;
3619 
3620 out:
3621 	rcu_read_unlock();
3622 	if (resource)
3623 		mutex_unlock(&resource->conf_update);
3624 	if (err)
3625 		return err;
3626 	return skb->len;
3627 }
3628 
3629 enum mdf_peer_flag {
3630 	MDF_PEER_CONNECTED =	1 << 0,
3631 	MDF_PEER_OUTDATED =	1 << 1,
3632 	MDF_PEER_FENCING =	1 << 2,
3633 	MDF_PEER_FULL_SYNC =	1 << 3,
3634 };
3635 
3636 static void peer_device_to_statistics(struct peer_device_statistics *s,
3637 				      struct drbd_peer_device *peer_device)
3638 {
3639 	struct drbd_device *device = peer_device->device;
3640 
3641 	memset(s, 0, sizeof(*s));
3642 	s->peer_dev_received = device->recv_cnt;
3643 	s->peer_dev_sent = device->send_cnt;
3644 	s->peer_dev_pending = atomic_read(&device->ap_pending_cnt) +
3645 			      atomic_read(&device->rs_pending_cnt);
3646 	s->peer_dev_unacked = atomic_read(&device->unacked_cnt);
3647 	s->peer_dev_out_of_sync = drbd_bm_total_weight(device) << (BM_BLOCK_SHIFT - 9);
3648 	s->peer_dev_resync_failed = device->rs_failed << (BM_BLOCK_SHIFT - 9);
3649 	if (get_ldev(device)) {
3650 		struct drbd_md *md = &device->ldev->md;
3651 
3652 		spin_lock_irq(&md->uuid_lock);
3653 		s->peer_dev_bitmap_uuid = md->uuid[UI_BITMAP];
3654 		spin_unlock_irq(&md->uuid_lock);
3655 		s->peer_dev_flags =
3656 			(drbd_md_test_flag(device->ldev, MDF_CONNECTED_IND) ?
3657 				MDF_PEER_CONNECTED : 0) +
3658 			(drbd_md_test_flag(device->ldev, MDF_CONSISTENT) &&
3659 			 !drbd_md_test_flag(device->ldev, MDF_WAS_UP_TO_DATE) ?
3660 				MDF_PEER_OUTDATED : 0) +
3661 			/* FIXME: MDF_PEER_FENCING? */
3662 			(drbd_md_test_flag(device->ldev, MDF_FULL_SYNC) ?
3663 				MDF_PEER_FULL_SYNC : 0);
3664 		put_ldev(device);
3665 	}
3666 }
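/*
 * Unit conversion used above: the bitmap counts resync blocks, so
 * shifting by BM_BLOCK_SHIFT - 9 turns a bit count into 512-byte
 * sectors.  Assuming the usual BM_BLOCK_SHIFT of 12 (4 KiB per bit),
 * 100 out-of-sync bits -> 100 << 3 == 800 sectors == 400 KiB.
 */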
3667 
3668 int drbd_adm_dump_peer_devices_done(struct netlink_callback *cb)
3669 {
3670 	return put_resource_in_arg0(cb, 9);
3671 }
3672 
3673 int drbd_adm_dump_peer_devices(struct sk_buff *skb, struct netlink_callback *cb)
3674 {
3675 	struct nlattr *resource_filter;
3676 	struct drbd_resource *resource;
3677 	struct drbd_device *uninitialized_var(device);
3678 	struct drbd_peer_device *peer_device = NULL;
3679 	int minor, err, retcode;
3680 	struct drbd_genlmsghdr *dh;
3681 	struct idr *idr_to_search;
3682 
3683 	resource = (struct drbd_resource *)cb->args[0];
3684 	if (!cb->args[0] && !cb->args[1]) {
3685 		resource_filter = find_cfg_context_attr(cb->nlh, T_ctx_resource_name);
3686 		if (resource_filter) {
3687 			retcode = ERR_RES_NOT_KNOWN;
3688 			resource = drbd_find_resource(nla_data(resource_filter));
3689 			if (!resource)
3690 				goto put_result;
3691 		}
3692 		cb->args[0] = (long)resource;
3693 	}
3694 
3695 	rcu_read_lock();
3696 	minor = cb->args[1];
3697 	idr_to_search = resource ? &resource->devices : &drbd_devices;
3698 	device = idr_find(idr_to_search, minor);
3699 	if (!device) {
3700 next_device:
3701 		minor++;
3702 		cb->args[2] = 0;
3703 		device = idr_get_next(idr_to_search, &minor);
3704 		if (!device) {
3705 			err = 0;
3706 			goto out;
3707 		}
3708 	}
3709 	if (cb->args[2]) {
3710 		for_each_peer_device(peer_device, device)
3711 			if (peer_device == (struct drbd_peer_device *)cb->args[2])
3712 				goto found_peer_device;
3713 		/* peer device was probably deleted */
3714 		goto next_device;
3715 	}
3716 	/* Make peer_device point to the list head (not the first entry). */
3717 	peer_device = list_entry(&device->peer_devices, struct drbd_peer_device, peer_devices);
3718 
3719 found_peer_device:
3720 	list_for_each_entry_continue_rcu(peer_device, &device->peer_devices, peer_devices) {
3721 		if (!has_net_conf(peer_device->connection))
3722 			continue;
3723 		retcode = NO_ERROR;
3724 		goto put_result;  /* only one iteration */
3725 	}
3726 	goto next_device;
3727 
3728 put_result:
3729 	dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
3730 			cb->nlh->nlmsg_seq, &drbd_genl_family,
3731 			NLM_F_MULTI, DRBD_ADM_GET_PEER_DEVICES);
3732 	err = -ENOMEM;
3733 	if (!dh)
3734 		goto out;
3735 	dh->ret_code = retcode;
3736 	dh->minor = -1U;
3737 	if (retcode == NO_ERROR) {
3738 		struct peer_device_info peer_device_info;
3739 		struct peer_device_statistics peer_device_statistics;
3740 
3741 		dh->minor = minor;
3742 		err = nla_put_drbd_cfg_context(skb, device->resource, peer_device->connection, device);
3743 		if (err)
3744 			goto out;
3745 		peer_device_to_info(&peer_device_info, peer_device);
3746 		err = peer_device_info_to_skb(skb, &peer_device_info, !capable(CAP_SYS_ADMIN));
3747 		if (err)
3748 			goto out;
3749 		peer_device_to_statistics(&peer_device_statistics, peer_device);
3750 		err = peer_device_statistics_to_skb(skb, &peer_device_statistics, !capable(CAP_SYS_ADMIN));
3751 		if (err)
3752 			goto out;
3753 		cb->args[1] = minor;
3754 		cb->args[2] = (long)peer_device;
3755 	}
3756 	genlmsg_end(skb, dh);
3757 	err = 0;
3758 
3759 out:
3760 	rcu_read_unlock();
3761 	if (err)
3762 		return err;
3763 	return skb->len;
3764 }
3765 /*
3766  * Return the connection of @resource if @resource has exactly one connection.
3767  */
3768 static struct drbd_connection *the_only_connection(struct drbd_resource *resource)
3769 {
3770 	struct list_head *connections = &resource->connections;
3771 
3772 	if (list_empty(connections) || connections->next->next != connections)
3773 		return NULL;
3774 	return list_first_entry(&resource->connections, struct drbd_connection, connections);
3775 }
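/*
 * The open-coded test above should be equivalent to
 * !list_is_singular(connections): return NULL unless the resource has
 * exactly one connection.
 */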
3776 
3777 static int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device,
3778 		const struct sib_info *sib)
3779 {
3780 	struct drbd_resource *resource = device->resource;
3781 	struct state_info *si = NULL; /* for sizeof(si->member); */
3782 	struct nlattr *nla;
3783 	int got_ldev;
3784 	int err = 0;
3785 	int exclude_sensitive;
3786 
3787 	/* If sib != NULL, this is drbd_bcast_event, which anyone can listen
3788 	 * to.  So we better exclude_sensitive information.
3789 	 *
3790 	 * If sib == NULL, this is drbd_adm_get_status, executed synchronously
3791 	 * in the context of the requesting user process. Exclude sensitive
3792 	 * information, unless current has superuser.
3793 	 *
3794 	 * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
3795 	 * relies on the current implementation of netlink_dump(), which
3796 	 * executes the dump callback successively from netlink_recvmsg(),
3797 	 * always in the context of the receiving process */
3798 	exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);
3799 
3800 	got_ldev = get_ldev(device);
3801 
3802 	/* We need to add connection name and volume number information still.
3803 	 * Minor number is in drbd_genlmsghdr. */
3804 	if (nla_put_drbd_cfg_context(skb, resource, the_only_connection(resource), device))
3805 		goto nla_put_failure;
3806 
3807 	if (res_opts_to_skb(skb, &device->resource->res_opts, exclude_sensitive))
3808 		goto nla_put_failure;
3809 
3810 	rcu_read_lock();
3811 	if (got_ldev) {
3812 		struct disk_conf *disk_conf;
3813 
3814 		disk_conf = rcu_dereference(device->ldev->disk_conf);
3815 		err = disk_conf_to_skb(skb, disk_conf, exclude_sensitive);
3816 	}
3817 	if (!err) {
3818 		struct net_conf *nc;
3819 
3820 		nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
3821 		if (nc)
3822 			err = net_conf_to_skb(skb, nc, exclude_sensitive);
3823 	}
3824 	rcu_read_unlock();
3825 	if (err)
3826 		goto nla_put_failure;
3827 
3828 	nla = nla_nest_start_noflag(skb, DRBD_NLA_STATE_INFO);
3829 	if (!nla)
3830 		goto nla_put_failure;
3831 	if (nla_put_u32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY) ||
3832 	    nla_put_u32(skb, T_current_state, device->state.i) ||
3833 	    nla_put_u64_0pad(skb, T_ed_uuid, device->ed_uuid) ||
3834 	    nla_put_u64_0pad(skb, T_capacity,
3835 			     drbd_get_capacity(device->this_bdev)) ||
3836 	    nla_put_u64_0pad(skb, T_send_cnt, device->send_cnt) ||
3837 	    nla_put_u64_0pad(skb, T_recv_cnt, device->recv_cnt) ||
3838 	    nla_put_u64_0pad(skb, T_read_cnt, device->read_cnt) ||
3839 	    nla_put_u64_0pad(skb, T_writ_cnt, device->writ_cnt) ||
3840 	    nla_put_u64_0pad(skb, T_al_writ_cnt, device->al_writ_cnt) ||
3841 	    nla_put_u64_0pad(skb, T_bm_writ_cnt, device->bm_writ_cnt) ||
3842 	    nla_put_u32(skb, T_ap_bio_cnt, atomic_read(&device->ap_bio_cnt)) ||
3843 	    nla_put_u32(skb, T_ap_pending_cnt, atomic_read(&device->ap_pending_cnt)) ||
3844 	    nla_put_u32(skb, T_rs_pending_cnt, atomic_read(&device->rs_pending_cnt)))
3845 		goto nla_put_failure;
3846 
3847 	if (got_ldev) {
3848 		int err;
3849 
3850 		spin_lock_irq(&device->ldev->md.uuid_lock);
3851 		err = nla_put(skb, T_uuids, sizeof(si->uuids), device->ldev->md.uuid);
3852 		spin_unlock_irq(&device->ldev->md.uuid_lock);
3853 
3854 		if (err)
3855 			goto nla_put_failure;
3856 
3857 		if (nla_put_u32(skb, T_disk_flags, device->ldev->md.flags) ||
3858 		    nla_put_u64_0pad(skb, T_bits_total, drbd_bm_bits(device)) ||
3859 		    nla_put_u64_0pad(skb, T_bits_oos,
3860 				     drbd_bm_total_weight(device)))
3861 			goto nla_put_failure;
3862 		if (C_SYNC_SOURCE <= device->state.conn &&
3863 		    C_PAUSED_SYNC_T >= device->state.conn) {
3864 			if (nla_put_u64_0pad(skb, T_bits_rs_total,
3865 					     device->rs_total) ||
3866 			    nla_put_u64_0pad(skb, T_bits_rs_failed,
3867 					     device->rs_failed))
3868 				goto nla_put_failure;
3869 		}
3870 	}
3871 
3872 	if (sib) {
3873 		switch (sib->sib_reason) {
3874 		case SIB_SYNC_PROGRESS:
3875 		case SIB_GET_STATUS_REPLY:
3876 			break;
3877 		case SIB_STATE_CHANGE:
3878 			if (nla_put_u32(skb, T_prev_state, sib->os.i) ||
3879 			    nla_put_u32(skb, T_new_state, sib->ns.i))
3880 				goto nla_put_failure;
3881 			break;
3882 		case SIB_HELPER_POST:
3883 			if (nla_put_u32(skb, T_helper_exit_code,
3884 					sib->helper_exit_code))
3885 				goto nla_put_failure;
3886 			/* fall through */
3887 		case SIB_HELPER_PRE:
3888 			if (nla_put_string(skb, T_helper, sib->helper_name))
3889 				goto nla_put_failure;
3890 			break;
3891 		}
3892 	}
3893 	nla_nest_end(skb, nla);
3894 
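	/* On success, execution falls through the dead "if (0)" and skips
	 * the error assignment; the nla_put_failure gotos jump into it. */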
3895 	if (0)
3896 nla_put_failure:
3897 		err = -EMSGSIZE;
3898 	if (got_ldev)
3899 		put_ldev(device);
3900 	return err;
3901 }
3902 
3903 int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
3904 {
3905 	struct drbd_config_context adm_ctx;
3906 	enum drbd_ret_code retcode;
3907 	int err;
3908 
3909 	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3910 	if (!adm_ctx.reply_skb)
3911 		return retcode;
3912 	if (retcode != NO_ERROR)
3913 		goto out;
3914 
3915 	err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.device, NULL);
3916 	if (err) {
3917 		nlmsg_free(adm_ctx.reply_skb);
3918 		return err;
3919 	}
3920 out:
3921 	drbd_adm_finish(&adm_ctx, info, retcode);
3922 	return 0;
3923 }
3924 
3925 static int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
3926 {
3927 	struct drbd_device *device;
3928 	struct drbd_genlmsghdr *dh;
3929 	struct drbd_resource *pos = (struct drbd_resource *)cb->args[0];
3930 	struct drbd_resource *resource = NULL;
3931 	struct drbd_resource *tmp;
3932 	unsigned volume = cb->args[1];
3933 
3934 	/* Open coded, deferred, iteration:
3935 	 * for_each_resource_safe(resource, tmp, &drbd_resources) {
3936 	 *      connection = "first connection of resource or undefined";
3937 	 *	idr_for_each_entry(&resource->devices, device, i) {
3938 	 *	  ...
3939 	 *	}
3940 	 * }
3941 	 * where resource is cb->args[0];
3942 	 * and i is cb->args[1];
3943 	 *
3944 	 * cb->args[2] indicates if we shall loop over all resources,
3945 	 * or just dump all volumes of a single resource.
3946 	 *
3947 	 * This may miss entries inserted after this dump started,
3948 	 * or entries deleted before they are reached.
3949 	 *
3950 	 * We need to make sure the device won't disappear while
3951 	 * we are looking at it, and revalidate our iterators
3952 	 * on each iteration.
3953 	 */
3954 
3955 	/* synchronize with conn_create()/drbd_destroy_connection() */
3956 	rcu_read_lock();
3957 	/* revalidate iterator position */
3958 	for_each_resource_rcu(tmp, &drbd_resources) {
3959 		if (pos == NULL) {
3960 			/* first iteration */
3961 			pos = tmp;
3962 			resource = pos;
3963 			break;
3964 		}
3965 		if (tmp == pos) {
3966 			resource = pos;
3967 			break;
3968 		}
3969 	}
3970 	if (resource) {
3971 next_resource:
3972 		device = idr_get_next(&resource->devices, &volume);
3973 		if (!device) {
3974 			/* No more volumes to dump on this resource.
3975 			 * Advance resource iterator. */
3976 			pos = list_entry_rcu(resource->resources.next,
3977 					     struct drbd_resource, resources);
3978 			/* Did we dump any volume of this resource yet? */
3979 			if (volume != 0) {
3980 				/* If we reached the end of the list,
3981 				 * or only a single resource dump was requested,
3982 				 * we are done. */
3983 				if (&pos->resources == &drbd_resources || cb->args[2])
3984 					goto out;
3985 				volume = 0;
3986 				resource = pos;
3987 				goto next_resource;
3988 			}
3989 		}
3990 
3991 		dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
3992 				cb->nlh->nlmsg_seq, &drbd_genl_family,
3993 				NLM_F_MULTI, DRBD_ADM_GET_STATUS);
3994 		if (!dh)
3995 			goto out;
3996 
3997 		if (!device) {
3998 			/* This is a connection without a single volume.
3999 			 * Suprisingly enough, it may have a network
4000 			 * configuration. */
4001 			struct drbd_connection *connection;
4002 
4003 			dh->minor = -1U;
4004 			dh->ret_code = NO_ERROR;
4005 			connection = the_only_connection(resource);
4006 			if (nla_put_drbd_cfg_context(skb, resource, connection, NULL))
4007 				goto cancel;
4008 			if (connection) {
4009 				struct net_conf *nc;
4010 
4011 				nc = rcu_dereference(connection->net_conf);
4012 				if (nc && net_conf_to_skb(skb, nc, 1) != 0)
4013 					goto cancel;
4014 			}
4015 			goto done;
4016 		}
4017 
4018 		D_ASSERT(device, device->vnr == volume);
4019 		D_ASSERT(device, device->resource == resource);
4020 
4021 		dh->minor = device_to_minor(device);
4022 		dh->ret_code = NO_ERROR;
4023 
4024 		if (nla_put_status_info(skb, device, NULL)) {
4025 cancel:
4026 			genlmsg_cancel(skb, dh);
4027 			goto out;
4028 		}
4029 done:
4030 		genlmsg_end(skb, dh);
4031 	}
4032 
4033 out:
4034 	rcu_read_unlock();
4035 	/* where to start the next iteration */
4036 	cb->args[0] = (long)pos;
4037 	cb->args[1] = (pos == resource) ? volume + 1 : 0;
4038 
4039 	/* No more resources/volumes/minors found results in an empty skb.
4040 	 * Which will terminate the dump. */
4041 	return skb->len;
4042 }
4043 
4044 /*
4045  * Request status of all resources, or of all volumes within a single resource.
4046  *
4047  * This is a dump, as the answer may not fit in a single reply skb otherwise.
4048  * Which means we cannot use the family->attrbuf or other such members, because
4049  * dump is NOT protected by the genl_lock().  During dump, we only have access
4050  * to the incoming skb, and need to opencode "parsing" of the nlattr payload.
4051  *
4052  * Once things are setup properly, we call into get_one_status().
4053  */
4054 int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
4055 {
4056 	const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
4057 	struct nlattr *nla;
4058 	const char *resource_name;
4059 	struct drbd_resource *resource;
4060 	int maxtype;
4061 
4062 	/* Is this a followup call? */
4063 	if (cb->args[0]) {
4064 		/* ... of a single resource dump,
4065 		 * and the resource iterator has been advanced already? */
4066 		if (cb->args[2] && cb->args[2] != cb->args[0])
4067 			return 0; /* DONE. */
4068 		goto dump;
4069 	}
4070 
4071 	/* First call (from netlink_dump_start).  We need to figure out
4072 	 * which resource(s) the user wants us to dump. */
4073 	nla = nla_find(nlmsg_attrdata(cb->nlh, hdrlen),
4074 			nlmsg_attrlen(cb->nlh, hdrlen),
4075 			DRBD_NLA_CFG_CONTEXT);
4076 
4077 	/* No explicit context given.  Dump all. */
4078 	if (!nla)
4079 		goto dump;
4080 	maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
4081 	nla = drbd_nla_find_nested(maxtype, nla, __nla_type(T_ctx_resource_name));
4082 	if (IS_ERR(nla))
4083 		return PTR_ERR(nla);
4084 	/* context given, but no name present? */
4085 	if (!nla)
4086 		return -EINVAL;
4087 	resource_name = nla_data(nla);
4088 	if (!*resource_name)
4089 		return -ENODEV;
4090 	resource = drbd_find_resource(resource_name);
4091 	if (!resource)
4092 		return -ENODEV;
4093 
4094 	kref_put(&resource->kref, drbd_destroy_resource); /* get_one_status() revalidates the resource */
4095 
4096 	/* prime iterators, and set "filter" mode mark:
4097 	 * only dump this connection. */
4098 	cb->args[0] = (long)resource;
4099 	/* cb->args[1] = 0; passed in this way. */
4100 	cb->args[2] = (long)resource;
4101 
4102 dump:
4103 	return get_one_status(skb, cb);
4104 }
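/*
 * cb->args usage in the status dump, as primed above: args[0] is the
 * iterator position (a resource pointer), args[1] the next volume, and
 * args[2] doubles as the "single resource only" filter mark.  Once
 * get_one_status() advances args[0] past the filtered resource, the
 * followup call returns 0 and the dump is done.
 */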
4105 
4106 int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
4107 {
4108 	struct drbd_config_context adm_ctx;
4109 	enum drbd_ret_code retcode;
4110 	struct timeout_parms tp;
4111 	int err;
4112 
4113 	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
4114 	if (!adm_ctx.reply_skb)
4115 		return retcode;
4116 	if (retcode != NO_ERROR)
4117 		goto out;
4118 
4119 	tp.timeout_type =
4120 		adm_ctx.device->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
4121 		test_bit(USE_DEGR_WFC_T, &adm_ctx.device->flags) ? UT_DEGRADED :
4122 		UT_DEFAULT;
4123 
4124 	err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
4125 	if (err) {
4126 		nlmsg_free(adm_ctx.reply_skb);
4127 		return err;
4128 	}
4129 out:
4130 	drbd_adm_finish(&adm_ctx, info, retcode);
4131 	return 0;
4132 }
4133 
4134 int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
4135 {
4136 	struct drbd_config_context adm_ctx;
4137 	struct drbd_device *device;
4138 	enum drbd_ret_code retcode;
4139 	struct start_ov_parms parms;
4140 
4141 	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
4142 	if (!adm_ctx.reply_skb)
4143 		return retcode;
4144 	if (retcode != NO_ERROR)
4145 		goto out;
4146 
4147 	device = adm_ctx.device;
4148 
4149 	/* resume from last known position, if possible */
4150 	parms.ov_start_sector = device->ov_start_sector;
4151 	parms.ov_stop_sector = ULLONG_MAX;
4152 	if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
4153 		int err = start_ov_parms_from_attrs(&parms, info);
4154 		if (err) {
4155 			retcode = ERR_MANDATORY_TAG;
4156 			drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
4157 			goto out;
4158 		}
4159 	}
4160 	mutex_lock(&adm_ctx.resource->adm_mutex);
4161 
4162 	/* w_make_ov_request expects position to be aligned */
4163 	device->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
4164 	device->ov_stop_sector = parms.ov_stop_sector;
4165 
4166 	/* If there is still bitmap IO pending, e.g. previous resync or verify
4167 	 * just being finished, wait for it before requesting a new resync. */
4168 	drbd_suspend_io(device);
4169 	wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
4170 	retcode = drbd_request_state(device, NS(conn, C_VERIFY_S));
4171 	drbd_resume_io(device);
4172 
4173 	mutex_unlock(&adm_ctx.resource->adm_mutex);
4174 out:
4175 	drbd_adm_finish(&adm_ctx, info, retcode);
4176 	return 0;
4177 }
4178 
4179 
4180 int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
4181 {
4182 	struct drbd_config_context adm_ctx;
4183 	struct drbd_device *device;
4184 	enum drbd_ret_code retcode;
4185 	int skip_initial_sync = 0;
4186 	int err;
4187 	struct new_c_uuid_parms args;
4188 
4189 	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
4190 	if (!adm_ctx.reply_skb)
4191 		return retcode;
4192 	if (retcode != NO_ERROR)
4193 		goto out_nolock;
4194 
4195 	device = adm_ctx.device;
4196 	memset(&args, 0, sizeof(args));
4197 	if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
4198 		err = new_c_uuid_parms_from_attrs(&args, info);
4199 		if (err) {
4200 			retcode = ERR_MANDATORY_TAG;
4201 			drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
4202 			goto out_nolock;
4203 		}
4204 	}
4205 
4206 	mutex_lock(&adm_ctx.resource->adm_mutex);
4207 	mutex_lock(device->state_mutex); /* Protects us against serialized state changes. */
4208 
4209 	if (!get_ldev(device)) {
4210 		retcode = ERR_NO_DISK;
4211 		goto out;
4212 	}
4213 
4214 	/* this is "skip initial sync", assume to be clean */
4215 	if (device->state.conn == C_CONNECTED &&
4216 	    first_peer_device(device)->connection->agreed_pro_version >= 90 &&
4217 	    device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
4218 		drbd_info(device, "Preparing to skip initial sync\n");
4219 		skip_initial_sync = 1;
4220 	} else if (device->state.conn != C_STANDALONE) {
4221 		retcode = ERR_CONNECTED;
4222 		goto out_dec;
4223 	}
4224 
4225 	drbd_uuid_set(device, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
4226 	drbd_uuid_new_current(device); /* New current, previous to UI_BITMAP */
4227 
4228 	if (args.clear_bm) {
4229 		err = drbd_bitmap_io(device, &drbd_bmio_clear_n_write,
4230 			"clear_n_write from new_c_uuid", BM_LOCKED_MASK);
4231 		if (err) {
4232 			drbd_err(device, "Writing bitmap failed with %d\n", err);
4233 			retcode = ERR_IO_MD_DISK;
4234 		}
4235 		if (skip_initial_sync) {
4236 			drbd_send_uuids_skip_initial_sync(first_peer_device(device));
4237 			_drbd_uuid_set(device, UI_BITMAP, 0);
4238 			drbd_print_uuids(device, "cleared bitmap UUID");
4239 			spin_lock_irq(&device->resource->req_lock);
4240 			_drbd_set_state(_NS2(device, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
4241 					CS_VERBOSE, NULL);
4242 			spin_unlock_irq(&device->resource->req_lock);
4243 		}
4244 	}
4245 
4246 	drbd_md_sync(device);
4247 out_dec:
4248 	put_ldev(device);
4249 out:
4250 	mutex_unlock(device->state_mutex);
4251 	mutex_unlock(&adm_ctx.resource->adm_mutex);
4252 out_nolock:
4253 	drbd_adm_finish(&adm_ctx, info, retcode);
4254 	return 0;
4255 }
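/*
 * The "skip initial sync" shortcut above requires all of: the peers are
 * C_CONNECTED, the agreed protocol version is >= 90, the current UUID
 * is still UUID_JUST_CREATED (a freshly created device), and the caller
 * set clear_bm.  Otherwise, generating a new current UUID on anything
 * but a C_STANDALONE device is refused with ERR_CONNECTED.
 */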
4256 
4257 static enum drbd_ret_code
4258 drbd_check_resource_name(struct drbd_config_context *adm_ctx)
4259 {
4260 	const char *name = adm_ctx->resource_name;
4261 	if (!name || !name[0]) {
4262 		drbd_msg_put_info(adm_ctx->reply_skb, "resource name missing");
4263 		return ERR_MANDATORY_TAG;
4264 	}
4265 	/* if we want to use these in sysfs/configfs/debugfs some day,
4266 	 * we must not allow slashes */
4267 	if (strchr(name, '/')) {
4268 		drbd_msg_put_info(adm_ctx->reply_skb, "invalid resource name");
4269 		return ERR_INVALID_REQUEST;
4270 	}
4271 	return NO_ERROR;
4272 }
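/*
 * Example: "r0" passes; an empty or missing name yields
 * ERR_MANDATORY_TAG, and "r0/0" yields ERR_INVALID_REQUEST because '/'
 * is reserved for possible sysfs/configfs/debugfs use.
 */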
4273 
4274 static void resource_to_info(struct resource_info *info,
4275 			     struct drbd_resource *resource)
4276 {
4277 	info->res_role = conn_highest_role(first_connection(resource));
4278 	info->res_susp = resource->susp;
4279 	info->res_susp_nod = resource->susp_nod;
4280 	info->res_susp_fen = resource->susp_fen;
4281 }
4282 
4283 int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
4284 {
4285 	struct drbd_connection *connection;
4286 	struct drbd_config_context adm_ctx;
4287 	enum drbd_ret_code retcode;
4288 	struct res_opts res_opts;
4289 	int err;
4290 
4291 	retcode = drbd_adm_prepare(&adm_ctx, skb, info, 0);
4292 	if (!adm_ctx.reply_skb)
4293 		return retcode;
4294 	if (retcode != NO_ERROR)
4295 		goto out;
4296 
4297 	set_res_opts_defaults(&res_opts);
4298 	err = res_opts_from_attrs(&res_opts, info);
4299 	if (err && err != -ENOMSG) {
4300 		retcode = ERR_MANDATORY_TAG;
4301 		drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
4302 		goto out;
4303 	}
4304 
4305 	retcode = drbd_check_resource_name(&adm_ctx);
4306 	if (retcode != NO_ERROR)
4307 		goto out;
4308 
4309 	if (adm_ctx.resource) {
4310 		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
4311 			retcode = ERR_INVALID_REQUEST;
4312 			drbd_msg_put_info(adm_ctx.reply_skb, "resource exists");
4313 		}
4314 		/* else: still NO_ERROR */
4315 		goto out;
4316 	}
4317 
4318 	/* not yet safe for genl_family.parallel_ops */
4319 	mutex_lock(&resources_mutex);
4320 	connection = conn_create(adm_ctx.resource_name, &res_opts);
4321 	mutex_unlock(&resources_mutex);
4322 
4323 	if (connection) {
4324 		struct resource_info resource_info;
4325 
4326 		mutex_lock(&notification_mutex);
4327 		resource_to_info(&resource_info, connection->resource);
4328 		notify_resource_state(NULL, 0, connection->resource,
4329 				      &resource_info, NOTIFY_CREATE);
4330 		mutex_unlock(&notification_mutex);
4331 	} else
4332 		retcode = ERR_NOMEM;
4333 
4334 out:
4335 	drbd_adm_finish(&adm_ctx, info, retcode);
4336 	return 0;
4337 }
4338 
4339 static void device_to_info(struct device_info *info,
4340 			   struct drbd_device *device)
4341 {
4342 	info->dev_disk_state = device->state.disk;
4343 }
4344 
4345 
4346 int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info)
4347 {
4348 	struct drbd_config_context adm_ctx;
4349 	struct drbd_genlmsghdr *dh = info->userhdr;
4350 	enum drbd_ret_code retcode;
4351 
4352 	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
4353 	if (!adm_ctx.reply_skb)
4354 		return retcode;
4355 	if (retcode != NO_ERROR)
4356 		goto out;
4357 
4358 	if (dh->minor > MINORMASK) {
4359 		drbd_msg_put_info(adm_ctx.reply_skb, "requested minor out of range");
4360 		retcode = ERR_INVALID_REQUEST;
4361 		goto out;
4362 	}
4363 	if (adm_ctx.volume > DRBD_VOLUME_MAX) {
4364 		drbd_msg_put_info(adm_ctx.reply_skb, "requested volume id out of range");
4365 		retcode = ERR_INVALID_REQUEST;
4366 		goto out;
4367 	}
4368 
4369 	/* drbd_adm_prepare made sure already
4370 	 * that first_peer_device(device)->connection and device->vnr match the request. */
4371 	if (adm_ctx.device) {
4372 		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
4373 			retcode = ERR_MINOR_OR_VOLUME_EXISTS;
4374 		/* else: still NO_ERROR */
4375 		goto out;
4376 	}
4377 
4378 	mutex_lock(&adm_ctx.resource->adm_mutex);
4379 	retcode = drbd_create_device(&adm_ctx, dh->minor);
4380 	if (retcode == NO_ERROR) {
4381 		struct drbd_device *device;
4382 		struct drbd_peer_device *peer_device;
4383 		struct device_info info;
4384 		unsigned int peer_devices = 0;
4385 		enum drbd_notification_type flags;
4386 
4387 		device = minor_to_device(dh->minor);
4388 		for_each_peer_device(peer_device, device) {
4389 			if (!has_net_conf(peer_device->connection))
4390 				continue;
4391 			peer_devices++;
4392 		}
4393 
4394 		device_to_info(&info, device);
4395 		mutex_lock(&notification_mutex);
4396 		flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
4397 		notify_device_state(NULL, 0, device, &info, NOTIFY_CREATE | flags);
4398 		for_each_peer_device(peer_device, device) {
4399 			struct peer_device_info peer_device_info;
4400 
4401 			if (!has_net_conf(peer_device->connection))
4402 				continue;
4403 			peer_device_to_info(&peer_device_info, peer_device);
4404 			flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
4405 			notify_peer_device_state(NULL, 0, peer_device, &peer_device_info,
4406 						 NOTIFY_CREATE | flags);
4407 		}
4408 		mutex_unlock(&notification_mutex);
4409 	}
4410 	mutex_unlock(&adm_ctx.resource->adm_mutex);
4411 out:
4412 	drbd_adm_finish(&adm_ctx, info, retcode);
4413 	return 0;
4414 }
4415 
4416 static enum drbd_ret_code adm_del_minor(struct drbd_device *device)
4417 {
4418 	struct drbd_peer_device *peer_device;
4419 
4420 	if (device->state.disk == D_DISKLESS &&
4421 	    /* no need to be device->state.conn == C_STANDALONE &&
4422 	     * we may want to delete a minor from a live replication group.
4423 	     */
4424 	    device->state.role == R_SECONDARY) {
4425 		struct drbd_connection *connection =
4426 			first_connection(device->resource);
4427 
4428 		_drbd_request_state(device, NS(conn, C_WF_REPORT_PARAMS),
4429 				    CS_VERBOSE + CS_WAIT_COMPLETE);
4430 
4431 		/* If the state engine hasn't stopped the sender thread yet, we
4432 		 * need to flush the sender work queue before generating the
4433 		 * DESTROY events here. */
4434 		if (get_t_state(&connection->worker) == RUNNING)
4435 			drbd_flush_workqueue(&connection->sender_work);
4436 
4437 		mutex_lock(&notification_mutex);
4438 		for_each_peer_device(peer_device, device) {
4439 			if (!has_net_conf(peer_device->connection))
4440 				continue;
4441 			notify_peer_device_state(NULL, 0, peer_device, NULL,
4442 						 NOTIFY_DESTROY | NOTIFY_CONTINUES);
4443 		}
4444 		notify_device_state(NULL, 0, device, NULL, NOTIFY_DESTROY);
4445 		mutex_unlock(&notification_mutex);
4446 
4447 		drbd_delete_device(device);
4448 		return NO_ERROR;
4449 	} else
4450 		return ERR_MINOR_CONFIGURED;
4451 }
4452 
4453 int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info)
4454 {
4455 	struct drbd_config_context adm_ctx;
4456 	enum drbd_ret_code retcode;
4457 
4458 	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
4459 	if (!adm_ctx.reply_skb)
4460 		return retcode;
4461 	if (retcode != NO_ERROR)
4462 		goto out;
4463 
4464 	mutex_lock(&adm_ctx.resource->adm_mutex);
4465 	retcode = adm_del_minor(adm_ctx.device);
4466 	mutex_unlock(&adm_ctx.resource->adm_mutex);
4467 out:
4468 	drbd_adm_finish(&adm_ctx, info, retcode);
4469 	return 0;
4470 }
4471 
4472 static int adm_del_resource(struct drbd_resource *resource)
4473 {
4474 	struct drbd_connection *connection;
4475 
4476 	for_each_connection(connection, resource) {
4477 		if (connection->cstate > C_STANDALONE)
4478 			return ERR_NET_CONFIGURED;
4479 	}
4480 	if (!idr_is_empty(&resource->devices))
4481 		return ERR_RES_IN_USE;
4482 
4483 	/* The state engine has stopped the sender thread, so we don't
4484 	 * need to flush the sender work queue before generating the
4485 	 * DESTROY event here. */
4486 	mutex_lock(&notification_mutex);
4487 	notify_resource_state(NULL, 0, resource, NULL, NOTIFY_DESTROY);
4488 	mutex_unlock(&notification_mutex);
4489 
4490 	mutex_lock(&resources_mutex);
4491 	list_del_rcu(&resource->resources);
4492 	mutex_unlock(&resources_mutex);
4493 	/* Make sure all threads have actually stopped: state handling only
4494 	 * does drbd_thread_stop_nowait(). */
4495 	list_for_each_entry(connection, &resource->connections, connections)
4496 		drbd_thread_stop(&connection->worker);
4497 	synchronize_rcu();
4498 	drbd_free_resource(resource);
4499 	return NO_ERROR;
4500 }
4501 
4502 int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
4503 {
4504 	struct drbd_config_context adm_ctx;
4505 	struct drbd_resource *resource;
4506 	struct drbd_connection *connection;
4507 	struct drbd_device *device;
4508 	int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
4509 	unsigned i;
4510 
4511 	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
4512 	if (!adm_ctx.reply_skb)
4513 		return retcode;
4514 	if (retcode != NO_ERROR)
4515 		goto finish;
4516 
4517 	resource = adm_ctx.resource;
4518 	mutex_lock(&resource->adm_mutex);
4519 	/* demote */
4520 	for_each_connection(connection, resource) {
4521 		struct drbd_peer_device *peer_device;
4522 
4523 		idr_for_each_entry(&connection->peer_devices, peer_device, i) {
4524 			retcode = drbd_set_role(peer_device->device, R_SECONDARY, 0);
4525 			if (retcode < SS_SUCCESS) {
4526 				drbd_msg_put_info(adm_ctx.reply_skb, "failed to demote");
4527 				goto out;
4528 			}
4529 		}
4530 
4531 		retcode = conn_try_disconnect(connection, 0);
4532 		if (retcode < SS_SUCCESS) {
4533 			drbd_msg_put_info(adm_ctx.reply_skb, "failed to disconnect");
4534 			goto out;
4535 		}
4536 	}
4537 
4538 	/* detach */
4539 	idr_for_each_entry(&resource->devices, device, i) {
4540 		retcode = adm_detach(device, 0);
4541 		if (retcode < SS_SUCCESS || retcode > NO_ERROR) {
4542 			drbd_msg_put_info(adm_ctx.reply_skb, "failed to detach");
4543 			goto out;
4544 		}
4545 	}
4546 
4547 	/* delete volumes */
4548 	idr_for_each_entry(&resource->devices, device, i) {
4549 		retcode = adm_del_minor(device);
4550 		if (retcode != NO_ERROR) {
4551 			/* "can not happen" */
4552 			drbd_msg_put_info(adm_ctx.reply_skb, "failed to delete volume");
4553 			goto out;
4554 		}
4555 	}
4556 
4557 	retcode = adm_del_resource(resource);
4558 out:
4559 	mutex_unlock(&resource->adm_mutex);
4560 finish:
4561 	drbd_adm_finish(&adm_ctx, info, retcode);
4562 	return 0;
4563 }
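/*
 * Summary of the "down" sequence implemented above, in order: demote
 * every volume to R_SECONDARY, disconnect every connection, detach
 * every disk, delete every minor, and finally delete the resource.
 * Any failing step aborts the rest and reports the reason via
 * drbd_msg_put_info().
 */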
4564 
4565 int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
4566 {
4567 	struct drbd_config_context adm_ctx;
4568 	struct drbd_resource *resource;
4569 	enum drbd_ret_code retcode;
4570 
4571 	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
4572 	if (!adm_ctx.reply_skb)
4573 		return retcode;
4574 	if (retcode != NO_ERROR)
4575 		goto finish;
4576 	resource = adm_ctx.resource;
4577 
4578 	mutex_lock(&resource->adm_mutex);
4579 	retcode = adm_del_resource(resource);
4580 	mutex_unlock(&resource->adm_mutex);
4581 finish:
4582 	drbd_adm_finish(&adm_ctx, info, retcode);
4583 	return 0;
4584 }
4585 
4586 void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
4587 {
4588 	struct sk_buff *msg;
4589 	struct drbd_genlmsghdr *d_out;
4590 	unsigned seq;
4591 	int err = -ENOMEM;
4592 
4593 	seq = atomic_inc_return(&drbd_genl_seq);
4594 	msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
4595 	if (!msg)
4596 		goto failed;
4597 
4598 	err = -EMSGSIZE;
4599 	d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
4600 	if (!d_out) /* cannot happen, but anyway. */
4601 		goto nla_put_failure;
4602 	d_out->minor = device_to_minor(device);
4603 	d_out->ret_code = NO_ERROR;
4604 
4605 	if (nla_put_status_info(msg, device, sib))
4606 		goto nla_put_failure;
4607 	genlmsg_end(msg, d_out);
4608 	err = drbd_genl_multicast_events(msg, GFP_NOWAIT);
4609 	/* msg has been consumed or freed in netlink_broadcast() */
4610 	if (err && err != -ESRCH)
4611 		goto failed;
4612 
4613 	return;
4614 
4615 nla_put_failure:
4616 	nlmsg_free(msg);
4617 failed:
4618 	drbd_err(device, "Error %d while broadcasting event. "
4619 			"Event seq:%u sib_reason:%u\n",
4620 			err, seq, sib->sib_reason);
4621 }
4622 
4623 static int nla_put_notification_header(struct sk_buff *msg,
4624 				       enum drbd_notification_type type)
4625 {
4626 	struct drbd_notification_header nh = {
4627 		.nh_type = type,
4628 	};
4629 
4630 	return drbd_notification_header_to_skb(msg, &nh, true);
4631 }
4632 
4633 void notify_resource_state(struct sk_buff *skb,
4634 			   unsigned int seq,
4635 			   struct drbd_resource *resource,
4636 			   struct resource_info *resource_info,
4637 			   enum drbd_notification_type type)
4638 {
4639 	struct resource_statistics resource_statistics;
4640 	struct drbd_genlmsghdr *dh;
4641 	bool multicast = false;
4642 	int err;
4643 
4644 	if (!skb) {
4645 		seq = atomic_inc_return(&notify_genl_seq);
4646 		skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
4647 		err = -ENOMEM;
4648 		if (!skb)
4649 			goto failed;
4650 		multicast = true;
4651 	}
4652 
4653 	err = -EMSGSIZE;
4654 	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_RESOURCE_STATE);
4655 	if (!dh)
4656 		goto nla_put_failure;
4657 	dh->minor = -1U;
4658 	dh->ret_code = NO_ERROR;
4659 	if (nla_put_drbd_cfg_context(skb, resource, NULL, NULL) ||
4660 	    nla_put_notification_header(skb, type) ||
4661 	    ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
4662 	     resource_info_to_skb(skb, resource_info, true)))
4663 		goto nla_put_failure;
4664 	resource_statistics.res_stat_write_ordering = resource->write_ordering;
4665 	err = resource_statistics_to_skb(skb, &resource_statistics, !capable(CAP_SYS_ADMIN));
4666 	if (err)
4667 		goto nla_put_failure;
4668 	genlmsg_end(skb, dh);
4669 	if (multicast) {
4670 		err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
4671 		/* skb has been consumed or freed in netlink_broadcast() */
4672 		if (err && err != -ESRCH)
4673 			goto failed;
4674 	}
4675 	return;
4676 
4677 nla_put_failure:
4678 	nlmsg_free(skb);
4679 failed:
4680 	drbd_err(resource, "Error %d while broadcasting event. Event seq:%u\n",
4681 			err, seq);
4682 }
4683 
4684 void notify_device_state(struct sk_buff *skb,
4685 			 unsigned int seq,
4686 			 struct drbd_device *device,
4687 			 struct device_info *device_info,
4688 			 enum drbd_notification_type type)
4689 {
4690 	struct device_statistics device_statistics;
4691 	struct drbd_genlmsghdr *dh;
4692 	bool multicast = false;
4693 	int err;
4694 
4695 	if (!skb) {
4696 		seq = atomic_inc_return(&notify_genl_seq);
4697 		skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
4698 		err = -ENOMEM;
4699 		if (!skb)
4700 			goto failed;
4701 		multicast = true;
4702 	}
4703 
4704 	err = -EMSGSIZE;
4705 	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_DEVICE_STATE);
4706 	if (!dh)
4707 		goto nla_put_failure;
4708 	dh->minor = device->minor;
4709 	dh->ret_code = NO_ERROR;
4710 	if (nla_put_drbd_cfg_context(skb, device->resource, NULL, device) ||
4711 	    nla_put_notification_header(skb, type) ||
4712 	    ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
4713 	     device_info_to_skb(skb, device_info, true)))
4714 		goto nla_put_failure;
4715 	device_to_statistics(&device_statistics, device);
4716 	device_statistics_to_skb(skb, &device_statistics, !capable(CAP_SYS_ADMIN));
4717 	genlmsg_end(skb, dh);
4718 	if (multicast) {
4719 		err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
4720 		/* skb has been consumed or freed in netlink_broadcast() */
4721 		if (err && err != -ESRCH)
4722 			goto failed;
4723 	}
4724 	return;
4725 
4726 nla_put_failure:
4727 	nlmsg_free(skb);
4728 failed:
4729 	drbd_err(device, "Error %d while broadcasting event. Event seq:%u\n",
4730 		 err, seq);
4731 }
4732 
4733 void notify_connection_state(struct sk_buff *skb,
4734 			     unsigned int seq,
4735 			     struct drbd_connection *connection,
4736 			     struct connection_info *connection_info,
4737 			     enum drbd_notification_type type)
4738 {
4739 	struct connection_statistics connection_statistics;
4740 	struct drbd_genlmsghdr *dh;
4741 	bool multicast = false;
4742 	int err;
4743 
4744 	if (!skb) {
4745 		seq = atomic_inc_return(&notify_genl_seq);
4746 		skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
4747 		err = -ENOMEM;
4748 		if (!skb)
4749 			goto failed;
4750 		multicast = true;
4751 	}
4752 
4753 	err = -EMSGSIZE;
4754 	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_CONNECTION_STATE);
4755 	if (!dh)
4756 		goto nla_put_failure;
4757 	dh->minor = -1U;
4758 	dh->ret_code = NO_ERROR;
4759 	if (nla_put_drbd_cfg_context(skb, connection->resource, connection, NULL) ||
4760 	    nla_put_notification_header(skb, type) ||
4761 	    ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
4762 	     connection_info_to_skb(skb, connection_info, true)))
4763 		goto nla_put_failure;
4764 	connection_statistics.conn_congested = test_bit(NET_CONGESTED, &connection->flags);
4765 	connection_statistics_to_skb(skb, &connection_statistics, !capable(CAP_SYS_ADMIN));
4766 	genlmsg_end(skb, dh);
4767 	if (multicast) {
4768 		err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
4769 		/* skb has been consumed or freed in netlink_broadcast() */
4770 		if (err && err != -ESRCH)
4771 			goto failed;
4772 	}
4773 	return;
4774 
4775 nla_put_failure:
4776 	nlmsg_free(skb);
4777 failed:
4778 	drbd_err(connection, "Error %d while broadcasting event. Event seq:%u\n",
4779 		 err, seq);
4780 }
4781 
4782 void notify_peer_device_state(struct sk_buff *skb,
4783 			      unsigned int seq,
4784 			      struct drbd_peer_device *peer_device,
4785 			      struct peer_device_info *peer_device_info,
4786 			      enum drbd_notification_type type)
4787 {
4788 	struct peer_device_statistics peer_device_statistics;
4789 	struct drbd_resource *resource = peer_device->device->resource;
4790 	struct drbd_genlmsghdr *dh;
4791 	bool multicast = false;
4792 	int err;
4793 
4794 	if (!skb) {
4795 		seq = atomic_inc_return(&notify_genl_seq);
4796 		skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
4797 		err = -ENOMEM;
4798 		if (!skb)
4799 			goto failed;
4800 		multicast = true;
4801 	}
4802 
4803 	err = -EMSGSIZE;
4804 	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_PEER_DEVICE_STATE);
4805 	if (!dh)
4806 		goto nla_put_failure;
4807 	dh->minor = -1U;
4808 	dh->ret_code = NO_ERROR;
4809 	if (nla_put_drbd_cfg_context(skb, resource, peer_device->connection, peer_device->device) ||
4810 	    nla_put_notification_header(skb, type) ||
4811 	    ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
4812 	     peer_device_info_to_skb(skb, peer_device_info, true)))
4813 		goto nla_put_failure;
4814 	peer_device_to_statistics(&peer_device_statistics, peer_device);
4815 	peer_device_statistics_to_skb(skb, &peer_device_statistics, !capable(CAP_SYS_ADMIN));
4816 	genlmsg_end(skb, dh);
4817 	if (multicast) {
4818 		err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
4819 		/* skb has been consumed or freed in netlink_broadcast() */
4820 		if (err && err != -ESRCH)
4821 			goto failed;
4822 	}
4823 	return;
4824 
4825 nla_put_failure:
4826 	nlmsg_free(skb);
4827 failed:
4828 	drbd_err(peer_device, "Error %d while broadcasting event. Event seq:%u\n",
4829 		 err, seq);
4830 }
4831 
4832 void notify_helper(enum drbd_notification_type type,
4833 		   struct drbd_device *device, struct drbd_connection *connection,
4834 		   const char *name, int status)
4835 {
4836 	struct drbd_resource *resource = device ? device->resource : connection->resource;
4837 	struct drbd_helper_info helper_info;
4838 	unsigned int seq = atomic_inc_return(&notify_genl_seq);
4839 	struct sk_buff *skb = NULL;
4840 	struct drbd_genlmsghdr *dh;
4841 	int err;
4842 
4843 	strlcpy(helper_info.helper_name, name, sizeof(helper_info.helper_name));
4844 	helper_info.helper_name_len = min(strlen(name), sizeof(helper_info.helper_name));
4845 	helper_info.helper_status = status;
4846 
4847 	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
4848 	err = -ENOMEM;
4849 	if (!skb)
4850 		goto fail;
4851 
4852 	err = -EMSGSIZE;
4853 	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_HELPER);
4854 	if (!dh)
4855 		goto fail;
4856 	dh->minor = device ? device->minor : -1;
4857 	dh->ret_code = NO_ERROR;
4858 	mutex_lock(&notification_mutex);
4859 	if (nla_put_drbd_cfg_context(skb, resource, connection, device) ||
4860 	    nla_put_notification_header(skb, type) ||
4861 	    drbd_helper_info_to_skb(skb, &helper_info, true))
4862 		goto unlock_fail;
4863 	genlmsg_end(skb, dh);
4864 	err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
4865 	skb = NULL;
4866 	/* skb has been consumed or freed in netlink_broadcast() */
4867 	if (err && err != -ESRCH)
4868 		goto unlock_fail;
4869 	mutex_unlock(&notification_mutex);
4870 	return;
4871 
4872 unlock_fail:
4873 	mutex_unlock(&notification_mutex);
4874 fail:
4875 	nlmsg_free(skb);
4876 	drbd_err(resource, "Error %d while broadcasting event. Event seq:%u\n",
4877 		 err, seq);
4878 }
4879 
4880 static void notify_initial_state_done(struct sk_buff *skb, unsigned int seq)
4881 {
4882 	struct drbd_genlmsghdr *dh;
4883 	int err;
4884 
4885 	err = -EMSGSIZE;
4886 	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_INITIAL_STATE_DONE);
4887 	if (!dh)
4888 		goto nla_put_failure;
4889 	dh->minor = -1U;
4890 	dh->ret_code = NO_ERROR;
4891 	if (nla_put_notification_header(skb, NOTIFY_EXISTS))
4892 		goto nla_put_failure;
4893 	genlmsg_end(skb, dh);
4894 	return;
4895 
4896 nla_put_failure:
4897 	nlmsg_free(skb);
4898 	pr_err("Error %d sending event. Event seq:%u\n", err, seq);
4899 }
4900 
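/* Release a list of state change snapshots, as gathered by
 * drbd_adm_get_initial_state() below.
 */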
4901 static void free_state_changes(struct list_head *list)
4902 {
4903 	while (!list_empty(list)) {
4904 		struct drbd_state_change *state_change =
4905 			list_first_entry(list, struct drbd_state_change, list);
4906 		list_del(&state_change->list);
4907 		forget_state_change(state_change);
4908 	}
4909 }
4910 
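/* One notification for the resource itself, one per connection, one per
 * device, and one per peer device (one per device/connection pair).
 */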
4911 static unsigned int notifications_for_state_change(struct drbd_state_change *state_change)
4912 {
4913 	return 1 +
4914 	       state_change->n_connections +
4915 	       state_change->n_devices +
4916 	       state_change->n_devices * state_change->n_connections;
4917 }
4918 
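/* Emit one NOTIFY_EXISTS event per invocation.  The netlink_callback
 * scratch space is used as follows:
 *	args[0]	current struct drbd_state_change
 *	args[2]	netlink sequence number of the dump request
 *	args[3]	number of notifications for the current state change
 *	args[4]	index of the next notification within it
 *	args[5]	countdown over all remaining notifications, plus two
 *		(one for the "done" message, one to stop the dump)
 */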
4919 static int get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
4920 {
4921 	struct drbd_state_change *state_change = (struct drbd_state_change *)cb->args[0];
4922 	unsigned int seq = cb->args[2];
4923 	unsigned int n;
4924 	enum drbd_notification_type flags = 0;
4925 
4926 	/* There is no need for taking notification_mutex here: it doesn't
4927 	   matter if the initial state events mix with later state change
4928 	   events; we can always tell the events apart by the NOTIFY_EXISTS
4929 	   flag. */
4930 
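	/* Countdown reaching 1 means every NOTIFY_EXISTS event has been
	 * sent; emit the "initial state done" marker instead.
	 */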
4931 	cb->args[5]--;
4932 	if (cb->args[5] == 1) {
4933 		notify_initial_state_done(skb, seq);
4934 		goto out;
4935 	}
4936 	n = cb->args[4]++;
4937 	if (cb->args[4] < cb->args[3])
4938 		flags |= NOTIFY_CONTINUES;
4939 	if (n < 1) {
4940 		notify_resource_state_change(skb, seq, state_change->resource,
4941 					     NOTIFY_EXISTS | flags);
4942 		goto next;
4943 	}
4944 	n--;
4945 	if (n < state_change->n_connections) {
4946 		notify_connection_state_change(skb, seq, &state_change->connections[n],
4947 					       NOTIFY_EXISTS | flags);
4948 		goto next;
4949 	}
4950 	n -= state_change->n_connections;
4951 	if (n < state_change->n_devices) {
4952 		notify_device_state_change(skb, seq, &state_change->devices[n],
4953 					   NOTIFY_EXISTS | flags);
4954 		goto next;
4955 	}
4956 	n -= state_change->n_devices;
4957 	if (n < state_change->n_devices * state_change->n_connections) {
4958 		notify_peer_device_state_change(skb, seq, &state_change->peer_devices[n],
4959 						NOTIFY_EXISTS | flags);
4960 		goto next;
4961 	}
4962 
4963 next:
4964 	if (cb->args[4] == cb->args[3]) {
4965 		struct drbd_state_change *next_state_change =
4966 			list_entry(state_change->list.next,
4967 				   struct drbd_state_change, list);
4968 		cb->args[0] = (long)next_state_change;
4969 		cb->args[3] = notifications_for_state_change(next_state_change);
4970 		cb->args[4] = 0;
4971 	}
4972 out:
4973 	return skb->len;
4974 }
4975 
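/* .dumpit handler: the first invocation snapshots the state of all
 * resources under resources_mutex, then replays that snapshot one
 * notification per call through get_initial_state(), so the receiver
 * gets a consistent initial state even while the cluster keeps changing
 * underneath.
 */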
4976 int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
4977 {
4978 	struct drbd_resource *resource;
4979 	LIST_HEAD(head);
4980 
4981 	if (cb->args[5] >= 1) {
4982 		if (cb->args[5] > 1)
4983 			return get_initial_state(skb, cb);
4984 		if (cb->args[0]) {
4985 			struct drbd_state_change *state_change =
4986 				(struct drbd_state_change *)cb->args[0];
4987 
4988 			/* connect list to head */
4989 			list_add(&head, &state_change->list);
4990 			free_state_changes(&head);
4991 		}
4992 		return 0;
4993 	}
4994 
4995 	cb->args[5] = 2;  /* countdown base: "done" message + final stop */
4996 	mutex_lock(&resources_mutex);
4997 	for_each_resource(resource, &drbd_resources) {
4998 		struct drbd_state_change *state_change;
4999 
5000 		state_change = remember_old_state(resource, GFP_KERNEL);
5001 		if (!state_change) {
5002 			if (!list_empty(&head))
5003 				free_state_changes(&head);
5004 			mutex_unlock(&resources_mutex);
5005 			return -ENOMEM;
5006 		}
5007 		copy_old_to_new_state_change(state_change);
5008 		list_add_tail(&state_change->list, &head);
5009 		cb->args[5] += notifications_for_state_change(state_change);
5010 	}
5011 	mutex_unlock(&resources_mutex);
5012 
5013 	if (!list_empty(&head)) {
5014 		struct drbd_state_change *state_change =
5015 			list_entry(head.next, struct drbd_state_change, list);
5016 		cb->args[0] = (long)state_change;
5017 		cb->args[3] = notifications_for_state_change(state_change);
5018 		list_del(&head);  /* detach list from head */
5019 	}
5020 
5021 	cb->args[2] = cb->nlh->nlmsg_seq;
5022 	return get_initial_state(skb, cb);
5023 }
5024