/*
   drbd_nl.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/connector.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"
#include "drbd_wrappers.h"
#include <asm/unaligned.h>
#include <linux/drbd_tag_magic.h>
#include <linux/drbd_limits.h>

static unsigned short *tl_add_blob(unsigned short *, enum drbd_tags, const void *, int);
static unsigned short *tl_add_str(unsigned short *, enum drbd_tags, const char *);
static unsigned short *tl_add_int(unsigned short *, enum drbd_tags, const void *);

/* see get_sb_bdev and bd_claim */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";

/* Generate the tag_list to struct functions */
#define NL_PACKET(name, number, fields) \
static int name ## _from_tags(struct drbd_conf *mdev, \
	unsigned short *tags, struct name *arg) __attribute__ ((unused)); \
static int name ## _from_tags(struct drbd_conf *mdev, \
	unsigned short *tags, struct name *arg) \
{ \
	int tag; \
	int dlen; \
	\
	while ((tag = get_unaligned(tags++)) != TT_END) { \
		dlen = get_unaligned(tags++); \
		switch (tag_number(tag)) { \
		fields \
		default: \
			if (tag & T_MANDATORY) { \
				dev_err(DEV, "Unknown tag: %d\n", tag_number(tag)); \
				return 0; \
			} \
		} \
		tags = (unsigned short *)((char *)tags + dlen); \
	} \
	return 1; \
}
#define NL_INTEGER(pn, pr, member) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_INTEGER ); */ \
		arg->member = get_unaligned((int *)(tags)); \
		break;
#define NL_INT64(pn, pr, member) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_INT64 ); */ \
		arg->member = get_unaligned((u64 *)(tags)); \
		break;
#define NL_BIT(pn, pr, member) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_BIT ); */ \
		arg->member = *(char *)(tags) ? 1 : 0; \
		break;
#define NL_STRING(pn, pr, member, len) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_STRING ); */ \
		if (dlen > len) { \
			dev_err(DEV, "arg too long: %s (%u wanted, max len: %u bytes)\n", \
				#member, dlen, (unsigned int)len); \
			return 0; \
		} \
		arg->member ## _len = dlen; \
		memcpy(arg->member, tags, min_t(size_t, dlen, len)); \
		break;
#include "linux/drbd_nl.h"

/* Generate the struct to tag_list functions */
#define NL_PACKET(name, number, fields) \
static unsigned short* \
name ## _to_tags(struct drbd_conf *mdev, \
	struct name *arg, unsigned short *tags) __attribute__ ((unused)); \
static unsigned short* \
name ## _to_tags(struct drbd_conf *mdev, \
	struct name *arg, unsigned short *tags) \
{ \
	fields \
	return tags; \
}

#define NL_INTEGER(pn, pr, member) \
	put_unaligned(pn | pr | TT_INTEGER, tags++); \
	put_unaligned(sizeof(int), tags++); \
	put_unaligned(arg->member, (int *)tags); \
	tags = (unsigned short *)((char *)tags+sizeof(int));
#define NL_INT64(pn, pr, member) \
	put_unaligned(pn | pr | TT_INT64, tags++); \
	put_unaligned(sizeof(u64), tags++); \
	put_unaligned(arg->member, (u64 *)tags); \
	tags = (unsigned short *)((char *)tags+sizeof(u64));
#define NL_BIT(pn, pr, member) \
	put_unaligned(pn | pr | TT_BIT, tags++); \
	put_unaligned(sizeof(char), tags++); \
	*(char *)tags = arg->member; \
	tags = (unsigned short *)((char *)tags+sizeof(char));
#define NL_STRING(pn, pr, member, len) \
	put_unaligned(pn | pr | TT_STRING, tags++); \
	put_unaligned(arg->member ## _len, tags++); \
	memcpy(tags, arg->member, arg->member ## _len); \
	tags = (unsigned short *)((char *)tags + arg->member ## _len);
#include "linux/drbd_nl.h"

void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name);
void drbd_nl_send_reply(struct cn_msg *, int);

int drbd_khelper(struct drbd_conf *mdev, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			NULL, /* Will be set to address family */
			NULL, /* Will be set to address */
			NULL };

	char mb[12], af[20], ad[60], *afs;
	char *argv[] = {usermode_helper, cmd, mb, NULL };
	int ret;

	snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));

	if (get_net_conf(mdev)) {
		switch (((struct sockaddr *)mdev->net_conf->peer_addr)->sa_family) {
		case AF_INET6:
			afs = "ipv6";
			snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI6",
				 &((struct sockaddr_in6 *)mdev->net_conf->peer_addr)->sin6_addr);
			break;
		case AF_INET:
			afs = "ipv4";
			snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4",
				 &((struct sockaddr_in *)mdev->net_conf->peer_addr)->sin_addr);
			break;
		default:
			afs = "ssocks";
			snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4",
				 &((struct sockaddr_in *)mdev->net_conf->peer_addr)->sin_addr);
		}
		snprintf(af, 20, "DRBD_PEER_AF=%s", afs);
		envp[3] = af;
		envp[4] = ad;
		put_net_conf(mdev);
	}

	dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);

	drbd_bcast_ev_helper(mdev, cmd);
	ret = call_usermodehelper(usermode_helper, argv, envp, 1);
	if (ret)
		dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	else
		dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}

enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev)
{
	char *ex_to_string;
	int r;
	enum drbd_disk_state nps;
	enum drbd_fencing_p fp;

	D_ASSERT(mdev->state.pdsk == D_UNKNOWN);

	if (get_ldev_if_state(mdev, D_CONSISTENT)) {
		fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
	} else {
		dev_warn(DEV, "Not fencing peer, I'm not even Consistent myself.\n");
		return mdev->state.pdsk;
	}

	if (fp == FP_STONITH)
		_drbd_request_state(mdev, NS(susp, 1), CS_WAIT_COMPLETE);

	r = drbd_khelper(mdev, "fence-peer");

	switch ((r>>8) & 0xff) {
	case 3: /* peer is inconsistent */
		ex_to_string = "peer is inconsistent or worse";
		nps = D_INCONSISTENT;
		break;
	case 4: /* peer got outdated, or was already outdated */
		ex_to_string = "peer was fenced";
		nps = D_OUTDATED;
		break;
	case 5: /* peer was down */
		if (mdev->state.disk == D_UP_TO_DATE) {
			/* we will(have) create(d) a new UUID anyways... */
			ex_to_string = "peer is unreachable, assumed to be dead";
			nps = D_OUTDATED;
		} else {
			ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
			nps = mdev->state.pdsk;
		}
		break;
	case 6: /* Peer is primary, voluntarily outdate myself.
		 * This is useful when an unconnected R_SECONDARY is asked to
		 * become R_PRIMARY, but finds the other peer being active. */
		ex_to_string = "peer is active";
		dev_warn(DEV, "Peer is primary, outdating myself.\n");
		nps = D_UNKNOWN;
		_drbd_request_state(mdev, NS(disk, D_OUTDATED), CS_WAIT_COMPLETE);
		break;
	case 7:
		if (fp != FP_STONITH)
			dev_err(DEV, "fence-peer() = 7 && fencing != Stonith !!!\n");
		ex_to_string = "peer was stonithed";
		nps = D_OUTDATED;
		break;
	default:
		/* The script is broken ... */
		nps = D_UNKNOWN;
		dev_err(DEV, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
		return nps;
	}

	dev_info(DEV, "fence-peer helper returned %d (%s)\n",
			(r>>8) & 0xff, ex_to_string);
	return nps;
}


int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
{
	const int max_tries = 4;
	int r = 0;
	int try = 0;
	int forced = 0;
	union drbd_state mask, val;
	enum drbd_disk_state nps;

	if (new_role == R_PRIMARY)
		request_ping(mdev); /* Detect a dead peer ASAP */

	mutex_lock(&mdev->state_mutex);

	mask.i = 0; mask.role = R_MASK;
	val.i  = 0; val.role  = new_role;

	while (try++ < max_tries) {
		r = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);

		/* in case we first succeeded to outdate,
		 * but now suddenly could establish a connection */
		if (r == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
			val.pdsk = 0;
			mask.pdsk = 0;
			continue;
		}

		if (r == SS_NO_UP_TO_DATE_DISK && force &&
		    (mdev->state.disk == D_INCONSISTENT ||
		     mdev->state.disk == D_OUTDATED)) {
			mask.disk = D_MASK;
			val.disk  = D_UP_TO_DATE;
			forced = 1;
			continue;
		}

		if (r == SS_NO_UP_TO_DATE_DISK &&
		    mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
			D_ASSERT(mdev->state.pdsk == D_UNKNOWN);
			nps = drbd_try_outdate_peer(mdev);

			if (nps == D_OUTDATED || nps == D_INCONSISTENT) {
				val.disk = D_UP_TO_DATE;
				mask.disk = D_MASK;
			}

			val.pdsk = nps;
			mask.pdsk = D_MASK;

			continue;
		}

		if (r == SS_NOTHING_TO_DO)
			goto fail;
		if (r == SS_PRIMARY_NOP && mask.pdsk == 0) {
			nps = drbd_try_outdate_peer(mdev);

			if (force && nps > D_OUTDATED) {
				dev_warn(DEV, "Forced into split brain situation!\n");
				nps = D_OUTDATED;
			}

			mask.pdsk = D_MASK;
			val.pdsk  = nps;

			continue;
		}
		if (r == SS_TWO_PRIMARIES) {
			/* Maybe the peer is detected as dead very soon...
			   retry at most once more in this case. */
			__set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout((mdev->net_conf->ping_timeo+1)*HZ/10);
			if (try < max_tries)
				try = max_tries - 1;
			continue;
		}
		if (r < SS_SUCCESS) {
			r = _drbd_request_state(mdev, mask, val,
						CS_VERBOSE + CS_WAIT_COMPLETE);
			if (r < SS_SUCCESS)
				goto fail;
		}
		break;
	}

	if (r < SS_SUCCESS)
		goto fail;

	if (forced)
		dev_warn(DEV, "Forced to consider local data as UpToDate!\n");

	/* Wait until nothing is on the fly :) */
	wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);

	if (new_role == R_SECONDARY) {
		set_disk_ro(mdev->vdisk, TRUE);
		if (get_ldev(mdev)) {
			mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
			put_ldev(mdev);
		}
	} else {
		if (get_net_conf(mdev)) {
			mdev->net_conf->want_lose = 0;
			put_net_conf(mdev);
		}
		set_disk_ro(mdev->vdisk, FALSE);
		if (get_ldev(mdev)) {
			if (((mdev->state.conn < C_CONNECTED ||
			       mdev->state.pdsk <= D_FAILED)
			      && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced)
				drbd_uuid_new_current(mdev);

			mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
			put_ldev(mdev);
		}
	}

	if ((new_role == R_SECONDARY) && get_ldev(mdev)) {
		drbd_al_to_on_disk_bm(mdev);
		put_ldev(mdev);
	}

	if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
		/* if this was forced, we should consider sync */
		if (forced)
			drbd_send_uuids(mdev);
		drbd_send_state(mdev);
	}

	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
 fail:
	mutex_unlock(&mdev->state_mutex);
	return r;
}


static int drbd_nl_primary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			   struct drbd_nl_cfg_reply *reply)
{
	struct primary primary_args;

	memset(&primary_args, 0, sizeof(struct primary));
	if (!primary_from_tags(mdev, nlp->tag_list, &primary_args)) {
		reply->ret_code = ERR_MANDATORY_TAG;
		return 0;
	}

	reply->ret_code =
		drbd_set_role(mdev, R_PRIMARY, primary_args.overwrite_peer);

	return 0;
}

static int drbd_nl_secondary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	reply->ret_code = drbd_set_role(mdev, R_SECONDARY, 0);

	return 0;
}

/* initializes the md.*_offset members, so we are able to find
 * the on disk meta data */
static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
				       struct drbd_backing_dev *bdev)
{
	sector_t md_size_sect = 0;
	switch (bdev->dc.meta_dev_idx) {
	default:
		/* v07 style fixed size indexed meta data */
		bdev->md.md_size_sect = MD_RESERVED_SECT;
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		/* just occupy the full device; unit: sectors */
		bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
		bdev->md.md_offset = 0;
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		/* al size is still fixed */
		bdev->md.al_offset = -MD_AL_MAX_SIZE;
		/* we need (slightly less than) ~ this much bitmap sectors: */
		md_size_sect = drbd_get_capacity(bdev->backing_bdev);
		md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
		md_size_sect = BM_SECT_TO_EXT(md_size_sect);
		md_size_sect = ALIGN(md_size_sect, 8);

		/* plus the "drbd meta data super block",
		 * and the activity log; */
		md_size_sect += MD_BM_OFFSET;

		bdev->md.md_size_sect = md_size_sect;
		/* bitmap offset is adjusted by 'super' block size */
		bdev->md.bm_offset = -md_size_sect + MD_AL_OFFSET;
		break;
	}
}

char *ppsize(char *buf, unsigned long long size)
{
	/* Needs 9 bytes at max. */
	static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
	int base = 0;
	while (size >= 10000) {
		/* shift + round */
		size = (size >> 10) + !!(size & (1<<9));
		base++;
	}
	sprintf(buf, "%lu %cB", (long)size, units[base]);

	return buf;
}

/* there is still a theoretical deadlock when called from receiver
 * on a D_INCONSISTENT R_PRIMARY:
 * remote READ does inc_ap_bio, receiver would need to receive answer
 * packet from remote to dec_ap_bio again.
 * receiver receive_sizes(), comes here,
 * waits for ap_bio_cnt == 0. -> deadlock.
 * but this cannot happen, actually, because:
 * R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 * (not connected, or bad/no disk on peer):
 * see drbd_fail_request_early, ap_bio_cnt is zero.
 * R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 * peer may not initiate a resize.
 */
void drbd_suspend_io(struct drbd_conf *mdev)
{
	set_bit(SUSPEND_IO, &mdev->flags);
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
}

void drbd_resume_io(struct drbd_conf *mdev)
{
	clear_bit(SUSPEND_IO, &mdev->flags);
	wake_up(&mdev->misc_wait);
}

/**
 * drbd_determine_dev_size() - Sets the right device size obeying all constraints
 * @mdev:	DRBD device.
 *
 * Returns 0 on success, negative return values indicate errors.
 * You should call drbd_md_sync() after calling this function.
 */
enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev) __must_hold(local)
{
	sector_t prev_first_sect, prev_size; /* previous meta location */
	sector_t la_size;
	sector_t size;
	char ppb[10];

	int md_moved, la_size_changed;
	enum determine_dev_size rv = unchanged;

	/* race:
	 * application request passes inc_ap_bio,
	 * but then cannot get an AL-reference.
	 * this function later may wait on ap_bio_cnt == 0. -> deadlock.
	 *
	 * to avoid that:
	 * Suspend IO right here.
	 * still lock the act_log to not trigger ASSERTs there.
	 */
	drbd_suspend_io(mdev);

	/* no wait necessary anymore, actually we could assert that */
	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));

	prev_first_sect = drbd_md_first_sector(mdev->ldev);
	prev_size = mdev->ldev->md.md_size_sect;
	la_size = mdev->ldev->md.la_size_sect;

	/* TODO: should only be some assert here, not (re)init... */
	drbd_md_set_sector_offsets(mdev, mdev->ldev);

	size = drbd_new_dev_size(mdev, mdev->ldev);

	if (drbd_get_capacity(mdev->this_bdev) != size ||
	    drbd_bm_capacity(mdev) != size) {
		int err;
		err = drbd_bm_resize(mdev, size);
		if (unlikely(err)) {
			/* currently there is only one error: ENOMEM! */
			size = drbd_bm_capacity(mdev)>>1;
			if (size == 0) {
				dev_err(DEV, "OUT OF MEMORY! "
					"Could not allocate bitmap!\n");
			} else {
				dev_err(DEV, "BM resizing failed. "
" 558 "Leaving size unchanged at size = %lu KB\n", 559 (unsigned long)size); 560 } 561 rv = dev_size_error; 562 } 563 /* racy, see comments above. */ 564 drbd_set_my_capacity(mdev, size); 565 mdev->ldev->md.la_size_sect = size; 566 dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1), 567 (unsigned long long)size>>1); 568 } 569 if (rv == dev_size_error) 570 goto out; 571 572 la_size_changed = (la_size != mdev->ldev->md.la_size_sect); 573 574 md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev) 575 || prev_size != mdev->ldev->md.md_size_sect; 576 577 if (la_size_changed || md_moved) { 578 drbd_al_shrink(mdev); /* All extents inactive. */ 579 dev_info(DEV, "Writing the whole bitmap, %s\n", 580 la_size_changed && md_moved ? "size changed and md moved" : 581 la_size_changed ? "size changed" : "md moved"); 582 rv = drbd_bitmap_io(mdev, &drbd_bm_write, "size changed"); /* does drbd_resume_io() ! */ 583 drbd_md_mark_dirty(mdev); 584 } 585 586 if (size > la_size) 587 rv = grew; 588 if (size < la_size) 589 rv = shrunk; 590 out: 591 lc_unlock(mdev->act_log); 592 wake_up(&mdev->al_wait); 593 drbd_resume_io(mdev); 594 595 return rv; 596 } 597 598 sector_t 599 drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev) 600 { 601 sector_t p_size = mdev->p_size; /* partner's disk size. */ 602 sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */ 603 sector_t m_size; /* my size */ 604 sector_t u_size = bdev->dc.disk_size; /* size requested by user. */ 605 sector_t size = 0; 606 607 m_size = drbd_get_max_capacity(bdev); 608 609 if (p_size && m_size) { 610 size = min_t(sector_t, p_size, m_size); 611 } else { 612 if (la_size) { 613 size = la_size; 614 if (m_size && m_size < size) 615 size = m_size; 616 if (p_size && p_size < size) 617 size = p_size; 618 } else { 619 if (m_size) 620 size = m_size; 621 if (p_size) 622 size = p_size; 623 } 624 } 625 626 if (size == 0) 627 dev_err(DEV, "Both nodes diskless!\n"); 628 629 if (u_size) { 630 if (u_size > size) 631 dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n", 632 (unsigned long)u_size>>1, (unsigned long)size>>1); 633 else 634 size = u_size; 635 } 636 637 return size; 638 } 639 640 /** 641 * drbd_check_al_size() - Ensures that the AL is of the right size 642 * @mdev: DRBD device. 643 * 644 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation 645 * failed, and 0 on success. You should call drbd_md_sync() after you called 646 * this function. 
 */
static int drbd_check_al_size(struct drbd_conf *mdev)
{
	struct lru_cache *n, *t;
	struct lc_element *e;
	unsigned int in_use;
	int i;

	ERR_IF(mdev->sync_conf.al_extents < 7)
		mdev->sync_conf.al_extents = 127;

	if (mdev->act_log &&
	    mdev->act_log->nr_elements == mdev->sync_conf.al_extents)
		return 0;

	in_use = 0;
	t = mdev->act_log;
	n = lc_create("act_log", drbd_al_ext_cache,
		mdev->sync_conf.al_extents, sizeof(struct lc_element), 0);

	if (n == NULL) {
		dev_err(DEV, "Cannot allocate act_log lru!\n");
		return -ENOMEM;
	}
	spin_lock_irq(&mdev->al_lock);
	if (t) {
		for (i = 0; i < t->nr_elements; i++) {
			e = lc_element_by_index(t, i);
			if (e->refcnt)
				dev_err(DEV, "refcnt(%d)==%d\n",
				    e->lc_number, e->refcnt);
			in_use += e->refcnt;
		}
	}
	if (!in_use)
		mdev->act_log = n;
	spin_unlock_irq(&mdev->al_lock);
	if (in_use) {
		dev_err(DEV, "Activity log still in use!\n");
		lc_destroy(n);
		return -EBUSY;
	} else {
		if (t)
			lc_destroy(t);
	}
	drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elements */
	return 0;
}

void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_seg_s) __must_hold(local)
{
	struct request_queue * const q = mdev->rq_queue;
	struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
	int max_segments = mdev->ldev->dc.max_bio_bvecs;

	if (b->merge_bvec_fn && !mdev->ldev->dc.use_bmbv)
		max_seg_s = PAGE_SIZE;

	max_seg_s = min(queue_max_sectors(b) * queue_logical_block_size(b), max_seg_s);

	blk_queue_max_sectors(q, max_seg_s >> 9);
	blk_queue_max_phys_segments(q, max_segments ? max_segments : MAX_PHYS_SEGMENTS);
	blk_queue_max_hw_segments(q, max_segments ? max_segments : MAX_HW_SEGMENTS);
	blk_queue_max_segment_size(q, max_seg_s);
	blk_queue_logical_block_size(q, 512);
	blk_queue_segment_boundary(q, PAGE_SIZE-1);
	blk_stack_limits(&q->limits, &b->limits, 0);

	if (b->merge_bvec_fn)
		dev_warn(DEV, "Backing device's merge_bvec_fn() = %p\n",
		     b->merge_bvec_fn);
	dev_info(DEV, "max_segment_size ( = BIO size ) = %u\n", queue_max_segment_size(q));

	if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
		dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
		     q->backing_dev_info.ra_pages,
		     b->backing_dev_info.ra_pages);
		q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
	}
}

/* serialize deconfig (worker exiting, doing cleanup)
 * and reconfig (drbdsetup disk, drbdsetup net)
 *
 * wait for a potentially exiting worker, then restart it,
 * or start a new one.
 */
static void drbd_reconfig_start(struct drbd_conf *mdev)
{
	wait_event(mdev->state_wait, !test_and_set_bit(CONFIG_PENDING, &mdev->flags));
	wait_event(mdev->state_wait, !test_bit(DEVICE_DYING, &mdev->flags));
	drbd_thread_start(&mdev->worker);
}

/* if still unconfigured, stops worker again.
 * if configured now, clears CONFIG_PENDING.
 * wakes potential waiters */
static void drbd_reconfig_done(struct drbd_conf *mdev)
{
	spin_lock_irq(&mdev->req_lock);
	if (mdev->state.disk == D_DISKLESS &&
	    mdev->state.conn == C_STANDALONE &&
	    mdev->state.role == R_SECONDARY) {
		set_bit(DEVICE_DYING, &mdev->flags);
		drbd_thread_stop_nowait(&mdev->worker);
	} else
		clear_bit(CONFIG_PENDING, &mdev->flags);
	spin_unlock_irq(&mdev->req_lock);
	wake_up(&mdev->state_wait);
}

/* does always return 0;
 * interesting return code is in reply->ret_code */
static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	enum drbd_ret_codes retcode;
	enum determine_dev_size dd;
	sector_t max_possible_sectors;
	sector_t min_md_device_sectors;
	struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
	struct inode *inode, *inode2;
	struct lru_cache *resync_lru = NULL;
	union drbd_state ns, os;
	int rv;
	int cp_discovered = 0;
	int logical_block_size;

	drbd_reconfig_start(mdev);

	/* if you want to reconfigure, please tear down first */
	if (mdev->state.disk > D_DISKLESS) {
		retcode = ERR_DISK_CONFIGURED;
		goto fail;
	}

	/* allocation not in the IO path, cqueue thread context */
	nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
	if (!nbc) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	nbc->dc.disk_size     = DRBD_DISK_SIZE_SECT_DEF;
	nbc->dc.on_io_error   = DRBD_ON_IO_ERROR_DEF;
	nbc->dc.fencing       = DRBD_FENCING_DEF;
	nbc->dc.max_bio_bvecs = DRBD_MAX_BIO_BVECS_DEF;

	if (!disk_conf_from_tags(mdev, nlp->tag_list, &nbc->dc)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	if (nbc->dc.meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
		retcode = ERR_MD_IDX_INVALID;
		goto fail;
	}

	nbc->lo_file = filp_open(nbc->dc.backing_dev, O_RDWR, 0);
	if (IS_ERR(nbc->lo_file)) {
		dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.backing_dev,
		    PTR_ERR(nbc->lo_file));
		nbc->lo_file = NULL;
		retcode = ERR_OPEN_DISK;
		goto fail;
	}

	inode = nbc->lo_file->f_dentry->d_inode;

	if (!S_ISBLK(inode->i_mode)) {
		retcode = ERR_DISK_NOT_BDEV;
		goto fail;
	}

	nbc->md_file = filp_open(nbc->dc.meta_dev, O_RDWR, 0);
	if (IS_ERR(nbc->md_file)) {
		dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.meta_dev,
		    PTR_ERR(nbc->md_file));
		nbc->md_file = NULL;
		retcode = ERR_OPEN_MD_DISK;
		goto fail;
	}

	inode2 = nbc->md_file->f_dentry->d_inode;

	if (!S_ISBLK(inode2->i_mode)) {
		retcode = ERR_MD_NOT_BDEV;
		goto fail;
	}

	nbc->backing_bdev = inode->i_bdev;
	if (bd_claim(nbc->backing_bdev, mdev)) {
		printk(KERN_ERR "drbd: bd_claim(%p,%p); failed [%p;%p;%u]\n",
		       nbc->backing_bdev, mdev,
		       nbc->backing_bdev->bd_holder,
		       nbc->backing_bdev->bd_contains->bd_holder,
		       nbc->backing_bdev->bd_holders);
		retcode = ERR_BDCLAIM_DISK;
		goto fail;
	}

	resync_lru = lc_create("resync", drbd_bm_ext_cache,
			61, sizeof(struct bm_extent),
			offsetof(struct bm_extent, lce));
	if (!resync_lru) {
		retcode = ERR_NOMEM;
		goto release_bdev_fail;
	}

	/* meta_dev_idx >= 0: external fixed size,
	 * possibly multiple drbd sharing one meta device.
	 * TODO in that case, paranoia check that [md_bdev, meta_dev_idx] is
	 * not yet used by some other drbd minor!
	 * (if you use drbd.conf + drbdadm,
	 * that should check it for you already; but if you don't, or someone
	 * fooled it, we need to double check here) */
	nbc->md_bdev = inode2->i_bdev;
	if (bd_claim(nbc->md_bdev, (nbc->dc.meta_dev_idx < 0) ? (void *)mdev
				: (void *) drbd_m_holder)) {
		retcode = ERR_BDCLAIM_MD_DISK;
		goto release_bdev_fail;
	}

	if ((nbc->backing_bdev == nbc->md_bdev) !=
	    (nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
	     nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
		retcode = ERR_MD_IDX_INVALID;
		goto release_bdev2_fail;
	}

	/* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
	drbd_md_set_sector_offsets(mdev, nbc);

	if (drbd_get_max_capacity(nbc) < nbc->dc.disk_size) {
		dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
			(unsigned long long) drbd_get_max_capacity(nbc),
			(unsigned long long) nbc->dc.disk_size);
		retcode = ERR_DISK_TO_SMALL;
		goto release_bdev2_fail;
	}

	if (nbc->dc.meta_dev_idx < 0) {
		max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
		/* at least one MB, otherwise it does not make sense */
		min_md_device_sectors = (2<<10);
	} else {
		max_possible_sectors = DRBD_MAX_SECTORS;
		min_md_device_sectors = MD_RESERVED_SECT * (nbc->dc.meta_dev_idx + 1);
	}

	if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
		retcode = ERR_MD_DISK_TO_SMALL;
		dev_warn(DEV, "refusing attach: md-device too small, "
			"at least %llu sectors needed for this meta-disk type\n",
			(unsigned long long) min_md_device_sectors);
		goto release_bdev2_fail;
	}

	/* Make sure the new disk is big enough
	 * (we may currently be R_PRIMARY with no local disk...) */
	if (drbd_get_max_capacity(nbc) <
	    drbd_get_capacity(mdev->this_bdev)) {
		retcode = ERR_DISK_TO_SMALL;
		goto release_bdev2_fail;
	}

	nbc->known_size = drbd_get_capacity(nbc->backing_bdev);

	if (nbc->known_size > max_possible_sectors) {
		dev_warn(DEV, "==> truncating very big lower level device "
			"to currently maximum possible %llu sectors <==\n",
			(unsigned long long) max_possible_sectors);
		if (nbc->dc.meta_dev_idx >= 0)
			dev_warn(DEV, "==>> using internal or flexible "
				      "meta data may help <<==\n");
	}

	drbd_suspend_io(mdev);
	/* also wait for the last barrier ack. */
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt));
	/* and for any other previously queued work */
	drbd_flush_workqueue(mdev);

	retcode = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
	drbd_resume_io(mdev);
	if (retcode < SS_SUCCESS)
		goto release_bdev2_fail;

	if (!get_ldev_if_state(mdev, D_ATTACHING))
		goto force_diskless;

	drbd_md_set_sector_offsets(mdev, nbc);

	if (!mdev->bitmap) {
		if (drbd_bm_init(mdev)) {
			retcode = ERR_NOMEM;
			goto force_diskless_dec;
		}
	}

	retcode = drbd_md_read(mdev, nbc);
	if (retcode != NO_ERROR)
		goto force_diskless_dec;

	if (mdev->state.conn < C_CONNECTED &&
	    mdev->state.role == R_PRIMARY &&
	    (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
		dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
		    (unsigned long long)mdev->ed_uuid);
		retcode = ERR_DATA_NOT_CURRENT;
		goto force_diskless_dec;
	}

	/* Since we are diskless, fix the activity log first... */
	if (drbd_check_al_size(mdev)) {
		retcode = ERR_NOMEM;
		goto force_diskless_dec;
	}

	/* Prevent shrinking of consistent devices ! */
	if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
	   drbd_new_dev_size(mdev, nbc) < nbc->md.la_size_sect) {
		dev_warn(DEV, "refusing to truncate a consistent device\n");
		retcode = ERR_DISK_TO_SMALL;
		goto force_diskless_dec;
	}

	if (!drbd_al_read_log(mdev, nbc)) {
		retcode = ERR_IO_MD_DISK;
		goto force_diskless_dec;
	}

	/* allocate a second IO page if logical_block_size != 512 */
	logical_block_size = bdev_logical_block_size(nbc->md_bdev);
	if (logical_block_size == 0)
		logical_block_size = MD_SECTOR_SIZE;

	if (logical_block_size != MD_SECTOR_SIZE) {
		if (!mdev->md_io_tmpp) {
			struct page *page = alloc_page(GFP_NOIO);
			if (!page)
				goto force_diskless_dec;

			dev_warn(DEV, "Meta data's bdev logical_block_size = %d != %d\n",
			     logical_block_size, MD_SECTOR_SIZE);
			dev_warn(DEV, "Workaround engaged (has performance impact).\n");

			mdev->md_io_tmpp = page;
		}
	}

	/* Reset the "barriers don't work" bits here, then force meta data to
	 * be written, to ensure we determine if barriers are supported. */
	if (nbc->dc.no_md_flush)
		set_bit(MD_NO_BARRIER, &mdev->flags);
	else
		clear_bit(MD_NO_BARRIER, &mdev->flags);

	/* Point of no return reached.
	 * Devices and memory are no longer released by error cleanup below.
	 * now mdev takes over responsibility, and the state engine should
	 * clean it up somewhere.  */
	D_ASSERT(mdev->ldev == NULL);
	mdev->ldev = nbc;
	mdev->resync = resync_lru;
	nbc = NULL;
	resync_lru = NULL;

	mdev->write_ordering = WO_bio_barrier;
	drbd_bump_write_ordering(mdev, WO_bio_barrier);

	if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
		set_bit(CRASHED_PRIMARY, &mdev->flags);
	else
		clear_bit(CRASHED_PRIMARY, &mdev->flags);

	if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND)) {
		set_bit(CRASHED_PRIMARY, &mdev->flags);
		cp_discovered = 1;
	}

	mdev->send_cnt = 0;
	mdev->recv_cnt = 0;
	mdev->read_cnt = 0;
	mdev->writ_cnt = 0;

	drbd_setup_queue_param(mdev, DRBD_MAX_SEGMENT_SIZE);

	/* If I am currently not R_PRIMARY,
	 * but meta data primary indicator is set,
	 * I just now recover from a hard crash,
	 * and have been R_PRIMARY before that crash.
	 *
	 * Now, if I had no connection before that crash
	 * (have been degraded R_PRIMARY), chances are that
	 * I won't find my peer now either.
	 *
	 * In that case, and _only_ in that case,
	 * we use the degr-wfc-timeout instead of the default,
	 * so we can automatically recover from a crash of a
	 * degraded but active "cluster" after a certain timeout.
	 */
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	if (mdev->state.role != R_PRIMARY &&
	     drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
	    !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
		set_bit(USE_DEGR_WFC_T, &mdev->flags);

	dd = drbd_determin_dev_size(mdev);
	if (dd == dev_size_error) {
		retcode = ERR_NOMEM_BITMAP;
		goto force_diskless_dec;
	} else if (dd == grew)
		set_bit(RESYNC_AFTER_NEG, &mdev->flags);

	if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
		dev_info(DEV, "Assuming that all blocks are out of sync "
		     "(aka FullSync)\n");
		if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from attaching")) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	} else {
		if (drbd_bitmap_io(mdev, &drbd_bm_read, "read from attaching") < 0) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	}

	if (cp_discovered) {
		drbd_al_apply_to_bm(mdev);
		drbd_al_to_on_disk_bm(mdev);
	}

	spin_lock_irq(&mdev->req_lock);
	os = mdev->state;
	ns.i = os.i;
	/* If MDF_CONSISTENT is not set go into inconsistent state,
	   otherwise investigate MDF_WasUpToDate...
	   If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
	   otherwise into D_CONSISTENT state.
	*/
	if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) {
		if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE))
			ns.disk = D_CONSISTENT;
		else
			ns.disk = D_OUTDATED;
	} else {
		ns.disk = D_INCONSISTENT;
	}

	if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
		ns.pdsk = D_OUTDATED;

	if ( ns.disk == D_CONSISTENT &&
	    (ns.pdsk == D_OUTDATED || mdev->ldev->dc.fencing == FP_DONT_CARE))
		ns.disk = D_UP_TO_DATE;

	/* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
	   MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
	   this point, because drbd_request_state() modifies these
	   flags. */

	/* In case we are C_CONNECTED postpone any decision on the new disk
	   state after the negotiation phase. */
	if (mdev->state.conn == C_CONNECTED) {
		mdev->new_state_tmp.i = ns.i;
		ns.i = os.i;
		ns.disk = D_NEGOTIATING;
	}

	rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
	ns = mdev->state;
	spin_unlock_irq(&mdev->req_lock);

	if (rv < SS_SUCCESS)
		goto force_diskless_dec;

	if (mdev->state.role == R_PRIMARY)
		mdev->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
	else
		mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;

	drbd_md_mark_dirty(mdev);
	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
	put_ldev(mdev);
	reply->ret_code = retcode;
	drbd_reconfig_done(mdev);
	return 0;

 force_diskless_dec:
	put_ldev(mdev);
 force_diskless:
	drbd_force_state(mdev, NS(disk, D_DISKLESS));
	drbd_md_sync(mdev);
 release_bdev2_fail:
	if (nbc)
		bd_release(nbc->md_bdev);
 release_bdev_fail:
	if (nbc)
		bd_release(nbc->backing_bdev);
 fail:
	if (nbc) {
		if (nbc->lo_file)
			fput(nbc->lo_file);
		if (nbc->md_file)
			fput(nbc->md_file);
		kfree(nbc);
	}
	lc_destroy(resync_lru);

	reply->ret_code = retcode;
	drbd_reconfig_done(mdev);
	return 0;
}

static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			  struct drbd_nl_cfg_reply *reply)
{
	reply->ret_code = drbd_request_state(mdev, NS(disk, D_DISKLESS));
	return 0;
}

static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			    struct drbd_nl_cfg_reply *reply)
{
	int i, ns;
	enum drbd_ret_codes retcode;
	struct net_conf *new_conf = NULL;
	struct crypto_hash *tfm = NULL;
	struct crypto_hash *integrity_w_tfm = NULL;
	struct crypto_hash *integrity_r_tfm = NULL;
	struct hlist_head *new_tl_hash = NULL;
	struct hlist_head *new_ee_hash = NULL;
	struct drbd_conf *odev;
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	void *int_dig_out = NULL;
	void *int_dig_in = NULL;
	void *int_dig_vv = NULL;
	struct sockaddr *new_my_addr, *new_peer_addr, *taken_addr;

	drbd_reconfig_start(mdev);

	if (mdev->state.conn > C_STANDALONE) {
		retcode = ERR_NET_CONFIGURED;
		goto fail;
	}

	/* allocation not in the IO path, cqueue thread context */
	new_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
	if (!new_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	memset(new_conf, 0, sizeof(struct net_conf));
	new_conf->timeout	   = DRBD_TIMEOUT_DEF;
	new_conf->try_connect_int  = DRBD_CONNECT_INT_DEF;
	new_conf->ping_int	   = DRBD_PING_INT_DEF;
	new_conf->max_epoch_size   = DRBD_MAX_EPOCH_SIZE_DEF;
	new_conf->max_buffers	   = DRBD_MAX_BUFFERS_DEF;
	new_conf->unplug_watermark = DRBD_UNPLUG_WATERMARK_DEF;
	new_conf->sndbuf_size	   = DRBD_SNDBUF_SIZE_DEF;
	new_conf->rcvbuf_size	   = DRBD_RCVBUF_SIZE_DEF;
	new_conf->ko_count	   = DRBD_KO_COUNT_DEF;
	new_conf->after_sb_0p	   = DRBD_AFTER_SB_0P_DEF;
	new_conf->after_sb_1p	   = DRBD_AFTER_SB_1P_DEF;
	new_conf->after_sb_2p	   = DRBD_AFTER_SB_2P_DEF;
	new_conf->want_lose	   = 0;
	new_conf->two_primaries    = 0;
	new_conf->wire_protocol    = DRBD_PROT_C;
	new_conf->ping_timeo	   = DRBD_PING_TIMEO_DEF;
	new_conf->rr_conflict	   = DRBD_RR_CONFLICT_DEF;

	if (!net_conf_from_tags(mdev, nlp->tag_list, new_conf)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	if (new_conf->two_primaries
	    && (new_conf->wire_protocol != DRBD_PROT_C)) {
		retcode = ERR_NOT_PROTO_C;
		goto fail;
	}

	if (mdev->state.role == R_PRIMARY && new_conf->want_lose) {
		retcode = ERR_DISCARD;
		goto fail;
	}

	retcode = NO_ERROR;

	new_my_addr = (struct sockaddr *)&new_conf->my_addr;
	new_peer_addr = (struct sockaddr *)&new_conf->peer_addr;
	for (i = 0; i < minor_count; i++) {
		odev = minor_to_mdev(i);
		if (!odev || odev == mdev)
			continue;
		if (get_net_conf(odev)) {
			taken_addr = (struct sockaddr *)&odev->net_conf->my_addr;
			if (new_conf->my_addr_len == odev->net_conf->my_addr_len &&
			    !memcmp(new_my_addr, taken_addr, new_conf->my_addr_len))
				retcode = ERR_LOCAL_ADDR;

			taken_addr = (struct sockaddr *)&odev->net_conf->peer_addr;
			if (new_conf->peer_addr_len == odev->net_conf->peer_addr_len &&
			    !memcmp(new_peer_addr, taken_addr, new_conf->peer_addr_len))
				retcode = ERR_PEER_ADDR;

			put_net_conf(odev);
			if (retcode != NO_ERROR)
				goto fail;
		}
	}

	if (new_conf->cram_hmac_alg[0] != 0) {
		snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
			new_conf->cram_hmac_alg);
		tfm = crypto_alloc_hash(hmac_name, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm)) {
			tfm = NULL;
			retcode = ERR_AUTH_ALG;
			goto fail;
		}

		if (crypto_tfm_alg_type(crypto_hash_tfm(tfm)) != CRYPTO_ALG_TYPE_SHASH) {
			retcode = ERR_AUTH_ALG_ND;
			goto fail;
		}
	}

	if (new_conf->integrity_alg[0]) {
		integrity_w_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(integrity_w_tfm)) {
			integrity_w_tfm = NULL;
			retcode = ERR_INTEGRITY_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(integrity_w_tfm))) {
			retcode = ERR_INTEGRITY_ALG_ND;
			goto fail;
		}

		integrity_r_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(integrity_r_tfm)) {
			integrity_r_tfm = NULL;
			retcode = ERR_INTEGRITY_ALG;
			goto fail;
		}
	}

	ns = new_conf->max_epoch_size/8;
	if (mdev->tl_hash_s != ns) {
		new_tl_hash = kzalloc(ns*sizeof(void *), GFP_KERNEL);
		if (!new_tl_hash) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	ns = new_conf->max_buffers/8;
	if (new_conf->two_primaries && (mdev->ee_hash_s != ns)) {
		new_ee_hash = kzalloc(ns*sizeof(void *), GFP_KERNEL);
		if (!new_ee_hash) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;

	if (integrity_w_tfm) {
		i = crypto_hash_digestsize(integrity_w_tfm);
		int_dig_out = kmalloc(i, GFP_KERNEL);
		if (!int_dig_out) {
			retcode = ERR_NOMEM;
			goto fail;
		}
		int_dig_in = kmalloc(i, GFP_KERNEL);
		if (!int_dig_in) {
			retcode = ERR_NOMEM;
			goto fail;
		}
		int_dig_vv = kmalloc(i, GFP_KERNEL);
		if (!int_dig_vv) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	if (!mdev->bitmap) {
		if (drbd_bm_init(mdev)) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	spin_lock_irq(&mdev->req_lock);
	if (mdev->net_conf != NULL) {
		retcode = ERR_NET_CONFIGURED;
		spin_unlock_irq(&mdev->req_lock);
		goto fail;
	}
	mdev->net_conf = new_conf;

	mdev->send_cnt = 0;
	mdev->recv_cnt = 0;

	if (new_tl_hash) {
		kfree(mdev->tl_hash);
		mdev->tl_hash_s = mdev->net_conf->max_epoch_size/8;
		mdev->tl_hash = new_tl_hash;
	}

	if (new_ee_hash) {
		kfree(mdev->ee_hash);
		mdev->ee_hash_s = mdev->net_conf->max_buffers/8;
		mdev->ee_hash = new_ee_hash;
	}

	crypto_free_hash(mdev->cram_hmac_tfm);
	mdev->cram_hmac_tfm = tfm;

	crypto_free_hash(mdev->integrity_w_tfm);
	mdev->integrity_w_tfm = integrity_w_tfm;

	crypto_free_hash(mdev->integrity_r_tfm);
	mdev->integrity_r_tfm = integrity_r_tfm;

	kfree(mdev->int_dig_out);
	kfree(mdev->int_dig_in);
	kfree(mdev->int_dig_vv);
	mdev->int_dig_out = int_dig_out;
	mdev->int_dig_in = int_dig_in;
	mdev->int_dig_vv = int_dig_vv;
	spin_unlock_irq(&mdev->req_lock);

	retcode = _drbd_request_state(mdev, NS(conn, C_UNCONNECTED), CS_VERBOSE);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
	reply->ret_code = retcode;
	drbd_reconfig_done(mdev);
	return 0;

fail:
	kfree(int_dig_out);
	kfree(int_dig_in);
	kfree(int_dig_vv);
	crypto_free_hash(tfm);
	crypto_free_hash(integrity_w_tfm);
	crypto_free_hash(integrity_r_tfm);
	kfree(new_tl_hash);
	kfree(new_ee_hash);
	kfree(new_conf);

	reply->ret_code = retcode;
	drbd_reconfig_done(mdev);
	return 0;
}

static int drbd_nl_disconnect(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	int retcode;

	retcode = _drbd_request_state(mdev, NS(conn, C_DISCONNECTING), CS_ORDERED);

	if (retcode == SS_NOTHING_TO_DO)
		goto done;
	else if (retcode == SS_ALREADY_STANDALONE)
		goto done;
	else if (retcode == SS_PRIMARY_NOP) {
		/* Our state checking code wants to see the peer outdated. */
		retcode = drbd_request_state(mdev, NS2(conn, C_DISCONNECTING,
						      pdsk, D_OUTDATED));
	} else if (retcode == SS_CW_FAILED_BY_PEER) {
		/* The peer probably wants to see us outdated. */
		retcode = _drbd_request_state(mdev, NS2(conn, C_DISCONNECTING,
							disk, D_OUTDATED),
					      CS_ORDERED);
		if (retcode == SS_IS_DISKLESS || retcode == SS_LOWER_THAN_OUTDATED) {
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
			retcode = SS_SUCCESS;
		}
	}

	if (retcode < SS_SUCCESS)
		goto fail;

	if (wait_event_interruptible(mdev->state_wait,
				     mdev->state.conn != C_DISCONNECTING)) {
		/* Do not test for mdev->state.conn == C_STANDALONE, since
		   someone else might connect us in the meantime! */
		retcode = ERR_INTR;
		goto fail;
	}

 done:
	retcode = NO_ERROR;
 fail:
	drbd_md_sync(mdev);
	reply->ret_code = retcode;
	return 0;
}

void resync_after_online_grow(struct drbd_conf *mdev)
{
	int iass; /* I am sync source */

	dev_info(DEV, "Resync of new storage after online grow\n");
	if (mdev->state.role != mdev->state.peer)
		iass = (mdev->state.role == R_PRIMARY);
	else
		iass = test_bit(DISCARD_CONCURRENT, &mdev->flags);

	if (iass)
		drbd_start_resync(mdev, C_SYNC_SOURCE);
	else
		_drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
}

static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			  struct drbd_nl_cfg_reply *reply)
{
	struct resize rs;
	int retcode = NO_ERROR;
	int ldsc = 0; /* local disk size changed */
	enum determine_dev_size dd;

	memset(&rs, 0, sizeof(struct resize));
	if (!resize_from_tags(mdev, nlp->tag_list, &rs)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	if (mdev->state.conn > C_CONNECTED) {
		retcode = ERR_RESIZE_RESYNC;
		goto fail;
	}

	if (mdev->state.role == R_SECONDARY &&
	    mdev->state.peer == R_SECONDARY) {
		retcode = ERR_NO_PRIMARY;
		goto fail;
	}

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto fail;
	}

	if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
		mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
		ldsc = 1;
	}

	mdev->ldev->dc.disk_size = (sector_t)rs.resize_size;
	dd = drbd_determin_dev_size(mdev);
	drbd_md_sync(mdev);
	put_ldev(mdev);
	if (dd == dev_size_error) {
		retcode = ERR_NOMEM_BITMAP;
		goto fail;
	}

	if (mdev->state.conn == C_CONNECTED && (dd != unchanged || ldsc)) {
		if (dd == grew)
			set_bit(RESIZE_PENDING, &mdev->flags);

		drbd_send_uuids(mdev);
		drbd_send_sizes(mdev, 1);
	}

 fail:
	reply->ret_code = retcode;
	return 0;
}

static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			       struct drbd_nl_cfg_reply *reply)
{
	int retcode = NO_ERROR;
	int err;
	int ovr; /* online verify running */
	int rsr; /* re-sync running */
	struct crypto_hash *verify_tfm = NULL;
	struct crypto_hash *csums_tfm = NULL;
	struct syncer_conf sc;
	cpumask_var_t new_cpu_mask;

	if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	if (nlp->flags & DRBD_NL_SET_DEFAULTS) {
		memset(&sc, 0, sizeof(struct syncer_conf));
		sc.rate       = DRBD_RATE_DEF;
		sc.after      = DRBD_AFTER_DEF;
		sc.al_extents = DRBD_AL_EXTENTS_DEF;
	} else
		memcpy(&sc, &mdev->sync_conf, sizeof(struct syncer_conf));

	if (!syncer_conf_from_tags(mdev, nlp->tag_list, &sc)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	/* re-sync running */
	rsr = (	mdev->state.conn == C_SYNC_SOURCE ||
		mdev->state.conn == C_SYNC_TARGET ||
		mdev->state.conn == C_PAUSED_SYNC_S ||
		mdev->state.conn == C_PAUSED_SYNC_T );

	if (rsr && strcmp(sc.csums_alg, mdev->sync_conf.csums_alg)) {
		retcode = ERR_CSUMS_RESYNC_RUNNING;
		goto fail;
	}

	if (!rsr && sc.csums_alg[0]) {
		csums_tfm = crypto_alloc_hash(sc.csums_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(csums_tfm)) {
			csums_tfm = NULL;
			retcode = ERR_CSUMS_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(csums_tfm))) {
			retcode = ERR_CSUMS_ALG_ND;
			goto fail;
		}
	}

	/* online verify running */
	ovr = (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T);

	if (ovr) {
		if (strcmp(sc.verify_alg, mdev->sync_conf.verify_alg)) {
			retcode = ERR_VERIFY_RUNNING;
			goto fail;
		}
	}

	if (!ovr && sc.verify_alg[0]) {
		verify_tfm = crypto_alloc_hash(sc.verify_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(verify_tfm)) {
			verify_tfm = NULL;
			retcode = ERR_VERIFY_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(verify_tfm))) {
			retcode = ERR_VERIFY_ALG_ND;
			goto fail;
		}
	}

	/* silently ignore cpu mask on UP kernel */
	if (nr_cpu_ids > 1 && sc.cpu_mask[0] != 0) {
		err = __bitmap_parse(sc.cpu_mask, 32, 0,
				cpumask_bits(new_cpu_mask), nr_cpu_ids);
		if (err) {
			dev_warn(DEV, "__bitmap_parse() failed with %d\n", err);
			retcode = ERR_CPU_MASK_PARSE;
			goto fail;
		}
	}

	ERR_IF (sc.rate < 1) sc.rate = 1;
	ERR_IF (sc.al_extents < 7) sc.al_extents = 127; /* arbitrary minimum */
#define AL_MAX ((MD_AL_MAX_SIZE-1) * AL_EXTENTS_PT)
	if (sc.al_extents > AL_MAX) {
		dev_err(DEV, "sc.al_extents > %d\n", AL_MAX);
		sc.al_extents = AL_MAX;
	}
#undef AL_MAX

	/* most sanity checks done, try to assign the new sync-after
	 * dependency.  need to hold the global lock in there,
	 * to avoid a race in the dependency loop check. */
	retcode = drbd_alter_sa(mdev, sc.after);
	if (retcode != NO_ERROR)
		goto fail;

	/* ok, assign the rest of it as well.
	 * lock against receive_SyncParam() */
	spin_lock(&mdev->peer_seq_lock);
	mdev->sync_conf = sc;

	if (!rsr) {
		crypto_free_hash(mdev->csums_tfm);
		mdev->csums_tfm = csums_tfm;
		csums_tfm = NULL;
	}

	if (!ovr) {
		crypto_free_hash(mdev->verify_tfm);
		mdev->verify_tfm = verify_tfm;
		verify_tfm = NULL;
	}
	spin_unlock(&mdev->peer_seq_lock);

	if (get_ldev(mdev)) {
		wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
		drbd_al_shrink(mdev);
		err = drbd_check_al_size(mdev);
		lc_unlock(mdev->act_log);
		wake_up(&mdev->al_wait);

		put_ldev(mdev);
		drbd_md_sync(mdev);

		if (err) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	if (mdev->state.conn >= C_CONNECTED)
		drbd_send_sync_param(mdev, &sc);

	if (!cpumask_equal(mdev->cpu_mask, new_cpu_mask)) {
		cpumask_copy(mdev->cpu_mask, new_cpu_mask);
		drbd_calc_cpu_mask(mdev);
		mdev->receiver.reset_cpu_mask = 1;
		mdev->asender.reset_cpu_mask = 1;
		mdev->worker.reset_cpu_mask = 1;
	}

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
fail:
	free_cpumask_var(new_cpu_mask);
	crypto_free_hash(csums_tfm);
	crypto_free_hash(verify_tfm);
	reply->ret_code = retcode;
	return 0;
}

static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	int retcode;

	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);

	if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));

	while (retcode == SS_NEED_CONNECTION) {
		spin_lock_irq(&mdev->req_lock);
		if (mdev->state.conn < C_CONNECTED)
			retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
		spin_unlock_irq(&mdev->req_lock);

		if (retcode != SS_NEED_CONNECTION)
			break;

		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
	}

	reply->ret_code = retcode;
	return 0;
}

static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
				   struct drbd_nl_cfg_reply *reply)
{

	reply->ret_code = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S));

	return 0;
}

static int drbd_nl_pause_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	int retcode = NO_ERROR;

	if (drbd_request_state(mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
		retcode = ERR_PAUSE_IS_SET;

	reply->ret_code = retcode;
	return 0;
}

static int drbd_nl_resume_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			       struct drbd_nl_cfg_reply *reply)
{
	int retcode = NO_ERROR;

	if (drbd_request_state(mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO)
		retcode = ERR_PAUSE_IS_CLEAR;

	reply->ret_code = retcode;
	return 0;
}

static int drbd_nl_suspend_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	reply->ret_code = drbd_request_state(mdev, NS(susp, 1));

	return 0;
}

static int drbd_nl_resume_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	reply->ret_code = drbd_request_state(mdev, NS(susp, 0));
	return 0;
}

static int drbd_nl_outdate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			   struct drbd_nl_cfg_reply *reply)
{
	reply->ret_code = drbd_request_state(mdev, NS(disk, D_OUTDATED));
	return 0;
}

static int drbd_nl_get_config(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			   struct drbd_nl_cfg_reply *reply)
{
	unsigned short *tl;

	tl = reply->tag_list;

	if (get_ldev(mdev)) {
		tl = disk_conf_to_tags(mdev, &mdev->ldev->dc, tl);
		put_ldev(mdev);
	}

	if (get_net_conf(mdev)) {
		tl = net_conf_to_tags(mdev, mdev->net_conf, tl);
		put_net_conf(mdev);
	}
	tl = syncer_conf_to_tags(mdev, &mdev->sync_conf, tl);

	put_unaligned(TT_END, tl++); /* Close the tag list */

	return (int)((char *)tl - (char *)reply->tag_list);
}

static int drbd_nl_get_state(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	unsigned short *tl = reply->tag_list;
	union drbd_state s = mdev->state;
	unsigned long rs_left;
	unsigned int res;

	tl = get_state_to_tags(mdev, (struct get_state *)&s, tl);

	/* no local ref, no bitmap, no syncer progress. */
	if (s.conn >= C_SYNC_SOURCE && s.conn <= C_PAUSED_SYNC_T) {
		if (get_ldev(mdev)) {
			drbd_get_syncer_progress(mdev, &rs_left, &res);
			tl = tl_add_int(tl, T_sync_progress, &res);
			put_ldev(mdev);
		}
	}
	put_unaligned(TT_END, tl++); /* Close the tag list */

	return (int)((char *)tl - (char *)reply->tag_list);
}

static int drbd_nl_get_uuids(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	unsigned short *tl;

	tl = reply->tag_list;

	if (get_ldev(mdev)) {
		tl = tl_add_blob(tl, T_uuids, mdev->ldev->md.uuid, UI_SIZE*sizeof(u64));
		tl = tl_add_int(tl, T_uuids_flags, &mdev->ldev->md.flags);
		put_ldev(mdev);
	}
	put_unaligned(TT_END, tl++); /* Close the tag list */

	return (int)((char *)tl - (char *)reply->tag_list);
}

/**
 * drbd_nl_get_timeout_flag() - Used by drbdsetup to find out which timeout value to use
 * @mdev:	DRBD device.
 * @nlp:	Netlink/connector packet from drbdsetup
 * @reply:	Reply packet for drbdsetup
 */
static int drbd_nl_get_timeout_flag(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
				    struct drbd_nl_cfg_reply *reply)
{
	unsigned short *tl;
	char rv;

	tl = reply->tag_list;

	rv = mdev->state.pdsk == D_OUTDATED        ? UT_PEER_OUTDATED :
	  test_bit(USE_DEGR_WFC_T, &mdev->flags) ? UT_DEGRADED : UT_DEFAULT;

	tl = tl_add_blob(tl, T_use_degraded, &rv, sizeof(rv));
	put_unaligned(TT_END, tl++); /* Close the tag list */

	return (int)((char *)tl - (char *)reply->tag_list);
}

static int drbd_nl_start_ov(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
				    struct drbd_nl_cfg_reply *reply)
{
	/* default to resume from last known position, if possible */
	struct start_ov args =
		{ .start_sector = mdev->ov_start_sector };

	if (!start_ov_from_tags(mdev, nlp->tag_list, &args)) {
		reply->ret_code = ERR_MANDATORY_TAG;
		return 0;
	}
	/* w_make_ov_request expects position to be aligned */
	mdev->ov_start_sector = args.start_sector & ~BM_SECT_PER_BIT;
	reply->ret_code = drbd_request_state(mdev, NS(conn, C_VERIFY_S));
	return 0;
}


static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	int retcode = NO_ERROR;
	int skip_initial_sync = 0;
	int err;

	struct new_c_uuid args;

	memset(&args, 0, sizeof(struct new_c_uuid));
	if (!new_c_uuid_from_tags(mdev, nlp->tag_list, &args)) {
		reply->ret_code = ERR_MANDATORY_TAG;
		return 0;
	}

	mutex_lock(&mdev->state_mutex); /* Protects us against serialized state changes. */

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	/* this is "skip initial sync", assume to be clean */
	if (mdev->state.conn == C_CONNECTED && mdev->agreed_pro_version >= 90 &&
	    mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
		dev_info(DEV, "Preparing to skip initial sync\n");
		skip_initial_sync = 1;
	} else if (mdev->state.conn != C_STANDALONE) {
		retcode = ERR_CONNECTED;
		goto out_dec;
	}

	drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
	drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */

	if (args.clear_bm) {
		err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write, "clear_n_write from new_c_uuid");
		if (err) {
			dev_err(DEV, "Writing bitmap failed with %d\n", err);
			retcode = ERR_IO_MD_DISK;
		}
		if (skip_initial_sync) {
			drbd_send_uuids_skip_initial_sync(mdev);
			_drbd_uuid_set(mdev, UI_BITMAP, 0);
			spin_lock_irq(&mdev->req_lock);
			_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
					CS_VERBOSE, NULL);
			spin_unlock_irq(&mdev->req_lock);
		}
	}

	drbd_md_sync(mdev);
out_dec:
	put_ldev(mdev);
out:
	mutex_unlock(&mdev->state_mutex);

	reply->ret_code = retcode;
	return 0;
}

static struct drbd_conf *ensure_mdev(struct drbd_nl_cfg_req *nlp)
{
	struct drbd_conf *mdev;

	if (nlp->drbd_minor >= minor_count)
		return NULL;

	mdev = minor_to_mdev(nlp->drbd_minor);

	if (!mdev && (nlp->flags & DRBD_NL_CREATE_DEVICE)) {
		struct gendisk *disk = NULL;
		mdev = drbd_new_device(nlp->drbd_minor);

		spin_lock_irq(&drbd_pp_lock);
		if (minor_table[nlp->drbd_minor] == NULL) {
			minor_table[nlp->drbd_minor] = mdev;
			disk = mdev->vdisk;
			mdev = NULL;
		} /* else: we lost the race */
		spin_unlock_irq(&drbd_pp_lock);

		if (disk) /* we won the race above */
			/* in case we ever add a drbd_delete_device(),
			 * don't forget the del_gendisk! */
static struct drbd_conf *ensure_mdev(struct drbd_nl_cfg_req *nlp)
{
	struct drbd_conf *mdev;

	if (nlp->drbd_minor >= minor_count)
		return NULL;

	mdev = minor_to_mdev(nlp->drbd_minor);

	if (!mdev && (nlp->flags & DRBD_NL_CREATE_DEVICE)) {
		struct gendisk *disk = NULL;
		mdev = drbd_new_device(nlp->drbd_minor);

		spin_lock_irq(&drbd_pp_lock);
		if (minor_table[nlp->drbd_minor] == NULL) {
			minor_table[nlp->drbd_minor] = mdev;
			disk = mdev->vdisk;
			mdev = NULL;
		} /* else: we lost the race */
		spin_unlock_irq(&drbd_pp_lock);

		if (disk) /* we won the race above */
			/* in case we ever add a drbd_delete_device(),
			 * don't forget the del_gendisk! */
			add_disk(disk);
		else /* we lost the race above */
			drbd_free_mdev(mdev);

		mdev = minor_to_mdev(nlp->drbd_minor);
	}

	return mdev;
}

struct cn_handler_struct {
	int (*function)(struct drbd_conf *,
			struct drbd_nl_cfg_req *,
			struct drbd_nl_cfg_reply *);
	int reply_body_size;
};

static struct cn_handler_struct cnd_table[] = {
	[ P_primary ]		= { &drbd_nl_primary,		0 },
	[ P_secondary ]		= { &drbd_nl_secondary,		0 },
	[ P_disk_conf ]		= { &drbd_nl_disk_conf,		0 },
	[ P_detach ]		= { &drbd_nl_detach,		0 },
	[ P_net_conf ]		= { &drbd_nl_net_conf,		0 },
	[ P_disconnect ]	= { &drbd_nl_disconnect,	0 },
	[ P_resize ]		= { &drbd_nl_resize,		0 },
	[ P_syncer_conf ]	= { &drbd_nl_syncer_conf,	0 },
	[ P_invalidate ]	= { &drbd_nl_invalidate,	0 },
	[ P_invalidate_peer ]	= { &drbd_nl_invalidate_peer,	0 },
	[ P_pause_sync ]	= { &drbd_nl_pause_sync,	0 },
	[ P_resume_sync ]	= { &drbd_nl_resume_sync,	0 },
	[ P_suspend_io ]	= { &drbd_nl_suspend_io,	0 },
	[ P_resume_io ]		= { &drbd_nl_resume_io,		0 },
	[ P_outdate ]		= { &drbd_nl_outdate,		0 },
	[ P_get_config ]	= { &drbd_nl_get_config,
				    sizeof(struct syncer_conf_tag_len_struct) +
				    sizeof(struct disk_conf_tag_len_struct) +
				    sizeof(struct net_conf_tag_len_struct) },
	[ P_get_state ]		= { &drbd_nl_get_state,
				    sizeof(struct get_state_tag_len_struct) +
				    sizeof(struct sync_progress_tag_len_struct) },
	[ P_get_uuids ]		= { &drbd_nl_get_uuids,
				    sizeof(struct get_uuids_tag_len_struct) },
	[ P_get_timeout_flag ]	= { &drbd_nl_get_timeout_flag,
				    sizeof(struct get_timeout_flag_tag_len_struct) },
	[ P_start_ov ]		= { &drbd_nl_start_ov,		0 },
	[ P_new_c_uuid ]	= { &drbd_nl_new_c_uuid,	0 },
};
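/*
 * cnd_table[] maps the packet_type of an incoming request to its handler.
 * reply_body_size is 0 for commands that only return a ret_code; for the
 * query packets it pre-sizes the kmalloc'ed reply with the corresponding
 * *_tag_len_struct sizes (presumably the worst-case tag-list length), so
 * the handler can serialize without further bounds checking.
 *
 * Wiring up a new command would roughly look like the following
 * (hypothetical names, for illustration only):
 *
 *	[ P_my_command ]	= { &drbd_nl_my_command,	0 },
 *
 * drbd_connector_callback() below is the connector entry point: it checks
 * CAP_SYS_ADMIN, resolves the minor via ensure_mdev(), dispatches through
 * this table, and echoes the request's sequence/ack numbers in the reply
 * so that userspace can match answers to its queries.
 */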
static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms *nsp)
{
	struct drbd_nl_cfg_req *nlp = (struct drbd_nl_cfg_req *)req->data;
	struct cn_handler_struct *cm;
	struct cn_msg *cn_reply;
	struct drbd_nl_cfg_reply *reply;
	struct drbd_conf *mdev;
	int retcode, rr;
	int reply_size = sizeof(struct cn_msg)
		+ sizeof(struct drbd_nl_cfg_reply)
		+ sizeof(short int);

	if (!try_module_get(THIS_MODULE)) {
		printk(KERN_ERR "drbd: try_module_get() failed!\n");
		return;
	}

	if (!cap_raised(nsp->eff_cap, CAP_SYS_ADMIN)) {
		retcode = ERR_PERM;
		goto fail;
	}

	mdev = ensure_mdev(nlp);
	if (!mdev) {
		retcode = ERR_MINOR_INVALID;
		goto fail;
	}

	if (nlp->packet_type >= P_nl_after_last_packet) {
		retcode = ERR_PACKET_NR;
		goto fail;
	}

	cm = cnd_table + nlp->packet_type;

	/* This may happen if packet number is 0: */
	if (cm->function == NULL) {
		retcode = ERR_PACKET_NR;
		goto fail;
	}

	reply_size += cm->reply_body_size;

	/* allocation not in the IO path, cqueue thread context */
	cn_reply = kmalloc(reply_size, GFP_KERNEL);
	if (!cn_reply) {
		retcode = ERR_NOMEM;
		goto fail;
	}
	reply = (struct drbd_nl_cfg_reply *) cn_reply->data;

	reply->packet_type =
		cm->reply_body_size ? nlp->packet_type : P_nl_after_last_packet;
	reply->minor = nlp->drbd_minor;
	reply->ret_code = NO_ERROR; /* Might be modified by cm->function. */
	/* reply->tag_list; might be modified by cm->function. */

	rr = cm->function(mdev, nlp, reply);

	cn_reply->id = req->id;
	cn_reply->seq = req->seq;
	cn_reply->ack = req->ack + 1;
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + rr;
	cn_reply->flags = 0;

	rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_KERNEL);
	if (rr && rr != -ESRCH)
		printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr);

	kfree(cn_reply);
	module_put(THIS_MODULE);
	return;
fail:
	drbd_nl_send_reply(req, retcode);
	module_put(THIS_MODULE);
}

static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */

static unsigned short *
__tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
	      unsigned short len, int nul_terminated)
{
	unsigned short l = tag_descriptions[tag_number(tag)].max_len;
	len = (len < l) ? len : l;
	put_unaligned(tag, tl++);
	put_unaligned(len, tl++);
	memcpy(tl, data, len);
	tl = (unsigned short *)((char *)tl + len);
	if (nul_terminated)
		*((char *)tl - 1) = 0;
	return tl;
}

static unsigned short *
tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data, int len)
{
	return __tl_add_blob(tl, tag, data, len, 0);
}

static unsigned short *
tl_add_str(unsigned short *tl, enum drbd_tags tag, const char *str)
{
	return __tl_add_blob(tl, tag, str, strlen(str)+1, 0);
}

static unsigned short *
tl_add_int(unsigned short *tl, enum drbd_tags tag, const void *val)
{
	put_unaligned(tag, tl++);
	switch (tag_type(tag)) {
	case TT_INTEGER:
		put_unaligned(sizeof(int), tl++);
		put_unaligned(*(int *)val, (int *)tl);
		tl = (unsigned short *)((char *)tl + sizeof(int));
		break;
	case TT_INT64:
		put_unaligned(sizeof(u64), tl++);
		put_unaligned(*(u64 *)val, (u64 *)tl);
		tl = (unsigned short *)((char *)tl + sizeof(u64));
		break;
	default:
		/* someone did something stupid. */
		;
	}
	return tl;
}
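/*
 * The drbd_bcast_* functions below push unsolicited events to userspace
 * over the same connector channel: state changes, user-mode helper
 * invocations, dumped data blocks and resync progress.  They share the
 * drbd_nl_seq counter for their sequence numbers and use GFP_NOIO, since
 * they may run in contexts that must not recurse into the block layer
 * (see the "distributed deadlock" comment in drbd_bcast_ee()).
 */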
void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
{
	char buffer[sizeof(struct cn_msg)+
		    sizeof(struct drbd_nl_cfg_reply)+
		    sizeof(struct get_state_tag_len_struct)+
		    sizeof(short int)];
	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
	struct drbd_nl_cfg_reply *reply =
		(struct drbd_nl_cfg_reply *)cn_reply->data;
	unsigned short *tl = reply->tag_list;

	/* dev_warn(DEV, "drbd_bcast_state() got called\n"); */

	tl = get_state_to_tags(mdev, (struct get_state *)&state, tl);

	put_unaligned(TT_END, tl++); /* Close the tag list */

	cn_reply->id.idx = CN_IDX_DRBD;
	cn_reply->id.val = CN_VAL_DRBD;

	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
	cn_reply->ack = 0; /* not used here. */
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
		(int)((char *)tl - (char *)reply->tag_list);
	cn_reply->flags = 0;

	reply->packet_type = P_get_state;
	reply->minor = mdev_to_minor(mdev);
	reply->ret_code = NO_ERROR;

	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
}

void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
{
	char buffer[sizeof(struct cn_msg)+
		    sizeof(struct drbd_nl_cfg_reply)+
		    sizeof(struct call_helper_tag_len_struct)+
		    sizeof(short int)];
	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
	struct drbd_nl_cfg_reply *reply =
		(struct drbd_nl_cfg_reply *)cn_reply->data;
	unsigned short *tl = reply->tag_list;

	/* dev_warn(DEV, "drbd_bcast_ev_helper() got called\n"); */

	tl = tl_add_str(tl, T_helper, helper_name);
	put_unaligned(TT_END, tl++); /* Close the tag list */

	cn_reply->id.idx = CN_IDX_DRBD;
	cn_reply->id.val = CN_VAL_DRBD;

	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
	cn_reply->ack = 0; /* not used here. */
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
		(int)((char *)tl - (char *)reply->tag_list);
	cn_reply->flags = 0;

	reply->packet_type = P_call_helper;
	reply->minor = mdev_to_minor(mdev);
	reply->ret_code = NO_ERROR;

	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
}
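/**
 * drbd_bcast_ee() - Broadcast a data block (epoch entry) to userspace
 * @mdev:	DRBD device.
 * @reason:	Short string describing why the block is dumped; nothing is
 *		sent if it is NULL or empty.
 * @dgs:	Length of the two digest blobs in bytes.
 * @seen_hash:	The "seen" digest (as received), @dgs bytes.
 * @calc_hash:	The locally calculated digest, @dgs bytes.
 * @e:		Epoch entry whose sector, block_id and payload are dumped.
 *
 * Unlike the small fixed-size broadcasts above, the message buffer is
 * kmalloc'ed (GFP_NOIO), because the payload of @e is copied into it
 * page by page.
 */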
void drbd_bcast_ee(struct drbd_conf *mdev,
		   const char *reason, const int dgs,
		   const char *seen_hash, const char *calc_hash,
		   const struct drbd_epoch_entry *e)
{
	struct cn_msg *cn_reply;
	struct drbd_nl_cfg_reply *reply;
	struct bio_vec *bvec;
	unsigned short *tl;
	int i;

	if (!e)
		return;
	if (!reason || !reason[0])
		return;

	/* apparently we have to memcpy twice, first to prepare the data for the
	 * struct cn_msg, then within cn_netlink_send from the cn_msg to the
	 * netlink skb. */
	/* receiver thread context, which is not in the writeout path (of this node),
	 * but may be in the writeout path of the _other_ node.
	 * GFP_NOIO to avoid potential "distributed deadlock". */
	cn_reply = kmalloc(
		sizeof(struct cn_msg)+
		sizeof(struct drbd_nl_cfg_reply)+
		sizeof(struct dump_ee_tag_len_struct)+
		sizeof(short int),
		GFP_NOIO);

	if (!cn_reply) {
		dev_err(DEV, "could not kmalloc buffer for drbd_bcast_ee, sector %llu, size %u\n",
			(unsigned long long)e->sector, e->size);
		return;
	}

	reply = (struct drbd_nl_cfg_reply *)cn_reply->data;
	tl = reply->tag_list;

	tl = tl_add_str(tl, T_dump_ee_reason, reason);
	tl = tl_add_blob(tl, T_seen_digest, seen_hash, dgs);
	tl = tl_add_blob(tl, T_calc_digest, calc_hash, dgs);
	tl = tl_add_int(tl, T_ee_sector, &e->sector);
	tl = tl_add_int(tl, T_ee_block_id, &e->block_id);

	put_unaligned(T_ee_data, tl++);
	put_unaligned(e->size, tl++);

	__bio_for_each_segment(bvec, e->private_bio, i, 0) {
		void *d = kmap(bvec->bv_page);
		memcpy(tl, d + bvec->bv_offset, bvec->bv_len);
		kunmap(bvec->bv_page);
		tl = (unsigned short *)((char *)tl + bvec->bv_len);
	}
	put_unaligned(TT_END, tl++); /* Close the tag list */

	cn_reply->id.idx = CN_IDX_DRBD;
	cn_reply->id.val = CN_VAL_DRBD;

	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
	cn_reply->ack = 0; /* not used here. */
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
		(int)((char *)tl - (char *)reply->tag_list);
	cn_reply->flags = 0;

	reply->packet_type = P_dump_ee;
	reply->minor = mdev_to_minor(mdev);
	reply->ret_code = NO_ERROR;

	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
	kfree(cn_reply);
}

void drbd_bcast_sync_progress(struct drbd_conf *mdev)
{
	char buffer[sizeof(struct cn_msg)+
		    sizeof(struct drbd_nl_cfg_reply)+
		    sizeof(struct sync_progress_tag_len_struct)+
		    sizeof(short int)];
	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
	struct drbd_nl_cfg_reply *reply =
		(struct drbd_nl_cfg_reply *)cn_reply->data;
	unsigned short *tl = reply->tag_list;
	unsigned long rs_left;
	unsigned int res;

	/* no local ref, no bitmap, no syncer progress, no broadcast. */
	if (!get_ldev(mdev))
		return;
	drbd_get_syncer_progress(mdev, &rs_left, &res);
	put_ldev(mdev);

	tl = tl_add_int(tl, T_sync_progress, &res);
	put_unaligned(TT_END, tl++); /* Close the tag list */

	cn_reply->id.idx = CN_IDX_DRBD;
	cn_reply->id.val = CN_VAL_DRBD;

	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
	cn_reply->ack = 0; /* not used here. */
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
		(int)((char *)tl - (char *)reply->tag_list);
	cn_reply->flags = 0;

	reply->packet_type = P_sync_progress;
	reply->minor = mdev_to_minor(mdev);
	reply->ret_code = NO_ERROR;

	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
}

int __init drbd_nl_init(void)
{
	static struct cb_id cn_id_drbd;
	int err, try = 10;

	cn_id_drbd.val = CN_VAL_DRBD;
	do {
		cn_id_drbd.idx = cn_idx;
		err = cn_add_callback(&cn_id_drbd, "cn_drbd", &drbd_connector_callback);
		if (!err)
			break;
		cn_idx = (cn_idx + CN_IDX_STEP);
	} while (try--);

	if (err) {
		printk(KERN_ERR "drbd: cn_drbd failed to register\n");
		return err;
	}

	return 0;
}

void drbd_nl_cleanup(void)
{
	static struct cb_id cn_id_drbd;

	cn_id_drbd.idx = cn_idx;
	cn_id_drbd.val = CN_VAL_DRBD;

	cn_del_callback(&cn_id_drbd);
}

void drbd_nl_send_reply(struct cn_msg *req, int ret_code)
{
	char buffer[sizeof(struct cn_msg)+sizeof(struct drbd_nl_cfg_reply)];
	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
	struct drbd_nl_cfg_reply *reply =
		(struct drbd_nl_cfg_reply *)cn_reply->data;
	int rr;

	cn_reply->id = req->id;

	cn_reply->seq = req->seq;
	cn_reply->ack = req->ack + 1;
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply);
	cn_reply->flags = 0;

	reply->minor = ((struct drbd_nl_cfg_req *)req->data)->drbd_minor;
	reply->ret_code = ret_code;

	rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
	if (rr && rr != -ESRCH)
		printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr);
}