// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010-2011 Neil Brown
 * Copyright (C) 2010-2018 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/slab.h>
#include <linux/module.h>

#include "md.h"
#include "raid1.h"
#include "raid5.h"
#include "raid10.h"
#include "md-bitmap.h"

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX		"raid"
#define MAX_RAID_DEVICES	253 /* md-raid kernel limit */

/*
 * Minimum sectors of free reshape space per raid device
 */
#define MIN_FREE_RESHAPE_SPACE to_sector(4*4096)

/*
 * Minimum journal space 4 MiB in sectors.
 */
#define MIN_RAID456_JOURNAL_SPACE (4*2048)

static bool devices_handle_discard_safely;

/*
 * The following flags are used by dm-raid to set up the array state.
 * They must be cleared before md_run is called.
 */
#define FirstUse 10	/* rdev flag */

struct raid_dev {
	/*
	 * Two DM devices, one to hold metadata and one to hold the
	 * actual data/parity.	The reason for this is to not confuse
	 * ti->len and give more flexibility in altering size and
	 * characteristics.
	 *
	 * While it is possible for this device to be associated
	 * with a different physical device than the data_dev, it
	 * is intended for it to be the same.
	 *    |--------- Physical Device ---------|
	 *    |- meta_dev -|------ data_dev ------|
	 */
	struct dm_dev *meta_dev;
	struct dm_dev *data_dev;
	struct md_rdev rdev;
};

/*
 * Bits for establishing rs->ctr_flags
 *
 * 1 = no flag value
 * 2 = flag with value
 */
#define __CTR_FLAG_SYNC			0  /* 1 */ /* Not with raid0! */
#define __CTR_FLAG_NOSYNC		1  /* 1 */ /* Not with raid0! */
#define __CTR_FLAG_REBUILD		2  /* 2 */ /* Not with raid0! */
#define __CTR_FLAG_DAEMON_SLEEP		3  /* 2 */ /* Not with raid0! */
#define __CTR_FLAG_MIN_RECOVERY_RATE	4  /* 2 */ /* Not with raid0! */
#define __CTR_FLAG_MAX_RECOVERY_RATE	5  /* 2 */ /* Not with raid0! */
#define __CTR_FLAG_MAX_WRITE_BEHIND	6  /* 2 */ /* Only with raid1! */
#define __CTR_FLAG_WRITE_MOSTLY		7  /* 2 */ /* Only with raid1! */
#define __CTR_FLAG_STRIPE_CACHE		8  /* 2 */ /* Only with raid4/5/6! */
#define __CTR_FLAG_REGION_SIZE		9  /* 2 */ /* Not with raid0! */
#define __CTR_FLAG_RAID10_COPIES	10 /* 2 */ /* Only with raid10 */
#define __CTR_FLAG_RAID10_FORMAT	11 /* 2 */ /* Only with raid10 */
/* New for v1.9.0 */
#define __CTR_FLAG_DELTA_DISKS		12 /* 2 */ /* Only with reshapable raid1/4/5/6/10! */
#define __CTR_FLAG_DATA_OFFSET		13 /* 2 */ /* Only with reshapable raid4/5/6/10! */
#define __CTR_FLAG_RAID10_USE_NEAR_SETS 14 /* 2 */ /* Only with raid10! */

/* New for v1.10.0 */
#define __CTR_FLAG_JOURNAL_DEV		15 /* 2 */ /* Only with raid4/5/6 (journal device)! */

/* New for v1.11.1 */
#define __CTR_FLAG_JOURNAL_MODE		16 /* 2 */ /* Only with raid4/5/6 (journal mode)! */
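/*
 * For illustration: the __CTR_FLAG_* values above are bit indices meant
 * for the atomic bitops, while the CTR_FLAG_* masks below suit plain
 * bitwise tests.  Both views name the same state, e.g. for some rs:
 *
 *	set_bit(__CTR_FLAG_SYNC, &rs->ctr_flags);
 *	...
 *	if (rs->ctr_flags & CTR_FLAG_SYNC)	// now true, same bit
 */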
/*
 * Flags for rs->ctr_flags field.
 */
#define CTR_FLAG_SYNC			(1 << __CTR_FLAG_SYNC)
#define CTR_FLAG_NOSYNC			(1 << __CTR_FLAG_NOSYNC)
#define CTR_FLAG_REBUILD		(1 << __CTR_FLAG_REBUILD)
#define CTR_FLAG_DAEMON_SLEEP		(1 << __CTR_FLAG_DAEMON_SLEEP)
#define CTR_FLAG_MIN_RECOVERY_RATE	(1 << __CTR_FLAG_MIN_RECOVERY_RATE)
#define CTR_FLAG_MAX_RECOVERY_RATE	(1 << __CTR_FLAG_MAX_RECOVERY_RATE)
#define CTR_FLAG_MAX_WRITE_BEHIND	(1 << __CTR_FLAG_MAX_WRITE_BEHIND)
#define CTR_FLAG_WRITE_MOSTLY		(1 << __CTR_FLAG_WRITE_MOSTLY)
#define CTR_FLAG_STRIPE_CACHE		(1 << __CTR_FLAG_STRIPE_CACHE)
#define CTR_FLAG_REGION_SIZE		(1 << __CTR_FLAG_REGION_SIZE)
#define CTR_FLAG_RAID10_COPIES		(1 << __CTR_FLAG_RAID10_COPIES)
#define CTR_FLAG_RAID10_FORMAT		(1 << __CTR_FLAG_RAID10_FORMAT)
#define CTR_FLAG_DELTA_DISKS		(1 << __CTR_FLAG_DELTA_DISKS)
#define CTR_FLAG_DATA_OFFSET		(1 << __CTR_FLAG_DATA_OFFSET)
#define CTR_FLAG_RAID10_USE_NEAR_SETS	(1 << __CTR_FLAG_RAID10_USE_NEAR_SETS)
#define CTR_FLAG_JOURNAL_DEV		(1 << __CTR_FLAG_JOURNAL_DEV)
#define CTR_FLAG_JOURNAL_MODE		(1 << __CTR_FLAG_JOURNAL_MODE)

/*
 * Definitions of various constructor flags to
 * be used in checks of valid / invalid flags
 * per raid level.
 */
/* Define all sync flags */
#define CTR_FLAGS_ANY_SYNC		(CTR_FLAG_SYNC | CTR_FLAG_NOSYNC)

/* Define flags for options without argument (e.g. 'nosync') */
#define CTR_FLAG_OPTIONS_NO_ARGS	(CTR_FLAGS_ANY_SYNC | \
					 CTR_FLAG_RAID10_USE_NEAR_SETS)

/* Define flags for options with one argument (e.g. 'delta_disks +2') */
#define CTR_FLAG_OPTIONS_ONE_ARG (CTR_FLAG_REBUILD | \
				  CTR_FLAG_WRITE_MOSTLY | \
				  CTR_FLAG_DAEMON_SLEEP | \
				  CTR_FLAG_MIN_RECOVERY_RATE | \
				  CTR_FLAG_MAX_RECOVERY_RATE | \
				  CTR_FLAG_MAX_WRITE_BEHIND | \
				  CTR_FLAG_STRIPE_CACHE | \
				  CTR_FLAG_REGION_SIZE | \
				  CTR_FLAG_RAID10_COPIES | \
				  CTR_FLAG_RAID10_FORMAT | \
				  CTR_FLAG_DELTA_DISKS | \
				  CTR_FLAG_DATA_OFFSET | \
				  CTR_FLAG_JOURNAL_DEV | \
				  CTR_FLAG_JOURNAL_MODE)
/* Valid options definitions per raid level... */

/* "raid0" only accepts data_offset */
#define RAID0_VALID_FLAGS	(CTR_FLAG_DATA_OFFSET)

/* "raid1" does not accept stripe cache or any raid10 options */
#define RAID1_VALID_FLAGS	(CTR_FLAGS_ANY_SYNC | \
				 CTR_FLAG_REBUILD | \
				 CTR_FLAG_WRITE_MOSTLY | \
				 CTR_FLAG_DAEMON_SLEEP | \
				 CTR_FLAG_MIN_RECOVERY_RATE | \
				 CTR_FLAG_MAX_RECOVERY_RATE | \
				 CTR_FLAG_MAX_WRITE_BEHIND | \
				 CTR_FLAG_REGION_SIZE | \
				 CTR_FLAG_DELTA_DISKS | \
				 CTR_FLAG_DATA_OFFSET)

/* "raid10" does not accept any raid1 or stripe cache options */
#define RAID10_VALID_FLAGS	(CTR_FLAGS_ANY_SYNC | \
				 CTR_FLAG_REBUILD | \
				 CTR_FLAG_DAEMON_SLEEP | \
				 CTR_FLAG_MIN_RECOVERY_RATE | \
				 CTR_FLAG_MAX_RECOVERY_RATE | \
				 CTR_FLAG_REGION_SIZE | \
				 CTR_FLAG_RAID10_COPIES | \
				 CTR_FLAG_RAID10_FORMAT | \
				 CTR_FLAG_DELTA_DISKS | \
				 CTR_FLAG_DATA_OFFSET | \
				 CTR_FLAG_RAID10_USE_NEAR_SETS)

/*
 * "raid4/5/6" do not accept any raid1 or raid10 specific options
 *
 * "raid6" does not accept "nosync", because it is not guaranteed
 * that both parity and q-syndrome are being written properly with
 * any writes
 */
#define RAID45_VALID_FLAGS	(CTR_FLAGS_ANY_SYNC | \
				 CTR_FLAG_REBUILD | \
				 CTR_FLAG_DAEMON_SLEEP | \
				 CTR_FLAG_MIN_RECOVERY_RATE | \
				 CTR_FLAG_MAX_RECOVERY_RATE | \
				 CTR_FLAG_STRIPE_CACHE | \
				 CTR_FLAG_REGION_SIZE | \
				 CTR_FLAG_DELTA_DISKS | \
				 CTR_FLAG_DATA_OFFSET | \
				 CTR_FLAG_JOURNAL_DEV | \
				 CTR_FLAG_JOURNAL_MODE)

#define RAID6_VALID_FLAGS	(CTR_FLAG_SYNC | \
				 CTR_FLAG_REBUILD | \
				 CTR_FLAG_DAEMON_SLEEP | \
				 CTR_FLAG_MIN_RECOVERY_RATE | \
				 CTR_FLAG_MAX_RECOVERY_RATE | \
				 CTR_FLAG_STRIPE_CACHE | \
				 CTR_FLAG_REGION_SIZE | \
				 CTR_FLAG_DELTA_DISKS | \
				 CTR_FLAG_DATA_OFFSET | \
				 CTR_FLAG_JOURNAL_DEV | \
				 CTR_FLAG_JOURNAL_MODE)
/* ...valid options definitions per raid level */

/*
 * Flags for rs->runtime_flags field
 * (RT_FLAG prefix meaning "runtime flag")
 *
 * These are all internal and used to define runtime state,
 * e.g. to prevent another resume from triggering preresume
 * processing of the raid set all over again.
 */
#define RT_FLAG_RS_PRERESUMED		0
#define RT_FLAG_RS_RESUMED		1
#define RT_FLAG_RS_BITMAP_LOADED	2
#define RT_FLAG_UPDATE_SBS		3
#define RT_FLAG_RESHAPE_RS		4
#define RT_FLAG_RS_SUSPENDED		5
#define RT_FLAG_RS_IN_SYNC		6
#define RT_FLAG_RS_RESYNCING		7
#define RT_FLAG_RS_GROW			8

/* Array elements of 64 bit needed for rebuild/failed disk bits */
#define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8)
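/*
 * Worked example for the macro above (the numbers follow directly from
 * the definitions): with MAX_RAID_DEVICES = 253 and 64 bits per
 * uint64_t, DISKS_ARRAY_ELEMS = (253 + 63) / 64 = 4, i.e. four 64-bit
 * words suffice to carry one bit per possible raid device.
 */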
/*
 * raid set level, layout and chunk sectors backup/restore
 */
struct rs_layout {
	int new_level;
	int new_layout;
	int new_chunk_sectors;
};

struct raid_set {
	struct dm_target *ti;

	uint32_t stripe_cache_entries;
	unsigned long ctr_flags;
	unsigned long runtime_flags;

	uint64_t rebuild_disks[DISKS_ARRAY_ELEMS];

	int raid_disks;
	int delta_disks;
	int data_offset;
	int raid10_copies;
	int requested_bitmap_chunk_sectors;

	struct mddev md;
	struct raid_type *raid_type;

	sector_t array_sectors;
	sector_t dev_sectors;

	/* Optional raid4/5/6 journal device */
	struct journal_dev {
		struct dm_dev *dev;
		struct md_rdev rdev;
		int mode;
	} journal_dev;

	struct raid_dev dev[];
};

static void rs_config_backup(struct raid_set *rs, struct rs_layout *l)
{
	struct mddev *mddev = &rs->md;

	l->new_level = mddev->new_level;
	l->new_layout = mddev->new_layout;
	l->new_chunk_sectors = mddev->new_chunk_sectors;
}

static void rs_config_restore(struct raid_set *rs, struct rs_layout *l)
{
	struct mddev *mddev = &rs->md;

	mddev->new_level = l->new_level;
	mddev->new_layout = l->new_layout;
	mddev->new_chunk_sectors = l->new_chunk_sectors;
}

/* raid10 algorithms (i.e. formats) */
#define ALGORITHM_RAID10_DEFAULT	0
#define ALGORITHM_RAID10_NEAR		1
#define ALGORITHM_RAID10_OFFSET		2
#define ALGORITHM_RAID10_FAR		3

/* Supported raid types and properties. */
static struct raid_type {
	const char *name;		/* RAID algorithm. */
	const char *descr;		/* Descriptor text for logging. */
	const unsigned int parity_devs;	/* # of parity devices. */
	const unsigned int minimal_devs;/* minimal # of devices in set. */
	const unsigned int level;	/* RAID level. */
	const unsigned int algorithm;	/* RAID algorithm. */
} raid_types[] = {
	{"raid0",	  "raid0 (striping)",			    0, 2, 0,  0 /* NONE */},
	{"raid1",	  "raid1 (mirroring)",			    0, 2, 1,  0 /* NONE */},
	{"raid10_far",	  "raid10 far (striped mirrors)",	    0, 2, 10, ALGORITHM_RAID10_FAR},
	{"raid10_offset", "raid10 offset (striped mirrors)",	    0, 2, 10, ALGORITHM_RAID10_OFFSET},
	{"raid10_near",	  "raid10 near (striped mirrors)",	    0, 2, 10, ALGORITHM_RAID10_NEAR},
	{"raid10",	  "raid10 (striped mirrors)",		    0, 2, 10, ALGORITHM_RAID10_DEFAULT},
	{"raid4",	  "raid4 (dedicated first parity disk)",    1, 2, 5,  ALGORITHM_PARITY_0}, /* raid4 layout = raid5_0 */
	{"raid5_n",	  "raid5 (dedicated last parity disk)",	    1, 2, 5,  ALGORITHM_PARITY_N},
	{"raid5_ls",	  "raid5 (left symmetric)",		    1, 2, 5,  ALGORITHM_LEFT_SYMMETRIC},
	{"raid5_rs",	  "raid5 (right symmetric)",		    1, 2, 5,  ALGORITHM_RIGHT_SYMMETRIC},
	{"raid5_la",	  "raid5 (left asymmetric)",		    1, 2, 5,  ALGORITHM_LEFT_ASYMMETRIC},
	{"raid5_ra",	  "raid5 (right asymmetric)",		    1, 2, 5,  ALGORITHM_RIGHT_ASYMMETRIC},
	{"raid6_zr",	  "raid6 (zero restart)",		    2, 4, 6,  ALGORITHM_ROTATING_ZERO_RESTART},
	{"raid6_nr",	  "raid6 (N restart)",			    2, 4, 6,  ALGORITHM_ROTATING_N_RESTART},
	{"raid6_nc",	  "raid6 (N continue)",			    2, 4, 6,  ALGORITHM_ROTATING_N_CONTINUE},
	{"raid6_n_6",	  "raid6 (dedicated parity/Q n/6)",	    2, 4, 6,  ALGORITHM_PARITY_N_6},
	{"raid6_ls_6",	  "raid6 (left symmetric dedicated Q 6)",   2, 4, 6,  ALGORITHM_LEFT_SYMMETRIC_6},
	{"raid6_rs_6",	  "raid6 (right symmetric dedicated Q 6)",  2, 4, 6,  ALGORITHM_RIGHT_SYMMETRIC_6},
	{"raid6_la_6",	  "raid6 (left asymmetric dedicated Q 6)",  2, 4, 6,  ALGORITHM_LEFT_ASYMMETRIC_6},
	{"raid6_ra_6",	  "raid6 (right asymmetric dedicated Q 6)", 2, 4, 6,  ALGORITHM_RIGHT_ASYMMETRIC_6}
};
/* True, if @v is in inclusive range [@min, @max] */
static bool __within_range(long v, long min, long max)
{
	return v >= min && v <= max;
}

/* All table line arguments are defined here */
static struct arg_name_flag {
	const unsigned long flag;
	const char *name;
} __arg_name_flags[] = {
	{ CTR_FLAG_SYNC, "sync"},
	{ CTR_FLAG_NOSYNC, "nosync"},
	{ CTR_FLAG_REBUILD, "rebuild"},
	{ CTR_FLAG_DAEMON_SLEEP, "daemon_sleep"},
	{ CTR_FLAG_MIN_RECOVERY_RATE, "min_recovery_rate"},
	{ CTR_FLAG_MAX_RECOVERY_RATE, "max_recovery_rate"},
	{ CTR_FLAG_MAX_WRITE_BEHIND, "max_write_behind"},
	{ CTR_FLAG_WRITE_MOSTLY, "write_mostly"},
	{ CTR_FLAG_STRIPE_CACHE, "stripe_cache"},
	{ CTR_FLAG_REGION_SIZE, "region_size"},
	{ CTR_FLAG_RAID10_COPIES, "raid10_copies"},
	{ CTR_FLAG_RAID10_FORMAT, "raid10_format"},
	{ CTR_FLAG_DATA_OFFSET, "data_offset"},
	{ CTR_FLAG_DELTA_DISKS, "delta_disks"},
	{ CTR_FLAG_RAID10_USE_NEAR_SETS, "raid10_use_near_sets"},
	{ CTR_FLAG_JOURNAL_DEV, "journal_dev" },
	{ CTR_FLAG_JOURNAL_MODE, "journal_mode" },
};

/* Return argument name string for given @flag */
static const char *dm_raid_arg_name_by_flag(const uint32_t flag)
{
	if (hweight32(flag) == 1) {
		struct arg_name_flag *anf = __arg_name_flags + ARRAY_SIZE(__arg_name_flags);

		while (anf-- > __arg_name_flags)
			if (flag & anf->flag)
				return anf->name;

	} else
		DMERR("%s called with more than one flag!", __func__);

	return NULL;
}
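/*
 * For example (values taken from the table above), a lookup such as
 * dm_raid_arg_name_by_flag(CTR_FLAG_REBUILD) yields "rebuild", whereas
 * a combined mask like CTR_FLAG_SYNC | CTR_FLAG_NOSYNC has more than
 * one bit set, so it logs an error and yields NULL.
 */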
/* Define correlation of raid456 journal cache modes and dm-raid target line parameters */
static struct {
	const int mode;
	const char *param;
} _raid456_journal_mode[] = {
	{ R5C_JOURNAL_MODE_WRITE_THROUGH, "writethrough" },
	{ R5C_JOURNAL_MODE_WRITE_BACK, "writeback" }
};

/* Return MD raid4/5/6 journal mode for dm @journal_mode one */
static int dm_raid_journal_mode_to_md(const char *mode)
{
	int m = ARRAY_SIZE(_raid456_journal_mode);

	while (m--)
		if (!strcasecmp(mode, _raid456_journal_mode[m].param))
			return _raid456_journal_mode[m].mode;

	return -EINVAL;
}

/* Return dm-raid raid4/5/6 journal mode string for @mode */
static const char *md_journal_mode_to_dm_raid(const int mode)
{
	int m = ARRAY_SIZE(_raid456_journal_mode);

	while (m--)
		if (mode == _raid456_journal_mode[m].mode)
			return _raid456_journal_mode[m].param;

	return "unknown";
}

/*
 * Bool helpers to test for various raid levels of a raid set.
 * It's the level as reported by the superblock rather than
 * the requested raid_type passed to the constructor.
 */
/* Return true, if raid set in @rs is raid0 */
static bool rs_is_raid0(struct raid_set *rs)
{
	return !rs->md.level;
}

/* Return true, if raid set in @rs is raid1 */
static bool rs_is_raid1(struct raid_set *rs)
{
	return rs->md.level == 1;
}

/* Return true, if raid set in @rs is raid10 */
static bool rs_is_raid10(struct raid_set *rs)
{
	return rs->md.level == 10;
}

/* Return true, if raid set in @rs is level 6 */
static bool rs_is_raid6(struct raid_set *rs)
{
	return rs->md.level == 6;
}

/* Return true, if raid set in @rs is level 4, 5 or 6 */
static bool rs_is_raid456(struct raid_set *rs)
{
	return __within_range(rs->md.level, 4, 6);
}

/* Return true, if raid set in @rs is reshapable */
static bool __is_raid10_far(int layout);
static bool rs_is_reshapable(struct raid_set *rs)
{
	return rs_is_raid456(rs) ||
	       (rs_is_raid10(rs) && !__is_raid10_far(rs->md.new_layout));
}

/* Return true, if raid set in @rs is recovering */
static bool rs_is_recovering(struct raid_set *rs)
{
	return rs->md.recovery_cp < rs->md.dev_sectors;
}

/* Return true, if raid set in @rs is reshaping */
static bool rs_is_reshaping(struct raid_set *rs)
{
	return rs->md.reshape_position != MaxSector;
}

/*
 * bool helpers to test for various raid levels of a raid type @rt
 */

/* Return true, if raid type in @rt is raid0 */
static bool rt_is_raid0(struct raid_type *rt)
{
	return !rt->level;
}

/* Return true, if raid type in @rt is raid1 */
static bool rt_is_raid1(struct raid_type *rt)
{
	return rt->level == 1;
}

/* Return true, if raid type in @rt is raid10 */
static bool rt_is_raid10(struct raid_type *rt)
{
	return rt->level == 10;
}

/* Return true, if raid type in @rt is raid4/5 */
static bool rt_is_raid45(struct raid_type *rt)
{
	return __within_range(rt->level, 4, 5);
}

/* Return true, if raid type in @rt is raid6 */
static bool rt_is_raid6(struct raid_type *rt)
{
	return rt->level == 6;
}

/* Return true, if raid type in @rt is raid4/5/6 */
static bool rt_is_raid456(struct raid_type *rt)
{
	return __within_range(rt->level, 4, 6);
}
/* END: raid level bools */

/* Return valid ctr flags for the raid level of @rs */
static unsigned long __valid_flags(struct raid_set *rs)
{
	if (rt_is_raid0(rs->raid_type))
		return RAID0_VALID_FLAGS;
	else if (rt_is_raid1(rs->raid_type))
		return RAID1_VALID_FLAGS;
	else if (rt_is_raid10(rs->raid_type))
		return RAID10_VALID_FLAGS;
	else if (rt_is_raid45(rs->raid_type))
		return RAID45_VALID_FLAGS;
	else if (rt_is_raid6(rs->raid_type))
		return RAID6_VALID_FLAGS;

	return 0;
}
/*
 * Check for valid flags set on @rs
 *
 * Has to be called after parsing of the ctr flags!
 */
static int rs_check_for_valid_flags(struct raid_set *rs)
{
	if (rs->ctr_flags & ~__valid_flags(rs)) {
		rs->ti->error = "Invalid flags combination";
		return -EINVAL;
	}

	return 0;
}

/* MD raid10 bit definitions and helpers */
#define RAID10_OFFSET			(1 << 16) /* stripes with data copies are adjacent on devices */
#define RAID10_BROCKEN_USE_FAR_SETS	(1 << 17) /* Broken in raid10.c: use sets instead of whole stripe rotation */
#define RAID10_USE_FAR_SETS		(1 << 18) /* Use sets instead of whole stripe rotation */
#define RAID10_FAR_COPIES_SHIFT		8	  /* raid10 # far copies shift (2nd byte of layout) */

/* Return md raid10 near copies for @layout */
static unsigned int __raid10_near_copies(int layout)
{
	return layout & 0xFF;
}

/* Return md raid10 far copies for @layout */
static unsigned int __raid10_far_copies(int layout)
{
	return __raid10_near_copies(layout >> RAID10_FAR_COPIES_SHIFT);
}

/* Return true if md raid10 offset for @layout */
static bool __is_raid10_offset(int layout)
{
	return !!(layout & RAID10_OFFSET);
}

/* Return true if md raid10 near for @layout */
static bool __is_raid10_near(int layout)
{
	return !__is_raid10_offset(layout) && __raid10_near_copies(layout) > 1;
}

/* Return true if md raid10 far for @layout */
static bool __is_raid10_far(int layout)
{
	return !__is_raid10_offset(layout) && __raid10_far_copies(layout) > 1;
}

/* Return md raid10 layout string for @layout */
static const char *raid10_md_layout_to_format(int layout)
{
	/*
	 * Bit 16 stands for "offset"
	 * (i.e. adjacent stripes hold copies)
	 *
	 * Refer to MD's raid10.c for details
	 */
	if (__is_raid10_offset(layout))
		return "offset";

	if (__raid10_near_copies(layout) > 1)
		return "near";

	if (__raid10_far_copies(layout) > 1)
		return "far";

	return "unknown";
}

/* Return md raid10 algorithm for @name */
static int raid10_name_to_format(const char *name)
{
	if (!strcasecmp(name, "near"))
		return ALGORITHM_RAID10_NEAR;
	else if (!strcasecmp(name, "offset"))
		return ALGORITHM_RAID10_OFFSET;
	else if (!strcasecmp(name, "far"))
		return ALGORITHM_RAID10_FAR;

	return -EINVAL;
}

/* Return md raid10 copies for @layout */
static unsigned int raid10_md_layout_to_copies(int layout)
{
	return max(__raid10_near_copies(layout), __raid10_far_copies(layout));
}

/* Return md raid10 format id for @format string */
static int raid10_format_to_md_layout(struct raid_set *rs,
				      unsigned int algorithm,
				      unsigned int copies)
{
	unsigned int n = 1, f = 1, r = 0;

	/*
	 * MD resilience flaw:
	 *
	 * enabling use_far_sets for far/offset formats causes copies
	 * to be colocated on the same devs together with their origins!
	 *
	 * -> disable it for now in the definition above
	 */
	if (algorithm == ALGORITHM_RAID10_DEFAULT ||
	    algorithm == ALGORITHM_RAID10_NEAR)
		n = copies;

	else if (algorithm == ALGORITHM_RAID10_OFFSET) {
		f = copies;
		r = RAID10_OFFSET;
		if (!test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags))
			r |= RAID10_USE_FAR_SETS;

	} else if (algorithm == ALGORITHM_RAID10_FAR) {
		f = copies;
		if (!test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags))
			r |= RAID10_USE_FAR_SETS;

	} else
		return -EINVAL;

	return r | (f << RAID10_FAR_COPIES_SHIFT) | n;
}
/* END: MD raid10 bit definitions and helpers */
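/*
 * Worked layout examples (derived from the encoding above; the layout
 * word is r | (f << 8) | n, near copies in the low byte and far copies
 * in the second byte):
 *
 *	"near",   2 copies: n = 2, f = 1, r = 0               -> 0x00102
 *	"far",    2 copies: n = 1, f = 2, r = (1 << 18)       -> 0x40201
 *	"offset", 2 copies: n = 1, f = 2,
 *		  r = (1 << 16) | (1 << 18)                   -> 0x50201
 *
 * (the far/offset values assume 'raid10_use_near_sets' was not given,
 * so RAID10_USE_FAR_SETS gets set)
 */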
/* Check for any of the raid10 algorithms */
static bool __got_raid10(struct raid_type *rtp, const int layout)
{
	if (rtp->level == 10) {
		switch (rtp->algorithm) {
		case ALGORITHM_RAID10_DEFAULT:
		case ALGORITHM_RAID10_NEAR:
			return __is_raid10_near(layout);
		case ALGORITHM_RAID10_OFFSET:
			return __is_raid10_offset(layout);
		case ALGORITHM_RAID10_FAR:
			return __is_raid10_far(layout);
		default:
			break;
		}
	}

	return false;
}

/* Return raid_type for @name */
static struct raid_type *get_raid_type(const char *name)
{
	struct raid_type *rtp = raid_types + ARRAY_SIZE(raid_types);

	while (rtp-- > raid_types)
		if (!strcasecmp(rtp->name, name))
			return rtp;

	return NULL;
}

/* Return raid_type derived from @level and @layout */
static struct raid_type *get_raid_type_by_ll(const int level, const int layout)
{
	struct raid_type *rtp = raid_types + ARRAY_SIZE(raid_types);

	while (rtp-- > raid_types) {
		/* RAID10 special checks based on @layout flags/properties */
		if (rtp->level == level &&
		    (__got_raid10(rtp, layout) || rtp->algorithm == layout))
			return rtp;
	}

	return NULL;
}

/* Adjust rdev sectors */
static void rs_set_rdev_sectors(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;
	struct md_rdev *rdev;

	/*
	 * raid10 sets rdev->sectors to the device size, which
	 * is unintended in case of out-of-place reshaping
	 */
	rdev_for_each(rdev, mddev)
		if (!test_bit(Journal, &rdev->flags))
			rdev->sectors = mddev->dev_sectors;
}

/*
 * Change bdev capacity of @rs in case of a disk add/remove reshape
 */
static void rs_set_capacity(struct raid_set *rs)
{
	struct gendisk *gendisk = dm_disk(dm_table_get_md(rs->ti->table));

	set_capacity_and_notify(gendisk, rs->md.array_sectors);
}

/*
 * Set the mddev properties in @rs to the current
 * ones retrieved from the freshest superblock
 */
static void rs_set_cur(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;

	mddev->new_level = mddev->level;
	mddev->new_layout = mddev->layout;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
}

/*
 * Set the mddev properties in @rs to the new
 * ones requested by the ctr
 */
static void rs_set_new(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;

	mddev->level = mddev->new_level;
	mddev->layout = mddev->new_layout;
	mddev->chunk_sectors = mddev->new_chunk_sectors;
	mddev->raid_disks = rs->raid_disks;
	mddev->delta_disks = 0;
}
static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *raid_type,
				       unsigned int raid_devs)
{
	unsigned int i;
	struct raid_set *rs;

	if (raid_devs <= raid_type->parity_devs) {
		ti->error = "Insufficient number of devices";
		return ERR_PTR(-EINVAL);
	}

	rs = kzalloc(struct_size(rs, dev, raid_devs), GFP_KERNEL);
	if (!rs) {
		ti->error = "Cannot allocate raid context";
		return ERR_PTR(-ENOMEM);
	}

	mddev_init(&rs->md);

	rs->raid_disks = raid_devs;
	rs->delta_disks = 0;

	rs->ti = ti;
	rs->raid_type = raid_type;
	rs->stripe_cache_entries = 256;
	rs->md.raid_disks = raid_devs;
	rs->md.level = raid_type->level;
	rs->md.new_level = rs->md.level;
	rs->md.layout = raid_type->algorithm;
	rs->md.new_layout = rs->md.layout;
	rs->md.delta_disks = 0;
	rs->md.recovery_cp = MaxSector;

	for (i = 0; i < raid_devs; i++)
		md_rdev_init(&rs->dev[i].rdev);

	/*
	 * Remaining items to be initialized by further RAID params:
	 *  rs->md.persistent
	 *  rs->md.external
	 *  rs->md.chunk_sectors
	 *  rs->md.new_chunk_sectors
	 *  rs->md.dev_sectors
	 */

	return rs;
}

/* Free all @rs allocations */
static void raid_set_free(struct raid_set *rs)
{
	int i;

	if (rs->journal_dev.dev) {
		md_rdev_clear(&rs->journal_dev.rdev);
		dm_put_device(rs->ti, rs->journal_dev.dev);
	}

	for (i = 0; i < rs->raid_disks; i++) {
		if (rs->dev[i].meta_dev)
			dm_put_device(rs->ti, rs->dev[i].meta_dev);
		md_rdev_clear(&rs->dev[i].rdev);
		if (rs->dev[i].data_dev)
			dm_put_device(rs->ti, rs->dev[i].data_dev);
	}

	kfree(rs);
}

/*
 * For every device we have two words
 *  <meta_dev>: meta device name or '-' if missing
 *  <data_dev>: data device name or '-' if missing
 *
 * The following are permitted:
 *  - -
 *  - <data_dev>
 *  <meta_dev> <data_dev>
 *
 * The following is not allowed:
 *  <meta_dev> -
 *
 * This code parses those words.  If there is a failure,
 * the caller must use raid_set_free() to unwind the operations.
 */
static int parse_dev_params(struct raid_set *rs, struct dm_arg_set *as)
{
	int i;
	int rebuild = 0;
	int metadata_available = 0;
	int r = 0;
	const char *arg;

	/* Skip over the number of raid devices argument to get to the dev pairs */
	arg = dm_shift_arg(as);
	if (!arg)
		return -EINVAL;

	for (i = 0; i < rs->raid_disks; i++) {
		rs->dev[i].rdev.raid_disk = i;

		rs->dev[i].meta_dev = NULL;
		rs->dev[i].data_dev = NULL;

		/*
		 * There are no offsets initially.
		 * Out of place reshape will set them accordingly.
		 */
		rs->dev[i].rdev.data_offset = 0;
		rs->dev[i].rdev.new_data_offset = 0;
		rs->dev[i].rdev.mddev = &rs->md;

		arg = dm_shift_arg(as);
		if (!arg)
			return -EINVAL;

		if (strcmp(arg, "-")) {
			r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table),
					  &rs->dev[i].meta_dev);
			if (r) {
				rs->ti->error = "RAID metadata device lookup failure";
				return r;
			}

			rs->dev[i].rdev.sb_page = alloc_page(GFP_KERNEL);
			if (!rs->dev[i].rdev.sb_page) {
				rs->ti->error = "Failed to allocate superblock page";
				return -ENOMEM;
			}
		}

		arg = dm_shift_arg(as);
		if (!arg)
			return -EINVAL;

		if (!strcmp(arg, "-")) {
			if (!test_bit(In_sync, &rs->dev[i].rdev.flags) &&
			    (!rs->dev[i].rdev.recovery_offset)) {
				rs->ti->error = "Drive designated for rebuild not specified";
				return -EINVAL;
			}

			if (rs->dev[i].meta_dev) {
				rs->ti->error = "No data device supplied with metadata device";
				return -EINVAL;
			}

			continue;
		}

		r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table),
				  &rs->dev[i].data_dev);
		if (r) {
			rs->ti->error = "RAID device lookup failure";
			return r;
		}

		if (rs->dev[i].meta_dev) {
			metadata_available = 1;
			rs->dev[i].rdev.meta_bdev = rs->dev[i].meta_dev->bdev;
		}
		rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev;
		list_add_tail(&rs->dev[i].rdev.same_set, &rs->md.disks);
		if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
			rebuild++;
	}

	if (rs->journal_dev.dev)
		list_add_tail(&rs->journal_dev.rdev.same_set, &rs->md.disks);

	if (metadata_available) {
		rs->md.external = 0;
		rs->md.persistent = 1;
		rs->md.major_version = 2;
	} else if (rebuild && !rs->md.recovery_cp) {
		/*
		 * Without metadata, we will not be able to tell if the array
		 * is in-sync or not - we must assume it is not.  Therefore,
		 * it is impossible to rebuild a drive.
		 *
		 * Even if there is metadata, the on-disk information may
		 * indicate that the array is not in-sync and it will then
		 * fail at that time.
		 *
		 * User could specify 'nosync' option if desperate.
		 */
		rs->ti->error = "Unable to rebuild drive while array is not in-sync";
		return -EINVAL;
	}

	return 0;
}
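/*
 * To illustrate the rules above, hypothetical 3-device portions of a
 * table line (the leading "3" is the raid device count; device names
 * are made up):
 *
 *	3 - /dev/sdb - /dev/sdc - /dev/sdd		no metadata devices
 *	3 253:4 /dev/sdb 253:5 /dev/sdc 253:6 /dev/sdd	meta/data pairs
 *	3 - - - /dev/sdc - /dev/sdd			first data dev absent
 *
 * A pair like "253:4 -" (a metadata device without its data device) is
 * rejected by the loop above.
 */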
/*
 * validate_region_size
 * @rs
 * @region_size: region size in sectors.  If 0, pick a size (4MiB default).
 *
 * Set rs->md.bitmap_info.chunksize (which really refers to 'region size').
 * Ensure that (ti->len/region_size < 2^21) - required by MD bitmap.
 *
 * Returns: 0 on success, -EINVAL on failure.
 */
static int validate_region_size(struct raid_set *rs, unsigned long region_size)
{
	unsigned long min_region_size = rs->ti->len / (1 << 21);

	if (rs_is_raid0(rs))
		return 0;

	if (!region_size) {
		/*
		 * Choose a reasonable default.  All figures in sectors.
		 */
		if (min_region_size > (1 << 13)) {
			/* If not a power of 2, make it the next power of 2 */
			region_size = roundup_pow_of_two(min_region_size);
			DMINFO("Choosing default region size of %lu sectors",
			       region_size);
		} else {
			DMINFO("Choosing default region size of 4MiB");
			region_size = 1 << 13; /* sectors */
		}
	} else {
		/*
		 * Validate user-supplied value.
		 */
		if (region_size > rs->ti->len) {
			rs->ti->error = "Supplied region size is too large";
			return -EINVAL;
		}

		if (region_size < min_region_size) {
			DMERR("Supplied region_size (%lu sectors) below minimum (%lu)",
			      region_size, min_region_size);
			rs->ti->error = "Supplied region size is too small";
			return -EINVAL;
		}

		if (!is_power_of_2(region_size)) {
			rs->ti->error = "Region size is not a power of 2";
			return -EINVAL;
		}

		if (region_size < rs->md.chunk_sectors) {
			rs->ti->error = "Region size is smaller than the chunk size";
			return -EINVAL;
		}
	}

	/*
	 * Convert sectors to bytes.
	 */
	rs->md.bitmap_info.chunksize = to_bytes(region_size);

	return 0;
}
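/*
 * Example of the default sizing above (hypothetical target lengths):
 * for a 1 TiB target, ti->len = 2^31 sectors, so min_region_size =
 * 2^31 / 2^21 = 1024 sectors, which is below 2^13; the 4 MiB default
 * (8192 sectors) is used.  Only targets larger than 2^34 sectors
 * (8 TiB) push min_region_size past 2^13 and take the
 * roundup_pow_of_two() path.
 */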
/*
 * validate_raid_redundancy
 * @rs
 *
 * Determine if there are enough devices in the array that haven't
 * failed (or are being rebuilt) to form a usable array.
 *
 * Returns: 0 on success, -EINVAL on failure.
 */
static int validate_raid_redundancy(struct raid_set *rs)
{
	unsigned int i, rebuild_cnt = 0;
	unsigned int rebuilds_per_group = 0, copies, raid_disks;
	unsigned int group_size, last_group_start;

	for (i = 0; i < rs->raid_disks; i++)
		if (!test_bit(FirstUse, &rs->dev[i].rdev.flags) &&
		    ((!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
		      !rs->dev[i].rdev.sb_page)))
			rebuild_cnt++;

	switch (rs->md.level) {
	case 0:
		break;
	case 1:
		if (rebuild_cnt >= rs->md.raid_disks)
			goto too_many;
		break;
	case 4:
	case 5:
	case 6:
		if (rebuild_cnt > rs->raid_type->parity_devs)
			goto too_many;
		break;
	case 10:
		copies = raid10_md_layout_to_copies(rs->md.new_layout);
		if (copies < 2) {
			DMERR("Bogus raid10 data copies < 2!");
			return -EINVAL;
		}

		if (rebuild_cnt < copies)
			break;

		/*
		 * It is possible to have a higher rebuild count for RAID10,
		 * as long as the failed devices occur in different mirror
		 * groups (i.e. different stripes).
		 *
		 * When checking "near" format, make sure no adjacent devices
		 * have failed beyond what can be handled.  In addition to the
		 * simple case where the number of devices is a multiple of the
		 * number of copies, we must also handle cases where the number
		 * of devices is not a multiple of the number of copies.
		 * E.g.	dev1 dev2 dev3 dev4 dev5
		 *	  A    A    B	 B    C
		 *	  C    D    D	 E    E
		 */
		raid_disks = min(rs->raid_disks, rs->md.raid_disks);
		if (__is_raid10_near(rs->md.new_layout)) {
			for (i = 0; i < raid_disks; i++) {
				if (!(i % copies))
					rebuilds_per_group = 0;
				if ((!rs->dev[i].rdev.sb_page ||
				    !test_bit(In_sync, &rs->dev[i].rdev.flags)) &&
				    (++rebuilds_per_group >= copies))
					goto too_many;
			}
			break;
		}

		/*
		 * When checking "far" and "offset" formats, we need to ensure
		 * that the device that holds its copy is not also dead or
		 * being rebuilt.  (Note that "far" and "offset" formats only
		 * support two copies right now.  These formats also only ever
		 * use the 'use_far_sets' variant.)
		 *
		 * This check is somewhat complicated by the need to account
		 * for arrays that are not a multiple of (far) copies.  This
		 * results in the need to treat the last (potentially larger)
		 * set differently.
		 */
		group_size = (raid_disks / copies);
		last_group_start = (raid_disks / group_size) - 1;
		last_group_start *= group_size;
		for (i = 0; i < raid_disks; i++) {
			if (!(i % copies) && !(i > last_group_start))
				rebuilds_per_group = 0;
			if ((!rs->dev[i].rdev.sb_page ||
			    !test_bit(In_sync, &rs->dev[i].rdev.flags)) &&
			    (++rebuilds_per_group >= copies))
				goto too_many;
		}
		break;
	default:
		if (rebuild_cnt)
			return -EINVAL;
	}

	return 0;

too_many:
	return -EINVAL;
}
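/*
 * Worked example for the far/offset grouping above (hypothetical set):
 * raid_disks = 5 and copies = 2 give group_size = 5 / 2 = 2 and
 * last_group_start = (5 / 2 - 1) * 2 = 2, so the devices fall into the
 * sets {0,1} and the larger trailing set {2,3,4}; a second failed
 * device within one set trips the too_many exit.
 */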
/*
 * Possible arguments are...
 *	<chunk_size> [optional_args]
 *
 * Argument definitions
 *    <chunk_size>			The number of sectors per disk that
 *					will form the "stripe"
 *    [[no]sync]			Force or prevent recovery of the
 *					entire array
 *    [rebuild <idx>]			Rebuild the drive indicated by the index
 *    [daemon_sleep <ms>]		Time between bitmap daemon work to
 *					clear bits
 *    [min_recovery_rate <kB/sec/disk>]	Throttle RAID initialization
 *    [max_recovery_rate <kB/sec/disk>]	Throttle RAID initialization
 *    [write_mostly <idx>]		Indicate a write mostly drive via index
 *    [max_write_behind <sectors>]	See '--write-behind=' (man mdadm)
 *    [stripe_cache <sectors>]		Stripe cache size for higher RAIDs
 *    [region_size <sectors>]		Defines granularity of bitmap
 *    [journal_dev <dev>]		raid4/5/6 journaling device
 *					(i.e. write hole closing log)
 *
 * RAID10-only options:
 *    [raid10_copies <# copies>]	Number of copies.  (Default: 2)
 *    [raid10_format <near|far|offset>]	Layout algorithm.  (Default: near)
 */
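/*
 * A hypothetical parameter string matching the grammar above (the
 * device pairs documented at parse_dev_params() would follow it):
 *
 *	raid5_ls 3 64 region_size 1024
 *
 * i.e. 3 raid parameters: a 64-sector (32 KiB) chunk size plus the
 * "region_size 1024" key/value pair.
 */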
static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
			     unsigned int num_raid_params)
{
	int value, raid10_format = ALGORITHM_RAID10_DEFAULT;
	unsigned int raid10_copies = 2;
	unsigned int i, write_mostly = 0;
	unsigned int region_size = 0;
	sector_t max_io_len;
	const char *arg, *key;
	struct raid_dev *rd;
	struct raid_type *rt = rs->raid_type;

	arg = dm_shift_arg(as);
	num_raid_params--; /* Account for chunk_size argument */

	if (kstrtoint(arg, 10, &value) < 0) {
		rs->ti->error = "Bad numerical argument given for chunk_size";
		return -EINVAL;
	}

	/*
	 * First, parse the in-order required arguments
	 * "chunk_size" is the only argument of this type.
	 */
	if (rt_is_raid1(rt)) {
		if (value)
			DMERR("Ignoring chunk size parameter for RAID 1");
		value = 0;
	} else if (!is_power_of_2(value)) {
		rs->ti->error = "Chunk size must be a power of 2";
		return -EINVAL;
	} else if (value < 8) {
		rs->ti->error = "Chunk size value is too small";
		return -EINVAL;
	}

	rs->md.new_chunk_sectors = rs->md.chunk_sectors = value;

	/*
	 * We set each individual device as In_sync with a completed
	 * 'recovery_offset'.  If there has been a device failure or
	 * replacement then one of the following cases applies:
	 *
	 *   1) User specifies 'rebuild'.
	 *      - Device is reset when param is read.
	 *   2) A new device is supplied.
	 *      - No matching superblock found, resets device.
	 *   3) Device failure was transient and returns on reload.
	 *      - Failure noticed, resets device for bitmap replay.
	 *   4) Device hadn't completed recovery after previous failure.
	 *      - Superblock is read and overrides recovery_offset.
	 *
	 * What is found in the superblocks of the devices is always
	 * authoritative, unless 'rebuild' or '[no]sync' was specified.
	 */
	for (i = 0; i < rs->raid_disks; i++) {
		set_bit(In_sync, &rs->dev[i].rdev.flags);
		rs->dev[i].rdev.recovery_offset = MaxSector;
	}

	/*
	 * Second, parse the unordered optional arguments
	 */
	for (i = 0; i < num_raid_params; i++) {
		key = dm_shift_arg(as);
		if (!key) {
			rs->ti->error = "Not enough raid parameters given";
			return -EINVAL;
		}

		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_NOSYNC))) {
			if (test_and_set_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) {
				rs->ti->error = "Only one 'nosync' argument allowed";
				return -EINVAL;
			}
			continue;
		}
		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_SYNC))) {
			if (test_and_set_bit(__CTR_FLAG_SYNC, &rs->ctr_flags)) {
				rs->ti->error = "Only one 'sync' argument allowed";
				return -EINVAL;
			}
			continue;
		}
		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_USE_NEAR_SETS))) {
			if (test_and_set_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags)) {
				rs->ti->error = "Only one 'raid10_use_near_sets' argument allowed";
				return -EINVAL;
			}
			continue;
		}

		arg = dm_shift_arg(as);
		i++; /* Account for the argument pairs */
		if (!arg) {
			rs->ti->error = "Wrong number of raid parameters given";
			return -EINVAL;
		}

		/*
		 * Parameters that take a string value are checked here.
		 */
		/* "raid10_format {near|offset|far}" */
		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_FORMAT))) {
			if (test_and_set_bit(__CTR_FLAG_RAID10_FORMAT, &rs->ctr_flags)) {
				rs->ti->error = "Only one 'raid10_format' argument pair allowed";
				return -EINVAL;
			}
			if (!rt_is_raid10(rt)) {
				rs->ti->error = "'raid10_format' is an invalid parameter for this RAID type";
				return -EINVAL;
			}
			raid10_format = raid10_name_to_format(arg);
			if (raid10_format < 0) {
				rs->ti->error = "Invalid 'raid10_format' value given";
				return raid10_format;
			}
			continue;
		}

		/* "journal_dev <dev>" */
		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_JOURNAL_DEV))) {
			int r;
			struct md_rdev *jdev;

			if (test_and_set_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags)) {
				rs->ti->error = "Only one raid4/5/6 set journaling device allowed";
				return -EINVAL;
			}
			if (!rt_is_raid456(rt)) {
				rs->ti->error = "'journal_dev' is an invalid parameter for this RAID type";
				return -EINVAL;
			}
			r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table),
					  &rs->journal_dev.dev);
			if (r) {
				rs->ti->error = "raid4/5/6 journal device lookup failure";
				return r;
			}
			jdev = &rs->journal_dev.rdev;
			md_rdev_init(jdev);
			jdev->mddev = &rs->md;
			jdev->bdev = rs->journal_dev.dev->bdev;
			jdev->sectors = bdev_nr_sectors(jdev->bdev);
			if (jdev->sectors < MIN_RAID456_JOURNAL_SPACE) {
				rs->ti->error = "No space for raid4/5/6 journal";
				return -ENOSPC;
			}
			rs->journal_dev.mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
			set_bit(Journal, &jdev->flags);
			continue;
		}

		/* "journal_mode <mode>" ("journal_dev" mandatory!) */
		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_JOURNAL_MODE))) {
			int r;

			if (!test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags)) {
				rs->ti->error = "raid4/5/6 'journal_mode' is invalid without 'journal_dev'";
				return -EINVAL;
			}
			if (test_and_set_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags)) {
				rs->ti->error = "Only one raid4/5/6 'journal_mode' argument allowed";
				return -EINVAL;
			}
			r = dm_raid_journal_mode_to_md(arg);
			if (r < 0) {
				rs->ti->error = "Invalid 'journal_mode' argument";
				return r;
			}
			rs->journal_dev.mode = r;
			continue;
		}

		/*
		 * Parameters with number values from here on.
		 */
		if (kstrtoint(arg, 10, &value) < 0) {
			rs->ti->error = "Bad numerical argument given in raid params";
			return -EINVAL;
		}

		if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_REBUILD))) {
			/*
			 * "rebuild" is being passed in by userspace to provide
			 * indexes of replaced devices and to set up additional
			 * devices on raid level takeover.
			 */
			if (!__within_range(value, 0, rs->raid_disks - 1)) {
				rs->ti->error = "Invalid rebuild index given";
				return -EINVAL;
			}

			if (test_and_set_bit(value, (void *) rs->rebuild_disks)) {
				rs->ti->error = "rebuild for this index already given";
				return -EINVAL;
			}

			rd = rs->dev + value;
			clear_bit(In_sync, &rd->rdev.flags);
			clear_bit(Faulty, &rd->rdev.flags);
			rd->rdev.recovery_offset = 0;
			set_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags);
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_WRITE_MOSTLY))) {
			if (!rt_is_raid1(rt)) {
				rs->ti->error = "write_mostly option is only valid for RAID1";
				return -EINVAL;
			}

			if (!__within_range(value, 0, rs->md.raid_disks - 1)) {
				rs->ti->error = "Invalid write_mostly index given";
				return -EINVAL;
			}

			write_mostly++;
			set_bit(WriteMostly, &rs->dev[value].rdev.flags);
			set_bit(__CTR_FLAG_WRITE_MOSTLY, &rs->ctr_flags);
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_MAX_WRITE_BEHIND))) {
			if (!rt_is_raid1(rt)) {
				rs->ti->error = "max_write_behind option is only valid for RAID1";
				return -EINVAL;
			}

			if (test_and_set_bit(__CTR_FLAG_MAX_WRITE_BEHIND, &rs->ctr_flags)) {
				rs->ti->error = "Only one max_write_behind argument pair allowed";
				return -EINVAL;
			}

			/*
			 * In device-mapper, we specify things in sectors, but
			 * MD records this value in kB
			 */
			if (value < 0 || value / 2 > COUNTER_MAX) {
				rs->ti->error = "Max write-behind limit out of range";
				return -EINVAL;
			}

			rs->md.bitmap_info.max_write_behind = value / 2;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_DAEMON_SLEEP))) {
			if (test_and_set_bit(__CTR_FLAG_DAEMON_SLEEP, &rs->ctr_flags)) {
				rs->ti->error = "Only one daemon_sleep argument pair allowed";
				return -EINVAL;
			}
			if (value < 0) {
				rs->ti->error = "daemon sleep period out of range";
				return -EINVAL;
			}
			rs->md.bitmap_info.daemon_sleep = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_DATA_OFFSET))) {
			/* Userspace passes new data_offset after having extended the data image LV */
			if (test_and_set_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags)) {
				rs->ti->error = "Only one data_offset argument pair allowed";
				return -EINVAL;
			}
			/* Ensure sensible data offset */
			if (value < 0 ||
			    (value && (value < MIN_FREE_RESHAPE_SPACE || value % to_sector(PAGE_SIZE)))) {
				rs->ti->error = "Bogus data_offset value";
				return -EINVAL;
			}
			rs->data_offset = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_DELTA_DISKS))) {
			/* Define the +/-# of disks to add to/remove from the given raid set */
			if (test_and_set_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags)) {
				rs->ti->error = "Only one delta_disks argument pair allowed";
				return -EINVAL;
			}
			/* Ensure MAX_RAID_DEVICES and raid type minimal_devs! */
			if (!__within_range(abs(value), 1, MAX_RAID_DEVICES - rt->minimal_devs)) {
				rs->ti->error = "Too many delta_disks requested";
				return -EINVAL;
			}

			rs->delta_disks = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_STRIPE_CACHE))) {
			if (test_and_set_bit(__CTR_FLAG_STRIPE_CACHE, &rs->ctr_flags)) {
				rs->ti->error = "Only one stripe_cache argument pair allowed";
				return -EINVAL;
			}

			if (!rt_is_raid456(rt)) {
				rs->ti->error = "Inappropriate argument: stripe_cache";
				return -EINVAL;
			}

			if (value < 0) {
				rs->ti->error = "Bogus stripe cache entries value";
				return -EINVAL;
			}
			rs->stripe_cache_entries = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_MIN_RECOVERY_RATE))) {
			if (test_and_set_bit(__CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags)) {
				rs->ti->error = "Only one min_recovery_rate argument pair allowed";
				return -EINVAL;
			}

			if (value < 0) {
				rs->ti->error = "min_recovery_rate out of range";
				return -EINVAL;
			}
			rs->md.sync_speed_min = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_MAX_RECOVERY_RATE))) {
			if (test_and_set_bit(__CTR_FLAG_MAX_RECOVERY_RATE, &rs->ctr_flags)) {
				rs->ti->error = "Only one max_recovery_rate argument pair allowed";
				return -EINVAL;
			}

			if (value < 0) {
				rs->ti->error = "max_recovery_rate out of range";
				return -EINVAL;
			}
			rs->md.sync_speed_max = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_REGION_SIZE))) {
			if (test_and_set_bit(__CTR_FLAG_REGION_SIZE, &rs->ctr_flags)) {
				rs->ti->error = "Only one region_size argument pair allowed";
				return -EINVAL;
			}

			region_size = value;
			rs->requested_bitmap_chunk_sectors = value;
		} else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_COPIES))) {
			if (test_and_set_bit(__CTR_FLAG_RAID10_COPIES, &rs->ctr_flags)) {
				rs->ti->error = "Only one raid10_copies argument pair allowed";
				return -EINVAL;
			}

			if (!__within_range(value, 2, rs->md.raid_disks)) {
				rs->ti->error = "Bad value for 'raid10_copies'";
				return -EINVAL;
			}

			raid10_copies = value;
		} else {
			DMERR("Unable to parse RAID parameter: %s", key);
			rs->ti->error = "Unable to parse RAID parameter";
			return -EINVAL;
		}
	}

	if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags) &&
	    test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) {
		rs->ti->error = "sync and nosync are mutually exclusive";
		return -EINVAL;
	}

	if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags) &&
	    (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags) ||
	     test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))) {
		rs->ti->error = "sync/nosync and rebuild are mutually exclusive";
		return -EINVAL;
	}
	if (write_mostly >= rs->md.raid_disks) {
		rs->ti->error = "Can't set all raid1 devices to write_mostly";
		return -EINVAL;
	}

	if (rs->md.sync_speed_max &&
	    rs->md.sync_speed_min > rs->md.sync_speed_max) {
		rs->ti->error = "Bogus recovery rates";
		return -EINVAL;
	}

	if (validate_region_size(rs, region_size))
		return -EINVAL;

	if (rs->md.chunk_sectors)
		max_io_len = rs->md.chunk_sectors;
	else
		max_io_len = region_size;

	if (dm_set_target_max_io_len(rs->ti, max_io_len))
		return -EINVAL;

	if (rt_is_raid10(rt)) {
		if (raid10_copies > rs->md.raid_disks) {
			rs->ti->error = "Not enough devices to satisfy specification";
			return -EINVAL;
		}

		rs->md.new_layout = raid10_format_to_md_layout(rs, raid10_format, raid10_copies);
		if (rs->md.new_layout < 0) {
			rs->ti->error = "Error getting raid10 format";
			return rs->md.new_layout;
		}

		rt = get_raid_type_by_ll(10, rs->md.new_layout);
		if (!rt) {
			rs->ti->error = "Failed to recognize new raid10 layout";
			return -EINVAL;
		}

		if ((rt->algorithm == ALGORITHM_RAID10_DEFAULT ||
		     rt->algorithm == ALGORITHM_RAID10_NEAR) &&
		    test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags)) {
			rs->ti->error = "RAID10 format 'near' and 'raid10_use_near_sets' are incompatible";
			return -EINVAL;
		}
	}

	rs->raid10_copies = raid10_copies;

	/* Assume there are no metadata devices until the drives are parsed */
	rs->md.persistent = 0;
	rs->md.external = 1;

	/* Check, if any invalid ctr arguments have been passed in for the raid level */
	return rs_check_for_valid_flags(rs);
}

/* Set raid4/5/6 cache size */
static int rs_set_raid456_stripe_cache(struct raid_set *rs)
{
	int r;
	struct r5conf *conf;
	struct mddev *mddev = &rs->md;
	uint32_t min_stripes = max(mddev->chunk_sectors, mddev->new_chunk_sectors) / 2;
	uint32_t nr_stripes = rs->stripe_cache_entries;

	if (!rt_is_raid456(rs->raid_type)) {
		rs->ti->error = "Inappropriate raid level; cannot change stripe_cache size";
		return -EINVAL;
	}

	if (nr_stripes < min_stripes) {
		DMINFO("Adjusting requested %u stripe cache entries to %u to suit stripe size",
		       nr_stripes, min_stripes);
		nr_stripes = min_stripes;
	}

	conf = mddev->private;
	if (!conf) {
		rs->ti->error = "Cannot change stripe_cache size on inactive RAID set";
		return -EINVAL;
	}

	/* Try setting number of stripes in raid456 stripe cache */
	if (conf->min_nr_stripes != nr_stripes) {
		r = raid5_set_cache_size(mddev, nr_stripes);
		if (r) {
			rs->ti->error = "Failed to set raid4/5/6 stripe cache size";
			return r;
		}

		DMINFO("%u stripe cache entries", nr_stripes);
	}

	return 0;
}
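/*
 * Sizing example for the helper above (hypothetical numbers): with a
 * 1024-sector (512 KiB) chunk, min_stripes = 1024 / 2 = 512, so the
 * default of 256 stripe_cache_entries set in raid_set_alloc() would be
 * bumped to 512 before raid5_set_cache_size() is called.
 */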
1589 */ 1590 static sector_t __rdev_sectors(struct raid_set *rs) 1591 { 1592 int i; 1593 1594 for (i = 0; i < rs->raid_disks; i++) { 1595 struct md_rdev *rdev = &rs->dev[i].rdev; 1596 1597 if (!test_bit(Journal, &rdev->flags) && 1598 rdev->bdev && rdev->sectors) 1599 return rdev->sectors; 1600 } 1601 1602 return 0; 1603 } 1604 1605 /* Check that calculated dev_sectors fits all component devices. */ 1606 static int _check_data_dev_sectors(struct raid_set *rs) 1607 { 1608 sector_t ds = ~0; 1609 struct md_rdev *rdev; 1610 1611 rdev_for_each(rdev, &rs->md) 1612 if (!test_bit(Journal, &rdev->flags) && rdev->bdev) { 1613 ds = min(ds, bdev_nr_sectors(rdev->bdev)); 1614 if (ds < rs->md.dev_sectors) { 1615 rs->ti->error = "Component device(s) too small"; 1616 return -EINVAL; 1617 } 1618 } 1619 1620 return 0; 1621 } 1622 1623 /* Calculate the sectors per device and per array used for @rs */ 1624 static int rs_set_dev_and_array_sectors(struct raid_set *rs, sector_t sectors, bool use_mddev) 1625 { 1626 int delta_disks; 1627 unsigned int data_stripes; 1628 sector_t array_sectors = sectors, dev_sectors = sectors; 1629 struct mddev *mddev = &rs->md; 1630 1631 if (use_mddev) { 1632 delta_disks = mddev->delta_disks; 1633 data_stripes = mddev_data_stripes(rs); 1634 } else { 1635 delta_disks = rs->delta_disks; 1636 data_stripes = rs_data_stripes(rs); 1637 } 1638 1639 /* Special raid1 case w/o delta_disks support (yet) */ 1640 if (rt_is_raid1(rs->raid_type)) 1641 ; 1642 else if (rt_is_raid10(rs->raid_type)) { 1643 if (rs->raid10_copies < 2 || 1644 delta_disks < 0) { 1645 rs->ti->error = "Bogus raid10 data copies or delta disks"; 1646 return -EINVAL; 1647 } 1648 1649 dev_sectors *= rs->raid10_copies; 1650 if (sector_div(dev_sectors, data_stripes)) 1651 goto bad; 1652 1653 array_sectors = (data_stripes + delta_disks) * dev_sectors; 1654 if (sector_div(array_sectors, rs->raid10_copies)) 1655 goto bad; 1656 1657 } else if (sector_div(dev_sectors, data_stripes)) 1658 goto bad; 1659 1660 else 1661 /* Striped layouts */ 1662 array_sectors = (data_stripes + delta_disks) * dev_sectors; 1663 1664 mddev->array_sectors = array_sectors; 1665 mddev->dev_sectors = dev_sectors; 1666 rs_set_rdev_sectors(rs); 1667 1668 return _check_data_dev_sectors(rs); 1669 bad: 1670 rs->ti->error = "Target length not divisible by number of data devices"; 1671 return -EINVAL; 1672 } 1673 1674 /* Setup recovery on @rs */ 1675 static void rs_setup_recovery(struct raid_set *rs, sector_t dev_sectors) 1676 { 1677 /* raid0 does not recover */ 1678 if (rs_is_raid0(rs)) 1679 rs->md.recovery_cp = MaxSector; 1680 /* 1681 * A raid6 set has to be recovered either 1682 * completely or for the grown part to 1683 * ensure proper parity and Q-Syndrome 1684 */ 1685 else if (rs_is_raid6(rs)) 1686 rs->md.recovery_cp = dev_sectors; 1687 /* 1688 * Other raid set types may skip recovery 1689 * depending on the 'nosync' flag. 1690 */ 1691 else 1692 rs->md.recovery_cp = test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags) 1693 ? 
/* Setup recovery on @rs */
static void rs_setup_recovery(struct raid_set *rs, sector_t dev_sectors)
{
	/* raid0 does not recover */
	if (rs_is_raid0(rs))
		rs->md.recovery_cp = MaxSector;
	/*
	 * A raid6 set has to be recovered either
	 * completely or for the grown part to
	 * ensure proper parity and Q-Syndrome
	 */
	else if (rs_is_raid6(rs))
		rs->md.recovery_cp = dev_sectors;
	/*
	 * Other raid set types may skip recovery
	 * depending on the 'nosync' flag.
	 */
	else
		rs->md.recovery_cp = test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)
				     ? MaxSector : dev_sectors;
}

static void do_table_event(struct work_struct *ws)
{
	struct raid_set *rs = container_of(ws, struct raid_set, md.event_work);

	smp_rmb(); /* Make sure we access the most recent mddev properties */
	if (!rs_is_reshaping(rs)) {
		if (rs_is_raid10(rs))
			rs_set_rdev_sectors(rs);
		rs_set_capacity(rs);
	}
	dm_table_event(rs->ti->table);
}

/*
 * Make sure a valid takeover (level switch) is being requested on @rs
 *
 * Conversions of raid sets from one MD personality to another
 * have to conform to restrictions which are enforced here.
 */
static int rs_check_takeover(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;
	unsigned int near_copies;

	if (rs->md.degraded) {
		rs->ti->error = "Can't takeover degraded raid set";
		return -EPERM;
	}

	if (rs_is_reshaping(rs)) {
		rs->ti->error = "Can't takeover reshaping raid set";
		return -EPERM;
	}

	switch (mddev->level) {
	case 0:
		/* raid0 -> raid1/5 with one disk */
		if ((mddev->new_level == 1 || mddev->new_level == 5) &&
		    mddev->raid_disks == 1)
			return 0;

		/* raid0 -> raid10 */
		if (mddev->new_level == 10 &&
		    !(rs->raid_disks % mddev->raid_disks))
			return 0;

		/* raid0 with multiple disks -> raid4/5/6 */
		if (__within_range(mddev->new_level, 4, 6) &&
		    mddev->new_layout == ALGORITHM_PARITY_N &&
		    mddev->raid_disks > 1)
			return 0;

		break;

	case 10:
		/* Can't takeover raid10_offset! */
		if (__is_raid10_offset(mddev->layout))
			break;

		near_copies = __raid10_near_copies(mddev->layout);

		/* raid10* -> raid0 */
		if (mddev->new_level == 0) {
			/* Can takeover raid10_near with raid disks divisible by data copies! */
			if (near_copies > 1 &&
			    !(mddev->raid_disks % near_copies)) {
				mddev->raid_disks /= near_copies;
				mddev->delta_disks = mddev->raid_disks;
				return 0;
			}

			/* Can takeover raid10_far */
			if (near_copies == 1 &&
			    __raid10_far_copies(mddev->layout) > 1)
				return 0;

			break;
		}

		/* raid10_{near,far} -> raid1 */
		if (mddev->new_level == 1 &&
		    max(near_copies, __raid10_far_copies(mddev->layout)) == mddev->raid_disks)
			return 0;

		/* raid10_{near,far} with 2 disks -> raid4/5 */
		if (__within_range(mddev->new_level, 4, 5) &&
		    mddev->raid_disks == 2)
			return 0;
		break;

	case 1:
		/* raid1 with 2 disks -> raid4/5 */
		if (__within_range(mddev->new_level, 4, 5) &&
		    mddev->raid_disks == 2) {
			mddev->degraded = 1;
			return 0;
		}

		/* raid1 -> raid0 */
		if (mddev->new_level == 0 &&
		    mddev->raid_disks == 1)
			return 0;

		/* raid1 -> raid10 */
		if (mddev->new_level == 10)
			return 0;
		break;

	case 4:
		/* raid4 -> raid0 */
		if (mddev->new_level == 0)
			return 0;

		/* raid4 -> raid1/5 with 2 disks */
		if ((mddev->new_level == 1 || mddev->new_level == 5) &&
		    mddev->raid_disks == 2)
			return 0;

		/* raid4 -> raid5/6 with parity N */
		if (__within_range(mddev->new_level, 5, 6) &&
		    mddev->layout == ALGORITHM_PARITY_N)
			return 0;
		break;

	case 5:
		/* raid5 with parity N -> raid0 */
		if (mddev->new_level == 0 &&
		    mddev->layout == ALGORITHM_PARITY_N)
			return 0;

		/* raid5 with parity N -> raid4 */
		if (mddev->new_level == 4 &&
		    mddev->layout == ALGORITHM_PARITY_N)
			return 0;

		/* raid5 with 2 disks -> raid1/4/10 */
		if ((mddev->new_level == 1 || mddev->new_level == 4 || mddev->new_level == 10) &&
		    mddev->raid_disks == 2)
			return 0;

		/* raid5_* -> raid6_*_6 with Q-Syndrome N (e.g. raid5_ra -> raid6_ra_6) */
		if (mddev->new_level == 6 &&
		    ((mddev->layout == ALGORITHM_PARITY_N && mddev->new_layout == ALGORITHM_PARITY_N) ||
		     __within_range(mddev->new_layout, ALGORITHM_LEFT_ASYMMETRIC_6, ALGORITHM_RIGHT_SYMMETRIC_6)))
			return 0;
		break;

	case 6:
		/* raid6 with parity N -> raid0 */
		if (mddev->new_level == 0 &&
		    mddev->layout == ALGORITHM_PARITY_N)
			return 0;

		/* raid6 with parity N -> raid4 */
		if (mddev->new_level == 4 &&
		    mddev->layout == ALGORITHM_PARITY_N)
			return 0;

		/* raid6_*_n with Q-Syndrome N -> raid5_* */
		if (mddev->new_level == 5 &&
		    ((mddev->layout == ALGORITHM_PARITY_N && mddev->new_layout == ALGORITHM_PARITY_N) ||
		     __within_range(mddev->new_layout, ALGORITHM_LEFT_ASYMMETRIC, ALGORITHM_RIGHT_SYMMETRIC)))
			return 0;
		break;

	default:
		break;
	}

	rs->ti->error = "takeover not possible";
	return -EINVAL;
}

/* True if @rs requested to be taken over */
static bool rs_takeover_requested(struct raid_set *rs)
{
	return rs->md.new_level != rs->md.level;
}
rs->md.delta_disks : rs->delta_disks) || 1879 rs->md.new_layout != rs->md.layout || 1880 rs->md.new_chunk_sectors != rs->md.chunk_sectors; 1881 } 1882 1883 /* True if @rs is requested to reshape by ctr */ 1884 static bool rs_reshape_requested(struct raid_set *rs) 1885 { 1886 bool change; 1887 struct mddev *mddev = &rs->md; 1888 1889 if (rs_takeover_requested(rs)) 1890 return false; 1891 1892 if (rs_is_raid0(rs)) 1893 return false; 1894 1895 change = rs_is_layout_change(rs, false); 1896 1897 /* Historical case to support raid1 reshape without delta disks */ 1898 if (rs_is_raid1(rs)) { 1899 if (rs->delta_disks) 1900 return !!rs->delta_disks; 1901 1902 return !change && 1903 mddev->raid_disks != rs->raid_disks; 1904 } 1905 1906 if (rs_is_raid10(rs)) 1907 return change && 1908 !__is_raid10_far(mddev->new_layout) && 1909 rs->delta_disks >= 0; 1910 1911 return change; 1912 } 1913 1914 /* Features */ 1915 #define FEATURE_FLAG_SUPPORTS_V190 0x1 /* Supports extended superblock */ 1916 1917 /* State flags for sb->flags */ 1918 #define SB_FLAG_RESHAPE_ACTIVE 0x1 1919 #define SB_FLAG_RESHAPE_BACKWARDS 0x2 1920 1921 /* 1922 * This structure is never routinely used by userspace, unlike md superblocks. 1923 * Devices with this superblock should only ever be accessed via device-mapper. 1924 */ 1925 #define DM_RAID_MAGIC 0x64526D44 1926 struct dm_raid_superblock { 1927 __le32 magic; /* "DmRd" */ 1928 __le32 compat_features; /* Used to indicate compatible features (like 1.9.0 ondisk metadata extension) */ 1929 1930 __le32 num_devices; /* Number of devices in this raid set. (Max 64) */ 1931 __le32 array_position; /* The position of this drive in the raid set */ 1932 1933 __le64 events; /* Incremented by md when superblock updated */ 1934 __le64 failed_devices; /* Pre 1.9.0 part of bit field of devices to */ 1935 /* indicate failures (see extension below) */ 1936 1937 /* 1938 * This offset tracks the progress of the repair or replacement of 1939 * an individual drive. 1940 */ 1941 __le64 disk_recovery_offset; 1942 1943 /* 1944 * This offset tracks the progress of the initial raid set 1945 * synchronisation/parity calculation. 1946 */ 1947 __le64 array_resync_offset; 1948 1949 /* 1950 * raid characteristics 1951 */ 1952 __le32 level; 1953 __le32 layout; 1954 __le32 stripe_sectors; 1955 1956 /******************************************************************** 1957 * BELOW FOLLOW V1.9.0 EXTENSIONS TO THE PRISTINE SUPERBLOCK FORMAT!!! 1958 * 1959 * FEATURE_FLAG_SUPPORTS_V190 in the compat_features member indicates that those exist 1960 */ 1961 1962 __le32 flags; /* Flags defining array states for reshaping */ 1963 1964 /* 1965 * This offset tracks the progress of a raid 1966 * set reshape in order to be able to restart it 1967 */ 1968 __le64 reshape_position; 1969 1970 /* 1971 * These define the properties of the array in case of an interrupted reshape 1972 */ 1973 __le32 new_level; 1974 __le32 new_layout; 1975 __le32 new_stripe_sectors; 1976 __le32 delta_disks; 1977 1978 __le64 array_sectors; /* Array size in sectors */ 1979 1980 /* 1981 * Sector offsets to data on devices (reshaping). 
1982 	 * Needed to support out of place reshaping, thus
1983 	 * not writing over any stripes whilst converting
1984 	 * them from old to new layout
1985 	 */
1986 	__le64 data_offset;
1987 	__le64 new_data_offset;
1988
1989 	__le64 sectors; /* Used device size in sectors */
1990
1991 	/*
1992 	 * Additional Bit field of devices indicating failures to support
1993 	 * up to 256 devices with the 1.9.0 on-disk metadata format
1994 	 */
1995 	__le64 extended_failed_devices[DISKS_ARRAY_ELEMS - 1];
1996
1997 	__le32 incompat_features;	/* Used to indicate any incompatible features */
1998
1999 	/* Always set rest up to logical block size to 0 when writing (see get_metadata_device() below). */
2000 } __packed;
2001
2002 /*
2003  * Check for reshape constraints on raid set @rs:
2004  *
2005  * - reshape function non-existent
2006  * - degraded set
2007  * - ongoing recovery
2008  * - ongoing reshape
2009  *
2010  * Returns 0 if none, or -EPERM if a constraint is violated, with the
2011  * error message set in @rs->ti->error
2012  */
2013 static int rs_check_reshape(struct raid_set *rs)
2014 {
2015 	struct mddev *mddev = &rs->md;
2016
2017 	if (!mddev->pers || !mddev->pers->check_reshape)
2018 		rs->ti->error = "Reshape not supported";
2019 	else if (mddev->degraded)
2020 		rs->ti->error = "Can't reshape degraded raid set";
2021 	else if (rs_is_recovering(rs))
2022 		rs->ti->error = "Convert request on recovering raid set prohibited";
2023 	else if (rs_is_reshaping(rs))
2024 		rs->ti->error = "raid set already reshaping!";
2025 	else if (!(rs_is_raid1(rs) || rs_is_raid10(rs) || rs_is_raid456(rs)))
2026 		rs->ti->error = "Reshaping only supported for raid1/4/5/6/10";
2027 	else
2028 		return 0;
2029
2030 	return -EPERM;
2031 }
2032
2033 static int read_disk_sb(struct md_rdev *rdev, int size, bool force_reload)
2034 {
2035 	BUG_ON(!rdev->sb_page);
2036
2037 	if (rdev->sb_loaded && !force_reload)
2038 		return 0;
2039
2040 	rdev->sb_loaded = 0;
2041
2042 	if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, true)) {
2043 		DMERR("Failed to read superblock of device at position %d",
2044 		      rdev->raid_disk);
2045 		md_error(rdev->mddev, rdev);
2046 		set_bit(Faulty, &rdev->flags);
2047 		return -EIO;
2048 	}
2049
2050 	rdev->sb_loaded = 1;
2051
2052 	return 0;
2053 }
2054
2055 static void sb_retrieve_failed_devices(struct dm_raid_superblock *sb, uint64_t *failed_devices)
2056 {
2057 	failed_devices[0] = le64_to_cpu(sb->failed_devices);
2058 	memset(failed_devices + 1, 0, sizeof(sb->extended_failed_devices));
2059
2060 	if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190) {
2061 		int i = ARRAY_SIZE(sb->extended_failed_devices);
2062
2063 		while (i--)
2064 			failed_devices[i+1] = le64_to_cpu(sb->extended_failed_devices[i]);
2065 	}
2066 }
2067
2068 static void sb_update_failed_devices(struct dm_raid_superblock *sb, uint64_t *failed_devices)
2069 {
2070 	int i = ARRAY_SIZE(sb->extended_failed_devices);
2071
2072 	sb->failed_devices = cpu_to_le64(failed_devices[0]);
2073 	while (i--)
2074 		sb->extended_failed_devices[i] = cpu_to_le64(failed_devices[i+1]);
2075 }
2076
2077 /*
2078  * Synchronize the superblock members with the raid set properties
2079  *
2080  * All superblock data is little endian.
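 * (hence the cpu_to_le32()/cpu_to_le64() conversions on every member
 * store below)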
2081 */ 2082 static void super_sync(struct mddev *mddev, struct md_rdev *rdev) 2083 { 2084 bool update_failed_devices = false; 2085 unsigned int i; 2086 uint64_t failed_devices[DISKS_ARRAY_ELEMS]; 2087 struct dm_raid_superblock *sb; 2088 struct raid_set *rs = container_of(mddev, struct raid_set, md); 2089 2090 /* No metadata device, no superblock */ 2091 if (!rdev->meta_bdev) 2092 return; 2093 2094 BUG_ON(!rdev->sb_page); 2095 2096 sb = page_address(rdev->sb_page); 2097 2098 sb_retrieve_failed_devices(sb, failed_devices); 2099 2100 for (i = 0; i < rs->raid_disks; i++) 2101 if (!rs->dev[i].data_dev || test_bit(Faulty, &rs->dev[i].rdev.flags)) { 2102 update_failed_devices = true; 2103 set_bit(i, (void *) failed_devices); 2104 } 2105 2106 if (update_failed_devices) 2107 sb_update_failed_devices(sb, failed_devices); 2108 2109 sb->magic = cpu_to_le32(DM_RAID_MAGIC); 2110 sb->compat_features = cpu_to_le32(FEATURE_FLAG_SUPPORTS_V190); 2111 2112 sb->num_devices = cpu_to_le32(mddev->raid_disks); 2113 sb->array_position = cpu_to_le32(rdev->raid_disk); 2114 2115 sb->events = cpu_to_le64(mddev->events); 2116 2117 sb->disk_recovery_offset = cpu_to_le64(rdev->recovery_offset); 2118 sb->array_resync_offset = cpu_to_le64(mddev->recovery_cp); 2119 2120 sb->level = cpu_to_le32(mddev->level); 2121 sb->layout = cpu_to_le32(mddev->layout); 2122 sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors); 2123 2124 /******************************************************************** 2125 * BELOW FOLLOW V1.9.0 EXTENSIONS TO THE PRISTINE SUPERBLOCK FORMAT!!! 2126 * 2127 * FEATURE_FLAG_SUPPORTS_V190 in the compat_features member indicates that those exist 2128 */ 2129 sb->new_level = cpu_to_le32(mddev->new_level); 2130 sb->new_layout = cpu_to_le32(mddev->new_layout); 2131 sb->new_stripe_sectors = cpu_to_le32(mddev->new_chunk_sectors); 2132 2133 sb->delta_disks = cpu_to_le32(mddev->delta_disks); 2134 2135 smp_rmb(); /* Make sure we access most recent reshape position */ 2136 sb->reshape_position = cpu_to_le64(mddev->reshape_position); 2137 if (le64_to_cpu(sb->reshape_position) != MaxSector) { 2138 /* Flag ongoing reshape */ 2139 sb->flags |= cpu_to_le32(SB_FLAG_RESHAPE_ACTIVE); 2140 2141 if (mddev->delta_disks < 0 || mddev->reshape_backwards) 2142 sb->flags |= cpu_to_le32(SB_FLAG_RESHAPE_BACKWARDS); 2143 } else { 2144 /* Clear reshape flags */ 2145 sb->flags &= ~(cpu_to_le32(SB_FLAG_RESHAPE_ACTIVE|SB_FLAG_RESHAPE_BACKWARDS)); 2146 } 2147 2148 sb->array_sectors = cpu_to_le64(mddev->array_sectors); 2149 sb->data_offset = cpu_to_le64(rdev->data_offset); 2150 sb->new_data_offset = cpu_to_le64(rdev->new_data_offset); 2151 sb->sectors = cpu_to_le64(rdev->sectors); 2152 sb->incompat_features = cpu_to_le32(0); 2153 2154 /* Zero out the rest of the payload after the size of the superblock */ 2155 memset(sb + 1, 0, rdev->sb_size - sizeof(*sb)); 2156 } 2157 2158 /* 2159 * super_load 2160 * 2161 * This function creates a superblock if one is not found on the device 2162 * and will decide which superblock to use if there's a choice. 
2163 * 2164 * Return: 1 if use rdev, 0 if use refdev, -Exxx otherwise 2165 */ 2166 static int super_load(struct md_rdev *rdev, struct md_rdev *refdev) 2167 { 2168 int r; 2169 struct dm_raid_superblock *sb; 2170 struct dm_raid_superblock *refsb; 2171 uint64_t events_sb, events_refsb; 2172 2173 r = read_disk_sb(rdev, rdev->sb_size, false); 2174 if (r) 2175 return r; 2176 2177 sb = page_address(rdev->sb_page); 2178 2179 /* 2180 * Two cases that we want to write new superblocks and rebuild: 2181 * 1) New device (no matching magic number) 2182 * 2) Device specified for rebuild (!In_sync w/ offset == 0) 2183 */ 2184 if ((sb->magic != cpu_to_le32(DM_RAID_MAGIC)) || 2185 (!test_bit(In_sync, &rdev->flags) && !rdev->recovery_offset)) { 2186 super_sync(rdev->mddev, rdev); 2187 2188 set_bit(FirstUse, &rdev->flags); 2189 sb->compat_features = cpu_to_le32(FEATURE_FLAG_SUPPORTS_V190); 2190 2191 /* Force writing of superblocks to disk */ 2192 set_bit(MD_SB_CHANGE_DEVS, &rdev->mddev->sb_flags); 2193 2194 /* Any superblock is better than none, choose that if given */ 2195 return refdev ? 0 : 1; 2196 } 2197 2198 if (!refdev) 2199 return 1; 2200 2201 events_sb = le64_to_cpu(sb->events); 2202 2203 refsb = page_address(refdev->sb_page); 2204 events_refsb = le64_to_cpu(refsb->events); 2205 2206 return (events_sb > events_refsb) ? 1 : 0; 2207 } 2208 2209 static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev) 2210 { 2211 int role; 2212 unsigned int d; 2213 struct mddev *mddev = &rs->md; 2214 uint64_t events_sb; 2215 uint64_t failed_devices[DISKS_ARRAY_ELEMS]; 2216 struct dm_raid_superblock *sb; 2217 uint32_t new_devs = 0, rebuild_and_new = 0, rebuilds = 0; 2218 struct md_rdev *r; 2219 struct dm_raid_superblock *sb2; 2220 2221 sb = page_address(rdev->sb_page); 2222 events_sb = le64_to_cpu(sb->events); 2223 2224 /* 2225 * Initialise to 1 if this is a new superblock. 2226 */ 2227 mddev->events = events_sb ? : 1; 2228 2229 mddev->reshape_position = MaxSector; 2230 2231 mddev->raid_disks = le32_to_cpu(sb->num_devices); 2232 mddev->level = le32_to_cpu(sb->level); 2233 mddev->layout = le32_to_cpu(sb->layout); 2234 mddev->chunk_sectors = le32_to_cpu(sb->stripe_sectors); 2235 2236 /* 2237 * Reshaping is supported, e.g. reshape_position is valid 2238 * in superblock and superblock content is authoritative. 2239 */ 2240 if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190) { 2241 /* Superblock is authoritative wrt given raid set layout! 
*/ 2242 mddev->new_level = le32_to_cpu(sb->new_level); 2243 mddev->new_layout = le32_to_cpu(sb->new_layout); 2244 mddev->new_chunk_sectors = le32_to_cpu(sb->new_stripe_sectors); 2245 mddev->delta_disks = le32_to_cpu(sb->delta_disks); 2246 mddev->array_sectors = le64_to_cpu(sb->array_sectors); 2247 2248 /* raid was reshaping and got interrupted */ 2249 if (le32_to_cpu(sb->flags) & SB_FLAG_RESHAPE_ACTIVE) { 2250 if (test_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags)) { 2251 DMERR("Reshape requested but raid set is still reshaping"); 2252 return -EINVAL; 2253 } 2254 2255 if (mddev->delta_disks < 0 || 2256 (!mddev->delta_disks && (le32_to_cpu(sb->flags) & SB_FLAG_RESHAPE_BACKWARDS))) 2257 mddev->reshape_backwards = 1; 2258 else 2259 mddev->reshape_backwards = 0; 2260 2261 mddev->reshape_position = le64_to_cpu(sb->reshape_position); 2262 rs->raid_type = get_raid_type_by_ll(mddev->level, mddev->layout); 2263 } 2264 2265 } else { 2266 /* 2267 * No takeover/reshaping, because we don't have the extended v1.9.0 metadata 2268 */ 2269 struct raid_type *rt_cur = get_raid_type_by_ll(mddev->level, mddev->layout); 2270 struct raid_type *rt_new = get_raid_type_by_ll(mddev->new_level, mddev->new_layout); 2271 2272 if (rs_takeover_requested(rs)) { 2273 if (rt_cur && rt_new) 2274 DMERR("Takeover raid sets from %s to %s not yet supported by metadata. (raid level change)", 2275 rt_cur->name, rt_new->name); 2276 else 2277 DMERR("Takeover raid sets not yet supported by metadata. (raid level change)"); 2278 return -EINVAL; 2279 } else if (rs_reshape_requested(rs)) { 2280 DMERR("Reshaping raid sets not yet supported by metadata. (raid layout change keeping level)"); 2281 if (mddev->layout != mddev->new_layout) { 2282 if (rt_cur && rt_new) 2283 DMERR(" current layout %s vs new layout %s", 2284 rt_cur->name, rt_new->name); 2285 else 2286 DMERR(" current layout 0x%X vs new layout 0x%X", 2287 le32_to_cpu(sb->layout), mddev->new_layout); 2288 } 2289 if (mddev->chunk_sectors != mddev->new_chunk_sectors) 2290 DMERR(" current stripe sectors %u vs new stripe sectors %u", 2291 mddev->chunk_sectors, mddev->new_chunk_sectors); 2292 if (rs->delta_disks) 2293 DMERR(" current %u disks vs new %u disks", 2294 mddev->raid_disks, mddev->raid_disks + rs->delta_disks); 2295 if (rs_is_raid10(rs)) { 2296 DMERR(" Old layout: %s w/ %u copies", 2297 raid10_md_layout_to_format(mddev->layout), 2298 raid10_md_layout_to_copies(mddev->layout)); 2299 DMERR(" New layout: %s w/ %u copies", 2300 raid10_md_layout_to_format(mddev->new_layout), 2301 raid10_md_layout_to_copies(mddev->new_layout)); 2302 } 2303 return -EINVAL; 2304 } 2305 2306 DMINFO("Discovered old metadata format; upgrading to extended metadata format"); 2307 } 2308 2309 if (!test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) 2310 mddev->recovery_cp = le64_to_cpu(sb->array_resync_offset); 2311 2312 /* 2313 * During load, we set FirstUse if a new superblock was written. 2314 * There are two reasons we might not have a superblock: 2315 * 1) The raid set is brand new - in which case, all of the 2316 * devices must have their In_sync bit set. Also, 2317 * recovery_cp must be 0, unless forced. 2318 * 2) This is a new device being added to an old raid set 2319 * and the new device needs to be rebuilt - in which 2320 * case the In_sync bit will /not/ be set and 2321 * recovery_cp must be MaxSector. 
2322 	 *   3) New device(s) are being added to an old
2323 	 *      raid set during takeover to a higher raid level
2324 	 *      to provide capacity for redundancy or during reshape
2325 	 *      to add capacity to grow the raid set.
2326 	 */
2327 	d = 0;
2328 	rdev_for_each(r, mddev) {
2329 		if (test_bit(Journal, &rdev->flags))
2330 			continue;
2331
2332 		if (test_bit(FirstUse, &r->flags))
2333 			new_devs++;
2334
2335 		if (!test_bit(In_sync, &r->flags)) {
2336 			DMINFO("Device %d specified for rebuild; clearing superblock",
2337 			       r->raid_disk);
2338 			rebuilds++;
2339
2340 			if (test_bit(FirstUse, &r->flags))
2341 				rebuild_and_new++;
2342 		}
2343
2344 		d++;
2345 	}
2346
2347 	if (new_devs == rs->raid_disks || !rebuilds) {
2348 		/* Replace a broken device */
2349 		if (new_devs == rs->raid_disks) {
2350 			DMINFO("Superblocks created for new raid set");
2351 			set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
2352 		} else if (new_devs != rebuilds &&
2353 			   new_devs != rs->delta_disks) {
2354 			DMERR("New device injected into existing raid set without "
2355 			      "'delta_disks' or 'rebuild' parameter specified");
2356 			return -EINVAL;
2357 		}
2358 	} else if (new_devs && new_devs != rebuilds) {
2359 		DMERR("%u 'rebuild' devices cannot be injected into"
2360 		      " a raid set with %u other first-time devices",
2361 		      rebuilds, new_devs);
2362 		return -EINVAL;
2363 	} else if (rebuilds) {
2364 		if (rebuild_and_new && rebuilds != rebuild_and_new) {
2365 			DMERR("new device%s provided without 'rebuild'",
2366 			      new_devs > 1 ? "s" : "");
2367 			return -EINVAL;
2368 		} else if (!test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags) && rs_is_recovering(rs)) {
2369 			DMERR("'rebuild' specified while raid set is not in-sync (recovery_cp=%llu)",
2370 			      (unsigned long long) mddev->recovery_cp);
2371 			return -EINVAL;
2372 		} else if (rs_is_reshaping(rs)) {
2373 			DMERR("'rebuild' specified while raid set is being reshaped (reshape_position=%llu)",
2374 			      (unsigned long long) mddev->reshape_position);
2375 			return -EINVAL;
2376 		}
2377 	}
2378
2379 	/*
2380 	 * Now we set the Faulty bit for those devices that are
2381 	 * recorded in the superblock as failed.
2382 	 */
2383 	sb_retrieve_failed_devices(sb, failed_devices);
2384 	rdev_for_each(r, mddev) {
2385 		if (test_bit(Journal, &rdev->flags) ||
2386 		    !r->sb_page)
2387 			continue;
2388 		sb2 = page_address(r->sb_page);
2389 		sb2->failed_devices = 0;
2390 		memset(sb2->extended_failed_devices, 0, sizeof(sb2->extended_failed_devices));
2391
2392 		/*
2393 		 * Check for any device re-ordering.
2394 		 */
2395 		if (!test_bit(FirstUse, &r->flags) && (r->raid_disk >= 0)) {
2396 			role = le32_to_cpu(sb2->array_position);
2397 			if (role < 0)
2398 				continue;
2399
2400 			if (role != r->raid_disk) {
2401 				if (rs_is_raid10(rs) && __is_raid10_near(mddev->layout)) {
2402 					if (mddev->raid_disks % __raid10_near_copies(mddev->layout) ||
2403 					    rs->raid_disks % rs->raid10_copies) {
2404 						rs->ti->error =
2405 							"Cannot change raid10 near set to odd # of devices!";
2406 						return -EINVAL;
2407 					}
2408
2409 					sb2->array_position = cpu_to_le32(r->raid_disk);
2410
2411 				} else if (!(rs_is_raid10(rs) && rt_is_raid0(rs->raid_type)) &&
2412 					   !(rs_is_raid0(rs) && rt_is_raid10(rs->raid_type)) &&
2413 					   !rt_is_raid1(rs->raid_type)) {
2414 					rs->ti->error = "Cannot change device positions in raid set";
2415 					return -EINVAL;
2416 				}
2417
2418 				DMINFO("raid device #%d now at position #%d", role, r->raid_disk);
2419 			}
2420
2421 			/*
2422 			 * Partial recovery is performed on
2423 			 * returning failed devices.
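			 * The Faulty flag set below is evaluated in
			 * super_validate(), which clears it again and restarts
			 * recovery of the device from offset 0.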
2424 */ 2425 if (test_bit(role, (void *) failed_devices)) 2426 set_bit(Faulty, &r->flags); 2427 } 2428 } 2429 2430 return 0; 2431 } 2432 2433 static int super_validate(struct raid_set *rs, struct md_rdev *rdev) 2434 { 2435 struct mddev *mddev = &rs->md; 2436 struct dm_raid_superblock *sb; 2437 2438 if (rs_is_raid0(rs) || !rdev->sb_page || rdev->raid_disk < 0) 2439 return 0; 2440 2441 sb = page_address(rdev->sb_page); 2442 2443 /* 2444 * If mddev->events is not set, we know we have not yet initialized 2445 * the array. 2446 */ 2447 if (!mddev->events && super_init_validation(rs, rdev)) 2448 return -EINVAL; 2449 2450 if (le32_to_cpu(sb->compat_features) && 2451 le32_to_cpu(sb->compat_features) != FEATURE_FLAG_SUPPORTS_V190) { 2452 rs->ti->error = "Unable to assemble array: Unknown flag(s) in compatible feature flags"; 2453 return -EINVAL; 2454 } 2455 2456 if (sb->incompat_features) { 2457 rs->ti->error = "Unable to assemble array: No incompatible feature flags supported yet"; 2458 return -EINVAL; 2459 } 2460 2461 /* Enable bitmap creation on @rs unless no metadevs or raid0 or journaled raid4/5/6 set. */ 2462 mddev->bitmap_info.offset = (rt_is_raid0(rs->raid_type) || rs->journal_dev.dev) ? 0 : to_sector(4096); 2463 mddev->bitmap_info.default_offset = mddev->bitmap_info.offset; 2464 2465 if (!test_and_clear_bit(FirstUse, &rdev->flags)) { 2466 /* 2467 * Retrieve rdev size stored in superblock to be prepared for shrink. 2468 * Check extended superblock members are present otherwise the size 2469 * will not be set! 2470 */ 2471 if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190) 2472 rdev->sectors = le64_to_cpu(sb->sectors); 2473 2474 rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset); 2475 if (rdev->recovery_offset == MaxSector) 2476 set_bit(In_sync, &rdev->flags); 2477 /* 2478 * If no reshape in progress -> we're recovering single 2479 * disk(s) and have to set the device(s) to out-of-sync 2480 */ 2481 else if (!rs_is_reshaping(rs)) 2482 clear_bit(In_sync, &rdev->flags); /* Mandatory for recovery */ 2483 } 2484 2485 /* 2486 * If a device comes back, set it as not In_sync and no longer faulty. 2487 */ 2488 if (test_and_clear_bit(Faulty, &rdev->flags)) { 2489 rdev->recovery_offset = 0; 2490 clear_bit(In_sync, &rdev->flags); 2491 rdev->saved_raid_disk = rdev->raid_disk; 2492 } 2493 2494 /* Reshape support -> restore repective data offsets */ 2495 rdev->data_offset = le64_to_cpu(sb->data_offset); 2496 rdev->new_data_offset = le64_to_cpu(sb->new_data_offset); 2497 2498 return 0; 2499 } 2500 2501 /* 2502 * Analyse superblocks and select the freshest. 2503 */ 2504 static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) 2505 { 2506 int r; 2507 struct md_rdev *rdev, *freshest; 2508 struct mddev *mddev = &rs->md; 2509 2510 freshest = NULL; 2511 rdev_for_each(rdev, mddev) { 2512 if (test_bit(Journal, &rdev->flags)) 2513 continue; 2514 2515 if (!rdev->meta_bdev) 2516 continue; 2517 2518 /* Set superblock offset/size for metadata device. */ 2519 rdev->sb_start = 0; 2520 rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev); 2521 if (rdev->sb_size < sizeof(struct dm_raid_superblock) || rdev->sb_size > PAGE_SIZE) { 2522 DMERR("superblock size of a logical block is no longer valid"); 2523 return -EINVAL; 2524 } 2525 2526 /* 2527 * Skipping super_load due to CTR_FLAG_SYNC will cause 2528 * the array to undergo initialization again as 2529 * though it were new. This is the intended effect 2530 * of the "sync" directive. 
2531 * 2532 * With reshaping capability added, we must ensure that 2533 * the "sync" directive is disallowed during the reshape. 2534 */ 2535 if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags)) 2536 continue; 2537 2538 r = super_load(rdev, freshest); 2539 2540 switch (r) { 2541 case 1: 2542 freshest = rdev; 2543 break; 2544 case 0: 2545 break; 2546 default: 2547 /* This is a failure to read the superblock from the metadata device. */ 2548 /* 2549 * We have to keep any raid0 data/metadata device pairs or 2550 * the MD raid0 personality will fail to start the array. 2551 */ 2552 if (rs_is_raid0(rs)) 2553 continue; 2554 2555 /* 2556 * We keep the dm_devs to be able to emit the device tuple 2557 * properly on the table line in raid_status() (rather than 2558 * mistakenly acting as if '- -' got passed into the constructor). 2559 * 2560 * The rdev has to stay on the same_set list to allow for 2561 * the attempt to restore faulty devices on second resume. 2562 */ 2563 rdev->raid_disk = rdev->saved_raid_disk = -1; 2564 break; 2565 } 2566 } 2567 2568 if (!freshest) 2569 return 0; 2570 2571 /* 2572 * Validation of the freshest device provides the source of 2573 * validation for the remaining devices. 2574 */ 2575 rs->ti->error = "Unable to assemble array: Invalid superblocks"; 2576 if (super_validate(rs, freshest)) 2577 return -EINVAL; 2578 2579 if (validate_raid_redundancy(rs)) { 2580 rs->ti->error = "Insufficient redundancy to activate array"; 2581 return -EINVAL; 2582 } 2583 2584 rdev_for_each(rdev, mddev) 2585 if (!test_bit(Journal, &rdev->flags) && 2586 rdev != freshest && 2587 super_validate(rs, rdev)) 2588 return -EINVAL; 2589 return 0; 2590 } 2591 2592 /* 2593 * Adjust data_offset and new_data_offset on all disk members of @rs 2594 * for out of place reshaping if requested by constructor 2595 * 2596 * We need free space at the beginning of each raid disk for forward 2597 * and at the end for backward reshapes which userspace has to provide 2598 * via remapping/reordering of space. 2599 */ 2600 static int rs_adjust_data_offsets(struct raid_set *rs) 2601 { 2602 sector_t data_offset = 0, new_data_offset = 0; 2603 struct md_rdev *rdev; 2604 2605 /* Constructor did not request data offset change */ 2606 if (!test_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags)) { 2607 if (!rs_is_reshapable(rs)) 2608 goto out; 2609 2610 return 0; 2611 } 2612 2613 /* HM FIXME: get In_Sync raid_dev? 
	 */
2614 	rdev = &rs->dev[0].rdev;
2615
2616 	if (rs->delta_disks < 0) {
2617 		/*
2618 		 * Removing disks (reshaping backwards):
2619 		 *
2620 		 * - before reshape: data is at offset 0 and free space
2621 		 *		     is at end of each component LV
2622 		 *
2623 		 * - after reshape: data is at offset rs->data_offset != 0 on each component LV
2624 		 */
2625 		data_offset = 0;
2626 		new_data_offset = rs->data_offset;
2627
2628 	} else if (rs->delta_disks > 0) {
2629 		/*
2630 		 * Adding disks (reshaping forwards):
2631 		 *
2632 		 * - before reshape: data is at offset rs->data_offset != 0 and
2633 		 *		     free space is at the beginning of each component LV
2634 		 *
2635 		 * - after reshape: data is at offset 0 on each component LV
2636 		 */
2637 		data_offset = rs->data_offset;
2638 		new_data_offset = 0;
2639
2640 	} else {
2641 		/*
2642 		 * User space passes in 0 for data offset after having removed reshape space
2643 		 *
2644 		 * - or - (data offset != 0)
2645 		 *
2646 		 * Changing RAID layout or chunk size -> toggle offsets
2647 		 *
2648 		 * - before reshape: data is at offset 0 and
2649 		 *		     free space is at end of each component LV
2650 		 *		     -or-
2651 		 *		     data is at offset rs->data_offset != 0 and
2652 		 *		     free space is at the beginning of each component LV
2653 		 *
2654 		 * - after reshape: data is at offset 0 if it was at offset != 0
2655 		 *		    or at offset != 0 if it was at offset 0
2656 		 *		    on each component LV
2657 		 *
2658 		 */
2659 		data_offset = rs->data_offset ? rdev->data_offset : 0;
2660 		new_data_offset = data_offset ? 0 : rs->data_offset;
2661 		set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
2662 	}
2663
2664 	/*
2665 	 * Make sure we have a minimum amount of free sectors per device
2666 	 */
2667 	if (rs->data_offset &&
2668 	    bdev_nr_sectors(rdev->bdev) - rs->md.dev_sectors < MIN_FREE_RESHAPE_SPACE) {
2669 		rs->ti->error = data_offset ? "No space for forward reshape" :
2670 					      "No space for backward reshape";
2671 		return -ENOSPC;
2672 	}
2673 out:
2674 	/*
2675 	 * Raise recovery_cp in case data_offset != 0 to
2676 	 * avoid false recovery positives in the constructor.
2677 	 */
2678 	if (rs->md.recovery_cp < rs->md.dev_sectors)
2679 		rs->md.recovery_cp += rs->dev[0].rdev.data_offset;
2680
2681 	/* Adjust data offsets on all rdevs but not on any raid4/5/6 journal device */
2682 	rdev_for_each(rdev, &rs->md) {
2683 		if (!test_bit(Journal, &rdev->flags)) {
2684 			rdev->data_offset = data_offset;
2685 			rdev->new_data_offset = new_data_offset;
2686 		}
2687 	}
2688
2689 	return 0;
2690 }
2691
2692 /* Userspace reordered disks -> adjust raid_disk indexes in @rs */
2693 static void __reorder_raid_disk_indexes(struct raid_set *rs)
2694 {
2695 	int i = 0;
2696 	struct md_rdev *rdev;
2697
2698 	rdev_for_each(rdev, &rs->md) {
2699 		if (!test_bit(Journal, &rdev->flags)) {
2700 			rdev->raid_disk = i++;
2701 			rdev->saved_raid_disk = rdev->new_raid_disk = -1;
2702 		}
2703 	}
2704 }
2705
2706 /*
2707  * Setup @rs for takeover by a different raid level
2708  */
2709 static int rs_setup_takeover(struct raid_set *rs)
2710 {
2711 	struct mddev *mddev = &rs->md;
2712 	struct md_rdev *rdev;
2713 	unsigned int d = mddev->raid_disks = rs->raid_disks;
2714 	sector_t new_data_offset = rs->dev[0].rdev.data_offset ?
0 : rs->data_offset;
2715
2716 	if (rt_is_raid10(rs->raid_type)) {
2717 		if (rs_is_raid0(rs)) {
2718 			/* Userspace reordered disks -> adjust raid_disk indexes */
2719 			__reorder_raid_disk_indexes(rs);
2720
2721 			/* raid0 -> raid10_far layout */
2722 			mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_FAR,
2723 								   rs->raid10_copies);
2724 		} else if (rs_is_raid1(rs))
2725 			/* raid1 -> raid10_near layout */
2726 			mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR,
2727 								   rs->raid_disks);
2728 		else
2729 			return -EINVAL;
2730
2731 	}
2732
2733 	clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
2734 	mddev->recovery_cp = MaxSector;
2735
2736 	while (d--) {
2737 		rdev = &rs->dev[d].rdev;
2738
2739 		if (test_bit(d, (void *) rs->rebuild_disks)) {
2740 			clear_bit(In_sync, &rdev->flags);
2741 			clear_bit(Faulty, &rdev->flags);
2742 			mddev->recovery_cp = rdev->recovery_offset = 0;
2743 			/* Bitmap has to be created when we do an "up" takeover */
2744 			set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
2745 		}
2746
2747 		rdev->new_data_offset = new_data_offset;
2748 	}
2749
2750 	return 0;
2751 }
2752
2753 /* Prepare @rs for reshape */
2754 static int rs_prepare_reshape(struct raid_set *rs)
2755 {
2756 	bool reshape;
2757 	struct mddev *mddev = &rs->md;
2758
2759 	if (rs_is_raid10(rs)) {
2760 		if (rs->raid_disks != mddev->raid_disks &&
2761 		    __is_raid10_near(mddev->layout) &&
2762 		    rs->raid10_copies &&
2763 		    rs->raid10_copies != __raid10_near_copies(mddev->layout)) {
2764 			/*
2765 			 * raid disks have to be a multiple of data copies to allow this conversion.
2766 			 *
2767 			 * This is actually not a reshape; it is a
2768 			 * rebuild of any additional mirrors per group
2769 			 */
2770 			if (rs->raid_disks % rs->raid10_copies) {
2771 				rs->ti->error = "Can't reshape raid10 mirror groups";
2772 				return -EINVAL;
2773 			}
2774
2775 			/* Userspace reordered disks to add/remove mirrors -> adjust raid_disk indexes */
2776 			__reorder_raid_disk_indexes(rs);
2777 			mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR,
2778 								   rs->raid10_copies);
2779 			mddev->new_layout = mddev->layout;
2780 			reshape = false;
2781 		} else
2782 			reshape = true;
2783
2784 	} else if (rs_is_raid456(rs))
2785 		reshape = true;
2786
2787 	else if (rs_is_raid1(rs)) {
2788 		if (rs->delta_disks) {
2789 			/* Process raid1 via delta_disks */
2790 			mddev->degraded = rs->delta_disks < 0 ? -rs->delta_disks : rs->delta_disks;
2791 			reshape = true;
2792 		} else {
2793 			/* Process raid1 without delta_disks */
2794 			mddev->raid_disks = rs->raid_disks;
2795 			reshape = false;
2796 		}
2797 	} else {
2798 		rs->ti->error = "Called with bogus raid type";
2799 		return -EINVAL;
2800 	}
2801
2802 	if (reshape) {
2803 		set_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags);
2804 		set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
2805 	} else if (mddev->raid_disks < rs->raid_disks)
2806 		/* Create new superblocks and bitmaps, if any new disks */
2807 		set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
2808
2809 	return 0;
2810 }
2811
2812 /* Get reshape sectors from data_offsets or raid set */
2813 static sector_t _get_reshape_sectors(struct raid_set *rs)
2814 {
2815 	struct md_rdev *rdev;
2816 	sector_t reshape_sectors = 0;
2817
2818 	rdev_for_each(rdev, &rs->md)
2819 		if (!test_bit(Journal, &rdev->flags)) {
2820 			reshape_sectors = (rdev->data_offset > rdev->new_data_offset) ?
2821 					rdev->data_offset - rdev->new_data_offset :
2822 					rdev->new_data_offset - rdev->data_offset;
2823 			break;
2824 		}
2825
2826 	return max(reshape_sectors, (sector_t) rs->data_offset);
2827 }
2828
2829 /*
2830  * Reshape:
2831  * - change raid layout
2832  * - change chunk size
2833  * - add disks
2834  * - remove disks
2835  */
2836 static int rs_setup_reshape(struct raid_set *rs)
2837 {
2838 	int r = 0;
2839 	unsigned int cur_raid_devs, d;
2840 	sector_t reshape_sectors = _get_reshape_sectors(rs);
2841 	struct mddev *mddev = &rs->md;
2842 	struct md_rdev *rdev;
2843
2844 	mddev->delta_disks = rs->delta_disks;
2845 	cur_raid_devs = mddev->raid_disks;
2846
2847 	/* Ignore impossible layout change whilst adding/removing disks */
2848 	if (mddev->delta_disks &&
2849 	    mddev->layout != mddev->new_layout) {
2850 		DMINFO("Ignoring invalid layout change with delta_disks=%d", rs->delta_disks);
2851 		mddev->new_layout = mddev->layout;
2852 	}
2853
2854 	/*
2855 	 * Adjust array size:
2856 	 *
2857 	 * - in case of adding disk(s), array size has
2858 	 *   to grow after the disk adding reshape,
2859 	 *   which'll happen in the event handler;
2860 	 *   reshape will happen forward, so space has to
2861 	 *   be available at the beginning of each disk
2862 	 *
2863 	 * - in case of removing disk(s), array size
2864 	 *   has to shrink before starting the reshape,
2865 	 *   which'll happen here;
2866 	 *   reshape will happen backward, so space has to
2867 	 *   be available at the end of each disk
2868 	 *
2869 	 * - data_offset and new_data_offset are
2870 	 *   adjusted for aforementioned out of place
2871 	 *   reshaping based on userspace passing in
2872 	 *   the "data_offset <sectors>" key/value
2873 	 *   pair via the constructor
2874 	 */
2875
2876 	/* Add disk(s) */
2877 	if (rs->delta_disks > 0) {
2878 		/* Prepare disks for check in raid4/5/6/10 {check|start}_reshape */
2879 		for (d = cur_raid_devs; d < rs->raid_disks; d++) {
2880 			rdev = &rs->dev[d].rdev;
2881 			clear_bit(In_sync, &rdev->flags);
2882
2883 			/*
2884 			 * saved_raid_disk needs to be -1, or recovery_offset will be set to 0
2885 			 * by md, which'll store that erroneously in the superblock on reshape
2886 			 */
2887 			rdev->saved_raid_disk = -1;
2888 			rdev->raid_disk = d;
2889
2890 			rdev->sectors = mddev->dev_sectors;
2891 			rdev->recovery_offset = rs_is_raid1(rs) ? 0 : MaxSector;
2892 		}
2893
2894 		mddev->reshape_backwards = 0; /* adding disk(s) -> forward reshape */
2895
2896 	/* Remove disk(s) */
2897 	} else if (rs->delta_disks < 0) {
2898 		r = rs_set_dev_and_array_sectors(rs, rs->ti->len, true);
2899 		mddev->reshape_backwards = 1; /* removing disk(s) -> backward reshape */
2900
2901 	/* Change layout and/or chunk size */
2902 	} else {
2903 		/*
2904 		 * Reshape layout (e.g. raid5_ls -> raid5_n) and/or chunk size:
2905 		 *
2906 		 * keeping the number of disks and doing a layout change ->
2907 		 *
2908 		 * toggle reshape_backwards depending on data_offset:
2909 		 *
2910 		 * - free space upfront -> reshape forward
2911 		 *
2912 		 * - free space at the end -> reshape backward
2913 		 *
2914 		 *
2915 		 * This utilizes free reshape space avoiding the need
2916 		 * for userspace to move (parts of) LV segments in
2917 		 * case of layout/chunksize change (for disk
2918 		 * adding/removing, reshape space has to be at
2919 		 * the proper position, see above with delta_disks):
2920 		 *
2921 		 * add disk(s)   -> begin
2922 		 * remove disk(s)-> end
2923 		 */
2924 		mddev->reshape_backwards = rs->dev[0].rdev.data_offset ? 0 : 1;
2925 	}
2926
2927 	/*
2928 	 * Adjust device size for forward reshape
2929 	 * because md_finish_reshape() reduces it.
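	 * (each non-journal rdev is grown by reshape_sectors below; md
	 * trims it back again once the reshape has finished)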
2930 */ 2931 if (!mddev->reshape_backwards) 2932 rdev_for_each(rdev, &rs->md) 2933 if (!test_bit(Journal, &rdev->flags)) 2934 rdev->sectors += reshape_sectors; 2935 2936 return r; 2937 } 2938 2939 /* 2940 * If the md resync thread has updated superblock with max reshape position 2941 * at the end of a reshape but not (yet) reset the layout configuration 2942 * changes -> reset the latter. 2943 */ 2944 static void rs_reset_inconclusive_reshape(struct raid_set *rs) 2945 { 2946 if (!rs_is_reshaping(rs) && rs_is_layout_change(rs, true)) { 2947 rs_set_cur(rs); 2948 rs->md.delta_disks = 0; 2949 rs->md.reshape_backwards = 0; 2950 } 2951 } 2952 2953 /* 2954 * Enable/disable discard support on RAID set depending on 2955 * RAID level and discard properties of underlying RAID members. 2956 */ 2957 static void configure_discard_support(struct raid_set *rs) 2958 { 2959 int i; 2960 bool raid456; 2961 struct dm_target *ti = rs->ti; 2962 2963 /* 2964 * XXX: RAID level 4,5,6 require zeroing for safety. 2965 */ 2966 raid456 = rs_is_raid456(rs); 2967 2968 for (i = 0; i < rs->raid_disks; i++) { 2969 if (!rs->dev[i].rdev.bdev || 2970 !bdev_max_discard_sectors(rs->dev[i].rdev.bdev)) 2971 return; 2972 2973 if (raid456) { 2974 if (!devices_handle_discard_safely) { 2975 DMERR("raid456 discard support disabled due to discard_zeroes_data uncertainty."); 2976 DMERR("Set dm-raid.devices_handle_discard_safely=Y to override."); 2977 return; 2978 } 2979 } 2980 } 2981 2982 ti->num_discard_bios = 1; 2983 } 2984 2985 /* 2986 * Construct a RAID0/1/10/4/5/6 mapping: 2987 * Args: 2988 * <raid_type> <#raid_params> <raid_params>{0,} \ 2989 * <#raid_devs> [<meta_dev1> <dev1>]{1,} 2990 * 2991 * <raid_params> varies by <raid_type>. See 'parse_raid_params' for 2992 * details on possible <raid_params>. 2993 * 2994 * Userspace is free to initialize the metadata devices, hence the superblocks to 2995 * enforce recreation based on the passed in table parameters. 
2996 * 2997 */ 2998 static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv) 2999 { 3000 int r; 3001 bool resize = false; 3002 struct raid_type *rt; 3003 unsigned int num_raid_params, num_raid_devs; 3004 sector_t sb_array_sectors, rdev_sectors, reshape_sectors; 3005 struct raid_set *rs = NULL; 3006 const char *arg; 3007 struct rs_layout rs_layout; 3008 struct dm_arg_set as = { argc, argv }, as_nrd; 3009 struct dm_arg _args[] = { 3010 { 0, as.argc, "Cannot understand number of raid parameters" }, 3011 { 1, 254, "Cannot understand number of raid devices parameters" } 3012 }; 3013 3014 arg = dm_shift_arg(&as); 3015 if (!arg) { 3016 ti->error = "No arguments"; 3017 return -EINVAL; 3018 } 3019 3020 rt = get_raid_type(arg); 3021 if (!rt) { 3022 ti->error = "Unrecognised raid_type"; 3023 return -EINVAL; 3024 } 3025 3026 /* Must have <#raid_params> */ 3027 if (dm_read_arg_group(_args, &as, &num_raid_params, &ti->error)) 3028 return -EINVAL; 3029 3030 /* number of raid device tupples <meta_dev data_dev> */ 3031 as_nrd = as; 3032 dm_consume_args(&as_nrd, num_raid_params); 3033 _args[1].max = (as_nrd.argc - 1) / 2; 3034 if (dm_read_arg(_args + 1, &as_nrd, &num_raid_devs, &ti->error)) 3035 return -EINVAL; 3036 3037 if (!__within_range(num_raid_devs, 1, MAX_RAID_DEVICES)) { 3038 ti->error = "Invalid number of supplied raid devices"; 3039 return -EINVAL; 3040 } 3041 3042 rs = raid_set_alloc(ti, rt, num_raid_devs); 3043 if (IS_ERR(rs)) 3044 return PTR_ERR(rs); 3045 3046 r = parse_raid_params(rs, &as, num_raid_params); 3047 if (r) 3048 goto bad; 3049 3050 r = parse_dev_params(rs, &as); 3051 if (r) 3052 goto bad; 3053 3054 rs->md.sync_super = super_sync; 3055 3056 /* 3057 * Calculate ctr requested array and device sizes to allow 3058 * for superblock analysis needing device sizes defined. 3059 * 3060 * Any existing superblock will overwrite the array and device sizes 3061 */ 3062 r = rs_set_dev_and_array_sectors(rs, rs->ti->len, false); 3063 if (r) 3064 goto bad; 3065 3066 /* Memorize just calculated, potentially larger sizes to grow the raid set in preresume */ 3067 rs->array_sectors = rs->md.array_sectors; 3068 rs->dev_sectors = rs->md.dev_sectors; 3069 3070 /* 3071 * Backup any new raid set level, layout, ... 3072 * requested to be able to compare to superblock 3073 * members for conversion decisions. 3074 */ 3075 rs_config_backup(rs, &rs_layout); 3076 3077 r = analyse_superblocks(ti, rs); 3078 if (r) 3079 goto bad; 3080 3081 /* All in-core metadata now as of current superblocks after calling analyse_superblocks() */ 3082 sb_array_sectors = rs->md.array_sectors; 3083 rdev_sectors = __rdev_sectors(rs); 3084 if (!rdev_sectors) { 3085 ti->error = "Invalid rdev size"; 3086 r = -EINVAL; 3087 goto bad; 3088 } 3089 3090 3091 reshape_sectors = _get_reshape_sectors(rs); 3092 if (rs->dev_sectors != rdev_sectors) { 3093 resize = (rs->dev_sectors != rdev_sectors - reshape_sectors); 3094 if (rs->dev_sectors > rdev_sectors - reshape_sectors) 3095 set_bit(RT_FLAG_RS_GROW, &rs->runtime_flags); 3096 } 3097 3098 INIT_WORK(&rs->md.event_work, do_table_event); 3099 ti->private = rs; 3100 ti->num_flush_bios = 1; 3101 ti->needs_bio_set_dev = true; 3102 3103 /* Restore any requested new layout for conversion decision */ 3104 rs_config_restore(rs, &rs_layout); 3105 3106 /* 3107 * Now that we have any superblock metadata available, 3108 * check for new, recovering, reshaping, to be taken over, 3109 * to be reshaped or an existing, unchanged raid set to 3110 * run in sequence. 
3111 */ 3112 if (test_bit(MD_ARRAY_FIRST_USE, &rs->md.flags)) { 3113 /* A new raid6 set has to be recovered to ensure proper parity and Q-Syndrome */ 3114 if (rs_is_raid6(rs) && 3115 test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) { 3116 ti->error = "'nosync' not allowed for new raid6 set"; 3117 r = -EINVAL; 3118 goto bad; 3119 } 3120 rs_setup_recovery(rs, 0); 3121 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); 3122 rs_set_new(rs); 3123 } else if (rs_is_recovering(rs)) { 3124 /* A recovering raid set may be resized */ 3125 goto size_check; 3126 } else if (rs_is_reshaping(rs)) { 3127 /* Have to reject size change request during reshape */ 3128 if (resize) { 3129 ti->error = "Can't resize a reshaping raid set"; 3130 r = -EPERM; 3131 goto bad; 3132 } 3133 /* skip setup rs */ 3134 } else if (rs_takeover_requested(rs)) { 3135 if (rs_is_reshaping(rs)) { 3136 ti->error = "Can't takeover a reshaping raid set"; 3137 r = -EPERM; 3138 goto bad; 3139 } 3140 3141 /* We can't takeover a journaled raid4/5/6 */ 3142 if (test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags)) { 3143 ti->error = "Can't takeover a journaled raid4/5/6 set"; 3144 r = -EPERM; 3145 goto bad; 3146 } 3147 3148 /* 3149 * If a takeover is needed, userspace sets any additional 3150 * devices to rebuild and we can check for a valid request here. 3151 * 3152 * If acceptable, set the level to the new requested 3153 * one, prohibit requesting recovery, allow the raid 3154 * set to run and store superblocks during resume. 3155 */ 3156 r = rs_check_takeover(rs); 3157 if (r) 3158 goto bad; 3159 3160 r = rs_setup_takeover(rs); 3161 if (r) 3162 goto bad; 3163 3164 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); 3165 /* Takeover ain't recovery, so disable recovery */ 3166 rs_setup_recovery(rs, MaxSector); 3167 rs_set_new(rs); 3168 } else if (rs_reshape_requested(rs)) { 3169 /* Only request grow on raid set size extensions, not on reshapes. */ 3170 clear_bit(RT_FLAG_RS_GROW, &rs->runtime_flags); 3171 3172 /* 3173 * No need to check for 'ongoing' takeover here, because takeover 3174 * is an instant operation as oposed to an ongoing reshape. 3175 */ 3176 3177 /* We can't reshape a journaled raid4/5/6 */ 3178 if (test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags)) { 3179 ti->error = "Can't reshape a journaled raid4/5/6 set"; 3180 r = -EPERM; 3181 goto bad; 3182 } 3183 3184 /* Out-of-place space has to be available to allow for a reshape unless raid1! */ 3185 if (reshape_sectors || rs_is_raid1(rs)) { 3186 /* 3187 * We can only prepare for a reshape here, because the 3188 * raid set needs to run to provide the repective reshape 3189 * check functions via its MD personality instance. 3190 * 3191 * So do the reshape check after md_run() succeeded. 3192 */ 3193 r = rs_prepare_reshape(rs); 3194 if (r) 3195 goto bad; 3196 3197 /* Reshaping ain't recovery, so disable recovery */ 3198 rs_setup_recovery(rs, MaxSector); 3199 } 3200 rs_set_cur(rs); 3201 } else { 3202 size_check: 3203 /* May not set recovery when a device rebuild is requested */ 3204 if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) { 3205 clear_bit(RT_FLAG_RS_GROW, &rs->runtime_flags); 3206 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); 3207 rs_setup_recovery(rs, MaxSector); 3208 } else if (test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags)) { 3209 /* 3210 * Set raid set to current size, i.e. size as of 3211 * superblocks to grow to larger size in preresume. 
3212 */ 3213 r = rs_set_dev_and_array_sectors(rs, sb_array_sectors, false); 3214 if (r) 3215 goto bad; 3216 3217 rs_setup_recovery(rs, rs->md.recovery_cp < rs->md.dev_sectors ? rs->md.recovery_cp : rs->md.dev_sectors); 3218 } else { 3219 /* This is no size change or it is shrinking, update size and record in superblocks */ 3220 r = rs_set_dev_and_array_sectors(rs, rs->ti->len, false); 3221 if (r) 3222 goto bad; 3223 3224 if (sb_array_sectors > rs->array_sectors) 3225 set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); 3226 } 3227 rs_set_cur(rs); 3228 } 3229 3230 /* If constructor requested it, change data and new_data offsets */ 3231 r = rs_adjust_data_offsets(rs); 3232 if (r) 3233 goto bad; 3234 3235 /* Catch any inconclusive reshape superblock content. */ 3236 rs_reset_inconclusive_reshape(rs); 3237 3238 /* Start raid set read-only and assumed clean to change in raid_resume() */ 3239 rs->md.ro = 1; 3240 rs->md.in_sync = 1; 3241 3242 /* Keep array frozen until resume. */ 3243 set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery); 3244 3245 /* Has to be held on running the array */ 3246 mddev_lock_nointr(&rs->md); 3247 r = md_run(&rs->md); 3248 rs->md.in_sync = 0; /* Assume already marked dirty */ 3249 if (r) { 3250 ti->error = "Failed to run raid array"; 3251 mddev_unlock(&rs->md); 3252 goto bad; 3253 } 3254 3255 r = md_start(&rs->md); 3256 if (r) { 3257 ti->error = "Failed to start raid array"; 3258 mddev_unlock(&rs->md); 3259 goto bad_md_start; 3260 } 3261 3262 /* If raid4/5/6 journal mode explicitly requested (only possible with journal dev) -> set it */ 3263 if (test_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags)) { 3264 r = r5c_journal_mode_set(&rs->md, rs->journal_dev.mode); 3265 if (r) { 3266 ti->error = "Failed to set raid4/5/6 journal mode"; 3267 mddev_unlock(&rs->md); 3268 goto bad_journal_mode_set; 3269 } 3270 } 3271 3272 mddev_suspend(&rs->md); 3273 set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags); 3274 3275 /* Try to adjust the raid4/5/6 stripe cache size to the stripe size */ 3276 if (rs_is_raid456(rs)) { 3277 r = rs_set_raid456_stripe_cache(rs); 3278 if (r) 3279 goto bad_stripe_cache; 3280 } 3281 3282 /* Now do an early reshape check */ 3283 if (test_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) { 3284 r = rs_check_reshape(rs); 3285 if (r) 3286 goto bad_check_reshape; 3287 3288 /* Restore new, ctr requested layout to perform check */ 3289 rs_config_restore(rs, &rs_layout); 3290 3291 if (rs->md.pers->start_reshape) { 3292 r = rs->md.pers->check_reshape(&rs->md); 3293 if (r) { 3294 ti->error = "Reshape check failed"; 3295 goto bad_check_reshape; 3296 } 3297 } 3298 } 3299 3300 /* Disable/enable discard support on raid set. 
 */
3301 	configure_discard_support(rs);
3302
3303 	mddev_unlock(&rs->md);
3304 	return 0;
3305
3306 bad_md_start:
3307 bad_journal_mode_set:
3308 bad_stripe_cache:
3309 bad_check_reshape:
3310 	md_stop(&rs->md);
3311 bad:
3312 	raid_set_free(rs);
3313
3314 	return r;
3315 }
3316
3317 static void raid_dtr(struct dm_target *ti)
3318 {
3319 	struct raid_set *rs = ti->private;
3320
3321 	md_stop(&rs->md);
3322 	raid_set_free(rs);
3323 }
3324
3325 static int raid_map(struct dm_target *ti, struct bio *bio)
3326 {
3327 	struct raid_set *rs = ti->private;
3328 	struct mddev *mddev = &rs->md;
3329
3330 	/*
3331 	 * If we're reshaping to add disk(s), ti->len and
3332 	 * mddev->array_sectors will differ during the process
3333 	 * (ti->len > mddev->array_sectors), so we have to requeue
3334 	 * bios with addresses > mddev->array_sectors here or
3335 	 * accesses beyond the end of the component data images
3336 	 * would occur and error the raid set.
3337 	 */
3338 	if (unlikely(bio_end_sector(bio) > mddev->array_sectors))
3339 		return DM_MAPIO_REQUEUE;
3340
3341 	md_handle_request(mddev, bio);
3342
3343 	return DM_MAPIO_SUBMITTED;
3344 }
3345
3346 /* Return sync state string for @state */
3347 enum sync_state { st_frozen, st_reshape, st_resync, st_check, st_repair, st_recover, st_idle };
3348 static const char *sync_str(enum sync_state state)
3349 {
3350 	/* Has to be in above sync_state order! */
3351 	static const char *sync_strs[] = {
3352 		"frozen",
3353 		"reshape",
3354 		"resync",
3355 		"check",
3356 		"repair",
3357 		"recover",
3358 		"idle"
3359 	};
3360
3361 	return __within_range(state, 0, ARRAY_SIZE(sync_strs) - 1) ? sync_strs[state] : "undef";
3362 };
3363
3364 /* Return enum sync_state for @mddev derived from @recovery flags */
3365 static enum sync_state decipher_sync_action(struct mddev *mddev, unsigned long recovery)
3366 {
3367 	if (test_bit(MD_RECOVERY_FROZEN, &recovery))
3368 		return st_frozen;
3369
3370 	/* The MD sync thread can be done with io or be interrupted but still be running */
3371 	if (!test_bit(MD_RECOVERY_DONE, &recovery) &&
3372 	    (test_bit(MD_RECOVERY_RUNNING, &recovery) ||
3373 	     (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &recovery)))) {
3374 		if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
3375 			return st_reshape;
3376
3377 		if (test_bit(MD_RECOVERY_SYNC, &recovery)) {
3378 			if (!test_bit(MD_RECOVERY_REQUESTED, &recovery))
3379 				return st_resync;
3380 			if (test_bit(MD_RECOVERY_CHECK, &recovery))
3381 				return st_check;
3382 			return st_repair;
3383 		}
3384
3385 		if (test_bit(MD_RECOVERY_RECOVER, &recovery))
3386 			return st_recover;
3387
3388 		if (mddev->reshape_position != MaxSector)
3389 			return st_reshape;
3390 	}
3391
3392 	return st_idle;
3393 }
3394
3395 /*
3396  * Return status string for @rdev
3397  *
3398  * Status characters:
3399  *
3400  *  'D' = Dead/Failed raid set component or raid4/5/6 journal device
3401  *  'a' = Alive but not in-sync raid set component _or_ alive raid4/5/6 'write_back' journal device
3402  *  'A' = Alive and in-sync raid set component _or_ alive raid4/5/6 'write_through' journal device
3403  *  '-' = Non-existing device (i.e. userspace passed '- -' into the ctr)
3404  */
3405 static const char *__raid_dev_status(struct raid_set *rs, struct md_rdev *rdev)
3406 {
3407 	if (!rdev->bdev)
3408 		return "-";
3409 	else if (test_bit(Faulty, &rdev->flags))
3410 		return "D";
3411 	else if (test_bit(Journal, &rdev->flags))
3412 		return (rs->journal_dev.mode == R5C_JOURNAL_MODE_WRITE_THROUGH) ?
"A" : "a"; 3413 else if (test_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags) || 3414 (!test_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags) && 3415 !test_bit(In_sync, &rdev->flags))) 3416 return "a"; 3417 else 3418 return "A"; 3419 } 3420 3421 /* Helper to return resync/reshape progress for @rs and runtime flags for raid set in sync / resynching */ 3422 static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery, 3423 enum sync_state state, sector_t resync_max_sectors) 3424 { 3425 sector_t r; 3426 struct mddev *mddev = &rs->md; 3427 3428 clear_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags); 3429 clear_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags); 3430 3431 if (rs_is_raid0(rs)) { 3432 r = resync_max_sectors; 3433 set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags); 3434 3435 } else { 3436 if (state == st_idle && !test_bit(MD_RECOVERY_INTR, &recovery)) 3437 r = mddev->recovery_cp; 3438 else 3439 r = mddev->curr_resync_completed; 3440 3441 if (state == st_idle && r >= resync_max_sectors) { 3442 /* 3443 * Sync complete. 3444 */ 3445 /* In case we have finished recovering, the array is in sync. */ 3446 if (test_bit(MD_RECOVERY_RECOVER, &recovery)) 3447 set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags); 3448 3449 } else if (state == st_recover) 3450 /* 3451 * In case we are recovering, the array is not in sync 3452 * and health chars should show the recovering legs. 3453 * 3454 * Already retrieved recovery offset from curr_resync_completed above. 3455 */ 3456 ; 3457 3458 else if (state == st_resync || state == st_reshape) 3459 /* 3460 * If "resync/reshape" is occurring, the raid set 3461 * is or may be out of sync hence the health 3462 * characters shall be 'a'. 3463 */ 3464 set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags); 3465 3466 else if (state == st_check || state == st_repair) 3467 /* 3468 * If "check" or "repair" is occurring, the raid set has 3469 * undergone an initial sync and the health characters 3470 * should not be 'a' anymore. 3471 */ 3472 set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags); 3473 3474 else if (test_bit(MD_RECOVERY_NEEDED, &recovery)) 3475 /* 3476 * We are idle and recovery is needed, prevent 'A' chars race 3477 * caused by components still set to in-sync by constructor. 3478 */ 3479 set_bit(RT_FLAG_RS_RESYNCING, &rs->runtime_flags); 3480 3481 else { 3482 /* 3483 * We are idle and the raid set may be doing an initial 3484 * sync, or it may be rebuilding individual components. 3485 * If all the devices are In_sync, then it is the raid set 3486 * that is being initialized. 3487 */ 3488 struct md_rdev *rdev; 3489 3490 set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags); 3491 rdev_for_each(rdev, mddev) 3492 if (!test_bit(Journal, &rdev->flags) && 3493 !test_bit(In_sync, &rdev->flags)) { 3494 clear_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags); 3495 break; 3496 } 3497 } 3498 } 3499 3500 return min(r, resync_max_sectors); 3501 } 3502 3503 /* Helper to return @dev name or "-" if !@dev */ 3504 static const char *__get_dev_name(struct dm_dev *dev) 3505 { 3506 return dev ? dev->name : "-"; 3507 } 3508 3509 static void raid_status(struct dm_target *ti, status_type_t type, 3510 unsigned int status_flags, char *result, unsigned int maxlen) 3511 { 3512 struct raid_set *rs = ti->private; 3513 struct mddev *mddev = &rs->md; 3514 struct r5conf *conf = rs_is_raid456(rs) ? mddev->private : NULL; 3515 int i, max_nr_stripes = conf ? 
conf->max_nr_stripes : 0;
3516 	unsigned long recovery;
3517 	unsigned int raid_param_cnt = 1; /* at least 1 for chunksize */
3518 	unsigned int sz = 0;
3519 	unsigned int rebuild_writemostly_count = 0;
3520 	sector_t progress, resync_max_sectors, resync_mismatches;
3521 	enum sync_state state;
3522 	struct raid_type *rt;
3523
3524 	switch (type) {
3525 	case STATUSTYPE_INFO:
3526 		/* *Should* always succeed */
3527 		rt = get_raid_type_by_ll(mddev->new_level, mddev->new_layout);
3528 		if (!rt)
3529 			return;
3530
3531 		DMEMIT("%s %d ", rt->name, mddev->raid_disks);
3532
3533 		/* Access most recent mddev properties for status output */
3534 		smp_rmb();
3535 		/* Get sensible max sectors even if raid set not yet started */
3536 		resync_max_sectors = test_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags) ?
3537 				      mddev->resync_max_sectors : mddev->dev_sectors;
3538 		recovery = rs->md.recovery;
3539 		state = decipher_sync_action(mddev, recovery);
3540 		progress = rs_get_progress(rs, recovery, state, resync_max_sectors);
3541 		resync_mismatches = (mddev->last_sync_action && !strcasecmp(mddev->last_sync_action, "check")) ?
3542 				    atomic64_read(&mddev->resync_mismatches) : 0;
3543
3544 		/* HM FIXME: do we want another state char for raid0? It shows 'D'/'A'/'-' now */
3545 		for (i = 0; i < rs->raid_disks; i++)
3546 			DMEMIT(__raid_dev_status(rs, &rs->dev[i].rdev));
3547
3548 		/*
3549 		 * In-sync/Reshape ratio:
3550 		 *  The in-sync ratio shows the progress of:
3551 		 *   - Initializing the raid set
3552 		 *   - Rebuilding a subset of devices of the raid set
3553 		 *  The user can distinguish between the two by referring
3554 		 *  to the status characters.
3555 		 *
3556 		 *  The reshape ratio shows the progress of
3557 		 *  changing the raid layout or the number of
3558 		 *  disks of a raid set
3559 		 */
3560 		DMEMIT(" %llu/%llu", (unsigned long long) progress,
3561 		       (unsigned long long) resync_max_sectors);
3562
3563 		/*
3564 		 * v1.5.0+:
3565 		 *
3566 		 * Sync action:
3567 		 *   See Documentation/admin-guide/device-mapper/dm-raid.rst for
3568 		 *   information on each of these states.
3569 		 */
3570 		DMEMIT(" %s", sync_str(state));
3571
3572 		/*
3573 		 * v1.5.0+:
3574 		 *
3575 		 * resync_mismatches/mismatch_cnt
3576 		 *   This field shows the number of discrepancies found when
3577 		 *   performing a "check" of the raid set.
3578 		 */
3579 		DMEMIT(" %llu", (unsigned long long) resync_mismatches);
3580
3581 		/*
3582 		 * v1.9.0+:
3583 		 *
3584 		 * data_offset (needed for out of place reshaping)
3585 		 *   This field shows the data offset into the data
3586 		 *   image LV where the first stripe's data starts.
3587 		 *
3588 		 * We keep data_offset equal on all raid disks of the set,
3589 		 * so retrieving it from the first raid disk is sufficient.
3590 		 */
3591 		DMEMIT(" %llu", (unsigned long long) rs->dev[0].rdev.data_offset);
3592
3593 		/*
3594 		 * v1.10.0+:
3595 		 */
3596 		DMEMIT(" %s", test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags) ?
3597 			      __raid_dev_status(rs, &rs->journal_dev.rdev) : "-");
3598 		break;
3599
3600 	case STATUSTYPE_TABLE:
3601 		/* Report the table line string you would use to construct this raid set */
3602
3603 		/*
3604 		 * Count any rebuild or writemostly argument pairs and subtract the
3605 		 * hweight count of any rebuild and writemostly ctr flags added below.
3606 		 */
3607 		for (i = 0; i < rs->raid_disks; i++) {
3608 			rebuild_writemostly_count += (test_bit(i, (void *) rs->rebuild_disks) ? 2 : 0) +
3609 						     (test_bit(WriteMostly, &rs->dev[i].rdev.flags) ? 2 : 0);
3610 		}
3611 		rebuild_writemostly_count -= (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags) ? 2 : 0) +
3612 					     (test_bit(__CTR_FLAG_WRITE_MOSTLY, &rs->ctr_flags) ?
2 : 0); 3613 /* Calculate raid parameter count based on ^ rebuild/writemostly argument counts and ctr flags set. */ 3614 raid_param_cnt += rebuild_writemostly_count + 3615 hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_NO_ARGS) + 3616 hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_ONE_ARG) * 2; 3617 /* Emit table line */ 3618 /* This has to be in the documented order for userspace! */ 3619 DMEMIT("%s %u %u", rs->raid_type->name, raid_param_cnt, mddev->new_chunk_sectors); 3620 if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags)) 3621 DMEMIT(" %s", dm_raid_arg_name_by_flag(CTR_FLAG_SYNC)); 3622 if (test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) 3623 DMEMIT(" %s", dm_raid_arg_name_by_flag(CTR_FLAG_NOSYNC)); 3624 if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) 3625 for (i = 0; i < rs->raid_disks; i++) 3626 if (test_bit(i, (void *) rs->rebuild_disks)) 3627 DMEMIT(" %s %u", dm_raid_arg_name_by_flag(CTR_FLAG_REBUILD), i); 3628 if (test_bit(__CTR_FLAG_DAEMON_SLEEP, &rs->ctr_flags)) 3629 DMEMIT(" %s %lu", dm_raid_arg_name_by_flag(CTR_FLAG_DAEMON_SLEEP), 3630 mddev->bitmap_info.daemon_sleep); 3631 if (test_bit(__CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags)) 3632 DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_MIN_RECOVERY_RATE), 3633 mddev->sync_speed_min); 3634 if (test_bit(__CTR_FLAG_MAX_RECOVERY_RATE, &rs->ctr_flags)) 3635 DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_MAX_RECOVERY_RATE), 3636 mddev->sync_speed_max); 3637 if (test_bit(__CTR_FLAG_WRITE_MOSTLY, &rs->ctr_flags)) 3638 for (i = 0; i < rs->raid_disks; i++) 3639 if (test_bit(WriteMostly, &rs->dev[i].rdev.flags)) 3640 DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_WRITE_MOSTLY), 3641 rs->dev[i].rdev.raid_disk); 3642 if (test_bit(__CTR_FLAG_MAX_WRITE_BEHIND, &rs->ctr_flags)) 3643 DMEMIT(" %s %lu", dm_raid_arg_name_by_flag(CTR_FLAG_MAX_WRITE_BEHIND), 3644 mddev->bitmap_info.max_write_behind); 3645 if (test_bit(__CTR_FLAG_STRIPE_CACHE, &rs->ctr_flags)) 3646 DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_STRIPE_CACHE), 3647 max_nr_stripes); 3648 if (test_bit(__CTR_FLAG_REGION_SIZE, &rs->ctr_flags)) 3649 DMEMIT(" %s %llu", dm_raid_arg_name_by_flag(CTR_FLAG_REGION_SIZE), 3650 (unsigned long long) to_sector(mddev->bitmap_info.chunksize)); 3651 if (test_bit(__CTR_FLAG_RAID10_COPIES, &rs->ctr_flags)) 3652 DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_COPIES), 3653 raid10_md_layout_to_copies(mddev->layout)); 3654 if (test_bit(__CTR_FLAG_RAID10_FORMAT, &rs->ctr_flags)) 3655 DMEMIT(" %s %s", dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_FORMAT), 3656 raid10_md_layout_to_format(mddev->layout)); 3657 if (test_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags)) 3658 DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_DELTA_DISKS), 3659 max(rs->delta_disks, mddev->delta_disks)); 3660 if (test_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags)) 3661 DMEMIT(" %s %llu", dm_raid_arg_name_by_flag(CTR_FLAG_DATA_OFFSET), 3662 (unsigned long long) rs->data_offset); 3663 if (test_bit(__CTR_FLAG_JOURNAL_DEV, &rs->ctr_flags)) 3664 DMEMIT(" %s %s", dm_raid_arg_name_by_flag(CTR_FLAG_JOURNAL_DEV), 3665 __get_dev_name(rs->journal_dev.dev)); 3666 if (test_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags)) 3667 DMEMIT(" %s %s", dm_raid_arg_name_by_flag(CTR_FLAG_JOURNAL_MODE), 3668 md_journal_mode_to_dm_raid(rs->journal_dev.mode)); 3669 DMEMIT(" %d", rs->raid_disks); 3670 for (i = 0; i < rs->raid_disks; i++) 3671 DMEMIT(" %s %s", __get_dev_name(rs->dev[i].meta_dev), 3672 __get_dev_name(rs->dev[i].data_dev)); 3673 break; 3674 3675 case STATUSTYPE_IMA: 3676 rt = 

	case STATUSTYPE_IMA:
		rt = get_raid_type_by_ll(mddev->new_level, mddev->new_layout);
		if (!rt)
			return;

		DMEMIT_TARGET_NAME_VERSION(ti->type);
		DMEMIT(",raid_type=%s,raid_disks=%d", rt->name, mddev->raid_disks);

		/* Access most recent mddev properties for status output */
		smp_rmb();
		recovery = rs->md.recovery;
		state = decipher_sync_action(mddev, recovery);
		DMEMIT(",raid_state=%s", sync_str(state));

		for (i = 0; i < rs->raid_disks; i++) {
			DMEMIT(",raid_device_%d_status=", i);
			DMEMIT(__raid_dev_status(rs, &rs->dev[i].rdev));
		}

		if (rt_is_raid456(rt)) {
			DMEMIT(",journal_dev_mode=");
			switch (rs->journal_dev.mode) {
			case R5C_JOURNAL_MODE_WRITE_THROUGH:
				DMEMIT("%s",
				       _raid456_journal_mode[R5C_JOURNAL_MODE_WRITE_THROUGH].param);
				break;
			case R5C_JOURNAL_MODE_WRITE_BACK:
				DMEMIT("%s",
				       _raid456_journal_mode[R5C_JOURNAL_MODE_WRITE_BACK].param);
				break;
			default:
				DMEMIT("invalid");
				break;
			}
		}
		DMEMIT(";");
		break;
	}
}

static int raid_message(struct dm_target *ti, unsigned int argc, char **argv,
			char *result, unsigned int maxlen)
{
	struct raid_set *rs = ti->private;
	struct mddev *mddev = &rs->md;

	if (!mddev->pers || !mddev->pers->sync_request)
		return -EINVAL;

	if (!strcasecmp(argv[0], "frozen"))
		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
	else
		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);

	if (!strcasecmp(argv[0], "idle") || !strcasecmp(argv[0], "frozen")) {
		if (mddev->sync_thread) {
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			md_unregister_thread(&mddev->sync_thread);
			md_reap_sync_thread(mddev);
		}
	} else if (decipher_sync_action(mddev, mddev->recovery) != st_idle)
		return -EBUSY;
	else if (!strcasecmp(argv[0], "resync"))
		; /* MD_RECOVERY_NEEDED set below */
	else if (!strcasecmp(argv[0], "recover"))
		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
	else {
		if (!strcasecmp(argv[0], "check")) {
			set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
			set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
			set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
		} else if (!strcasecmp(argv[0], "repair")) {
			set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
			set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
		} else
			return -EINVAL;
	}
	if (mddev->ro == 2) {
		/* A write to sync_action is enough to justify
		 * canceling read-auto mode
		 */
		mddev->ro = 0;
		if (!mddev->suspended && mddev->sync_thread)
			md_wakeup_thread(mddev->sync_thread);
	}
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	if (!mddev->suspended && mddev->thread)
		md_wakeup_thread(mddev->thread);

	return 0;
}
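
/*
 * Example usage of the message interface above (device name made up):
 *
 *   dmsetup message my_raid 0 check    # start a scrubbing pass
 *   dmsetup message my_raid 0 idle     # return to idle, stopping the pass
 *
 * "frozen"/"idle" stop a running sync thread; "resync", "recover", "check"
 * and "repair" request the corresponding MD sync action and are rejected
 * with -EBUSY while another action is in flight.
 */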

static int raid_iterate_devices(struct dm_target *ti,
				iterate_devices_callout_fn fn, void *data)
{
	struct raid_set *rs = ti->private;
	unsigned int i;
	int r = 0;

	for (i = 0; !r && i < rs->raid_disks; i++) {
		if (rs->dev[i].data_dev) {
			r = fn(ti, rs->dev[i].data_dev,
			       0, /* No offset on data devs */
			       rs->md.dev_sectors, data);
		}
	}

	return r;
}

static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct raid_set *rs = ti->private;
	unsigned int chunk_size_bytes = to_bytes(rs->md.chunk_sectors);

	blk_limits_io_min(limits, chunk_size_bytes);
	blk_limits_io_opt(limits, chunk_size_bytes * mddev_data_stripes(rs));
}
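
/*
 * Worked example for the hints above (made-up geometry): a 5-disk raid5
 * set with 128-sector (64 KiB) chunks has 4 data stripes, so io_min is
 * advertised as 64 KiB and io_opt as 4 * 64 KiB = 256 KiB, i.e. one full
 * stripe of data.
 */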

static void raid_postsuspend(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;

	if (!test_and_set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) {
		/* Writes have to be stopped before suspending to avoid deadlocks. */
		if (!test_bit(MD_RECOVERY_FROZEN, &rs->md.recovery))
			md_stop_writes(&rs->md);

		mddev_lock_nointr(&rs->md);
		mddev_suspend(&rs->md);
		mddev_unlock(&rs->md);
	}
}

static void attempt_restore_of_faulty_devices(struct raid_set *rs)
{
	int i;
	uint64_t cleared_failed_devices[DISKS_ARRAY_ELEMS];
	unsigned long flags;
	bool cleared = false;
	struct dm_raid_superblock *sb;
	struct mddev *mddev = &rs->md;
	struct md_rdev *r;

	/* RAID personalities have to provide hot add/remove methods or we need to bail out. */
	if (!mddev->pers || !mddev->pers->hot_add_disk || !mddev->pers->hot_remove_disk)
		return;

	memset(cleared_failed_devices, 0, sizeof(cleared_failed_devices));

	for (i = 0; i < rs->raid_disks; i++) {
		r = &rs->dev[i].rdev;
		/* HM FIXME: enhance journal device recovery processing */
		if (test_bit(Journal, &r->flags))
			continue;

		if (test_bit(Faulty, &r->flags) &&
		    r->meta_bdev && !read_disk_sb(r, r->sb_size, true)) {
			DMINFO("Faulty %s device #%d has readable super block."
			       " Attempting to revive it.",
			       rs->raid_type->name, i);

			/*
			 * Faulty bit may be set, but sometimes the array can
			 * be suspended before the personalities can respond
			 * by removing the device from the array (i.e. calling
			 * 'hot_remove_disk'). If they haven't yet removed
			 * the failed device, its 'raid_disk' number will be
			 * '>= 0' - meaning we must call this function
			 * ourselves.
			 */
			flags = r->flags;
			clear_bit(In_sync, &r->flags); /* Mandatory for hot remove. */
			if (r->raid_disk >= 0) {
				if (mddev->pers->hot_remove_disk(mddev, r)) {
					/* Failed to revive this device, try next */
					r->flags = flags;
					continue;
				}
			} else
				r->raid_disk = r->saved_raid_disk = i;

			clear_bit(Faulty, &r->flags);
			clear_bit(WriteErrorSeen, &r->flags);

			if (mddev->pers->hot_add_disk(mddev, r)) {
				/* Failed to revive this device, try next */
				r->raid_disk = r->saved_raid_disk = -1;
				r->flags = flags;
			} else {
				clear_bit(In_sync, &r->flags);
				r->recovery_offset = 0;
				set_bit(i, (void *) cleared_failed_devices);
				cleared = true;
			}
		}
	}

	/* If any failed devices could be cleared, update all sbs' failed_devices bits */
	if (cleared) {
		uint64_t failed_devices[DISKS_ARRAY_ELEMS];

		rdev_for_each(r, &rs->md) {
			if (test_bit(Journal, &r->flags))
				continue;

			sb = page_address(r->sb_page);
			sb_retrieve_failed_devices(sb, failed_devices);

			for (i = 0; i < DISKS_ARRAY_ELEMS; i++)
				failed_devices[i] &= ~cleared_failed_devices[i];

			sb_update_failed_devices(sb, failed_devices);
		}
	}
}

static int __load_dirty_region_bitmap(struct raid_set *rs)
{
	int r = 0;

	/* Try loading the bitmap unless "raid0", which does not have one */
	if (!rs_is_raid0(rs) &&
	    !test_and_set_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags)) {
		r = md_bitmap_load(&rs->md);
		if (r)
			DMERR("Failed to load bitmap");
	}

	return r;
}

/* Enforce updating all superblocks */
static void rs_update_sbs(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;
	int ro = mddev->ro;

	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
	mddev->ro = 0;
	md_update_sb(mddev, 1);
	mddev->ro = ro;
}
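
/*
 * Illustration (hypothetical geometry): a reshape is requested from
 * userspace by reloading the table with changed parameters, e.g. passing
 * "delta_disks +1" plus an additional metadata/data device pair to grow a
 * raid5 set by one disk. rs_start_reshape() below then hands the geometry
 * change to the MD personality.
 */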
/*
 * Reshape changes the raid algorithm of @rs to a new one within the same
 * personality (e.g. raid6_zr -> raid6_nc), changes the stripe size,
 * adds/removes disks from the raid set (thus growing/shrinking it) or
 * resizes the set.
 *
 * Call mddev_lock_nointr() before!
 */
static int rs_start_reshape(struct raid_set *rs)
{
	int r;
	struct mddev *mddev = &rs->md;
	struct md_personality *pers = mddev->pers;

	/* Don't allow the sync thread to work until the table gets reloaded. */
	set_bit(MD_RECOVERY_WAIT, &mddev->recovery);

	r = rs_setup_reshape(rs);
	if (r)
		return r;

	/*
	 * Check any reshape constraints enforced by the personality.
	 *
	 * May as well already kick the reshape off so that
	 * pers->start_reshape() becomes optional.
	 */
	r = pers->check_reshape(mddev);
	if (r) {
		rs->ti->error = "pers->check_reshape() failed";
		return r;
	}

	/*
	 * Personality may not provide start reshape method in which
	 * case check_reshape above has already covered everything.
	 */
	if (pers->start_reshape) {
		r = pers->start_reshape(mddev);
		if (r) {
			rs->ti->error = "pers->start_reshape() failed";
			return r;
		}
	}

	/*
	 * Now that the reshape is set up, update the superblocks to
	 * reflect the fact so that a table reload will access the
	 * proper superblock content in the ctr.
	 */
	rs_update_sbs(rs);

	return 0;
}

static int raid_preresume(struct dm_target *ti)
{
	int r;
	struct raid_set *rs = ti->private;
	struct mddev *mddev = &rs->md;

	/* This is a resume after a suspend of the set -> it's already started. */
	if (test_and_set_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags))
		return 0;

	/*
	 * The superblocks need to be updated on disk if the
	 * array is new or new devices got added (and thus
	 * zeroed out by userspace); otherwise
	 * __load_dirty_region_bitmap will overwrite them in
	 * core with old data or fail.
	 */
	if (test_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags))
		rs_update_sbs(rs);

	/* Load the bitmap from disk unless raid0 */
	r = __load_dirty_region_bitmap(rs);
	if (r)
		return r;

	/* We are extending the raid set size, adjust mddev/md_rdev sizes and set capacity. */
	if (test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags)) {
		mddev->array_sectors = rs->array_sectors;
		mddev->dev_sectors = rs->dev_sectors;
		rs_set_rdev_sectors(rs);
		rs_set_capacity(rs);
	}

	/* Resize bitmap to adjust to changed region size (aka MD bitmap chunksize) or grown device size */
	if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap &&
	    (test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags) ||
	     (rs->requested_bitmap_chunk_sectors &&
	      mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)))) {
		int chunksize = to_bytes(rs->requested_bitmap_chunk_sectors) ?: mddev->bitmap_info.chunksize;

		r = md_bitmap_resize(mddev->bitmap, mddev->dev_sectors, chunksize, 0);
		if (r)
			DMERR("Failed to resize bitmap");
	}

	/* Check for any resize/reshape on @rs and adjust/initiate */
	/* Be prepared for mddev_resume() in raid_resume() */
	set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
	if (mddev->recovery_cp && mddev->recovery_cp < MaxSector) {
		set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
		mddev->resync_min = mddev->recovery_cp;
		if (test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags))
			mddev->resync_max_sectors = mddev->dev_sectors;
	}

	/* Check for any reshape request unless new raid set */
	if (test_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) {
		/* Initiate a reshape. */
		rs_set_rdev_sectors(rs);
		mddev_lock_nointr(mddev);
		r = rs_start_reshape(rs);
		mddev_unlock(mddev);
		if (r)
			DMWARN("Failed to check/start reshape, continuing without change");
		r = 0;
	}

	return r;
}
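
/*
 * For orientation: across a table reload, device-mapper invokes the hooks
 * in this file roughly as postsuspend (old table) -> ctr (new table) ->
 * preresume -> resume, which is why raid_preresume() above performs the
 * one-time superblock/bitmap work and raid_resume() below merely
 * unfreezes recovery and resumes the MD device.
 */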
static void raid_resume(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;
	struct mddev *mddev = &rs->md;

	if (test_and_set_bit(RT_FLAG_RS_RESUMED, &rs->runtime_flags)) {
		/*
		 * A secondary resume while the device is active.
		 * Take this opportunity to check whether any failed
		 * devices are reachable again.
		 */
		attempt_restore_of_faulty_devices(rs);
	}

	if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) {
		/* Only reduce raid set size before running a disk-removing reshape. */
		if (mddev->delta_disks < 0)
			rs_set_capacity(rs);

		mddev_lock_nointr(mddev);
		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
		mddev->ro = 0;
		mddev->in_sync = 0;
		mddev_resume(mddev);
		mddev_unlock(mddev);
	}
}

static struct target_type raid_target = {
	.name = "raid",
	.version = {1, 15, 1},
	.module = THIS_MODULE,
	.ctr = raid_ctr,
	.dtr = raid_dtr,
	.map = raid_map,
	.status = raid_status,
	.message = raid_message,
	.iterate_devices = raid_iterate_devices,
	.io_hints = raid_io_hints,
	.postsuspend = raid_postsuspend,
	.preresume = raid_preresume,
	.resume = raid_resume,
};

static int __init dm_raid_init(void)
{
	DMINFO("Loading target version %u.%u.%u",
	       raid_target.version[0],
	       raid_target.version[1],
	       raid_target.version[2]);
	return dm_register_target(&raid_target);
}

static void __exit dm_raid_exit(void)
{
	dm_unregister_target(&raid_target);
}

module_init(dm_raid_init);
module_exit(dm_raid_exit);

module_param(devices_handle_discard_safely, bool, 0644);
MODULE_PARM_DESC(devices_handle_discard_safely,
		 "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");

MODULE_DESCRIPTION(DM_NAME " raid0/1/10/4/5/6 target");
MODULE_ALIAS("dm-raid0");
MODULE_ALIAS("dm-raid1");
MODULE_ALIAS("dm-raid10");
MODULE_ALIAS("dm-raid4");
MODULE_ALIAS("dm-raid5");
MODULE_ALIAS("dm-raid6");
MODULE_AUTHOR("Neil Brown <dm-devel@redhat.com>");
MODULE_AUTHOR("Heinz Mauelshagen <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
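
/*
 * Example (values akin to the example tables in dm-raid.rst): once this
 * target is registered, a raid4 set without metadata devices can be
 * created from userspace with e.g.
 *
 *   dmsetup create my_raid --table \
 *     "0 1960893648 raid raid4 1 2048 5 - 8:17 - 8:33 - 8:49 - 8:65 - 8:81"
 */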