/*
 * Partial Parity Log for closing the RAID5 write hole
 * Copyright (c) 2017, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/crc32c.h>
#include <linux/flex_array.h>
#include <linux/async_tx.h>
#include <linux/raid/md_p.h>
#include "md.h"
#include "raid5.h"

/*
 * PPL consists of a 4KB header (struct ppl_header) and at least 128KB for
 * partial parity data. The header contains an array of entries
 * (struct ppl_header_entry) which describe the logged write requests.
 * Partial parity for the entries comes after the header, written in the same
 * sequence as the entries:
 *
 * Header
 *   entry0
 *   ...
 *   entryN
 * PP data
 *   PP for entry0
 *   ...
 *   PP for entryN
 *
 * An entry describes one or more consecutive stripe_heads, up to a full
 * stripe. The modified raid data chunks form an m-by-n matrix, where m is the
 * number of stripe_heads in the entry and n is the number of modified data
 * disks. Every stripe_head in the entry must write to the same data disks.
 * An example of a valid case described by a single entry (writes to the first
 * stripe of a 4 disk array, 16k chunk size):
 *
 * sh->sector   dd0   dd1   dd2    ppl
 *            +-----+-----+-----+
 *          0 | --- | --- | --- | +----+
 *          8 | -W- | -W- | --- | | pp |   data_sector = 8
 *         16 | -W- | -W- | --- | | pp |   data_size = 3 * 2 * 4k
 *         24 | -W- | -W- | --- | | pp |   pp_size = 3 * 4k
 *            +-----+-----+-----+ +----+
 *
 * data_sector is the first raid sector of the modified data, data_size is the
 * total size of modified data and pp_size is the size of partial parity for
 * this entry. Entries for full stripe writes contain no partial parity
 * (pp_size = 0); they only mark the stripes for which parity should be
 * recalculated after an unclean shutdown. Every entry holds a checksum of its
 * partial parity, and the header also has a checksum of the header itself.
 *
 * A write request is always logged to the PPL instance stored on the parity
 * disk of the corresponding stripe. For each member disk there is one ppl_log
 * used to handle logging for this disk, independently from others. They are
 * grouped in the child_logs array in struct ppl_conf, which is assigned to
 * r5conf->log_private.
 *
 * ppl_io_unit represents a full PPL write; header_page contains the
 * ppl_header. PPL entries for logged stripes are added in ppl_log_stripe().
 * A stripe_head can be appended to the last entry if it meets the conditions
 * for a valid entry described above, otherwise a new entry is added. Checksums
 * of entries are calculated incrementally as stripes containing partial parity
 * are being added. ppl_submit_iounit() calculates the checksum of the header
 * and submits a bio containing the header page and partial parity pages
 * (sh->ppl_page) for all stripes of the io_unit. When the PPL write completes,
 * the stripes associated with the io_unit are released and raid5d starts
 * writing their data and parity. When all stripes are written, the io_unit is
 * freed and the next can be submitted.
 *
 * An io_unit is used to gather stripes until it is submitted or becomes full
 * (if the maximum number of entries or size of PPL is reached). Another
 * io_unit can't be submitted until the previous has completed (PPL and stripe
 * data+parity is written). The log->io_list tracks all io_units of a log
 * (for a single member disk). New io_units are added to the end of the list
 * and the first io_unit is submitted, if it is not submitted already.
 * The current io_unit accepting new stripes is always at the end of the list.
 */

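/*
 * Partial parity space available to a single PPL (io_unit) when multiple
 * PPLs per member disk are written sequentially; see ppl_init_child_log().
 */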
#define PPL_SPACE_SIZE (128 * 1024)

struct ppl_conf {
	struct mddev *mddev;

	/* array of child logs, one for each raid disk */
	struct ppl_log *child_logs;
	int count;

	int block_size;		/* the logical block size used for data_sector
				 * in ppl_header_entry */
	u32 signature;		/* raid array identifier */
	atomic64_t seq;		/* current log write sequence number */

	struct kmem_cache *io_kc;
	mempool_t *io_pool;
	struct bio_set *bs;

	/* used only for recovery */
	int recovered_entries;
	int mismatch_count;

	/* stripes to retry if failed to allocate io_unit */
	struct list_head no_mem_stripes;
	spinlock_t no_mem_stripes_lock;
};

struct ppl_log {
	struct ppl_conf *ppl_conf;	/* shared between all log instances */

	struct md_rdev *rdev;		/* array member disk associated with
					 * this log instance */
	struct mutex io_mutex;
	struct ppl_io_unit *current_io;	/* current io_unit accepting new data
					 * always at the end of io_list */
	spinlock_t io_list_lock;
	struct list_head io_list;	/* all io_units of this log */

	sector_t next_io_sector;
	unsigned int entry_space;
	bool use_multippl;
};

#define PPL_IO_INLINE_BVECS 32

struct ppl_io_unit {
	struct ppl_log *log;

	struct page *header_page;	/* for ppl_header */

	unsigned int entries_count;	/* number of entries in ppl_header */
	unsigned int pp_size;		/* current total size of partial parity */

	u64 seq;			/* sequence number of this log write */
	struct list_head log_sibling;	/* log->io_list */

	struct list_head stripe_list;	/* stripes added to the io_unit */
	atomic_t pending_stripes;	/* how many stripes not written to raid */

	bool submitted;			/* true if write to log started */

	/* inline bio and its biovec for submitting the iounit */
	struct bio bio;
	struct bio_vec biovec[PPL_IO_INLINE_BVECS];
};

struct dma_async_tx_descriptor *
ops_run_partial_parity(struct stripe_head *sh, struct raid5_percpu *percpu,
		       struct dma_async_tx_descriptor *tx)
{
	int disks = sh->disks;
	struct page **srcs = flex_array_get(percpu->scribble, 0);
	int count = 0, pd_idx = sh->pd_idx, i;
	struct async_submit_ctl submit;

	pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);

	/*
	 * Partial parity is the XOR of stripe data chunks that are not changed
	 * during the write request. Depending on available data
	 * (read-modify-write vs. reconstruct-write case) we calculate it
	 * differently.
	 */
	if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
		/*
		 * rmw: xor old data and parity from updated disks
		 * This is calculated earlier by ops_run_prexor5() so just copy
		 * the parity dev page.
		 */
		srcs[count++] = sh->dev[pd_idx].page;
	} else if (sh->reconstruct_state == reconstruct_state_drain_run) {
		/* rcw: xor data from all not updated disks */
		for (i = disks; i--;) {
			struct r5dev *dev = &sh->dev[i];
			if (test_bit(R5_UPTODATE, &dev->flags))
				srcs[count++] = dev->page;
		}
	} else {
		return tx;
	}

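	/*
	 * The percpu scribble buffer doubles as the xor source list: srcs[]
	 * points at its start, and the region after (sh->disks + 2) page
	 * pointers is handed to async_tx as its address conversion scratch
	 * space.
	 */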
	init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, tx,
			  NULL, sh, flex_array_get(percpu->scribble, 0)
			  + sizeof(struct page *) * (sh->disks + 2));

	if (count == 1)
		tx = async_memcpy(sh->ppl_page, srcs[0], 0, 0, PAGE_SIZE,
				  &submit);
	else
		tx = async_xor(sh->ppl_page, srcs, 0, count, PAGE_SIZE,
			       &submit);

	return tx;
}

static void *ppl_io_pool_alloc(gfp_t gfp_mask, void *pool_data)
{
	struct kmem_cache *kc = pool_data;
	struct ppl_io_unit *io;

	io = kmem_cache_alloc(kc, gfp_mask);
	if (!io)
		return NULL;

	io->header_page = alloc_page(gfp_mask);
	if (!io->header_page) {
		kmem_cache_free(kc, io);
		return NULL;
	}

	return io;
}

static void ppl_io_pool_free(void *element, void *pool_data)
{
	struct kmem_cache *kc = pool_data;
	struct ppl_io_unit *io = element;

	__free_page(io->header_page);
	kmem_cache_free(kc, io);
}

static struct ppl_io_unit *ppl_new_iounit(struct ppl_log *log,
					  struct stripe_head *sh)
{
	struct ppl_conf *ppl_conf = log->ppl_conf;
	struct ppl_io_unit *io;
	struct ppl_header *pplhdr;
	struct page *header_page;

	io = mempool_alloc(ppl_conf->io_pool, GFP_NOWAIT);
	if (!io)
		return NULL;

	header_page = io->header_page;
	memset(io, 0, sizeof(*io));
	io->header_page = header_page;

	io->log = log;
	INIT_LIST_HEAD(&io->log_sibling);
	INIT_LIST_HEAD(&io->stripe_list);
	atomic_set(&io->pending_stripes, 0);
	bio_init(&io->bio, io->biovec, PPL_IO_INLINE_BVECS);

	pplhdr = page_address(io->header_page);
	clear_page(pplhdr);
	memset(pplhdr->reserved, 0xff, PPL_HDR_RESERVED);
	pplhdr->signature = cpu_to_le32(ppl_conf->signature);

	io->seq = atomic64_add_return(1, &ppl_conf->seq);
	pplhdr->generation = cpu_to_le64(io->seq);

	return io;
}

static int ppl_log_stripe(struct ppl_log *log, struct stripe_head *sh)
{
	struct ppl_io_unit *io = log->current_io;
	struct ppl_header_entry *e = NULL;
	struct ppl_header *pplhdr;
	int i;
	sector_t data_sector = 0;
	int data_disks = 0;
	struct r5conf *conf = sh->raid_conf;

	pr_debug("%s: stripe: %llu\n", __func__, (unsigned long long)sh->sector);

	/* check if current io_unit is full */
	if (io && (io->pp_size == log->entry_space ||
		   io->entries_count == PPL_HDR_MAX_ENTRIES)) {
		pr_debug("%s: add io_unit blocked by seq: %llu\n",
			 __func__, io->seq);
		io = NULL;
	}

	/* add a new unit if there is none or the current is full */
	if (!io) {
		io = ppl_new_iounit(log, sh);
		if (!io)
			return -ENOMEM;
		spin_lock_irq(&log->io_list_lock);
		list_add_tail(&io->log_sibling, &log->io_list);
		spin_unlock_irq(&log->io_list_lock);

		log->current_io = io;
	}

	for (i = 0; i < sh->disks; i++) {
		struct r5dev *dev = &sh->dev[i];

		if (i != sh->pd_idx && test_bit(R5_Wantwrite, &dev->flags)) {
			if (!data_disks || dev->sector < data_sector)
				data_sector = dev->sector;
			data_disks++;
		}
	}
	BUG_ON(!data_disks);

	pr_debug("%s: seq: %llu data_sector: %llu data_disks: %d\n", __func__,
		 io->seq, (unsigned long long)data_sector, data_disks);

	pplhdr = page_address(io->header_page);

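	/*
	 * Example, using the diagram from the top of this file: if the first
	 * two stripe_heads are already logged (data_sector_last = 8,
	 * data_size_last = 16k), the third one at sh->sector 24 passes all
	 * three checks below: 24 == 16 + STRIPE_SECTORS, sectors 8 and 24 are
	 * in the same chunk, and (24 - 8) * 2 == 16k >> 9.
	 */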
	if (io->entries_count > 0) {
		struct ppl_header_entry *last =
				&pplhdr->entries[io->entries_count - 1];
		struct stripe_head *sh_last = list_last_entry(
				&io->stripe_list, struct stripe_head, log_list);
		u64 data_sector_last = le64_to_cpu(last->data_sector);
		u32 data_size_last = le32_to_cpu(last->data_size);

		/*
		 * Check if we can append the stripe to the last entry. It must
		 * be just after the last logged stripe and write to the same
		 * disks. Use bit shift and logarithm to avoid 64-bit division.
		 */
		if ((sh->sector == sh_last->sector + STRIPE_SECTORS) &&
		    (data_sector >> ilog2(conf->chunk_sectors) ==
		     data_sector_last >> ilog2(conf->chunk_sectors)) &&
		    ((data_sector - data_sector_last) * data_disks ==
		     data_size_last >> 9))
			e = last;
	}

	if (!e) {
		e = &pplhdr->entries[io->entries_count++];
		e->data_sector = cpu_to_le64(data_sector);
		e->parity_disk = cpu_to_le32(sh->pd_idx);
		e->checksum = cpu_to_le32(~0);
	}

	le32_add_cpu(&e->data_size, data_disks << PAGE_SHIFT);

	/* don't write any PP if full stripe write */
	if (!test_bit(STRIPE_FULL_WRITE, &sh->state)) {
		le32_add_cpu(&e->pp_size, PAGE_SIZE);
		io->pp_size += PAGE_SIZE;
		e->checksum = cpu_to_le32(crc32c_le(le32_to_cpu(e->checksum),
						    page_address(sh->ppl_page),
						    PAGE_SIZE));
	}

	list_add_tail(&sh->log_list, &io->stripe_list);
	atomic_inc(&io->pending_stripes);
	sh->ppl_io = io;

	return 0;
}

int ppl_write_stripe(struct r5conf *conf, struct stripe_head *sh)
{
	struct ppl_conf *ppl_conf = conf->log_private;
	struct ppl_io_unit *io = sh->ppl_io;
	struct ppl_log *log;

	if (io || test_bit(STRIPE_SYNCING, &sh->state) || !sh->ppl_page ||
	    !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) ||
	    !test_bit(R5_Insync, &sh->dev[sh->pd_idx].flags)) {
		clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
		return -EAGAIN;
	}

	log = &ppl_conf->child_logs[sh->pd_idx];

	mutex_lock(&log->io_mutex);

	if (!log->rdev || test_bit(Faulty, &log->rdev->flags)) {
		mutex_unlock(&log->io_mutex);
		return -EAGAIN;
	}

	set_bit(STRIPE_LOG_TRAPPED, &sh->state);
	clear_bit(STRIPE_DELAYED, &sh->state);
	atomic_inc(&sh->count);

	if (ppl_log_stripe(log, sh)) {
		spin_lock_irq(&ppl_conf->no_mem_stripes_lock);
		list_add_tail(&sh->log_list, &ppl_conf->no_mem_stripes);
		spin_unlock_irq(&ppl_conf->no_mem_stripes_lock);
	}

	mutex_unlock(&log->io_mutex);

	return 0;
}

static void ppl_log_endio(struct bio *bio)
{
	struct ppl_io_unit *io = bio->bi_private;
	struct ppl_log *log = io->log;
	struct ppl_conf *ppl_conf = log->ppl_conf;
	struct stripe_head *sh, *next;

	pr_debug("%s: seq: %llu\n", __func__, io->seq);

	if (bio->bi_status)
		md_error(ppl_conf->mddev, log->rdev);

	list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) {
		list_del_init(&sh->log_list);

		set_bit(STRIPE_HANDLE, &sh->state);
		raid5_release_stripe(sh);
	}
}

static void ppl_submit_iounit_bio(struct ppl_io_unit *io, struct bio *bio)
{
	char b[BDEVNAME_SIZE];

	pr_debug("%s: seq: %llu size: %u sector: %llu dev: %s\n",
		 __func__, io->seq, bio->bi_iter.bi_size,
		 (unsigned long long)bio->bi_iter.bi_sector,
		 bio_devname(bio, b));

	submit_bio(bio);
}

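/*
 * Finalize and submit an io_unit: convert each entry's data_sector from
 * 512-byte sectors to logical block units, invert the incrementally
 * calculated checksums into their on-disk form, checksum the header and
 * write everything out as a single (possibly chained) FUA bio.
 */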
static void ppl_submit_iounit(struct ppl_io_unit *io)
{
	struct ppl_log *log = io->log;
	struct ppl_conf *ppl_conf = log->ppl_conf;
	struct ppl_header *pplhdr = page_address(io->header_page);
	struct bio *bio = &io->bio;
	struct stripe_head *sh;
	int i;

	bio->bi_private = io;

	if (!log->rdev || test_bit(Faulty, &log->rdev->flags)) {
		ppl_log_endio(bio);
		return;
	}

	for (i = 0; i < io->entries_count; i++) {
		struct ppl_header_entry *e = &pplhdr->entries[i];

		pr_debug("%s: seq: %llu entry: %d data_sector: %llu pp_size: %u data_size: %u\n",
			 __func__, io->seq, i, le64_to_cpu(e->data_sector),
			 le32_to_cpu(e->pp_size), le32_to_cpu(e->data_size));

		e->data_sector = cpu_to_le64(le64_to_cpu(e->data_sector) >>
					     ilog2(ppl_conf->block_size >> 9));
		e->checksum = cpu_to_le32(~le32_to_cpu(e->checksum));
	}

	pplhdr->entries_count = cpu_to_le32(io->entries_count);
	pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PPL_HEADER_SIZE));

	/* Rewind the buffer if current PPL is larger than remaining space */
	if (log->use_multippl &&
	    log->rdev->ppl.sector + log->rdev->ppl.size - log->next_io_sector <
	    (PPL_HEADER_SIZE + io->pp_size) >> 9)
		log->next_io_sector = log->rdev->ppl.sector;

	bio->bi_end_io = ppl_log_endio;
	bio->bi_opf = REQ_OP_WRITE | REQ_FUA;
	bio_set_dev(bio, log->rdev->bdev);
	bio->bi_iter.bi_sector = log->next_io_sector;
	bio_add_page(bio, io->header_page, PAGE_SIZE, 0);

	pr_debug("%s: log->current_io_sector: %llu\n", __func__,
	    (unsigned long long)log->next_io_sector);

	if (log->use_multippl)
		log->next_io_sector += (PPL_HEADER_SIZE + io->pp_size) >> 9;

	list_for_each_entry(sh, &io->stripe_list, log_list) {
		/* entries for full stripe writes have no partial parity */
		if (test_bit(STRIPE_FULL_WRITE, &sh->state))
			continue;

		if (!bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0)) {
			struct bio *prev = bio;

			bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES,
					       ppl_conf->bs);
			bio->bi_opf = prev->bi_opf;
			bio_copy_dev(bio, prev);
			bio->bi_iter.bi_sector = bio_end_sector(prev);
			bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0);

			bio_chain(bio, prev);
			ppl_submit_iounit_bio(io, prev);
		}
	}

	ppl_submit_iounit_bio(io, bio);
}

static void ppl_submit_current_io(struct ppl_log *log)
{
	struct ppl_io_unit *io;

	spin_lock_irq(&log->io_list_lock);

	io = list_first_entry_or_null(&log->io_list, struct ppl_io_unit,
				      log_sibling);
	if (io && io->submitted)
		io = NULL;

	spin_unlock_irq(&log->io_list_lock);

	if (io) {
		io->submitted = true;

		if (io == log->current_io)
			log->current_io = NULL;

		ppl_submit_iounit(io);
	}
}

void ppl_write_stripe_run(struct r5conf *conf)
{
	struct ppl_conf *ppl_conf = conf->log_private;
	struct ppl_log *log;
	int i;

	for (i = 0; i < ppl_conf->count; i++) {
		log = &ppl_conf->child_logs[i];

		mutex_lock(&log->io_mutex);
		ppl_submit_current_io(log);
		mutex_unlock(&log->io_mutex);
	}
}

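/*
 * Called when the last stripe of an io_unit has been written to the raid
 * disks. Frees the io_unit and, now that a pooled io_unit is available
 * again, retries one stripe that previously failed with -ENOMEM in
 * ppl_log_stripe().
 */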
static void ppl_io_unit_finished(struct ppl_io_unit *io)
{
	struct ppl_log *log = io->log;
	struct ppl_conf *ppl_conf = log->ppl_conf;
	unsigned long flags;

	pr_debug("%s: seq: %llu\n", __func__, io->seq);

	local_irq_save(flags);

	spin_lock(&log->io_list_lock);
	list_del(&io->log_sibling);
	spin_unlock(&log->io_list_lock);

	mempool_free(io, ppl_conf->io_pool);

	spin_lock(&ppl_conf->no_mem_stripes_lock);
	if (!list_empty(&ppl_conf->no_mem_stripes)) {
		struct stripe_head *sh;

		sh = list_first_entry(&ppl_conf->no_mem_stripes,
				      struct stripe_head, log_list);
		list_del_init(&sh->log_list);
		set_bit(STRIPE_HANDLE, &sh->state);
		raid5_release_stripe(sh);
	}
	spin_unlock(&ppl_conf->no_mem_stripes_lock);

	local_irq_restore(flags);
}

void ppl_stripe_write_finished(struct stripe_head *sh)
{
	struct ppl_io_unit *io;

	io = sh->ppl_io;
	sh->ppl_io = NULL;

	if (io && atomic_dec_and_test(&io->pending_stripes))
		ppl_io_unit_finished(io);
}

static void ppl_xor(int size, struct page *page1, struct page *page2)
{
	struct async_submit_ctl submit;
	struct dma_async_tx_descriptor *tx;
	struct page *xor_srcs[] = { page1, page2 };

	init_async_submit(&submit, ASYNC_TX_ACK|ASYNC_TX_XOR_DROP_DST,
			  NULL, NULL, NULL, NULL);
	tx = async_xor(page1, xor_srcs, 0, 2, size, &submit);

	async_tx_quiesce(&tx);
}

/*
 * PPL recovery strategy: xor partial parity and data from all modified data
 * disks within a stripe and write the result as the new stripe parity. If all
 * stripe data disks are modified (full stripe write), no partial parity is
 * available, so just xor the data disks.
 *
 * Recovery of a PPL entry shall occur only if all modified data disks are
 * available and read from all of them succeeds.
 *
 * A PPL entry applies to a stripe; partial parity size for an entry is at
 * most the size of the chunk. Examples of possible cases for a single entry:
 *
 * case 0: single data disk write:
 *   data0    data1    data2     ppl        parity
 * +--------+--------+--------+           +--------------------+
 * | ------ | ------ | ------ | +----+    | (no change)        |
 * | ------ | -data- | ------ | | pp | -> | data1 ^ pp         |
 * | ------ | -data- | ------ | | pp | -> | data1 ^ pp         |
 * | ------ | ------ | ------ | +----+    | (no change)        |
 * +--------+--------+--------+           +--------------------+
 * pp_size = data_size
 *
 * case 1: more than one data disk write:
 *   data0    data1    data2     ppl        parity
 * +--------+--------+--------+           +--------------------+
 * | ------ | ------ | ------ | +----+    | (no change)        |
 * | -data- | -data- | ------ | | pp | -> | data0 ^ data1 ^ pp |
 * | -data- | -data- | ------ | | pp | -> | data0 ^ data1 ^ pp |
 * | ------ | ------ | ------ | +----+    | (no change)        |
 * +--------+--------+--------+           +--------------------+
 * pp_size = data_size / modified_data_disks
 *
 * case 2: write to all data disks (also full stripe write):
 *   data0    data1    data2                parity
 * +--------+--------+--------+           +--------------------+
 * | ------ | ------ | ------ |           | (no change)        |
 * | -data- | -data- | -data- | --------> | xor all data       |
 * | ------ | ------ | ------ | --------> | (no change)        |
 * | ------ | ------ | ------ |           | (no change)        |
 * +--------+--------+--------+           +--------------------+
 * pp_size = 0
 *
 * The following cases are possible only in other implementations. The
 * recovery code can handle them, but they are not generated at runtime
 * because they can be reduced to cases 0, 1 and 2:
 *
 * case 3:
 *   data0    data1    data2     ppl        parity
 * +--------+--------+--------+ +----+    +--------------------+
 * | ------ | -data- | -data- | | pp |    | data1 ^ data2 ^ pp |
 * | ------ | -data- | -data- | | pp | -> | data1 ^ data2 ^ pp |
 * | -data- | -data- | -data- | | -- | -> | xor all data       |
 * | -data- | -data- | ------ | | pp |    | data0 ^ data1 ^ pp |
 * +--------+--------+--------+ +----+    +--------------------+
 * pp_size = chunk_size
 *
 * case 4:
 *   data0    data1    data2     ppl        parity
 * +--------+--------+--------+ +----+    +--------------------+
 * | ------ | -data- | ------ | | pp |    | data1 ^ pp         |
 * | ------ | ------ | ------ | | -- | -> | (no change)        |
 * | ------ | ------ | ------ | | -- | -> | (no change)        |
 * | -data- | ------ | ------ | | pp |    | data0 ^ pp         |
 * +--------+--------+--------+ +----+    +--------------------+
 * pp_size = chunk_size
 */
static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e,
			     sector_t ppl_sector)
{
	struct ppl_conf *ppl_conf = log->ppl_conf;
	struct mddev *mddev = ppl_conf->mddev;
	struct r5conf *conf = mddev->private;
	int block_size = ppl_conf->block_size;
	struct page *page1;
	struct page *page2;
	sector_t r_sector_first;
	sector_t r_sector_last;
	int strip_sectors;
	int data_disks;
	int i;
	int ret = 0;
	char b[BDEVNAME_SIZE];
	unsigned int pp_size = le32_to_cpu(e->pp_size);
	unsigned int data_size = le32_to_cpu(e->data_size);

	page1 = alloc_page(GFP_KERNEL);
	page2 = alloc_page(GFP_KERNEL);

	if (!page1 || !page2) {
		ret = -ENOMEM;
		goto out;
	}

	r_sector_first = le64_to_cpu(e->data_sector) * (block_size >> 9);

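	/*
	 * Derive the entry geometry. For cases 0 and 1 above the partial
	 * parity covers less than a full chunk, so the number of modified
	 * data disks follows from data_size / pp_size. Example: for the
	 * entry from the diagram at the top of this file, data_size = 24k
	 * and pp_size = 12k, giving data_disks = 2 and strip_sectors = 24.
	 */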
	if ((pp_size >> 9) < conf->chunk_sectors) {
		if (pp_size > 0) {
			data_disks = data_size / pp_size;
			strip_sectors = pp_size >> 9;
		} else {
			data_disks = conf->raid_disks - conf->max_degraded;
			strip_sectors = (data_size >> 9) / data_disks;
		}
		r_sector_last = r_sector_first +
				(data_disks - 1) * conf->chunk_sectors +
				strip_sectors;
	} else {
		data_disks = conf->raid_disks - conf->max_degraded;
		strip_sectors = conf->chunk_sectors;
		r_sector_last = r_sector_first + (data_size >> 9);
	}

	pr_debug("%s: array sector first: %llu last: %llu\n", __func__,
		 (unsigned long long)r_sector_first,
		 (unsigned long long)r_sector_last);

	/* if start and end is 4k aligned, use a 4k block */
	if (block_size == 512 &&
	    (r_sector_first & (STRIPE_SECTORS - 1)) == 0 &&
	    (r_sector_last & (STRIPE_SECTORS - 1)) == 0)
		block_size = STRIPE_SIZE;

	/* iterate through blocks in strip */
	for (i = 0; i < strip_sectors; i += (block_size >> 9)) {
		bool update_parity = false;
		sector_t parity_sector;
		struct md_rdev *parity_rdev;
		struct stripe_head sh;
		int disk;
		int indent = 0;

		pr_debug("%s:%*s iter %d start\n", __func__, indent, "", i);
		indent += 2;

		memset(page_address(page1), 0, PAGE_SIZE);

		/* iterate through data member disks */
		for (disk = 0; disk < data_disks; disk++) {
			int dd_idx;
			struct md_rdev *rdev;
			sector_t sector;
			sector_t r_sector = r_sector_first + i +
					    (disk * conf->chunk_sectors);

			pr_debug("%s:%*s data member disk %d start\n",
				 __func__, indent, "", disk);
			indent += 2;

			if (r_sector >= r_sector_last) {
				pr_debug("%s:%*s array sector %llu doesn't need parity update\n",
					 __func__, indent, "",
					 (unsigned long long)r_sector);
				indent -= 2;
				continue;
			}

			update_parity = true;

			/* map raid sector to member disk */
			sector = raid5_compute_sector(conf, r_sector, 0,
						      &dd_idx, NULL);
			pr_debug("%s:%*s processing array sector %llu => data member disk %d, sector %llu\n",
				 __func__, indent, "",
				 (unsigned long long)r_sector, dd_idx,
				 (unsigned long long)sector);

			rdev = conf->disks[dd_idx].rdev;
			if (!rdev || (!test_bit(In_sync, &rdev->flags) &&
				      sector >= rdev->recovery_offset)) {
				pr_debug("%s:%*s data member disk %d missing\n",
					 __func__, indent, "", dd_idx);
				update_parity = false;
				break;
			}

			pr_debug("%s:%*s reading data member disk %s sector %llu\n",
				 __func__, indent, "", bdevname(rdev->bdev, b),
				 (unsigned long long)sector);
			if (!sync_page_io(rdev, sector, block_size, page2,
					REQ_OP_READ, 0, false)) {
				md_error(mddev, rdev);
				pr_debug("%s:%*s read failed!\n", __func__,
					 indent, "");
				ret = -EIO;
				goto out;
			}

			ppl_xor(block_size, page1, page2);

			indent -= 2;
		}

		if (!update_parity)
			continue;

		if (pp_size > 0) {
			pr_debug("%s:%*s reading pp disk sector %llu\n",
				 __func__, indent, "",
				 (unsigned long long)(ppl_sector + i));
			if (!sync_page_io(log->rdev,
					ppl_sector - log->rdev->data_offset + i,
					block_size, page2, REQ_OP_READ, 0,
					false)) {
				pr_debug("%s:%*s read failed!\n", __func__,
					 indent, "");
				md_error(mddev, log->rdev);
				ret = -EIO;
				goto out;
			}

			ppl_xor(block_size, page1, page2);
		}

		/* map raid sector to parity disk */
		parity_sector = raid5_compute_sector(conf, r_sector_first + i,
				0, &disk, &sh);
		BUG_ON(sh.pd_idx != le32_to_cpu(e->parity_disk));
		parity_rdev = conf->disks[sh.pd_idx].rdev;

		BUG_ON(parity_rdev->bdev->bd_dev != log->rdev->bdev->bd_dev);
		pr_debug("%s:%*s write parity at sector %llu, disk %s\n",
			 __func__, indent, "",
			 (unsigned long long)parity_sector,
			 bdevname(parity_rdev->bdev, b));
		if (!sync_page_io(parity_rdev, parity_sector, block_size,
				page1, REQ_OP_WRITE, 0, false)) {
			pr_debug("%s:%*s parity write error!\n", __func__,
				 indent, "");
			md_error(mddev, parity_rdev);
			ret = -EIO;
			goto out;
		}
	}
out:
	if (page1)
		__free_page(page1);
	if (page2)
		__free_page(page2);
	return ret;
}

static int ppl_recover(struct ppl_log *log, struct ppl_header *pplhdr,
		       sector_t offset)
{
	struct ppl_conf *ppl_conf = log->ppl_conf;
	struct md_rdev *rdev = log->rdev;
	struct mddev *mddev = rdev->mddev;
	sector_t ppl_sector = rdev->ppl.sector + offset +
			      (PPL_HEADER_SIZE >> 9);
	struct page *page;
	int i;
	int ret = 0;

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	/* iterate through all PPL entries saved */
	for (i = 0; i < le32_to_cpu(pplhdr->entries_count); i++) {
		struct ppl_header_entry *e = &pplhdr->entries[i];
		u32 pp_size = le32_to_cpu(e->pp_size);
		sector_t sector = ppl_sector;
		int ppl_entry_sectors = pp_size >> 9;
		u32 crc, crc_stored;

		pr_debug("%s: disk: %d entry: %d ppl_sector: %llu pp_size: %u\n",
			 __func__, rdev->raid_disk, i,
			 (unsigned long long)ppl_sector, pp_size);

		crc = ~0;
		crc_stored = le32_to_cpu(e->checksum);

		/* read partial parity for this entry and calculate its checksum */
		while (pp_size) {
			int s = pp_size > PAGE_SIZE ? PAGE_SIZE : pp_size;

			if (!sync_page_io(rdev, sector - rdev->data_offset,
					s, page, REQ_OP_READ, 0, false)) {
				md_error(mddev, rdev);
				ret = -EIO;
				goto out;
			}

			crc = crc32c_le(crc, page_address(page), s);

			pp_size -= s;
			sector += s >> 9;
		}

		crc = ~crc;

		if (crc != crc_stored) {
			/*
			 * Don't recover this entry if the checksum does not
			 * match, but keep going and try to recover other
			 * entries.
			 */
			pr_debug("%s: ppl entry crc does not match: stored: 0x%x calculated: 0x%x\n",
				 __func__, crc_stored, crc);
			ppl_conf->mismatch_count++;
		} else {
			ret = ppl_recover_entry(log, e, ppl_sector);
			if (ret)
				goto out;
			ppl_conf->recovered_entries++;
		}

		ppl_sector += ppl_entry_sectors;
	}

	/* flush the disk cache after recovery if necessary */
	ret = blkdev_issue_flush(rdev->bdev, GFP_KERNEL, NULL);
out:
	__free_page(page);
	return ret;
}

static int ppl_write_empty_header(struct ppl_log *log)
{
	struct page *page;
	struct ppl_header *pplhdr;
	struct md_rdev *rdev = log->rdev;
	int ret = 0;

	pr_debug("%s: disk: %d ppl_sector: %llu\n", __func__,
		 rdev->raid_disk, (unsigned long long)rdev->ppl.sector);

	page = alloc_page(GFP_NOIO | __GFP_ZERO);
	if (!page)
		return -ENOMEM;

	pplhdr = page_address(page);
	/* zero out PPL space to avoid collision with old PPLs */
	blkdev_issue_zeroout(rdev->bdev, rdev->ppl.sector,
			    log->rdev->ppl.size, GFP_NOIO, 0);
	memset(pplhdr->reserved, 0xff, PPL_HDR_RESERVED);
	pplhdr->signature = cpu_to_le32(log->ppl_conf->signature);
	pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PAGE_SIZE));

	if (!sync_page_io(rdev, rdev->ppl.sector - rdev->data_offset,
			  PPL_HEADER_SIZE, page, REQ_OP_WRITE | REQ_SYNC |
			  REQ_FUA, 0, false)) {
		md_error(rdev->mddev, rdev);
		ret = -EIO;
	}

	__free_page(page);
	return ret;
}

static int ppl_load_distributed(struct ppl_log *log)
{
	struct ppl_conf *ppl_conf = log->ppl_conf;
	struct md_rdev *rdev = log->rdev;
	struct mddev *mddev = rdev->mddev;
	struct page *page, *page2, *tmp;
	struct ppl_header *pplhdr = NULL, *prev_pplhdr = NULL;
	u32 crc, crc_stored;
	u32 signature;
	int ret = 0, i;
	sector_t pplhdr_offset = 0, prev_pplhdr_offset = 0;

	pr_debug("%s: disk: %d\n", __func__, rdev->raid_disk);
	/* read PPL headers, find the recent one */
	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	page2 = alloc_page(GFP_KERNEL);
	if (!page2) {
		__free_page(page);
		return -ENOMEM;
	}

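	/*
	 * Walk the chain of PPLs written sequentially to the log space: each
	 * header's entries determine the size of its partial parity data and
	 * therefore the offset of the next potential header. Stop at the
	 * first invalid header or when the generation number decreases; the
	 * last valid header seen is the most recent PPL.
	 */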
	/* searching ppl area for latest ppl */
	while (pplhdr_offset < rdev->ppl.size - (PPL_HEADER_SIZE >> 9)) {
		if (!sync_page_io(rdev,
				  rdev->ppl.sector - rdev->data_offset +
				  pplhdr_offset, PAGE_SIZE, page, REQ_OP_READ,
				  0, false)) {
			md_error(mddev, rdev);
			ret = -EIO;
			/* if not able to read - don't recover any PPL */
			pplhdr = NULL;
			break;
		}
		pplhdr = page_address(page);

		/* check header validity */
		crc_stored = le32_to_cpu(pplhdr->checksum);
		pplhdr->checksum = 0;
		crc = ~crc32c_le(~0, pplhdr, PAGE_SIZE);

		if (crc_stored != crc) {
			pr_debug("%s: ppl header crc does not match: stored: 0x%x calculated: 0x%x (offset: %llu)\n",
				 __func__, crc_stored, crc,
				 (unsigned long long)pplhdr_offset);
			pplhdr = prev_pplhdr;
			pplhdr_offset = prev_pplhdr_offset;
			break;
		}

		signature = le32_to_cpu(pplhdr->signature);

		if (mddev->external) {
			/*
			 * For external metadata the header signature is set
			 * and validated in userspace.
			 */
			ppl_conf->signature = signature;
		} else if (ppl_conf->signature != signature) {
			pr_debug("%s: ppl header signature does not match: stored: 0x%x configured: 0x%x (offset: %llu)\n",
				 __func__, signature, ppl_conf->signature,
				 (unsigned long long)pplhdr_offset);
			pplhdr = prev_pplhdr;
			pplhdr_offset = prev_pplhdr_offset;
			break;
		}

		if (prev_pplhdr && le64_to_cpu(prev_pplhdr->generation) >
		    le64_to_cpu(pplhdr->generation)) {
			/* previous was newest */
			pplhdr = prev_pplhdr;
			pplhdr_offset = prev_pplhdr_offset;
			break;
		}

		prev_pplhdr_offset = pplhdr_offset;
		prev_pplhdr = pplhdr;

		tmp = page;
		page = page2;
		page2 = tmp;

		/* calculate next potential ppl offset */
		for (i = 0; i < le32_to_cpu(pplhdr->entries_count); i++)
			pplhdr_offset +=
			    le32_to_cpu(pplhdr->entries[i].pp_size) >> 9;
		pplhdr_offset += PPL_HEADER_SIZE >> 9;
	}

	/* no valid ppl found */
	if (!pplhdr)
		ppl_conf->mismatch_count++;
	else
		pr_debug("%s: latest PPL found at offset: %llu, with generation: %llu\n",
		    __func__, (unsigned long long)pplhdr_offset,
		    le64_to_cpu(pplhdr->generation));

	/* attempt to recover from log if we are starting a dirty array */
	if (pplhdr && !mddev->pers && mddev->recovery_cp != MaxSector)
		ret = ppl_recover(log, pplhdr, pplhdr_offset);

	/* write empty header if we are starting the array */
	if (!ret && !mddev->pers)
		ret = ppl_write_empty_header(log);

	__free_page(page);
	__free_page(page2);

	pr_debug("%s: return: %d mismatch_count: %d recovered_entries: %d\n",
		 __func__, ret, ppl_conf->mismatch_count,
		 ppl_conf->recovered_entries);
	return ret;
}

static int ppl_load(struct ppl_conf *ppl_conf)
{
	int ret = 0;
	u32 signature = 0;
	bool signature_set = false;
	int i;

	for (i = 0; i < ppl_conf->count; i++) {
		struct ppl_log *log = &ppl_conf->child_logs[i];

		/* skip missing drive */
		if (!log->rdev)
			continue;

		ret = ppl_load_distributed(log);
		if (ret)
			break;

		/*
		 * For external metadata we can't check if the signature is
		 * correct on a single drive, but we can check if it is the
		 * same on all drives.
		 */
		if (ppl_conf->mddev->external) {
			if (!signature_set) {
				signature = ppl_conf->signature;
				signature_set = true;
			} else if (signature != ppl_conf->signature) {
				pr_warn("md/raid:%s: PPL header signature does not match on all member drives\n",
					mdname(ppl_conf->mddev));
				ret = -EINVAL;
				break;
			}
		}
	}

	pr_debug("%s: return: %d mismatch_count: %d recovered_entries: %d\n",
		 __func__, ret, ppl_conf->mismatch_count,
		 ppl_conf->recovered_entries);
	return ret;
}

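/*
 * Common teardown path, used both by ppl_exit_log() and by the error path
 * of ppl_init_log() before conf->log_private is assigned.
 */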
static void __ppl_exit_log(struct ppl_conf *ppl_conf)
{
	clear_bit(MD_HAS_PPL, &ppl_conf->mddev->flags);
	clear_bit(MD_HAS_MULTIPLE_PPLS, &ppl_conf->mddev->flags);

	kfree(ppl_conf->child_logs);

	if (ppl_conf->bs)
		bioset_free(ppl_conf->bs);
	mempool_destroy(ppl_conf->io_pool);
	kmem_cache_destroy(ppl_conf->io_kc);

	kfree(ppl_conf);
}

void ppl_exit_log(struct r5conf *conf)
{
	struct ppl_conf *ppl_conf = conf->log_private;

	if (ppl_conf) {
		__ppl_exit_log(ppl_conf);
		conf->log_private = NULL;
	}
}

static int ppl_validate_rdev(struct md_rdev *rdev)
{
	char b[BDEVNAME_SIZE];
	int ppl_data_sectors;
	int ppl_size_new;

	/*
	 * The configured PPL size must be enough to store
	 * the header and (at the very least) partial parity
	 * for one stripe. Round it down to ensure the data
	 * space is cleanly divisible by stripe size.
	 */
	ppl_data_sectors = rdev->ppl.size - (PPL_HEADER_SIZE >> 9);

	if (ppl_data_sectors > 0)
		ppl_data_sectors = rounddown(ppl_data_sectors, STRIPE_SECTORS);

	if (ppl_data_sectors <= 0) {
		pr_warn("md/raid:%s: PPL space too small on %s\n",
			mdname(rdev->mddev), bdevname(rdev->bdev, b));
		return -ENOSPC;
	}

	ppl_size_new = ppl_data_sectors + (PPL_HEADER_SIZE >> 9);

	if ((rdev->ppl.sector < rdev->data_offset &&
	     rdev->ppl.sector + ppl_size_new > rdev->data_offset) ||
	    (rdev->ppl.sector >= rdev->data_offset &&
	     rdev->data_offset + rdev->sectors > rdev->ppl.sector)) {
		pr_warn("md/raid:%s: PPL space overlaps with data on %s\n",
			mdname(rdev->mddev), bdevname(rdev->bdev, b));
		return -EINVAL;
	}

	if (!rdev->mddev->external &&
	    ((rdev->ppl.offset > 0 && rdev->ppl.offset < (rdev->sb_size >> 9)) ||
	     (rdev->ppl.offset <= 0 && rdev->ppl.offset + ppl_size_new > 0))) {
		pr_warn("md/raid:%s: PPL space overlaps with superblock on %s\n",
			mdname(rdev->mddev), bdevname(rdev->bdev, b));
		return -EINVAL;
	}

	rdev->ppl.size = ppl_size_new;

	return 0;
}

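/*
 * If the log space can hold at least two full PPLs (header plus
 * PPL_SPACE_SIZE of partial parity each), write consecutive PPLs one after
 * another and wrap around when the space runs out, so that the most recent
 * complete PPL is preserved while the next one is being written. Otherwise
 * a single PPL is rewritten in place on every log write.
 */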
static void ppl_init_child_log(struct ppl_log *log, struct md_rdev *rdev)
{
	if ((rdev->ppl.size << 9) >= (PPL_SPACE_SIZE +
				      PPL_HEADER_SIZE) * 2) {
		log->use_multippl = true;
		set_bit(MD_HAS_MULTIPLE_PPLS,
			&log->ppl_conf->mddev->flags);
		log->entry_space = PPL_SPACE_SIZE;
	} else {
		log->use_multippl = false;
		log->entry_space = (log->rdev->ppl.size << 9) -
				   PPL_HEADER_SIZE;
	}
	log->next_io_sector = rdev->ppl.sector;
}

int ppl_init_log(struct r5conf *conf)
{
	struct ppl_conf *ppl_conf;
	struct mddev *mddev = conf->mddev;
	int ret = 0;
	int i;
	bool need_cache_flush = false;

	pr_debug("md/raid:%s: enabling distributed Partial Parity Log\n",
		 mdname(conf->mddev));

	if (PAGE_SIZE != 4096)
		return -EINVAL;

	if (mddev->level != 5) {
		pr_warn("md/raid:%s PPL is not compatible with raid level %d\n",
			mdname(mddev), mddev->level);
		return -EINVAL;
	}

	if (mddev->bitmap_info.file || mddev->bitmap_info.offset) {
		pr_warn("md/raid:%s PPL is not compatible with bitmap\n",
			mdname(mddev));
		return -EINVAL;
	}

	if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
		pr_warn("md/raid:%s PPL is not compatible with journal\n",
			mdname(mddev));
		return -EINVAL;
	}

	ppl_conf = kzalloc(sizeof(struct ppl_conf), GFP_KERNEL);
	if (!ppl_conf)
		return -ENOMEM;

	ppl_conf->mddev = mddev;

	ppl_conf->io_kc = KMEM_CACHE(ppl_io_unit, 0);
	if (!ppl_conf->io_kc) {
		ret = -ENOMEM;
		goto err;
	}

	ppl_conf->io_pool = mempool_create(conf->raid_disks, ppl_io_pool_alloc,
					   ppl_io_pool_free, ppl_conf->io_kc);
	if (!ppl_conf->io_pool) {
		ret = -ENOMEM;
		goto err;
	}

	ppl_conf->bs = bioset_create(conf->raid_disks, 0, BIOSET_NEED_BVECS);
	if (!ppl_conf->bs) {
		ret = -ENOMEM;
		goto err;
	}

	ppl_conf->count = conf->raid_disks;
	ppl_conf->child_logs = kcalloc(ppl_conf->count, sizeof(struct ppl_log),
				       GFP_KERNEL);
	if (!ppl_conf->child_logs) {
		ret = -ENOMEM;
		goto err;
	}

	atomic64_set(&ppl_conf->seq, 0);
	INIT_LIST_HEAD(&ppl_conf->no_mem_stripes);
	spin_lock_init(&ppl_conf->no_mem_stripes_lock);

	if (!mddev->external) {
		ppl_conf->signature = ~crc32c_le(~0, mddev->uuid, sizeof(mddev->uuid));
		ppl_conf->block_size = 512;
	} else {
		ppl_conf->block_size = queue_logical_block_size(mddev->queue);
	}

	for (i = 0; i < ppl_conf->count; i++) {
		struct ppl_log *log = &ppl_conf->child_logs[i];
		struct md_rdev *rdev = conf->disks[i].rdev;

		mutex_init(&log->io_mutex);
		spin_lock_init(&log->io_list_lock);
		INIT_LIST_HEAD(&log->io_list);

		log->ppl_conf = ppl_conf;
		log->rdev = rdev;

		if (rdev) {
			struct request_queue *q;

			ret = ppl_validate_rdev(rdev);
			if (ret)
				goto err;

			q = bdev_get_queue(rdev->bdev);
			if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
				need_cache_flush = true;
			ppl_init_child_log(log, rdev);
		}
	}

	if (need_cache_flush)
		pr_warn("md/raid:%s: Volatile write-back cache should be disabled on all member drives when using PPL!\n",
			mdname(mddev));

	/* load and possibly recover the logs from the member disks */
	ret = ppl_load(ppl_conf);

	if (ret) {
		goto err;
	} else if (!mddev->pers && mddev->recovery_cp == 0 &&
		   ppl_conf->recovered_entries > 0 &&
		   ppl_conf->mismatch_count == 0) {
		/*
		 * If we are starting a dirty array and the recovery succeeds
		 * without any issues, set the array as clean.
		 */
		mddev->recovery_cp = MaxSector;
		set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
	} else if (mddev->pers && ppl_conf->mismatch_count > 0) {
		/* no mismatch allowed when enabling PPL for a running array */
		ret = -EINVAL;
		goto err;
	}

	conf->log_private = ppl_conf;
	set_bit(MD_HAS_PPL, &ppl_conf->mddev->flags);

	return 0;
err:
	__ppl_exit_log(ppl_conf);
	return ret;
}

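/*
 * Enable or disable logging to the PPL of a member disk as it is added to
 * or removed from the array. Adding validates the rdev's PPL area and
 * writes an empty header before the disk is used for logging.
 */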
int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add)
{
	struct ppl_conf *ppl_conf = conf->log_private;
	struct ppl_log *log;
	int ret = 0;
	char b[BDEVNAME_SIZE];

	if (!rdev)
		return -EINVAL;

	pr_debug("%s: disk: %d operation: %s dev: %s\n",
		 __func__, rdev->raid_disk, add ? "add" : "remove",
		 bdevname(rdev->bdev, b));

	if (rdev->raid_disk < 0)
		return 0;

	if (rdev->raid_disk >= ppl_conf->count)
		return -ENODEV;

	log = &ppl_conf->child_logs[rdev->raid_disk];

	mutex_lock(&log->io_mutex);
	if (add) {
		ret = ppl_validate_rdev(rdev);
		if (!ret) {
			log->rdev = rdev;
			ret = ppl_write_empty_header(log);
			ppl_init_child_log(log, rdev);
		}
	} else {
		log->rdev = NULL;
	}
	mutex_unlock(&log->io_mutex);

	return ret;
}