/*
 * Partial Parity Log for closing the RAID5 write hole
 * Copyright (c) 2017, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/crc32c.h>
#include <linux/flex_array.h>
#include <linux/async_tx.h>
#include <linux/raid/md_p.h>
#include "md.h"
#include "raid5.h"

/*
 * PPL consists of a 4KB header (struct ppl_header) and at least 128KB for
 * partial parity data. The header contains an array of entries
 * (struct ppl_header_entry) which describe the logged write requests.
 * Partial parity for the entries comes after the header, written in the same
 * sequence as the entries:
 *
 * Header
 *   entry0
 *   ...
 *   entryN
 * PP data
 *   PP for entry0
 *   ...
 *   PP for entryN
 *
 * An entry describes one or more consecutive stripe_heads, up to a full
 * stripe. The modified raid data chunks form an m-by-n matrix, where m is the
 * number of stripe_heads in the entry and n is the number of modified data
 * disks. Every stripe_head in the entry must write to the same data disks.
 * An example of a valid case described by a single entry (writes to the first
 * stripe of a 4 disk array, 16k chunk size):
 *
 * sh->sector    dd0   dd1   dd2    ppl
 *             +-----+-----+-----+
 *           0 | --- | --- | --- | +----+
 *           8 | -W- | -W- | --- | | pp |   data_sector = 8
 *          16 | -W- | -W- | --- | | pp |   data_size = 3 * 2 * 4k
 *          24 | -W- | -W- | --- | | pp |   pp_size = 3 * 4k
 *             +-----+-----+-----+ +----+
 *
 * data_sector is the first raid sector of the modified data, data_size is the
 * total size of modified data and pp_size is the size of partial parity for
 * this entry. Entries for full stripe writes contain no partial parity
 * (pp_size = 0), they only mark the stripes for which parity should be
 * recalculated after an unclean shutdown. Every entry holds a checksum of its
 * partial parity, the header also has a checksum of the header itself.
 *
 * A write request is always logged to the PPL instance stored on the parity
 * disk of the corresponding stripe. For each member disk there is one ppl_log
 * used to handle logging for this disk, independently from others. They are
 * grouped in the child_logs array in struct ppl_conf, which is assigned to
 * r5conf->log_private.
 *
 * ppl_io_unit represents a full PPL write, header_page contains the ppl_header.
 * PPL entries for logged stripes are added in ppl_log_stripe(). A stripe_head
 * can be appended to the last entry if it meets the conditions for a valid
 * entry described above, otherwise a new entry is added. Checksums of entries
 * are calculated incrementally as stripes containing partial parity are being
 * added. ppl_submit_iounit() calculates the checksum of the header and submits
 * a bio containing the header page and partial parity pages (sh->ppl_page) for
 * all stripes of the io_unit.
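 *
 * As an illustration (assuming 4 KiB pages): an io_unit holding three logged
 * stripe_heads, two of them carrying partial parity and one being a full
 * stripe write, occupies PPL_HEADER_SIZE + 2 * 4 KiB = 12 KiB of PPL space:
 * the header page first, then the two sh->ppl_page pages in stripe order,
 * while the full stripe write contributes only an entry with pp_size = 0 and
 * no partial parity page.
 *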
 * When the PPL write completes, the stripes associated with the io_unit are
 * released and raid5d starts writing their data and parity. When all stripes
 * are written, the io_unit is freed and the next one can be submitted.
 *
 * An io_unit is used to gather stripes until it is submitted or becomes full
 * (if the maximum number of entries or size of PPL is reached). Another
 * io_unit can't be submitted until the previous one has completed (PPL and
 * stripe data+parity is written). The log->io_list tracks all io_units of a
 * log (for a single member disk). New io_units are added to the end of the
 * list and the first io_unit is submitted, if it is not submitted already.
 * The current io_unit accepting new stripes is always at the end of the list.
 *
 * If write-back cache is enabled for any of the disks in the array, its data
 * must be flushed before the next io_unit is submitted.
 */

#define PPL_SPACE_SIZE	(128 * 1024)

struct ppl_conf {
	struct mddev *mddev;

	/* array of child logs, one for each raid disk */
	struct ppl_log *child_logs;
	int count;

	int block_size;		/* the logical block size used for data_sector
				 * in ppl_header_entry */
	u32 signature;		/* raid array identifier */
	atomic64_t seq;		/* current log write sequence number */

	struct kmem_cache *io_kc;
	mempool_t *io_pool;
	struct bio_set *bs;
	struct bio_set *flush_bs;

	/* used only for recovery */
	int recovered_entries;
	int mismatch_count;

	/* stripes to retry if failed to allocate io_unit */
	struct list_head no_mem_stripes;
	spinlock_t no_mem_stripes_lock;
};

struct ppl_log {
	struct ppl_conf *ppl_conf;	/* shared between all log instances */

	struct md_rdev *rdev;		/* array member disk associated with
					 * this log instance */
	struct mutex io_mutex;
	struct ppl_io_unit *current_io;	/* current io_unit accepting new data
					 * always at the end of io_list */
	spinlock_t io_list_lock;
	struct list_head io_list;	/* all io_units of this log */

	sector_t next_io_sector;
	unsigned int entry_space;
	bool use_multippl;
	bool wb_cache_on;
	unsigned long disk_flush_bitmap;
};

#define PPL_IO_INLINE_BVECS 32

struct ppl_io_unit {
	struct ppl_log *log;

	struct page *header_page;	/* for ppl_header */

	unsigned int entries_count;	/* number of entries in ppl_header */
	unsigned int pp_size;		/* total size of partial parity */

	u64 seq;			/* sequence number of this log write */
	struct list_head log_sibling;	/* log->io_list */

	struct list_head stripe_list;	/* stripes added to the io_unit */
	atomic_t pending_stripes;	/* how many stripes not written to raid */
	atomic_t pending_flushes;	/* how many disk flushes are in progress */

	bool submitted;			/* true if write to log started */

	/* inline bio and its biovec for submitting the iounit */
	struct bio bio;
	struct bio_vec biovec[PPL_IO_INLINE_BVECS];
};

struct dma_async_tx_descriptor *
ops_run_partial_parity(struct stripe_head *sh, struct raid5_percpu *percpu,
		       struct dma_async_tx_descriptor *tx)
{
	int disks = sh->disks;
	struct page **srcs = flex_array_get(percpu->scribble, 0);
	int count = 0, pd_idx = sh->pd_idx, i;
	struct async_submit_ctl submit;

	pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector);

	/*
	 * Partial parity is the XOR of stripe data chunks that are not
changed 176 * during the write request. Depending on available data 177 * (read-modify-write vs. reconstruct-write case) we calculate it 178 * differently. 179 */ 180 if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) { 181 /* 182 * rmw: xor old data and parity from updated disks 183 * This is calculated earlier by ops_run_prexor5() so just copy 184 * the parity dev page. 185 */ 186 srcs[count++] = sh->dev[pd_idx].page; 187 } else if (sh->reconstruct_state == reconstruct_state_drain_run) { 188 /* rcw: xor data from all not updated disks */ 189 for (i = disks; i--;) { 190 struct r5dev *dev = &sh->dev[i]; 191 if (test_bit(R5_UPTODATE, &dev->flags)) 192 srcs[count++] = dev->page; 193 } 194 } else { 195 return tx; 196 } 197 198 init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, tx, 199 NULL, sh, flex_array_get(percpu->scribble, 0) 200 + sizeof(struct page *) * (sh->disks + 2)); 201 202 if (count == 1) 203 tx = async_memcpy(sh->ppl_page, srcs[0], 0, 0, PAGE_SIZE, 204 &submit); 205 else 206 tx = async_xor(sh->ppl_page, srcs, 0, count, PAGE_SIZE, 207 &submit); 208 209 return tx; 210 } 211 212 static void *ppl_io_pool_alloc(gfp_t gfp_mask, void *pool_data) 213 { 214 struct kmem_cache *kc = pool_data; 215 struct ppl_io_unit *io; 216 217 io = kmem_cache_alloc(kc, gfp_mask); 218 if (!io) 219 return NULL; 220 221 io->header_page = alloc_page(gfp_mask); 222 if (!io->header_page) { 223 kmem_cache_free(kc, io); 224 return NULL; 225 } 226 227 return io; 228 } 229 230 static void ppl_io_pool_free(void *element, void *pool_data) 231 { 232 struct kmem_cache *kc = pool_data; 233 struct ppl_io_unit *io = element; 234 235 __free_page(io->header_page); 236 kmem_cache_free(kc, io); 237 } 238 239 static struct ppl_io_unit *ppl_new_iounit(struct ppl_log *log, 240 struct stripe_head *sh) 241 { 242 struct ppl_conf *ppl_conf = log->ppl_conf; 243 struct ppl_io_unit *io; 244 struct ppl_header *pplhdr; 245 struct page *header_page; 246 247 io = mempool_alloc(ppl_conf->io_pool, GFP_NOWAIT); 248 if (!io) 249 return NULL; 250 251 header_page = io->header_page; 252 memset(io, 0, sizeof(*io)); 253 io->header_page = header_page; 254 255 io->log = log; 256 INIT_LIST_HEAD(&io->log_sibling); 257 INIT_LIST_HEAD(&io->stripe_list); 258 atomic_set(&io->pending_stripes, 0); 259 atomic_set(&io->pending_flushes, 0); 260 bio_init(&io->bio, io->biovec, PPL_IO_INLINE_BVECS); 261 262 pplhdr = page_address(io->header_page); 263 clear_page(pplhdr); 264 memset(pplhdr->reserved, 0xff, PPL_HDR_RESERVED); 265 pplhdr->signature = cpu_to_le32(ppl_conf->signature); 266 267 io->seq = atomic64_add_return(1, &ppl_conf->seq); 268 pplhdr->generation = cpu_to_le64(io->seq); 269 270 return io; 271 } 272 273 static int ppl_log_stripe(struct ppl_log *log, struct stripe_head *sh) 274 { 275 struct ppl_io_unit *io = log->current_io; 276 struct ppl_header_entry *e = NULL; 277 struct ppl_header *pplhdr; 278 int i; 279 sector_t data_sector = 0; 280 int data_disks = 0; 281 struct r5conf *conf = sh->raid_conf; 282 283 pr_debug("%s: stripe: %llu\n", __func__, (unsigned long long)sh->sector); 284 285 /* check if current io_unit is full */ 286 if (io && (io->pp_size == log->entry_space || 287 io->entries_count == PPL_HDR_MAX_ENTRIES)) { 288 pr_debug("%s: add io_unit blocked by seq: %llu\n", 289 __func__, io->seq); 290 io = NULL; 291 } 292 293 /* add a new unit if there is none or the current is full */ 294 if (!io) { 295 io = ppl_new_iounit(log, sh); 296 if (!io) 297 return -ENOMEM; 298 spin_lock_irq(&log->io_list_lock); 299 
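		/*
		 * New io_units are queued at the tail of io_list;
		 * ppl_submit_current_io() only ever submits the head of the
		 * list, so this io_unit will be written out once all earlier
		 * io_units have completed and been removed from the list.
		 */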
list_add_tail(&io->log_sibling, &log->io_list); 300 spin_unlock_irq(&log->io_list_lock); 301 302 log->current_io = io; 303 } 304 305 for (i = 0; i < sh->disks; i++) { 306 struct r5dev *dev = &sh->dev[i]; 307 308 if (i != sh->pd_idx && test_bit(R5_Wantwrite, &dev->flags)) { 309 if (!data_disks || dev->sector < data_sector) 310 data_sector = dev->sector; 311 data_disks++; 312 } 313 } 314 BUG_ON(!data_disks); 315 316 pr_debug("%s: seq: %llu data_sector: %llu data_disks: %d\n", __func__, 317 io->seq, (unsigned long long)data_sector, data_disks); 318 319 pplhdr = page_address(io->header_page); 320 321 if (io->entries_count > 0) { 322 struct ppl_header_entry *last = 323 &pplhdr->entries[io->entries_count - 1]; 324 struct stripe_head *sh_last = list_last_entry( 325 &io->stripe_list, struct stripe_head, log_list); 326 u64 data_sector_last = le64_to_cpu(last->data_sector); 327 u32 data_size_last = le32_to_cpu(last->data_size); 328 329 /* 330 * Check if we can append the stripe to the last entry. It must 331 * be just after the last logged stripe and write to the same 332 * disks. Use bit shift and logarithm to avoid 64-bit division. 333 */ 334 if ((sh->sector == sh_last->sector + STRIPE_SECTORS) && 335 (data_sector >> ilog2(conf->chunk_sectors) == 336 data_sector_last >> ilog2(conf->chunk_sectors)) && 337 ((data_sector - data_sector_last) * data_disks == 338 data_size_last >> 9)) 339 e = last; 340 } 341 342 if (!e) { 343 e = &pplhdr->entries[io->entries_count++]; 344 e->data_sector = cpu_to_le64(data_sector); 345 e->parity_disk = cpu_to_le32(sh->pd_idx); 346 e->checksum = cpu_to_le32(~0); 347 } 348 349 le32_add_cpu(&e->data_size, data_disks << PAGE_SHIFT); 350 351 /* don't write any PP if full stripe write */ 352 if (!test_bit(STRIPE_FULL_WRITE, &sh->state)) { 353 le32_add_cpu(&e->pp_size, PAGE_SIZE); 354 io->pp_size += PAGE_SIZE; 355 e->checksum = cpu_to_le32(crc32c_le(le32_to_cpu(e->checksum), 356 page_address(sh->ppl_page), 357 PAGE_SIZE)); 358 } 359 360 list_add_tail(&sh->log_list, &io->stripe_list); 361 atomic_inc(&io->pending_stripes); 362 sh->ppl_io = io; 363 364 return 0; 365 } 366 367 int ppl_write_stripe(struct r5conf *conf, struct stripe_head *sh) 368 { 369 struct ppl_conf *ppl_conf = conf->log_private; 370 struct ppl_io_unit *io = sh->ppl_io; 371 struct ppl_log *log; 372 373 if (io || test_bit(STRIPE_SYNCING, &sh->state) || !sh->ppl_page || 374 !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) || 375 !test_bit(R5_Insync, &sh->dev[sh->pd_idx].flags)) { 376 clear_bit(STRIPE_LOG_TRAPPED, &sh->state); 377 return -EAGAIN; 378 } 379 380 log = &ppl_conf->child_logs[sh->pd_idx]; 381 382 mutex_lock(&log->io_mutex); 383 384 if (!log->rdev || test_bit(Faulty, &log->rdev->flags)) { 385 mutex_unlock(&log->io_mutex); 386 return -EAGAIN; 387 } 388 389 set_bit(STRIPE_LOG_TRAPPED, &sh->state); 390 clear_bit(STRIPE_DELAYED, &sh->state); 391 atomic_inc(&sh->count); 392 393 if (ppl_log_stripe(log, sh)) { 394 spin_lock_irq(&ppl_conf->no_mem_stripes_lock); 395 list_add_tail(&sh->log_list, &ppl_conf->no_mem_stripes); 396 spin_unlock_irq(&ppl_conf->no_mem_stripes_lock); 397 } 398 399 mutex_unlock(&log->io_mutex); 400 401 return 0; 402 } 403 404 static void ppl_log_endio(struct bio *bio) 405 { 406 struct ppl_io_unit *io = bio->bi_private; 407 struct ppl_log *log = io->log; 408 struct ppl_conf *ppl_conf = log->ppl_conf; 409 struct stripe_head *sh, *next; 410 411 pr_debug("%s: seq: %llu\n", __func__, io->seq); 412 413 if (bio->bi_status) 414 md_error(ppl_conf->mddev, log->rdev); 415 416 
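	/*
	 * Hand the logged stripes back regardless of bi_status: after a
	 * failed PPL write md_error() has already been called above, and
	 * raid5d still needs to write the stripes' data and parity.
	 */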
list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) { 417 list_del_init(&sh->log_list); 418 419 set_bit(STRIPE_HANDLE, &sh->state); 420 raid5_release_stripe(sh); 421 } 422 } 423 424 static void ppl_submit_iounit_bio(struct ppl_io_unit *io, struct bio *bio) 425 { 426 char b[BDEVNAME_SIZE]; 427 428 pr_debug("%s: seq: %llu size: %u sector: %llu dev: %s\n", 429 __func__, io->seq, bio->bi_iter.bi_size, 430 (unsigned long long)bio->bi_iter.bi_sector, 431 bio_devname(bio, b)); 432 433 submit_bio(bio); 434 } 435 436 static void ppl_submit_iounit(struct ppl_io_unit *io) 437 { 438 struct ppl_log *log = io->log; 439 struct ppl_conf *ppl_conf = log->ppl_conf; 440 struct ppl_header *pplhdr = page_address(io->header_page); 441 struct bio *bio = &io->bio; 442 struct stripe_head *sh; 443 int i; 444 445 bio->bi_private = io; 446 447 if (!log->rdev || test_bit(Faulty, &log->rdev->flags)) { 448 ppl_log_endio(bio); 449 return; 450 } 451 452 for (i = 0; i < io->entries_count; i++) { 453 struct ppl_header_entry *e = &pplhdr->entries[i]; 454 455 pr_debug("%s: seq: %llu entry: %d data_sector: %llu pp_size: %u data_size: %u\n", 456 __func__, io->seq, i, le64_to_cpu(e->data_sector), 457 le32_to_cpu(e->pp_size), le32_to_cpu(e->data_size)); 458 459 e->data_sector = cpu_to_le64(le64_to_cpu(e->data_sector) >> 460 ilog2(ppl_conf->block_size >> 9)); 461 e->checksum = cpu_to_le32(~le32_to_cpu(e->checksum)); 462 } 463 464 pplhdr->entries_count = cpu_to_le32(io->entries_count); 465 pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PPL_HEADER_SIZE)); 466 467 /* Rewind the buffer if current PPL is larger then remaining space */ 468 if (log->use_multippl && 469 log->rdev->ppl.sector + log->rdev->ppl.size - log->next_io_sector < 470 (PPL_HEADER_SIZE + io->pp_size) >> 9) 471 log->next_io_sector = log->rdev->ppl.sector; 472 473 474 bio->bi_end_io = ppl_log_endio; 475 bio->bi_opf = REQ_OP_WRITE | REQ_FUA; 476 bio_set_dev(bio, log->rdev->bdev); 477 bio->bi_iter.bi_sector = log->next_io_sector; 478 bio_add_page(bio, io->header_page, PAGE_SIZE, 0); 479 480 pr_debug("%s: log->current_io_sector: %llu\n", __func__, 481 (unsigned long long)log->next_io_sector); 482 483 if (log->use_multippl) 484 log->next_io_sector += (PPL_HEADER_SIZE + io->pp_size) >> 9; 485 486 WARN_ON(log->disk_flush_bitmap != 0); 487 488 list_for_each_entry(sh, &io->stripe_list, log_list) { 489 for (i = 0; i < sh->disks; i++) { 490 struct r5dev *dev = &sh->dev[i]; 491 492 if ((ppl_conf->child_logs[i].wb_cache_on) && 493 (test_bit(R5_Wantwrite, &dev->flags))) { 494 set_bit(i, &log->disk_flush_bitmap); 495 } 496 } 497 498 /* entries for full stripe writes have no partial parity */ 499 if (test_bit(STRIPE_FULL_WRITE, &sh->state)) 500 continue; 501 502 if (!bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0)) { 503 struct bio *prev = bio; 504 505 bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, 506 ppl_conf->bs); 507 bio->bi_opf = prev->bi_opf; 508 bio_copy_dev(bio, prev); 509 bio->bi_iter.bi_sector = bio_end_sector(prev); 510 bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0); 511 512 bio_chain(bio, prev); 513 ppl_submit_iounit_bio(io, prev); 514 } 515 } 516 517 ppl_submit_iounit_bio(io, bio); 518 } 519 520 static void ppl_submit_current_io(struct ppl_log *log) 521 { 522 struct ppl_io_unit *io; 523 524 spin_lock_irq(&log->io_list_lock); 525 526 io = list_first_entry_or_null(&log->io_list, struct ppl_io_unit, 527 log_sibling); 528 if (io && io->submitted) 529 io = NULL; 530 531 spin_unlock_irq(&log->io_list_lock); 532 533 if (io) { 534 io->submitted = true; 535 536 if 
(io == log->current_io) 537 log->current_io = NULL; 538 539 ppl_submit_iounit(io); 540 } 541 } 542 543 void ppl_write_stripe_run(struct r5conf *conf) 544 { 545 struct ppl_conf *ppl_conf = conf->log_private; 546 struct ppl_log *log; 547 int i; 548 549 for (i = 0; i < ppl_conf->count; i++) { 550 log = &ppl_conf->child_logs[i]; 551 552 mutex_lock(&log->io_mutex); 553 ppl_submit_current_io(log); 554 mutex_unlock(&log->io_mutex); 555 } 556 } 557 558 static void ppl_io_unit_finished(struct ppl_io_unit *io) 559 { 560 struct ppl_log *log = io->log; 561 struct ppl_conf *ppl_conf = log->ppl_conf; 562 struct r5conf *conf = ppl_conf->mddev->private; 563 unsigned long flags; 564 565 pr_debug("%s: seq: %llu\n", __func__, io->seq); 566 567 local_irq_save(flags); 568 569 spin_lock(&log->io_list_lock); 570 list_del(&io->log_sibling); 571 spin_unlock(&log->io_list_lock); 572 573 mempool_free(io, ppl_conf->io_pool); 574 575 spin_lock(&ppl_conf->no_mem_stripes_lock); 576 if (!list_empty(&ppl_conf->no_mem_stripes)) { 577 struct stripe_head *sh; 578 579 sh = list_first_entry(&ppl_conf->no_mem_stripes, 580 struct stripe_head, log_list); 581 list_del_init(&sh->log_list); 582 set_bit(STRIPE_HANDLE, &sh->state); 583 raid5_release_stripe(sh); 584 } 585 spin_unlock(&ppl_conf->no_mem_stripes_lock); 586 587 local_irq_restore(flags); 588 589 wake_up(&conf->wait_for_quiescent); 590 } 591 592 static void ppl_flush_endio(struct bio *bio) 593 { 594 struct ppl_io_unit *io = bio->bi_private; 595 struct ppl_log *log = io->log; 596 struct ppl_conf *ppl_conf = log->ppl_conf; 597 struct r5conf *conf = ppl_conf->mddev->private; 598 char b[BDEVNAME_SIZE]; 599 600 pr_debug("%s: dev: %s\n", __func__, bio_devname(bio, b)); 601 602 if (bio->bi_status) { 603 struct md_rdev *rdev; 604 605 rcu_read_lock(); 606 rdev = md_find_rdev_rcu(conf->mddev, bio_dev(bio)); 607 if (rdev) 608 md_error(rdev->mddev, rdev); 609 rcu_read_unlock(); 610 } 611 612 bio_put(bio); 613 614 if (atomic_dec_and_test(&io->pending_flushes)) { 615 ppl_io_unit_finished(io); 616 md_wakeup_thread(conf->mddev->thread); 617 } 618 } 619 620 static void ppl_do_flush(struct ppl_io_unit *io) 621 { 622 struct ppl_log *log = io->log; 623 struct ppl_conf *ppl_conf = log->ppl_conf; 624 struct r5conf *conf = ppl_conf->mddev->private; 625 int raid_disks = conf->raid_disks; 626 int flushed_disks = 0; 627 int i; 628 629 atomic_set(&io->pending_flushes, raid_disks); 630 631 for_each_set_bit(i, &log->disk_flush_bitmap, raid_disks) { 632 struct md_rdev *rdev; 633 struct block_device *bdev = NULL; 634 635 rcu_read_lock(); 636 rdev = rcu_dereference(conf->disks[i].rdev); 637 if (rdev && !test_bit(Faulty, &rdev->flags)) 638 bdev = rdev->bdev; 639 rcu_read_unlock(); 640 641 if (bdev) { 642 struct bio *bio; 643 char b[BDEVNAME_SIZE]; 644 645 bio = bio_alloc_bioset(GFP_NOIO, 0, ppl_conf->flush_bs); 646 bio_set_dev(bio, bdev); 647 bio->bi_private = io; 648 bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; 649 bio->bi_end_io = ppl_flush_endio; 650 651 pr_debug("%s: dev: %s\n", __func__, 652 bio_devname(bio, b)); 653 654 submit_bio(bio); 655 flushed_disks++; 656 } 657 } 658 659 log->disk_flush_bitmap = 0; 660 661 for (i = flushed_disks ; i < raid_disks; i++) { 662 if (atomic_dec_and_test(&io->pending_flushes)) 663 ppl_io_unit_finished(io); 664 } 665 } 666 667 static inline bool ppl_no_io_unit_submitted(struct r5conf *conf, 668 struct ppl_log *log) 669 { 670 struct ppl_io_unit *io; 671 672 io = list_first_entry_or_null(&log->io_list, struct ppl_io_unit, 673 log_sibling); 674 675 return !io || 
!io->submitted; 676 } 677 678 void ppl_quiesce(struct r5conf *conf, int quiesce) 679 { 680 struct ppl_conf *ppl_conf = conf->log_private; 681 int i; 682 683 if (quiesce) { 684 for (i = 0; i < ppl_conf->count; i++) { 685 struct ppl_log *log = &ppl_conf->child_logs[i]; 686 687 spin_lock_irq(&log->io_list_lock); 688 wait_event_lock_irq(conf->wait_for_quiescent, 689 ppl_no_io_unit_submitted(conf, log), 690 log->io_list_lock); 691 spin_unlock_irq(&log->io_list_lock); 692 } 693 } 694 } 695 696 void ppl_stripe_write_finished(struct stripe_head *sh) 697 { 698 struct ppl_io_unit *io; 699 700 io = sh->ppl_io; 701 sh->ppl_io = NULL; 702 703 if (io && atomic_dec_and_test(&io->pending_stripes)) { 704 if (io->log->disk_flush_bitmap) 705 ppl_do_flush(io); 706 else 707 ppl_io_unit_finished(io); 708 } 709 } 710 711 static void ppl_xor(int size, struct page *page1, struct page *page2) 712 { 713 struct async_submit_ctl submit; 714 struct dma_async_tx_descriptor *tx; 715 struct page *xor_srcs[] = { page1, page2 }; 716 717 init_async_submit(&submit, ASYNC_TX_ACK|ASYNC_TX_XOR_DROP_DST, 718 NULL, NULL, NULL, NULL); 719 tx = async_xor(page1, xor_srcs, 0, 2, size, &submit); 720 721 async_tx_quiesce(&tx); 722 } 723 724 /* 725 * PPL recovery strategy: xor partial parity and data from all modified data 726 * disks within a stripe and write the result as the new stripe parity. If all 727 * stripe data disks are modified (full stripe write), no partial parity is 728 * available, so just xor the data disks. 729 * 730 * Recovery of a PPL entry shall occur only if all modified data disks are 731 * available and read from all of them succeeds. 732 * 733 * A PPL entry applies to a stripe, partial parity size for an entry is at most 734 * the size of the chunk. Examples of possible cases for a single entry: 735 * 736 * case 0: single data disk write: 737 * data0 data1 data2 ppl parity 738 * +--------+--------+--------+ +--------------------+ 739 * | ------ | ------ | ------ | +----+ | (no change) | 740 * | ------ | -data- | ------ | | pp | -> | data1 ^ pp | 741 * | ------ | -data- | ------ | | pp | -> | data1 ^ pp | 742 * | ------ | ------ | ------ | +----+ | (no change) | 743 * +--------+--------+--------+ +--------------------+ 744 * pp_size = data_size 745 * 746 * case 1: more than one data disk write: 747 * data0 data1 data2 ppl parity 748 * +--------+--------+--------+ +--------------------+ 749 * | ------ | ------ | ------ | +----+ | (no change) | 750 * | -data- | -data- | ------ | | pp | -> | data0 ^ data1 ^ pp | 751 * | -data- | -data- | ------ | | pp | -> | data0 ^ data1 ^ pp | 752 * | ------ | ------ | ------ | +----+ | (no change) | 753 * +--------+--------+--------+ +--------------------+ 754 * pp_size = data_size / modified_data_disks 755 * 756 * case 2: write to all data disks (also full stripe write): 757 * data0 data1 data2 parity 758 * +--------+--------+--------+ +--------------------+ 759 * | ------ | ------ | ------ | | (no change) | 760 * | -data- | -data- | -data- | --------> | xor all data | 761 * | ------ | ------ | ------ | --------> | (no change) | 762 * | ------ | ------ | ------ | | (no change) | 763 * +--------+--------+--------+ +--------------------+ 764 * pp_size = 0 765 * 766 * The following cases are possible only in other implementations. 
The recovery 767 * code can handle them, but they are not generated at runtime because they can 768 * be reduced to cases 0, 1 and 2: 769 * 770 * case 3: 771 * data0 data1 data2 ppl parity 772 * +--------+--------+--------+ +----+ +--------------------+ 773 * | ------ | -data- | -data- | | pp | | data1 ^ data2 ^ pp | 774 * | ------ | -data- | -data- | | pp | -> | data1 ^ data2 ^ pp | 775 * | -data- | -data- | -data- | | -- | -> | xor all data | 776 * | -data- | -data- | ------ | | pp | | data0 ^ data1 ^ pp | 777 * +--------+--------+--------+ +----+ +--------------------+ 778 * pp_size = chunk_size 779 * 780 * case 4: 781 * data0 data1 data2 ppl parity 782 * +--------+--------+--------+ +----+ +--------------------+ 783 * | ------ | -data- | ------ | | pp | | data1 ^ pp | 784 * | ------ | ------ | ------ | | -- | -> | (no change) | 785 * | ------ | ------ | ------ | | -- | -> | (no change) | 786 * | -data- | ------ | ------ | | pp | | data0 ^ pp | 787 * +--------+--------+--------+ +----+ +--------------------+ 788 * pp_size = chunk_size 789 */ 790 static int ppl_recover_entry(struct ppl_log *log, struct ppl_header_entry *e, 791 sector_t ppl_sector) 792 { 793 struct ppl_conf *ppl_conf = log->ppl_conf; 794 struct mddev *mddev = ppl_conf->mddev; 795 struct r5conf *conf = mddev->private; 796 int block_size = ppl_conf->block_size; 797 struct page *page1; 798 struct page *page2; 799 sector_t r_sector_first; 800 sector_t r_sector_last; 801 int strip_sectors; 802 int data_disks; 803 int i; 804 int ret = 0; 805 char b[BDEVNAME_SIZE]; 806 unsigned int pp_size = le32_to_cpu(e->pp_size); 807 unsigned int data_size = le32_to_cpu(e->data_size); 808 809 page1 = alloc_page(GFP_KERNEL); 810 page2 = alloc_page(GFP_KERNEL); 811 812 if (!page1 || !page2) { 813 ret = -ENOMEM; 814 goto out; 815 } 816 817 r_sector_first = le64_to_cpu(e->data_sector) * (block_size >> 9); 818 819 if ((pp_size >> 9) < conf->chunk_sectors) { 820 if (pp_size > 0) { 821 data_disks = data_size / pp_size; 822 strip_sectors = pp_size >> 9; 823 } else { 824 data_disks = conf->raid_disks - conf->max_degraded; 825 strip_sectors = (data_size >> 9) / data_disks; 826 } 827 r_sector_last = r_sector_first + 828 (data_disks - 1) * conf->chunk_sectors + 829 strip_sectors; 830 } else { 831 data_disks = conf->raid_disks - conf->max_degraded; 832 strip_sectors = conf->chunk_sectors; 833 r_sector_last = r_sector_first + (data_size >> 9); 834 } 835 836 pr_debug("%s: array sector first: %llu last: %llu\n", __func__, 837 (unsigned long long)r_sector_first, 838 (unsigned long long)r_sector_last); 839 840 /* if start and end is 4k aligned, use a 4k block */ 841 if (block_size == 512 && 842 (r_sector_first & (STRIPE_SECTORS - 1)) == 0 && 843 (r_sector_last & (STRIPE_SECTORS - 1)) == 0) 844 block_size = STRIPE_SIZE; 845 846 /* iterate through blocks in strip */ 847 for (i = 0; i < strip_sectors; i += (block_size >> 9)) { 848 bool update_parity = false; 849 sector_t parity_sector; 850 struct md_rdev *parity_rdev; 851 struct stripe_head sh; 852 int disk; 853 int indent = 0; 854 855 pr_debug("%s:%*s iter %d start\n", __func__, indent, "", i); 856 indent += 2; 857 858 memset(page_address(page1), 0, PAGE_SIZE); 859 860 /* iterate through data member disks */ 861 for (disk = 0; disk < data_disks; disk++) { 862 int dd_idx; 863 struct md_rdev *rdev; 864 sector_t sector; 865 sector_t r_sector = r_sector_first + i + 866 (disk * conf->chunk_sectors); 867 868 pr_debug("%s:%*s data member disk %d start\n", 869 __func__, indent, "", disk); 870 indent += 2; 871 872 if 
(r_sector >= r_sector_last) { 873 pr_debug("%s:%*s array sector %llu doesn't need parity update\n", 874 __func__, indent, "", 875 (unsigned long long)r_sector); 876 indent -= 2; 877 continue; 878 } 879 880 update_parity = true; 881 882 /* map raid sector to member disk */ 883 sector = raid5_compute_sector(conf, r_sector, 0, 884 &dd_idx, NULL); 885 pr_debug("%s:%*s processing array sector %llu => data member disk %d, sector %llu\n", 886 __func__, indent, "", 887 (unsigned long long)r_sector, dd_idx, 888 (unsigned long long)sector); 889 890 rdev = conf->disks[dd_idx].rdev; 891 if (!rdev || (!test_bit(In_sync, &rdev->flags) && 892 sector >= rdev->recovery_offset)) { 893 pr_debug("%s:%*s data member disk %d missing\n", 894 __func__, indent, "", dd_idx); 895 update_parity = false; 896 break; 897 } 898 899 pr_debug("%s:%*s reading data member disk %s sector %llu\n", 900 __func__, indent, "", bdevname(rdev->bdev, b), 901 (unsigned long long)sector); 902 if (!sync_page_io(rdev, sector, block_size, page2, 903 REQ_OP_READ, 0, false)) { 904 md_error(mddev, rdev); 905 pr_debug("%s:%*s read failed!\n", __func__, 906 indent, ""); 907 ret = -EIO; 908 goto out; 909 } 910 911 ppl_xor(block_size, page1, page2); 912 913 indent -= 2; 914 } 915 916 if (!update_parity) 917 continue; 918 919 if (pp_size > 0) { 920 pr_debug("%s:%*s reading pp disk sector %llu\n", 921 __func__, indent, "", 922 (unsigned long long)(ppl_sector + i)); 923 if (!sync_page_io(log->rdev, 924 ppl_sector - log->rdev->data_offset + i, 925 block_size, page2, REQ_OP_READ, 0, 926 false)) { 927 pr_debug("%s:%*s read failed!\n", __func__, 928 indent, ""); 929 md_error(mddev, log->rdev); 930 ret = -EIO; 931 goto out; 932 } 933 934 ppl_xor(block_size, page1, page2); 935 } 936 937 /* map raid sector to parity disk */ 938 parity_sector = raid5_compute_sector(conf, r_sector_first + i, 939 0, &disk, &sh); 940 BUG_ON(sh.pd_idx != le32_to_cpu(e->parity_disk)); 941 parity_rdev = conf->disks[sh.pd_idx].rdev; 942 943 BUG_ON(parity_rdev->bdev->bd_dev != log->rdev->bdev->bd_dev); 944 pr_debug("%s:%*s write parity at sector %llu, disk %s\n", 945 __func__, indent, "", 946 (unsigned long long)parity_sector, 947 bdevname(parity_rdev->bdev, b)); 948 if (!sync_page_io(parity_rdev, parity_sector, block_size, 949 page1, REQ_OP_WRITE, 0, false)) { 950 pr_debug("%s:%*s parity write error!\n", __func__, 951 indent, ""); 952 md_error(mddev, parity_rdev); 953 ret = -EIO; 954 goto out; 955 } 956 } 957 out: 958 if (page1) 959 __free_page(page1); 960 if (page2) 961 __free_page(page2); 962 return ret; 963 } 964 965 static int ppl_recover(struct ppl_log *log, struct ppl_header *pplhdr, 966 sector_t offset) 967 { 968 struct ppl_conf *ppl_conf = log->ppl_conf; 969 struct md_rdev *rdev = log->rdev; 970 struct mddev *mddev = rdev->mddev; 971 sector_t ppl_sector = rdev->ppl.sector + offset + 972 (PPL_HEADER_SIZE >> 9); 973 struct page *page; 974 int i; 975 int ret = 0; 976 977 page = alloc_page(GFP_KERNEL); 978 if (!page) 979 return -ENOMEM; 980 981 /* iterate through all PPL entries saved */ 982 for (i = 0; i < le32_to_cpu(pplhdr->entries_count); i++) { 983 struct ppl_header_entry *e = &pplhdr->entries[i]; 984 u32 pp_size = le32_to_cpu(e->pp_size); 985 sector_t sector = ppl_sector; 986 int ppl_entry_sectors = pp_size >> 9; 987 u32 crc, crc_stored; 988 989 pr_debug("%s: disk: %d entry: %d ppl_sector: %llu pp_size: %u\n", 990 __func__, rdev->raid_disk, i, 991 (unsigned long long)ppl_sector, pp_size); 992 993 crc = ~0; 994 crc_stored = le32_to_cpu(e->checksum); 995 996 /* read 
parial parity for this entry and calculate its checksum */ 997 while (pp_size) { 998 int s = pp_size > PAGE_SIZE ? PAGE_SIZE : pp_size; 999 1000 if (!sync_page_io(rdev, sector - rdev->data_offset, 1001 s, page, REQ_OP_READ, 0, false)) { 1002 md_error(mddev, rdev); 1003 ret = -EIO; 1004 goto out; 1005 } 1006 1007 crc = crc32c_le(crc, page_address(page), s); 1008 1009 pp_size -= s; 1010 sector += s >> 9; 1011 } 1012 1013 crc = ~crc; 1014 1015 if (crc != crc_stored) { 1016 /* 1017 * Don't recover this entry if the checksum does not 1018 * match, but keep going and try to recover other 1019 * entries. 1020 */ 1021 pr_debug("%s: ppl entry crc does not match: stored: 0x%x calculated: 0x%x\n", 1022 __func__, crc_stored, crc); 1023 ppl_conf->mismatch_count++; 1024 } else { 1025 ret = ppl_recover_entry(log, e, ppl_sector); 1026 if (ret) 1027 goto out; 1028 ppl_conf->recovered_entries++; 1029 } 1030 1031 ppl_sector += ppl_entry_sectors; 1032 } 1033 1034 /* flush the disk cache after recovery if necessary */ 1035 ret = blkdev_issue_flush(rdev->bdev, GFP_KERNEL, NULL); 1036 out: 1037 __free_page(page); 1038 return ret; 1039 } 1040 1041 static int ppl_write_empty_header(struct ppl_log *log) 1042 { 1043 struct page *page; 1044 struct ppl_header *pplhdr; 1045 struct md_rdev *rdev = log->rdev; 1046 int ret = 0; 1047 1048 pr_debug("%s: disk: %d ppl_sector: %llu\n", __func__, 1049 rdev->raid_disk, (unsigned long long)rdev->ppl.sector); 1050 1051 page = alloc_page(GFP_NOIO | __GFP_ZERO); 1052 if (!page) 1053 return -ENOMEM; 1054 1055 pplhdr = page_address(page); 1056 /* zero out PPL space to avoid collision with old PPLs */ 1057 blkdev_issue_zeroout(rdev->bdev, rdev->ppl.sector, 1058 log->rdev->ppl.size, GFP_NOIO, 0); 1059 memset(pplhdr->reserved, 0xff, PPL_HDR_RESERVED); 1060 pplhdr->signature = cpu_to_le32(log->ppl_conf->signature); 1061 pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PAGE_SIZE)); 1062 1063 if (!sync_page_io(rdev, rdev->ppl.sector - rdev->data_offset, 1064 PPL_HEADER_SIZE, page, REQ_OP_WRITE | REQ_SYNC | 1065 REQ_FUA, 0, false)) { 1066 md_error(rdev->mddev, rdev); 1067 ret = -EIO; 1068 } 1069 1070 __free_page(page); 1071 return ret; 1072 } 1073 1074 static int ppl_load_distributed(struct ppl_log *log) 1075 { 1076 struct ppl_conf *ppl_conf = log->ppl_conf; 1077 struct md_rdev *rdev = log->rdev; 1078 struct mddev *mddev = rdev->mddev; 1079 struct page *page, *page2, *tmp; 1080 struct ppl_header *pplhdr = NULL, *prev_pplhdr = NULL; 1081 u32 crc, crc_stored; 1082 u32 signature; 1083 int ret = 0, i; 1084 sector_t pplhdr_offset = 0, prev_pplhdr_offset = 0; 1085 1086 pr_debug("%s: disk: %d\n", __func__, rdev->raid_disk); 1087 /* read PPL headers, find the recent one */ 1088 page = alloc_page(GFP_KERNEL); 1089 if (!page) 1090 return -ENOMEM; 1091 1092 page2 = alloc_page(GFP_KERNEL); 1093 if (!page2) { 1094 __free_page(page); 1095 return -ENOMEM; 1096 } 1097 1098 /* searching ppl area for latest ppl */ 1099 while (pplhdr_offset < rdev->ppl.size - (PPL_HEADER_SIZE >> 9)) { 1100 if (!sync_page_io(rdev, 1101 rdev->ppl.sector - rdev->data_offset + 1102 pplhdr_offset, PAGE_SIZE, page, REQ_OP_READ, 1103 0, false)) { 1104 md_error(mddev, rdev); 1105 ret = -EIO; 1106 /* if not able to read - don't recover any PPL */ 1107 pplhdr = NULL; 1108 break; 1109 } 1110 pplhdr = page_address(page); 1111 1112 /* check header validity */ 1113 crc_stored = le32_to_cpu(pplhdr->checksum); 1114 pplhdr->checksum = 0; 1115 crc = ~crc32c_le(~0, pplhdr, PAGE_SIZE); 1116 1117 if (crc_stored != crc) { 1118 pr_debug("%s: 
ppl header crc does not match: stored: 0x%x calculated: 0x%x (offset: %llu)\n", 1119 __func__, crc_stored, crc, 1120 (unsigned long long)pplhdr_offset); 1121 pplhdr = prev_pplhdr; 1122 pplhdr_offset = prev_pplhdr_offset; 1123 break; 1124 } 1125 1126 signature = le32_to_cpu(pplhdr->signature); 1127 1128 if (mddev->external) { 1129 /* 1130 * For external metadata the header signature is set and 1131 * validated in userspace. 1132 */ 1133 ppl_conf->signature = signature; 1134 } else if (ppl_conf->signature != signature) { 1135 pr_debug("%s: ppl header signature does not match: stored: 0x%x configured: 0x%x (offset: %llu)\n", 1136 __func__, signature, ppl_conf->signature, 1137 (unsigned long long)pplhdr_offset); 1138 pplhdr = prev_pplhdr; 1139 pplhdr_offset = prev_pplhdr_offset; 1140 break; 1141 } 1142 1143 if (prev_pplhdr && le64_to_cpu(prev_pplhdr->generation) > 1144 le64_to_cpu(pplhdr->generation)) { 1145 /* previous was newest */ 1146 pplhdr = prev_pplhdr; 1147 pplhdr_offset = prev_pplhdr_offset; 1148 break; 1149 } 1150 1151 prev_pplhdr_offset = pplhdr_offset; 1152 prev_pplhdr = pplhdr; 1153 1154 tmp = page; 1155 page = page2; 1156 page2 = tmp; 1157 1158 /* calculate next potential ppl offset */ 1159 for (i = 0; i < le32_to_cpu(pplhdr->entries_count); i++) 1160 pplhdr_offset += 1161 le32_to_cpu(pplhdr->entries[i].pp_size) >> 9; 1162 pplhdr_offset += PPL_HEADER_SIZE >> 9; 1163 } 1164 1165 /* no valid ppl found */ 1166 if (!pplhdr) 1167 ppl_conf->mismatch_count++; 1168 else 1169 pr_debug("%s: latest PPL found at offset: %llu, with generation: %llu\n", 1170 __func__, (unsigned long long)pplhdr_offset, 1171 le64_to_cpu(pplhdr->generation)); 1172 1173 /* attempt to recover from log if we are starting a dirty array */ 1174 if (pplhdr && !mddev->pers && mddev->recovery_cp != MaxSector) 1175 ret = ppl_recover(log, pplhdr, pplhdr_offset); 1176 1177 /* write empty header if we are starting the array */ 1178 if (!ret && !mddev->pers) 1179 ret = ppl_write_empty_header(log); 1180 1181 __free_page(page); 1182 __free_page(page2); 1183 1184 pr_debug("%s: return: %d mismatch_count: %d recovered_entries: %d\n", 1185 __func__, ret, ppl_conf->mismatch_count, 1186 ppl_conf->recovered_entries); 1187 return ret; 1188 } 1189 1190 static int ppl_load(struct ppl_conf *ppl_conf) 1191 { 1192 int ret = 0; 1193 u32 signature = 0; 1194 bool signature_set = false; 1195 int i; 1196 1197 for (i = 0; i < ppl_conf->count; i++) { 1198 struct ppl_log *log = &ppl_conf->child_logs[i]; 1199 1200 /* skip missing drive */ 1201 if (!log->rdev) 1202 continue; 1203 1204 ret = ppl_load_distributed(log); 1205 if (ret) 1206 break; 1207 1208 /* 1209 * For external metadata we can't check if the signature is 1210 * correct on a single drive, but we can check if it is the same 1211 * on all drives. 
1212 */ 1213 if (ppl_conf->mddev->external) { 1214 if (!signature_set) { 1215 signature = ppl_conf->signature; 1216 signature_set = true; 1217 } else if (signature != ppl_conf->signature) { 1218 pr_warn("md/raid:%s: PPL header signature does not match on all member drives\n", 1219 mdname(ppl_conf->mddev)); 1220 ret = -EINVAL; 1221 break; 1222 } 1223 } 1224 } 1225 1226 pr_debug("%s: return: %d mismatch_count: %d recovered_entries: %d\n", 1227 __func__, ret, ppl_conf->mismatch_count, 1228 ppl_conf->recovered_entries); 1229 return ret; 1230 } 1231 1232 static void __ppl_exit_log(struct ppl_conf *ppl_conf) 1233 { 1234 clear_bit(MD_HAS_PPL, &ppl_conf->mddev->flags); 1235 clear_bit(MD_HAS_MULTIPLE_PPLS, &ppl_conf->mddev->flags); 1236 1237 kfree(ppl_conf->child_logs); 1238 1239 if (ppl_conf->bs) 1240 bioset_free(ppl_conf->bs); 1241 if (ppl_conf->flush_bs) 1242 bioset_free(ppl_conf->flush_bs); 1243 mempool_destroy(ppl_conf->io_pool); 1244 kmem_cache_destroy(ppl_conf->io_kc); 1245 1246 kfree(ppl_conf); 1247 } 1248 1249 void ppl_exit_log(struct r5conf *conf) 1250 { 1251 struct ppl_conf *ppl_conf = conf->log_private; 1252 1253 if (ppl_conf) { 1254 __ppl_exit_log(ppl_conf); 1255 conf->log_private = NULL; 1256 } 1257 } 1258 1259 static int ppl_validate_rdev(struct md_rdev *rdev) 1260 { 1261 char b[BDEVNAME_SIZE]; 1262 int ppl_data_sectors; 1263 int ppl_size_new; 1264 1265 /* 1266 * The configured PPL size must be enough to store 1267 * the header and (at the very least) partial parity 1268 * for one stripe. Round it down to ensure the data 1269 * space is cleanly divisible by stripe size. 1270 */ 1271 ppl_data_sectors = rdev->ppl.size - (PPL_HEADER_SIZE >> 9); 1272 1273 if (ppl_data_sectors > 0) 1274 ppl_data_sectors = rounddown(ppl_data_sectors, STRIPE_SECTORS); 1275 1276 if (ppl_data_sectors <= 0) { 1277 pr_warn("md/raid:%s: PPL space too small on %s\n", 1278 mdname(rdev->mddev), bdevname(rdev->bdev, b)); 1279 return -ENOSPC; 1280 } 1281 1282 ppl_size_new = ppl_data_sectors + (PPL_HEADER_SIZE >> 9); 1283 1284 if ((rdev->ppl.sector < rdev->data_offset && 1285 rdev->ppl.sector + ppl_size_new > rdev->data_offset) || 1286 (rdev->ppl.sector >= rdev->data_offset && 1287 rdev->data_offset + rdev->sectors > rdev->ppl.sector)) { 1288 pr_warn("md/raid:%s: PPL space overlaps with data on %s\n", 1289 mdname(rdev->mddev), bdevname(rdev->bdev, b)); 1290 return -EINVAL; 1291 } 1292 1293 if (!rdev->mddev->external && 1294 ((rdev->ppl.offset > 0 && rdev->ppl.offset < (rdev->sb_size >> 9)) || 1295 (rdev->ppl.offset <= 0 && rdev->ppl.offset + ppl_size_new > 0))) { 1296 pr_warn("md/raid:%s: PPL space overlaps with superblock on %s\n", 1297 mdname(rdev->mddev), bdevname(rdev->bdev, b)); 1298 return -EINVAL; 1299 } 1300 1301 rdev->ppl.size = ppl_size_new; 1302 1303 return 0; 1304 } 1305 1306 static void ppl_init_child_log(struct ppl_log *log, struct md_rdev *rdev) 1307 { 1308 struct request_queue *q; 1309 1310 if ((rdev->ppl.size << 9) >= (PPL_SPACE_SIZE + 1311 PPL_HEADER_SIZE) * 2) { 1312 log->use_multippl = true; 1313 set_bit(MD_HAS_MULTIPLE_PPLS, 1314 &log->ppl_conf->mddev->flags); 1315 log->entry_space = PPL_SPACE_SIZE; 1316 } else { 1317 log->use_multippl = false; 1318 log->entry_space = (log->rdev->ppl.size << 9) - 1319 PPL_HEADER_SIZE; 1320 } 1321 log->next_io_sector = rdev->ppl.sector; 1322 1323 q = bdev_get_queue(rdev->bdev); 1324 if (test_bit(QUEUE_FLAG_WC, &q->queue_flags)) 1325 log->wb_cache_on = true; 1326 } 1327 1328 int ppl_init_log(struct r5conf *conf) 1329 { 1330 struct ppl_conf *ppl_conf; 1331 struct 
mddev *mddev = conf->mddev; 1332 int ret = 0; 1333 int max_disks; 1334 int i; 1335 1336 pr_debug("md/raid:%s: enabling distributed Partial Parity Log\n", 1337 mdname(conf->mddev)); 1338 1339 if (PAGE_SIZE != 4096) 1340 return -EINVAL; 1341 1342 if (mddev->level != 5) { 1343 pr_warn("md/raid:%s PPL is not compatible with raid level %d\n", 1344 mdname(mddev), mddev->level); 1345 return -EINVAL; 1346 } 1347 1348 if (mddev->bitmap_info.file || mddev->bitmap_info.offset) { 1349 pr_warn("md/raid:%s PPL is not compatible with bitmap\n", 1350 mdname(mddev)); 1351 return -EINVAL; 1352 } 1353 1354 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) { 1355 pr_warn("md/raid:%s PPL is not compatible with journal\n", 1356 mdname(mddev)); 1357 return -EINVAL; 1358 } 1359 1360 max_disks = FIELD_SIZEOF(struct ppl_log, disk_flush_bitmap) * 1361 BITS_PER_BYTE; 1362 if (conf->raid_disks > max_disks) { 1363 pr_warn("md/raid:%s PPL doesn't support over %d disks in the array\n", 1364 mdname(mddev), max_disks); 1365 return -EINVAL; 1366 } 1367 1368 ppl_conf = kzalloc(sizeof(struct ppl_conf), GFP_KERNEL); 1369 if (!ppl_conf) 1370 return -ENOMEM; 1371 1372 ppl_conf->mddev = mddev; 1373 1374 ppl_conf->io_kc = KMEM_CACHE(ppl_io_unit, 0); 1375 if (!ppl_conf->io_kc) { 1376 ret = -ENOMEM; 1377 goto err; 1378 } 1379 1380 ppl_conf->io_pool = mempool_create(conf->raid_disks, ppl_io_pool_alloc, 1381 ppl_io_pool_free, ppl_conf->io_kc); 1382 if (!ppl_conf->io_pool) { 1383 ret = -ENOMEM; 1384 goto err; 1385 } 1386 1387 ppl_conf->bs = bioset_create(conf->raid_disks, 0, BIOSET_NEED_BVECS); 1388 if (!ppl_conf->bs) { 1389 ret = -ENOMEM; 1390 goto err; 1391 } 1392 1393 ppl_conf->flush_bs = bioset_create(conf->raid_disks, 0, 0); 1394 if (!ppl_conf->flush_bs) { 1395 ret = -ENOMEM; 1396 goto err; 1397 } 1398 1399 ppl_conf->count = conf->raid_disks; 1400 ppl_conf->child_logs = kcalloc(ppl_conf->count, sizeof(struct ppl_log), 1401 GFP_KERNEL); 1402 if (!ppl_conf->child_logs) { 1403 ret = -ENOMEM; 1404 goto err; 1405 } 1406 1407 atomic64_set(&ppl_conf->seq, 0); 1408 INIT_LIST_HEAD(&ppl_conf->no_mem_stripes); 1409 spin_lock_init(&ppl_conf->no_mem_stripes_lock); 1410 1411 if (!mddev->external) { 1412 ppl_conf->signature = ~crc32c_le(~0, mddev->uuid, sizeof(mddev->uuid)); 1413 ppl_conf->block_size = 512; 1414 } else { 1415 ppl_conf->block_size = queue_logical_block_size(mddev->queue); 1416 } 1417 1418 for (i = 0; i < ppl_conf->count; i++) { 1419 struct ppl_log *log = &ppl_conf->child_logs[i]; 1420 struct md_rdev *rdev = conf->disks[i].rdev; 1421 1422 mutex_init(&log->io_mutex); 1423 spin_lock_init(&log->io_list_lock); 1424 INIT_LIST_HEAD(&log->io_list); 1425 1426 log->ppl_conf = ppl_conf; 1427 log->rdev = rdev; 1428 1429 if (rdev) { 1430 ret = ppl_validate_rdev(rdev); 1431 if (ret) 1432 goto err; 1433 1434 ppl_init_child_log(log, rdev); 1435 } 1436 } 1437 1438 /* load and possibly recover the logs from the member disks */ 1439 ret = ppl_load(ppl_conf); 1440 1441 if (ret) { 1442 goto err; 1443 } else if (!mddev->pers && mddev->recovery_cp == 0 && 1444 ppl_conf->recovered_entries > 0 && 1445 ppl_conf->mismatch_count == 0) { 1446 /* 1447 * If we are starting a dirty array and the recovery succeeds 1448 * without any issues, set the array as clean. 
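		 * recovery_cp == 0 here means the array is being started
		 * dirty; recovered_entries > 0 with mismatch_count == 0 means
		 * the parity of every logged stripe is consistent again, so
		 * the full resync can be skipped.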
1449 */ 1450 mddev->recovery_cp = MaxSector; 1451 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); 1452 } else if (mddev->pers && ppl_conf->mismatch_count > 0) { 1453 /* no mismatch allowed when enabling PPL for a running array */ 1454 ret = -EINVAL; 1455 goto err; 1456 } 1457 1458 conf->log_private = ppl_conf; 1459 set_bit(MD_HAS_PPL, &ppl_conf->mddev->flags); 1460 1461 return 0; 1462 err: 1463 __ppl_exit_log(ppl_conf); 1464 return ret; 1465 } 1466 1467 int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add) 1468 { 1469 struct ppl_conf *ppl_conf = conf->log_private; 1470 struct ppl_log *log; 1471 int ret = 0; 1472 char b[BDEVNAME_SIZE]; 1473 1474 if (!rdev) 1475 return -EINVAL; 1476 1477 pr_debug("%s: disk: %d operation: %s dev: %s\n", 1478 __func__, rdev->raid_disk, add ? "add" : "remove", 1479 bdevname(rdev->bdev, b)); 1480 1481 if (rdev->raid_disk < 0) 1482 return 0; 1483 1484 if (rdev->raid_disk >= ppl_conf->count) 1485 return -ENODEV; 1486 1487 log = &ppl_conf->child_logs[rdev->raid_disk]; 1488 1489 mutex_lock(&log->io_mutex); 1490 if (add) { 1491 ret = ppl_validate_rdev(rdev); 1492 if (!ret) { 1493 log->rdev = rdev; 1494 ret = ppl_write_empty_header(log); 1495 ppl_init_child_log(log, rdev); 1496 } 1497 } else { 1498 log->rdev = NULL; 1499 } 1500 mutex_unlock(&log->io_mutex); 1501 1502 return ret; 1503 } 1504