/*
 * Copyright (C) 2014 Facebook. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>

#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/freezer.h>

#define DM_MSG_PREFIX "log-writes"

/*
 * This target will sequentially log all writes to the target device onto the
 * log device.  This is helpful for replaying writes to check for fs consistency
 * at all times.  This target provides a mechanism to mark specific events to
 * check data at a later time.  So for example you would:
 *
 * write data
 * fsync
 * dmsetup message /dev/whatever mark mymark
 * unmount /mnt/test
 *
 * Then replay the log up to mymark and check the contents of the replay to
 * verify it matches what was written.
 *
 * We log writes only after they have been flushed; this makes the log describe
 * close to the order in which the data hits the actual disk, not its cache.  So
 * for example the following sequence (W means write, C means complete)
 *
 * Wa,Wb,Wc,Cc,Ca,FLUSH,FUAd,Cb,CFLUSH,CFUAd
 *
 * would result in the log looking like this:
 *
 * c,a,flush,fuad,b,<other writes>,<next flush>
 *
 * This is meant to help expose problems where file systems do not properly wait
 * on data being written before invoking a FLUSH.  FUA bypasses cache, so once it
 * completes it is added to the log as it should be on disk.
 *
 * We treat DISCARDs as if they don't bypass cache so that they are logged in
 * order of completion along with the normal writes.  If we didn't do it this
 * way we would process all the discards first and then write all the data, when
 * in fact we want to do the data and the discard in the order that they
 * completed.
 */
#define LOG_FLUSH_FLAG		(1 << 0)
#define LOG_FUA_FLAG		(1 << 1)
#define LOG_DISCARD_FLAG	(1 << 2)
#define LOG_MARK_FLAG		(1 << 3)

#define WRITE_LOG_VERSION 1ULL
#define WRITE_LOG_MAGIC 0x6a736677736872ULL

/*
 * The disk format for this is braindead simple.
 *
 * At byte 0 we have our super, followed by the following sequence for
 * nr_entries:
 *
 * [   1 sector    ][ entry->nr_sectors ]
 * [log_write_entry][   data written    ]
 *
 * The log_write_entry takes up a full sector so we can have arbitrary length
 * marks and it leaves us room for extra content in the future.
 */

/*
 * Basic info about the log for userspace.
 */
struct log_write_super {
	__le64 magic;
	__le64 version;
	__le64 nr_entries;
	__le32 sectorsize;
};

/*
 * sector - the sector we wrote.
 * nr_sectors - the number of sectors we wrote.
 * flags - flags for this log entry.
 * data_len - the size of the data in this log entry, this is for private log
 * entry stuff, the MARK data provided by userspace for example.
 */
struct log_write_entry {
	__le64 sector;
	__le64 nr_sectors;
	__le64 flags;
	__le64 data_len;
};
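
/*
 * For illustration only (this is not part of the module; the real replay
 * tooling lives in userspace): a minimal reader could walk the on-disk log
 * roughly like this, mirroring the sector accounting done by the log
 * kthread below, with sectorsize taken from the super:
 *
 *	pread(fd, &super, sizeof(super), 0);
 *	sector = 1;	// entries start right after the super's sector
 *	for (i = 0; i < le64_to_cpu(super.nr_entries); i++) {
 *		pread(fd, &entry, sizeof(entry), sector * sectorsize);
 *		sector++;	// each entry header pads to a full sector
 *		if (!(le64_to_cpu(entry.flags) & LOG_DISCARD_FLAG))
 *			sector += le64_to_cpu(entry.nr_sectors);
 *	}
 *
 * MARK data (data_len bytes) lives inside the entry's own sector, and
 * DISCARD entries record nr_sectors but are not followed by data sectors.
 */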

struct log_writes_c {
	struct dm_dev *dev;
	struct dm_dev *logdev;
	u64 logged_entries;
	u32 sectorsize;
	atomic_t io_blocks;
	atomic_t pending_blocks;
	sector_t next_sector;
	sector_t end_sector;
	bool logging_enabled;
	bool device_supports_discard;
	spinlock_t blocks_lock;
	struct list_head unflushed_blocks;
	struct list_head logging_blocks;
	wait_queue_head_t wait;
	struct task_struct *log_kthread;
};

struct pending_block {
	int vec_cnt;
	u64 flags;
	sector_t sector;
	sector_t nr_sectors;
	char *data;
	u32 datalen;
	struct list_head list;
	struct bio_vec vecs[0];
};

struct per_bio_data {
	struct pending_block *block;
};

static void put_pending_block(struct log_writes_c *lc)
{
	if (atomic_dec_and_test(&lc->pending_blocks)) {
		smp_mb__after_atomic();
		if (waitqueue_active(&lc->wait))
			wake_up(&lc->wait);
	}
}

static void put_io_block(struct log_writes_c *lc)
{
	if (atomic_dec_and_test(&lc->io_blocks)) {
		smp_mb__after_atomic();
		if (waitqueue_active(&lc->wait))
			wake_up(&lc->wait);
	}
}
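
/*
 * pending_blocks counts blocks queued for (or still being prepared for)
 * logging; io_blocks counts log bios in flight.  Both pair with the
 * wait_event() in the destructor: the smp_mb__after_atomic() above orders
 * the final decrement against the unlocked waitqueue_active() check so a
 * concurrent waiter cannot be missed.
 */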

static void log_end_io(struct bio *bio)
{
	struct log_writes_c *lc = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	if (bio->bi_error) {
		unsigned long flags;

		DMERR("Error writing log block, error=%d", bio->bi_error);
		spin_lock_irqsave(&lc->blocks_lock, flags);
		lc->logging_enabled = false;
		spin_unlock_irqrestore(&lc->blocks_lock, flags);
	}

	bio_for_each_segment_all(bvec, bio, i)
		__free_page(bvec->bv_page);

	put_io_block(lc);
	bio_put(bio);
}

/*
 * Meant to be called if there is an error; it will free all the pages
 * associated with the block.
 */
static void free_pending_block(struct log_writes_c *lc,
			       struct pending_block *block)
{
	int i;

	for (i = 0; i < block->vec_cnt; i++) {
		if (block->vecs[i].bv_page)
			__free_page(block->vecs[i].bv_page);
	}
	kfree(block->data);
	kfree(block);
	put_pending_block(lc);
}

static int write_metadata(struct log_writes_c *lc, void *entry,
			  size_t entrylen, void *data, size_t datalen,
			  sector_t sector)
{
	struct bio *bio;
	struct page *page;
	void *ptr;
	size_t ret;

	bio = bio_alloc(GFP_KERNEL, 1);
	if (!bio) {
		DMERR("Couldn't alloc log bio");
		goto error;
	}
	bio->bi_iter.bi_size = 0;
	bio->bi_iter.bi_sector = sector;
	bio->bi_bdev = lc->logdev->bdev;
	bio->bi_end_io = log_end_io;
	bio->bi_private = lc;
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	page = alloc_page(GFP_KERNEL);
	if (!page) {
		DMERR("Couldn't alloc log page");
		bio_put(bio);
		goto error;
	}

	ptr = kmap_atomic(page);
	memcpy(ptr, entry, entrylen);
	if (datalen)
		memcpy(ptr + entrylen, data, datalen);
	memset(ptr + entrylen + datalen, 0,
	       lc->sectorsize - entrylen - datalen);
	kunmap_atomic(ptr);

	ret = bio_add_page(bio, page, lc->sectorsize, 0);
	if (ret != lc->sectorsize) {
		DMERR("Couldn't add page to the log block");
		goto error_bio;
	}
	submit_bio(bio);
	return 0;
error_bio:
	bio_put(bio);
	__free_page(page);
error:
	put_io_block(lc);
	return -1;
}

static int log_one_block(struct log_writes_c *lc,
			 struct pending_block *block, sector_t sector)
{
	struct bio *bio;
	struct log_write_entry entry;
	size_t ret;
	int i;

	entry.sector = cpu_to_le64(block->sector);
	entry.nr_sectors = cpu_to_le64(block->nr_sectors);
	entry.flags = cpu_to_le64(block->flags);
	entry.data_len = cpu_to_le64(block->datalen);
	if (write_metadata(lc, &entry, sizeof(entry), block->data,
			   block->datalen, sector)) {
		free_pending_block(lc, block);
		return -1;
	}

	if (!block->vec_cnt)
		goto out;
	sector++;

	/*
	 * Take the io_blocks reference before the allocation so the error
	 * path's put_io_block() always has a matching increment to drop.
	 */
	atomic_inc(&lc->io_blocks);
	bio = bio_alloc(GFP_KERNEL, block->vec_cnt);
	if (!bio) {
		DMERR("Couldn't alloc log bio");
		goto error;
	}
	bio->bi_iter.bi_size = 0;
	bio->bi_iter.bi_sector = sector;
	bio->bi_bdev = lc->logdev->bdev;
	bio->bi_end_io = log_end_io;
	bio->bi_private = lc;
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	for (i = 0; i < block->vec_cnt; i++) {
		/*
		 * The page offset is always 0 because we allocate a new page
		 * for every bvec in the original bio for simplicity's sake.
		 */
		ret = bio_add_page(bio, block->vecs[i].bv_page,
				   block->vecs[i].bv_len, 0);
		if (ret != block->vecs[i].bv_len) {
			atomic_inc(&lc->io_blocks);
			submit_bio(bio);
			bio = bio_alloc(GFP_KERNEL, block->vec_cnt - i);
			if (!bio) {
				DMERR("Couldn't alloc log bio");
				goto error;
			}
			bio->bi_iter.bi_size = 0;
			bio->bi_iter.bi_sector = sector;
			bio->bi_bdev = lc->logdev->bdev;
			bio->bi_end_io = log_end_io;
			bio->bi_private = lc;
			bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

			ret = bio_add_page(bio, block->vecs[i].bv_page,
					   block->vecs[i].bv_len, 0);
			if (ret != block->vecs[i].bv_len) {
				DMERR("Couldn't add page on new bio?");
				bio_put(bio);
				goto error;
			}
		}
		sector += block->vecs[i].bv_len >> SECTOR_SHIFT;
	}
	submit_bio(bio);
out:
	kfree(block->data);
	kfree(block);
	put_pending_block(lc);
	return 0;
error:
	free_pending_block(lc, block);
	put_io_block(lc);
	return -1;
}

static int log_super(struct log_writes_c *lc)
{
	struct log_write_super super;

	super.magic = cpu_to_le64(WRITE_LOG_MAGIC);
	super.version = cpu_to_le64(WRITE_LOG_VERSION);
	super.nr_entries = cpu_to_le64(lc->logged_entries);
	super.sectorsize = cpu_to_le32(lc->sectorsize);

	if (write_metadata(lc, &super, sizeof(super), NULL, 0, 0)) {
		DMERR("Couldn't write super");
		return -1;
	}

	return 0;
}

static inline sector_t logdev_last_sector(struct log_writes_c *lc)
{
	return i_size_read(lc->logdev->bdev->bd_inode) >> SECTOR_SHIFT;
}
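
/*
 * Log space accounting, by example: a 4-sector write consumes 5 log sectors
 * (1 entry header + 4 data sectors), while a DISCARD, FLUSH or MARK entry
 * consumes just the 1 header sector.  That is why next_sector below advances
 * by nr_sectors + 1 for ordinary blocks but only by 1 for discards.
 */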

static int log_writes_kthread(void *arg)
{
	struct log_writes_c *lc = (struct log_writes_c *)arg;
	sector_t sector = 0;

	while (!kthread_should_stop()) {
		bool super = false;
		bool logging_enabled;
		struct pending_block *block = NULL;
		int ret;

		spin_lock_irq(&lc->blocks_lock);
		if (!list_empty(&lc->logging_blocks)) {
			block = list_first_entry(&lc->logging_blocks,
						 struct pending_block, list);
			list_del_init(&block->list);
			if (!lc->logging_enabled)
				goto next;

			sector = lc->next_sector;
			if (block->flags & LOG_DISCARD_FLAG)
				lc->next_sector++;
			else
				lc->next_sector += block->nr_sectors + 1;

			/*
			 * Apparently the size of the device may not be known
			 * right away, so handle this properly.
			 */
			if (!lc->end_sector)
				lc->end_sector = logdev_last_sector(lc);
			if (lc->end_sector &&
			    lc->next_sector >= lc->end_sector) {
				DMERR("Ran out of space on the logdev");
				lc->logging_enabled = false;
				goto next;
			}
			lc->logged_entries++;
			atomic_inc(&lc->io_blocks);

			super = (block->flags & (LOG_FUA_FLAG | LOG_MARK_FLAG));
			if (super)
				atomic_inc(&lc->io_blocks);
		}
next:
		logging_enabled = lc->logging_enabled;
		spin_unlock_irq(&lc->blocks_lock);
		if (block) {
			if (logging_enabled) {
				ret = log_one_block(lc, block, sector);
				if (!ret && super)
					ret = log_super(lc);
				if (ret) {
					spin_lock_irq(&lc->blocks_lock);
					lc->logging_enabled = false;
					spin_unlock_irq(&lc->blocks_lock);
				}
			} else
				free_pending_block(lc, block);
			continue;
		}

		if (!try_to_freeze()) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (!kthread_should_stop() &&
			    !atomic_read(&lc->pending_blocks))
				schedule();
			__set_current_state(TASK_RUNNING);
		}
	}
	return 0;
}

/*
 * Construct a log-writes mapping:
 * log-writes <dev_path> <log_dev_path>
 */
static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct log_writes_c *lc;
	struct dm_arg_set as;
	const char *devname, *logdevname;
	int ret;

	as.argc = argc;
	as.argv = argv;

	if (argc < 2) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	lc = kzalloc(sizeof(struct log_writes_c), GFP_KERNEL);
	if (!lc) {
		ti->error = "Cannot allocate context";
		return -ENOMEM;
	}
	spin_lock_init(&lc->blocks_lock);
	INIT_LIST_HEAD(&lc->unflushed_blocks);
	INIT_LIST_HEAD(&lc->logging_blocks);
	init_waitqueue_head(&lc->wait);
	lc->sectorsize = 1 << SECTOR_SHIFT;
	atomic_set(&lc->io_blocks, 0);
	atomic_set(&lc->pending_blocks, 0);

	devname = dm_shift_arg(&as);
	ret = dm_get_device(ti, devname, dm_table_get_mode(ti->table), &lc->dev);
	if (ret) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	logdevname = dm_shift_arg(&as);
	ret = dm_get_device(ti, logdevname, dm_table_get_mode(ti->table),
			    &lc->logdev);
	if (ret) {
		ti->error = "Log device lookup failed";
		dm_put_device(ti, lc->dev);
		goto bad;
	}

	/* kthread_run() returns an ERR_PTR on failure, never NULL */
	lc->log_kthread = kthread_run(log_writes_kthread, lc, "log-write");
	if (IS_ERR(lc->log_kthread)) {
		ret = PTR_ERR(lc->log_kthread);
		ti->error = "Couldn't alloc kthread";
		dm_put_device(ti, lc->dev);
		dm_put_device(ti, lc->logdev);
		goto bad;
	}

	/* We put the super at sector 0, start logging at sector 1 */
	lc->next_sector = 1;
	lc->logging_enabled = true;
	lc->end_sector = logdev_last_sector(lc);
	lc->device_supports_discard = true;

	ti->num_flush_bios = 1;
	ti->flush_supported = true;
	ti->num_discard_bios = 1;
	ti->discards_supported = true;
	ti->per_io_data_size = sizeof(struct per_bio_data);
	ti->private = lc;
	return 0;

bad:
	kfree(lc);
	return ret;
}

static int log_mark(struct log_writes_c *lc, char *data)
{
	struct pending_block *block;
	size_t maxsize = lc->sectorsize - sizeof(struct log_write_entry);

	block = kzalloc(sizeof(struct pending_block), GFP_KERNEL);
	if (!block) {
		DMERR("Error allocating pending block");
		return -ENOMEM;
	}

	/* kstrndup() allocates up to max + 1 bytes, so stay within the sector */
	block->data = kstrndup(data, maxsize - 1, GFP_KERNEL);
	if (!block->data) {
		DMERR("Error copying mark data");
		kfree(block);
		return -ENOMEM;
	}
	atomic_inc(&lc->pending_blocks);
	block->datalen = strlen(block->data);
	block->flags |= LOG_MARK_FLAG;
	spin_lock_irq(&lc->blocks_lock);
	list_add_tail(&block->list, &lc->logging_blocks);
	spin_unlock_irq(&lc->blocks_lock);
	wake_up_process(lc->log_kthread);
	return 0;
}
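
/*
 * Example usage (device names and sizes are illustrative):
 *
 *   echo "0 $(blockdev --getsz /dev/sdb) log-writes /dev/sdb /dev/sdc" | \
 *       dmsetup create mydev
 *   ...
 *   dmsetup message mydev 0 mark mymark
 *
 * Mark data must fit in a sector alongside struct log_write_entry; longer
 * marks are truncated by log_mark() above.
 */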

static void log_writes_dtr(struct dm_target *ti)
{
	struct log_writes_c *lc = ti->private;

	spin_lock_irq(&lc->blocks_lock);
	list_splice_init(&lc->unflushed_blocks, &lc->logging_blocks);
	spin_unlock_irq(&lc->blocks_lock);

	/*
	 * This is just nice to have since it'll update the super to include
	 * the unflushed blocks; if it fails we don't really care.
	 */
	log_mark(lc, "dm-log-writes-end");
	wake_up_process(lc->log_kthread);
	wait_event(lc->wait, !atomic_read(&lc->io_blocks) &&
		   !atomic_read(&lc->pending_blocks));
	kthread_stop(lc->log_kthread);

	WARN_ON(!list_empty(&lc->logging_blocks));
	WARN_ON(!list_empty(&lc->unflushed_blocks));
	dm_put_device(ti, lc->dev);
	dm_put_device(ti, lc->logdev);
	kfree(lc);
}

static void normal_map_bio(struct dm_target *ti, struct bio *bio)
{
	struct log_writes_c *lc = ti->private;

	bio->bi_bdev = lc->dev->bdev;
}
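
/*
 * When logging is enabled, the map path below sorts bios into four buckets:
 * reads and zero-length non-flush bios pass straight through; empty flushes
 * splice the unflushed list onto themselves for logging at completion;
 * discards are logged header-only (and completed here if the device can't
 * discard); everything else has its payload copied into fresh pages so the
 * data is still around when the kthread writes it out later.
 */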

static int log_writes_map(struct dm_target *ti, struct bio *bio)
{
	struct log_writes_c *lc = ti->private;
	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
	struct pending_block *block;
	struct bvec_iter iter;
	struct bio_vec bv;
	size_t alloc_size;
	int i = 0;
	bool flush_bio = (bio->bi_opf & REQ_PREFLUSH);
	bool fua_bio = (bio->bi_opf & REQ_FUA);
	bool discard_bio = (bio_op(bio) == REQ_OP_DISCARD);

	pb->block = NULL;

	/* Don't bother doing anything if logging has been disabled */
	if (!lc->logging_enabled)
		goto map_bio;

	/*
	 * Map reads as normal.
	 */
	if (bio_data_dir(bio) == READ)
		goto map_bio;

	/* No sectors and not a flush? Don't care */
	if (!bio_sectors(bio) && !flush_bio)
		goto map_bio;

	/*
	 * Discards will have bi_size set but there's no actual data, so just
	 * allocate the size of the pending block.
	 */
	if (discard_bio)
		alloc_size = sizeof(struct pending_block);
	else
		alloc_size = sizeof(struct pending_block) +
			sizeof(struct bio_vec) * bio_segments(bio);

	block = kzalloc(alloc_size, GFP_NOIO);
	if (!block) {
		DMERR("Error allocating pending block");
		spin_lock_irq(&lc->blocks_lock);
		lc->logging_enabled = false;
		spin_unlock_irq(&lc->blocks_lock);
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&block->list);
	pb->block = block;
	atomic_inc(&lc->pending_blocks);

	if (flush_bio)
		block->flags |= LOG_FLUSH_FLAG;
	if (fua_bio)
		block->flags |= LOG_FUA_FLAG;
	if (discard_bio)
		block->flags |= LOG_DISCARD_FLAG;

	block->sector = bio->bi_iter.bi_sector;
	block->nr_sectors = bio_sectors(bio);

	/* We don't need the data, just submit */
	if (discard_bio) {
		WARN_ON(flush_bio || fua_bio);
		if (lc->device_supports_discard)
			goto map_bio;
		bio_endio(bio);
		return DM_MAPIO_SUBMITTED;
	}

	/* Flush bio, splice the unflushed blocks onto this list and submit */
	if (flush_bio && !bio_sectors(bio)) {
		spin_lock_irq(&lc->blocks_lock);
		list_splice_init(&lc->unflushed_blocks, &block->list);
		spin_unlock_irq(&lc->blocks_lock);
		goto map_bio;
	}

	/*
	 * We will write this bio somewhere else way later so we need to copy
	 * the actual contents into new pages so we know the data will always
	 * be there.
	 *
	 * We do this because this could be a bio from O_DIRECT, in which case
	 * we can't just hold onto the page until some later point; we have to
	 * manually copy the contents.
	 */
	bio_for_each_segment(bv, bio, iter) {
		struct page *page;
		void *src, *dst;

		page = alloc_page(GFP_NOIO);
		if (!page) {
			DMERR("Error allocating page");
			free_pending_block(lc, block);
			spin_lock_irq(&lc->blocks_lock);
			lc->logging_enabled = false;
			spin_unlock_irq(&lc->blocks_lock);
			return -ENOMEM;
		}

		src = kmap_atomic(bv.bv_page);
		dst = kmap_atomic(page);
		memcpy(dst, src + bv.bv_offset, bv.bv_len);
		kunmap_atomic(dst);
		kunmap_atomic(src);
		block->vecs[i].bv_page = page;
		block->vecs[i].bv_len = bv.bv_len;
		block->vec_cnt++;
		i++;
	}

	/* Had a flush with data in it, weird */
	if (flush_bio) {
		spin_lock_irq(&lc->blocks_lock);
		list_splice_init(&lc->unflushed_blocks, &block->list);
		spin_unlock_irq(&lc->blocks_lock);
	}
map_bio:
	normal_map_bio(ti, bio);
	return DM_MAPIO_REMAPPED;
}

static int normal_end_io(struct dm_target *ti, struct bio *bio, int error)
{
	struct log_writes_c *lc = ti->private;
	struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));

	if (bio_data_dir(bio) == WRITE && pb->block) {
		struct pending_block *block = pb->block;
		unsigned long flags;

		spin_lock_irqsave(&lc->blocks_lock, flags);
		if (block->flags & LOG_FLUSH_FLAG) {
			list_splice_tail_init(&block->list, &lc->logging_blocks);
			list_add_tail(&block->list, &lc->logging_blocks);
			wake_up_process(lc->log_kthread);
		} else if (block->flags & LOG_FUA_FLAG) {
			list_add_tail(&block->list, &lc->logging_blocks);
			wake_up_process(lc->log_kthread);
		} else
			list_add_tail(&block->list, &lc->unflushed_blocks);
		spin_unlock_irqrestore(&lc->blocks_lock, flags);
	}

	return error;
}
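
/*
 * Completion ordering, by example: writes a and b complete and sit on
 * unflushed_blocks; when a flush then completes, a and b are spliced ahead
 * of the flush block onto logging_blocks, so the log records them in the
 * order the disk actually made them durable.  FUA writes skip the unflushed
 * list entirely and are queued for logging as soon as they complete.
 */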

/*
 * INFO format: <logged entries> <highest allocated sector>
 */
static void log_writes_status(struct dm_target *ti, status_type_t type,
			      unsigned status_flags, char *result,
			      unsigned maxlen)
{
	unsigned sz = 0;
	struct log_writes_c *lc = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%llu %llu", lc->logged_entries,
		       (unsigned long long)lc->next_sector - 1);
		if (!lc->logging_enabled)
			DMEMIT(" logging_disabled");
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s %s", lc->dev->name, lc->logdev->name);
		break;
	}
}

static int log_writes_prepare_ioctl(struct dm_target *ti,
				    struct block_device **bdev, fmode_t *mode)
{
	struct log_writes_c *lc = ti->private;
	struct dm_dev *dev = lc->dev;

	*bdev = dev->bdev;
	/*
	 * Only pass ioctls through if the device sizes match exactly.
	 */
	if (ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
		return 1;
	return 0;
}

static int log_writes_iterate_devices(struct dm_target *ti,
				      iterate_devices_callout_fn fn,
				      void *data)
{
	struct log_writes_c *lc = ti->private;

	return fn(ti, lc->dev, 0, ti->len, data);
}

/*
 * Messages supported:
 *   mark <mark data> - specify the marked data.
 */
static int log_writes_message(struct dm_target *ti, unsigned argc, char **argv)
{
	int r = -EINVAL;
	struct log_writes_c *lc = ti->private;

	if (argc != 2) {
		DMWARN("Invalid log-writes message arguments, expect 2 arguments, got %d", argc);
		return r;
	}

	if (!strcasecmp(argv[0], "mark"))
		r = log_mark(lc, argv[1]);
	else
		DMWARN("Unrecognised log writes target message received: %s", argv[0]);

	return r;
}

static void log_writes_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct log_writes_c *lc = ti->private;
	struct request_queue *q = bdev_get_queue(lc->dev->bdev);

	if (!q || !blk_queue_discard(q)) {
		lc->device_supports_discard = false;
		limits->discard_granularity = 1 << SECTOR_SHIFT;
		limits->max_discard_sectors = (UINT_MAX >> SECTOR_SHIFT);
	}
}

static struct target_type log_writes_target = {
	.name   = "log-writes",
	.version = {1, 0, 0},
	.module = THIS_MODULE,
	.ctr    = log_writes_ctr,
	.dtr    = log_writes_dtr,
	.map    = log_writes_map,
	.end_io = normal_end_io,
	.status = log_writes_status,
	.prepare_ioctl = log_writes_prepare_ioctl,
	.message = log_writes_message,
	.iterate_devices = log_writes_iterate_devices,
	.io_hints = log_writes_io_hints,
};

static int __init dm_log_writes_init(void)
{
	int r = dm_register_target(&log_writes_target);

	if (r < 0)
		DMERR("register failed %d", r);

	return r;
}

static void __exit dm_log_writes_exit(void)
{
	dm_unregister_target(&log_writes_target);
}

module_init(dm_log_writes_init);
module_exit(dm_log_writes_exit);

MODULE_DESCRIPTION(DM_NAME " log writes target");
MODULE_AUTHOR("Josef Bacik <jbacik@fb.com>");
MODULE_LICENSE("GPL");