ordered-data.c (6db4a7335dd701a0e20275440ee057d3db2a7ae3) vs. ordered-data.c (161c3549b45aeef05451b6822d8aaaf39c7bedce)
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,

--- 476 unchanged lines hidden (view full) ---

btrfs_wait_logged_extents(), version 6db4a7335dd7:

void btrfs_wait_logged_extents(struct btrfs_trans_handle *trans,
                               struct btrfs_root *log, u64 transid)
{
        struct btrfs_ordered_extent *ordered;
        int index = transid % 2;

        spin_lock_irq(&log->log_extents_lock[index]);
        while (!list_empty(&log->logged_list[index])) {
                ordered = list_first_entry(&log->logged_list[index],
                                           struct btrfs_ordered_extent,
                                           log_list);
                list_del_init(&ordered->log_list);
                spin_unlock_irq(&log->log_extents_lock[index]);

                if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) &&
                    !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
                        struct inode *inode = ordered->inode;
                        u64 start = ordered->file_offset;
                        u64 end = ordered->file_offset + ordered->len - 1;

                        WARN_ON(!inode);
                        filemap_fdatawrite_range(inode->i_mapping, start, end);
                }
                wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
                                                   &ordered->flags));

                /*
                 * If our ordered extent completed it means it updated the
                 * fs/subvol and csum trees already, so no need to make the
                 * current transaction's commit wait for it, as we end up
                 * holding memory unnecessarily and delaying the inode's iput
                 * until the transaction commit (we schedule an iput for the
                 * inode when the ordered extent's refcount drops to 0), which
                 * prevents it from being evictable until the transaction
                 * commits.
                 */
                if (test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags))
                        btrfs_put_ordered_extent(ordered);
                else
                        list_add_tail(&ordered->trans_list, &trans->ordered);

                spin_lock_irq(&log->log_extents_lock[index]);
        }
        spin_unlock_irq(&log->log_extents_lock[index]);
}

void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid)
{
        struct btrfs_ordered_extent *ordered;

--- 45 unchanged lines hidden (view full) ---

btrfs_wait_logged_extents(), version 161c3549b45a:

void btrfs_wait_logged_extents(struct btrfs_trans_handle *trans,
                               struct btrfs_root *log, u64 transid)
{
        struct btrfs_ordered_extent *ordered;
        int index = transid % 2;

        spin_lock_irq(&log->log_extents_lock[index]);
        while (!list_empty(&log->logged_list[index])) {
                struct inode *inode;
                ordered = list_first_entry(&log->logged_list[index],
                                           struct btrfs_ordered_extent,
                                           log_list);
                list_del_init(&ordered->log_list);
                inode = ordered->inode;
                spin_unlock_irq(&log->log_extents_lock[index]);

                if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) &&
                    !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
                        u64 start = ordered->file_offset;
                        u64 end = ordered->file_offset + ordered->len - 1;

                        WARN_ON(!inode);
                        filemap_fdatawrite_range(inode->i_mapping, start, end);
                }
                wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
                                                   &ordered->flags));

                /*
                 * In order to keep us from losing our ordered extent
                 * information when committing the transaction we have to make
                 * sure that any logged extents are completed when we go to
                 * commit the transaction. To do this we simply increase the
                 * current transactions pending_ordered counter and decrement it
                 * when the ordered extent completes.
                 */
                if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
                        struct btrfs_ordered_inode_tree *tree;

                        tree = &BTRFS_I(inode)->ordered_tree;
                        spin_lock_irq(&tree->lock);
                        if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
                                set_bit(BTRFS_ORDERED_PENDING, &ordered->flags);
                                atomic_inc(&trans->transaction->pending_ordered);
                        }
                        spin_unlock_irq(&tree->lock);
                }
                btrfs_put_ordered_extent(ordered);
                spin_lock_irq(&log->log_extents_lock[index]);
        }
        spin_unlock_irq(&log->log_extents_lock[index]);
}

void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid)
{
        struct btrfs_ordered_extent *ordered;

--- 45 unchanged lines hidden (view full) ---
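The 161c3549b45a version of btrfs_wait_logged_extents() no longer queues incomplete extents on trans->ordered; it marks them BTRFS_ORDERED_PENDING and bumps the running transaction's atomic pending_ordered counter, which btrfs_remove_ordered_extent() later drops and signals through pending_wait. The commit-side waiter itself is outside this excerpt, so the following is only a minimal sketch of how such a waiter can be built from the same kernel primitives; the demo_ names are hypothetical and demo_transaction mirrors just the two fields the handshake needs, it is not the real struct btrfs_transaction.

#include <linux/atomic.h>
#include <linux/wait.h>

/* Hypothetical stand-in for the two transaction fields used by the
 * handshake shown above (an assumption, not the btrfs definition). */
struct demo_transaction {
        atomic_t pending_ordered;       /* incremented in btrfs_wait_logged_extents() */
        wait_queue_head_t pending_wait; /* woken in btrfs_remove_ordered_extent() */
};

/* Commit-side wait (sketch): sleep until every ordered extent that was
 * flagged BTRFS_ORDERED_PENDING has completed and dropped the counter
 * back to zero. wait_event() re-evaluates the condition after each
 * wake_up() on pending_wait. */
static void demo_wait_pending_ordered(struct demo_transaction *trans)
{
        wait_event(trans->pending_wait,
                   atomic_read(&trans->pending_ordered) == 0);
}

Note how the counting side above re-tests BTRFS_ORDERED_COMPLETE under tree->lock before setting BTRFS_ORDERED_PENDING, so an extent that completed between the two checks is never counted and cannot leave the counter stuck above zero.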

btrfs_remove_ordered_extent(), version 6db4a7335dd7:

 * and waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct inode *inode,
                                 struct btrfs_ordered_extent *entry)
{
        struct btrfs_ordered_inode_tree *tree;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct rb_node *node;

        tree = &BTRFS_I(inode)->ordered_tree;
        spin_lock_irq(&tree->lock);
        node = &entry->rb_node;
        rb_erase(node, &tree->tree);
        RB_CLEAR_NODE(node);
        if (tree->last == node)
                tree->last = NULL;
        set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
        spin_unlock_irq(&tree->lock);

        spin_lock(&root->ordered_extent_lock);
        list_del_init(&entry->root_extent_list);
        root->nr_ordered_extents--;

        trace_btrfs_ordered_extent_remove(inode, entry);

        if (!root->nr_ordered_extents) {
                spin_lock(&root->fs_info->ordered_root_lock);

--- 478 unchanged lines hidden ---

btrfs_remove_ordered_extent(), version 161c3549b45a:

 * and waiters are woken up.
 */
void btrfs_remove_ordered_extent(struct inode *inode,
                                 struct btrfs_ordered_extent *entry)
{
        struct btrfs_ordered_inode_tree *tree;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct rb_node *node;
        bool dec_pending_ordered = false;

        tree = &BTRFS_I(inode)->ordered_tree;
        spin_lock_irq(&tree->lock);
        node = &entry->rb_node;
        rb_erase(node, &tree->tree);
        RB_CLEAR_NODE(node);
        if (tree->last == node)
                tree->last = NULL;
        set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
        if (test_and_clear_bit(BTRFS_ORDERED_PENDING, &entry->flags))
                dec_pending_ordered = true;
        spin_unlock_irq(&tree->lock);

        /*
         * The current running transaction is waiting on us, we need to let it
         * know that we're complete and wake it up.
         */
        if (dec_pending_ordered) {
                struct btrfs_transaction *trans;

                /*
                 * The checks for trans are just a formality, it should be set,
                 * but if it isn't we don't want to deref/assert under the spin
                 * lock, so be nice and check if trans is set, but ASSERT() so
                 * if it isn't set a developer will notice.
                 */
                spin_lock(&root->fs_info->trans_lock);
                trans = root->fs_info->running_transaction;
                if (trans)
                        atomic_inc(&trans->use_count);
                spin_unlock(&root->fs_info->trans_lock);

                ASSERT(trans);
                if (trans) {
                        if (atomic_dec_and_test(&trans->pending_ordered))
                                wake_up(&trans->pending_wait);
                        btrfs_put_transaction(trans);
                }
        }

        spin_lock(&root->ordered_extent_lock);
        list_del_init(&entry->root_extent_list);
        root->nr_ordered_extents--;

        trace_btrfs_ordered_extent_remove(inode, entry);

        if (!root->nr_ordered_extents) {
                spin_lock(&root->fs_info->ordered_root_lock);

--- 478 unchanged lines hidden ---
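The completion side of the same handshake is already visible in the 161c3549b45a version of btrfs_remove_ordered_extent() above; condensed onto the hypothetical demo_transaction from the earlier sketch it is just a decrement-and-wake, where atomic_dec_and_test() returns true only for the decrement that reaches zero, so exactly one completer issues the wake_up().

static void demo_complete_pending_ordered(struct demo_transaction *trans)
{
        /* Last pending ordered extent for this transaction: wake the
         * commit-side waiter sleeping in wait_event() above. */
        if (atomic_dec_and_test(&trans->pending_ordered))
                wake_up(&trans->pending_wait);
}

In the real function the decrement happens only when test_and_clear_bit(BTRFS_ORDERED_PENDING, ...) succeeded under tree->lock, and the running transaction is pinned via use_count under trans_lock before being dereferenced, exactly as the "checks for trans are just a formality" comment describes.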