dm.c at 11a68244e16b0c35e122dd55b4e7c595e0fb67a1 (old) vs. dm.c at 980691e5f3a1b5ebbb2d34014e028fd7f1c6e4fb (new)

The only change in the hunks shown below is that dm_end_request() moves from its old position (old lines 818-848) up to directly after free_rq_clone() (new lines 755-785); the function body itself is unchanged.
Unchanged in both versions (lines 1-754):

/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"

--- 738 unchanged lines hidden ---

static void free_rq_clone(struct request *clone)
{
	struct dm_rq_target_io *tio = clone->end_io_data;

	blk_rq_unprep_clone(clone);
	free_rq_tio(tio);
}
Added at the new location in 980691e5 (new lines 755-785):

/*
 * Complete the clone and the original request.
 * Must be called without queue lock.
 */
static void dm_end_request(struct request *clone, int error)
{
	int rw = rq_data_dir(clone);
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;

	if (blk_pc_request(rq)) {
		rq->errors = clone->errors;
		rq->resid_len = clone->resid_len;

		if (rq->sense)
			/*
			 * We are using the sense buffer of the original
			 * request.
			 * So setting the length of the sense data is enough.
			 */
			rq->sense_len = clone->sense_len;
	}

	free_rq_clone(clone);

	blk_end_request_all(rq, error);

	rq_completed(md, rw, 1);
}
Unchanged in both versions (old lines 755-817, new lines 786-848); the orphaned brace below is apparently the tail of start_queue(), whose opening line falls inside the hidden range:

static void dm_unprep_request(struct request *rq)
{
	struct request *clone = rq->special;

	rq->special = NULL;
	rq->cmd_flags &= ~REQ_DONTPREP;

	free_rq_clone(clone);

--- 47 unchanged lines hidden ---

{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
Removed from the old location (old lines 818-848): the dm_end_request() function shown above, byte-for-byte identical.
Unchanged in both versions (lines 849 onward):

static void dm_done(struct request *clone, int error, bool mapped)
{
	int r = error;
	struct dm_rq_target_io *tio = clone->end_io_data;
	dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io;

	if (mapped && rq_end_io)
		r = rq_end_io(tio->ti, clone, error, &tio->info);

--- 1726 unchanged lines hidden ---
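The subtle part of the moved function is the blk_pc_request() branch: the clone shares the original request's sense buffer, so dm_end_request() only copies the status fields and the sense length back before freeing the clone and completing the original with blk_end_request_all(). The following is a minimal userspace sketch of that "copy status back, then tear down the clone, then complete the original" ordering; struct toy_request and copy_status() are made-up toy names for illustration, not the kernel API.

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for a request: the clone borrows the original's sense buffer. */
struct toy_request {
	int errors;
	unsigned resid_len;
	unsigned sense_len;
	char *sense;			/* owned by the original, borrowed by the clone */
	struct toy_request *orig;	/* NULL for an original, set for a clone */
};

/* Mirrors the blk_pc_request() branch: copy completion status clone -> original.
 * Because the sense buffer is shared, only the length needs to be written back. */
static void copy_status(struct toy_request *clone)
{
	struct toy_request *rq = clone->orig;

	rq->errors = clone->errors;
	rq->resid_len = clone->resid_len;
	if (rq->sense)
		rq->sense_len = clone->sense_len;
}

int main(void)
{
	struct toy_request rq = { .sense = malloc(64), .orig = NULL };
	struct toy_request clone = { .errors = 0, .resid_len = 12, .sense_len = 18,
				     .sense = rq.sense, .orig = &rq };

	copy_status(&clone);		/* 1. status back to the original          */
	clone.sense = NULL;		/* 2. drop the clone's borrowed reference  */
	printf("orig: resid=%u sense_len=%u\n", rq.resid_len, rq.sense_len);
	free(rq.sense);			/* 3. only now complete/free the original  */
	return 0;
}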