dm.c: a4872d5b6ad69a49975c0268828b5bb2317ea5a0 (before) -> 1f98a13f623e0ef666690a18c1250335fc6d7ef1 (after)
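Every hunk in this diff makes the same mechanical substitution: the dedicated bio_barrier() predicate is replaced by the generic bio_rw_flagged(bio, BIO_RW_BARRIER) helper. A userspace model of the two forms follows, with simplified stand-ins for the kernel's struct bio and flag enum (the real definitions live in include/linux/bio.h; the flag positions here are illustrative, not the kernel's values):

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel types. */
enum bio_rw_flags { BIO_RW = 0, BIO_RW_AHEAD, BIO_RW_BARRIER, BIO_RW_SYNCIO };

struct bio { unsigned long bi_rw; };

/* Old style: one dedicated macro per flag. */
#define bio_barrier(bio) ((bio)->bi_rw & (1UL << BIO_RW_BARRIER))

/* New style: one generic predicate over the bi_rw flag word. */
static inline bool bio_rw_flagged(struct bio *bio, enum bio_rw_flags flag)
{
	return (bio->bi_rw & (1UL << flag)) != 0;
}

int main(void)
{
	struct bio b = { .bi_rw = 1UL << BIO_RW_BARRIER };

	/* The two predicates agree; the diff swaps the first for the second. */
	printf("bio_barrier:    %d\n", bio_barrier(&b) != 0);
	printf("bio_rw_flagged: %d\n", bio_rw_flagged(&b, BIO_RW_BARRIER));
	return 0;
}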
/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"

--- 572 unchanged lines hidden ---

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->error == DM_ENDIO_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md)) {
-				if (!bio_barrier(io->bio))
+				if (!bio_rw_flagged(io->bio, BIO_RW_BARRIER))
					bio_list_add_head(&md->deferred,
							  io->bio);
			} else
				/* noflush suspend was interrupted. */
				io->error = -EIO;
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}

		io_error = io->error;
		bio = io->bio;

-		if (bio_barrier(bio)) {
+		if (bio_rw_flagged(bio, BIO_RW_BARRIER)) {
			/*
			 * There can be just one barrier request so we use
			 * a per-device variable for error reporting.
			 * Note that you can't touch the bio after end_io_acct
			 */
			if (!md->barrier_error && io_error != -EOPNOTSUPP)
				md->barrier_error = io_error;
			end_io_acct(io);

--- 594 unchanged lines hidden ---

 */
static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
{
	struct clone_info ci;
	int error = 0;

	ci.map = dm_get_table(md);
	if (unlikely(!ci.map)) {
-		if (!bio_barrier(bio))
+		if (!bio_rw_flagged(bio, BIO_RW_BARRIER))
			bio_io_error(bio);
		else
			if (!md->barrier_error)
				md->barrier_error = -EIO;
		return;
	}

	ci.md = md;

--- 95 unchanged lines hidden ---
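Note on the hunk above: with no table loaded, a plain bio is failed on the spot, while a barrier only records -EIO in the per-device slot, its outcome being reported through md->barrier_error rather than by completing the bio here. A compact model (every _model name is invented for this sketch):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct md_model {
	bool	has_table;
	int	barrier_error;
};

static void no_table_model(struct md_model *md, bool is_barrier)
{
	if (md->has_table)
		return;	/* normal path: split and map the bio */

	if (!is_barrier)
		puts("bio_io_error: fail this bio immediately");
	else if (!md->barrier_error)
		md->barrier_error = -EIO;	/* reported later */
}

int main(void)
{
	struct md_model md = { .has_table = false, .barrier_error = 0 };

	no_table_model(&md, false);	/* plain bio: fails now */
	no_table_model(&md, true);	/* barrier: latches -EIO */
	printf("barrier_error: %d\n", md.barrier_error);
	return 0;
}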

	part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	/*
	 * If we're suspended or the thread is processing barriers
	 * we have to queue this io for later.
	 */
	if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) ||
-	    unlikely(bio_barrier(bio))) {
+	    unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
		up_read(&md->io_lock);

		if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) &&
		    bio_rw(bio) == READA) {
			bio_io_error(bio);
			return 0;
		}

--- 6 unchanged lines hidden ---

	up_read(&md->io_lock);
	return 0;
}

static int dm_make_request(struct request_queue *q, struct bio *bio)
{
	struct mapped_device *md = q->queuedata;

-	if (unlikely(bio_barrier(bio))) {
+	if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) {
		bio_endio(bio, -EOPNOTSUPP);
		return 0;
	}

	return md->saved_make_request_fn(q, bio); /* call __make_request() */
}

static int dm_request_based(struct mapped_device *md)

--- 803 unchanged lines hidden ---
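Note on the hunk above: for request-based dm, dm_make_request() refuses barrier bios up front with -EOPNOTSUPP and hands everything else to the stock __make_request() path. A userspace model of that gate (simplified stand-in types; bio->status stands in for the error passed to bio_endio()):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

enum bio_rw_flags { BIO_RW = 0, BIO_RW_AHEAD, BIO_RW_BARRIER };

struct bio { unsigned long bi_rw; int status; };

static bool bio_rw_flagged(struct bio *bio, enum bio_rw_flags flag)
{
	return (bio->bi_rw & (1UL << flag)) != 0;
}

/* Mirrors the branch in dm_make_request() above. */
static int dm_make_request_model(struct bio *bio)
{
	if (bio_rw_flagged(bio, BIO_RW_BARRIER)) {
		bio->status = -EOPNOTSUPP;	/* bio_endio(bio, -EOPNOTSUPP) */
		return 0;
	}
	bio->status = 0;	/* would call saved_make_request_fn() here */
	return 0;
}

int main(void)
{
	struct bio barrier = { .bi_rw = 1UL << BIO_RW_BARRIER };
	struct bio normal = { 0 };

	dm_make_request_model(&barrier);
	dm_make_request_model(&normal);
	printf("barrier: %d, normal: %d\n", barrier.status, normal.status);
	return 0;
}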

			break;
		}

		up_write(&md->io_lock);

		if (dm_request_based(md))
			generic_make_request(c);
		else {
-			if (bio_barrier(c))
+			if (bio_rw_flagged(c, BIO_RW_BARRIER))
				process_barrier(md, c);
			else
				__split_and_process_bio(md, c);
		}

		down_write(&md->io_lock);
	}

--- 507 unchanged lines hidden ---
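Note on the final hunk: the worker thread replays deferred bios, sending them back to the block layer for request-based devices and otherwise dispatching barriers to process_barrier() and regular I/O to __split_and_process_bio(). A schematic of that dispatch, with stub functions standing in for the kernel ones:

#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for the kernel functions named in the hunk. */
static void generic_make_request_stub(void) { puts("replay via block layer"); }
static void process_barrier_stub(void)      { puts("barrier path"); }
static void split_and_process_stub(void)    { puts("split + map path"); }

/* Mirrors the dispatch in dm_wq_work(); only the branch shape matters. */
static void dispatch_deferred(bool request_based, bool is_barrier)
{
	if (request_based)
		generic_make_request_stub();
	else if (is_barrier)
		process_barrier_stub();
	else
		split_and_process_stub();
}

int main(void)
{
	dispatch_deferred(true, false);		/* request-based dm */
	dispatch_deferred(false, true);		/* bio-based, barrier */
	dispatch_deferred(false, false);	/* bio-based, regular I/O */
	return 0;
}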