--- dm-io.c (f790c0ca6fad60b544bb73eda8cc841a6436725b)
+++ dm-io.c (7b6d91daee5cac6402186ff224c3af39d79f4a0e)
 /*
  * Copyright (C) 2003 Sistina Software
  * Copyright (C) 2006 Red Hat GmbH
  *
  * This file is released under the GPL.
  */

 #include "dm.h"

--- 342 unchanged lines hidden (view full) ---

                        struct io *io, int sync)
 {
        int i;
        struct dpages old_pages = *dp;

        BUG_ON(num_regions > DM_IO_MAX_REGIONS);

        if (sync)
-               rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
+               rw |= REQ_SYNC | REQ_UNPLUG;

        /*
         * For multiple regions we need to be careful to rewind
         * the dp object for each call to do_region.
         */
        for (i = 0; i < num_regions; i++) {
                *dp = old_pages;
-               if (where[i].count || (rw & (1 << BIO_RW_BARRIER)))
+               if (where[i].count || (rw & REQ_HARDBARRIER))
                        do_region(rw, i, where + i, dp, io);
        }

        /*
         * Drop the extra reference that we were holding to avoid
         * the io being completed too early.
         */
        dec_count(io, 0, 0);

--- 31 unchanged lines hidden (view full) ---
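Both edits in the hunk above are the same mechanical conversion: the old BIO_RW_SYNCIO, BIO_RW_UNPLUG and BIO_RW_BARRIER names were bit positions, so every user had to shift them with (1 << x), while the unified REQ_SYNC, REQ_UNPLUG and REQ_HARDBARRIER names are ready-made bit masks shared between bios and requests. A minimal sketch of the difference, using illustrative demo constants rather than the real <linux/blk_types.h> values:

/* Demo constants only; the real flags live in <linux/blk_types.h>. */
enum { DEMO_BIO_RW_SYNCIO = 1, DEMO_BIO_RW_UNPLUG = 2 };   /* bit positions */
#define DEMO_REQ_SYNC   (1 << 1)                           /* ready-made masks */
#define DEMO_REQ_UNPLUG (1 << 2)

static unsigned long demo_flags(void)
{
        unsigned long rw = 0;

        rw |= (1 << DEMO_BIO_RW_SYNCIO) | (1 << DEMO_BIO_RW_UNPLUG); /* old idiom */
        rw |= DEMO_REQ_SYNC | DEMO_REQ_UNPLUG;                       /* new idiom */
        return rw; /* both lines build the same 0x6 mask here */
}

Note also why the loop saves old_pages by value: do_region() advances the dpages cursor as it builds bios, so *dp is reset from the plain struct copy before each region, giving every region the same starting view of the page list.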
                if (!atomic_read(&io->count))
                        break;

                io_schedule();
        }
        set_current_state(TASK_RUNNING);

-       if (io->eopnotsupp_bits && (rw & (1 << BIO_RW_BARRIER))) {
-               rw &= ~(1 << BIO_RW_BARRIER);
+       if (io->eopnotsupp_bits && (rw & REQ_HARDBARRIER)) {
+               rw &= ~REQ_HARDBARRIER;
                goto retry;
        }

        if (error_bits)
                *error_bits = io->error_bits;

        return io->error_bits ? -EIO : 0;
 }

--- 49 unchanged lines hidden (view full) ---
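The hunk above is sync_io()'s barrier fallback: once the wait loop drains, if any region failed with -EOPNOTSUPP and the request carried the barrier flag, the flag is cleared and the whole I/O is retried unordered. A standalone sketch of the same one-shot fallback shape, with a hypothetical issue_io() helper:

/* Hypothetical helper names; only the retry shape mirrors sync_io(). */
#define DEMO_REQ_HARDBARRIER (1 << 5)

extern int issue_io(unsigned long rw);  /* submit and wait; may return -EOPNOTSUPP */

static int issue_with_barrier_fallback(unsigned long rw)
{
        int err;
retry:
        err = issue_io(rw);
        if (err == -EOPNOTSUPP && (rw & DEMO_REQ_HARDBARRIER)) {
                rw &= ~DEMO_REQ_HARDBARRIER; /* device rejects barriers */
                goto retry;                  /* reissue without ordering */
        }
        return err;
}

Because the flag is cleared before the goto, the guard fails on the second pass, so there is at most one retry, exactly as in the kernel code.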

        return 0;
 }

 /*
  * New collapsed (a)synchronous interface.
  *
  * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
- * the queue with blk_unplug() some time later or set the BIO_RW_SYNC bit in
+ * the queue with blk_unplug() some time later or set REQ_SYNC in
  * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
  * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
  */
 int dm_io(struct dm_io_request *io_req, unsigned num_regions,
           struct dm_io_region *where, unsigned long *sync_error_bits)
 {
        int r;
        struct dpages dp;

--- 27 unchanged lines hidden ---
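For context, a usage sketch of the interface described in that comment (not part of the patch): an asynchronous dm_io() call that sets REQ_SYNC in bi_rw, so no later blk_unplug() is needed. The callback, buffer and client names are assumptions for illustration; the fields match the dm_io_request/dm_io_region definitions in <linux/dm-io.h> of this era.

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/dm-io.h>

/* Hypothetical completion callback; error_bits has one bit per region. */
static void demo_complete(unsigned long error_bits, void *context)
{
        if (error_bits)
                pr_err("demo: dm_io failed (0x%lx)\n", error_bits);
}

/* Hypothetical caller; 'client' comes from dm_io_client_create(). */
static int demo_write(struct dm_io_client *client, struct block_device *bdev,
                      void *buf, sector_t sector, sector_t nr_sectors)
{
        struct dm_io_region where = {
                .bdev   = bdev,
                .sector = sector,
                .count  = nr_sectors,
        };
        struct dm_io_request req = {
                .bi_rw          = WRITE | REQ_SYNC, /* REQ_SYNC: no blk_unplug() needed */
                .mem.type       = DM_IO_KMEM,       /* plain kernel-mapped buffer */
                .mem.ptr.addr   = buf,
                .notify.fn      = demo_complete,    /* non-NULL notify.fn => asynchronous */
                .notify.context = NULL,
                .client         = client,
        };

        return dm_io(&req, 1, &where, NULL); /* sync_error_bits unused when async */
}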