#!/usr/bin/env python3
# group: rw backing
#
# Tests for incremental drive-backup
#
# Copyright (C) 2015 John Snow for Red Hat, Inc.
#
# Based on 056.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#

import os
import iotests


def io_write_patterns(img, patterns):
    """Write each (pattern, offset, size) tuple to img via qemu-io."""
    for pattern in patterns:
        iotests.qemu_io('-c', 'write -P%s %s %s' % pattern, img)


def try_remove(img):
    """Remove img, ignoring the error if it is already gone."""
    try:
        os.remove(img)
    except OSError:
        pass


def transaction_action(action, **kwargs):
    """Build a QMP transaction action dict, mapping key_name -> key-name."""
    return {
        'type': action,
        'data': dict((k.replace('_', '-'), v) for k, v in kwargs.items())
    }


def transaction_bitmap_clear(node, name, **kwargs):
    """Build a block-dirty-bitmap-clear transaction action."""
    return transaction_action('block-dirty-bitmap-clear',
                              node=node, name=name, **kwargs)


def transaction_drive_backup(device, target, **kwargs):
    """Build a drive-backup transaction action."""
    return transaction_action('drive-backup', job_id=device, device=device,
                              target=target, **kwargs)


class Bitmap:
    """Bookkeeping for one dirty bitmap and its backup target images."""

    def __init__(self, name, drive):
        self.name = name
        self.drive = drive
        self.num = 0        # Sequence number for the next backup target.
        self.backups = []   # List of (incremental, reference) image pairs.

    def base_target(self):
        # The full "anchor" backup; it has no reference image.
        return (self.drive['backup'], None)

    def new_target(self, num=None):
        """Allocate, record and return a new (target, reference) pair."""
        if num is None:
            num = self.num
        self.num = num + 1
        base = os.path.join(iotests.test_dir,
                            "%s.%s." % (self.drive['id'], self.name))
        suff = "%i.%s" % (num, self.drive['fmt'])
        target = base + "inc" + suff
        reference = base + "ref" + suff
        self.backups.append((target, reference))
        return (target, reference)

    def last_target(self):
        """Return the most recent target pair, or the anchor backup."""
        if self.backups:
            return self.backups[-1]
        return self.base_target()

    def del_target(self):
        """Forget the most recent target pair and delete its files."""
        for image in self.backups.pop():
            try_remove(image)
        self.num -= 1

    def cleanup(self):
        """Delete every recorded backup image."""
        for backup in self.backups:
            for image in backup:
                try_remove(image)


class TestIncrementalBackupBase(iotests.QMPTestCase):
    def __init__(self, *args):
        super().__init__(*args)
        self.bitmaps = []
        self.files = []
        self.drives = []
        self.vm = iotests.VM()
        self.err_img = os.path.join(iotests.test_dir, 'err.%s' % iotests.imgfmt)


    def setUp(self):
        # Create a base image with a distinctive patterning
        drive0 = self.add_node('drive0')
        self.img_create(drive0['file'], drive0['fmt'])
        self.vm.add_drive(drive0['file'], opts='node-name=node0')
        self.write_default_pattern(drive0['file'])
        self.vm.launch()


    def write_default_pattern(self, target):
        """Write the standard base pattern used by all tests to target."""
        io_write_patterns(target, (('0x41', 0, 512),
                                   ('0xd5', '1M', '32k'),
                                   ('0xdc', '32M', '124k')))


    def add_node(self, node_id, fmt=iotests.imgfmt, path=None, backup=None):
        """Record a new drive: image path, format and full-backup path."""
        if path is None:
            path = os.path.join(iotests.test_dir, '%s.%s' % (node_id, fmt))
        if backup is None:
            backup = os.path.join(iotests.test_dir,
                                  '%s.full.backup.%s' % (node_id, fmt))

        self.drives.append({
            'id': node_id,
            'file': path,
            'backup': backup,
            'fmt': fmt })
        return self.drives[-1]


    def img_create(self, img, fmt=iotests.imgfmt, size='64M',
                   parent=None, parentFormat=None, **kwargs):
        """Create an image (optionally backed) and register it for cleanup."""
        optargs = []
        for k, v in kwargs.items():
            optargs = optargs + ['-o', '%s=%s' % (k, v)]
        args = ['create', '-f', fmt] + optargs + [img, size]
        if parent:
            if parentFormat is None:
                parentFormat = fmt
            args = args + ['-b', parent, '-F', parentFormat]
        iotests.qemu_img(*args)
        self.files.append(img)


    def do_qmp_backup(self, error='Input/output error', **kwargs):
        """Issue drive-backup and wait for completion; True on success."""
        res = self.vm.qmp('drive-backup', **kwargs)
        self.assert_qmp(res, 'return', {})
        return self.wait_qmp_backup(kwargs['device'], error)


    def ignore_job_status_change_events(self):
        """Drain JOB_STATUS_CHANGE events until the job reaches 'null'."""
        while True:
            e = self.vm.event_wait(name="JOB_STATUS_CHANGE")
            if e['data']['status'] == 'null':
                break

    def wait_qmp_backup(self, device, error='Input/output error'):
        """Wait for BLOCK_JOB_COMPLETED on device; True iff it succeeded."""
        event = self.vm.event_wait(name="BLOCK_JOB_COMPLETED",
                                   match={'data': {'device': device}})
        self.assertNotEqual(event, None)
        self.ignore_job_status_change_events()

        try:
            failure = self.dictpath(event, 'data/error')
        except AssertionError:
            # Backup succeeded.
            self.assert_qmp(event, 'data/offset', event['data']['len'])
            return True
        else:
            # Backup failed.
            self.assert_qmp(event, 'data/error', error)
            return False


    def wait_qmp_backup_cancelled(self, device):
        """Wait for BLOCK_JOB_CANCELLED on device and drain status events."""
        event = self.vm.event_wait(name='BLOCK_JOB_CANCELLED',
                                   match={'data': {'device': device}})
        self.assertNotEqual(event, None)
        self.ignore_job_status_change_events()


    def create_anchor_backup(self, drive=None):
        """Create a full backup of drive (default: last added drive)."""
        if drive is None:
            drive = self.drives[-1]
        res = self.do_qmp_backup(job_id=drive['id'],
                                 device=drive['id'], sync='full',
                                 format=drive['fmt'], target=drive['backup'])
        self.assertTrue(res)
        self.files.append(drive['backup'])
        return drive['backup']


    def make_reference_backup(self, bitmap=None):
        """Take a full backup into the bitmap's latest reference image."""
        if bitmap is None:
            bitmap = self.bitmaps[-1]
        _, reference = bitmap.last_target()
        res = self.do_qmp_backup(job_id=bitmap.drive['id'],
                                 device=bitmap.drive['id'], sync='full',
                                 format=bitmap.drive['fmt'], target=reference)
        self.assertTrue(res)


    def add_bitmap(self, name, drive, **kwargs):
        """Add a dirty bitmap to drive via QMP and track it for cleanup."""
        bitmap = Bitmap(name, drive)
        self.bitmaps.append(bitmap)
        result = self.vm.qmp('block-dirty-bitmap-add', node=drive['id'],
                             name=bitmap.name, **kwargs)
        self.assert_qmp(result, 'return', {})
        return bitmap


    def prepare_backup(self, bitmap=None, parent=None, **kwargs):
        """Create the next incremental target image, backed by parent."""
        if bitmap is None:
            bitmap = self.bitmaps[-1]
        if parent is None:
            parent, _ = bitmap.last_target()

        target, _ = bitmap.new_target()
        self.img_create(target, bitmap.drive['fmt'], parent=parent,
                        **kwargs)
        return target


    def create_incremental(self, bitmap=None, parent=None,
                           parentFormat=None, validate=True,
                           target=None):
        """Run an incremental backup; on failure, drop the target pair.

        If validate is True a failure is a test error; otherwise failure
        is expected and only returned to the caller.
        """
        if bitmap is None:
            bitmap = self.bitmaps[-1]
        if parent is None:
            parent, _ = bitmap.last_target()

        if target is None:
            target = self.prepare_backup(bitmap, parent)
        res = self.do_qmp_backup(job_id=bitmap.drive['id'],
                                 device=bitmap.drive['id'],
                                 sync='incremental', bitmap=bitmap.name,
                                 format=bitmap.drive['fmt'], target=target,
                                 mode='existing')
        if not res:
            bitmap.del_target()
            self.assertFalse(validate)
        else:
            self.make_reference_backup(bitmap)
        return res


    def check_backups(self):
        """Compare every incremental against its reference, and the chain
        tip against the live image."""
        for bitmap in self.bitmaps:
            for incremental, reference in bitmap.backups:
                self.assertTrue(iotests.compare_images(incremental, reference))
            last = bitmap.last_target()[0]
            self.assertTrue(iotests.compare_images(last, bitmap.drive['file']))


    def hmp_io_writes(self, drive, patterns):
        """Emulate guest writes via HMP qemu-io, followed by a flush."""
        for pattern in patterns:
            self.vm.hmp_qemu_io(drive, 'write -P%s %s %s' % pattern)
        self.vm.hmp_qemu_io(drive, 'flush')


    def do_incremental_simple(self, **kwargs):
        """Shared body for the simple anchor-plus-three-incrementals tests."""
        self.create_anchor_backup()
        self.add_bitmap('bitmap0', self.drives[0], **kwargs)

        # Sanity: Create a "hollow" incremental backup
        self.create_incremental()
        # Three writes: One complete overwrite, one new segment,
        # and one partial overlap.
        self.hmp_io_writes(self.drives[0]['id'], (('0xab', 0, 512),
                                                  ('0xfe', '16M', '256k'),
                                                  ('0x64', '32736k', '64k')))
        self.create_incremental()
        # Three more writes, one of each kind, like above
        self.hmp_io_writes(self.drives[0]['id'], (('0x9a', 0, 512),
                                                  ('0x55', '8M', '352k'),
                                                  ('0x78', '15872k', '1M')))
        self.create_incremental()
        self.vm.shutdown()
        self.check_backups()


    def tearDown(self):
        self.vm.shutdown()
        for bitmap in self.bitmaps:
            bitmap.cleanup()
        for filename in self.files:
            try_remove(filename)



class TestIncrementalBackup(TestIncrementalBackupBase):
    def test_incremental_simple(self):
        '''
        Test: Create and verify three incremental backups.

        Create a bitmap and a full backup before VM execution begins,
        then create a series of three incremental backups "during execution,"
        i.e.; after IO requests begin modifying the drive.
        '''
        return self.do_incremental_simple()


    def test_small_granularity(self):
        '''
        Test: Create and verify backups made with a small granularity bitmap.

        Perform the same test as test_incremental_simple, but with a granularity
        of only 32KiB instead of the present default of 64KiB.
        '''
        return self.do_incremental_simple(granularity=32768)


    def test_large_granularity(self):
        '''
        Test: Create and verify backups made with a large granularity bitmap.

        Perform the same test as test_incremental_simple, but with a granularity
        of 128KiB instead of the present default of 64KiB.
        '''
        return self.do_incremental_simple(granularity=131072)


    def test_larger_cluster_target(self):
        '''
        Test: Create and verify backups made to a larger cluster size target.

        With a default granularity of 64KiB, verify that backups made to a
        larger cluster size target of 128KiB without a backing file works.
        '''
        drive0 = self.drives[0]

        # Create a cluster_size=128k full backup / "anchor" backup
        self.img_create(drive0['backup'], cluster_size='128k')
        self.assertTrue(self.do_qmp_backup(device=drive0['id'], sync='full',
                                           format=drive0['fmt'],
                                           target=drive0['backup'],
                                           mode='existing'))

        # Create bitmap and dirty it with some new writes.
        # overwrite [32736, 32799] which will dirty bitmap clusters at
        # 32M-64K and 32M. 32M+64K will be left undirtied.
        bitmap0 = self.add_bitmap('bitmap0', drive0)
        self.hmp_io_writes(drive0['id'],
                           (('0xab', 0, 512),
                            ('0xfe', '16M', '256k'),
                            ('0x64', '32736k', '64k')))
        # Check the dirty bitmap stats
        self.assertTrue(self.vm.check_bitmap_status(
            'node0', bitmap0.name, {
                'name': 'bitmap0',
                'count': 458752,
                'granularity': 65536,
                'status': 'active',
                'persistent': False
            }))

        # Prepare a cluster_size=128k backup target without a backing file.
        (target, _) = bitmap0.new_target()
        self.img_create(target, bitmap0.drive['fmt'], cluster_size='128k')

        # Perform Incremental Backup
        self.assertTrue(self.do_qmp_backup(device=bitmap0.drive['id'],
                                           sync='incremental',
                                           bitmap=bitmap0.name,
                                           format=bitmap0.drive['fmt'],
                                           target=target,
                                           mode='existing'))
        self.make_reference_backup(bitmap0)

        # Add the backing file, then compare and exit.
        iotests.qemu_img('rebase', '-f', drive0['fmt'], '-u', '-b',
                         drive0['backup'], '-F', drive0['fmt'], target)
        self.vm.shutdown()
        self.check_backups()


    def test_incremental_transaction(self):
        '''Test: Verify backups made from transactionally created bitmaps.

        Create a bitmap "before" VM execution begins, then create a second
        bitmap AFTER writes have already occurred. Use transactions to create
        a full backup and synchronize both bitmaps to this backup.
        Create an incremental backup through both bitmaps and verify that
        both backups match the current drive0 image.
        '''

        drive0 = self.drives[0]
        bitmap0 = self.add_bitmap('bitmap0', drive0)
        self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                          ('0xfe', '16M', '256k'),
                                          ('0x64', '32736k', '64k')))
        bitmap1 = self.add_bitmap('bitmap1', drive0)

        result = self.vm.qmp('transaction', actions=[
            transaction_bitmap_clear(bitmap0.drive['id'], bitmap0.name),
            transaction_bitmap_clear(bitmap1.drive['id'], bitmap1.name),
            transaction_drive_backup(drive0['id'], drive0['backup'],
                                     sync='full', format=drive0['fmt'])
        ])
        self.assert_qmp(result, 'return', {})
        self.wait_until_completed(drive0['id'])
        self.files.append(drive0['backup'])

        self.hmp_io_writes(drive0['id'], (('0x9a', 0, 512),
                                          ('0x55', '8M', '352k'),
                                          ('0x78', '15872k', '1M')))
        # Both bitmaps should be correctly in sync.
        self.create_incremental(bitmap0)
        self.create_incremental(bitmap1)
        self.vm.shutdown()
        self.check_backups()


    def do_transaction_failure_test(self, race=False):
        """Shared body for the grouped-transaction failure tests.

        With race=True, drive0 gets no writes so its (empty) job must be
        cancelled cleanly by drive1's failure.
        """
        # Create a second drive, with pattern:
        drive1 = self.add_node('drive1')
        self.img_create(drive1['file'], drive1['fmt'])
        io_write_patterns(drive1['file'], (('0x14', 0, 512),
                                           ('0x5d', '1M', '32k'),
                                           ('0xcd', '32M', '124k')))

        # Create a blkdebug interface to this img as 'drive1'
        result = self.vm.qmp('blockdev-add',
                             node_name=drive1['id'],
                             driver=drive1['fmt'],
                             file={
                                 'driver': 'blkdebug',
                                 'image': {
                                     'driver': 'file',
                                     'filename': drive1['file']
                                 },
                                 'set-state': [{
                                     'event': 'flush_to_disk',
                                     'state': 1,
                                     'new_state': 2
                                 }],
                                 'inject-error': [{
                                     'event': 'read_aio',
                                     'errno': 5,
                                     'state': 2,
                                     'immediately': False,
                                     'once': True
                                 }],
                             })
        self.assert_qmp(result, 'return', {})

        # Create bitmaps and full backups for both drives
        drive0 = self.drives[0]
        dr0bm0 = self.add_bitmap('bitmap0', drive0)
        dr1bm0 = self.add_bitmap('bitmap0', drive1)
        self.create_anchor_backup(drive0)
        self.create_anchor_backup(drive1)
        self.assert_no_active_block_jobs()
        self.assertFalse(self.vm.get_qmp_events(wait=False))

        # Emulate some writes
        if not race:
            self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                              ('0xfe', '16M', '256k'),
                                              ('0x64', '32736k', '64k')))
        self.hmp_io_writes(drive1['id'], (('0xba', 0, 512),
                                          ('0xef', '16M', '256k'),
                                          ('0x46', '32736k', '64k')))

        # Create incremental backup targets
        target0 = self.prepare_backup(dr0bm0)
        target1 = self.prepare_backup(dr1bm0)

        # Ask for a new incremental backup per-each drive,
        # expecting drive1's backup to fail. In the 'race' test,
        # we expect drive1 to attempt to cancel the empty drive0 job.
        transaction = [
            transaction_drive_backup(drive0['id'], target0, sync='incremental',
                                     format=drive0['fmt'], mode='existing',
                                     bitmap=dr0bm0.name),
            transaction_drive_backup(drive1['id'], target1, sync='incremental',
                                     format=drive1['fmt'], mode='existing',
                                     bitmap=dr1bm0.name)
        ]
        result = self.vm.qmp('transaction', actions=transaction,
                             properties={'completion-mode': 'grouped'})
        self.assert_qmp(result, 'return', {})

        # Observe that drive0's backup is cancelled and drive1 completes with
        # an error.
        self.wait_qmp_backup_cancelled(drive0['id'])
        self.assertFalse(self.wait_qmp_backup(drive1['id']))
        error = self.vm.event_wait('BLOCK_JOB_ERROR')
        self.assert_qmp(error, 'data', {'device': drive1['id'],
                                        'action': 'report',
                                        'operation': 'read'})
        self.assertFalse(self.vm.get_qmp_events(wait=False))
        self.assert_no_active_block_jobs()

        # Delete drive0's successful target and eliminate our record of the
        # unsuccessful drive1 target.
        dr0bm0.del_target()
        dr1bm0.del_target()
        if race:
            # Don't re-run the transaction, we only wanted to test the race.
            self.vm.shutdown()
            return

        # Re-run the same transaction:
        target0 = self.prepare_backup(dr0bm0)
        target1 = self.prepare_backup(dr1bm0)

        # Re-run the exact same transaction.
        result = self.vm.qmp('transaction', actions=transaction,
                             properties={'completion-mode': 'grouped'})
        self.assert_qmp(result, 'return', {})

        # Both should complete successfully this time.
        self.assertTrue(self.wait_qmp_backup(drive0['id']))
        self.assertTrue(self.wait_qmp_backup(drive1['id']))
        self.make_reference_backup(dr0bm0)
        self.make_reference_backup(dr1bm0)
        self.assertFalse(self.vm.get_qmp_events(wait=False))
        self.assert_no_active_block_jobs()

        # And the images should of course validate.
        self.vm.shutdown()
        self.check_backups()

    def test_transaction_failure(self):
        '''Test: Verify backups made from a transaction that partially fails.

        Add a second drive with its own unique pattern, and add a bitmap to each
        drive. Use blkdebug to interfere with the backup on just one drive and
        attempt to create a coherent incremental backup across both drives.

        verify a failure in one but not both, then delete the failed stubs and
        re-run the same transaction.

        verify that both incrementals are created successfully.
        '''
        self.do_transaction_failure_test()

    def test_transaction_failure_race(self):
        '''Test: Verify that transactions with jobs that have no data to
        transfer do not cause race conditions in the cancellation of the entire
        transaction job group.
        '''
        self.do_transaction_failure_test(race=True)


    def test_sync_dirty_bitmap_missing(self):
        # sync=incremental without a bitmap argument must be rejected.
        self.assert_no_active_block_jobs()
        self.files.append(self.err_img)
        result = self.vm.qmp('drive-backup', device=self.drives[0]['id'],
                             sync='incremental', format=self.drives[0]['fmt'],
                             target=self.err_img)
        self.assert_qmp(result, 'error/class', 'GenericError')


    def test_sync_dirty_bitmap_not_found(self):
        # sync=incremental with a nonexistent bitmap must be rejected.
        self.assert_no_active_block_jobs()
        self.files.append(self.err_img)
        result = self.vm.qmp('drive-backup', device=self.drives[0]['id'],
                             sync='incremental', bitmap='unknown',
                             format=self.drives[0]['fmt'], target=self.err_img)
        self.assert_qmp(result, 'error/class', 'GenericError')


    def test_sync_dirty_bitmap_bad_granularity(self):
        '''
        Test: Test what happens if we provide an improper granularity.

        The granularity must always be a power of 2.
        '''
        self.assert_no_active_block_jobs()
        self.assertRaises(AssertionError, self.add_bitmap,
                          'bitmap0', self.drives[0],
                          granularity=64000)

    def test_growing_before_backup(self):
        '''
        Test: Add a bitmap, truncate the image, write past the old
        end, do a backup.

        Incremental backup should not ignore dirty bits past the old
        image end.
        '''
        self.assert_no_active_block_jobs()

        self.create_anchor_backup()

        self.add_bitmap('bitmap0', self.drives[0])

        res = self.vm.qmp('block_resize', device=self.drives[0]['id'],
                          size=(65 * 1048576))
        self.assert_qmp(res, 'return', {})

        # Dirty the image past the old end
        self.vm.hmp_qemu_io(self.drives[0]['id'], 'write 64M 64k')

        target = self.prepare_backup(size='65M')
        self.create_incremental(target=target)

        self.vm.shutdown()
        self.check_backups()


class TestIncrementalBackupBlkdebug(TestIncrementalBackupBase):
    '''Incremental backup tests that utilize a BlkDebug filter on drive0.'''

    def setUp(self):
        # Unlike the base class, do not attach the drive here: the tests
        # attach it themselves through a blkdebug node via blockdev-add.
        drive0 = self.add_node('drive0')
        self.img_create(drive0['file'], drive0['fmt'])
        self.write_default_pattern(drive0['file'])
        self.vm.launch()

    def test_incremental_failure(self):
        '''Test: Verify backups made after a failure are correct.

        Simulate a failure during an incremental backup block job,
        emulate additional writes, then create another incremental backup
        afterwards and verify that the backup created is correct.
        '''

        drive0 = self.drives[0]
        result = self.vm.qmp('blockdev-add',
                             node_name=drive0['id'],
                             driver=drive0['fmt'],
                             file={
                                 'driver': 'blkdebug',
                                 'image': {
                                     'driver': 'file',
                                     'filename': drive0['file']
                                 },
                                 'set-state': [{
                                     'event': 'flush_to_disk',
                                     'state': 1,
                                     'new_state': 2
                                 }],
                                 'inject-error': [{
                                     'event': 'read_aio',
                                     'errno': 5,
                                     'state': 2,
                                     'immediately': False,
                                     'once': True
                                 }],
                             })
        self.assert_qmp(result, 'return', {})

        self.create_anchor_backup(drive0)
        self.add_bitmap('bitmap0', drive0)
        # Note: at this point, during a normal execution,
        # Assume that the VM resumes and begins issuing IO requests here.

        self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                          ('0xfe', '16M', '256k'),
                                          ('0x64', '32736k', '64k')))

        result = self.create_incremental(validate=False)
        self.assertFalse(result)
        self.hmp_io_writes(drive0['id'], (('0x9a', 0, 512),
                                          ('0x55', '8M', '352k'),
                                          ('0x78', '15872k', '1M')))
        self.create_incremental()
        self.vm.shutdown()
        self.check_backups()

    def test_incremental_pause(self):
        """
        Test an incremental backup that errors into a pause and is resumed.
        """

        drive0 = self.drives[0]
        # NB: The blkdebug script here looks for a "flush, read" pattern.
        # The flush occurs in hmp_io_writes, and the read during the block job.
        result = self.vm.qmp('blockdev-add',
                             node_name=drive0['id'],
                             driver=drive0['fmt'],
                             file={
                                 'driver': 'blkdebug',
                                 'image': {
                                     'driver': 'file',
                                     'filename': drive0['file']
                                 },
                                 'set-state': [{
                                     'event': 'flush_to_disk',
                                     'state': 1,
                                     'new_state': 2
                                 }],
                                 'inject-error': [{
                                     'event': 'read_aio',
                                     'errno': 5,
                                     'state': 2,
                                     'immediately': False,
                                     'once': True
                                 }],
                             })
        self.assert_qmp(result, 'return', {})
        self.create_anchor_backup(drive0)
        bitmap = self.add_bitmap('bitmap0', drive0)

        # Emulate guest activity
        self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                          ('0xfe', '16M', '256k'),
                                          ('0x64', '32736k', '64k')))

        # Bitmap Status Check
        self.assertTrue(self.vm.check_bitmap_status(
            drive0['id'], bitmap.name, {
                'count': 458752,
                'granularity': 65536,
                'status': 'active',
                'busy': False,
                'recording': True
            }))

        # Start backup
        parent, _ = bitmap.last_target()
        target = self.prepare_backup(bitmap, parent)
        res = self.vm.qmp('drive-backup',
                          job_id=bitmap.drive['id'],
                          device=bitmap.drive['id'],
                          sync='incremental',
                          bitmap=bitmap.name,
                          format=bitmap.drive['fmt'],
                          target=target,
                          mode='existing',
                          on_source_error='stop')
        self.assert_qmp(res, 'return', {})

        # Wait for the error
        event = self.vm.event_wait(name="BLOCK_JOB_ERROR",
                                   match={"data": {"device": bitmap.drive['id']}})
        self.assert_qmp(event, 'data', {'device': bitmap.drive['id'],
                                        'action': 'stop',
                                        'operation': 'read'})

        # Bitmap Status Check
        self.assertTrue(self.vm.check_bitmap_status(
            drive0['id'], bitmap.name, {
                'count': 458752,
                'granularity': 65536,
                'status': 'frozen',
                'busy': True,
                'recording': True
            }))

        # Resume and check incremental backup for consistency
        res = self.vm.qmp('block-job-resume', device=bitmap.drive['id'])
        self.assert_qmp(res, 'return', {})
        self.wait_qmp_backup(bitmap.drive['id'])

        # Bitmap Status Check
        self.assertTrue(self.vm.check_bitmap_status(
            drive0['id'], bitmap.name, {
                'count': 0,
                'granularity': 65536,
                'status': 'active',
                'busy': False,
                'recording': True
            }))

        # Finalize / Cleanup
        self.make_reference_backup(bitmap)
        self.vm.shutdown()
        self.check_backups()


if __name__ == '__main__':
    iotests.main(supported_fmts=['qcow2'],
                 supported_protocols=['file'])