#!/usr/bin/env python
#
# Tests for incremental drive-backup
#
# Copyright (C) 2015 John Snow for Red Hat, Inc.
#
# Based on 056.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#

import os
import iotests


def io_write_patterns(img, patterns):
    for pattern in patterns:
        iotests.qemu_io('-c', 'write -P%s %s %s' % pattern, img)


def try_remove(img):
    try:
        os.remove(img)
    except OSError:
        pass


def transaction_action(action, **kwargs):
    return {
        'type': action,
        'data': dict((k.replace('_', '-'), v) for k, v in kwargs.items())
    }


def transaction_bitmap_clear(node, name, **kwargs):
    return transaction_action('block-dirty-bitmap-clear',
                              node=node, name=name, **kwargs)


def transaction_drive_backup(device, target, **kwargs):
    return transaction_action('drive-backup', job_id=device, device=device,
                              target=target, **kwargs)


class Bitmap:
    def __init__(self, name, drive):
        self.name = name
        self.drive = drive
        self.num = 0
        self.backups = list()

    def base_target(self):
        return (self.drive['backup'], None)

    def new_target(self, num=None):
        if num is None:
            num = self.num
        self.num = num + 1
        base = os.path.join(iotests.test_dir,
                            "%s.%s." % (self.drive['id'], self.name))
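        # e.g. a base of "drive0.bitmap0." yields target/reference pairs
        # such as "drive0.bitmap0.inc0.qcow2" / "drive0.bitmap0.ref0.qcow2".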
        suff = "%i.%s" % (num, self.drive['fmt'])
        target = base + "inc" + suff
        reference = base + "ref" + suff
        self.backups.append((target, reference))
        return (target, reference)

    def last_target(self):
        if self.backups:
            return self.backups[-1]
        return self.base_target()

    def del_target(self):
        for image in self.backups.pop():
            try_remove(image)
        self.num -= 1

    def cleanup(self):
        for backup in self.backups:
            for image in backup:
                try_remove(image)


class TestIncrementalBackupBase(iotests.QMPTestCase):
    def __init__(self, *args):
        super(TestIncrementalBackupBase, self).__init__(*args)
        self.bitmaps = list()
        self.files = list()
        self.drives = list()
        self.vm = iotests.VM()
        self.err_img = os.path.join(iotests.test_dir, 'err.%s' % iotests.imgfmt)


    def setUp(self):
        # Create a base image with a distinctive pattern.
        drive0 = self.add_node('drive0')
        self.img_create(drive0['file'], drive0['fmt'])
        self.vm.add_drive(drive0['file'])
        self.write_default_pattern(drive0['file'])
        self.vm.launch()


    def write_default_pattern(self, target):
        io_write_patterns(target, (('0x41', 0, 512),
                                   ('0xd5', '1M', '32k'),
                                   ('0xdc', '32M', '124k')))


    def add_node(self, node_id, fmt=iotests.imgfmt, path=None, backup=None):
        if path is None:
            path = os.path.join(iotests.test_dir, '%s.%s' % (node_id, fmt))
        if backup is None:
            backup = os.path.join(iotests.test_dir,
                                  '%s.full.backup.%s' % (node_id, fmt))

        self.drives.append({
            'id': node_id,
            'file': path,
            'backup': backup,
            'fmt': fmt})
        return self.drives[-1]


    def img_create(self, img, fmt=iotests.imgfmt, size='64M',
                   parent=None, parentFormat=None, **kwargs):
        optargs = []
        for k, v in kwargs.items():
            optargs = optargs + ['-o', '%s=%s' % (k, v)]
        args = ['create', '-f', fmt] + optargs + [img, size]
        if parent:
            if parentFormat is None:
                parentFormat = fmt
            args = args + ['-b', parent, '-F', parentFormat]
        iotests.qemu_img(*args)
        self.files.append(img)


    def do_qmp_backup(self, error='Input/output error', **kwargs):
        res = self.vm.qmp('drive-backup', **kwargs)
        self.assert_qmp(res, 'return', {})
        return self.wait_qmp_backup(kwargs['device'], error)


    def wait_qmp_backup(self, device, error='Input/output error'):
        event = self.vm.event_wait(name="BLOCK_JOB_COMPLETED",
                                   match={'data': {'device': device}})
        self.assertNotEqual(event, None)

        try:
            failure = self.dictpath(event, 'data/error')
        except AssertionError:
            # Backup succeeded.
            self.assert_qmp(event, 'data/offset', event['data']['len'])
            return True
        else:
            # Backup failed.
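            # BLOCK_JOB_COMPLETED carries an 'error' member only on failure;
            # check that it matches the error message we were told to expect.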
            self.assert_qmp(event, 'data/error', error)
            return False


    def wait_qmp_backup_cancelled(self, device):
        event = self.vm.event_wait(name='BLOCK_JOB_CANCELLED',
                                   match={'data': {'device': device}})
        self.assertNotEqual(event, None)


    def create_anchor_backup(self, drive=None):
        if drive is None:
            drive = self.drives[-1]
        res = self.do_qmp_backup(job_id=drive['id'],
                                 device=drive['id'], sync='full',
                                 format=drive['fmt'], target=drive['backup'])
        self.assertTrue(res)
        self.files.append(drive['backup'])
        return drive['backup']


    def make_reference_backup(self, bitmap=None):
        if bitmap is None:
            bitmap = self.bitmaps[-1]
        _, reference = bitmap.last_target()
        res = self.do_qmp_backup(job_id=bitmap.drive['id'],
                                 device=bitmap.drive['id'], sync='full',
                                 format=bitmap.drive['fmt'], target=reference)
        self.assertTrue(res)


    def add_bitmap(self, name, drive, **kwargs):
        bitmap = Bitmap(name, drive)
        self.bitmaps.append(bitmap)
        result = self.vm.qmp('block-dirty-bitmap-add', node=drive['id'],
                             name=bitmap.name, **kwargs)
        self.assert_qmp(result, 'return', {})
        return bitmap


    def prepare_backup(self, bitmap=None, parent=None):
        if bitmap is None:
            bitmap = self.bitmaps[-1]
        if parent is None:
            parent, _ = bitmap.last_target()

        target, _ = bitmap.new_target()
        self.img_create(target, bitmap.drive['fmt'], parent=parent)
        return target


    def create_incremental(self, bitmap=None, parent=None,
                           parentFormat=None, validate=True):
        if bitmap is None:
            bitmap = self.bitmaps[-1]
        if parent is None:
            parent, _ = bitmap.last_target()

        target = self.prepare_backup(bitmap, parent)
        res = self.do_qmp_backup(job_id=bitmap.drive['id'],
                                 device=bitmap.drive['id'],
                                 sync='incremental', bitmap=bitmap.name,
                                 format=bitmap.drive['fmt'], target=target,
                                 mode='existing')
        if not res:
            bitmap.del_target()
            self.assertFalse(validate)
        else:
            self.make_reference_backup(bitmap)
        return res


    def check_backups(self):
        for bitmap in self.bitmaps:
            for incremental, reference in bitmap.backups:
                self.assertTrue(iotests.compare_images(incremental, reference))
            last = bitmap.last_target()[0]
            self.assertTrue(iotests.compare_images(last, bitmap.drive['file']))


    def hmp_io_writes(self, drive, patterns):
        for pattern in patterns:
            self.vm.hmp_qemu_io(drive, 'write -P%s %s %s' % pattern)
        self.vm.hmp_qemu_io(drive, 'flush')


    def do_incremental_simple(self, **kwargs):
        self.create_anchor_backup()
        self.add_bitmap('bitmap0', self.drives[0], **kwargs)

        # Sanity: Create a "hollow" incremental backup
        self.create_incremental()
        # Three writes: One complete overwrite, one new segment,
        # and one partial overlap.
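        # (Each write dirties at least one bitmap cluster, so the next
        # incremental backup has real data to copy.)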
        self.hmp_io_writes(self.drives[0]['id'], (('0xab', 0, 512),
                                                  ('0xfe', '16M', '256k'),
                                                  ('0x64', '32736k', '64k')))
        self.create_incremental()
        # Three more writes, one of each kind, like above
        self.hmp_io_writes(self.drives[0]['id'], (('0x9a', 0, 512),
                                                  ('0x55', '8M', '352k'),
                                                  ('0x78', '15872k', '1M')))
        self.create_incremental()
        self.vm.shutdown()
        self.check_backups()


    def tearDown(self):
        self.vm.shutdown()
        for bitmap in self.bitmaps:
            bitmap.cleanup()
        for filename in self.files:
            try_remove(filename)


class TestIncrementalBackup(TestIncrementalBackupBase):
    def test_incremental_simple(self):
        '''
        Test: Create and verify three incremental backups.

        Create a bitmap and a full backup before VM execution begins,
        then create a series of three incremental backups "during execution,"
        i.e., after IO requests begin modifying the drive.
        '''
        return self.do_incremental_simple()


    def test_small_granularity(self):
        '''
        Test: Create and verify backups made with a small granularity bitmap.

        Perform the same test as test_incremental_simple, but with a
        granularity of only 32KiB instead of the present default of 64KiB.
        '''
        return self.do_incremental_simple(granularity=32768)


    def test_large_granularity(self):
        '''
        Test: Create and verify backups made with a large granularity bitmap.

        Perform the same test as test_incremental_simple, but with a
        granularity of 128KiB instead of the present default of 64KiB.
        '''
        return self.do_incremental_simple(granularity=131072)


    def test_larger_cluster_target(self):
        '''
        Test: Create and verify backups made to a larger cluster size target.

        With a default granularity of 64KiB, verify that a backup made to a
        target with a larger cluster size of 128KiB and no backing file works.
        '''
        drive0 = self.drives[0]

        # Create a cluster_size=128k full backup / "anchor" backup
        self.img_create(drive0['backup'], cluster_size='128k')
        self.assertTrue(self.do_qmp_backup(device=drive0['id'], sync='full',
                                           format=drive0['fmt'],
                                           target=drive0['backup'],
                                           mode='existing'))

        # Create a bitmap and dirty it with some new writes.
        # Overwriting [32736K, 32800K) dirties the bitmap clusters at
        # 32M-64K and 32M; 32M+64K is left undirtied.
        bitmap0 = self.add_bitmap('bitmap0', drive0)
        self.hmp_io_writes(drive0['id'],
                           (('0xab', 0, 512),
                            ('0xfe', '16M', '256k'),
                            ('0x64', '32736k', '64k')))
        # Check the dirty bitmap stats
        result = self.vm.qmp('query-block')
        self.assert_qmp(result, 'return[0]/dirty-bitmaps[0]/name', 'bitmap0')
        self.assert_qmp(result, 'return[0]/dirty-bitmaps[0]/count', 458752)
        self.assert_qmp(result, 'return[0]/dirty-bitmaps[0]/granularity', 65536)
        self.assert_qmp(result, 'return[0]/dirty-bitmaps[0]/status', 'active')

        # Prepare a cluster_size=128k backup target without a backing file.
        (target, _) = bitmap0.new_target()
        self.img_create(target, bitmap0.drive['fmt'], cluster_size='128k')

        # Perform the incremental backup.
        self.assertTrue(self.do_qmp_backup(device=bitmap0.drive['id'],
                                           sync='incremental',
                                           bitmap=bitmap0.name,
                                           format=bitmap0.drive['fmt'],
                                           target=target,
                                           mode='existing'))
        self.make_reference_backup(bitmap0)

        # Add the backing file, then compare and exit.
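        # ('rebase -u' only rewrites the backing-file metadata; no image
        # data is copied into the incremental target.)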
        iotests.qemu_img('rebase', '-f', drive0['fmt'], '-u', '-b',
                         drive0['backup'], '-F', drive0['fmt'], target)
        self.vm.shutdown()
        self.check_backups()


    def test_incremental_transaction(self):
        '''Test: Verify backups made from transactionally created bitmaps.

        Create a bitmap "before" VM execution begins, then create a second
        bitmap AFTER writes have already occurred. Use transactions to create
        a full backup and synchronize both bitmaps to this backup.
        Create an incremental backup through both bitmaps and verify that
        both backups match the current drive0 image.
        '''

        drive0 = self.drives[0]
        bitmap0 = self.add_bitmap('bitmap0', drive0)
        self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                          ('0xfe', '16M', '256k'),
                                          ('0x64', '32736k', '64k')))
        bitmap1 = self.add_bitmap('bitmap1', drive0)

        result = self.vm.qmp('transaction', actions=[
            transaction_bitmap_clear(bitmap0.drive['id'], bitmap0.name),
            transaction_bitmap_clear(bitmap1.drive['id'], bitmap1.name),
            transaction_drive_backup(drive0['id'], drive0['backup'],
                                     sync='full', format=drive0['fmt'])
        ])
        self.assert_qmp(result, 'return', {})
        self.wait_until_completed(drive0['id'])
        self.files.append(drive0['backup'])

        self.hmp_io_writes(drive0['id'], (('0x9a', 0, 512),
                                          ('0x55', '8M', '352k'),
                                          ('0x78', '15872k', '1M')))
        # Both bitmaps should be correctly in sync.
        self.create_incremental(bitmap0)
        self.create_incremental(bitmap1)
        self.vm.shutdown()
        self.check_backups()


    def do_transaction_failure_test(self, race=False):
        # Create a second drive, with its own pattern:
        drive1 = self.add_node('drive1')
        self.img_create(drive1['file'], drive1['fmt'])
        io_write_patterns(drive1['file'], (('0x14', 0, 512),
                                           ('0x5d', '1M', '32k'),
                                           ('0xcd', '32M', '124k')))

        # Create a blkdebug interface to this img as 'drive1'
        result = self.vm.qmp('blockdev-add',
                             node_name=drive1['id'],
                             driver=drive1['fmt'],
                             file={
                                 'driver': 'blkdebug',
                                 'image': {
                                     'driver': 'file',
                                     'filename': drive1['file']
                                 },
                                 'set-state': [{
                                     'event': 'flush_to_disk',
                                     'state': 1,
                                     'new_state': 2
                                 }],
                                 'inject-error': [{
                                     'event': 'read_aio',
                                     'errno': 5,
                                     'state': 2,
                                     'immediately': False,
                                     'once': True
                                 }],
                             })
        self.assert_qmp(result, 'return', {})

        # Create bitmaps and full backups for both drives
        drive0 = self.drives[0]
        dr0bm0 = self.add_bitmap('bitmap0', drive0)
        dr1bm0 = self.add_bitmap('bitmap0', drive1)
        self.create_anchor_backup(drive0)
        self.create_anchor_backup(drive1)
        self.assert_no_active_block_jobs()
        self.assertFalse(self.vm.get_qmp_events(wait=False))

        # Emulate some writes
        if not race:
            self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                              ('0xfe', '16M', '256k'),
                                              ('0x64', '32736k', '64k')))
        self.hmp_io_writes(drive1['id'], (('0xba', 0, 512),
                                          ('0xef', '16M', '256k'),
                                          ('0x46', '32736k', '64k')))

        # Create incremental backup targets
        target0 = self.prepare_backup(dr0bm0)
        target1 = self.prepare_backup(dr1bm0)

        # Ask for a new incremental backup for each drive,
        # expecting drive1's backup to fail. In the 'race' test,
        # we expect drive1 to attempt to cancel the empty drive0 job.
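        # (completion-mode=grouped, used below, ties the jobs together:
        # if either backup fails, the other is cancelled as well.)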
        transaction = [
            transaction_drive_backup(drive0['id'], target0, sync='incremental',
                                     format=drive0['fmt'], mode='existing',
                                     bitmap=dr0bm0.name),
            transaction_drive_backup(drive1['id'], target1, sync='incremental',
                                     format=drive1['fmt'], mode='existing',
                                     bitmap=dr1bm0.name)
        ]
        result = self.vm.qmp('transaction', actions=transaction,
                             properties={'completion-mode': 'grouped'})
        self.assert_qmp(result, 'return', {})

        # Observe that drive0's backup is cancelled and drive1 completes with
        # an error.
        self.wait_qmp_backup_cancelled(drive0['id'])
        self.assertFalse(self.wait_qmp_backup(drive1['id']))
        error = self.vm.event_wait('BLOCK_JOB_ERROR')
        self.assert_qmp(error, 'data', {'device': drive1['id'],
                                        'action': 'report',
                                        'operation': 'read'})
        self.assertFalse(self.vm.get_qmp_events(wait=False))
        self.assert_no_active_block_jobs()

        # Delete drive0's successful target and eliminate our record of the
        # unsuccessful drive1 target.
        dr0bm0.del_target()
        dr1bm0.del_target()
        if race:
            # Don't re-run the transaction; we only wanted to test the race.
            self.vm.shutdown()
            return

        # Prepare new incremental backup targets:
        target0 = self.prepare_backup(dr0bm0)
        target1 = self.prepare_backup(dr1bm0)

        # Re-run the exact same transaction.
        result = self.vm.qmp('transaction', actions=transaction,
                             properties={'completion-mode': 'grouped'})
        self.assert_qmp(result, 'return', {})

        # Both should complete successfully this time.
        self.assertTrue(self.wait_qmp_backup(drive0['id']))
        self.assertTrue(self.wait_qmp_backup(drive1['id']))
        self.make_reference_backup(dr0bm0)
        self.make_reference_backup(dr1bm0)
        self.assertFalse(self.vm.get_qmp_events(wait=False))
        self.assert_no_active_block_jobs()

        # And the images should of course validate.
        self.vm.shutdown()
        self.check_backups()

    def test_transaction_failure(self):
        '''Test: Verify backups made from a transaction that partially fails.

        Add a second drive with its own unique pattern, and add a bitmap to
        each drive. Use blkdebug to interfere with the backup on just one
        drive and attempt to create a coherent incremental backup across
        both drives.

        Verify a failure in one but not both, then delete the failed stubs
        and re-run the same transaction.

        Verify that both incrementals are created successfully.
        '''
        self.do_transaction_failure_test()

    def test_transaction_failure_race(self):
        '''Test: Verify that transactions with jobs that have no data to
        transfer do not cause race conditions in the cancellation of the
        entire transaction job group.
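        (The 'race' variant skips the guest writes, so drive0's job has no
        data to transfer when drive1's job fails and cancels the group.)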
        '''
        self.do_transaction_failure_test(race=True)


    def test_sync_dirty_bitmap_missing(self):
        self.assert_no_active_block_jobs()
        self.files.append(self.err_img)
        result = self.vm.qmp('drive-backup', device=self.drives[0]['id'],
                             sync='incremental', format=self.drives[0]['fmt'],
                             target=self.err_img)
        self.assert_qmp(result, 'error/class', 'GenericError')


    def test_sync_dirty_bitmap_not_found(self):
        self.assert_no_active_block_jobs()
        self.files.append(self.err_img)
        result = self.vm.qmp('drive-backup', device=self.drives[0]['id'],
                             sync='incremental', bitmap='unknown',
                             format=self.drives[0]['fmt'], target=self.err_img)
        self.assert_qmp(result, 'error/class', 'GenericError')


    def test_sync_dirty_bitmap_bad_granularity(self):
        '''
        Test: Check what happens if we provide an improper granularity.

        The granularity must always be a power of 2.
        '''
        self.assert_no_active_block_jobs()
        self.assertRaises(AssertionError, self.add_bitmap,
                          'bitmap0', self.drives[0],
                          granularity=64000)


class TestIncrementalBackupBlkdebug(TestIncrementalBackupBase):
    '''Incremental backup tests that utilize a BlkDebug filter on drive0.'''

    def setUp(self):
        drive0 = self.add_node('drive0')
        self.img_create(drive0['file'], drive0['fmt'])
        self.write_default_pattern(drive0['file'])
        self.vm.launch()

    def test_incremental_failure(self):
        '''Test: Verify backups made after a failure are correct.

        Simulate a failure during an incremental backup block job, emulate
        additional writes, then create another incremental backup afterwards
        and verify that the backup created is correct.
        '''

        drive0 = self.drives[0]
        result = self.vm.qmp('blockdev-add',
                             node_name=drive0['id'],
                             driver=drive0['fmt'],
                             file={
                                 'driver': 'blkdebug',
                                 'image': {
                                     'driver': 'file',
                                     'filename': drive0['file']
                                 },
                                 'set-state': [{
                                     'event': 'flush_to_disk',
                                     'state': 1,
                                     'new_state': 2
                                 }],
                                 'inject-error': [{
                                     'event': 'read_aio',
                                     'errno': 5,
                                     'state': 2,
                                     'immediately': False,
                                     'once': True
                                 }],
                             })
        self.assert_qmp(result, 'return', {})

        self.create_anchor_backup(drive0)
        self.add_bitmap('bitmap0', drive0)
        # Note: at this point during a normal execution, assume that the VM
        # resumes and begins issuing IO requests here.

        self.hmp_io_writes(drive0['id'], (('0xab', 0, 512),
                                          ('0xfe', '16M', '256k'),
                                          ('0x64', '32736k', '64k')))

        result = self.create_incremental(validate=False)
        self.assertFalse(result)
        self.hmp_io_writes(drive0['id'], (('0x9a', 0, 512),
                                          ('0x55', '8M', '352k'),
                                          ('0x78', '15872k', '1M')))
        self.create_incremental()
        self.vm.shutdown()
        self.check_backups()


if __name__ == '__main__':
    iotests.main(supported_fmts=['qcow2'])