#!/usr/bin/env python3
# group: rw backing
#
# Tests for image streaming.
#
# Copyright (C) 2012 IBM Corp.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#

import time
import os
import iotests
import unittest
from iotests import qemu_img, qemu_io

backing_img = os.path.join(iotests.test_dir, 'backing.img')
mid_img = os.path.join(iotests.test_dir, 'mid.img')
test_img = os.path.join(iotests.test_dir, 'test.img')

class TestSingleDrive(iotests.QMPTestCase):
    image_len = 1 * 1024 * 1024 # MB

    def setUp(self):
        # Chain: backing (raw) <- mid <- test; only backing and mid carry data
        iotests.create_image(backing_img, TestSingleDrive.image_len)
        qemu_img('create', '-f', iotests.imgfmt,
                 '-o', 'backing_file=%s' % backing_img,
                 '-F', 'raw', mid_img)
        qemu_img('create', '-f', iotests.imgfmt,
                 '-o', 'backing_file=%s' % mid_img,
                 '-F', iotests.imgfmt, test_img)
        qemu_io('-f', 'raw', '-c', 'write -P 0x1 0 512', backing_img)
        qemu_io('-f', iotests.imgfmt, '-c', 'write -P 0x1 524288 512', mid_img)
        self.vm = iotests.VM().add_drive("blkdebug::" + test_img,
                                         "backing.node-name=mid," +
                                         "backing.backing.node-name=base")
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()
        os.remove(test_img)
        os.remove(mid_img)
        os.remove(backing_img)

    def test_stream(self):
        self.assert_no_active_block_jobs()

        result = self.vm.qmp('block-stream', device='drive0')
        self.assert_qmp(result, 'return', {})

        self.wait_until_completed()

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

        self.assertEqual(qemu_io('-f', 'raw', '-c', 'map', backing_img),
                         qemu_io('-f', iotests.imgfmt, '-c', 'map', test_img),
                         'image file map does not match backing file after streaming')

    def test_stream_intermediate(self):
        self.assert_no_active_block_jobs()

        self.assertNotEqual(qemu_io('-f', 'raw', '-rU', '-c', 'map', backing_img),
                            qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', mid_img),
                            'image file map matches backing file before streaming')

        result = self.vm.qmp('block-stream', device='mid', job_id='stream-mid')
        self.assert_qmp(result, 'return', {})

        self.wait_until_completed(drive='stream-mid')

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

        self.assertEqual(qemu_io('-f', 'raw', '-c', 'map', backing_img),
                         qemu_io('-f', iotests.imgfmt, '-c', 'map', mid_img),
                         'image file map does not match backing file after streaming')

    def test_stream_pause(self):
        self.assert_no_active_block_jobs()

        self.vm.pause_drive('drive0')
        result = self.vm.qmp('block-stream', device='drive0')
        self.assert_qmp(result, 'return', {})

        self.pause_job('drive0', wait=False)
        self.vm.resume_drive('drive0')
        self.pause_wait('drive0')

        result = self.vm.qmp('query-block-jobs')
        offset = self.dictpath(result, 'return[0]/offset')

        # A paused job must not make progress
        time.sleep(0.5)
        result = self.vm.qmp('query-block-jobs')
        self.assert_qmp(result, 'return[0]/offset', offset)

        result = self.vm.qmp('block-job-resume', device='drive0')
        self.assert_qmp(result, 'return', {})

        self.wait_until_completed()

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

        self.assertEqual(qemu_io('-f', 'raw', '-c', 'map', backing_img),
                         qemu_io('-f', iotests.imgfmt, '-c', 'map', test_img),
                         'image file map does not match backing file after streaming')

    def test_stream_no_op(self):
        self.assert_no_active_block_jobs()

        # The image map is empty before the operation
        empty_map = qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', test_img)

        # This is a no-op: no data should ever be copied from the base image
        result = self.vm.qmp('block-stream', device='drive0', base=mid_img)
        self.assert_qmp(result, 'return', {})

        self.wait_until_completed()

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

        self.assertEqual(qemu_io('-f', iotests.imgfmt, '-c', 'map', test_img),
                         empty_map, 'image file map changed after a no-op')

    def test_stream_partial(self):
        self.assert_no_active_block_jobs()

        result = self.vm.qmp('block-stream', device='drive0', base=backing_img)
        self.assert_qmp(result, 'return', {})

        self.wait_until_completed()

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

        self.assertEqual(qemu_io('-f', iotests.imgfmt, '-c', 'map', mid_img),
                         qemu_io('-f', iotests.imgfmt, '-c', 'map', test_img),
                         'image file map does not match backing file after streaming')

    def test_device_not_found(self):
        result = self.vm.qmp('block-stream', device='nonexistent')
        self.assert_qmp(result, 'error/desc',
                        'Cannot find device=\'nonexistent\' nor node-name=\'nonexistent\'')

    def test_job_id_missing(self):
        result = self.vm.qmp('block-stream', device='mid')
        self.assert_qmp(result, 'error/desc', "Invalid job ID ''")

    def test_read_only(self):
        # Create a new file that we can attach (we need a read-only top)
        with iotests.FilePath('ro-top.img') as ro_top_path:
            qemu_img('create', '-f', iotests.imgfmt, ro_top_path,
                     str(self.image_len))

            result = self.vm.qmp('blockdev-add',
                                 node_name='ro-top',
                                 driver=iotests.imgfmt,
                                 read_only=True,
                                 file={
                                     'driver': 'file',
                                     'filename': ro_top_path,
                                     'read-only': True
                                 },
                                 backing='mid')
            self.assert_qmp(result, 'return', {})

            result = self.vm.qmp('block-stream', job_id='stream',
                                 device='ro-top', base_node='base')
            self.assert_qmp(result, 'error/desc', 'Block node is read-only')

            result = self.vm.qmp('blockdev-del', node_name='ro-top')
            self.assert_qmp(result, 'return', {})


class TestParallelOps(iotests.QMPTestCase):
    num_ops = 4 # Number of parallel block-stream operations
    num_imgs = num_ops * 2 + 1
    image_len = num_ops * 4 * 1024 * 1024
    imgs = []

    def setUp(self):
        opts = []
        self.imgs = []

        # Initialize file names and command-line options
        for i in range(self.num_imgs):
            img_depth = self.num_imgs - i - 1
            opts.append("backing." * img_depth + "node-name=node%d" % i)
            self.imgs.append(os.path.join(iotests.test_dir, 'img-%d.img' % i))

        # Create all images
        iotests.create_image(self.imgs[0], self.image_len)
        for i in range(1, self.num_imgs):
            qemu_img('create', '-f', iotests.imgfmt,
                     '-o', 'backing_file=%s' % self.imgs[i-1],
                     '-F', 'raw' if i == 1 else iotests.imgfmt, self.imgs[i])

        # Put data into the images we are copying data from
        odd_img_indexes = [x for x in reversed(range(self.num_imgs)) if x % 2 == 1]
        for i in range(len(odd_img_indexes)):
            # Alternate between 2MB and 4MB.
            # This way jobs will not finish in the same order they were created
            num_mb = 2 + 2 * (i % 2)
            qemu_io('-f', iotests.imgfmt,
                    '-c', 'write -P 0xFF %dM %dM' % (i * 4, num_mb),
                    self.imgs[odd_img_indexes[i]])

        # Attach the drive to the VM
        self.vm = iotests.VM()
        self.vm.add_drive(self.imgs[-1], ','.join(opts))
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()
        for img in self.imgs:
            os.remove(img)

    # Test that it's possible to run several block-stream operations
    # in parallel in the same snapshot chain
    @unittest.skipIf(os.environ.get('QEMU_CHECK_BLOCK_AUTO'), 'disabled in CI')
    def test_stream_parallel(self):
        self.assert_no_active_block_jobs()

        # Check that the maps don't match before the streaming operations
        for i in range(2, self.num_imgs, 2):
            self.assertNotEqual(qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.imgs[i]),
                                qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.imgs[i-1]),
                                'image file map matches backing file before streaming')

        # Create all streaming jobs
        pending_jobs = []
        for i in range(2, self.num_imgs, 2):
            node_name = 'node%d' % i
            job_id = 'stream-%s' % node_name
            pending_jobs.append(job_id)
            result = self.vm.qmp('block-stream', device=node_name,
                                 job_id=job_id, bottom=f'node{i-1}',
                                 speed=1024)
            self.assert_qmp(result, 'return', {})

        # Do this in reverse: After unthrottling them, some jobs may finish
        # before we have unthrottled all of them. This will drain their
        # subgraph, and this will make jobs above them advance (despite those
        # jobs on top being throttled). In the worst case, all jobs below the
        # top one are finished before we can unthrottle it, and this makes it
        # advance so far that it completes before we can unthrottle it - which
        # results in an error.
        # Starting from the top (i.e. in reverse) does not have this problem:
        # When a job finishes, the ones below it are not advanced.
        for job in reversed(pending_jobs):
            result = self.vm.qmp('block-job-set-speed', device=job, speed=0)
            self.assert_qmp(result, 'return', {})

        # Wait for all jobs to be finished.
        while len(pending_jobs) > 0:
            for event in self.vm.get_qmp_events(wait=True):
                if event['event'] == 'BLOCK_JOB_COMPLETED':
                    job_id = self.dictpath(event, 'data/device')
                    self.assertTrue(job_id in pending_jobs)
                    self.assert_qmp_absent(event, 'data/error')
                    pending_jobs.remove(job_id)

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

        # Check that all maps match now
        for i in range(2, self.num_imgs, 2):
            self.assertEqual(qemu_io('-f', iotests.imgfmt, '-c', 'map', self.imgs[i]),
                             qemu_io('-f', iotests.imgfmt, '-c', 'map', self.imgs[i-1]),
                             'image file map does not match backing file after streaming')

    # Test that it's not possible to perform two block-stream
    # operations if there are nodes involved in both.
287 def test_overlapping_1(self): 288 self.assert_no_active_block_jobs() 289 290 # Set a speed limit to make sure that this job blocks the rest 291 result = self.vm.qmp('block-stream', device='node4', 292 job_id='stream-node4', base=self.imgs[1], 293 filter_node_name='stream-filter', speed=1024*1024) 294 self.assert_qmp(result, 'return', {}) 295 296 result = self.vm.qmp('block-stream', device='node5', job_id='stream-node5', base=self.imgs[2]) 297 self.assert_qmp(result, 'error/desc', 298 "Node 'stream-filter' is busy: block device is in use by block job: stream") 299 300 result = self.vm.qmp('block-stream', device='node3', job_id='stream-node3', base=self.imgs[2]) 301 self.assert_qmp(result, 'error/desc', 302 "Node 'node3' is busy: block device is in use by block job: stream") 303 304 result = self.vm.qmp('block-stream', device='node4', job_id='stream-node4-v2') 305 self.assert_qmp(result, 'error/desc', 306 "Node 'node4' is busy: block device is in use by block job: stream") 307 308 # block-commit should also fail if it touches nodes used by the stream job 309 result = self.vm.qmp('block-commit', device='drive0', base=self.imgs[4], job_id='commit-node4') 310 self.assert_qmp(result, 'error/desc', 311 "Node 'stream-filter' is busy: block device is in use by block job: stream") 312 313 result = self.vm.qmp('block-commit', device='drive0', base=self.imgs[1], top=self.imgs[3], job_id='commit-node1') 314 self.assert_qmp(result, 'error/desc', 315 "Node 'node3' is busy: block device is in use by block job: stream") 316 317 # This fails because it needs to modify the backing string in node2, which is blocked 318 result = self.vm.qmp('block-commit', device='drive0', base=self.imgs[0], top=self.imgs[1], job_id='commit-node0') 319 self.assert_qmp(result, 'error/desc', 320 "Node 'node2' is busy: block device is in use by block job: stream") 321 322 result = self.vm.qmp('block-job-set-speed', device='stream-node4', speed=0) 323 self.assert_qmp(result, 'return', {}) 324 325 
self.wait_until_completed(drive='stream-node4') 326 self.assert_no_active_block_jobs() 327 328 # Similar to test_overlapping_1, but with block-commit 329 # blocking the other jobs 330 def test_overlapping_2(self): 331 self.assertLessEqual(9, self.num_imgs) 332 self.assert_no_active_block_jobs() 333 334 # Set a speed limit to make sure that this job blocks the rest 335 result = self.vm.qmp('block-commit', device='drive0', top=self.imgs[5], base=self.imgs[3], job_id='commit-node3', speed=1024*1024) 336 self.assert_qmp(result, 'return', {}) 337 338 result = self.vm.qmp('block-stream', device='node3', job_id='stream-node3') 339 self.assert_qmp(result, 'error/desc', 340 "Node 'node3' is busy: block device is in use by block job: commit") 341 342 result = self.vm.qmp('block-stream', device='node6', base=self.imgs[2], job_id='stream-node6') 343 self.assert_qmp(result, 'error/desc', 344 "Node 'node5' is busy: block device is in use by block job: commit") 345 346 result = self.vm.qmp('block-stream', device='node4', base=self.imgs[2], job_id='stream-node4') 347 self.assert_qmp(result, 'error/desc', 348 "Node 'node4' is busy: block device is in use by block job: commit") 349 350 result = self.vm.qmp('block-stream', device='node6', base=self.imgs[4], job_id='stream-node6-v2') 351 self.assert_qmp(result, 'error/desc', 352 "Node 'node5' is busy: block device is in use by block job: commit") 353 354 # This fails because block-commit currently blocks the active layer even if it's not used 355 result = self.vm.qmp('block-stream', device='drive0', base=self.imgs[5], job_id='stream-drive0') 356 self.assert_qmp(result, 'error/desc', 357 "Node 'drive0' is busy: block device is in use by block job: commit") 358 359 result = self.vm.qmp('block-job-set-speed', device='commit-node3', speed=0) 360 self.assert_qmp(result, 'return', {}) 361 362 self.wait_until_completed(drive='commit-node3') 363 364 # Similar to test_overlapping_2, but here block-commit doesn't use the 'top' parameter. 
365 # Internally this uses a mirror block job, hence the separate test case. 366 def test_overlapping_3(self): 367 self.assertLessEqual(8, self.num_imgs) 368 self.assert_no_active_block_jobs() 369 370 # Set a speed limit to make sure that this job blocks the rest 371 result = self.vm.qmp('block-commit', device='drive0', base=self.imgs[3], job_id='commit-drive0', speed=1024*1024) 372 self.assert_qmp(result, 'return', {}) 373 374 result = self.vm.qmp('block-stream', device='node5', base=self.imgs[3], job_id='stream-node6') 375 self.assert_qmp(result, 'error/desc', 376 "Node 'node5' is busy: block device is in use by block job: commit") 377 378 result = self.vm.qmp('block-job-set-speed', device='commit-drive0', speed=0) 379 self.assert_qmp(result, 'return', {}) 380 381 event = self.vm.event_wait(name='BLOCK_JOB_READY') 382 self.assert_qmp(event, 'data/device', 'commit-drive0') 383 self.assert_qmp(event, 'data/type', 'commit') 384 self.assert_qmp_absent(event, 'data/error') 385 386 result = self.vm.qmp('block-job-complete', device='commit-drive0') 387 self.assert_qmp(result, 'return', {}) 388 389 self.wait_until_completed(drive='commit-drive0') 390 391 # In this case the base node of the stream job is the same as the 392 # top node of commit job. Since this results in the commit filter 393 # node being part of the stream chain, this is not allowed. 
394 def test_overlapping_4(self): 395 self.assert_no_active_block_jobs() 396 397 # Commit from node2 into node0 398 result = self.vm.qmp('block-commit', device='drive0', 399 top=self.imgs[2], base=self.imgs[0], 400 filter_node_name='commit-filter', speed=1024*1024) 401 self.assert_qmp(result, 'return', {}) 402 403 # Stream from node2 into node4 404 result = self.vm.qmp('block-stream', device='node4', base_node='node2', job_id='node4') 405 self.assert_qmp(result, 'error/desc', 406 "Cannot freeze 'backing' link to 'commit-filter'") 407 408 result = self.vm.qmp('block-job-set-speed', device='drive0', speed=0) 409 self.assert_qmp(result, 'return', {}) 410 411 self.wait_until_completed() 412 self.assert_no_active_block_jobs() 413 414 # In this case the base node of the stream job is the commit job's 415 # filter node. stream does not have a real dependency on its base 416 # node, so even though commit removes it when it is done, there is 417 # no conflict. 418 def test_overlapping_5(self): 419 self.assert_no_active_block_jobs() 420 421 # Commit from node2 into node0 422 result = self.vm.qmp('block-commit', device='drive0', 423 top_node='node2', base_node='node0', 424 filter_node_name='commit-filter', speed=1024*1024) 425 self.assert_qmp(result, 'return', {}) 426 427 # Stream from node2 into node4 428 result = self.vm.qmp('block-stream', device='node4', 429 base_node='commit-filter', job_id='node4') 430 self.assert_qmp(result, 'return', {}) 431 432 result = self.vm.qmp('block-job-set-speed', device='drive0', speed=0) 433 self.assert_qmp(result, 'return', {}) 434 435 self.vm.run_job(job='drive0', auto_dismiss=True) 436 self.vm.run_job(job='node4', auto_dismiss=True) 437 self.assert_no_active_block_jobs() 438 439 # Assert that node0 is now the backing node of node4 440 result = self.vm.qmp('query-named-block-nodes') 441 node4 = next(node for node in result['return'] if node['node-name'] == 'node4') 442 self.assertEqual(node4['image']['backing-image']['filename'], 
self.imgs[0]) 443 444 # Test a block-stream and a block-commit job in parallel 445 # Here the stream job is supposed to finish quickly in order to reproduce 446 # the scenario that triggers the bug fixed in 3d5d319e1221 and 1a63a907507 447 def test_stream_commit_1(self): 448 self.assertLessEqual(8, self.num_imgs) 449 self.assert_no_active_block_jobs() 450 451 # Stream from node0 into node2 452 result = self.vm.qmp('block-stream', device='node2', base_node='node0', job_id='node2') 453 self.assert_qmp(result, 'return', {}) 454 455 # Commit from the active layer into node3 456 result = self.vm.qmp('block-commit', device='drive0', base=self.imgs[3]) 457 self.assert_qmp(result, 'return', {}) 458 459 # Wait for all jobs to be finished. 460 pending_jobs = ['node2', 'drive0'] 461 while len(pending_jobs) > 0: 462 for event in self.vm.get_qmp_events(wait=True): 463 if event['event'] == 'BLOCK_JOB_COMPLETED': 464 node_name = self.dictpath(event, 'data/device') 465 self.assertTrue(node_name in pending_jobs) 466 self.assert_qmp_absent(event, 'data/error') 467 pending_jobs.remove(node_name) 468 if event['event'] == 'BLOCK_JOB_READY': 469 self.assert_qmp(event, 'data/device', 'drive0') 470 self.assert_qmp(event, 'data/type', 'commit') 471 self.assert_qmp_absent(event, 'data/error') 472 self.assertTrue('drive0' in pending_jobs) 473 self.vm.qmp('block-job-complete', device='drive0') 474 475 self.assert_no_active_block_jobs() 476 477 # This is similar to test_stream_commit_1 but both jobs are slowed 478 # down so they can run in parallel for a little while. 
479 def test_stream_commit_2(self): 480 self.assertLessEqual(8, self.num_imgs) 481 self.assert_no_active_block_jobs() 482 483 # Stream from node0 into node4 484 result = self.vm.qmp('block-stream', device='node4', base_node='node0', job_id='node4', speed=1024*1024) 485 self.assert_qmp(result, 'return', {}) 486 487 # Commit from the active layer into node5 488 result = self.vm.qmp('block-commit', device='drive0', base=self.imgs[5], speed=1024*1024) 489 self.assert_qmp(result, 'return', {}) 490 491 for job in ['drive0', 'node4']: 492 result = self.vm.qmp('block-job-set-speed', device=job, speed=0) 493 self.assert_qmp(result, 'return', {}) 494 495 # Wait for all jobs to be finished. 496 pending_jobs = ['node4', 'drive0'] 497 while len(pending_jobs) > 0: 498 for event in self.vm.get_qmp_events(wait=True): 499 if event['event'] == 'BLOCK_JOB_COMPLETED': 500 node_name = self.dictpath(event, 'data/device') 501 self.assertTrue(node_name in pending_jobs) 502 self.assert_qmp_absent(event, 'data/error') 503 pending_jobs.remove(node_name) 504 if event['event'] == 'BLOCK_JOB_READY': 505 self.assert_qmp(event, 'data/device', 'drive0') 506 self.assert_qmp(event, 'data/type', 'commit') 507 self.assert_qmp_absent(event, 'data/error') 508 self.assertTrue('drive0' in pending_jobs) 509 self.vm.qmp('block-job-complete', device='drive0') 510 511 self.assert_no_active_block_jobs() 512 513 # Test the base_node parameter 514 def test_stream_base_node_name(self): 515 self.assert_no_active_block_jobs() 516 517 self.assertNotEqual(qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.imgs[4]), 518 qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.imgs[3]), 519 'image file map matches backing file before streaming') 520 521 # Error: the base node does not exist 522 result = self.vm.qmp('block-stream', device='node4', base_node='none', job_id='stream') 523 self.assert_qmp(result, 'error/desc', 524 'Cannot find device=\'\' nor node-name=\'none\'') 525 526 # Error: the base node is not a 
backing file of the top node 527 result = self.vm.qmp('block-stream', device='node4', base_node='node6', job_id='stream') 528 self.assert_qmp(result, 'error/desc', 529 "Node 'node6' is not a backing image of 'node4'") 530 531 # Error: the base node is the same as the top node 532 result = self.vm.qmp('block-stream', device='node4', base_node='node4', job_id='stream') 533 self.assert_qmp(result, 'error/desc', 534 "Node 'node4' is not a backing image of 'node4'") 535 536 # Error: cannot specify 'base' and 'base-node' at the same time 537 result = self.vm.qmp('block-stream', device='node4', base=self.imgs[2], base_node='node2', job_id='stream') 538 self.assert_qmp(result, 'error/desc', 539 "'base' and 'base-node' cannot be specified at the same time") 540 541 # Success: the base node is a backing file of the top node 542 result = self.vm.qmp('block-stream', device='node4', base_node='node2', job_id='stream') 543 self.assert_qmp(result, 'return', {}) 544 545 self.wait_until_completed(drive='stream') 546 547 self.assert_no_active_block_jobs() 548 self.vm.shutdown() 549 550 self.assertEqual(qemu_io('-f', iotests.imgfmt, '-c', 'map', self.imgs[4]), 551 qemu_io('-f', iotests.imgfmt, '-c', 'map', self.imgs[3]), 552 'image file map matches backing file after streaming') 553 554class TestQuorum(iotests.QMPTestCase): 555 num_children = 3 556 children = [] 557 backing = [] 558 559 @iotests.skip_if_unsupported(['quorum']) 560 def setUp(self): 561 opts = ['driver=quorum', 'vote-threshold=2'] 562 563 # Initialize file names and command-line options 564 for i in range(self.num_children): 565 child_img = os.path.join(iotests.test_dir, 'img-%d.img' % i) 566 backing_img = os.path.join(iotests.test_dir, 'backing-%d.img' % i) 567 self.children.append(child_img) 568 self.backing.append(backing_img) 569 qemu_img('create', '-f', iotests.imgfmt, backing_img, '1M') 570 qemu_io('-f', iotests.imgfmt, 571 '-c', 'write -P 0x55 0 1024', backing_img) 572 qemu_img('create', '-f', iotests.imgfmt, 
573 '-o', 'backing_file=%s' % backing_img, 574 '-F', iotests.imgfmt, child_img) 575 opts.append("children.%d.file.filename=%s" % (i, child_img)) 576 opts.append("children.%d.node-name=node%d" % (i, i)) 577 578 # Attach the drive to the VM 579 self.vm = iotests.VM() 580 self.vm.add_drive(path = None, opts = ','.join(opts)) 581 self.vm.launch() 582 583 def tearDown(self): 584 self.vm.shutdown() 585 for img in self.children: 586 os.remove(img) 587 for img in self.backing: 588 os.remove(img) 589 590 def test_stream_quorum(self): 591 self.assertNotEqual(qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.children[0]), 592 qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.backing[0]), 593 'image file map matches backing file before streaming') 594 595 self.assert_no_active_block_jobs() 596 597 result = self.vm.qmp('block-stream', device='node0', job_id='stream-node0') 598 self.assert_qmp(result, 'return', {}) 599 600 self.wait_until_completed(drive='stream-node0') 601 602 self.assert_no_active_block_jobs() 603 self.vm.shutdown() 604 605 self.assertEqual(qemu_io('-f', iotests.imgfmt, '-c', 'map', self.children[0]), 606 qemu_io('-f', iotests.imgfmt, '-c', 'map', self.backing[0]), 607 'image file map does not match backing file after streaming') 608 609class TestSmallerBackingFile(iotests.QMPTestCase): 610 backing_len = 1 * 1024 * 1024 # MB 611 image_len = 2 * backing_len 612 613 def setUp(self): 614 iotests.create_image(backing_img, self.backing_len) 615 qemu_img('create', '-f', iotests.imgfmt, 616 '-o', 'backing_file=%s' % backing_img, 617 '-F', 'raw', test_img, str(self.image_len)) 618 self.vm = iotests.VM().add_drive(test_img) 619 self.vm.launch() 620 621 # If this hangs, then you are missing a fix to complete streaming when the 622 # end of the backing file is reached. 
623 def test_stream(self): 624 self.assert_no_active_block_jobs() 625 626 result = self.vm.qmp('block-stream', device='drive0') 627 self.assert_qmp(result, 'return', {}) 628 629 self.wait_until_completed() 630 631 self.assert_no_active_block_jobs() 632 self.vm.shutdown() 633 634class TestErrors(iotests.QMPTestCase): 635 image_len = 2 * 1024 * 1024 # MB 636 637 # this should match STREAM_BUFFER_SIZE/512 in block/stream.c 638 STREAM_BUFFER_SIZE = 512 * 1024 639 640 def create_blkdebug_file(self, name, event, errno): 641 file = open(name, 'w') 642 file.write(''' 643[inject-error] 644state = "1" 645event = "%s" 646errno = "%d" 647immediately = "off" 648once = "on" 649sector = "%d" 650 651[set-state] 652state = "1" 653event = "%s" 654new_state = "2" 655 656[set-state] 657state = "2" 658event = "%s" 659new_state = "1" 660''' % (event, errno, self.STREAM_BUFFER_SIZE // 512, event, event)) 661 file.close() 662 663class TestEIO(TestErrors): 664 def setUp(self): 665 self.blkdebug_file = backing_img + ".blkdebug" 666 iotests.create_image(backing_img, TestErrors.image_len) 667 self.create_blkdebug_file(self.blkdebug_file, "read_aio", 5) 668 qemu_img('create', '-f', iotests.imgfmt, 669 '-o', 'backing_file=blkdebug:%s:%s,backing_fmt=raw' 670 % (self.blkdebug_file, backing_img), 671 test_img) 672 self.vm = iotests.VM().add_drive(test_img) 673 self.vm.launch() 674 675 def tearDown(self): 676 self.vm.shutdown() 677 os.remove(test_img) 678 os.remove(backing_img) 679 os.remove(self.blkdebug_file) 680 681 def test_report(self): 682 self.assert_no_active_block_jobs() 683 684 result = self.vm.qmp('block-stream', device='drive0') 685 self.assert_qmp(result, 'return', {}) 686 687 completed = False 688 error = False 689 while not completed: 690 for event in self.vm.get_qmp_events(wait=True): 691 if event['event'] == 'BLOCK_JOB_ERROR': 692 self.assert_qmp(event, 'data/device', 'drive0') 693 self.assert_qmp(event, 'data/operation', 'read') 694 error = True 695 elif event['event'] == 
'BLOCK_JOB_COMPLETED': 696 self.assertTrue(error, 'job completed unexpectedly') 697 self.assert_qmp(event, 'data/type', 'stream') 698 self.assert_qmp(event, 'data/device', 'drive0') 699 self.assert_qmp(event, 'data/error', 'Input/output error') 700 self.assert_qmp(event, 'data/offset', self.STREAM_BUFFER_SIZE) 701 self.assert_qmp(event, 'data/len', self.image_len) 702 completed = True 703 elif event['event'] == 'JOB_STATUS_CHANGE': 704 self.assert_qmp(event, 'data/id', 'drive0') 705 706 self.assert_no_active_block_jobs() 707 self.vm.shutdown() 708 709 def test_ignore(self): 710 self.assert_no_active_block_jobs() 711 712 result = self.vm.qmp('block-stream', device='drive0', on_error='ignore') 713 self.assert_qmp(result, 'return', {}) 714 715 error = False 716 completed = False 717 while not completed: 718 for event in self.vm.get_qmp_events(wait=True): 719 if event['event'] == 'BLOCK_JOB_ERROR': 720 error = True 721 self.assert_qmp(event, 'data/device', 'drive0') 722 self.assert_qmp(event, 'data/operation', 'read') 723 result = self.vm.qmp('query-block-jobs') 724 if result == {'return': []}: 725 # Job finished too quickly 726 continue 727 self.assertIn(result['return'][0]['status'], 728 ['running', 'pending', 'aborting', 'concluded']) 729 elif event['event'] == 'BLOCK_JOB_COMPLETED': 730 self.assertTrue(error, 'job completed unexpectedly') 731 self.assert_qmp(event, 'data/type', 'stream') 732 self.assert_qmp(event, 'data/device', 'drive0') 733 self.assert_qmp(event, 'data/error', 'Input/output error') 734 self.assert_qmp(event, 'data/offset', self.image_len) 735 self.assert_qmp(event, 'data/len', self.image_len) 736 completed = True 737 elif event['event'] == 'JOB_STATUS_CHANGE': 738 self.assert_qmp(event, 'data/id', 'drive0') 739 740 self.assert_no_active_block_jobs() 741 self.vm.shutdown() 742 743 def test_stop(self): 744 self.assert_no_active_block_jobs() 745 746 result = self.vm.qmp('block-stream', device='drive0', on_error='stop') 747 self.assert_qmp(result, 
'return', {})

        # Tail of an on_error test (method header above): drain QMP events
        # until the job completes.  Every injected read error must pause the
        # job (io-status 'failed'); we resume it each time and expect the job
        # to finish successfully in the end.
        error = False
        completed = False
        while not completed:
            for event in self.vm.get_qmp_events(wait=True):
                if event['event'] == 'BLOCK_JOB_ERROR':
                    error = True
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/operation', 'read')

                    # The BLOCK_JOB_ERROR event may arrive before the job has
                    # actually transitioned to 'paused'; wait for the
                    # JOB_STATUS_CHANGE event in that case.
                    if self.vm.qmp('query-block-jobs')['return'][0]['status'] != 'paused':
                        self.vm.events_wait([(
                            'JOB_STATUS_CHANGE',
                            {'data': {'id': 'drive0', 'status': 'paused'}}
                        )])

                    result = self.vm.qmp('query-block-jobs')
                    self.assert_qmp(result, 'return[0]/status', 'paused')
                    self.assert_qmp(result, 'return[0]/offset', self.STREAM_BUFFER_SIZE)
                    self.assert_qmp(result, 'return[0]/io-status', 'failed')

                    result = self.vm.qmp('block-job-resume', device='drive0')
                    self.assert_qmp(result, 'return', {})

                    result = self.vm.qmp('query-block-jobs')
                    if result == {'return': []}:
                        # Race; likely already finished. Check.
                        continue
                    self.assertIn(result['return'][0]['status'],
                                  ['running', 'pending', 'aborting', 'concluded'])
                    self.assert_qmp(result, 'return[0]/io-status', 'ok')
                elif event['event'] == 'BLOCK_JOB_COMPLETED':
                    # Completion is only valid after at least one error was
                    # seen, and must not carry an error of its own.
                    self.assertTrue(error, 'job completed unexpectedly')
                    self.assert_qmp(event, 'data/type', 'stream')
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp_absent(event, 'data/error')
                    self.assert_qmp(event, 'data/offset', self.image_len)
                    self.assert_qmp(event, 'data/len', self.image_len)
                    completed = True
                elif event['event'] == 'JOB_STATUS_CHANGE':
                    self.assert_qmp(event, 'data/id', 'drive0')

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

    def test_enospc(self):
        """on_error=enospc with a non-ENOSPC read error: the job must fail.

        Judging by the asserted completion event below, the injected error
        (set up in this class's setUp, above this chunk) surfaces as
        'Input/output error', so the enospc policy does not pause the job;
        it stops at the first failing buffer.
        """
        self.assert_no_active_block_jobs()

        result = self.vm.qmp('block-stream', device='drive0', on_error='enospc')
        self.assert_qmp(result, 'return', {})

        completed = False
        error = False
        while not completed:
            for event in self.vm.get_qmp_events(wait=True):
                if event['event'] == 'BLOCK_JOB_ERROR':
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/operation', 'read')
                    error = True
                elif event['event'] == 'BLOCK_JOB_COMPLETED':
                    self.assertTrue(error, 'job completed unexpectedly')
                    self.assert_qmp(event, 'data/type', 'stream')
                    self.assert_qmp(event, 'data/device', 'drive0')
                    # The job fails outright and reports where it stopped.
                    self.assert_qmp(event, 'data/error', 'Input/output error')
                    self.assert_qmp(event, 'data/offset', self.STREAM_BUFFER_SIZE)
                    self.assert_qmp(event, 'data/len', self.image_len)
                    completed = True
                elif event['event'] == 'JOB_STATUS_CHANGE':
                    self.assert_qmp(event, 'data/id', 'drive0')

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

class TestENOSPC(TestErrors):
    """Stream error handling when the injected error really is ENOSPC.

    Same harness as TestErrors, but blkdebug injects errno 28 (ENOSPC),
    so the on_error=enospc policy must pause the job instead of failing it.
    """

    def setUp(self):
        # create_blkdebug_file() is inherited from TestErrors (defined
        # above this chunk); errno 28 is ENOSPC.
        self.blkdebug_file = backing_img + ".blkdebug"
        iotests.create_image(backing_img, TestErrors.image_len)
        self.create_blkdebug_file(self.blkdebug_file, "read_aio", 28)
        qemu_img('create', '-f', iotests.imgfmt,
                 '-o', 'backing_file=blkdebug:%s:%s,backing_fmt=raw'
                       % (self.blkdebug_file, backing_img),
                 test_img)
        self.vm = iotests.VM().add_drive(test_img)
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()
        os.remove(test_img)
        os.remove(backing_img)
        os.remove(self.blkdebug_file)

    def test_enospc(self):
        """Each ENOSPC error pauses the job with io-status 'nospace';
        after resuming, the job must eventually complete successfully."""
        self.assert_no_active_block_jobs()

        result = self.vm.qmp('block-stream', device='drive0', on_error='enospc')
        self.assert_qmp(result, 'return', {})

        error = False
        completed = False
        while not completed:
            for event in self.vm.get_qmp_events(wait=True):
                if event['event'] == 'BLOCK_JOB_ERROR':
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp(event, 'data/operation', 'read')
                    error = True

                    # The BLOCK_JOB_ERROR event may arrive before the job
                    # has actually transitioned to 'paused'; wait for the
                    # JOB_STATUS_CHANGE event in that case.
                    if self.vm.qmp('query-block-jobs')['return'][0]['status'] != 'paused':
                        self.vm.events_wait([(
                            'JOB_STATUS_CHANGE',
                            {'data': {'id': 'drive0', 'status': 'paused'}}
                        )])

                    result = self.vm.qmp('query-block-jobs')
                    self.assert_qmp(result, 'return[0]/status', 'paused')
                    self.assert_qmp(result, 'return[0]/offset', self.STREAM_BUFFER_SIZE)
                    self.assert_qmp(result, 'return[0]/io-status', 'nospace')

                    result = self.vm.qmp('block-job-resume', device='drive0')
                    self.assert_qmp(result, 'return', {})

                    result = self.vm.qmp('query-block-jobs')
                    if result == {'return': []}:
                        # Race; likely already finished. Check.
                        continue
                    self.assertIn(result['return'][0]['status'],
                                  ['running', 'pending', 'aborting', 'concluded'])
                    self.assert_qmp(result, 'return[0]/io-status', 'ok')
                elif event['event'] == 'BLOCK_JOB_COMPLETED':
                    self.assertTrue(error, 'job completed unexpectedly')
                    self.assert_qmp(event, 'data/type', 'stream')
                    self.assert_qmp(event, 'data/device', 'drive0')
                    self.assert_qmp_absent(event, 'data/error')
                    self.assert_qmp(event, 'data/offset', self.image_len)
                    self.assert_qmp(event, 'data/len', self.image_len)
                    completed = True
                elif event['event'] == 'JOB_STATUS_CHANGE':
                    self.assert_qmp(event, 'data/id', 'drive0')

        self.assert_no_active_block_jobs()
        self.vm.shutdown()

class TestStreamStop(iotests.QMPTestCase):
    """Cancelling a stream job must not produce spurious completion events."""

    image_len = 8 * 1024 * 1024 * 1024 # 8 GiB

    def setUp(self):
        qemu_img('create', backing_img, str(TestStreamStop.image_len))
        qemu_io('-f', 'raw', '-c', 'write -P 0x1 0 32M', backing_img)
        qemu_img('create', '-f', iotests.imgfmt,
                 '-o', 'backing_file=%s' % backing_img,
                 '-F', 'raw', test_img)
        qemu_io('-f', iotests.imgfmt, '-c', 'write -P 0x1 32M 32M', test_img)
        # The blkdebug wrapper lets pause_drive()/resume_drive() suspend
        # I/O so the job cannot finish before we cancel it.
        self.vm = iotests.VM().add_drive("blkdebug::" + test_img)
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()
        os.remove(test_img)
        os.remove(backing_img)

    def test_stream_stop(self):
        self.assert_no_active_block_jobs()

        self.vm.pause_drive('drive0')
        result = self.vm.qmp('block-stream', device='drive0')
        self.assert_qmp(result, 'return', {})

        # While I/O is suspended, only job status changes may be emitted —
        # in particular, no BLOCK_JOB_COMPLETED.
        time.sleep(0.1)
        events = self.vm.get_qmp_events(wait=False)
        for e in events:
            self.assert_qmp(e, 'event', 'JOB_STATUS_CHANGE')
            self.assert_qmp(e, 'data/id', 'drive0')

        self.cancel_and_wait(resume=True)

class TestSetSpeed(iotests.QMPTestCase):
    """Checks for the block-job speed limit (block-job-set-speed)."""

    image_len = 80 * 1024 * 1024 # 80 MiB

    def setUp(self):
        qemu_img('create', backing_img, str(TestSetSpeed.image_len))
        qemu_io('-f', 'raw', '-c', 'write -P 0x1 0 32M', backing_img)
        qemu_img('create', '-f', iotests.imgfmt,
                 '-o', 'backing_file=%s' % backing_img,
                 '-F', 'raw', test_img)
        qemu_io('-f', iotests.imgfmt, '-c', 'write -P 0x1 32M 32M', test_img)
        self.vm = iotests.VM().add_drive('blkdebug::' + test_img)
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()
        os.remove(test_img)
        os.remove(backing_img)

    # This is a short performance test which is not run by default.
    # Invoke "IMGFMT=qed ./030 TestSetSpeed.perf_test_throughput"
    def perf_test_throughput(self):
        self.assert_no_active_block_jobs()

        result = self.vm.qmp('block-stream', device='drive0')
        self.assert_qmp(result, 'return', {})

        result = self.vm.qmp('block-job-set-speed', device='drive0', speed=8 * 1024 * 1024)
        self.assert_qmp(result, 'return', {})

        self.wait_until_completed()

        self.assert_no_active_block_jobs()

    def test_set_speed(self):
        """The speed can be set both via block-job-set-speed and via the
        'speed' argument of block-stream, and query-block-jobs reports it."""
        self.assert_no_active_block_jobs()

        self.vm.pause_drive('drive0')
        result = self.vm.qmp('block-stream', device='drive0')
        self.assert_qmp(result, 'return', {})

        # Default speed is 0
        result = self.vm.qmp('query-block-jobs')
        self.assert_qmp(result, 'return[0]/device', 'drive0')
        self.assert_qmp(result, 'return[0]/speed', 0)

        result = self.vm.qmp('block-job-set-speed', device='drive0', speed=8 * 1024 * 1024)
        self.assert_qmp(result, 'return', {})

        # Ensure the speed we set was accepted
        result = self.vm.qmp('query-block-jobs')
        self.assert_qmp(result, 'return[0]/device', 'drive0')
        self.assert_qmp(result, 'return[0]/speed', 8 * 1024 * 1024)

        self.cancel_and_wait(resume=True)
        self.vm.pause_drive('drive0')

        # Check setting speed in block-stream works
        result = self.vm.qmp('block-stream', device='drive0', speed=4 * 1024 * 1024)
        self.assert_qmp(result, 'return', {})

        result = self.vm.qmp('query-block-jobs')
        self.assert_qmp(result, 'return[0]/device', 'drive0')
        self.assert_qmp(result, 'return[0]/speed', 4 * 1024 * 1024)

        self.cancel_and_wait(resume=True)

    def test_set_speed_invalid(self):
        """A negative speed must be rejected both at job creation time and
        by block-job-set-speed on a running job."""
        self.assert_no_active_block_jobs()

        result = self.vm.qmp('block-stream', device='drive0', speed=-1)
        self.assert_qmp(result, 'error/desc', "Parameter 'speed' expects a non-negative value")

        self.assert_no_active_block_jobs()

        self.vm.pause_drive('drive0')
        result = self.vm.qmp('block-stream', device='drive0')
        self.assert_qmp(result, 'return', {})

        result = self.vm.qmp('block-job-set-speed', device='drive0', speed=-1)
        self.assert_qmp(result, 'error/desc', "Parameter 'speed' expects a non-negative value")

        self.cancel_and_wait(resume=True)

if __name__ == '__main__':
    iotests.main(supported_fmts=['qcow2', 'qed'],
                 supported_protocols=['file'])