#!/usr/bin/env python3
# group: rw
#
# Tests for active mirroring
#
# Copyright (C) 2018 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#

import math
import os
import subprocess
import time
from typing import List, Optional
import iotests
from iotests import qemu_img

source_img = os.path.join(iotests.test_dir, 'source.' + iotests.imgfmt)
target_img = os.path.join(iotests.test_dir, 'target.' + iotests.imgfmt)

class TestActiveMirror(iotests.QMPTestCase):
    image_len = 128 * 1024 * 1024  # bytes (128 MiB)
    potential_writes_in_flight = True

    def setUp(self):
        qemu_img('create', '-f', iotests.imgfmt, source_img, '128M')
        qemu_img('create', '-f', iotests.imgfmt, target_img, '128M')

        blk_source = {'id': 'source',
                      'if': 'none',
                      'node-name': 'source-node',
                      'driver': iotests.imgfmt,
                      'file': {'driver': 'blkdebug',
                               'image': {'driver': 'file',
                                         'filename': source_img}}}

        blk_target = {'node-name': 'target-node',
                      'driver': iotests.imgfmt,
                      'file': {'driver': 'file',
                               'filename': target_img}}

        self.vm = iotests.VM()
        self.vm.add_drive_raw(self.vm.qmp_to_opts(blk_source))
        self.vm.add_blockdev(self.vm.qmp_to_opts(blk_target))
        self.vm.add_device('virtio-blk,id=vblk,drive=source')
        self.vm.launch()

    def tearDown(self):
        self.vm.shutdown()

        if not self.potential_writes_in_flight:
            self.assertTrue(iotests.compare_images(source_img, target_img),
                            'mirror target does not match source')

        os.remove(source_img)
        os.remove(target_img)

    def doActiveIO(self, sync_source_and_target):
        # Fill the source image
        self.vm.hmp_qemu_io('source',
                            'write -P 1 0 %i' % self.image_len)

        # Start some background requests
        for offset in range(1 * self.image_len // 8,
                            3 * self.image_len // 8,
                            1024 * 1024):
            self.vm.hmp_qemu_io('source', 'aio_write -P 2 %i 1M' % offset)
        for offset in range(2 * self.image_len // 8,
                            3 * self.image_len // 8,
                            1024 * 1024):
            self.vm.hmp_qemu_io('source', 'aio_write -z %i 1M' % offset)

        # Start the block job
        self.vm.cmd('blockdev-mirror',
                    job_id='mirror',
                    filter_node_name='mirror-node',
                    device='source-node',
                    target='target-node',
                    sync='full',
                    copy_mode='write-blocking')

        # Start some more requests
        for offset in range(3 * self.image_len // 8,
                            5 * self.image_len // 8,
                            1024 * 1024):
            self.vm.hmp_qemu_io('source', 'aio_write -P 3 %i 1M' % offset)
        for offset in range(4 * self.image_len // 8,
                            5 * self.image_len // 8,
                            1024 * 1024):
            self.vm.hmp_qemu_io('source', 'aio_write -z %i 1M' % offset)

        # Wait for the READY event
        self.wait_ready(drive='mirror')

        # Now start some final requests; all of these (which land on
        # the source) should be settled using the active mechanism.
        # The mirror code itself asserts that the source BDS's dirty
        # bitmap will stay clean between READY and COMPLETED.
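        # (As before, the -P writes are plain data writes, while -z makes
        # aio_write issue a write-zeroes request, so both write paths of
        # the active mirror are exercised.)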
        for offset in range(5 * self.image_len // 8,
                            7 * self.image_len // 8,
                            1024 * 1024):
            self.vm.hmp_qemu_io('source', 'aio_write -P 3 %i 1M' % offset)
        for offset in range(6 * self.image_len // 8,
                            7 * self.image_len // 8,
                            1024 * 1024):
            self.vm.hmp_qemu_io('source', 'aio_write -z %i 1M' % offset)

        if sync_source_and_target:
            # If source and target are supposed to be in sync after the
            # mirror, we have to flush before completion
            self.vm.hmp_qemu_io('source', 'aio_flush')
            self.potential_writes_in_flight = False

        self.complete_and_wait(drive='mirror', wait_ready=False)

    def testActiveIO(self):
        self.doActiveIO(False)

    def testActiveIOFlushed(self):
        self.doActiveIO(True)

    def testUnalignedActiveIO(self):
        # Fill the source image
        self.vm.hmp_qemu_io('source', 'write -P 1 0 2M')

        # Start the block job (very slowly)
        self.vm.cmd('blockdev-mirror',
                    job_id='mirror',
                    filter_node_name='mirror-node',
                    device='source-node',
                    target='target-node',
                    sync='full',
                    copy_mode='write-blocking',
                    buf_size=(1048576 // 4),
                    speed=1)

        # Start an unaligned request to a dirty area
        self.vm.hmp_qemu_io('source', 'write -P 2 %i 1' % (1048576 + 42))

        # Let the job finish
        self.vm.cmd('block-job-set-speed', device='mirror', speed=0)
        self.complete_and_wait(drive='mirror')

        self.potential_writes_in_flight = False

    def testIntersectingActiveIO(self):
        # Fill the source image
        self.vm.hmp_qemu_io('source', 'write -P 1 0 2M')

        # Start the block job (very slowly)
        self.vm.cmd('blockdev-mirror',
                    job_id='mirror',
                    filter_node_name='mirror-node',
                    device='source-node',
                    target='target-node',
                    sync='full',
                    copy_mode='write-blocking',
                    speed=1)

        self.vm.hmp_qemu_io('source', 'break write_aio A')
        self.vm.hmp_qemu_io('source', 'aio_write 0 1M')  # 1
        self.vm.hmp_qemu_io('source', 'wait_break A')
        self.vm.hmp_qemu_io('source', 'aio_write 0 2M')  # 2
        self.vm.hmp_qemu_io('source', 'aio_write 0 2M')  # 3

        # Now 2 and 3 are in mirror_wait_on_conflicts(), waiting for 1

        self.vm.hmp_qemu_io('source', 'break write_aio B')
        self.vm.hmp_qemu_io('source', 'aio_write 1M 2M')  # 4
        self.vm.hmp_qemu_io('source', 'wait_break B')

        # 4 does not wait for 2 and 3, because they have not yet set
        # in_flight_bitmap.  So nothing prevents 4 from proceeding except
        # for our break point B.

        self.vm.hmp_qemu_io('source', 'resume A')

        # Now that 1 is resumed, 2 and 3 move on to the next iteration of
        # the while loop in mirror_wait_on_conflicts().  They do not
        # exit, as the bitmap is dirty due to request 4.
        # In the past, 2 and 3 would wait for each other at that point,
        # producing a deadlock.  Now this is fixed and they wait for
        # request 4 instead.

        self.vm.hmp_qemu_io('source', 'resume B')

        # After resuming 4, one of 2 and 3 goes first and sets
        # in_flight_bitmap, so the other will wait for it.
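        # At that point all four requests can settle: first 1, then 4,
        # and finally 2 and 3 one after the other.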

        self.vm.cmd('block-job-set-speed', device='mirror', speed=0)
        self.complete_and_wait(drive='mirror')

        self.potential_writes_in_flight = False


class TestThrottledWithNbdExportBase(iotests.QMPTestCase):
    image_len = 128 * 1024 * 1024  # bytes (128 MiB)
    iops: Optional[int] = None
    background_processes: List['subprocess.Popen[str]'] = []

    def setUp(self):
        # Must be set by subclasses
        self.assertIsNotNone(self.iops)

        qemu_img('create', '-f', iotests.imgfmt, source_img, '128M')
        qemu_img('create', '-f', iotests.imgfmt, target_img, '128M')

        self.vm = iotests.VM()
        self.vm.launch()

        self.vm.cmd('object-add', **{
            'qom-type': 'throttle-group',
            'id': 'thrgr',
            'limits': {
                'iops-total': self.iops,
                'iops-total-max': self.iops
            }
        })

        self.vm.cmd('blockdev-add', **{
            'node-name': 'source-node',
            'driver': 'throttle',
            'throttle-group': 'thrgr',
            'file': {
                'driver': iotests.imgfmt,
                'file': {
                    'driver': 'file',
                    'filename': source_img
                }
            }
        })

        self.vm.cmd('blockdev-add', **{
            'node-name': 'target-node',
            'driver': iotests.imgfmt,
            'file': {
                'driver': 'file',
                'filename': target_img
            }
        })

        self.nbd_sock = iotests.file_path('nbd.sock',
                                          base_dir=iotests.sock_dir)
        self.nbd_url = f'nbd+unix:///source-node?socket={self.nbd_sock}'

        self.vm.cmd('nbd-server-start', addr={
            'type': 'unix',
            'data': {
                'path': self.nbd_sock
            }
        })

        self.vm.cmd('block-export-add', id='exp0', type='nbd',
                    node_name='source-node', writable=True)

    def tearDown(self):
        # Wait for background requests to settle
        try:
            while True:
                p = self.background_processes.pop()
                while True:
                    try:
                        p.wait(timeout=0.0)
                        break
                    except subprocess.TimeoutExpired:
                        self.vm.qtest(f'clock_step {1 * 1000 * 1000 * 1000}')
        except IndexError:
            pass

        # Cancel ongoing block jobs
        for job in self.vm.qmp('query-jobs')['return']:
            self.vm.qmp('block-job-cancel', device=job['id'], force=True)

        while True:
            self.vm.qtest(f'clock_step {1 * 1000 * 1000 * 1000}')
            if len(self.vm.qmp('query-jobs')['return']) == 0:
                break

        self.vm.shutdown()
        os.remove(source_img)
        os.remove(target_img)


class TestLowThrottledWithNbdExport(TestThrottledWithNbdExportBase):
    iops = 16

    def testUnderLoad(self):
        '''
        Throttle the source node, then issue a whole bunch of external
        requests while the mirror job (in write-blocking mode) is
        running.  We want to see background requests being issued even
        while the source is under full load by active writes, so that
        progress can be made towards READY.
        '''

        # Fill the first half of the source image; do not fill the second
        # half, because that is where the active requests will occur.
        # This ensures that active mirroring itself will not directly
        # contribute to the job's progress (because when the job was
        # started, those areas were not intended to be copied, so active
        # mirroring will only lead to not losing progress, but also not
        # making any).
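        # (clock_step advances the qtest virtual clock; in qtest mode the
        # throttle group's timers run on that clock, so stepping it is
        # what lets throttled requests proceed without real-time delays.)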
        self.vm.hmp_qemu_io('source-node',
                            f'aio_write -P 1 0 {self.image_len // 2}')
        self.vm.qtest(f'clock_step {1 * 1000 * 1000 * 1000}')

        # Launch the mirror job
        mirror_buf_size = 65536
        self.vm.cmd('blockdev-mirror',
                    job_id='mirror',
                    filter_node_name='mirror-node',
                    device='source-node',
                    target='target-node',
                    sync='full',
                    copy_mode='write-blocking',
                    buf_size=mirror_buf_size)

        # We create the external requests via qemu-io processes on the
        # NBD server.  Have their offsets start in the middle of the
        # image so they do not overlap with the background requests
        # (which start from the beginning).
        active_request_offset = self.image_len // 2
        active_request_len = 4096

        # Create enough requests to saturate the node for 5 seconds
        for _ in range(0, 5 * self.iops):
            req = f'write -P 42 {active_request_offset} {active_request_len}'
            active_request_offset += active_request_len
            p = iotests.qemu_io_popen('-f', 'nbd', self.nbd_url, '-c', req)
            self.background_processes += [p]

        # Now advance the clock one I/O operation at a time for 4 seconds
        # (i.e. one second less than 5).  We expect the mirror job to
        # issue background operations here, even though active requests
        # are still in flight.  The active requests will take precedence,
        # however, because they have been issued earlier than the
        # mirror's background requests.
        # Once the active requests started above are done (i.e. after 5
        # virtual seconds), we expect those background requests to be
        # worked on.  We only advance 4 seconds here to avoid race
        # conditions.
        for _ in range(0, 4 * self.iops):
            step = math.ceil(1 * 1000 * 1000 * 1000 / self.iops)
            self.vm.qtest(f'clock_step {step}')

        # Note how much remains to be done until the mirror job is
        # finished
        job_status = self.vm.qmp('query-jobs')['return'][0]
        start_remaining = job_status['total-progress'] - \
            job_status['current-progress']

        # Create a whole bunch more active requests
        for _ in range(0, 10 * self.iops):
            req = f'write -P 42 {active_request_offset} {active_request_len}'
            active_request_offset += active_request_len
            p = iotests.qemu_io_popen('-f', 'nbd', self.nbd_url, '-c', req)
            self.background_processes += [p]

        # Let the clock advance further.  After 1 second, as noted above,
        # we expect the background requests to be worked on.  Give them a
        # couple of seconds (specifically 4) to show their impact.
        for _ in range(0, 5 * self.iops):
            step = math.ceil(1 * 1000 * 1000 * 1000 / self.iops)
            self.vm.qtest(f'clock_step {step}')

        # Note how much remains to be done now.  We expect this number to
        # have shrunk thanks to those background requests.
        job_status = self.vm.qmp('query-jobs')['return'][0]
        end_remaining = job_status['total-progress'] - \
            job_status['current-progress']

        # See that progress was indeed made on the job, even while the
        # node was saturated with active requests
        self.assertGreater(start_remaining - end_remaining, 0)


class TestHighThrottledWithNbdExport(TestThrottledWithNbdExportBase):
    iops = 1024

    def testActiveOnCreation(self):
        '''
        Issue requests on the mirror source node right as the mirror is
        instated.  It is possible that requests occur before the actual
        job object is created, but after the node has been put into the
        graph.
        Write requests across the node must in that case be forwarded to
        the source node without attempting to mirror them (there is no
        job object yet, so attempting to access it would cause a
        segfault).
        We do this with a lightly throttled node (i.e. a rather high IOPS
        limit).  Using throttling seems to increase reproducibility, but
        if the limit is too low, all requests allowed per second will be
        submitted before mirror_start_job() gets to the problematic
        point.
        '''

        # Let qemu-img bench create write requests (enough for two
        # seconds on the virtual clock)
        bench_args = ['bench', '-w', '-d', '1024', '-f', 'nbd',
                      '-c', str(self.iops * 2), self.nbd_url]
        p = iotests.qemu_tool_popen(iotests.qemu_img_args + bench_args)
        self.background_processes += [p]

        # Give qemu-img bench time to start up and issue requests
        time.sleep(1.0)
        # Flush the request queue, so new requests can come in right as
        # we start blockdev-mirror
        self.vm.qtest(f'clock_step {1 * 1000 * 1000 * 1000}')

        self.vm.cmd('blockdev-mirror',
                    job_id='mirror',
                    device='source-node',
                    target='target-node',
                    sync='full',
                    copy_mode='write-blocking')


if __name__ == '__main__':
    iotests.main(supported_fmts=['qcow2', 'raw'],
                 supported_protocols=['file'])