151 (f7ccc3295b3d7c49d4a7a3d42242cd5b50111e35) → 151 (b6aed193e5ecca32bb07e062f58f0daca06e7009)

#!/usr/bin/env python3
# group: rw
#
# Tests for active mirroring
#
# Copyright (C) 2018 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
--- 65 unchanged lines hidden ---

        # Start some background requests
        for offset in range(1 * self.image_len // 8, 3 * self.image_len // 8, 1024 * 1024):
            self.vm.hmp_qemu_io('source', 'aio_write -P 2 %i 1M' % offset)
        for offset in range(2 * self.image_len // 8, 3 * self.image_len // 8, 1024 * 1024):
            self.vm.hmp_qemu_io('source', 'aio_write -z %i 1M' % offset)

        # Start the block job
-        result = self.vm.qmp('blockdev-mirror',
-                             job_id='mirror',
-                             filter_node_name='mirror-node',
-                             device='source-node',
-                             target='target-node',
-                             sync='full',
-                             copy_mode='write-blocking')
-        self.assert_qmp(result, 'return', {})
+        self.vm.cmd('blockdev-mirror',
+                    job_id='mirror',
+                    filter_node_name='mirror-node',
+                    device='source-node',
+                    target='target-node',
+                    sync='full',
+                    copy_mode='write-blocking')

        # Start some more requests
        for offset in range(3 * self.image_len // 8, 5 * self.image_len // 8, 1024 * 1024):
            self.vm.hmp_qemu_io('source', 'aio_write -P 3 %i 1M' % offset)
        for offset in range(4 * self.image_len // 8, 5 * self.image_len // 8, 1024 * 1024):
            self.vm.hmp_qemu_io('source', 'aio_write -z %i 1M' % offset)

        # Wait for the READY event
--- 22 unchanged lines hidden ---
    def testActiveIOFlushed(self):
        self.doActiveIO(True)

    def testUnalignedActiveIO(self):
        # Fill the source image
        result = self.vm.hmp_qemu_io('source', 'write -P 1 0 2M')

        # Start the block job (very slowly)
-        result = self.vm.qmp('blockdev-mirror',
-                             job_id='mirror',
-                             filter_node_name='mirror-node',
-                             device='source-node',
-                             target='target-node',
-                             sync='full',
-                             copy_mode='write-blocking',
-                             buf_size=(1048576 // 4),
-                             speed=1)
-        self.assert_qmp(result, 'return', {})
+        self.vm.cmd('blockdev-mirror',
+                    job_id='mirror',
+                    filter_node_name='mirror-node',
+                    device='source-node',
+                    target='target-node',
+                    sync='full',
+                    copy_mode='write-blocking',
+                    buf_size=(1048576 // 4),
+                    speed=1)

        # Start an unaligned request to a dirty area
        result = self.vm.hmp_qemu_io('source', 'write -P 2 %i 1' % (1048576 + 42))

        # Let the job finish
-        result = self.vm.qmp('block-job-set-speed', device='mirror', speed=0)
-        self.assert_qmp(result, 'return', {})
+        self.vm.cmd('block-job-set-speed', device='mirror', speed=0)
        self.complete_and_wait(drive='mirror')

        self.potential_writes_in_flight = False

    def testIntersectingActiveIO(self):
        # Fill the source image
        result = self.vm.hmp_qemu_io('source', 'write -P 1 0 2M')

        # Start the block job (very slowly)
-        result = self.vm.qmp('blockdev-mirror',
-                             job_id='mirror',
-                             filter_node_name='mirror-node',
-                             device='source-node',
-                             target='target-node',
-                             sync='full',
-                             copy_mode='write-blocking',
-                             speed=1)
-        self.assert_qmp(result, 'return', {})
+        self.vm.cmd('blockdev-mirror',
+                    job_id='mirror',
+                    filter_node_name='mirror-node',
+                    device='source-node',
+                    target='target-node',
+                    sync='full',
+                    copy_mode='write-blocking',
+                    speed=1)

        self.vm.hmp_qemu_io('source', 'break write_aio A')
        self.vm.hmp_qemu_io('source', 'aio_write 0 1M')  # 1
        self.vm.hmp_qemu_io('source', 'wait_break A')
        self.vm.hmp_qemu_io('source', 'aio_write 0 2M')  # 2
        self.vm.hmp_qemu_io('source', 'aio_write 0 2M')  # 3

        # Now 2 and 3 are in mirror_wait_on_conflicts, waiting for 1
--- 14 unchanged lines hidden ---
        # In the past at that point 2 and 3 would wait for each other producing
        # a dead-lock. Now this is fixed and they will wait for request 4.

        self.vm.hmp_qemu_io('source', 'resume B')

        # After resuming 4, one of 2 and 3 goes first and set in_flight_bitmap,
        # so the other will wait for it.

-        result = self.vm.qmp('block-job-set-speed', device='mirror', speed=0)
-        self.assert_qmp(result, 'return', {})
+        self.vm.cmd('block-job-set-speed', device='mirror', speed=0)
        self.complete_and_wait(drive='mirror')

        self.potential_writes_in_flight = False


class TestThrottledWithNbdExportBase(iotests.QMPTestCase):
    image_len = 128 * 1024 * 1024  # MB
    iops: Optional[int] = None
--- 4 unchanged lines hidden ---
        self.assertIsNotNone(self.iops)

        qemu_img('create', '-f', iotests.imgfmt, source_img, '128M')
        qemu_img('create', '-f', iotests.imgfmt, target_img, '128M')

        self.vm = iotests.VM()
        self.vm.launch()

-        result = self.vm.qmp('object-add', **{
+        self.vm.cmd('object-add', **{
            'qom-type': 'throttle-group',
            'id': 'thrgr',
            'limits': {
                'iops-total': self.iops,
                'iops-total-max': self.iops
            }
        })
-        self.assert_qmp(result, 'return', {})

-        result = self.vm.qmp('blockdev-add', **{
+        self.vm.cmd('blockdev-add', **{
            'node-name': 'source-node',
            'driver': 'throttle',
            'throttle-group': 'thrgr',
            'file': {
                'driver': iotests.imgfmt,
                'file': {
                    'driver': 'file',
                    'filename': source_img
                }
            }
        })
-        self.assert_qmp(result, 'return', {})

-        result = self.vm.qmp('blockdev-add', **{
+        self.vm.cmd('blockdev-add', **{
            'node-name': 'target-node',
            'driver': iotests.imgfmt,
            'file': {
                'driver': 'file',
                'filename': target_img
            }
        })
-        self.assert_qmp(result, 'return', {})

        self.nbd_sock = iotests.file_path('nbd.sock',
                                          base_dir=iotests.sock_dir)
        self.nbd_url = f'nbd+unix:///source-node?socket={self.nbd_sock}'

-        result = self.vm.qmp('nbd-server-start', addr={
+        self.vm.cmd('nbd-server-start', addr={
            'type': 'unix',
            'data': {
                'path': self.nbd_sock
            }
        })
-        self.assert_qmp(result, 'return', {})

-        result = self.vm.qmp('block-export-add', id='exp0', type='nbd',
-                             node_name='source-node', writable=True)
-        self.assert_qmp(result, 'return', {})
+        self.vm.cmd('block-export-add', id='exp0', type='nbd',
+                    node_name='source-node', writable=True)

    def tearDown(self):
        # Wait for background requests to settle
        try:
            while True:
                p = self.background_processes.pop()
                while True:
                    try:
--- 36 unchanged lines hidden ---
        # intended to be copied, so active mirroring will only lead to not
        # losing progress, but also not making any).
        self.vm.hmp_qemu_io('source-node',
                            f'aio_write -P 1 0 {self.image_len // 2}')
        self.vm.qtest(f'clock_step {1 * 1000 * 1000 * 1000}')

        # Launch the mirror job
        mirror_buf_size = 65536
-        result = self.vm.qmp('blockdev-mirror',
-                             job_id='mirror',
-                             filter_node_name='mirror-node',
-                             device='source-node',
-                             target='target-node',
-                             sync='full',
-                             copy_mode='write-blocking',
-                             buf_size=mirror_buf_size)
-        self.assert_qmp(result, 'return', {})
+        self.vm.cmd('blockdev-mirror',
+                    job_id='mirror',
+                    filter_node_name='mirror-node',
+                    device='source-node',
+                    target='target-node',
+                    sync='full',
+                    copy_mode='write-blocking',
+                    buf_size=mirror_buf_size)

        # We create the external requests via qemu-io processes on the NBD
        # server. Have their offset start in the middle of the image so they
        # do not overlap with the background requests (which start from the
        # beginning).
        active_request_offset = self.image_len // 2
        active_request_len = 4096

--- 71 unchanged lines hidden ---
        self.background_processes += [p]

        # Give qemu-img bench time to start up and issue requests
        time.sleep(1.0)
        # Flush the request queue, so new requests can come in right as we
        # start blockdev-mirror
        self.vm.qtest(f'clock_step {1 * 1000 * 1000 * 1000}')

-        result = self.vm.qmp('blockdev-mirror',
-                             job_id='mirror',
-                             device='source-node',
-                             target='target-node',
-                             sync='full',
-                             copy_mode='write-blocking')
-        self.assert_qmp(result, 'return', {})
+        self.vm.cmd('blockdev-mirror',
+                    job_id='mirror',
+                    device='source-node',
+                    target='target-node',
+                    sync='full',
+                    copy_mode='write-blocking')


if __name__ == '__main__':
    iotests.main(supported_fmts=['qcow2', 'raw'],
                 supported_protocols=['file'])
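
Every hunk in this diff applies the same mechanical conversion: the old two-step idiom of sending a command with `vm.qmp()` and then checking the response with `assert_qmp(result, 'return', {})` collapses into a single `vm.cmd()` call. A minimal sketch of the two idioms, excerpted from the hunks above and assuming (as the new code relies on) that `cmd()` raises an exception when QMP reports an error rather than returning the error response for the caller to inspect:

```python
# Old idiom: qmp() returns the raw QMP response, so the test has to assert
# explicitly that it indicates success (an empty 'return' dictionary).
result = self.vm.qmp('block-job-set-speed', device='mirror', speed=0)
self.assert_qmp(result, 'return', {})

# New idiom: cmd() is assumed to raise on a QMP error, so a plain call is
# sufficient and the explicit assertion disappears.
self.vm.cmd('block-job-set-speed', device='mirror', speed=0)
```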