281: 48033ad678ae2def43bf0d543a2c4c3d2a93feaf → b6aed193e5ecca32bb07e062f58f0daca06e7009
 #!/usr/bin/env python3
 # group: rw
 #
 # Test cases for blockdev + IOThread interactions
 #
 # Copyright (C) 2019 Red Hat, Inc.
 #
 # This program is free software; you can redistribute it and/or modify

--- 42 unchanged lines hidden (view full) ---

                               force=True)
 
     def tearDown(self):
         self.vm.shutdown()
         for name in self.images:
             os.remove(self.images[name])
 
     def test_add_dirty_bitmap(self):
-        result = self.vm.qmp(
+        self.vm.cmd(
             'block-dirty-bitmap-add',
             node='drive0',
             name='bitmap1',
             persistent=True,
         )
 
-        self.assert_qmp(result, 'return', {})
 
 
 # Test for RHBZ#1746217 & RHBZ#1773517
 class TestNBDMirrorIOThread(iotests.QMPTestCase):
     nbd_sock = os.path.join(iotests.sock_dir, 'nbd.sock')
     drive0_img = os.path.join(iotests.test_dir, 'drive0.img')
     mirror_img = os.path.join(iotests.test_dir, 'mirror.img')
     images = { 'drive0': drive0_img, 'mirror': mirror_img }
 
     def setUp(self):

--- 23 unchanged lines hidden (view full) ---

 
     def tearDown(self):
         self.vm_src.shutdown()
         self.vm_tgt.shutdown()
         for name in self.images:
             os.remove(self.images[name])
 
     def test_nbd_mirror(self):
-        result = self.vm_tgt.qmp(
+        self.vm_tgt.cmd(
             'nbd-server-start',
             addr={
                 'type': 'unix',
                 'data': { 'path': self.nbd_sock }
             }
         )
-        self.assert_qmp(result, 'return', {})
 
-        result = self.vm_tgt.qmp(
+        self.vm_tgt.cmd(
             'nbd-server-add',
             device='drive0',
             writable=True
         )
-        self.assert_qmp(result, 'return', {})
 
-        result = self.vm_src.qmp(
+        self.vm_src.cmd(
             'drive-mirror',
             device='drive0',
             target='nbd+unix:///drive0?socket=' + self.nbd_sock,
             sync='full',
             mode='existing',
             speed=64*1024*1024,
             job_id='j1'
         )
-        self.assert_qmp(result, 'return', {})
 
         self.vm_src.event_wait(name="BLOCK_JOB_READY")
 
 
 # Test for RHBZ#1779036
 class TestExternalSnapshotAbort(iotests.QMPTestCase):
     drive0_img = os.path.join(iotests.test_dir, 'drive0.img')
     snapshot_img = os.path.join(iotests.test_dir, 'snapshot.img')

--- 143 unchanged lines hidden (view full) ---

         # Reconnect is done, so the reconnect delay timer should be gone.
         # (This is similar to how the open timer should be gone after open,
         # and similarly there used to be a bug where it was not gone.)
 
         # Delete the BDS to see whether both timers are gone. If they are not,
         # they will remain active, fire later, and then access freed data.
         # (Or, with "block/nbd: Assert there are no timers when closed"
         # applied, the assertions added in that patch will fail.)
-        result = self.vm.qmp('blockdev-del', node_name='nbd')
-        self.assert_qmp(result, 'return', {})
+        self.vm.cmd('blockdev-del', node_name='nbd')
 
         # Give the timers some time to fire (both have a timeout of 1 s).
         # (Sleeping in an iotest may ring some alarm bells, but note that if
         # the timing is off here, the test will just always pass. If we kill
         # the VM too early, then we just kill the timers before they can fire,
         # thus not see the error, and so the test will pass.)
         time.sleep(2)
 
     def test_yield_in_iothread(self):
         # Move the NBD node to the I/O thread; the NBD block driver should
         # attach the connection's QIOChannel to that thread's AioContext, too
-        result = self.vm.qmp('x-blockdev-set-iothread',
-                             node_name='nbd', iothread='iothr')
-        self.assert_qmp(result, 'return', {})
+        self.vm.cmd('x-blockdev-set-iothread',
+                    node_name='nbd', iothread='iothr')
 
         # Do some I/O that will be throttled by the QSD, so that the network
         # connection hopefully will yield here. When it is resumed, it must
         # then be resumed in the I/O thread's AioContext.
         result = self.vm.qmp('human-monitor-command',
                              command_line='qemu-io nbd "read 0 128K"')
         self.assert_qmp(result, 'return', '')
 

--- 30 unchanged lines hidden ---
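
The change applied throughout this diff is mechanical: each "result =
vm.qmp(...)" call followed by "self.assert_qmp(result, 'return', {})"
collapses into a single "vm.cmd(...)" call. Only invocations asserting the
empty-dict success response are converted; the 'human-monitor-command' call
above keeps the qmp()/assert_qmp() form because its assertion compares an
HMP output string, not the success pattern the conversion targets. Below is
a minimal sketch of the two styles, assuming the QEMUMachine helpers from
QEMU's python/qemu packages (the exact signatures and error type are stated
from memory, so treat them as assumptions):

    from qemu.machine import QEMUMachine

    vm = QEMUMachine('/usr/bin/qemu-system-x86_64')  # binary path: assumption
    vm.launch()

    # Old pattern: qmp() returns the raw response dict, and the caller must
    # verify success by hand (assert_qmp(result, 'return', {}) boils down to
    # checking the 'return' member of this dict).
    result = vm.qmp('query-status')
    assert 'return' in result

    # New pattern: cmd() unwraps the 'return' member and raises an exception
    # (ExecuteError in current QEMU) if QMP reports an error, so the explicit
    # assertion disappears while a failure still aborts the test.
    status = vm.cmd('query-status')

    vm.shutdown()

Besides being shorter, cmd() removes a real failure mode: with the old
pattern, a test that forgot the assert_qmp() line would silently swallow a
QMP error instead of failing.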