#!/usr/bin/env python
#
# Tests for block device statistics
#
# Copyright (C) 2015 Igalia, S.L.
# Author: Alberto Garcia <berto@igalia.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#

import iotests
import os

interval_length = 10            # seconds; length of the timed-stats interval
nsec_per_sec = 1000000000
op_latency = nsec_per_sec // 1000  # See qtest_latency_ns in accounting.c
bad_sector = 8192               # sector where blkdebug injects I/O errors
bad_offset = bad_sector * 512   # the same position, as a byte offset
blkdebug_file = os.path.join(iotests.test_dir, 'blkdebug.conf')

class BlockDeviceStatsTestCase(iotests.QMPTestCase):
    """Verify the I/O statistics reported by query-blockstats.

    The test keeps its own running totals (total_rd_bytes, failed_wr_ops,
    wr_highest_offset, ...) while issuing qemu-io requests through HMP,
    then compares them against the values returned by query-blockstats.
    A blkdebug filter injects EIO at bad_sector so that failed operations
    can be produced on demand.  Subclasses override account_invalid /
    account_failed to cover every accounting configuration.
    """
    test_img = "null-aio://"
    total_rd_bytes = 0
    total_rd_ops = 0
    total_wr_bytes = 0
    total_wr_ops = 0
    total_wr_merged = 0
    total_flush_ops = 0
    failed_rd_ops = 0
    failed_wr_ops = 0
    invalid_rd_ops = 0
    invalid_wr_ops = 0
    wr_highest_offset = 0
    account_invalid = False
    account_failed = False

    def blockstats(self, device):
        """Return the 'stats' dict for *device* from query-blockstats.

        Raises an Exception if no entry for *device* is found.
        """
        result = self.vm.qmp("query-blockstats")
        for r in result['return']:
            if r['device'] == device:
                return r['stats']
        raise Exception("Device not found for blockstats: %s" % device)

    def create_blkdebug_file(self):
        """Write a blkdebug config injecting EIO (errno 5) on any read or
        write AIO request that touches bad_sector."""
        # 'with' guarantees the config is flushed and closed before the
        # VM is launched with it (and avoids shadowing the py2 builtin
        # 'file', as the previous open()/close() version did).
        with open(blkdebug_file, 'w') as conf_file:
            conf_file.write('''
[inject-error]
event = "read_aio"
errno = "5"
sector = "%d"

[inject-error]
event = "write_aio"
errno = "5"
sector = "%d"
''' % (bad_sector, bad_sector))

    def setUp(self):
        """Launch a VM with a blkdebug-wrapped drive configured with the
        accounting options of this test class."""
        drive_args = []
        drive_args.append("stats-intervals.0=%d" % interval_length)
        drive_args.append("stats-account-invalid=%s" %
                          ("on" if self.account_invalid else "off"))
        drive_args.append("stats-account-failed=%s" %
                          ("on" if self.account_failed else "off"))
        drive_args.append("file.image.read-zeroes=on")
        self.create_blkdebug_file()
        self.vm = iotests.VM().add_drive('blkdebug:%s:%s' %
                                         (blkdebug_file, self.test_img),
                                         ','.join(drive_args))
        self.vm.launch()
        # Set an initial value for the clock
        self.vm.qtest("clock_step %d" % nsec_per_sec)

    def tearDown(self):
        """Shut the VM down and remove the temporary blkdebug config."""
        self.vm.shutdown()
        os.remove(blkdebug_file)

    def accounted_ops(self, read = False, write = False, flush = False):
        """Return how many operations of the selected kinds the device is
        expected to have accounted, honouring account_failed and
        account_invalid."""
        ops = 0
        if write:
            ops += self.total_wr_ops
            if self.account_failed:
                ops += self.failed_wr_ops
            if self.account_invalid:
                ops += self.invalid_wr_ops
        if read:
            ops += self.total_rd_ops
            if self.account_failed:
                ops += self.failed_rd_ops
            if self.account_invalid:
                ops += self.invalid_rd_ops
        if flush:
            ops += self.total_flush_ops
        return ops

    def accounted_latency(self, read = False, write = False, flush = False):
        """Return the total latency (ns) expected for the selected kinds
        of operations.

        Unlike accounted_ops(), invalid operations never contribute
        latency: only successful and (when account_failed) failed
        operations are added here.
        """
        latency = 0
        if write:
            latency += self.total_wr_ops * op_latency
            if self.account_failed:
                latency += self.failed_wr_ops * op_latency
        if read:
            latency += self.total_rd_ops * op_latency
            if self.account_failed:
                latency += self.failed_rd_ops * op_latency
        if flush:
            latency += self.total_flush_ops * op_latency
        return latency

    def check_values(self):
        """Compare query-blockstats output for drive0 against the running
        totals kept by this test case."""
        stats = self.blockstats('drive0')

        # Check that the totals match with what we have calculated
        self.assertEqual(self.total_rd_bytes, stats['rd_bytes'])
        self.assertEqual(self.total_wr_bytes, stats['wr_bytes'])
        self.assertEqual(self.total_rd_ops, stats['rd_operations'])
        self.assertEqual(self.total_wr_ops, stats['wr_operations'])
        self.assertEqual(self.total_flush_ops, stats['flush_operations'])
        self.assertEqual(self.wr_highest_offset, stats['wr_highest_offset'])
        self.assertEqual(self.failed_rd_ops, stats['failed_rd_operations'])
        self.assertEqual(self.failed_wr_ops, stats['failed_wr_operations'])
        self.assertEqual(self.invalid_rd_ops, stats['invalid_rd_operations'])
        self.assertEqual(self.invalid_wr_ops, stats['invalid_wr_operations'])
        self.assertEqual(self.account_invalid, stats['account_invalid'])
        self.assertEqual(self.account_failed, stats['account_failed'])
        self.assertEqual(self.total_wr_merged, stats['wr_merged'])

        # Check that there's exactly one interval with the length we defined
        self.assertEqual(1, len(stats['timed_stats']))
        timed_stats = stats['timed_stats'][0]
        self.assertEqual(interval_length, timed_stats['interval_length'])

        # Every accounted operation takes exactly op_latency ns (see
        # qtest_latency_ns), so min == max == avg whenever anything at
        # all was accounted.
        total_rd_latency = self.accounted_latency(read = True)
        if (total_rd_latency != 0):
            self.assertEqual(total_rd_latency, stats['rd_total_time_ns'])
            self.assertEqual(op_latency, timed_stats['min_rd_latency_ns'])
            self.assertEqual(op_latency, timed_stats['max_rd_latency_ns'])
            self.assertEqual(op_latency, timed_stats['avg_rd_latency_ns'])
            self.assertLess(0, timed_stats['avg_rd_queue_depth'])
        else:
            self.assertEqual(0, stats['rd_total_time_ns'])
            self.assertEqual(0, timed_stats['min_rd_latency_ns'])
            self.assertEqual(0, timed_stats['max_rd_latency_ns'])
            self.assertEqual(0, timed_stats['avg_rd_latency_ns'])
            self.assertEqual(0, timed_stats['avg_rd_queue_depth'])

        # min read latency <= avg read latency <= max read latency
        self.assertLessEqual(timed_stats['min_rd_latency_ns'],
                             timed_stats['avg_rd_latency_ns'])
        self.assertLessEqual(timed_stats['avg_rd_latency_ns'],
                             timed_stats['max_rd_latency_ns'])

        total_wr_latency = self.accounted_latency(write = True)
        if (total_wr_latency != 0):
            self.assertEqual(total_wr_latency, stats['wr_total_time_ns'])
            self.assertEqual(op_latency, timed_stats['min_wr_latency_ns'])
            self.assertEqual(op_latency, timed_stats['max_wr_latency_ns'])
            self.assertEqual(op_latency, timed_stats['avg_wr_latency_ns'])
            self.assertLess(0, timed_stats['avg_wr_queue_depth'])
        else:
            self.assertEqual(0, stats['wr_total_time_ns'])
            self.assertEqual(0, timed_stats['min_wr_latency_ns'])
            self.assertEqual(0, timed_stats['max_wr_latency_ns'])
            self.assertEqual(0, timed_stats['avg_wr_latency_ns'])
            self.assertEqual(0, timed_stats['avg_wr_queue_depth'])

        # min write latency <= avg write latency <= max write latency
        self.assertLessEqual(timed_stats['min_wr_latency_ns'],
                             timed_stats['avg_wr_latency_ns'])
        self.assertLessEqual(timed_stats['avg_wr_latency_ns'],
                             timed_stats['max_wr_latency_ns'])

        total_flush_latency = self.accounted_latency(flush = True)
        if (total_flush_latency != 0):
            self.assertEqual(total_flush_latency, stats['flush_total_time_ns'])
            self.assertEqual(op_latency, timed_stats['min_flush_latency_ns'])
            self.assertEqual(op_latency, timed_stats['max_flush_latency_ns'])
            self.assertEqual(op_latency, timed_stats['avg_flush_latency_ns'])
        else:
            self.assertEqual(0, stats['flush_total_time_ns'])
            self.assertEqual(0, timed_stats['min_flush_latency_ns'])
            self.assertEqual(0, timed_stats['max_flush_latency_ns'])
            self.assertEqual(0, timed_stats['avg_flush_latency_ns'])

        # min flush latency <= avg flush latency <= max flush latency
        self.assertLessEqual(timed_stats['min_flush_latency_ns'],
                             timed_stats['avg_flush_latency_ns'])
        self.assertLessEqual(timed_stats['avg_flush_latency_ns'],
                             timed_stats['max_flush_latency_ns'])

        # idle_time_ns must be > 0 if we have performed any operation
        if (self.accounted_ops(read = True, write = True, flush = True) != 0):
            self.assertLess(0, stats['idle_time_ns'])
        else:
            self.assertFalse('idle_time_ns' in stats)

        # This test does not alter these, so they must be all 0
        self.assertEqual(0, stats['rd_merged'])
        self.assertEqual(0, stats['failed_flush_operations'])
        self.assertEqual(0, stats['invalid_flush_operations'])

    def do_test_stats(self, rd_size = 0, rd_ops = 0, wr_size = 0, wr_ops = 0,
                      flush_ops = 0, invalid_rd_ops = 0, invalid_wr_ops = 0,
                      failed_rd_ops = 0, failed_wr_ops = 0, wr_merged = 0):
        """Issue the requested mix of I/O operations on drive0, update the
        expected totals accordingly, then call check_values().

        Invalid operations use the '-i' qemu-io flag; failed ones target
        bad_offset, where blkdebug injects EIO.
        """
        # The 'ops' list will contain all the requested I/O operations
        ops = []
        for i in range(rd_ops):
            ops.append("aio_read %d %d" % (i * rd_size, rd_size))

        for i in range(wr_ops):
            ops.append("aio_write %d %d" % (i * wr_size, wr_size))

        for i in range(flush_ops):
            ops.append("aio_flush")

        highest_offset = wr_ops * wr_size

        for i in range(invalid_rd_ops):
            ops.append("aio_read -i 0 512")

        for i in range(invalid_wr_ops):
            ops.append("aio_write -i 0 512")

        for i in range(failed_rd_ops):
            ops.append("aio_read %d 512" % bad_offset)

        for i in range(failed_wr_ops):
            ops.append("aio_write %d 512" % bad_offset)

        # We need an extra aio_flush to settle all outstanding AIO
        # operations before we can advance the virtual clock, so that
        # the last access happens before clock_step and idle_time_ns
        # will be greater than 0
        extra_flush = 0
        if rd_ops + wr_ops + invalid_rd_ops + invalid_wr_ops + \
           failed_rd_ops + failed_wr_ops > 0:
            extra_flush = 1

        if extra_flush > 0:
            ops.append("aio_flush")

        if failed_wr_ops > 0:
            highest_offset = max(highest_offset, bad_offset + 512)

        # Now perform all operations
        for op in ops:
            self.vm.hmp_qemu_io("drive0", op)

        # Update the expected totals
        self.total_rd_bytes += rd_ops * rd_size
        self.total_rd_ops += rd_ops
        self.total_wr_bytes += wr_ops * wr_size
        self.total_wr_ops += wr_ops
        self.total_wr_merged += wr_merged
        self.total_flush_ops += flush_ops + extra_flush
        self.invalid_rd_ops += invalid_rd_ops
        self.invalid_wr_ops += invalid_wr_ops
        self.failed_rd_ops += failed_rd_ops
        self.failed_wr_ops += failed_wr_ops

        self.wr_highest_offset = max(self.wr_highest_offset, highest_offset)

        # Advance the clock so idle_time_ns has a meaningful value
        self.vm.qtest("clock_step %d" % nsec_per_sec)

        # And check that the actual statistics match the expected ones
        self.check_values()

    def test_read_only(self):
        """Successful reads of various sizes and counts."""
        test_values = [[512,    1],
                       [65536,  1],
                       [512,   12],
                       [65536, 12]]
        for i in test_values:
            self.do_test_stats(rd_size = i[0], rd_ops = i[1])

    def test_write_only(self):
        """Successful writes of various sizes and counts."""
        test_values = [[512,    1],
                       [65536,  1],
                       [512,   12],
                       [65536, 12]]
        for i in test_values:
            self.do_test_stats(wr_size = i[0], wr_ops = i[1])

    def test_invalid(self):
        """Invalid reads and writes, alone and combined."""
        self.do_test_stats(invalid_rd_ops = 7)
        self.do_test_stats(invalid_wr_ops = 3)
        self.do_test_stats(invalid_rd_ops = 4, invalid_wr_ops = 5)

    def test_failed(self):
        """Failed (EIO-injected) reads and writes, alone and combined."""
        self.do_test_stats(failed_rd_ops = 8)
        self.do_test_stats(failed_wr_ops = 6)
        self.do_test_stats(failed_rd_ops = 5, failed_wr_ops = 12)

    def test_flush(self):
        """Explicit flush operations only."""
        self.do_test_stats(flush_ops = 8)

    def test_all(self):
        """Mixed workloads combining every kind of operation."""
        # rd_size, rd_ops, wr_size, wr_ops, flush_ops
        # invalid_rd_ops,  invalid_wr_ops,
        # failed_rd_ops,   failed_wr_ops
        # wr_merged
        test_values = [[512,    1, 512,   1, 1, 4, 7, 5, 2, 0],
                       [65536,  1, 2048, 12, 7, 7, 5, 2, 5, 0],
                       [32768,  9, 8192,  1, 4, 3, 2, 4, 6, 0],
                       [16384, 11, 3584, 16, 9, 8, 6, 7, 3, 0]]
        for i in test_values:
            self.do_test_stats(*i)

    def test_no_op(self):
        # All values must be sane before doing any I/O
        self.check_values()


class BlockDeviceStatsTestAccountInvalid(BlockDeviceStatsTestCase):
    account_invalid = True
    account_failed = False

class BlockDeviceStatsTestAccountFailed(BlockDeviceStatsTestCase):
    account_invalid = False
    account_failed = True

class BlockDeviceStatsTestAccountBoth(BlockDeviceStatsTestCase):
    account_invalid = True
    account_failed = True

class BlockDeviceStatsTestCoroutine(BlockDeviceStatsTestCase):
    test_img = "null-co://"

if __name__ == '__main__':
    iotests.main(supported_fmts=["raw"])