from __future__ import print_function
#
# Migration test graph plotting
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see <http://www.gnu.org/licenses/>.
#

import sys


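# Plot renders one or more guestperf migration reports as a single HTML
# page: a plotly chart of guest performance and host CPU utilization
# traces, plus per-scenario configuration tables.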
class Plot(object):

    # Generated using
    # http://tools.medialab.sciences-po.fr/iwanthue/
    COLORS = ["#CD54D0",
              "#79D94C",
              "#7470CD",
              "#D2D251",
              "#863D79",
              "#76DDA6",
              "#D4467B",
              "#61923D",
              "#CB9CCA",
              "#D98F36",
              "#8CC8DA",
              "#CE4831",
              "#5E7693",
              "#9B803F",
              "#412F4C",
              "#CECBA6",
              "#6D3229",
              "#598B73",
              "#C8827C",
              "#394427"]

    def __init__(self,
                 reports,
                 migration_iters,
                 total_guest_cpu,
                 split_guest_cpu,
                 qemu_cpu,
                 vcpu_cpu):

        self._reports = reports
        self._migration_iters = migration_iters
        self._total_guest_cpu = total_guest_cpu
        self._split_guest_cpu = split_guest_cpu
        self._qemu_cpu = qemu_cpu
        self._vcpu_cpu = vcpu_cpu
        self._color_idx = 0

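    # Hand out the next colour from the palette, wrapping around once
    # all entries have been used.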
    def _next_color(self):
        color = self.COLORS[self._color_idx]
        self._color_idx += 1
        if self._color_idx >= len(self.COLORS):
            self._color_idx = 0
        return color

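    # Build the multi-line hover label attached to each data point,
    # summarizing the migration progress record active at that time.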
    def _get_progress_label(self, progress):
        if progress:
            return "\n\n" + "\n".join(
                ["Status: %s" % progress._status,
                 "Iteration: %d" % progress._ram._iterations,
                 "Throttle: %02d%%" % progress._throttle_pcent,
                 "Dirty rate: %dMB/s" % (progress._ram._dirty_rate_pps * 4 / 1024.0)])
        else:
            return "\n\n" + "\n".join(
                ["Status: %s" % "none",
                 "Iteration: %d" % 0])

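    # The earliest timestamp across the QEMU and guest timing records,
    # used as the origin of the wallclock time axis.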
    def _find_start_time(self, report):
        startqemu = report._qemu_timings._records[0]._timestamp
        startguest = report._guest_timings._records[0]._timestamp
        if startqemu < startguest:
            return startqemu
        else:
            return startguest

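    # Largest guest timing sample in the report, used to size the
    # primary y axis.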
    def _get_guest_max_value(self, report):
        maxvalue = 0
        for record in report._guest_timings._records:
            if record._value > maxvalue:
                maxvalue = record._value
        return maxvalue

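    # Peak QEMU host CPU utilization, derived from the deltas between
    # consecutive cumulative CPU-time samples; used to size the
    # secondary y axis.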
    def _get_qemu_max_value(self, report):
        maxvalue = 0
        oldvalue = None
        oldtime = None
        for record in report._qemu_timings._records:
            if oldvalue is not None:
                cpudelta = (record._value - oldvalue) / 1000.0
                timedelta = record._timestamp - oldtime
                if timedelta == 0:
                    continue
                util = cpudelta / timedelta * 100.0
            else:
                util = 0
            oldvalue = record._value
            oldtime = record._timestamp

            if util > maxvalue:
                maxvalue = util
        return maxvalue

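    # Scatter trace of the aggregate guest timing samples, with each
    # point labelled by the migration progress record current at that
    # timestamp.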
    def _get_total_guest_cpu_graph(self, report, starttime):
        xaxis = []
        yaxis = []
        labels = []
        progress_idx = -1
        for record in report._guest_timings._records:
            while ((progress_idx + 1) < len(report._progress_history) and
                   report._progress_history[progress_idx + 1]._now < record._timestamp):
                progress_idx = progress_idx + 1

            if progress_idx >= 0:
                progress = report._progress_history[progress_idx]
            else:
                progress = None

            xaxis.append(record._timestamp - starttime)
            yaxis.append(record._value)
            labels.append(self._get_progress_label(progress))

        from plotly import graph_objs as go
        return go.Scatter(x=xaxis,
                          y=yaxis,
                          name="Guest PIDs: %s" % report._scenario._name,
                          mode='lines',
                          line={
                              "dash": "solid",
                              "color": self._next_color(),
                              "shape": "linear",
                              "width": 1
                          },
                          text=labels)

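    # As above, but one scatter trace per guest thread (TID) rather
    # than a single aggregate trace.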
    def _get_split_guest_cpu_graphs(self, report, starttime):
        threads = {}
        for record in report._guest_timings._records:
            if record._tid in threads:
                continue
            threads[record._tid] = {
                "xaxis": [],
                "yaxis": [],
                "labels": [],
            }

        progress_idx = -1
        for record in report._guest_timings._records:
            while ((progress_idx + 1) < len(report._progress_history) and
                   report._progress_history[progress_idx + 1]._now < record._timestamp):
                progress_idx = progress_idx + 1

            if progress_idx >= 0:
                progress = report._progress_history[progress_idx]
            else:
                progress = None

            threads[record._tid]["xaxis"].append(record._timestamp - starttime)
            threads[record._tid]["yaxis"].append(record._value)
            threads[record._tid]["labels"].append(self._get_progress_label(progress))


        graphs = []
        from plotly import graph_objs as go
        for tid in threads.keys():
            graphs.append(
                go.Scatter(x=threads[tid]["xaxis"],
                           y=threads[tid]["yaxis"],
                           name="PID %s: %s" % (tid, report._scenario._name),
                           mode="lines",
                           line={
                               "dash": "solid",
                               "color": self._next_color(),
                               "shape": "linear",
                               "width": 1
                           },
                           text=threads[tid]["labels"]))
        return graphs

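    # Marker trace placing one star on the time axis for every
    # migration progress record, so iterations can be picked out.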
    def _get_migration_iters_graph(self, report, starttime):
        xaxis = []
        yaxis = []
        labels = []
        for progress in report._progress_history:
            xaxis.append(progress._now - starttime)
            yaxis.append(0)
            labels.append(self._get_progress_label(progress))

        from plotly import graph_objs as go
        return go.Scatter(x=xaxis,
                          y=yaxis,
                          text=labels,
                          name="Migration iterations",
                          mode="markers",
                          marker={
                              "color": self._next_color(),
                              "symbol": "star",
                              "size": 5
                          })

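    # Scatter trace of QEMU host CPU utilization on the secondary y
    # axis, computed from deltas between cumulative CPU-time samples.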
    def _get_qemu_cpu_graph(self, report, starttime):
        xaxis = []
        yaxis = []
        labels = []
        progress_idx = -1

        first = report._qemu_timings._records[0]
        abstimestamps = [first._timestamp]
        absvalues = [first._value]

        for record in report._qemu_timings._records[1:]:
            while ((progress_idx + 1) < len(report._progress_history) and
                   report._progress_history[progress_idx + 1]._now < record._timestamp):
                progress_idx = progress_idx + 1

            if progress_idx >= 0:
                progress = report._progress_history[progress_idx]
            else:
                progress = None

            oldvalue = absvalues[-1]
            oldtime = abstimestamps[-1]

            cpudelta = (record._value - oldvalue) / 1000.0
            timedelta = record._timestamp - oldtime
            if timedelta == 0:
                continue
            util = cpudelta / timedelta * 100.0

            abstimestamps.append(record._timestamp)
            absvalues.append(record._value)

            xaxis.append(record._timestamp - starttime)
            yaxis.append(util)
            labels.append(self._get_progress_label(progress))

        from plotly import graph_objs as go
        return go.Scatter(x=xaxis,
                          y=yaxis,
                          yaxis="y2",
                          name="QEMU: %s" % report._scenario._name,
                          mode='lines',
                          line={
                              "dash": "solid",
                              "color": self._next_color(),
                              "shape": "linear",
                              "width": 1
                          },
                          text=labels)

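    # One utilization trace per VCPU thread on the secondary y axis,
    # computed the same way as the QEMU trace but capped at 100%.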
    def _get_vcpu_cpu_graphs(self, report, starttime):
        threads = {}
        for record in report._vcpu_timings._records:
            if record._tid in threads:
                continue
            threads[record._tid] = {
                "xaxis": [],
                "yaxis": [],
                "labels": [],
                "absvalue": [record._value],
                "abstime": [record._timestamp],
            }

        progress_idx = -1
        for record in report._vcpu_timings._records:
            while ((progress_idx + 1) < len(report._progress_history) and
                   report._progress_history[progress_idx + 1]._now < record._timestamp):
                progress_idx = progress_idx + 1

            if progress_idx >= 0:
                progress = report._progress_history[progress_idx]
            else:
                progress = None

            oldvalue = threads[record._tid]["absvalue"][-1]
            oldtime = threads[record._tid]["abstime"][-1]

            cpudelta = (record._value - oldvalue) / 1000.0
            timedelta = record._timestamp - oldtime
            if timedelta == 0:
                continue
            util = cpudelta / timedelta * 100.0
            if util > 100:
                util = 100

            threads[record._tid]["absvalue"].append(record._value)
            threads[record._tid]["abstime"].append(record._timestamp)

            threads[record._tid]["xaxis"].append(record._timestamp - starttime)
            threads[record._tid]["yaxis"].append(util)
            threads[record._tid]["labels"].append(self._get_progress_label(progress))


        graphs = []
        from plotly import graph_objs as go
        for tid in threads.keys():
            graphs.append(
                go.Scatter(x=threads[tid]["xaxis"],
                           y=threads[tid]["yaxis"],
                           yaxis="y2",
                           name="VCPU %s: %s" % (tid, report._scenario._name),
                           mode="lines",
                           line={
                               "dash": "solid",
                               "color": self._next_color(),
                               "shape": "linear",
                               "width": 1
                           },
                           text=threads[tid]["labels"]))
        return graphs

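    # Collect every trace enabled by the constructor flags for a single
    # report, all relative to that report's start time.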
    def _generate_chart_report(self, report):
        graphs = []
        starttime = self._find_start_time(report)
        if self._total_guest_cpu:
            graphs.append(self._get_total_guest_cpu_graph(report, starttime))
        if self._split_guest_cpu:
            graphs.extend(self._get_split_guest_cpu_graphs(report, starttime))
        if self._qemu_cpu:
            graphs.append(self._get_qemu_cpu_graph(report, starttime))
        if self._vcpu_cpu:
            graphs.extend(self._get_vcpu_cpu_graphs(report, starttime))
        if self._migration_iters:
            graphs.append(self._get_migration_iters_graph(report, starttime))
        return graphs

    def _generate_annotation(self, starttime, progress):
        return {
            "text": progress._status,
            "x": progress._now - starttime,
            "y": 10,
        }

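    # Annotate the chart with the first occurrence of each migration
    # status (skipping "setup"), so phase changes are labelled.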
    def _generate_annotations(self, report):
        starttime = self._find_start_time(report)
        annotations = {}
        started = False
        for progress in report._progress_history:
            if progress._status == "setup":
                continue
            if progress._status not in annotations:
                annotations[progress._status] = self._generate_annotation(starttime, progress)

        return annotations.values()

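    # Render all reports into a single plotly figure and return it as
    # an embeddable HTML <div> (plotly.js itself is loaded separately
    # by the page header).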
    def _generate_chart(self):
        from plotly.offline import plot
        from plotly import graph_objs as go

        graphs = []
        yaxismax = 0
        yaxismax2 = 0
        for report in self._reports:
            graphs.extend(self._generate_chart_report(report))

            maxvalue = self._get_guest_max_value(report)
            if maxvalue > yaxismax:
                yaxismax = maxvalue

            maxvalue = self._get_qemu_max_value(report)
            if maxvalue > yaxismax2:
                yaxismax2 = maxvalue

        yaxismax += 100
        if not self._qemu_cpu:
            yaxismax2 = 110
        yaxismax2 += 10

        annotations = []
        if self._migration_iters:
            for report in self._reports:
                annotations.extend(self._generate_annotations(report))

        layout = go.Layout(title="Migration comparison",
                           xaxis={
                               "title": "Wallclock time (secs)",
                               "showgrid": False,
                           },
                           yaxis={
                               "title": "Memory update speed (ms/GB)",
                               "showgrid": False,
                               "range": [0, yaxismax],
                           },
                           yaxis2={
                               "title": "Host utilization (%)",
                               "overlaying": "y",
                               "side": "right",
                               "range": [0, yaxismax2],
                               "showgrid": False,
                           },
                           annotations=annotations)

        figure = go.Figure(data=graphs, layout=layout)

        return plot(figure,
                    show_link=False,
                    include_plotlyjs=False,
                    output_type="div")


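    # Emit the per-report HTML tables describing the test, hardware
    # and scenario configuration of each migration scenario.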
    def _generate_report(self):
        pieces = []
        for report in self._reports:
            pieces.append("""
<h3>Report %s</h3>
<table>
""" % report._scenario._name)

            pieces.append("""
  <tr class="subhead">
    <th colspan="2">Test config</th>
  </tr>
  <tr>
    <th>Emulator:</th>
    <td>%s</td>
  </tr>
  <tr>
    <th>Kernel:</th>
    <td>%s</td>
  </tr>
  <tr>
    <th>Ramdisk:</th>
    <td>%s</td>
  </tr>
  <tr>
    <th>Transport:</th>
    <td>%s</td>
  </tr>
  <tr>
    <th>Host:</th>
    <td>%s</td>
  </tr>
""" % (report._binary, report._kernel,
       report._initrd, report._transport, report._dst_host))

            hardware = report._hardware
            pieces.append("""
  <tr class="subhead">
    <th colspan="2">Hardware config</th>
  </tr>
  <tr>
    <th>CPUs:</th>
    <td>%d</td>
  </tr>
  <tr>
    <th>RAM:</th>
    <td>%d GB</td>
  </tr>
  <tr>
    <th>Source CPU bind:</th>
    <td>%s</td>
  </tr>
  <tr>
    <th>Source RAM bind:</th>
    <td>%s</td>
  </tr>
  <tr>
    <th>Dest CPU bind:</th>
    <td>%s</td>
  </tr>
  <tr>
    <th>Dest RAM bind:</th>
    <td>%s</td>
  </tr>
  <tr>
    <th>Preallocate RAM:</th>
    <td>%s</td>
  </tr>
  <tr>
    <th>Locked RAM:</th>
    <td>%s</td>
  </tr>
  <tr>
    <th>Huge pages:</th>
    <td>%s</td>
  </tr>
""" % (hardware._cpus, hardware._mem,
       ",".join(hardware._src_cpu_bind),
       ",".join(hardware._src_mem_bind),
       ",".join(hardware._dst_cpu_bind),
       ",".join(hardware._dst_mem_bind),
       "yes" if hardware._prealloc_pages else "no",
       "yes" if hardware._locked_pages else "no",
       "yes" if hardware._huge_pages else "no"))

            scenario = report._scenario
            pieces.append("""
  <tr class="subhead">
    <th colspan="2">Scenario config</th>
  </tr>
  <tr>
    <th>Max downtime:</th>
    <td>%d milli-sec</td>
  </tr>
  <tr>
    <th>Max bandwidth:</th>
    <td>%d MB/sec</td>
  </tr>
  <tr>
    <th>Max iters:</th>
    <td>%d</td>
  </tr>
  <tr>
    <th>Max time:</th>
    <td>%d secs</td>
  </tr>
  <tr>
    <th>Pause:</th>
    <td>%s</td>
  </tr>
  <tr>
    <th>Pause iters:</th>
    <td>%d</td>
  </tr>
  <tr>
    <th>Post-copy:</th>
    <td>%s</td>
  </tr>
  <tr>
    <th>Post-copy iters:</th>
    <td>%d</td>
  </tr>
  <tr>
    <th>Auto-converge:</th>
    <td>%s</td>
  </tr>
  <tr>
    <th>Auto-converge iters:</th>
    <td>%d</td>
  </tr>
  <tr>
    <th>MT compression:</th>
    <td>%s</td>
  </tr>
  <tr>
    <th>MT compression threads:</th>
    <td>%d</td>
  </tr>
  <tr>
    <th>XBZRLE compression:</th>
    <td>%s</td>
  </tr>
  <tr>
    <th>XBZRLE compression cache:</th>
    <td>%d%% of RAM</td>
  </tr>
""" % (scenario._downtime, scenario._bandwidth,
       scenario._max_iters, scenario._max_time,
       "yes" if scenario._pause else "no", scenario._pause_iters,
       "yes" if scenario._post_copy else "no", scenario._post_copy_iters,
       "yes" if scenario._auto_converge else "no", scenario._auto_converge_step,
       "yes" if scenario._compression_mt else "no", scenario._compression_mt_threads,
       "yes" if scenario._compression_xbzrle else "no", scenario._compression_xbzrle_cache))

            pieces.append("""
</table>
""")

        return "\n".join(pieces)

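    # Inline CSS applied to the report tables.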
    def _generate_style(self):
        return """
#report table tr th {
    text-align: right;
}
#report table tr td {
    text-align: left;
}
#report table tr.subhead th {
    background: rgb(192, 192, 192);
    text-align: center;
}

"""

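    # Write the complete HTML report to the given file handle; the page
    # references a local plotly.min.js next to the generated file.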
    def generate_html(self, fh):
        print("""<html>
  <head>
    <script type="text/javascript" src="plotly.min.js">
    </script>
    <style type="text/css">
%s
    </style>
    <title>Migration report</title>
  </head>
  <body>
    <h1>Migration report</h1>
    <h2>Chart summary</h2>
    <div id="chart">
""" % self._generate_style(), file=fh)
        print(self._generate_chart(), file=fh)
        print("""
    </div>
    <h2>Report details</h2>
    <div id="report">
""", file=fh)
        print(self._generate_report(), file=fh)
        print("""
    </div>
  </body>
</html>
""", file=fh)

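    # Write the report to 'filename', or to stdout when no filename
    # is given.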
    def generate(self, filename):
        if filename is None:
            self.generate_html(sys.stdout)
        else:
            with open(filename, "w") as fh:
                self.generate_html(fh)
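
# Example usage (a minimal sketch: in practice the Report objects and the
# boolean trace flags are supplied by the guestperf plot front-end, so the
# values below are illustrative only):
#
#   plot = Plot(reports,
#               migration_iters=True,
#               total_guest_cpu=True,
#               split_guest_cpu=False,
#               qemu_cpu=True,
#               vcpu_cpu=False)
#   plot.generate("migration-report.html")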