xref: /openbmc/linux/tools/perf/pmu-events/jevents.py (revision 078b39c9)
#!/usr/bin/env python3
# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
"""Convert directories of JSON events to C code."""
import argparse
import csv
from functools import lru_cache
import json
import metric
import os
import sys
from typing import (Callable, Dict, Optional, Sequence, Set, Tuple)
import collections

# Global command line arguments.
_args = None
# List of regular event tables.
_event_tables = []
# List of event tables generated from "/sys" directories.
_sys_event_tables = []
# List of regular metric tables.
_metric_tables = []
# List of metric tables generated from "/sys" directories.
_sys_metric_tables = []
# Mapping between sys event table names and sys metric table names.
_sys_event_table_to_metric_table_mapping = {}
# Map from an event name to an architecture standard
# JsonEvent. Architecture standard events are in json files in the top
# f'{_args.starting_dir}/{_args.arch}' directory.
_arch_std_events = {}
# Events to write out when the table is closed
_pending_events = []
# Name of events table to be written out
_pending_events_tblname = None
# Metrics to write out when the table is closed
_pending_metrics = []
# Name of metrics table to be written out
_pending_metrics_tblname = None
# Global BigCString shared by all structures.
_bcs = None
# Map from the name of a metric group to a description of the group.
_metricgroups = {}
# Order specific JsonEvent attributes will be visited.
_json_event_attributes = [
    # cmp_sevent related attributes.
    'name', 'pmu', 'topic', 'desc',
    # Seems useful, put it early.
    'event',
    # Short things in alphabetical order.
    'compat', 'deprecated', 'perpkg', 'unit',
    # Longer things (the last won't be iterated over during decompress).
    'long_desc'
]

# Attributes that are in pmu_metric rather than pmu_event.
_json_metric_attributes = [
    'pmu', 'metric_name', 'metric_group', 'metric_expr', 'metric_threshold',
    'desc', 'long_desc', 'unit', 'compat', 'metricgroup_no_group',
    'default_metricgroup_name', 'aggr_mode', 'event_grouping'
]
# Attributes that are bools or enum int values, encoded as '0', '1',...
_json_enum_attributes = ['aggr_mode', 'deprecated', 'event_grouping', 'perpkg']

def removesuffix(s: str, suffix: str) -> str:
  """Remove the suffix from a string

  The removesuffix function is added to str in Python 3.9. We aim for 3.6
  compatibility and so provide our own function here.
  """
  return s[0:-len(suffix)] if s.endswith(suffix) else s


def file_name_to_table_name(prefix: str, parents: Sequence[str],
                            dirname: str) -> str:
  """Generate a C table name from directory names."""
  tblname = prefix
  for p in parents:
    tblname += '_' + p
  tblname += '_' + dirname
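  # Illustrative example (not from the original source): with prefix
  # 'pmu_events_', parents ['arm'] and dirname 'cortex-a34' this builds
  # 'pmu_events__arm_cortex-a34' and returns 'pmu_events__arm_cortex_a34'.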
  return tblname.replace('-', '_')


def c_len(s: str) -> int:
  """Return the length of s as a C string

  This doesn't handle all escape characters properly. It first assumes
  all \ are for escaping, then adjusts as it will have over counted
  \\. The code uses \000 rather than \0 as a terminator as an adjacent
  number would be folded into a string of \0 (ie. "\0" + "5" doesn't
  equal a terminator followed by the number 5 but the escape of
  \05). The code adjusts for \000 but not properly for all octal, hex
  or unicode values.
  """
  try:
    utf = s.encode(encoding='utf-8', errors='strict')
  except:
    print(f'broken string {s}')
    raise
  return len(utf) - utf.count(b'\\') + utf.count(b'\\\\') - (utf.count(b'\\000') * 2)

class BigCString:
  """A class to hold many strings concatenated together.

  Generating a large number of stand-alone C strings creates a large
  number of relocations in position independent code. The BigCString
  is a helper for this case. It builds a single string that contains
  all the other C strings (to avoid memory issues the string itself is
  held as a list of strings). The offsets within the big string are
  recorded and, when stored to disk, these don't need relocation. To
  reduce the size of the string further, identical strings are merged.
  If a longer string ends with the same value as a shorter string,
  these entries are also merged.
  """
  strings: Set[str]
  big_string: Sequence[str]
  offsets: Dict[str, int]

  def __init__(self):
    self.strings = set()

  def add(self, s: str) -> None:
    """Called to add to the big string."""
    self.strings.add(s)

  def compute(self) -> None:
    """Called once all strings are added to compute the string and offsets."""

    folded_strings = {}
    # Determine if two strings can be folded, i.e. let one string use
    # the end of another. First reverse all strings and sort them.
    sorted_reversed_strings = sorted([x[::-1] for x in self.strings])

    # Strings 'xyz' and 'yz' will now be [ 'zy', 'zyx' ]. Scan forward
    # for each string to see if there is a better candidate to fold it
    # into, in the example rather than using 'yz' we can use 'xyz' at
    # an offset of 1. We record which string can be folded into which
    # in folded_strings, we don't need to record the offset as it is
    # trivially computed from the string lengths.
    for pos, s in enumerate(sorted_reversed_strings):
      best_pos = pos
      for check_pos in range(pos + 1, len(sorted_reversed_strings)):
        if sorted_reversed_strings[check_pos].startswith(s):
          best_pos = check_pos
        else:
          break
      if pos != best_pos:
        folded_strings[s[::-1]] = sorted_reversed_strings[best_pos][::-1]

    # Compute reverse mappings for debugging.
    fold_into_strings = collections.defaultdict(set)
    for key, val in folded_strings.items():
      if key != val:
        fold_into_strings[val].add(key)

    # big_string_offset is the current location within the C string
    # being appended to - comments, etc. don't count. big_string is
    # the string contents represented as a list. Strings are immutable
    # in Python and so appending to one causes memory issues, while
    # lists are mutable.
    big_string_offset = 0
    self.big_string = []
    self.offsets = {}

    # Emit all strings that aren't folded in a sorted manner.
    for s in sorted(self.strings):
      if s not in folded_strings:
        self.offsets[s] = big_string_offset
        self.big_string.append(f'/* offset={big_string_offset} */ "')
        self.big_string.append(s)
        self.big_string.append('"')
        if s in fold_into_strings:
          self.big_string.append(' /* also: ' + ', '.join(fold_into_strings[s]) + ' */')
        self.big_string.append('\n')
        big_string_offset += c_len(s)
        continue

    # Compute the offsets of the folded strings.
    for s in folded_strings.keys():
      assert s not in self.offsets
      folded_s = folded_strings[s]
      self.offsets[s] = self.offsets[folded_s] + c_len(folded_s) - c_len(s)

_bcs = BigCString()
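# Illustrative sketch (not part of the generator): if 'cache' and 'icache' are
# both added, compute() folds 'cache' into 'icache', so only "icache" is
# emitted into the big string and offsets['cache'] == offsets['icache'] + 1.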

class JsonEvent:
  """Representation of an event loaded from a json file dictionary."""

  def __init__(self, jd: dict):
    """Constructor passed the dictionary of parsed json values."""

    def llx(x: int) -> str:
      """Convert an int to a string similar to a printf modifier of %#llx."""
      return '0' if x == 0 else hex(x)

    def fixdesc(s: str) -> str:
      """Fix formatting issues in the desc string."""
      if s is None:
        return None
      return removesuffix(removesuffix(removesuffix(s, '.  '),
                                       '. '), '.').replace('\n', '\\n').replace(
                                           '\"', '\\"').replace('\r', '\\r')

    def convert_aggr_mode(aggr_mode: str) -> Optional[str]:
      """Returns the aggr_mode_class enum value associated with the JSON string."""
      if not aggr_mode:
        return None
      aggr_mode_to_enum = {
          'PerChip': '1',
          'PerCore': '2',
      }
      return aggr_mode_to_enum[aggr_mode]

    def convert_metric_constraint(metric_constraint: str) -> Optional[str]:
      """Returns the metric_event_groups enum value associated with the JSON string."""
      if not metric_constraint:
        return None
      metric_constraint_to_enum = {
          'NO_GROUP_EVENTS': '1',
          'NO_GROUP_EVENTS_NMI': '2',
          'NO_NMI_WATCHDOG': '2',
          'NO_GROUP_EVENTS_SMT': '3',
      }
      return metric_constraint_to_enum[metric_constraint]

    def lookup_msr(num: str) -> Optional[str]:
      """Convert the MSR number, or the first in a list, to the appropriate event field."""
      if not num:
        return None
      msrmap = {
          0x3F6: 'ldlat=',
          0x1A6: 'offcore_rsp=',
          0x1A7: 'offcore_rsp=',
          0x3F7: 'frontend=',
      }
      return msrmap[int(num.split(',', 1)[0], 0)]

    def real_event(name: str, event: str) -> Optional[str]:
      """Convert well-known event names to an event string, otherwise use the event argument."""
      fixed = {
          'inst_retired.any': 'event=0xc0,period=2000003',
          'inst_retired.any_p': 'event=0xc0,period=2000003',
          'cpu_clk_unhalted.ref': 'event=0x0,umask=0x03,period=2000003',
          'cpu_clk_unhalted.thread': 'event=0x3c,period=2000003',
          'cpu_clk_unhalted.core': 'event=0x3c,period=2000003',
          'cpu_clk_unhalted.thread_any': 'event=0x3c,any=1,period=2000003',
      }
      if not name:
        return None
      if name.lower() in fixed:
        return fixed[name.lower()]
      return event

    def unit_to_pmu(unit: str) -> Optional[str]:
      """Convert a JSON Unit to Linux PMU name."""
      if not unit:
        return None
      # Comment brought over from jevents.c:
      # it's not realistic to keep adding these, we need something more scalable ...
      table = {
          'CBO': 'uncore_cbox',
          'QPI LL': 'uncore_qpi',
          'SBO': 'uncore_sbox',
          'iMPH-U': 'uncore_arb',
          'CPU-M-CF': 'cpum_cf',
          'CPU-M-SF': 'cpum_sf',
          'PAI-CRYPTO': 'pai_crypto',
          'PAI-EXT': 'pai_ext',
          'UPI LL': 'uncore_upi',
          'hisi_sicl,cpa': 'hisi_sicl,cpa',
          'hisi_sccl,ddrc': 'hisi_sccl,ddrc',
          'hisi_sccl,hha': 'hisi_sccl,hha',
          'hisi_sccl,l3c': 'hisi_sccl,l3c',
          'imx8_ddr': 'imx8_ddr',
          'L3PMC': 'amd_l3',
          'DFPMC': 'amd_df',
          'cpu_core': 'cpu_core',
          'cpu_atom': 'cpu_atom',
      }
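      # The fallback below means an unlisted Unit such as 'IMC' (illustrative)
      # is reported as 'uncore_imc'.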
      return table[unit] if unit in table else f'uncore_{unit.lower()}'

    eventcode = 0
    if 'EventCode' in jd:
      eventcode = int(jd['EventCode'].split(',', 1)[0], 0)
    if 'ExtSel' in jd:
      eventcode |= int(jd['ExtSel']) << 8
    configcode = int(jd['ConfigCode'], 0) if 'ConfigCode' in jd else None
    self.name = jd['EventName'].lower() if 'EventName' in jd else None
    self.topic = ''
    self.compat = jd.get('Compat')
    self.desc = fixdesc(jd.get('BriefDescription'))
    self.long_desc = fixdesc(jd.get('PublicDescription'))
    precise = jd.get('PEBS')
    msr = lookup_msr(jd.get('MSRIndex'))
    msrval = jd.get('MSRValue')
    extra_desc = ''
    if 'Data_LA' in jd:
      extra_desc += '  Supports address when precise'
      if 'Errata' in jd:
        extra_desc += '.'
    if 'Errata' in jd:
      extra_desc += '  Spec update: ' + jd['Errata']
    self.pmu = unit_to_pmu(jd.get('Unit'))
    filter = jd.get('Filter')
    self.unit = jd.get('ScaleUnit')
    self.perpkg = jd.get('PerPkg')
    self.aggr_mode = convert_aggr_mode(jd.get('AggregationMode'))
    self.deprecated = jd.get('Deprecated')
    self.metric_name = jd.get('MetricName')
    self.metric_group = jd.get('MetricGroup')
    self.metricgroup_no_group = jd.get('MetricgroupNoGroup')
    self.default_metricgroup_name = jd.get('DefaultMetricgroupName')
    self.event_grouping = convert_metric_constraint(jd.get('MetricConstraint'))
    self.metric_expr = None
    if 'MetricExpr' in jd:
      self.metric_expr = metric.ParsePerfJson(jd['MetricExpr']).Simplify()
    # Note, the metric formula for the threshold isn't parsed as the &
    # and > have incorrect precedence.
    self.metric_threshold = jd.get('MetricThreshold')

    arch_std = jd.get('ArchStdEvent')
    if precise and self.desc and '(Precise Event)' not in self.desc:
      extra_desc += ' (Must be precise)' if precise == '2' else (' (Precise '
                                                                 'event)')
    event = f'config={llx(configcode)}' if configcode is not None else f'event={llx(eventcode)}'
    event_fields = [
        ('AnyThread', 'any='),
        ('PortMask', 'ch_mask='),
        ('CounterMask', 'cmask='),
        ('EdgeDetect', 'edge='),
        ('FCMask', 'fc_mask='),
        ('Invert', 'inv='),
        ('SampleAfterValue', 'period='),
        ('UMask', 'umask='),
    ]
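    # Illustrative example (not from the original): jd entries such as
    # EventCode '0xc4' and UMask '0x20' yield the string 'event=0xc4,umask=0x20'.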
    for key, value in event_fields:
      if key in jd and jd[key] != '0':
        event += ',' + value + jd[key]
    if filter:
      event += f',{filter}'
    if msr:
      event += f',{msr}{msrval}'
    if self.desc and extra_desc:
      self.desc += extra_desc
    if self.long_desc and extra_desc:
      self.long_desc += extra_desc
    if self.pmu:
      if self.desc and not self.desc.endswith('. '):
        self.desc += '. '
      self.desc = (self.desc if self.desc else '') + ('Unit: ' + self.pmu + ' ')
    if arch_std and arch_std.lower() in _arch_std_events:
      event = _arch_std_events[arch_std.lower()].event
      # Copy from the architecture standard event to self for undefined fields.
      for attr, value in _arch_std_events[arch_std.lower()].__dict__.items():
        if hasattr(self, attr) and not getattr(self, attr):
          setattr(self, attr, value)

    self.event = real_event(self.name, event)

  def __repr__(self) -> str:
    """String representation primarily for debugging."""
    s = '{\n'
    for attr, value in self.__dict__.items():
      if value:
        s += f'\t{attr} = {value},\n'
    return s + '}'

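  # build_c_string() encodes the attributes back to back in the order of
  # _json_event_attributes/_json_metric_attributes: enum attributes as a single
  # digit character and everything else as a '\000'-terminated string. The
  # generated decompress_event()/decompress_metric() C code walks this layout.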
  def build_c_string(self, metric: bool) -> str:
    s = ''
    for attr in _json_metric_attributes if metric else _json_event_attributes:
      x = getattr(self, attr)
      if metric and x and attr == 'metric_expr':
        # Convert parsed metric expressions into a string. Slashes
        # must be doubled in the file.
        x = x.ToPerfJson().replace('\\', '\\\\')
      if metric and x and attr == 'metric_threshold':
        x = x.replace('\\', '\\\\')
      if attr in _json_enum_attributes:
        s += x if x else '0'
      else:
        s += f'{x}\\000' if x else '\\000'
    return s

  def to_c_string(self, metric: bool) -> str:
    """Representation of the event as a C struct initializer."""

    s = self.build_c_string(metric)
    return f'{{ { _bcs.offsets[s] } }}, /* {s} */\n'


@lru_cache(maxsize=None)
def read_json_events(path: str, topic: str) -> Sequence[JsonEvent]:
  """Read json events from the specified file."""
  try:
    events = json.load(open(path), object_hook=JsonEvent)
  except BaseException as err:
    print(f"Exception processing {path}")
    raise
  metrics: list[Tuple[str, str, metric.Expression]] = []
  for event in events:
    event.topic = topic
    if event.metric_name and '-' not in event.metric_name:
      metrics.append((event.pmu, event.metric_name, event.metric_expr))
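  # Ask the metric module to rewrite expressions in terms of the other metrics
  # read from this file; any rewritten expressions replace metric_expr below.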
  updates = metric.RewriteMetricsInTermsOfOthers(metrics)
  if updates:
    for event in events:
      if event.metric_name in updates:
        # print(f'Updated {event.metric_name} from\n"{event.metric_expr}"\n'
        #       f'to\n"{updates[event.metric_name]}"')
        event.metric_expr = updates[event.metric_name]

  return events

def preprocess_arch_std_files(archpath: str) -> None:
  """Read in all architecture standard events."""
  global _arch_std_events
  for item in os.scandir(archpath):
    if item.is_file() and item.name.endswith('.json'):
      for event in read_json_events(item.path, topic=''):
        if event.name:
          _arch_std_events[event.name.lower()] = event
        if event.metric_name:
          _arch_std_events[event.metric_name.lower()] = event


def add_events_table_entries(item: os.DirEntry, topic: str) -> None:
  """Add contents of file to _pending_events table."""
  for e in read_json_events(item.path, topic):
    if e.name:
      _pending_events.append(e)
    if e.metric_name:
      _pending_metrics.append(e)


def print_pending_events() -> None:
  """Optionally close events table."""

  def event_cmp_key(j: JsonEvent) -> Tuple[bool, str, str, str, str]:
    def fix_none(s: Optional[str]) -> str:
      if s is None:
        return ''
      return s

    return (j.desc is not None, fix_none(j.topic), fix_none(j.name), fix_none(j.pmu),
            fix_none(j.metric_name))

  global _pending_events
  if not _pending_events:
    return

  global _pending_events_tblname
  if _pending_events_tblname.endswith('_sys'):
    global _sys_event_tables
    _sys_event_tables.append(_pending_events_tblname)
  else:
    global _event_tables
    _event_tables.append(_pending_events_tblname)

  _args.output_file.write(
      f'static const struct compact_pmu_event {_pending_events_tblname}[] = {{\n')

  for event in sorted(_pending_events, key=event_cmp_key):
    _args.output_file.write(event.to_c_string(metric=False))
  _pending_events = []

  _args.output_file.write('};\n\n')

def print_pending_metrics() -> None:
  """Optionally close metrics table."""

  def metric_cmp_key(j: JsonEvent) -> Tuple[bool, str, str]:
    def fix_none(s: Optional[str]) -> str:
      if s is None:
        return ''
      return s

    return (j.desc is not None, fix_none(j.pmu), fix_none(j.metric_name))

  global _pending_metrics
  if not _pending_metrics:
    return

  global _pending_metrics_tblname
  if _pending_metrics_tblname.endswith('_sys'):
    global _sys_metric_tables
    _sys_metric_tables.append(_pending_metrics_tblname)
  else:
    global _metric_tables
    _metric_tables.append(_pending_metrics_tblname)

  _args.output_file.write(
      f'static const struct compact_pmu_event {_pending_metrics_tblname}[] = {{\n')

  for metric in sorted(_pending_metrics, key=metric_cmp_key):
    _args.output_file.write(metric.to_c_string(metric=True))
  _pending_metrics = []

  _args.output_file.write('};\n\n')

def get_topic(topic: str) -> str:
  if topic.endswith('metrics.json'):
    return 'metrics'
  return removesuffix(topic, '.json').replace('-', ' ')

def preprocess_one_file(parents: Sequence[str], item: os.DirEntry) -> None:

  if item.is_dir():
    return

  # base dir or too deep
  level = len(parents)
  if level == 0 or level > 4:
    return

  # Ignore other directories. If the file name does not have a .json
  # extension, ignore it. It could be a readme.txt for instance.
  if not item.is_file() or not item.name.endswith('.json'):
    return

  if item.name == 'metricgroups.json':
    metricgroup_descriptions = json.load(open(item.path))
    for mgroup in metricgroup_descriptions:
      assert len(mgroup) > 1, parents
      description = f"{metricgroup_descriptions[mgroup]}\\000"
      mgroup = f"{mgroup}\\000"
      _bcs.add(mgroup)
      _bcs.add(description)
      _metricgroups[mgroup] = description
    return

  topic = get_topic(item.name)
  for event in read_json_events(item.path, topic):
    if event.name:
      _bcs.add(event.build_c_string(metric=False))
    if event.metric_name:
      _bcs.add(event.build_c_string(metric=True))

def process_one_file(parents: Sequence[str], item: os.DirEntry) -> None:
  """Process a JSON file during the main walk."""
  def is_leaf_dir(path: str) -> bool:
    for item in os.scandir(path):
      if item.is_dir():
        return False
    return True

  # model directory, reset topic
  if item.is_dir() and is_leaf_dir(item.path):
    print_pending_events()
    print_pending_metrics()

    global _pending_events_tblname
    _pending_events_tblname = file_name_to_table_name('pmu_events_', parents, item.name)
    global _pending_metrics_tblname
    _pending_metrics_tblname = file_name_to_table_name('pmu_metrics_', parents, item.name)

    if item.name == 'sys':
      _sys_event_table_to_metric_table_mapping[_pending_events_tblname] = _pending_metrics_tblname
    return

  # base dir or too deep
  level = len(parents)
  if level == 0 or level > 4:
    return

  # Ignore other directories. If the file name does not have a .json
  # extension, ignore it. It could be a readme.txt for instance.
  if not item.is_file() or not item.name.endswith('.json') or item.name == 'metricgroups.json':
    return

  add_events_table_entries(item, get_topic(item.name))


def print_mapping_table(archs: Sequence[str]) -> None:
  """Read the mapfile and generate the struct from cpuid string to event table."""
  _args.output_file.write("""
/* Struct used to make the PMU event table implementation opaque to callers. */
struct pmu_events_table {
        const struct compact_pmu_event *entries;
        size_t length;
};

/* Struct used to make the PMU metric table implementation opaque to callers. */
struct pmu_metrics_table {
        const struct compact_pmu_event *entries;
        size_t length;
};

/*
 * Map a CPU to its table of PMU events. The CPU is identified by the
 * cpuid field, which is an arch-specific identifier for the CPU.
 * The identifier specified in tools/perf/pmu-events/arch/xxx/mapfile
 * must match the get_cpuid_str() in tools/perf/arch/xxx/util/header.c.
 *
 * The cpuid can contain any character other than the comma.
 */
struct pmu_events_map {
        const char *arch;
        const char *cpuid;
        struct pmu_events_table event_table;
        struct pmu_metrics_table metric_table;
};

/*
 * Global table mapping each known CPU for the architecture to its
 * table of PMU events.
 */
const struct pmu_events_map pmu_events_map[] = {
""")
  for arch in archs:
    if arch == 'test':
      _args.output_file.write("""{
\t.arch = "testarch",
\t.cpuid = "testcpu",
\t.event_table = {
\t\t.entries = pmu_events__test_soc_cpu,
\t\t.length = ARRAY_SIZE(pmu_events__test_soc_cpu),
\t},
\t.metric_table = {
\t\t.entries = pmu_metrics__test_soc_cpu,
\t\t.length = ARRAY_SIZE(pmu_metrics__test_soc_cpu),
\t}
},
""")
    else:
      with open(f'{_args.starting_dir}/{arch}/mapfile.csv') as csvfile:
        table = csv.reader(csvfile)
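        # Each non-comment row supplies the cpuid in column 0 and, in column 2,
        # the model directory whose tables were generated above; rows whose
        # tables are empty are skipped.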
        first = True
        for row in table:
          # Skip the first row or any row beginning with #.
          if not first and len(row) > 0 and not row[0].startswith('#'):
            event_tblname = file_name_to_table_name('pmu_events_', [], row[2].replace('/', '_'))
            if event_tblname in _event_tables:
              event_size = f'ARRAY_SIZE({event_tblname})'
            else:
              event_tblname = 'NULL'
              event_size = '0'
            metric_tblname = file_name_to_table_name('pmu_metrics_', [], row[2].replace('/', '_'))
            if metric_tblname in _metric_tables:
              metric_size = f'ARRAY_SIZE({metric_tblname})'
            else:
              metric_tblname = 'NULL'
              metric_size = '0'
            if event_size == '0' and metric_size == '0':
              continue
            cpuid = row[0].replace('\\', '\\\\')
            _args.output_file.write(f"""{{
\t.arch = "{arch}",
\t.cpuid = "{cpuid}",
\t.event_table = {{
\t\t.entries = {event_tblname},
\t\t.length = {event_size}
\t}},
\t.metric_table = {{
\t\t.entries = {metric_tblname},
\t\t.length = {metric_size}
\t}}
}},
""")
          first = False

  _args.output_file.write("""{
\t.arch = 0,
\t.cpuid = 0,
\t.event_table = { 0, 0 },
\t.metric_table = { 0, 0 },
}
};
""")


def print_system_mapping_table() -> None:
  """C struct mapping table array for tables from /sys directories."""
  _args.output_file.write("""
struct pmu_sys_events {
\tconst char *name;
\tstruct pmu_events_table event_table;
\tstruct pmu_metrics_table metric_table;
};

static const struct pmu_sys_events pmu_sys_event_tables[] = {
""")
  printed_metric_tables = []
  for tblname in _sys_event_tables:
    _args.output_file.write(f"""\t{{
\t\t.event_table = {{
\t\t\t.entries = {tblname},
\t\t\t.length = ARRAY_SIZE({tblname})
\t\t}},""")
    metric_tblname = _sys_event_table_to_metric_table_mapping[tblname]
    if metric_tblname in _sys_metric_tables:
      _args.output_file.write(f"""
\t\t.metric_table = {{
\t\t\t.entries = {metric_tblname},
\t\t\t.length = ARRAY_SIZE({metric_tblname})
\t\t}},""")
      printed_metric_tables.append(metric_tblname)
    _args.output_file.write(f"""
\t\t.name = \"{tblname}\",
\t}},
""")
  for tblname in _sys_metric_tables:
    if tblname in printed_metric_tables:
      continue
    _args.output_file.write(f"""\t{{
\t\t.metric_table = {{
\t\t\t.entries = {tblname},
\t\t\t.length = ARRAY_SIZE({tblname})
\t\t}},
\t\t.name = \"{tblname}\",
\t}},
""")
  _args.output_file.write("""\t{
\t\t.event_table = { 0, 0 },
\t\t.metric_table = { 0, 0 },
\t},
};

static void decompress_event(int offset, struct pmu_event *pe)
{
\tconst char *p = &big_c_string[offset];
""")
  for attr in _json_event_attributes:
    _args.output_file.write(f'\n\tpe->{attr} = ')
    if attr in _json_enum_attributes:
      _args.output_file.write("*p - '0';\n")
    else:
      _args.output_file.write("(*p == '\\0' ? NULL : p);\n")
    if attr == _json_event_attributes[-1]:
      continue
    if attr in _json_enum_attributes:
      _args.output_file.write('\tp++;')
    else:
      _args.output_file.write('\twhile (*p++);')
  _args.output_file.write("""}

static void decompress_metric(int offset, struct pmu_metric *pm)
{
\tconst char *p = &big_c_string[offset];
""")
  for attr in _json_metric_attributes:
    _args.output_file.write(f'\n\tpm->{attr} = ')
    if attr in _json_enum_attributes:
      _args.output_file.write("*p - '0';\n")
    else:
      _args.output_file.write("(*p == '\\0' ? NULL : p);\n")
    if attr == _json_metric_attributes[-1]:
      continue
    if attr in _json_enum_attributes:
      _args.output_file.write('\tp++;')
    else:
      _args.output_file.write('\twhile (*p++);')
  _args.output_file.write("""}

int pmu_events_table_for_each_event(const struct pmu_events_table *table,
                                    pmu_event_iter_fn fn,
                                    void *data)
{
        for (size_t i = 0; i < table->length; i++) {
                struct pmu_event pe;
                int ret;

                decompress_event(table->entries[i].offset, &pe);
                if (!pe.name)
                        continue;
                ret = fn(&pe, table, data);
                if (ret)
                        return ret;
        }
        return 0;
}

int pmu_metrics_table_for_each_metric(const struct pmu_metrics_table *table,
                                     pmu_metric_iter_fn fn,
                                     void *data)
{
        for (size_t i = 0; i < table->length; i++) {
                struct pmu_metric pm;
                int ret;

                decompress_metric(table->entries[i].offset, &pm);
                if (!pm.metric_expr)
                        continue;
                ret = fn(&pm, table, data);
                if (ret)
                        return ret;
        }
        return 0;
}

const struct pmu_events_table *perf_pmu__find_events_table(struct perf_pmu *pmu)
{
        const struct pmu_events_table *table = NULL;
        char *cpuid = perf_pmu__getcpuid(pmu);
        int i;

        /* On some platforms which use cpus map, cpuid can be NULL for
         * PMUs other than CORE PMUs.
         */
        if (!cpuid)
                return NULL;

        i = 0;
        for (;;) {
                const struct pmu_events_map *map = &pmu_events_map[i++];
                if (!map->arch)
                        break;

                if (!strcmp_cpuid_str(map->cpuid, cpuid)) {
                        table = &map->event_table;
                        break;
                }
        }
        free(cpuid);
        return table;
}

const struct pmu_metrics_table *perf_pmu__find_metrics_table(struct perf_pmu *pmu)
{
        const struct pmu_metrics_table *table = NULL;
        char *cpuid = perf_pmu__getcpuid(pmu);
        int i;

        /* On some platforms which use cpus map, cpuid can be NULL for
         * PMUs other than CORE PMUs.
         */
        if (!cpuid)
                return NULL;

        i = 0;
        for (;;) {
                const struct pmu_events_map *map = &pmu_events_map[i++];
                if (!map->arch)
                        break;

                if (!strcmp_cpuid_str(map->cpuid, cpuid)) {
                        table = &map->metric_table;
                        break;
                }
        }
        free(cpuid);
        return table;
}

const struct pmu_events_table *find_core_events_table(const char *arch, const char *cpuid)
{
        for (const struct pmu_events_map *tables = &pmu_events_map[0];
             tables->arch;
             tables++) {
                if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid))
                        return &tables->event_table;
        }
        return NULL;
}

const struct pmu_metrics_table *find_core_metrics_table(const char *arch, const char *cpuid)
{
        for (const struct pmu_events_map *tables = &pmu_events_map[0];
             tables->arch;
             tables++) {
                if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid))
                        return &tables->metric_table;
        }
        return NULL;
}

int pmu_for_each_core_event(pmu_event_iter_fn fn, void *data)
{
        for (const struct pmu_events_map *tables = &pmu_events_map[0];
             tables->arch;
             tables++) {
                int ret = pmu_events_table_for_each_event(&tables->event_table, fn, data);

                if (ret)
                        return ret;
        }
        return 0;
}

int pmu_for_each_core_metric(pmu_metric_iter_fn fn, void *data)
{
        for (const struct pmu_events_map *tables = &pmu_events_map[0];
             tables->arch;
             tables++) {
                int ret = pmu_metrics_table_for_each_metric(&tables->metric_table, fn, data);

                if (ret)
                        return ret;
        }
        return 0;
}

const struct pmu_events_table *find_sys_events_table(const char *name)
{
        for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
             tables->name;
             tables++) {
                if (!strcmp(tables->name, name))
                        return &tables->event_table;
        }
        return NULL;
}

int pmu_for_each_sys_event(pmu_event_iter_fn fn, void *data)
{
        for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
             tables->name;
             tables++) {
                int ret = pmu_events_table_for_each_event(&tables->event_table, fn, data);

                if (ret)
                        return ret;
        }
        return 0;
}

int pmu_for_each_sys_metric(pmu_metric_iter_fn fn, void *data)
{
        for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
             tables->name;
             tables++) {
                int ret = pmu_metrics_table_for_each_metric(&tables->metric_table, fn, data);

                if (ret)
                        return ret;
        }
        return 0;
}
""")

def print_metricgroups() -> None:
  _args.output_file.write("""
static const int metricgroups[][2] = {
""")
  for mgroup in sorted(_metricgroups):
    description = _metricgroups[mgroup]
    _args.output_file.write(
        f'\t{{ {_bcs.offsets[mgroup]}, {_bcs.offsets[description]} }}, /* {mgroup} => {description} */\n'
    )
  _args.output_file.write("""
};

const char *describe_metricgroup(const char *group)
{
        int low = 0, high = (int)ARRAY_SIZE(metricgroups) - 1;

        while (low <= high) {
                int mid = (low + high) / 2;
                const char *mgroup = &big_c_string[metricgroups[mid][0]];
                int cmp = strcmp(mgroup, group);

                if (cmp == 0) {
                        return &big_c_string[metricgroups[mid][1]];
                } else if (cmp < 0) {
                        low = mid + 1;
                } else {
                        high = mid - 1;
                }
        }
        return NULL;
}
""")

def main() -> None:
  global _args

  def dir_path(path: str) -> str:
    """Validate path is a directory for argparse."""
    if os.path.isdir(path):
      return path
    raise argparse.ArgumentTypeError(f'\'{path}\' is not a valid directory')

  def ftw(path: str, parents: Sequence[str],
          action: Callable[[Sequence[str], os.DirEntry], None]) -> None:
    """Replicate the directory/file walking behavior of C's file tree walk."""
    for item in sorted(os.scandir(path), key=lambda e: e.name):
      if _args.model != 'all' and item.is_dir():
        # Check if the model matches one in _args.model.
        if len(parents) == _args.model.split(',')[0].count('/'):
          # We're testing the correct directory.
          item_path = '/'.join(parents) + ('/' if len(parents) > 0 else '') + item.name
          if 'test' not in item_path and item_path not in _args.model.split(','):
            continue
      action(parents, item)
      if item.is_dir():
        ftw(item.path, parents + [item.name], action)
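  # For example, with model 'arm/cortex-a34' only model directories whose path
  # relative to the architecture matches one of the comma-separated models (or
  # contains 'test') are descended into; 'all' disables the filtering.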

  ap = argparse.ArgumentParser()
  ap.add_argument('arch', help='Architecture name like x86')
  ap.add_argument('model', help='''Select a model such as skylake to
reduce the code size.  Normally set to "all". For architectures like
ARM64 with an implementor/model, the model must include the implementor
such as "arm/cortex-a34".''',
                  default='all')
  ap.add_argument(
      'starting_dir',
      type=dir_path,
      help='Root of tree containing architecture directories containing json files'
  )
  ap.add_argument(
      'output_file', type=argparse.FileType('w', encoding='utf-8'), nargs='?', default=sys.stdout)
  _args = ap.parse_args()

  _args.output_file.write("""
#include "pmu-events/pmu-events.h"
#include "util/header.h"
#include "util/pmu.h"
#include <string.h>
#include <stddef.h>

struct compact_pmu_event {
  int offset;
};

""")
  archs = []
  for item in os.scandir(_args.starting_dir):
    if not item.is_dir():
      continue
    if item.name == _args.arch or _args.arch == 'all' or item.name == 'test':
      archs.append(item.name)

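  # Both the requested architecture directory and the common 'test' directory
  # are expected, so fewer than two matches means the architecture is missing.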
  if len(archs) < 2:
    raise IOError(f'Missing architecture directory \'{_args.arch}\'')

  archs.sort()
  for arch in archs:
    arch_path = f'{_args.starting_dir}/{arch}'
    preprocess_arch_std_files(arch_path)
    ftw(arch_path, [], preprocess_one_file)

  _bcs.compute()
  _args.output_file.write('static const char *const big_c_string =\n')
  for s in _bcs.big_string:
    _args.output_file.write(s)
  _args.output_file.write(';\n\n')
  for arch in archs:
    arch_path = f'{_args.starting_dir}/{arch}'
    ftw(arch_path, [], process_one_file)
    print_pending_events()
    print_pending_metrics()

  print_mapping_table(archs)
  print_system_mapping_table()
  print_metricgroups()

if __name__ == '__main__':
  main()