# xref: /openbmc/linux/tools/perf/pmu-events/jevents.py (revision d3402925)
1#!/usr/bin/env python3
2# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
3"""Convert directories of JSON events to C code."""
4import argparse
5import csv
6from functools import lru_cache
7import json
8import metric
9import os
10import sys
11from typing import (Callable, Dict, Optional, Sequence, Set, Tuple)
12import collections
13
# Global command line arguments (argparse.Namespace), set in main().
_args = None
# List of regular event tables.
_event_tables = []
# List of event tables generated from "/sys" directories.
_sys_event_tables = []
# List of regular metric tables.
_metric_tables = []
# List of metric tables generated from "/sys" directories.
_sys_metric_tables = []
# Mapping between sys event table names and sys metric table names.
_sys_event_table_to_metric_table_mapping = {}
# Map from an event name to an architecture standard
# JsonEvent. Architecture standard events are in json files in the top
# f'{_args.starting_dir}/{_args.arch}' directory.
_arch_std_events = {}
# Events to write out when the table is closed
_pending_events = []
# Name of events table to be written out
_pending_events_tblname = None
# Metrics to write out when the table is closed
_pending_metrics = []
# Name of metrics table to be written out
_pending_metrics_tblname = None
# Global BigCString shared by all structures.
_bcs = None
# Order specific JsonEvent attributes will be visited. This order must
# match the field assignments emitted by decompress_event() below.
_json_event_attributes = [
    # cmp_sevent related attributes.
    'name', 'pmu', 'topic', 'desc',
    # Seems useful, put it early.
    'event',
    # Short things in alphabetical order.
    'aggr_mode', 'compat', 'deprecated', 'perpkg', 'unit',
    # Longer things (the last won't be iterated over during decompress).
    'long_desc'
]

# Attributes that are in pmu_metric rather than pmu_event. This order
# must match the field assignments emitted by decompress_metric() below.
_json_metric_attributes = [
    'metric_name', 'metric_group', 'metric_constraint', 'metric_expr', 'desc',
    'long_desc', 'unit', 'compat', 'aggr_mode'
]
57
def removesuffix(s: str, suffix: str) -> str:
  """Remove the suffix from a string

  The removesuffix function is added to str in Python 3.9. We aim for 3.6
  compatibility and so provide our own function here.
  """
  # Guard against an empty suffix: len('') == 0 would make the slice
  # s[0:-0] == s[0:0] and wrongly return the empty string.
  if suffix and s.endswith(suffix):
    return s[0:-len(suffix)]
  return s
65
66
def file_name_to_table_name(prefix: str, parents: Sequence[str],
                            dirname: str) -> str:
  """Generate a C table name from directory names."""
  # Join prefix, parents and the directory with '_' and make the result
  # a valid C identifier by mapping '-' to '_'.
  components = [prefix] + list(parents) + [dirname]
  return '_'.join(components).replace('-', '_')
75
76
def c_len(s: str) -> int:
  """Return the length of s a C string

  This doesn't handle all escape characters properly. It first assumes
  all \ are for escaping, it then adjusts as it will have over counted
  \\. The code uses \000 rather than \0 as a terminator as an adjacent
  number would be folded into a string of \0 (ie. "\0" + "5" doesn't
  equal a terminator followed by the number 5 but the escape of
  \05). The code adjusts for \000 but not properly for all octal, hex
  or unicode values.
  """
  try:
    utf = s.encode(encoding='utf-8', errors='strict')
  except UnicodeEncodeError:
    # Report to stderr: stdout may be the generated C output file.
    print(f'broken string {s}', file=sys.stderr)
    raise
  # Each lone '\' introduces an escape (2 chars -> 1), '\\' was counted
  # twice by that rule so add one back, and '\000' collapses 4 chars to 1.
  return len(utf) - utf.count(b'\\') + utf.count(b'\\\\') - (utf.count(b'\\000') * 2)
94
class BigCString:
  """A class to hold many strings concatenated together.

  Generating a large number of stand-alone C strings creates a large
  number of relocations in position independent code. The BigCString
  is a helper for this case. It builds a single string which within it
  are all the other C strings (to avoid memory issues the string
  itself is held as a list of strings). The offsets within the big
  string are recorded and when stored to disk these don't need
  relocation. To reduce the size of the string further, identical
  strings are merged. If a longer string ends-with the same value as a
  shorter string, these entries are also merged.
  """
  strings: Set[str]
  big_string: Sequence[str]
  offsets: Dict[str, int]

  def __init__(self):
    self.strings = set()

  def add(self, s: str) -> None:
    """Called to add to the big string."""
    self.strings.add(s)

  def compute(self) -> None:
    """Called once all strings are added to compute the string and offsets."""

    folded_strings = {}
    # Determine if two strings can be folded, ie. let 1 string use the
    # end of another. First reverse all strings and sort them.
    sorted_reversed_strings = sorted([x[::-1] for x in self.strings])

    # Strings 'xyz' and 'yz' will now be [ 'zy', 'zyx' ]. Scan forward
    # for each string to see if there is a better candidate to fold it
    # into, in the example rather than using 'yz' we can use 'xyz' at
    # an offset of 1. We record which string can be folded into which
    # in folded_strings, we don't need to record the offset as it is
    # trivially computed from the string lengths.
    for pos, s in enumerate(sorted_reversed_strings):
      best_pos = pos
      for check_pos in range(pos + 1, len(sorted_reversed_strings)):
        if sorted_reversed_strings[check_pos].startswith(s):
          best_pos = check_pos
        else:
          break
      if pos != best_pos:
        folded_strings[s[::-1]] = sorted_reversed_strings[best_pos][::-1]

    # Compute reverse mappings for debugging.
    fold_into_strings = collections.defaultdict(set)
    for key, val in folded_strings.items():
      if key != val:
        fold_into_strings[val].add(key)

    # big_string_offset is the current location within the C string
    # being appended to - comments, etc. don't count. big_string is
    # the string contents represented as a list. Strings are immutable
    # in Python and so appending to one causes memory issues, while
    # lists are mutable.
    big_string_offset = 0
    self.big_string = []
    self.offsets = {}

    # Emit all strings that aren't folded in a sorted manner.
    for s in sorted(self.strings):
      # Guard clause (the original had a redundant trailing 'continue').
      if s in folded_strings:
        continue
      self.offsets[s] = big_string_offset
      self.big_string.append(f'/* offset={big_string_offset} */ "')
      self.big_string.append(s)
      self.big_string.append('"')
      if s in fold_into_strings:
        self.big_string.append(' /* also: ' + ', '.join(fold_into_strings[s]) + ' */')
      self.big_string.append('\n')
      big_string_offset += c_len(s)

    # Compute the offsets of the folded strings.
    for s in folded_strings.keys():
      assert s not in self.offsets
      folded_s = folded_strings[s]
      self.offsets[s] = self.offsets[folded_s] + c_len(folded_s) - c_len(s)
176
# Instantiate the single shared BigCString declared above.
_bcs = BigCString()
178
class JsonEvent:
  """Representation of an event loaded from a json file dictionary."""

  def __init__(self, jd: dict):
    """Constructor passed the dictionary of parsed json values."""

    def llx(x: int) -> str:
      """Convert an int to a string similar to a printf modifier of %#llx."""
      return '0' if x == 0 else hex(x)

    def fixdesc(s: str) -> Optional[str]:
      """Fix formatting issue for the desc string."""
      if s is None:
        return None
      # Strip a trailing period (and any trailing spaces after it), then
      # escape newlines, quotes and carriage returns for C output.
      for suffix in ('.  ', '. ', '.'):
        s = removesuffix(s, suffix)
      return s.replace('\n', '\\n').replace('"', '\\"').replace('\r', '\\r')

    def convert_aggr_mode(aggr_mode: str) -> Optional[str]:
      """Returns the aggr_mode_class enum value associated with the JSON string."""
      if not aggr_mode:
        return None
      aggr_mode_to_enum = {
          'PerChip': '1',
          'PerCore': '2',
      }
      return aggr_mode_to_enum[aggr_mode]

    def lookup_msr(num: str) -> Optional[str]:
      """Converts the msr number, or first in a list to the appropriate event field."""
      if not num:
        return None
      msrmap = {
          0x3F6: 'ldlat=',
          0x1A6: 'offcore_rsp=',
          0x1A7: 'offcore_rsp=',
          0x3F7: 'frontend=',
      }
      return msrmap[int(num.split(',', 1)[0], 0)]

    def real_event(name: str, event: str) -> Optional[str]:
      """Convert well known event names to an event string otherwise use the event argument."""
      fixed = {
          'inst_retired.any': 'event=0xc0,period=2000003',
          'inst_retired.any_p': 'event=0xc0,period=2000003',
          'cpu_clk_unhalted.ref': 'event=0x0,umask=0x03,period=2000003',
          'cpu_clk_unhalted.thread': 'event=0x3c,period=2000003',
          'cpu_clk_unhalted.core': 'event=0x3c,period=2000003',
          'cpu_clk_unhalted.thread_any': 'event=0x3c,any=1,period=2000003',
      }
      if not name:
        return None
      if name.lower() in fixed:
        return fixed[name.lower()]
      return event

    def unit_to_pmu(unit: str) -> Optional[str]:
      """Convert a JSON Unit to Linux PMU name."""
      if not unit:
        return None
      # Comment brought over from jevents.c:
      # it's not realistic to keep adding these, we need something more scalable ...
      table = {
          'CBO': 'uncore_cbox',
          'QPI LL': 'uncore_qpi',
          'SBO': 'uncore_sbox',
          'iMPH-U': 'uncore_arb',
          'CPU-M-CF': 'cpum_cf',
          'CPU-M-SF': 'cpum_sf',
          'PAI-CRYPTO': 'pai_crypto',
          'UPI LL': 'uncore_upi',
          'hisi_sicl,cpa': 'hisi_sicl,cpa',
          'hisi_sccl,ddrc': 'hisi_sccl,ddrc',
          'hisi_sccl,hha': 'hisi_sccl,hha',
          'hisi_sccl,l3c': 'hisi_sccl,l3c',
          'imx8_ddr': 'imx8_ddr',
          'L3PMC': 'amd_l3',
          'DFPMC': 'amd_df',
          'cpu_core': 'cpu_core',
          'cpu_atom': 'cpu_atom',
      }
      return table[unit] if unit in table else f'uncore_{unit.lower()}'

    eventcode = 0
    if 'EventCode' in jd:
      eventcode = int(jd['EventCode'].split(',', 1)[0], 0)
    if 'ExtSel' in jd:
      eventcode |= int(jd['ExtSel']) << 8
    configcode = int(jd['ConfigCode'], 0) if 'ConfigCode' in jd else None
    self.name = jd['EventName'].lower() if 'EventName' in jd else None
    self.topic = ''
    self.compat = jd.get('Compat')
    self.desc = fixdesc(jd.get('BriefDescription'))
    self.long_desc = fixdesc(jd.get('PublicDescription'))
    precise = jd.get('PEBS')
    msr = lookup_msr(jd.get('MSRIndex'))
    msrval = jd.get('MSRValue')
    extra_desc = ''
    if 'Data_LA' in jd:
      extra_desc += '  Supports address when precise'
      if 'Errata' in jd:
        extra_desc += '.'
    if 'Errata' in jd:
      extra_desc += '  Spec update: ' + jd['Errata']
    self.pmu = unit_to_pmu(jd.get('Unit'))
    # Named event_filter rather than filter so the builtin isn't shadowed.
    event_filter = jd.get('Filter')
    self.unit = jd.get('ScaleUnit')
    self.perpkg = jd.get('PerPkg')
    self.aggr_mode = convert_aggr_mode(jd.get('AggregationMode'))
    self.deprecated = jd.get('Deprecated')
    self.metric_name = jd.get('MetricName')
    self.metric_group = jd.get('MetricGroup')
    self.metric_constraint = jd.get('MetricConstraint')
    self.metric_expr = None
    if 'MetricExpr' in jd:
      self.metric_expr = metric.ParsePerfJson(jd['MetricExpr']).Simplify()

    arch_std = jd.get('ArchStdEvent')
    if precise and self.desc and '(Precise Event)' not in self.desc:
      extra_desc += ' (Must be precise)' if precise == '2' else (' (Precise '
                                                                 'event)')
    event = f'config={llx(configcode)}' if configcode is not None else f'event={llx(eventcode)}'
    event_fields = [
        ('AnyThread', 'any='),
        ('PortMask', 'ch_mask='),
        ('CounterMask', 'cmask='),
        ('EdgeDetect', 'edge='),
        ('FCMask', 'fc_mask='),
        ('Invert', 'inv='),
        ('SampleAfterValue', 'period='),
        ('UMask', 'umask='),
    ]
    for key, value in event_fields:
      if key in jd and jd[key] != '0':
        event += ',' + value + jd[key]
    if event_filter:
      event += f',{event_filter}'
    if msr:
      event += f',{msr}{msrval}'
    if self.desc and extra_desc:
      self.desc += extra_desc
    if self.long_desc and extra_desc:
      self.long_desc += extra_desc
    if self.pmu:
      if self.desc and not self.desc.endswith('. '):
        self.desc += '. '
      self.desc = (self.desc if self.desc else '') + ('Unit: ' + self.pmu + ' ')
    if arch_std and arch_std.lower() in _arch_std_events:
      event = _arch_std_events[arch_std.lower()].event
      # Copy from the architecture standard event to self for undefined fields.
      for attr, value in _arch_std_events[arch_std.lower()].__dict__.items():
        if hasattr(self, attr) and not getattr(self, attr):
          setattr(self, attr, value)

    self.event = real_event(self.name, event)

  def __repr__(self) -> str:
    """String representation primarily for debugging."""
    s = '{\n'
    for attr, value in self.__dict__.items():
      if value:
        s += f'\t{attr} = {value},\n'
    return s + '}'

  def build_c_string(self, metric: bool) -> str:
    """Concatenate the attribute values into one '\\000' separated string."""
    s = ''
    for attr in _json_metric_attributes if metric else _json_event_attributes:
      x = getattr(self, attr)
      if metric and x and attr == 'metric_expr':
        # Convert parsed metric expressions into a string. Slashes
        # must be doubled in the file.
        x = x.ToPerfJson().replace('\\', '\\\\')
      s += f'{x}\\000' if x else '\\000'
    return s

  def to_c_string(self, metric: bool) -> str:
    """Representation of the event as a C struct initializer."""

    s = self.build_c_string(metric)
    return f'{{ { _bcs.offsets[s] } }}, /* {s} */\n'
359
360
@lru_cache(maxsize=None)
def read_json_events(path: str, topic: str) -> Sequence[JsonEvent]:
  """Read json events from the specified file."""
  try:
    events = json.load(open(path), object_hook=JsonEvent)
  except BaseException:
    # Name the offending file before propagating; the json exception
    # alone doesn't identify which input was being parsed.
    print(f"Exception processing {path}")
    raise
  # Collect (name, expression) pairs so metrics referring to other
  # metrics by name can be rewritten in terms of their definitions.
  metrics: list[Tuple[str, metric.Expression]] = []
  for event in events:
    event.topic = topic
    if event.metric_name and '-' not in event.metric_name:
      metrics.append((event.metric_name, event.metric_expr))
  updates = metric.RewriteMetricsInTermsOfOthers(metrics)
  if updates:
    for event in events:
      if event.metric_name in updates:
        # print(f'Updated {event.metric_name} from\n"{event.metric_expr}"\n'
        #       f'to\n"{updates[event.metric_name]}"')
        event.metric_expr = updates[event.metric_name]

  return events
383
def preprocess_arch_std_files(archpath: str) -> None:
  """Read in all architecture standard events.

  The events are indexed into _arch_std_events by lowered event and
  metric name. No 'global' statement is needed: the dict is mutated,
  never rebound.
  """
  for item in os.scandir(archpath):
    if not item.is_file() or not item.name.endswith('.json'):
      continue
    for event in read_json_events(item.path, topic=''):
      if event.name:
        _arch_std_events[event.name.lower()] = event
      if event.metric_name:
        _arch_std_events[event.metric_name.lower()] = event
394
395
def add_events_table_entries(item: os.DirEntry, topic: str) -> None:
  """Add contents of file to _pending_events table."""
  events = read_json_events(item.path, topic)
  for evt in events:
    # An entry may describe an event, a metric, or both.
    if evt.name:
      _pending_events.append(evt)
    if evt.metric_name:
      _pending_metrics.append(evt)
403
404
def print_pending_events() -> None:
  """Optionally close events table.

  Writes the accumulated _pending_events to the output as a C array and
  records the table name so the mapping tables can reference it.
  """

  def event_cmp_key(j: JsonEvent) -> Tuple[bool, str, str, str, str]:
    """Sort key: described events first, then by topic/name/pmu/metric."""
    def fix_none(s: Optional[str]) -> str:
      return '' if s is None else s

    return (j.desc is not None, fix_none(j.topic), fix_none(j.name),
            fix_none(j.pmu), fix_none(j.metric_name))

  # 'global' is needed only for _pending_events, which is rebound below;
  # the table-name lists are merely mutated (the original declared a
  # non-existent 'event_tables' here by mistake).
  global _pending_events
  if not _pending_events:
    return

  if _pending_events_tblname.endswith('_sys'):
    _sys_event_tables.append(_pending_events_tblname)
  else:
    _event_tables.append(_pending_events_tblname)

  _args.output_file.write(
      f'static const struct compact_pmu_event {_pending_events_tblname}[] = {{\n')

  for event in sorted(_pending_events, key=event_cmp_key):
    _args.output_file.write(event.to_c_string(metric=False))
  _pending_events = []

  _args.output_file.write('};\n\n')
437
def print_pending_metrics() -> None:
  """Optionally close metrics table.

  Writes the accumulated _pending_metrics to the output as a C array and
  records the table name so the mapping tables can reference it.
  """

  def metric_cmp_key(j: JsonEvent) -> Tuple[bool, str, str]:
    """Sort key: described metrics first, then by pmu and metric name."""
    def fix_none(s: Optional[str]) -> str:
      return '' if s is None else s

    return (j.desc is not None, fix_none(j.pmu), fix_none(j.metric_name))

  # 'global' is needed only for _pending_metrics, which is rebound below;
  # the table-name lists are merely mutated (the original declared a
  # non-existent 'metric_tables' here by mistake).
  global _pending_metrics
  if not _pending_metrics:
    return

  if _pending_metrics_tblname.endswith('_sys'):
    _sys_metric_tables.append(_pending_metrics_tblname)
  else:
    _metric_tables.append(_pending_metrics_tblname)

  _args.output_file.write(
      f'static const struct compact_pmu_event {_pending_metrics_tblname}[] = {{\n')

  # Don't name the loop variable 'metric': that shadows the metric module.
  for m in sorted(_pending_metrics, key=metric_cmp_key):
    _args.output_file.write(m.to_c_string(metric=True))
  _pending_metrics = []

  _args.output_file.write('};\n\n')
469
def get_topic(topic: str) -> str:
  """Derive a topic string from a JSON file name."""
  if topic.endswith('metrics.json'):
    return 'metrics'
  # Drop the '.json' extension and turn '-' separators into spaces.
  return removesuffix(topic, '.json').replace('-', ' ')
474
def preprocess_one_file(parents: Sequence[str], item: os.DirEntry) -> None:
  """First pass over a file: register its event strings with _bcs."""
  if item.is_dir():
    return

  # Skip the base directory and anything nested too deeply.
  if not 1 <= len(parents) <= 4:
    return

  # Only JSON files are of interest; a readme.txt, say, is ignored.
  if not item.is_file() or not item.name.endswith('.json'):
    return

  topic = get_topic(item.name)
  for event in read_json_events(item.path, topic):
    if event.name:
      _bcs.add(event.build_c_string(metric=False))
    if event.metric_name:
      _bcs.add(event.build_c_string(metric=True))
496
def process_one_file(parents: Sequence[str], item: os.DirEntry) -> None:
  """Process a JSON file during the main walk."""

  def is_leaf_dir(path: str) -> bool:
    """True when path has no subdirectories."""
    # Loop variable renamed from 'item', which shadowed the outer parameter.
    for child in os.scandir(path):
      if child.is_dir():
        return False
    return True

  # model directory, reset topic
  if item.is_dir() and is_leaf_dir(item.path):
    # Flush any tables accumulated for the previous model directory.
    print_pending_events()
    print_pending_metrics()

    global _pending_events_tblname
    _pending_events_tblname = file_name_to_table_name('pmu_events_', parents, item.name)
    global _pending_metrics_tblname
    _pending_metrics_tblname = file_name_to_table_name('pmu_metrics_', parents, item.name)

    if item.name == 'sys':
      _sys_event_table_to_metric_table_mapping[_pending_events_tblname] = _pending_metrics_tblname
    return

  # base dir or too deep
  level = len(parents)
  if level == 0 or level > 4:
    return

  # Ignore other directories. If the file name does not have a .json
  # extension, ignore it. It could be a readme.txt for instance.
  if not item.is_file() or not item.name.endswith('.json'):
    return

  add_events_table_entries(item, get_topic(item.name))
530
531
def print_mapping_table(archs: Sequence[str]) -> None:
  """Read the mapfile and generate the struct from cpuid string to event table."""
  _args.output_file.write("""
/* Struct used to make the PMU event table implementation opaque to callers. */
struct pmu_events_table {
        const struct compact_pmu_event *entries;
        size_t length;
};

/* Struct used to make the PMU metric table implementation opaque to callers. */
struct pmu_metrics_table {
        const struct compact_pmu_event *entries;
        size_t length;
};

/*
 * Map a CPU to its table of PMU events. The CPU is identified by the
 * cpuid field, which is an arch-specific identifier for the CPU.
 * The identifier specified in tools/perf/pmu-events/arch/xxx/mapfile
 * must match the get_cpuid_str() in tools/perf/arch/xxx/util/header.c)
 *
 * The  cpuid can contain any character other than the comma.
 */
struct pmu_events_map {
        const char *arch;
        const char *cpuid;
        struct pmu_events_table event_table;
        struct pmu_metrics_table metric_table;
};

/*
 * Global table mapping each known CPU for the architecture to its
 * table of PMU events.
 */
const struct pmu_events_map pmu_events_map[] = {
""")
  for arch in archs:
    if arch == 'test':
      # The test entry is hard coded rather than read from a mapfile.
      _args.output_file.write("""{
\t.arch = "testarch",
\t.cpuid = "testcpu",
\t.event_table = {
\t\t.entries = pmu_events__test_soc_cpu,
\t\t.length = ARRAY_SIZE(pmu_events__test_soc_cpu),
\t},
\t.metric_table = {
\t\t.entries = pmu_metrics__test_soc_cpu,
\t\t.length = ARRAY_SIZE(pmu_metrics__test_soc_cpu),
\t}
},
""")
    else:
      with open(f'{_args.starting_dir}/{arch}/mapfile.csv') as csvfile:
        table = csv.reader(csvfile)
        first = True
        for row in table:
          # Skip the first row or any row beginning with #.
          if not first and len(row) > 0 and not row[0].startswith('#'):
            # row[0] holds the cpuid and row[2] the model directory name
            # the tables were generated from.
            event_tblname = file_name_to_table_name('pmu_events_', [], row[2].replace('/', '_'))
            if event_tblname in _event_tables:
              event_size = f'ARRAY_SIZE({event_tblname})'
            else:
              event_tblname = 'NULL'
              event_size = '0'
            metric_tblname = file_name_to_table_name('pmu_metrics_', [], row[2].replace('/', '_'))
            if metric_tblname in _metric_tables:
              metric_size = f'ARRAY_SIZE({metric_tblname})'
            else:
              metric_tblname = 'NULL'
              metric_size = '0'
            # Omit entries whose event and metric tables are both empty.
            if event_size == '0' and metric_size == '0':
              continue
            cpuid = row[0].replace('\\', '\\\\')
            _args.output_file.write(f"""{{
\t.arch = "{arch}",
\t.cpuid = "{cpuid}",
\t.event_table = {{
\t\t.entries = {event_tblname},
\t\t.length = {event_size}
\t}},
\t.metric_table = {{
\t\t.entries = {metric_tblname},
\t\t.length = {metric_size}
\t}}
}},
""")
          first = False

  # Zero-filled sentinel entry terminating the array.
  _args.output_file.write("""{
\t.arch = 0,
\t.cpuid = 0,
\t.event_table = { 0, 0 },
\t.metric_table = { 0, 0 },
}
};
""")
628
629
def print_system_mapping_table() -> None:
  """C struct mapping table array for tables from /sys directories."""
  _args.output_file.write("""
struct pmu_sys_events {
\tconst char *name;
\tstruct pmu_events_table event_table;
\tstruct pmu_metrics_table metric_table;
};

static const struct pmu_sys_events pmu_sys_event_tables[] = {
""")
  printed_metric_tables = []
  for tblname in _sys_event_tables:
    _args.output_file.write(f"""\t{{
\t\t.event_table = {{
\t\t\t.entries = {tblname},
\t\t\t.length = ARRAY_SIZE({tblname})
\t\t}},""")
    metric_tblname = _sys_event_table_to_metric_table_mapping[tblname]
    if metric_tblname in _sys_metric_tables:
      _args.output_file.write(f"""
\t\t.metric_table = {{
\t\t\t.entries = {metric_tblname},
\t\t\t.length = ARRAY_SIZE({metric_tblname})
\t\t}},""")
      printed_metric_tables.append(metric_tblname)
    _args.output_file.write(f"""
\t\t.name = \"{tblname}\",
\t}},
""")
  # Emit any sys metric tables that had no matching event table above.
  for tblname in _sys_metric_tables:
    if tblname in printed_metric_tables:
      continue
    _args.output_file.write(f"""\t{{
\t\t.metric_table = {{
\t\t\t.entries = {tblname},
\t\t\t.length = ARRAY_SIZE({tblname})
\t\t}},
\t\t.name = \"{tblname}\",
\t}},
""")
  # Zero-filled sentinel terminating the array.
  _args.output_file.write("""\t{
\t\t.event_table = { 0, 0 },
\t\t.metric_table = { 0, 0 },
\t},
};

static void decompress_event(int offset, struct pmu_event *pe)
{
\tconst char *p = &big_c_string[offset];
""")
  # One assignment per attribute, in _json_event_attributes order; after
  # each value (except the last) advance p past the \000 terminator.
  for attr in _json_event_attributes:
    _args.output_file.write(f"""
\tpe->{attr} = (*p == '\\0' ? NULL : p);
""")
    if attr == _json_event_attributes[-1]:
      continue
    _args.output_file.write('\twhile (*p++);')
  _args.output_file.write("""}

static void decompress_metric(int offset, struct pmu_metric *pm)
{
\tconst char *p = &big_c_string[offset];
""")
  # Same scheme as decompress_event, in _json_metric_attributes order.
  for attr in _json_metric_attributes:
    _args.output_file.write(f"""
\tpm->{attr} = (*p == '\\0' ? NULL : p);
""")
    if attr == _json_metric_attributes[-1]:
      continue
    _args.output_file.write('\twhile (*p++);')
  _args.output_file.write("""}

int pmu_events_table_for_each_event(const struct pmu_events_table *table,
                                    pmu_event_iter_fn fn,
                                    void *data)
{
        for (size_t i = 0; i < table->length; i++) {
                struct pmu_event pe;
                int ret;

                decompress_event(table->entries[i].offset, &pe);
                if (!pe.name)
                        continue;
                ret = fn(&pe, table, data);
                if (ret)
                        return ret;
        }
        return 0;
}

int pmu_metrics_table_for_each_metric(const struct pmu_metrics_table *table,
                                     pmu_metric_iter_fn fn,
                                     void *data)
{
        for (size_t i = 0; i < table->length; i++) {
                struct pmu_metric pm;
                int ret;

                decompress_metric(table->entries[i].offset, &pm);
                if (!pm.metric_expr)
                        continue;
                ret = fn(&pm, table, data);
                if (ret)
                        return ret;
        }
        return 0;
}

const struct pmu_events_table *perf_pmu__find_events_table(struct perf_pmu *pmu)
{
        const struct pmu_events_table *table = NULL;
        char *cpuid = perf_pmu__getcpuid(pmu);
        int i;

        /* on some platforms which uses cpus map, cpuid can be NULL for
         * PMUs other than CORE PMUs.
         */
        if (!cpuid)
                return NULL;

        i = 0;
        for (;;) {
                const struct pmu_events_map *map = &pmu_events_map[i++];
                if (!map->arch)
                        break;

                if (!strcmp_cpuid_str(map->cpuid, cpuid)) {
                        table = &map->event_table;
                        break;
                }
        }
        free(cpuid);
        return table;
}

const struct pmu_metrics_table *perf_pmu__find_metrics_table(struct perf_pmu *pmu)
{
        const struct pmu_metrics_table *table = NULL;
        char *cpuid = perf_pmu__getcpuid(pmu);
        int i;

        /* on some platforms which uses cpus map, cpuid can be NULL for
         * PMUs other than CORE PMUs.
         */
        if (!cpuid)
                return NULL;

        i = 0;
        for (;;) {
                const struct pmu_events_map *map = &pmu_events_map[i++];
                if (!map->arch)
                        break;

                if (!strcmp_cpuid_str(map->cpuid, cpuid)) {
                        table = &map->metric_table;
                        break;
                }
        }
        free(cpuid);
        return table;
}

const struct pmu_events_table *find_core_events_table(const char *arch, const char *cpuid)
{
        for (const struct pmu_events_map *tables = &pmu_events_map[0];
             tables->arch;
             tables++) {
                if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid))
                        return &tables->event_table;
        }
        return NULL;
}

const struct pmu_metrics_table *find_core_metrics_table(const char *arch, const char *cpuid)
{
        for (const struct pmu_events_map *tables = &pmu_events_map[0];
             tables->arch;
             tables++) {
                if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid))
                        return &tables->metric_table;
        }
        return NULL;
}

int pmu_for_each_core_event(pmu_event_iter_fn fn, void *data)
{
        for (const struct pmu_events_map *tables = &pmu_events_map[0];
             tables->arch;
             tables++) {
                int ret = pmu_events_table_for_each_event(&tables->event_table, fn, data);

                if (ret)
                        return ret;
        }
        return 0;
}

int pmu_for_each_core_metric(pmu_metric_iter_fn fn, void *data)
{
        for (const struct pmu_events_map *tables = &pmu_events_map[0];
             tables->arch;
             tables++) {
                int ret = pmu_metrics_table_for_each_metric(&tables->metric_table, fn, data);

                if (ret)
                        return ret;
        }
        return 0;
}

const struct pmu_events_table *find_sys_events_table(const char *name)
{
        for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
             tables->name;
             tables++) {
                if (!strcmp(tables->name, name))
                        return &tables->event_table;
        }
        return NULL;
}

int pmu_for_each_sys_event(pmu_event_iter_fn fn, void *data)
{
        for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
             tables->name;
             tables++) {
                int ret = pmu_events_table_for_each_event(&tables->event_table, fn, data);

                if (ret)
                        return ret;
        }
        return 0;
}

int pmu_for_each_sys_metric(pmu_metric_iter_fn fn, void *data)
{
        for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
             tables->name;
             tables++) {
                int ret = pmu_metrics_table_for_each_metric(&tables->metric_table, fn, data);

                if (ret)
                        return ret;
        }
        return 0;
}
""")
878
879
def main() -> None:
  """Parse arguments and generate the pmu-events C source on output_file."""
  global _args

  def dir_path(path: str) -> str:
    """Validate path is a directory for argparse."""
    if os.path.isdir(path):
      return path
    raise argparse.ArgumentTypeError(f'\'{path}\' is not a valid directory')

  def ftw(path: str, parents: Sequence[str],
          action: Callable[[Sequence[str], os.DirEntry], None]) -> None:
    """Replicate the directory/file walking behavior of C's file tree walk."""
    for item in os.scandir(path):
      if _args.model != 'all' and item.is_dir():
        # Check if the model matches one in _args.model.
        if len(parents) == _args.model.split(',')[0].count('/'):
          # We're testing the correct directory.
          item_path = '/'.join(parents) + ('/' if len(parents) > 0 else '') + item.name
          if 'test' not in item_path and item_path not in _args.model.split(','):
            continue
      action(parents, item)
      if item.is_dir():
        ftw(item.path, parents + [item.name], action)

  ap = argparse.ArgumentParser()
  ap.add_argument('arch', help='Architecture name like x86')
  ap.add_argument('model', help='''Select a model such as skylake to
reduce the code size.  Normally set to "all". For architectures like
ARM64 with an implementor/model, the model must include the implementor
such as "arm/cortex-a34".''',
                  default='all')
  ap.add_argument(
      'starting_dir',
      type=dir_path,
      help='Root of tree containing architecture directories containing json files'
  )
  ap.add_argument(
      'output_file', type=argparse.FileType('w', encoding='utf-8'), nargs='?', default=sys.stdout)
  _args = ap.parse_args()

  # Emit the C file header and the offset-holding struct the tables use.
  _args.output_file.write("""
#include "pmu-events/pmu-events.h"
#include "util/header.h"
#include "util/pmu.h"
#include <string.h>
#include <stddef.h>

struct compact_pmu_event {
  int offset;
};

""")
  # Select the architecture directories to generate; 'test' is always
  # picked up alongside the requested (or 'all') architecture.
  archs = []
  for item in os.scandir(_args.starting_dir):
    if not item.is_dir():
      continue
    if item.name == _args.arch or _args.arch == 'all' or item.name == 'test':
      archs.append(item.name)

  # Expect at least the requested architecture plus the 'test' directory.
  if len(archs) < 2:
    raise IOError(f'Missing architecture directory \'{_args.arch}\'')

  archs.sort()
  # First pass: collect every string so _bcs can compute offsets.
  for arch in archs:
    arch_path = f'{_args.starting_dir}/{arch}'
    preprocess_arch_std_files(arch_path)
    ftw(arch_path, [], preprocess_one_file)

  _bcs.compute()
  _args.output_file.write('static const char *const big_c_string =\n')
  for s in _bcs.big_string:
    _args.output_file.write(s)
  _args.output_file.write(';\n\n')
  # Second pass: emit the per-model tables that index into big_c_string.
  for arch in archs:
    arch_path = f'{_args.starting_dir}/{arch}'
    ftw(arch_path, [], process_one_file)
    print_pending_events()
    print_pending_metrics()

  print_mapping_table(archs)
  print_system_mapping_table()
961
962
# Run only when executed as a script, not when imported.
if __name__ == '__main__':
  main()
965