#!/usr/bin/env python3
# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
"""Convert directories of JSON events to C code."""
import argparse
import csv
from functools import lru_cache
import json
import metric
import os
import sys
from typing import (Callable, Dict, Optional, Sequence, Set, Tuple)
import collections

# Global command line arguments.
_args = None
# List of regular event tables.
_event_tables = []
# List of event tables generated from "/sys" directories.
_sys_event_tables = []
# List of regular metric tables.
_metric_tables = []
# List of metric tables generated from "/sys" directories.
_sys_metric_tables = []
# Mapping between sys event table names and sys metric table names.
_sys_event_table_to_metric_table_mapping = {}
# Map from an event name to an architecture standard
# JsonEvent. Architecture standard events are in json files in the top
# f'{_args.starting_dir}/{_args.arch}' directory.
_arch_std_events = {}
# Events to write out when the table is closed
_pending_events = []
# Name of events table to be written out
_pending_events_tblname = None
# Metrics to write out when the table is closed
_pending_metrics = []
# Name of metrics table to be written out
_pending_metrics_tblname = None
# Global BigCString shared by all structures.
_bcs = None
# Order in which specific JsonEvent attributes will be visited.
_json_event_attributes = [
    # cmp_sevent related attributes.
    'name', 'pmu', 'topic', 'desc',
    # Seems useful, put it early.
    'event',
    # Short things in alphabetical order.
    'compat', 'deprecated', 'perpkg', 'unit',
    # Longer things (the last won't be iterated over during decompress).
    'long_desc'
]

# Attributes that are in pmu_metric rather than pmu_event.
_json_metric_attributes = [
    'metric_name', 'metric_group', 'metric_expr', 'metric_threshold', 'desc',
    'long_desc', 'unit', 'compat', 'aggr_mode', 'event_grouping'
]
# Attributes that are bools or enum int values, encoded as '0', '1',...
_json_enum_attributes = ['aggr_mode', 'deprecated', 'event_grouping', 'perpkg']

def removesuffix(s: str, suffix: str) -> str:
  """Remove the suffix from a string

  The removesuffix function is added to str in Python 3.9. We aim for 3.6
  compatibility and so provide our own function here.
  """
  return s[0:-len(suffix)] if s.endswith(suffix) else s

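# Editorial sketch (not part of the upstream script): removesuffix() mirrors
# str.removesuffix() from Python 3.9; a suffix that isn't present is left
# alone.
def _removesuffix_example() -> None:
  assert removesuffix('cache.json', '.json') == 'cache'
  assert removesuffix('cache', '.json') == 'cache'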

def file_name_to_table_name(prefix: str, parents: Sequence[str],
                            dirname: str) -> str:
  """Generate a C table name from directory names."""
  tblname = prefix
  for p in parents:
    tblname += '_' + p
  tblname += '_' + dirname
  return tblname.replace('-', '_')

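# Editorial sketch (assumed example arguments, not part of the upstream
# script): the prefix, parent directory names and the leaf directory name are
# joined with '_', and '-' is rewritten to '_' so the result is a valid C
# identifier.
def _file_name_to_table_name_example() -> None:
  assert file_name_to_table_name('pmu_events_', [], 'cortex-a34') == \
      'pmu_events__cortex_a34'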

def c_len(s: str) -> int:
  """Return the length of s as a C string.

  This doesn't handle all escape characters properly. It first assumes
  all \ are for escaping, then adjusts for having over-counted
  \\. The code uses \000 rather than \0 as a terminator as an adjacent
  number would be folded into a string of \0 (i.e. "\0" + "5" doesn't
  equal a terminator followed by the number 5, but the escape
  \05). The code adjusts for \000 but not properly for all octal, hex
  or unicode values.
  """
  try:
    utf = s.encode(encoding='utf-8',errors='strict')
  except:
    print(f'broken string {s}')
    raise
  return len(utf) - utf.count(b'\\') + utf.count(b'\\\\') - (utf.count(b'\\000') * 2)

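# Editorial sketch (assumed example strings, not part of the upstream script):
# c_len() counts the bytes the C compiler will emit for an escaped string
# literal, which is the unit the big string offsets are measured in.
def _c_len_example() -> None:
  assert c_len('abc') == 3
  # '\\t' is two source characters but a single byte in the compiled string.
  assert c_len('a\\tb') == 3
  # '\\000' is the embedded terminator used between attributes: one byte.
  assert c_len('name\\000') == 5
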
class BigCString:
  """A class to hold many strings concatenated together.

  Generating a large number of stand-alone C strings creates a large
  number of relocations in position-independent code. The BigCString
  is a helper for this case. It builds a single string that contains
  all the other C strings (to avoid memory issues the string
  itself is held as a list of strings). The offsets within the big
  string are recorded and, when stored to disk, these don't need
  relocation. To reduce the size of the string further, identical
  strings are merged. If a longer string ends with the same value as a
  shorter string, these entries are also merged.
  """
  strings: Set[str]
  big_string: Sequence[str]
  offsets: Dict[str, int]

  def __init__(self):
    self.strings = set()

  def add(self, s: str) -> None:
    """Called to add to the big string."""
    self.strings.add(s)

  def compute(self) -> None:
    """Called once all strings are added to compute the string and offsets."""

    folded_strings = {}
    # Determine if two strings can be folded, i.e. let one string use
    # the end of another. First reverse all strings and sort them.
    sorted_reversed_strings = sorted([x[::-1] for x in self.strings])

    # Strings 'xyz' and 'yz' will now be [ 'zy', 'zyx' ]. Scan forward
    # for each string to see if there is a better candidate to fold it
    # into; in the example, rather than using 'yz' we can use 'xyz' at
    # an offset of 1. We record which string can be folded into which
    # in folded_strings; we don't need to record the offset as it is
    # trivially computed from the string lengths.
    for pos,s in enumerate(sorted_reversed_strings):
      best_pos = pos
      for check_pos in range(pos + 1, len(sorted_reversed_strings)):
        if sorted_reversed_strings[check_pos].startswith(s):
          best_pos = check_pos
        else:
          break
      if pos != best_pos:
        folded_strings[s[::-1]] = sorted_reversed_strings[best_pos][::-1]

    # Compute reverse mappings for debugging.
    fold_into_strings = collections.defaultdict(set)
    for key, val in folded_strings.items():
      if key != val:
        fold_into_strings[val].add(key)

    # big_string_offset is the current location within the C string
    # being appended to - comments, etc. don't count. big_string is
    # the string contents represented as a list. Strings are immutable
    # in Python and so appending to one causes memory issues, while
    # lists are mutable.
    big_string_offset = 0
    self.big_string = []
    self.offsets = {}

    # Emit all strings that aren't folded in a sorted manner.
    for s in sorted(self.strings):
      if s not in folded_strings:
        self.offsets[s] = big_string_offset
        self.big_string.append(f'/* offset={big_string_offset} */ "')
        self.big_string.append(s)
        self.big_string.append('"')
        if s in fold_into_strings:
          self.big_string.append(' /* also: ' + ', '.join(fold_into_strings[s]) + ' */')
        self.big_string.append('\n')
        big_string_offset += c_len(s)
        continue

    # Compute the offsets of the folded strings.
    for s in folded_strings.keys():
      assert s not in self.offsets
      folded_s = folded_strings[s]
      self.offsets[s] = self.offsets[folded_s] + c_len(folded_s) - c_len(s)

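# Editorial sketch (assumed example strings, not part of the upstream script):
# because 'read' is a suffix of 'data_read', BigCString gives it no storage of
# its own; its offset simply points into the tail of 'data_read'.
def _bigcstring_folding_example() -> None:
  bcs = BigCString()
  bcs.add('data_read')
  bcs.add('read')
  bcs.compute()
  assert bcs.offsets['read'] == \
      bcs.offsets['data_read'] + c_len('data_read') - c_len('read')
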
_bcs = BigCString()

class JsonEvent:
  """Representation of an event loaded from a json file dictionary."""

  def __init__(self, jd: dict):
    """Constructor passed the dictionary of parsed json values."""

    def llx(x: int) -> str:
      """Convert an int to a string similar to a printf modifier of %#llx."""
      return '0' if x == 0 else hex(x)

    def fixdesc(s: str) -> str:
      """Fix formatting issue for the desc string."""
      if s is None:
        return None
      return removesuffix(removesuffix(removesuffix(s, '.  '),
                                       '. '), '.').replace('\n', '\\n').replace(
                                           '\"', '\\"').replace('\r', '\\r')

    def convert_aggr_mode(aggr_mode: str) -> Optional[str]:
      """Returns the aggr_mode_class enum value associated with the JSON string."""
      if not aggr_mode:
        return None
      aggr_mode_to_enum = {
          'PerChip': '1',
          'PerCore': '2',
      }
      return aggr_mode_to_enum[aggr_mode]

    def convert_metric_constraint(metric_constraint: str) -> Optional[str]:
      """Returns the metric_event_groups enum value associated with the JSON string."""
      if not metric_constraint:
        return None
      metric_constraint_to_enum = {
          'NO_GROUP_EVENTS': '1',
          'NO_GROUP_EVENTS_NMI': '2',
          'NO_NMI_WATCHDOG': '2',
          'NO_GROUP_EVENTS_SMT': '3',
      }
      return metric_constraint_to_enum[metric_constraint]

    def lookup_msr(num: str) -> Optional[str]:
      """Converts the msr number, or the first in a list, to the appropriate event field."""
      if not num:
        return None
      msrmap = {
          0x3F6: 'ldlat=',
          0x1A6: 'offcore_rsp=',
          0x1A7: 'offcore_rsp=',
          0x3F7: 'frontend=',
      }
      return msrmap[int(num.split(',', 1)[0], 0)]

    def real_event(name: str, event: str) -> Optional[str]:
      """Convert well-known event names to an event string, otherwise use the event argument."""
      fixed = {
          'inst_retired.any': 'event=0xc0,period=2000003',
          'inst_retired.any_p': 'event=0xc0,period=2000003',
          'cpu_clk_unhalted.ref': 'event=0x0,umask=0x03,period=2000003',
          'cpu_clk_unhalted.thread': 'event=0x3c,period=2000003',
          'cpu_clk_unhalted.core': 'event=0x3c,period=2000003',
          'cpu_clk_unhalted.thread_any': 'event=0x3c,any=1,period=2000003',
      }
      if not name:
        return None
      if name.lower() in fixed:
        return fixed[name.lower()]
      return event

    def unit_to_pmu(unit: str) -> Optional[str]:
      """Convert a JSON Unit to Linux PMU name."""
      if not unit:
        return None
      # Comment brought over from jevents.c:
      # it's not realistic to keep adding these, we need something more scalable ...
      table = {
          'CBO': 'uncore_cbox',
          'QPI LL': 'uncore_qpi',
          'SBO': 'uncore_sbox',
          'iMPH-U': 'uncore_arb',
          'CPU-M-CF': 'cpum_cf',
          'CPU-M-SF': 'cpum_sf',
          'PAI-CRYPTO' : 'pai_crypto',
          'PAI-EXT' : 'pai_ext',
          'UPI LL': 'uncore_upi',
          'hisi_sicl,cpa': 'hisi_sicl,cpa',
          'hisi_sccl,ddrc': 'hisi_sccl,ddrc',
          'hisi_sccl,hha': 'hisi_sccl,hha',
          'hisi_sccl,l3c': 'hisi_sccl,l3c',
          'imx8_ddr': 'imx8_ddr',
          'L3PMC': 'amd_l3',
          'DFPMC': 'amd_df',
          'cpu_core': 'cpu_core',
          'cpu_atom': 'cpu_atom',
      }
      return table[unit] if unit in table else f'uncore_{unit.lower()}'

    eventcode = 0
    if 'EventCode' in jd:
      eventcode = int(jd['EventCode'].split(',', 1)[0], 0)
    if 'ExtSel' in jd:
      eventcode |= int(jd['ExtSel']) << 8
    configcode = int(jd['ConfigCode'], 0) if 'ConfigCode' in jd else None
    self.name = jd['EventName'].lower() if 'EventName' in jd else None
    self.topic = ''
    self.compat = jd.get('Compat')
    self.desc = fixdesc(jd.get('BriefDescription'))
    self.long_desc = fixdesc(jd.get('PublicDescription'))
    precise = jd.get('PEBS')
    msr = lookup_msr(jd.get('MSRIndex'))
    msrval = jd.get('MSRValue')
    extra_desc = ''
    if 'Data_LA' in jd:
      extra_desc += '  Supports address when precise'
      if 'Errata' in jd:
        extra_desc += '.'
    if 'Errata' in jd:
      extra_desc += '  Spec update: ' + jd['Errata']
    self.pmu = unit_to_pmu(jd.get('Unit'))
    filter = jd.get('Filter')
    self.unit = jd.get('ScaleUnit')
    self.perpkg = jd.get('PerPkg')
    self.aggr_mode = convert_aggr_mode(jd.get('AggregationMode'))
    self.deprecated = jd.get('Deprecated')
    self.metric_name = jd.get('MetricName')
    self.metric_group = jd.get('MetricGroup')
    self.event_grouping = convert_metric_constraint(jd.get('MetricConstraint'))
    self.metric_expr = None
    if 'MetricExpr' in jd:
      self.metric_expr = metric.ParsePerfJson(jd['MetricExpr']).Simplify()
    # Note, the metric formula for the threshold isn't parsed as the &
    # and > have incorrect precedence.
    self.metric_threshold = jd.get('MetricThreshold')

    arch_std = jd.get('ArchStdEvent')
    if precise and self.desc and '(Precise Event)' not in self.desc:
      extra_desc += ' (Must be precise)' if precise == '2' else (' (Precise '
                                                                 'event)')
    event = f'config={llx(configcode)}' if configcode is not None else f'event={llx(eventcode)}'
    event_fields = [
        ('AnyThread', 'any='),
        ('PortMask', 'ch_mask='),
        ('CounterMask', 'cmask='),
        ('EdgeDetect', 'edge='),
        ('FCMask', 'fc_mask='),
        ('Invert', 'inv='),
        ('SampleAfterValue', 'period='),
        ('UMask', 'umask='),
    ]
    for key, value in event_fields:
      if key in jd and jd[key] != '0':
        event += ',' + value + jd[key]
    if filter:
      event += f',{filter}'
    if msr:
      event += f',{msr}{msrval}'
    if self.desc and extra_desc:
      self.desc += extra_desc
    if self.long_desc and extra_desc:
      self.long_desc += extra_desc
    if self.pmu:
      if self.desc and not self.desc.endswith('. '):
        self.desc += '. '
      self.desc = (self.desc if self.desc else '') + ('Unit: ' + self.pmu + ' ')
    if arch_std and arch_std.lower() in _arch_std_events:
      event = _arch_std_events[arch_std.lower()].event
      # Copy from the architecture standard event to self for undefined fields.
      for attr, value in _arch_std_events[arch_std.lower()].__dict__.items():
        if hasattr(self, attr) and not getattr(self, attr):
          setattr(self, attr, value)

    self.event = real_event(self.name, event)

  def __repr__(self) -> str:
    """String representation primarily for debugging."""
    s = '{\n'
    for attr, value in self.__dict__.items():
      if value:
        s += f'\t{attr} = {value},\n'
    return s + '}'

  def build_c_string(self, metric: bool) -> str:
    s = ''
    for attr in _json_metric_attributes if metric else _json_event_attributes:
      x = getattr(self, attr)
      if metric and x and attr == 'metric_expr':
        # Convert parsed metric expressions into a string. Slashes
        # must be doubled in the file.
        x = x.ToPerfJson().replace('\\', '\\\\')
      if metric and x and attr == 'metric_threshold':
        x = x.replace('\\', '\\\\')
      if attr in _json_enum_attributes:
        s += x if x else '0'
      else:
        s += f'{x}\\000' if x else '\\000'
    return s

  def to_c_string(self, metric: bool) -> str:
    """Representation of the event as a C struct initializer."""

    s = self.build_c_string(metric)
    return f'{{ { _bcs.offsets[s] } }}, /* {s} */\n'

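# Editorial sketch (assumed minimal JSON fields, not part of the upstream
# script): build_c_string() emits the attributes in _json_event_attributes
# order, '\000'-separated, with enum/bool attributes packed as a single digit.
def _build_c_string_example() -> None:
  je = JsonEvent({'EventName': 'example_event', 'EventCode': '0xc4'})
  s = je.build_c_string(metric=False)
  assert s.startswith('example_event\\000')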

@lru_cache(maxsize=None)
def read_json_events(path: str, topic: str) -> Sequence[JsonEvent]:
  """Read json events from the specified file."""
  try:
    events = json.load(open(path), object_hook=JsonEvent)
  except BaseException as err:
    print(f"Exception processing {path}")
    raise
  metrics: list[Tuple[str, metric.Expression]] = []
  for event in events:
    event.topic = topic
    if event.metric_name and '-' not in event.metric_name:
      metrics.append((event.metric_name, event.metric_expr))
  updates = metric.RewriteMetricsInTermsOfOthers(metrics)
  if updates:
    for event in events:
      if event.metric_name in updates:
        # print(f'Updated {event.metric_name} from\n"{event.metric_expr}"\n'
        #       f'to\n"{updates[event.metric_name]}"')
        event.metric_expr = updates[event.metric_name]

  return events

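# Editorial sketch (illustrative JSON, not part of the upstream script):
# json.load() invokes object_hook for every decoded JSON object, so a file
# holding a list of event objects decodes directly into a list of JsonEvent.
def _object_hook_example() -> None:
  import io
  events = json.load(io.StringIO('[{"EventName": "x", "EventCode": "0x1"}]'),
                     object_hook=JsonEvent)
  assert events[0].name == 'x'
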
def preprocess_arch_std_files(archpath: str) -> None:
  """Read in all architecture standard events."""
  global _arch_std_events
  for item in os.scandir(archpath):
    if item.is_file() and item.name.endswith('.json'):
      for event in read_json_events(item.path, topic=''):
        if event.name:
          _arch_std_events[event.name.lower()] = event
        if event.metric_name:
          _arch_std_events[event.metric_name.lower()] = event


def add_events_table_entries(item: os.DirEntry, topic: str) -> None:
  """Add contents of file to _pending_events table."""
  for e in read_json_events(item.path, topic):
    if e.name:
      _pending_events.append(e)
    if e.metric_name:
      _pending_metrics.append(e)


def print_pending_events() -> None:
  """Optionally close events table."""

  def event_cmp_key(j: JsonEvent) -> Tuple[bool, str, str, str, str]:
    def fix_none(s: Optional[str]) -> str:
      if s is None:
        return ''
      return s

    return (j.desc is not None, fix_none(j.topic), fix_none(j.name), fix_none(j.pmu),
            fix_none(j.metric_name))

  global _pending_events
  if not _pending_events:
    return

  global _pending_events_tblname
  if _pending_events_tblname.endswith('_sys'):
    global _sys_event_tables
    _sys_event_tables.append(_pending_events_tblname)
  else:
    global _event_tables
    _event_tables.append(_pending_events_tblname)

  _args.output_file.write(
      f'static const struct compact_pmu_event {_pending_events_tblname}[] = {{\n')

  for event in sorted(_pending_events, key=event_cmp_key):
    _args.output_file.write(event.to_c_string(metric=False))
  _pending_events = []

  _args.output_file.write('};\n\n')

def print_pending_metrics() -> None:
  """Optionally close metrics table."""

  def metric_cmp_key(j: JsonEvent) -> Tuple[bool, str, str]:
    def fix_none(s: Optional[str]) -> str:
      if s is None:
        return ''
      return s

    return (j.desc is not None, fix_none(j.pmu), fix_none(j.metric_name))

  global _pending_metrics
  if not _pending_metrics:
    return

  global _pending_metrics_tblname
  if _pending_metrics_tblname.endswith('_sys'):
    global _sys_metric_tables
    _sys_metric_tables.append(_pending_metrics_tblname)
  else:
    global _metric_tables
    _metric_tables.append(_pending_metrics_tblname)

  _args.output_file.write(
      f'static const struct compact_pmu_event {_pending_metrics_tblname}[] = {{\n')

  for metric in sorted(_pending_metrics, key=metric_cmp_key):
    _args.output_file.write(metric.to_c_string(metric=True))
  _pending_metrics = []

  _args.output_file.write('};\n\n')

def get_topic(topic: str) -> str:
  if topic.endswith('metrics.json'):
    return 'metrics'
  return removesuffix(topic, '.json').replace('-', ' ')

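# Editorial sketch (assumed file names, not part of the upstream script):
# topics are derived from the JSON file name.
def _get_topic_example() -> None:
  assert get_topic('floating-point.json') == 'floating point'
  assert get_topic('metrics.json') == 'metrics'
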
def preprocess_one_file(parents: Sequence[str], item: os.DirEntry) -> None:

  if item.is_dir():
    return

  # base dir or too deep
  level = len(parents)
  if level == 0 or level > 4:
    return

  # Ignore other directories. If the file name does not have a .json
  # extension, ignore it. It could be a readme.txt for instance.
  if not item.is_file() or not item.name.endswith('.json'):
    return

  topic = get_topic(item.name)
  for event in read_json_events(item.path, topic):
    if event.name:
      _bcs.add(event.build_c_string(metric=False))
    if event.metric_name:
      _bcs.add(event.build_c_string(metric=True))

def process_one_file(parents: Sequence[str], item: os.DirEntry) -> None:
  """Process a JSON file during the main walk."""
  def is_leaf_dir(path: str) -> bool:
    for item in os.scandir(path):
      if item.is_dir():
        return False
    return True

  # model directory, reset topic
  if item.is_dir() and is_leaf_dir(item.path):
    print_pending_events()
    print_pending_metrics()

    global _pending_events_tblname
    _pending_events_tblname = file_name_to_table_name('pmu_events_', parents, item.name)
    global _pending_metrics_tblname
    _pending_metrics_tblname = file_name_to_table_name('pmu_metrics_', parents, item.name)

    if item.name == 'sys':
      _sys_event_table_to_metric_table_mapping[_pending_events_tblname] = _pending_metrics_tblname
    return

  # base dir or too deep
  level = len(parents)
  if level == 0 or level > 4:
    return

  # Ignore other directories. If the file name does not have a .json
  # extension, ignore it. It could be a readme.txt for instance.
  if not item.is_file() or not item.name.endswith('.json'):
    return

  add_events_table_entries(item, get_topic(item.name))


def print_mapping_table(archs: Sequence[str]) -> None:
  """Read the mapfile and generate the struct from cpuid string to event table."""
  _args.output_file.write("""
/* Struct used to make the PMU event table implementation opaque to callers. */
struct pmu_events_table {
        const struct compact_pmu_event *entries;
        size_t length;
};

/* Struct used to make the PMU metric table implementation opaque to callers. */
struct pmu_metrics_table {
        const struct compact_pmu_event *entries;
        size_t length;
};

/*
 * Map a CPU to its table of PMU events. The CPU is identified by the
 * cpuid field, which is an arch-specific identifier for the CPU.
 * The identifier specified in tools/perf/pmu-events/arch/xxx/mapfile
 * must match the get_cpuid_str() in tools/perf/arch/xxx/util/header.c.
 *
 * The cpuid can contain any character other than the comma.
 */
struct pmu_events_map {
        const char *arch;
        const char *cpuid;
        struct pmu_events_table event_table;
        struct pmu_metrics_table metric_table;
};

/*
 * Global table mapping each known CPU for the architecture to its
 * table of PMU events.
 */
const struct pmu_events_map pmu_events_map[] = {
""")
  for arch in archs:
    if arch == 'test':
      _args.output_file.write("""{
\t.arch = "testarch",
\t.cpuid = "testcpu",
\t.event_table = {
\t\t.entries = pmu_events__test_soc_cpu,
\t\t.length = ARRAY_SIZE(pmu_events__test_soc_cpu),
\t},
\t.metric_table = {
\t\t.entries = pmu_metrics__test_soc_cpu,
\t\t.length = ARRAY_SIZE(pmu_metrics__test_soc_cpu),
\t}
},
""")
    else:
      with open(f'{_args.starting_dir}/{arch}/mapfile.csv') as csvfile:
        table = csv.reader(csvfile)
        first = True
        for row in table:
          # Skip the first row or any row beginning with #.
          if not first and len(row) > 0 and not row[0].startswith('#'):
            event_tblname = file_name_to_table_name('pmu_events_', [], row[2].replace('/', '_'))
            if event_tblname in _event_tables:
              event_size = f'ARRAY_SIZE({event_tblname})'
            else:
              event_tblname = 'NULL'
              event_size = '0'
            metric_tblname = file_name_to_table_name('pmu_metrics_', [], row[2].replace('/', '_'))
            if metric_tblname in _metric_tables:
              metric_size = f'ARRAY_SIZE({metric_tblname})'
            else:
              metric_tblname = 'NULL'
              metric_size = '0'
            if event_size == '0' and metric_size == '0':
              continue
            cpuid = row[0].replace('\\', '\\\\')
            _args.output_file.write(f"""{{
\t.arch = "{arch}",
\t.cpuid = "{cpuid}",
\t.event_table = {{
\t\t.entries = {event_tblname},
\t\t.length = {event_size}
\t}},
\t.metric_table = {{
\t\t.entries = {metric_tblname},
\t\t.length = {metric_size}
\t}}
}},
""")
          first = False

  _args.output_file.write("""{
\t.arch = 0,
\t.cpuid = 0,
\t.event_table = { 0, 0 },
\t.metric_table = { 0, 0 },
}
};
""")


def print_system_mapping_table() -> None:
  """C struct mapping table array for tables from /sys directories."""
  _args.output_file.write("""
struct pmu_sys_events {
\tconst char *name;
\tstruct pmu_events_table event_table;
\tstruct pmu_metrics_table metric_table;
};

static const struct pmu_sys_events pmu_sys_event_tables[] = {
""")
  printed_metric_tables = []
  for tblname in _sys_event_tables:
    _args.output_file.write(f"""\t{{
\t\t.event_table = {{
\t\t\t.entries = {tblname},
\t\t\t.length = ARRAY_SIZE({tblname})
\t\t}},""")
    metric_tblname = _sys_event_table_to_metric_table_mapping[tblname]
    if metric_tblname in _sys_metric_tables:
      _args.output_file.write(f"""
\t\t.metric_table = {{
\t\t\t.entries = {metric_tblname},
\t\t\t.length = ARRAY_SIZE({metric_tblname})
\t\t}},""")
      printed_metric_tables.append(metric_tblname)
    _args.output_file.write(f"""
\t\t.name = \"{tblname}\",
\t}},
""")
  for tblname in _sys_metric_tables:
    if tblname in printed_metric_tables:
      continue
    _args.output_file.write(f"""\t{{
\t\t.metric_table = {{
\t\t\t.entries = {tblname},
\t\t\t.length = ARRAY_SIZE({tblname})
\t\t}},
\t\t.name = \"{tblname}\",
\t}},
""")
  _args.output_file.write("""\t{
\t\t.event_table = { 0, 0 },
\t\t.metric_table = { 0, 0 },
\t},
};

static void decompress_event(int offset, struct pmu_event *pe)
{
\tconst char *p = &big_c_string[offset];
""")
  for attr in _json_event_attributes:
    _args.output_file.write(f'\n\tpe->{attr} = ')
    if attr in _json_enum_attributes:
      _args.output_file.write("*p - '0';\n")
    else:
      _args.output_file.write("(*p == '\\0' ? NULL : p);\n")
    if attr == _json_event_attributes[-1]:
      continue
    if attr in _json_enum_attributes:
      _args.output_file.write('\tp++;')
    else:
      _args.output_file.write('\twhile (*p++);')
  _args.output_file.write("""}

static void decompress_metric(int offset, struct pmu_metric *pm)
{
\tconst char *p = &big_c_string[offset];
""")
  for attr in _json_metric_attributes:
    _args.output_file.write(f'\n\tpm->{attr} = ')
    if attr in _json_enum_attributes:
      _args.output_file.write("*p - '0';\n")
    else:
      _args.output_file.write("(*p == '\\0' ? NULL : p);\n")
    if attr == _json_metric_attributes[-1]:
      continue
    if attr in _json_enum_attributes:
      _args.output_file.write('\tp++;')
    else:
      _args.output_file.write('\twhile (*p++);')
  _args.output_file.write("""}

int pmu_events_table_for_each_event(const struct pmu_events_table *table,
                                    pmu_event_iter_fn fn,
                                    void *data)
{
        for (size_t i = 0; i < table->length; i++) {
                struct pmu_event pe;
                int ret;

                decompress_event(table->entries[i].offset, &pe);
                if (!pe.name)
                        continue;
                ret = fn(&pe, table, data);
                if (ret)
                        return ret;
        }
        return 0;
}

int pmu_metrics_table_for_each_metric(const struct pmu_metrics_table *table,
                                     pmu_metric_iter_fn fn,
                                     void *data)
{
        for (size_t i = 0; i < table->length; i++) {
                struct pmu_metric pm;
                int ret;

                decompress_metric(table->entries[i].offset, &pm);
                if (!pm.metric_expr)
                        continue;
                ret = fn(&pm, table, data);
                if (ret)
                        return ret;
        }
        return 0;
}

const struct pmu_events_table *perf_pmu__find_events_table(struct perf_pmu *pmu)
{
        const struct pmu_events_table *table = NULL;
        char *cpuid = perf_pmu__getcpuid(pmu);
        int i;

        /* on some platforms which use a cpus map, cpuid can be NULL for
         * PMUs other than CORE PMUs.
         */
        if (!cpuid)
                return NULL;

        i = 0;
        for (;;) {
                const struct pmu_events_map *map = &pmu_events_map[i++];
                if (!map->arch)
                        break;

                if (!strcmp_cpuid_str(map->cpuid, cpuid)) {
                        table = &map->event_table;
                        break;
                }
        }
        free(cpuid);
        return table;
}

const struct pmu_metrics_table *perf_pmu__find_metrics_table(struct perf_pmu *pmu)
{
        const struct pmu_metrics_table *table = NULL;
        char *cpuid = perf_pmu__getcpuid(pmu);
        int i;

        /* on some platforms which use a cpus map, cpuid can be NULL for
         * PMUs other than CORE PMUs.
         */
        if (!cpuid)
                return NULL;

        i = 0;
        for (;;) {
                const struct pmu_events_map *map = &pmu_events_map[i++];
                if (!map->arch)
                        break;

                if (!strcmp_cpuid_str(map->cpuid, cpuid)) {
                        table = &map->metric_table;
                        break;
                }
        }
        free(cpuid);
        return table;
}

const struct pmu_events_table *find_core_events_table(const char *arch, const char *cpuid)
{
        for (const struct pmu_events_map *tables = &pmu_events_map[0];
             tables->arch;
             tables++) {
                if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid))
                        return &tables->event_table;
        }
        return NULL;
}

const struct pmu_metrics_table *find_core_metrics_table(const char *arch, const char *cpuid)
{
        for (const struct pmu_events_map *tables = &pmu_events_map[0];
             tables->arch;
             tables++) {
                if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid))
                        return &tables->metric_table;
        }
        return NULL;
}

int pmu_for_each_core_event(pmu_event_iter_fn fn, void *data)
{
        for (const struct pmu_events_map *tables = &pmu_events_map[0];
             tables->arch;
             tables++) {
                int ret = pmu_events_table_for_each_event(&tables->event_table, fn, data);

                if (ret)
                        return ret;
        }
        return 0;
}

int pmu_for_each_core_metric(pmu_metric_iter_fn fn, void *data)
{
        for (const struct pmu_events_map *tables = &pmu_events_map[0];
             tables->arch;
             tables++) {
                int ret = pmu_metrics_table_for_each_metric(&tables->metric_table, fn, data);

                if (ret)
                        return ret;
        }
        return 0;
}

const struct pmu_events_table *find_sys_events_table(const char *name)
{
        for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
             tables->name;
             tables++) {
                if (!strcmp(tables->name, name))
                        return &tables->event_table;
        }
        return NULL;
}

int pmu_for_each_sys_event(pmu_event_iter_fn fn, void *data)
{
        for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
             tables->name;
             tables++) {
                int ret = pmu_events_table_for_each_event(&tables->event_table, fn, data);

                if (ret)
                        return ret;
        }
        return 0;
}

int pmu_for_each_sys_metric(pmu_metric_iter_fn fn, void *data)
{
        for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
             tables->name;
             tables++) {
                int ret = pmu_metrics_table_for_each_metric(&tables->metric_table, fn, data);

                if (ret)
                        return ret;
        }
        return 0;
}
""")


def main() -> None:
  global _args

  def dir_path(path: str) -> str:
    """Validate path is a directory for argparse."""
    if os.path.isdir(path):
      return path
    raise argparse.ArgumentTypeError(f'\'{path}\' is not a valid directory')

  def ftw(path: str, parents: Sequence[str],
          action: Callable[[Sequence[str], os.DirEntry], None]) -> None:
    """Replicate the directory/file walking behavior of C's file tree walk."""
    for item in sorted(os.scandir(path), key=lambda e: e.name):
      if _args.model != 'all' and item.is_dir():
        # Check if the model matches one in _args.model.
        if len(parents) == _args.model.split(',')[0].count('/'):
          # We're testing the correct directory.
          item_path = '/'.join(parents) + ('/' if len(parents) > 0 else '') + item.name
          if 'test' not in item_path and item_path not in _args.model.split(','):
            continue
      action(parents, item)
      if item.is_dir():
        ftw(item.path, parents + [item.name], action)

  ap = argparse.ArgumentParser()
  ap.add_argument('arch', help='Architecture name like x86')
  ap.add_argument('model', help='''Select a model such as skylake to
reduce the code size.  Normally set to "all". For architectures like
ARM64 with an implementor/model, the model must include the implementor
such as "arm/cortex-a34".''',
                  default='all')
  ap.add_argument(
      'starting_dir',
      type=dir_path,
      help='Root of tree containing architecture directories containing json files'
  )
  ap.add_argument(
      'output_file', type=argparse.FileType('w', encoding='utf-8'), nargs='?', default=sys.stdout)
  _args = ap.parse_args()

  _args.output_file.write("""
#include "pmu-events/pmu-events.h"
#include "util/header.h"
#include "util/pmu.h"
#include <string.h>
#include <stddef.h>

struct compact_pmu_event {
  int offset;
};

""")
  archs = []
  for item in os.scandir(_args.starting_dir):
    if not item.is_dir():
      continue
    if item.name == _args.arch or _args.arch == 'all' or item.name == 'test':
      archs.append(item.name)

  if len(archs) < 2:
    raise IOError(f'Missing architecture directory \'{_args.arch}\'')

  archs.sort()
  for arch in archs:
    arch_path = f'{_args.starting_dir}/{arch}'
    preprocess_arch_std_files(arch_path)
    ftw(arch_path, [], preprocess_one_file)

  _bcs.compute()
  _args.output_file.write('static const char *const big_c_string =\n')
  for s in _bcs.big_string:
    _args.output_file.write(s)
  _args.output_file.write(';\n\n')
  for arch in archs:
    arch_path = f'{_args.starting_dir}/{arch}'
    ftw(arch_path, [], process_one_file)
    print_pending_events()
    print_pending_metrics()

  print_mapping_table(archs)
  print_system_mapping_table()


if __name__ == '__main__':
  main()