xref: /openbmc/linux/tools/perf/pmu-events/jevents.py (revision 1b8012b2)
1#!/usr/bin/env python3
2# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
3"""Convert directories of JSON events to C code."""
4import argparse
5import csv
6from functools import lru_cache
7import json
8import metric
9import os
10import sys
11from typing import (Callable, Dict, Optional, Sequence, Set, Tuple)
12import collections
13
# Global command line arguments (set by main()).
_args = None
# List of regular event tables.
_event_tables = []
# List of event tables generated from "/sys" directories.
_sys_event_tables = []
# List of regular metric tables.
_metric_tables = []
# List of metric tables generated from "/sys" directories.
_sys_metric_tables = []
# Mapping between sys event table names and sys metric table names.
_sys_event_table_to_metric_table_mapping = {}
# Map from an event name to an architecture standard
# JsonEvent. Architecture standard events are in json files in the top
# f'{_args.starting_dir}/{_args.arch}' directory.
_arch_std_events = {}
# Events to write out when the table is closed
_pending_events = []
# Name of events table to be written out
_pending_events_tblname = None
# Metrics to write out when the table is closed
_pending_metrics = []
# Name of metrics table to be written out
_pending_metrics_tblname = None
# Global BigCString shared by all structures.
_bcs = None
# Order specific JsonEvent attributes will be visited.
_json_event_attributes = [
    # cmp_sevent related attributes.
    'name', 'pmu', 'topic', 'desc',
    # Seems useful, put it early.
    'event',
    # Short things in alphabetical order.
    'compat', 'deprecated', 'perpkg', 'unit',
    # Longer things (the last won't be iterated over during decompress).
    'long_desc'
]

# Attributes that are in pmu_metric rather than pmu_event.
_json_metric_attributes = [
    'pmu', 'metric_name', 'metric_group', 'metric_expr', 'metric_threshold',
    'desc', 'long_desc', 'unit', 'compat', 'metricgroup_no_group', 'aggr_mode',
    'event_grouping'
]
# Attributes that are bools or enum int values, encoded as '0', '1',...
_json_enum_attributes = ['aggr_mode', 'deprecated', 'event_grouping', 'perpkg']
60
def removesuffix(s: str, suffix: str) -> str:
  """Remove the suffix from a string

  The removesuffix function is added to str in Python 3.9. We aim for 3.6
  compatibility and so provide our own function here.

  Guard against an empty suffix: without the check, s[0:-0] evaluates to
  the empty string rather than s, diverging from str.removesuffix.
  """
  return s[0:-len(suffix)] if suffix and s.endswith(suffix) else s
68
69
def file_name_to_table_name(prefix: str, parents: Sequence[str],
                            dirname: str) -> str:
  """Generate a C table name from directory names."""
  # Join the prefix, each parent directory and the final directory with
  # underscores, then make the result a valid C identifier.
  raw_name = '_'.join([prefix] + list(parents) + [dirname])
  return raw_name.replace('-', '_')
78
79
def c_len(s: str) -> int:
  r"""Return the length of s as a C string

  This doesn't handle all escape characters properly. It first assumes
  all \ are for escaping, it then adjusts as it will have over counted
  \\. The code uses \000 rather than \0 as a terminator as an adjacent
  number would be folded into a string of \0 (ie. "\0" + "5" doesn't
  equal a terminator followed by the number 5 but the escape of
  \05). The code adjusts for \000 but not properly for all octal, hex
  or unicode values.
  """
  # Narrowed from a bare except: only encoding failures are expected
  # here; a bare except would also trap KeyboardInterrupt/SystemExit.
  try:
    utf = s.encode(encoding='utf-8', errors='strict')
  except UnicodeError:
    print(f'broken string {s}')
    raise
  # Each '\' starts an escape (1 C byte for 2 chars), except '\\' which
  # was double counted, and '\000' which is 1 C byte for 4 chars.
  return len(utf) - utf.count(b'\\') + utf.count(b'\\\\') - (utf.count(b'\\000') * 2)
97
class BigCString:
  """A class to hold many strings concatenated together.

  Generating a large number of stand-alone C strings creates a large
  number of relocations in position independent code. The BigCString
  is a helper for this case. It builds a single string which within it
  are all the other C strings (to avoid memory issues the string
  itself is held as a list of strings). The offsets within the big
  string are recorded and when stored to disk these don't need
  relocation. To reduce the size of the string further, identical
  strings are merged. If a longer string ends-with the same value as a
  shorter string, these entries are also merged.
  """
  # All distinct strings added so far.
  strings: Set[str]
  # Pieces of the concatenated C string, valid after compute().
  big_string: Sequence[str]
  # Map from each added string to its byte offset, valid after compute().
  offsets: Dict[str, int]

  def __init__(self):
    self.strings = set()

  def add(self, s: str) -> None:
    """Called to add to the big string."""
    self.strings.add(s)

  def compute(self) -> None:
    """Called once all strings are added to compute the string and offsets."""

    folded_strings = {}
    # Determine if two strings can be folded, ie. let 1 string use the
    # end of another. First reverse all strings and sort them.
    sorted_reversed_strings = sorted([x[::-1] for x in self.strings])

    # Strings 'xyz' and 'yz' will now be [ 'zy', 'zyx' ]. Scan forward
    # for each string to see if there is a better candidate to fold it
    # into, in the example rather than using 'yz' we can use 'xyz' at
    # an offset of 1. We record which string can be folded into which
    # in folded_strings, we don't need to record the offset as it is
    # trivially computed from the string lengths.
    for pos, s in enumerate(sorted_reversed_strings):
      best_pos = pos
      for check_pos in range(pos + 1, len(sorted_reversed_strings)):
        if sorted_reversed_strings[check_pos].startswith(s):
          best_pos = check_pos
        else:
          break
      if pos != best_pos:
        folded_strings[s[::-1]] = sorted_reversed_strings[best_pos][::-1]

    # Compute reverse mappings for debugging.
    fold_into_strings = collections.defaultdict(set)
    for key, val in folded_strings.items():
      if key != val:
        fold_into_strings[val].add(key)

    # big_string_offset is the current location within the C string
    # being appended to - comments, etc. don't count. big_string is
    # the string contents represented as a list. Strings are immutable
    # in Python and so appending to one causes memory issues, while
    # lists are mutable.
    big_string_offset = 0
    self.big_string = []
    self.offsets = {}

    # Emit all strings that aren't folded in a sorted manner.
    # (A redundant trailing 'continue' was removed from this loop.)
    for s in sorted(self.strings):
      if s not in folded_strings:
        self.offsets[s] = big_string_offset
        self.big_string.append(f'/* offset={big_string_offset} */ "')
        self.big_string.append(s)
        self.big_string.append('"')
        if s in fold_into_strings:
          self.big_string.append(' /* also: ' + ', '.join(fold_into_strings[s]) + ' */')
        self.big_string.append('\n')
        big_string_offset += c_len(s)

    # Compute the offsets of the folded strings.
    for s in folded_strings.keys():
      assert s not in self.offsets
      folded_s = folded_strings[s]
      self.offsets[s] = self.offsets[folded_s] + c_len(folded_s) - c_len(s)
179
180_bcs = BigCString()
181
class JsonEvent:
  """Representation of an event loaded from a json file dictionary."""

  def __init__(self, jd: dict):
    """Constructor passed the dictionary of parsed json values."""

    def llx(x: int) -> str:
      """Convert an int to a string similar to a printf modifier of %#llx."""
      return '0' if x == 0 else hex(x)

    def fixdesc(s: str) -> str:
      """Fix formatting issue for the desc string."""
      if s is None:
        return None
      # Strip a trailing period (with optional spacing) and escape
      # newlines, quotes and carriage returns for C string emission.
      return removesuffix(removesuffix(removesuffix(s, '.  '),
                                       '. '), '.').replace('\n', '\\n').replace(
                                           '\"', '\\"').replace('\r', '\\r')

    def convert_aggr_mode(aggr_mode: str) -> Optional[str]:
      """Returns the aggr_mode_class enum value associated with the JSON string."""
      if not aggr_mode:
        return None
      # Values must match the aggr_mode_class enum on the C side.
      aggr_mode_to_enum = {
          'PerChip': '1',
          'PerCore': '2',
      }
      return aggr_mode_to_enum[aggr_mode]

    def convert_metric_constraint(metric_constraint: str) -> Optional[str]:
      """Returns the metric_event_groups enum value associated with the JSON string."""
      if not metric_constraint:
        return None
      # NO_NMI_WATCHDOG is a legacy alias for NO_GROUP_EVENTS_NMI.
      metric_constraint_to_enum = {
          'NO_GROUP_EVENTS': '1',
          'NO_GROUP_EVENTS_NMI': '2',
          'NO_NMI_WATCHDOG': '2',
          'NO_GROUP_EVENTS_SMT': '3',
      }
      return metric_constraint_to_enum[metric_constraint]

    def lookup_msr(num: str) -> Optional[str]:
      """Converts the msr number, or first in a list to the appropriate event field."""
      if not num:
        return None
      msrmap = {
          0x3F6: 'ldlat=',
          0x1A6: 'offcore_rsp=',
          0x1A7: 'offcore_rsp=',
          0x3F7: 'frontend=',
      }
      # Only the first comma-separated MSR index is used; base 0 allows
      # hex ('0x...') or decimal input.
      return msrmap[int(num.split(',', 1)[0], 0)]

    def real_event(name: str, event: str) -> Optional[str]:
      """Convert well known event names to an event string otherwise use the event argument."""
      fixed = {
          'inst_retired.any': 'event=0xc0,period=2000003',
          'inst_retired.any_p': 'event=0xc0,period=2000003',
          'cpu_clk_unhalted.ref': 'event=0x0,umask=0x03,period=2000003',
          'cpu_clk_unhalted.thread': 'event=0x3c,period=2000003',
          'cpu_clk_unhalted.core': 'event=0x3c,period=2000003',
          'cpu_clk_unhalted.thread_any': 'event=0x3c,any=1,period=2000003',
      }
      if not name:
        return None
      if name.lower() in fixed:
        return fixed[name.lower()]
      return event

    def unit_to_pmu(unit: str) -> Optional[str]:
      """Convert a JSON Unit to Linux PMU name."""
      if not unit:
        return None
      # Comment brought over from jevents.c:
      # it's not realistic to keep adding these, we need something more scalable ...
      table = {
          'CBO': 'uncore_cbox',
          'QPI LL': 'uncore_qpi',
          'SBO': 'uncore_sbox',
          'iMPH-U': 'uncore_arb',
          'CPU-M-CF': 'cpum_cf',
          'CPU-M-SF': 'cpum_sf',
          'PAI-CRYPTO' : 'pai_crypto',
          'PAI-EXT' : 'pai_ext',
          'UPI LL': 'uncore_upi',
          'hisi_sicl,cpa': 'hisi_sicl,cpa',
          'hisi_sccl,ddrc': 'hisi_sccl,ddrc',
          'hisi_sccl,hha': 'hisi_sccl,hha',
          'hisi_sccl,l3c': 'hisi_sccl,l3c',
          'imx8_ddr': 'imx8_ddr',
          'L3PMC': 'amd_l3',
          'DFPMC': 'amd_df',
          'cpu_core': 'cpu_core',
          'cpu_atom': 'cpu_atom',
      }
      # Unknown units fall back to the 'uncore_' prefixed lower-case name.
      return table[unit] if unit in table else f'uncore_{unit.lower()}'

    # EventCode may be a comma separated list; only the first entry is
    # used. ExtSel occupies bits above the low 8 of the event code.
    eventcode = 0
    if 'EventCode' in jd:
      eventcode = int(jd['EventCode'].split(',', 1)[0], 0)
    if 'ExtSel' in jd:
      eventcode |= int(jd['ExtSel']) << 8
    configcode = int(jd['ConfigCode'], 0) if 'ConfigCode' in jd else None
    self.name = jd['EventName'].lower() if 'EventName' in jd else None
    self.topic = ''
    self.compat = jd.get('Compat')
    self.desc = fixdesc(jd.get('BriefDescription'))
    self.long_desc = fixdesc(jd.get('PublicDescription'))
    precise = jd.get('PEBS')
    msr = lookup_msr(jd.get('MSRIndex'))
    msrval = jd.get('MSRValue')
    # Accumulate additional description text appended below.
    extra_desc = ''
    if 'Data_LA' in jd:
      extra_desc += '  Supports address when precise'
      if 'Errata' in jd:
        extra_desc += '.'
    if 'Errata' in jd:
      extra_desc += '  Spec update: ' + jd['Errata']
    self.pmu = unit_to_pmu(jd.get('Unit'))
    filter = jd.get('Filter')
    self.unit = jd.get('ScaleUnit')
    self.perpkg = jd.get('PerPkg')
    self.aggr_mode = convert_aggr_mode(jd.get('AggregationMode'))
    self.deprecated = jd.get('Deprecated')
    self.metric_name = jd.get('MetricName')
    self.metric_group = jd.get('MetricGroup')
    self.metricgroup_no_group = jd.get('MetricgroupNoGroup')
    self.event_grouping = convert_metric_constraint(jd.get('MetricConstraint'))
    self.metric_expr = None
    if 'MetricExpr' in jd:
      self.metric_expr = metric.ParsePerfJson(jd['MetricExpr']).Simplify()
    # Note, the metric formula for the threshold isn't parsed as the &
    # and > have incorrect precedence.
    self.metric_threshold = jd.get('MetricThreshold')

    arch_std = jd.get('ArchStdEvent')
    # PEBS == '2' appears to mean the event must be precise; other truthy
    # values mark it as merely supporting precise sampling.
    if precise and self.desc and '(Precise Event)' not in self.desc:
      extra_desc += ' (Must be precise)' if precise == '2' else (' (Precise '
                                                                 'event)')
    event = f'config={llx(configcode)}' if configcode is not None else f'event={llx(eventcode)}'
    # JSON keys mapped to perf event string terms; '0' values are elided.
    event_fields = [
        ('AnyThread', 'any='),
        ('PortMask', 'ch_mask='),
        ('CounterMask', 'cmask='),
        ('EdgeDetect', 'edge='),
        ('FCMask', 'fc_mask='),
        ('Invert', 'inv='),
        ('SampleAfterValue', 'period='),
        ('UMask', 'umask='),
    ]
    for key, value in event_fields:
      if key in jd and jd[key] != '0':
        event += ',' + value + jd[key]
    if filter:
      event += f',{filter}'
    if msr:
      event += f',{msr}{msrval}'
    if self.desc and extra_desc:
      self.desc += extra_desc
    if self.long_desc and extra_desc:
      self.long_desc += extra_desc
    if self.pmu:
      if self.desc and not self.desc.endswith('. '):
        self.desc += '. '
      self.desc = (self.desc if self.desc else '') + ('Unit: ' + self.pmu + ' ')
    if arch_std and arch_std.lower() in _arch_std_events:
      event = _arch_std_events[arch_std.lower()].event
      # Copy from the architecture standard event to self for undefined fields.
      for attr, value in _arch_std_events[arch_std.lower()].__dict__.items():
        if hasattr(self, attr) and not getattr(self, attr):
          setattr(self, attr, value)

    self.event = real_event(self.name, event)

  def __repr__(self) -> str:
    """String representation primarily for debugging."""
    s = '{\n'
    for attr, value in self.__dict__.items():
      if value:
        s += f'\t{attr} = {value},\n'
    return s + '}'

  def build_c_string(self, metric: bool) -> str:
    # Concatenate the attributes in their fixed order, separated by
    # embedded C string terminators (\000). Enum attributes are encoded
    # as single digit characters with '0' meaning unset.
    s = ''
    for attr in _json_metric_attributes if metric else _json_event_attributes:
      x = getattr(self, attr)
      if metric and x and attr == 'metric_expr':
        # Convert parsed metric expressions into a string. Slashes
        # must be doubled in the file.
        x = x.ToPerfJson().replace('\\', '\\\\')
      if metric and x and attr == 'metric_threshold':
        x = x.replace('\\', '\\\\')
      if attr in _json_enum_attributes:
        s += x if x else '0'
      else:
        s += f'{x}\\000' if x else '\\000'
    return s

  def to_c_string(self, metric: bool) -> str:
    """Representation of the event as a C struct initializer."""

    s = self.build_c_string(metric)
    return f'{{ { _bcs.offsets[s] } }}, /* {s} */\n'
384
385
@lru_cache(maxsize=None)
def read_json_events(path: str, topic: str) -> Sequence[JsonEvent]:
  """Read json events from the specified file.

  Results are cached on (path, topic) as the same file may be visited
  more than once across the tree walks.

  Args:
    path: the json file to parse.
    topic: topic string stored on every event read.

  Returns:
    The events in the file, with metric expressions rewritten in terms
    of other metrics where possible.
  """
  try:
    # Use a context manager so the file handle is closed promptly; the
    # original leaked the handle returned by open().
    with open(path) as json_file:
      events = json.load(json_file, object_hook=JsonEvent)
  except BaseException:
    print(f"Exception processing {path}")
    raise
  metrics: list[Tuple[str, metric.Expression]] = []
  for event in events:
    event.topic = topic
    if event.metric_name and '-' not in event.metric_name:
      metrics.append((event.metric_name, event.metric_expr))
  updates = metric.RewriteMetricsInTermsOfOthers(metrics)
  if updates:
    for event in events:
      if event.metric_name in updates:
        event.metric_expr = updates[event.metric_name]

  return events
408
def preprocess_arch_std_files(archpath: str) -> None:
  """Read in all architecture standard events."""
  global _arch_std_events
  json_entries = (entry for entry in os.scandir(archpath)
                  if entry.is_file() and entry.name.endswith('.json'))
  for entry in json_entries:
    for event in read_json_events(entry.path, topic=''):
      # Index by both the event name and the metric name, lower-cased.
      if event.name:
        _arch_std_events[event.name.lower()] = event
      if event.metric_name:
        _arch_std_events[event.metric_name.lower()] = event
419
420
def add_events_table_entries(item: os.DirEntry, topic: str) -> None:
  """Add contents of file to _pending_events table."""
  for event in read_json_events(item.path, topic):
    # An entry may contribute to the event table, the metric table, or both.
    if event.name:
      _pending_events.append(event)
    if event.metric_name:
      _pending_metrics.append(event)
428
429
def print_pending_events() -> None:
  """Optionally close events table.

  Writes the accumulated _pending_events as a C array to the output
  file, records the table name in the appropriate table list, and
  resets the pending state. A no-op when nothing is pending.
  """

  def event_cmp_key(j: JsonEvent) -> Tuple[bool, str, str, str, str]:
    """Sort key: described events first, then topic/name/pmu/metric name."""
    def fix_none(s: Optional[str]) -> str:
      if s is None:
        return ''
      return s

    return (j.desc is not None, fix_none(j.topic), fix_none(j.name), fix_none(j.pmu),
            fix_none(j.metric_name))

  global _pending_events
  if not _pending_events:
    return

  global _pending_events_tblname
  if _pending_events_tblname.endswith('_sys'):
    global _sys_event_tables
    _sys_event_tables.append(_pending_events_tblname)
  else:
    # Fixed: the global statement previously named a non-existent
    # 'event_tables' variable instead of '_event_tables'.
    global _event_tables
    _event_tables.append(_pending_events_tblname)

  _args.output_file.write(
      f'static const struct compact_pmu_event {_pending_events_tblname}[] = {{\n')

  for event in sorted(_pending_events, key=event_cmp_key):
    _args.output_file.write(event.to_c_string(metric=False))
  _pending_events = []

  _args.output_file.write('};\n\n')
462
def print_pending_metrics() -> None:
  """Optionally close metrics table.

  Writes the accumulated _pending_metrics as a C array to the output
  file, records the table name in the appropriate table list, and
  resets the pending state. A no-op when nothing is pending.
  """

  def metric_cmp_key(j: JsonEvent) -> Tuple[bool, str, str]:
    """Sort key: described metrics first, then pmu and metric name."""
    def fix_none(s: Optional[str]) -> str:
      if s is None:
        return ''
      return s

    return (j.desc is not None, fix_none(j.pmu), fix_none(j.metric_name))

  global _pending_metrics
  if not _pending_metrics:
    return

  global _pending_metrics_tblname
  if _pending_metrics_tblname.endswith('_sys'):
    global _sys_metric_tables
    _sys_metric_tables.append(_pending_metrics_tblname)
  else:
    # Fixed: the global statement previously named a non-existent
    # 'metric_tables' variable instead of '_metric_tables'.
    global _metric_tables
    _metric_tables.append(_pending_metrics_tblname)

  _args.output_file.write(
      f'static const struct compact_pmu_event {_pending_metrics_tblname}[] = {{\n')

  # Renamed from 'metric' to avoid shadowing the imported metric module.
  for pending_metric in sorted(_pending_metrics, key=metric_cmp_key):
    _args.output_file.write(pending_metric.to_c_string(metric=True))
  _pending_metrics = []

  _args.output_file.write('};\n\n')
494
def get_topic(topic: str) -> str:
  """Derive a topic string from a json file name."""
  # Any metrics file collapses to the single 'metrics' topic.
  if topic.endswith('metrics.json'):
    return 'metrics'
  # Otherwise strip the extension and turn dashes into spaces.
  if topic.endswith('.json'):
    topic = topic[:-len('.json')]
  return topic.replace('-', ' ')
499
def preprocess_one_file(parents: Sequence[str], item: os.DirEntry) -> None:
  """First pass over a file: register its C strings with _bcs."""
  if item.is_dir():
    return

  # Skip the base directory and anything nested too deeply.
  depth = len(parents)
  if depth == 0 or depth > 4:
    return

  # Only json files describe events; a readme.txt, for instance, is
  # ignored here.
  if not (item.is_file() and item.name.endswith('.json')):
    return

  topic = get_topic(item.name)
  for event in read_json_events(item.path, topic):
    if event.name:
      _bcs.add(event.build_c_string(metric=False))
    if event.metric_name:
      _bcs.add(event.build_c_string(metric=True))
521
def process_one_file(parents: Sequence[str], item: os.DirEntry) -> None:
  """Process a JSON file during the main walk."""

  def is_leaf_dir(path: str) -> bool:
    """True when path has no sub-directories."""
    return not any(entry.is_dir() for entry in os.scandir(path))

  # A leaf directory is a model directory: flush the pending tables and
  # start new ones named after it.
  if item.is_dir() and is_leaf_dir(item.path):
    print_pending_events()
    print_pending_metrics()

    global _pending_events_tblname
    global _pending_metrics_tblname
    _pending_events_tblname = file_name_to_table_name('pmu_events_', parents, item.name)
    _pending_metrics_tblname = file_name_to_table_name('pmu_metrics_', parents, item.name)

    if item.name == 'sys':
      _sys_event_table_to_metric_table_mapping[_pending_events_tblname] = _pending_metrics_tblname
    return

  # Skip the base directory and anything nested too deeply.
  depth = len(parents)
  if depth == 0 or depth > 4:
    return

  # Only json files describe events; skip readme.txt and similar.
  if not (item.is_file() and item.name.endswith('.json')):
    return

  add_events_table_entries(item, get_topic(item.name))
555
556
def print_mapping_table(archs: Sequence[str]) -> None:
  """Read the mapfile and generate the struct from cpuid string to event table."""
  # Emit the opaque table structs and the header of pmu_events_map.
  _args.output_file.write("""
/* Struct used to make the PMU event table implementation opaque to callers. */
struct pmu_events_table {
        const struct compact_pmu_event *entries;
        size_t length;
};

/* Struct used to make the PMU metric table implementation opaque to callers. */
struct pmu_metrics_table {
        const struct compact_pmu_event *entries;
        size_t length;
};

/*
 * Map a CPU to its table of PMU events. The CPU is identified by the
 * cpuid field, which is an arch-specific identifier for the CPU.
 * The identifier specified in tools/perf/pmu-events/arch/xxx/mapfile
 * must match the get_cpuid_str() in tools/perf/arch/xxx/util/header.c)
 *
 * The  cpuid can contain any character other than the comma.
 */
struct pmu_events_map {
        const char *arch;
        const char *cpuid;
        struct pmu_events_table event_table;
        struct pmu_metrics_table metric_table;
};

/*
 * Global table mapping each known CPU for the architecture to its
 * table of PMU events.
 */
const struct pmu_events_map pmu_events_map[] = {
""")
  for arch in archs:
    if arch == 'test':
      # The test architecture has fixed table names rather than a mapfile.
      _args.output_file.write("""{
\t.arch = "testarch",
\t.cpuid = "testcpu",
\t.event_table = {
\t\t.entries = pmu_events__test_soc_cpu,
\t\t.length = ARRAY_SIZE(pmu_events__test_soc_cpu),
\t},
\t.metric_table = {
\t\t.entries = pmu_metrics__test_soc_cpu,
\t\t.length = ARRAY_SIZE(pmu_metrics__test_soc_cpu),
\t}
},
""")
    else:
      with open(f'{_args.starting_dir}/{arch}/mapfile.csv') as csvfile:
        table = csv.reader(csvfile)
        first = True
        for row in table:
          # Skip the first row or any row beginning with #.
          if not first and len(row) > 0 and not row[0].startswith('#'):
            # row[0] is the cpuid, row[2] the model directory name.
            event_tblname = file_name_to_table_name('pmu_events_', [], row[2].replace('/', '_'))
            if event_tblname in _event_tables:
              event_size = f'ARRAY_SIZE({event_tblname})'
            else:
              event_tblname = 'NULL'
              event_size = '0'
            metric_tblname = file_name_to_table_name('pmu_metrics_', [], row[2].replace('/', '_'))
            if metric_tblname in _metric_tables:
              metric_size = f'ARRAY_SIZE({metric_tblname})'
            else:
              metric_tblname = 'NULL'
              metric_size = '0'
            # Skip mapfile rows whose model has no generated tables at all.
            if event_size == '0' and metric_size == '0':
              continue
            cpuid = row[0].replace('\\', '\\\\')
            _args.output_file.write(f"""{{
\t.arch = "{arch}",
\t.cpuid = "{cpuid}",
\t.event_table = {{
\t\t.entries = {event_tblname},
\t\t.length = {event_size}
\t}},
\t.metric_table = {{
\t\t.entries = {metric_tblname},
\t\t.length = {metric_size}
\t}}
}},
""")
          first = False

  # NULL-terminate the map table.
  _args.output_file.write("""{
\t.arch = 0,
\t.cpuid = 0,
\t.event_table = { 0, 0 },
\t.metric_table = { 0, 0 },
}
};
""")
653
654
def print_system_mapping_table() -> None:
  """C struct mapping table array for tables from /sys directories.

  Also emits the decompress helpers and the C accessor functions that
  iterate the generated event/metric tables.
  """
  _args.output_file.write("""
struct pmu_sys_events {
\tconst char *name;
\tstruct pmu_events_table event_table;
\tstruct pmu_metrics_table metric_table;
};

static const struct pmu_sys_events pmu_sys_event_tables[] = {
""")
  printed_metric_tables = []
  for tblname in _sys_event_tables:
    _args.output_file.write(f"""\t{{
\t\t.event_table = {{
\t\t\t.entries = {tblname},
\t\t\t.length = ARRAY_SIZE({tblname})
\t\t}},""")
    # Pair the event table with its metric table when one was generated.
    metric_tblname = _sys_event_table_to_metric_table_mapping[tblname]
    if metric_tblname in _sys_metric_tables:
      _args.output_file.write(f"""
\t\t.metric_table = {{
\t\t\t.entries = {metric_tblname},
\t\t\t.length = ARRAY_SIZE({metric_tblname})
\t\t}},""")
      printed_metric_tables.append(metric_tblname)
    _args.output_file.write(f"""
\t\t.name = \"{tblname}\",
\t}},
""")
  # Emit metric-only entries for sys metric tables not already paired above.
  for tblname in _sys_metric_tables:
    if tblname in printed_metric_tables:
      continue
    _args.output_file.write(f"""\t{{
\t\t.metric_table = {{
\t\t\t.entries = {tblname},
\t\t\t.length = ARRAY_SIZE({tblname})
\t\t}},
\t\t.name = \"{tblname}\",
\t}},
""")
  _args.output_file.write("""\t{
\t\t.event_table = { 0, 0 },
\t\t.metric_table = { 0, 0 },
\t},
};

static void decompress_event(int offset, struct pmu_event *pe)
{
\tconst char *p = &big_c_string[offset];
""")
  # Generate the field-by-field decompression, mirroring the attribute
  # order used by build_c_string. Enum attributes are one digit; other
  # attributes are \000-terminated strings (NULL when empty).
  for attr in _json_event_attributes:
    _args.output_file.write(f'\n\tpe->{attr} = ')
    if attr in _json_enum_attributes:
      _args.output_file.write("*p - '0';\n")
    else:
      _args.output_file.write("(*p == '\\0' ? NULL : p);\n")
    # No pointer advance is needed after the final attribute.
    if attr == _json_event_attributes[-1]:
      continue
    if attr in _json_enum_attributes:
      _args.output_file.write('\tp++;')
    else:
      _args.output_file.write('\twhile (*p++);')
  _args.output_file.write("""}

static void decompress_metric(int offset, struct pmu_metric *pm)
{
\tconst char *p = &big_c_string[offset];
""")
  for attr in _json_metric_attributes:
    _args.output_file.write(f'\n\tpm->{attr} = ')
    if attr in _json_enum_attributes:
      _args.output_file.write("*p - '0';\n")
    else:
      _args.output_file.write("(*p == '\\0' ? NULL : p);\n")
    if attr == _json_metric_attributes[-1]:
      continue
    if attr in _json_enum_attributes:
      _args.output_file.write('\tp++;')
    else:
      _args.output_file.write('\twhile (*p++);')
  # Fixed C accessor functions appended verbatim to the output.
  _args.output_file.write("""}

int pmu_events_table_for_each_event(const struct pmu_events_table *table,
                                    pmu_event_iter_fn fn,
                                    void *data)
{
        for (size_t i = 0; i < table->length; i++) {
                struct pmu_event pe;
                int ret;

                decompress_event(table->entries[i].offset, &pe);
                if (!pe.name)
                        continue;
                ret = fn(&pe, table, data);
                if (ret)
                        return ret;
        }
        return 0;
}

int pmu_metrics_table_for_each_metric(const struct pmu_metrics_table *table,
                                     pmu_metric_iter_fn fn,
                                     void *data)
{
        for (size_t i = 0; i < table->length; i++) {
                struct pmu_metric pm;
                int ret;

                decompress_metric(table->entries[i].offset, &pm);
                if (!pm.metric_expr)
                        continue;
                ret = fn(&pm, table, data);
                if (ret)
                        return ret;
        }
        return 0;
}

const struct pmu_events_table *perf_pmu__find_events_table(struct perf_pmu *pmu)
{
        const struct pmu_events_table *table = NULL;
        char *cpuid = perf_pmu__getcpuid(pmu);
        int i;

        /* on some platforms which uses cpus map, cpuid can be NULL for
         * PMUs other than CORE PMUs.
         */
        if (!cpuid)
                return NULL;

        i = 0;
        for (;;) {
                const struct pmu_events_map *map = &pmu_events_map[i++];
                if (!map->arch)
                        break;

                if (!strcmp_cpuid_str(map->cpuid, cpuid)) {
                        table = &map->event_table;
                        break;
                }
        }
        free(cpuid);
        return table;
}

const struct pmu_metrics_table *perf_pmu__find_metrics_table(struct perf_pmu *pmu)
{
        const struct pmu_metrics_table *table = NULL;
        char *cpuid = perf_pmu__getcpuid(pmu);
        int i;

        /* on some platforms which uses cpus map, cpuid can be NULL for
         * PMUs other than CORE PMUs.
         */
        if (!cpuid)
                return NULL;

        i = 0;
        for (;;) {
                const struct pmu_events_map *map = &pmu_events_map[i++];
                if (!map->arch)
                        break;

                if (!strcmp_cpuid_str(map->cpuid, cpuid)) {
                        table = &map->metric_table;
                        break;
                }
        }
        free(cpuid);
        return table;
}

const struct pmu_events_table *find_core_events_table(const char *arch, const char *cpuid)
{
        for (const struct pmu_events_map *tables = &pmu_events_map[0];
             tables->arch;
             tables++) {
                if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid))
                        return &tables->event_table;
        }
        return NULL;
}

const struct pmu_metrics_table *find_core_metrics_table(const char *arch, const char *cpuid)
{
        for (const struct pmu_events_map *tables = &pmu_events_map[0];
             tables->arch;
             tables++) {
                if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid))
                        return &tables->metric_table;
        }
        return NULL;
}

int pmu_for_each_core_event(pmu_event_iter_fn fn, void *data)
{
        for (const struct pmu_events_map *tables = &pmu_events_map[0];
             tables->arch;
             tables++) {
                int ret = pmu_events_table_for_each_event(&tables->event_table, fn, data);

                if (ret)
                        return ret;
        }
        return 0;
}

int pmu_for_each_core_metric(pmu_metric_iter_fn fn, void *data)
{
        for (const struct pmu_events_map *tables = &pmu_events_map[0];
             tables->arch;
             tables++) {
                int ret = pmu_metrics_table_for_each_metric(&tables->metric_table, fn, data);

                if (ret)
                        return ret;
        }
        return 0;
}

const struct pmu_events_table *find_sys_events_table(const char *name)
{
        for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
             tables->name;
             tables++) {
                if (!strcmp(tables->name, name))
                        return &tables->event_table;
        }
        return NULL;
}

int pmu_for_each_sys_event(pmu_event_iter_fn fn, void *data)
{
        for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
             tables->name;
             tables++) {
                int ret = pmu_events_table_for_each_event(&tables->event_table, fn, data);

                if (ret)
                        return ret;
        }
        return 0;
}

int pmu_for_each_sys_metric(pmu_metric_iter_fn fn, void *data)
{
        for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
             tables->name;
             tables++) {
                int ret = pmu_metrics_table_for_each_metric(&tables->metric_table, fn, data);

                if (ret)
                        return ret;
        }
        return 0;
}
""")
913
914
def main() -> None:
  """Drive the JSON -> C conversion.

  Parses command line arguments, walks the per-architecture JSON
  directories twice (once to size the shared big C string, once to emit
  the tables) and writes the generated C source to the output file.
  """
  global _args

  def dir_path(path: str) -> str:
    """Validate path is a directory for argparse."""
    if not os.path.isdir(path):
      raise argparse.ArgumentTypeError(f'\'{path}\' is not a valid directory')
    return path

  def ftw(path: str, parents: Sequence[str],
          action: Callable[[Sequence[str], os.DirEntry], None]) -> None:
    """Replicate the directory/file walking behavior of C's file tree walk."""
    for entry in sorted(os.scandir(path), key=lambda e: e.name):
      if entry.is_dir() and _args.model != 'all':
        # Only filter directories at the depth where model names appear;
        # 'test' directories are always kept so self-tests keep working.
        wanted = _args.model.split(',')
        if len(parents) == wanted[0].count('/'):
          prefix = '/'.join(parents)
          entry_path = f'{prefix}/{entry.name}' if prefix else entry.name
          if 'test' not in entry_path and entry_path not in wanted:
            continue
      action(parents, entry)
      if entry.is_dir():
        ftw(entry.path, parents + [entry.name], action)

  ap = argparse.ArgumentParser()
  ap.add_argument('arch', help='Architecture name like x86')
  ap.add_argument('model', help='''Select a model such as skylake to
reduce the code size.  Normally set to "all". For architectures like
ARM64 with an implementor/model, the model must include the implementor
such as "arm/cortex-a34".''',
                  default='all')
  ap.add_argument(
      'starting_dir',
      type=dir_path,
      help='Root of tree containing architecture directories containing json files'
  )
  ap.add_argument(
      'output_file', type=argparse.FileType('w', encoding='utf-8'), nargs='?', default=sys.stdout)
  _args = ap.parse_args()

  # Fixed preamble of the generated C file.
  _args.output_file.write("""
#include "pmu-events/pmu-events.h"
#include "util/header.h"
#include "util/pmu.h"
#include <string.h>
#include <stddef.h>

struct compact_pmu_event {
  int offset;
};

""")
  # Collect the architecture directories to process; 'test' is always
  # included alongside the requested architecture.
  archs = [
      entry.name
      for entry in os.scandir(_args.starting_dir)
      if entry.is_dir() and (entry.name == _args.arch or _args.arch == 'all' or
                             entry.name == 'test')
  ]

  # Expect at least the requested arch plus the 'test' directory.
  if len(archs) < 2:
    raise IOError(f'Missing architecture directory \'{_args.arch}\'')

  archs.sort()
  # First pass: gather strings/events so the shared big C string can be
  # laid out before any table is emitted.
  for arch in archs:
    arch_path = f'{_args.starting_dir}/{arch}'
    preprocess_arch_std_files(arch_path)
    ftw(arch_path, [], preprocess_one_file)

  _bcs.compute()
  _args.output_file.write('static const char *const big_c_string =\n')
  _args.output_file.writelines(_bcs.big_string)
  _args.output_file.write(';\n\n')
  # Second pass: emit the per-architecture event/metric tables.
  for arch in archs:
    ftw(f'{_args.starting_dir}/{arch}', [], process_one_file)
    print_pending_events()
    print_pending_metrics()

  print_mapping_table(archs)
  print_system_mapping_table()
996
997
# Script entry point: only run when executed directly, not when imported.
if __name__ == '__main__':
  main()
1000