#!/usr/bin/env python3
# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
"""Convert directories of JSON events to C code."""
import argparse
import csv
from functools import lru_cache
import json
import metric
import os
import sys
from typing import (Callable, Dict, Optional, Sequence, Set, Tuple)
import collections

# Global command line arguments.
_args = None
# List of regular event tables.
_event_tables = []
# List of event tables generated from "/sys" directories.
_sys_event_tables = []
# List of regular metric tables.
_metric_tables = []
# List of metric tables generated from "/sys" directories.
_sys_metric_tables = []
# Mapping between sys event table names and sys metric table names.
_sys_event_table_to_metric_table_mapping = {}
# Map from an event name to an architecture standard
# JsonEvent. Architecture standard events are in json files in the top
# f'{_args.starting_dir}/{_args.arch}' directory.
_arch_std_events = {}
# Events to write out when the table is closed
_pending_events = []
# Name of events table to be written out
_pending_events_tblname = None
# Metrics to write out when the table is closed
_pending_metrics = []
# Name of metrics table to be written out
_pending_metrics_tblname = None
# Global BigCString shared by all structures.
_bcs = None
# Map from the name of a metric group to a description of the group.
_metricgroups = {}
# Order specific JsonEvent attributes will be visited.
_json_event_attributes = [
    # cmp_sevent related attributes.
    'name', 'topic', 'desc',
    # Seems useful, put it early.
    'event',
    # Short things in alphabetical order.
    'compat', 'deprecated', 'perpkg', 'unit',
    # Longer things (the last won't be iterated over during decompress).
    'long_desc'
]

# Attributes that are in pmu_metric rather than pmu_event.
_json_metric_attributes = [
    'metric_name', 'metric_group', 'metric_expr', 'metric_threshold',
    'desc', 'long_desc', 'unit', 'compat', 'metricgroup_no_group',
    'default_metricgroup_name', 'aggr_mode', 'event_grouping'
]
# Attributes that are bools or enum int values, encoded as '0', '1',...
_json_enum_attributes = ['aggr_mode', 'deprecated', 'event_grouping', 'perpkg']

def removesuffix(s: str, suffix: str) -> str:
  """Remove the suffix from a string

  The removesuffix function is added to str in Python 3.9. We aim for 3.6
  compatibility and so provide our own function here.
  """
  return s[0:-len(suffix)] if s.endswith(suffix) else s


def file_name_to_table_name(prefix: str, parents: Sequence[str],
                            dirname: str) -> str:
  """Generate a C table name from directory names."""
  tblname = prefix
  for p in parents:
    tblname += '_' + p
  tblname += '_' + dirname
  return tblname.replace('-', '_')


def c_len(s: str) -> int:
  """Return the length of s as a C string

  This doesn't handle all escape characters properly. It first assumes
  all \ are for escaping, it then adjusts as it will have over counted
  \\. The code uses \000 rather than \0 as a terminator as an adjacent
  number would be folded into a string of \0 (ie. "\0" + "5" doesn't
  equal a terminator followed by the number 5 but the escape of
  \05). The code adjusts for \000 but not properly for all octal, hex
  or unicode values.
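
  As a rough illustration (example added for clarity): a 3 character
  string with no escapes counts as 3 bytes, a backslash followed by
  "000" counts as 1 byte (the explicit terminator) and a doubled
  backslash counts as 1 byte.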
  """
  try:
    utf = s.encode(encoding='utf-8', errors='strict')
  except:
    print(f'broken string {s}')
    raise
  return len(utf) - utf.count(b'\\') + utf.count(b'\\\\') - (utf.count(b'\\000') * 2)

class BigCString:
  """A class to hold many strings concatenated together.

  Generating a large number of stand-alone C strings creates a large
  number of relocations in position independent code. The BigCString
  is a helper for this case. It builds a single string which within it
  are all the other C strings (to avoid memory issues the string
  itself is held as a list of strings). The offsets within the big
  string are recorded and when stored to disk these don't need
  relocation. To reduce the size of the string further, identical
  strings are merged. If a longer string ends-with the same value as a
  shorter string, these entries are also merged.
  """
  strings: Set[str]
  big_string: Sequence[str]
  offsets: Dict[str, int]
  insert_number: int
  insert_point: Dict[str, int]
  metrics: Set[str]

  def __init__(self):
    self.strings = set()
    self.insert_number = 0
    self.insert_point = {}
    self.metrics = set()

  def add(self, s: str, metric: bool) -> None:
    """Called to add to the big string."""
    if s not in self.strings:
      self.strings.add(s)
      self.insert_point[s] = self.insert_number
      self.insert_number += 1
      if metric:
        self.metrics.add(s)

  def compute(self) -> None:
    """Called once all strings are added to compute the string and offsets."""

    folded_strings = {}
    # Determine if two strings can be folded, ie. let 1 string use the
    # end of another. First reverse all strings and sort them.
    sorted_reversed_strings = sorted([x[::-1] for x in self.strings])

    # Strings 'xyz' and 'yz' will now be [ 'zy', 'zyx' ]. Scan forward
    # for each string to see if there is a better candidate to fold it
    # into, in the example rather than using 'yz' we can use 'xyz' at
    # an offset of 1. We record which string can be folded into which
    # in folded_strings, we don't need to record the offset as it is
    # trivially computed from the string lengths.
    for pos, s in enumerate(sorted_reversed_strings):
      best_pos = pos
      for check_pos in range(pos + 1, len(sorted_reversed_strings)):
        if sorted_reversed_strings[check_pos].startswith(s):
          best_pos = check_pos
        else:
          break
      if pos != best_pos:
        folded_strings[s[::-1]] = sorted_reversed_strings[best_pos][::-1]

    # Compute reverse mappings for debugging.
    fold_into_strings = collections.defaultdict(set)
    for key, val in folded_strings.items():
      if key != val:
        fold_into_strings[val].add(key)

    # big_string_offset is the current location within the C string
    # being appended to - comments, etc. don't count. big_string is
    # the string contents represented as a list. Strings are immutable
    # in Python and so appending to one causes memory issues, while
    # lists are mutable.
    big_string_offset = 0
    self.big_string = []
    self.offsets = {}

    def string_cmp_key(s: str) -> Tuple[bool, int, str]:
      return (s in self.metrics, self.insert_point[s], s)

    # Emit all strings that aren't folded in a sorted manner.
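    # For example (illustrative): if 'data' was folded into 'metadata',
    # only 'metadata' is emitted, annotated with '/* also: data */', and
    # the offset of 'data' is later derived as
    # offsets['metadata'] + c_len('metadata') - c_len('data').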
    for s in sorted(self.strings, key=string_cmp_key):
      if s not in folded_strings:
        self.offsets[s] = big_string_offset
        self.big_string.append(f'/* offset={big_string_offset} */ "')
        self.big_string.append(s)
        self.big_string.append('"')
        if s in fold_into_strings:
          self.big_string.append(' /* also: ' + ', '.join(fold_into_strings[s]) + ' */')
        self.big_string.append('\n')
        big_string_offset += c_len(s)
        continue

    # Compute the offsets of the folded strings.
    for s in folded_strings.keys():
      assert s not in self.offsets
      folded_s = folded_strings[s]
      self.offsets[s] = self.offsets[folded_s] + c_len(folded_s) - c_len(s)

_bcs = BigCString()

class JsonEvent:
  """Representation of an event loaded from a json file dictionary."""

  def __init__(self, jd: dict):
    """Constructor passed the dictionary of parsed json values."""

    def llx(x: int) -> str:
      """Convert an int to a string similar to a printf modifier of %#llx."""
      return '0' if x == 0 else hex(x)

    def fixdesc(s: str) -> str:
      """Fix formatting issue for the desc string."""
      if s is None:
        return None
      return removesuffix(removesuffix(removesuffix(s, '. '),
                                       '. '), '.').replace('\n', '\\n').replace(
                                           '\"', '\\"').replace('\r', '\\r')

    def convert_aggr_mode(aggr_mode: str) -> Optional[str]:
      """Returns the aggr_mode_class enum value associated with the JSON string."""
      if not aggr_mode:
        return None
      aggr_mode_to_enum = {
          'PerChip': '1',
          'PerCore': '2',
      }
      return aggr_mode_to_enum[aggr_mode]

    def convert_metric_constraint(metric_constraint: str) -> Optional[str]:
      """Returns the metric_event_groups enum value associated with the JSON string."""
      if not metric_constraint:
        return None
      metric_constraint_to_enum = {
          'NO_GROUP_EVENTS': '1',
          'NO_GROUP_EVENTS_NMI': '2',
          'NO_NMI_WATCHDOG': '2',
          'NO_GROUP_EVENTS_SMT': '3',
      }
      return metric_constraint_to_enum[metric_constraint]

    def lookup_msr(num: str) -> Optional[str]:
      """Converts the msr number, or first in a list to the appropriate event field."""
      if not num:
        return None
      msrmap = {
          0x3F6: 'ldlat=',
          0x1A6: 'offcore_rsp=',
          0x1A7: 'offcore_rsp=',
          0x3F7: 'frontend=',
      }
      return msrmap[int(num.split(',', 1)[0], 0)]

    def real_event(name: str, event: str) -> Optional[str]:
      """Convert well known event names to an event string otherwise use the event argument."""
      fixed = {
          'inst_retired.any': 'event=0xc0,period=2000003',
          'inst_retired.any_p': 'event=0xc0,period=2000003',
          'cpu_clk_unhalted.ref': 'event=0x0,umask=0x03,period=2000003',
          'cpu_clk_unhalted.thread': 'event=0x3c,period=2000003',
          'cpu_clk_unhalted.core': 'event=0x3c,period=2000003',
          'cpu_clk_unhalted.thread_any': 'event=0x3c,any=1,period=2000003',
      }
      if not name:
        return None
      if name.lower() in fixed:
        return fixed[name.lower()]
      return event

    def unit_to_pmu(unit: str) -> Optional[str]:
      """Convert a JSON Unit to Linux PMU name."""
      if not unit:
        return 'default_core'
      # Comment brought over from jevents.c:
      # it's not realistic to keep adding these, we need something more scalable ...
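      # Units not listed below fall back to f'uncore_{unit.lower()}', so a
      # hypothetical Unit of "IMC" would become "uncore_imc" (illustrative).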
      table = {
          'CBO': 'uncore_cbox',
          'QPI LL': 'uncore_qpi',
          'SBO': 'uncore_sbox',
          'iMPH-U': 'uncore_arb',
          'CPU-M-CF': 'cpum_cf',
          'CPU-M-SF': 'cpum_sf',
          'PAI-CRYPTO' : 'pai_crypto',
          'PAI-EXT' : 'pai_ext',
          'UPI LL': 'uncore_upi',
          'hisi_sicl,cpa': 'hisi_sicl,cpa',
          'hisi_sccl,ddrc': 'hisi_sccl,ddrc',
          'hisi_sccl,hha': 'hisi_sccl,hha',
          'hisi_sccl,l3c': 'hisi_sccl,l3c',
          'imx8_ddr': 'imx8_ddr',
          'L3PMC': 'amd_l3',
          'DFPMC': 'amd_df',
          'cpu_core': 'cpu_core',
          'cpu_atom': 'cpu_atom',
          'ali_drw': 'ali_drw',
      }
      return table[unit] if unit in table else f'uncore_{unit.lower()}'

    eventcode = 0
    if 'EventCode' in jd:
      eventcode = int(jd['EventCode'].split(',', 1)[0], 0)
    if 'ExtSel' in jd:
      eventcode |= int(jd['ExtSel']) << 8
    configcode = int(jd['ConfigCode'], 0) if 'ConfigCode' in jd else None
    self.name = jd['EventName'].lower() if 'EventName' in jd else None
    self.topic = ''
    self.compat = jd.get('Compat')
    self.desc = fixdesc(jd.get('BriefDescription'))
    self.long_desc = fixdesc(jd.get('PublicDescription'))
    precise = jd.get('PEBS')
    msr = lookup_msr(jd.get('MSRIndex'))
    msrval = jd.get('MSRValue')
    extra_desc = ''
    if 'Data_LA' in jd:
      extra_desc += ' Supports address when precise'
      if 'Errata' in jd:
        extra_desc += '.'
    if 'Errata' in jd:
      extra_desc += ' Spec update: ' + jd['Errata']
    self.pmu = unit_to_pmu(jd.get('Unit'))
    filter = jd.get('Filter')
    self.unit = jd.get('ScaleUnit')
    self.perpkg = jd.get('PerPkg')
    self.aggr_mode = convert_aggr_mode(jd.get('AggregationMode'))
    self.deprecated = jd.get('Deprecated')
    self.metric_name = jd.get('MetricName')
    self.metric_group = jd.get('MetricGroup')
    self.metricgroup_no_group = jd.get('MetricgroupNoGroup')
    self.default_metricgroup_name = jd.get('DefaultMetricgroupName')
    self.event_grouping = convert_metric_constraint(jd.get('MetricConstraint'))
    self.metric_expr = None
    if 'MetricExpr' in jd:
      self.metric_expr = metric.ParsePerfJson(jd['MetricExpr']).Simplify()
    # Note, the metric formula for the threshold isn't parsed as the &
    # and > have incorrect precedence.
    self.metric_threshold = jd.get('MetricThreshold')

    arch_std = jd.get('ArchStdEvent')
    if precise and self.desc and '(Precise Event)' not in self.desc:
      extra_desc += ' (Must be precise)' if precise == '2' else (' (Precise '
                                                                 'event)')
    event = f'config={llx(configcode)}' if configcode is not None else f'event={llx(eventcode)}'
    event_fields = [
        ('AnyThread', 'any='),
        ('PortMask', 'ch_mask='),
        ('CounterMask', 'cmask='),
        ('EdgeDetect', 'edge='),
        ('FCMask', 'fc_mask='),
        ('Invert', 'inv='),
        ('SampleAfterValue', 'period='),
        ('UMask', 'umask='),
    ]
    for key, value in event_fields:
      if key in jd and jd[key] != '0':
        event += ',' + value + jd[key]
    if filter:
      event += f',{filter}'
    if msr:
      event += f',{msr}{msrval}'
    if self.desc and extra_desc:
      self.desc += extra_desc
    if self.long_desc and extra_desc:
      self.long_desc += extra_desc
    if arch_std:
      if arch_std.lower() in _arch_std_events:
        event = _arch_std_events[arch_std.lower()].event
        # Copy from the architecture standard event to self for undefined fields.
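        # (Illustrative: an event that only supplies 'ArchStdEvent' and a
        # 'BriefDescription' keeps its own description but inherits the
        # encoding and any other unset fields from the standard definition.)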
        for attr, value in _arch_std_events[arch_std.lower()].__dict__.items():
          if hasattr(self, attr) and not getattr(self, attr):
            setattr(self, attr, value)
      else:
        raise argparse.ArgumentTypeError('Cannot find arch std event:', arch_std)

    self.event = real_event(self.name, event)

  def __repr__(self) -> str:
    """String representation primarily for debugging."""
    s = '{\n'
    for attr, value in self.__dict__.items():
      if value:
        s += f'\t{attr} = {value},\n'
    return s + '}'

  def build_c_string(self, metric: bool) -> str:
    s = ''
    for attr in _json_metric_attributes if metric else _json_event_attributes:
      x = getattr(self, attr)
      if metric and x and attr == 'metric_expr':
        # Convert parsed metric expressions into a string. Slashes
        # must be doubled in the file.
        x = x.ToPerfJson().replace('\\', '\\\\')
      if metric and x and attr == 'metric_threshold':
        x = x.replace('\\', '\\\\')
      if attr in _json_enum_attributes:
        s += x if x else '0'
      else:
        s += f'{x}\\000' if x else '\\000'
    return s

  def to_c_string(self, metric: bool) -> str:
    """Representation of the event as a C struct initializer."""

    s = self.build_c_string(metric)
    return f'{{ { _bcs.offsets[s] } }}, /* {s} */\n'


@lru_cache(maxsize=None)
def read_json_events(path: str, topic: str) -> Sequence[JsonEvent]:
  """Read json events from the specified file."""
  try:
    events = json.load(open(path), object_hook=JsonEvent)
  except BaseException as err:
    print(f"Exception processing {path}")
    raise
  metrics: list[Tuple[str, str, metric.Expression]] = []
  for event in events:
    event.topic = topic
    if event.metric_name and '-' not in event.metric_name:
      metrics.append((event.pmu, event.metric_name, event.metric_expr))
  updates = metric.RewriteMetricsInTermsOfOthers(metrics)
  if updates:
    for event in events:
      if event.metric_name in updates:
        # print(f'Updated {event.metric_name} from\n"{event.metric_expr}"\n'
        #       f'to\n"{updates[event.metric_name]}"')
        event.metric_expr = updates[event.metric_name]

  return events

def preprocess_arch_std_files(archpath: str) -> None:
  """Read in all architecture standard events."""
  global _arch_std_events
  for item in os.scandir(archpath):
    if item.is_file() and item.name.endswith('.json'):
      for event in read_json_events(item.path, topic=''):
        if event.name:
          _arch_std_events[event.name.lower()] = event
        if event.metric_name:
          _arch_std_events[event.metric_name.lower()] = event


def add_events_table_entries(item: os.DirEntry, topic: str) -> None:
  """Add contents of file to _pending_events table."""
  for e in read_json_events(item.path, topic):
    if e.name:
      _pending_events.append(e)
    if e.metric_name:
      _pending_metrics.append(e)


def print_pending_events() -> None:
  """Optionally close events table."""

  def event_cmp_key(j: JsonEvent) -> Tuple[str, str, bool, str, str]:
    def fix_none(s: Optional[str]) -> str:
      if s is None:
        return ''
      return s

    return (fix_none(j.pmu).replace(',', '_'), fix_none(j.name), j.desc is not None, fix_none(j.topic),
            fix_none(j.metric_name))

  global _pending_events
  if not _pending_events:
    return

  global _pending_events_tblname
  if _pending_events_tblname.endswith('_sys'):
    global _sys_event_tables
    _sys_event_tables.append(_pending_events_tblname)
  else:
    global _event_tables
    _event_tables.append(_pending_events_tblname)

  first = True
  last_pmu = None
  pmus = set()
  for event in sorted(_pending_events, key=event_cmp_key):
    if event.pmu != last_pmu:
      if not first:
        _args.output_file.write('};\n')
      pmu_name = event.pmu.replace(',', '_')
      _args.output_file.write(
          f'static const struct compact_pmu_event {_pending_events_tblname}_{pmu_name}[] = {{\n')
      first = False
      last_pmu = event.pmu
      pmus.add((event.pmu, pmu_name))

    _args.output_file.write(event.to_c_string(metric=False))
  _pending_events = []

  _args.output_file.write(f"""
}};

const struct pmu_table_entry {_pending_events_tblname}[] = {{
""")
  for (pmu, tbl_pmu) in sorted(pmus):
    pmu_name = f"{pmu}\\000"
    _args.output_file.write(f"""{{
     .entries = {_pending_events_tblname}_{tbl_pmu},
     .num_entries = ARRAY_SIZE({_pending_events_tblname}_{tbl_pmu}),
     .pmu_name = {{ {_bcs.offsets[pmu_name]} /* {pmu_name} */ }},
}},
""")
  _args.output_file.write('};\n\n')

def print_pending_metrics() -> None:
  """Optionally close metrics table."""

  def metric_cmp_key(j: JsonEvent) -> Tuple[bool, str, str]:
    def fix_none(s: Optional[str]) -> str:
      if s is None:
        return ''
      return s

    return (j.desc is not None, fix_none(j.pmu), fix_none(j.metric_name))

  global _pending_metrics
  if not _pending_metrics:
    return

  global _pending_metrics_tblname
  if _pending_metrics_tblname.endswith('_sys'):
    global _sys_metric_tables
    _sys_metric_tables.append(_pending_metrics_tblname)
  else:
    global _metric_tables
    _metric_tables.append(_pending_metrics_tblname)

  first = True
  last_pmu = None
  pmus = set()
  for metric in sorted(_pending_metrics, key=metric_cmp_key):
    if metric.pmu != last_pmu:
      if not first:
        _args.output_file.write('};\n')
      pmu_name = metric.pmu.replace(',', '_')
      _args.output_file.write(
          f'static const struct compact_pmu_event {_pending_metrics_tblname}_{pmu_name}[] = {{\n')
      first = False
      last_pmu = metric.pmu
      pmus.add((metric.pmu, pmu_name))

    _args.output_file.write(metric.to_c_string(metric=True))
  _pending_metrics = []

  _args.output_file.write(f"""
}};

const struct pmu_table_entry {_pending_metrics_tblname}[] = {{
""")
  for (pmu, tbl_pmu) in sorted(pmus):
    pmu_name = f"{pmu}\\000"
    _args.output_file.write(f"""{{
     .entries = {_pending_metrics_tblname}_{tbl_pmu},
     .num_entries = ARRAY_SIZE({_pending_metrics_tblname}_{tbl_pmu}),
     .pmu_name = {{ {_bcs.offsets[pmu_name]} /* {pmu_name} */ }},
}},
""")
  _args.output_file.write('};\n\n')

def get_topic(topic: str) -> str:
  if topic.endswith('metrics.json'):
    return 'metrics'
  return removesuffix(topic, '.json').replace('-', ' ')

def preprocess_one_file(parents: Sequence[str], item: os.DirEntry) -> None:

  if item.is_dir():
    return

  # base dir or too deep
  level = len(parents)
  if level == 0 or level > 4:
    return

  # Ignore other directories. If the file name does not have a .json
  # extension, ignore it. It could be a readme.txt for instance.
  if not item.is_file() or not item.name.endswith('.json'):
    return

  if item.name == 'metricgroups.json':
    metricgroup_descriptions = json.load(open(item.path))
    for mgroup in metricgroup_descriptions:
      assert len(mgroup) > 1, parents
      description = f"{metricgroup_descriptions[mgroup]}\\000"
      mgroup = f"{mgroup}\\000"
      _bcs.add(mgroup, metric=True)
      _bcs.add(description, metric=True)
      _metricgroups[mgroup] = description
    return

  topic = get_topic(item.name)
  for event in read_json_events(item.path, topic):
    pmu_name = f"{event.pmu}\\000"
    if event.name:
      _bcs.add(pmu_name, metric=False)
      _bcs.add(event.build_c_string(metric=False), metric=False)
    if event.metric_name:
      _bcs.add(pmu_name, metric=True)
      _bcs.add(event.build_c_string(metric=True), metric=True)

def process_one_file(parents: Sequence[str], item: os.DirEntry) -> None:
  """Process a JSON file during the main walk."""
  def is_leaf_dir(path: str) -> bool:
    for item in os.scandir(path):
      if item.is_dir():
        return False
    return True

  # model directory, reset topic
  if item.is_dir() and is_leaf_dir(item.path):
    print_pending_events()
    print_pending_metrics()

    global _pending_events_tblname
    _pending_events_tblname = file_name_to_table_name('pmu_events_', parents, item.name)
    global _pending_metrics_tblname
    _pending_metrics_tblname = file_name_to_table_name('pmu_metrics_', parents, item.name)

    if item.name == 'sys':
      _sys_event_table_to_metric_table_mapping[_pending_events_tblname] = _pending_metrics_tblname
    return

  # base dir or too deep
  level = len(parents)
  if level == 0 or level > 4:
    return

  # Ignore other directories. If the file name does not have a .json
  # extension, ignore it. It could be a readme.txt for instance.
  if not item.is_file() or not item.name.endswith('.json') or item.name == 'metricgroups.json':
    return

  add_events_table_entries(item, get_topic(item.name))


def print_mapping_table(archs: Sequence[str]) -> None:
  """Read the mapfile and generate the struct from cpuid string to event table."""
  _args.output_file.write("""
/* Struct used to make the PMU event table implementation opaque to callers. */
struct pmu_events_table {
        const struct pmu_table_entry *pmus;
        uint32_t num_pmus;
};

/* Struct used to make the PMU metric table implementation opaque to callers. */
struct pmu_metrics_table {
        const struct pmu_table_entry *pmus;
        uint32_t num_pmus;
};

/*
 * Map a CPU to its table of PMU events. The CPU is identified by the
 * cpuid field, which is an arch-specific identifier for the CPU.
 * The identifier specified in tools/perf/pmu-events/arch/xxx/mapfile
 * must match the get_cpuid_str() in tools/perf/arch/xxx/util/header.c)
 *
 * The cpuid can contain any character other than the comma.
 */
struct pmu_events_map {
        const char *arch;
        const char *cpuid;
        struct pmu_events_table event_table;
        struct pmu_metrics_table metric_table;
};

/*
 * Global table mapping each known CPU for the architecture to its
 * table of PMU events.
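 * The table is terminated by an entry with a NULL arch and cpuid.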
 */
const struct pmu_events_map pmu_events_map[] = {
""")
  for arch in archs:
    if arch == 'test':
      _args.output_file.write("""{
\t.arch = "testarch",
\t.cpuid = "testcpu",
\t.event_table = {
\t\t.pmus = pmu_events__test_soc_cpu,
\t\t.num_pmus = ARRAY_SIZE(pmu_events__test_soc_cpu),
\t},
\t.metric_table = {
\t\t.pmus = pmu_metrics__test_soc_cpu,
\t\t.num_pmus = ARRAY_SIZE(pmu_metrics__test_soc_cpu),
\t}
},
""")
    else:
      with open(f'{_args.starting_dir}/{arch}/mapfile.csv') as csvfile:
        table = csv.reader(csvfile)
        first = True
        for row in table:
          # Skip the first row or any row beginning with #.
          if not first and len(row) > 0 and not row[0].startswith('#'):
            event_tblname = file_name_to_table_name('pmu_events_', [], row[2].replace('/', '_'))
            if event_tblname in _event_tables:
              event_size = f'ARRAY_SIZE({event_tblname})'
            else:
              event_tblname = 'NULL'
              event_size = '0'
            metric_tblname = file_name_to_table_name('pmu_metrics_', [], row[2].replace('/', '_'))
            if metric_tblname in _metric_tables:
              metric_size = f'ARRAY_SIZE({metric_tblname})'
            else:
              metric_tblname = 'NULL'
              metric_size = '0'
            if event_size == '0' and metric_size == '0':
              continue
            cpuid = row[0].replace('\\', '\\\\')
            _args.output_file.write(f"""{{
\t.arch = "{arch}",
\t.cpuid = "{cpuid}",
\t.event_table = {{
\t\t.pmus = {event_tblname},
\t\t.num_pmus = {event_size}
\t}},
\t.metric_table = {{
\t\t.pmus = {metric_tblname},
\t\t.num_pmus = {metric_size}
\t}}
}},
""")
          first = False

  _args.output_file.write("""{
\t.arch = 0,
\t.cpuid = 0,
\t.event_table = { 0, 0 },
\t.metric_table = { 0, 0 },
}
};
""")


def print_system_mapping_table() -> None:
  """C struct mapping table array for tables from /sys directories."""
  _args.output_file.write("""
struct pmu_sys_events {
\tconst char *name;
\tstruct pmu_events_table event_table;
\tstruct pmu_metrics_table metric_table;
};

static const struct pmu_sys_events pmu_sys_event_tables[] = {
""")
  printed_metric_tables = []
  for tblname in _sys_event_tables:
    _args.output_file.write(f"""\t{{
\t\t.event_table = {{
\t\t\t.pmus = {tblname},
\t\t\t.num_pmus = ARRAY_SIZE({tblname})
\t\t}},""")
    metric_tblname = _sys_event_table_to_metric_table_mapping[tblname]
    if metric_tblname in _sys_metric_tables:
      _args.output_file.write(f"""
\t\t.metric_table = {{
\t\t\t.pmus = {metric_tblname},
\t\t\t.num_pmus = ARRAY_SIZE({metric_tblname})
\t\t}},""")
      printed_metric_tables.append(metric_tblname)
    _args.output_file.write(f"""
\t\t.name = \"{tblname}\",
\t}},
""")
  for tblname in _sys_metric_tables:
    if tblname in printed_metric_tables:
      continue
    _args.output_file.write(f"""\t{{
\t\t.metric_table = {{
\t\t\t.pmus = {tblname},
\t\t\t.num_pmus = ARRAY_SIZE({tblname})
\t\t}},
\t\t.name = \"{tblname}\",
\t}},
""")
  _args.output_file.write("""\t{
\t\t.event_table = { 0, 0 },
\t\t.metric_table = { 0, 0 },
\t},
};

static void decompress_event(int offset, struct pmu_event *pe)
{
\tconst char *p = &big_c_string[offset];
""")
  for attr in _json_event_attributes:
    _args.output_file.write(f'\n\tpe->{attr} = ')
    if attr in _json_enum_attributes:
      _args.output_file.write("*p - '0';\n")
    else:
      _args.output_file.write("(*p == '\\0' ? NULL : p);\n")
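    # Advance p past the field just emitted, except after the last
    # attribute: enum fields are a single digit so the generated code
    # uses 'p++', while strings are skipped with 'while (*p++);'.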
    if attr == _json_event_attributes[-1]:
      continue
    if attr in _json_enum_attributes:
      _args.output_file.write('\tp++;')
    else:
      _args.output_file.write('\twhile (*p++);')
  _args.output_file.write("""}

static void decompress_metric(int offset, struct pmu_metric *pm)
{
\tconst char *p = &big_c_string[offset];
""")
  for attr in _json_metric_attributes:
    _args.output_file.write(f'\n\tpm->{attr} = ')
    if attr in _json_enum_attributes:
      _args.output_file.write("*p - '0';\n")
    else:
      _args.output_file.write("(*p == '\\0' ? NULL : p);\n")
    if attr == _json_metric_attributes[-1]:
      continue
    if attr in _json_enum_attributes:
      _args.output_file.write('\tp++;')
    else:
      _args.output_file.write('\twhile (*p++);')
  _args.output_file.write("""}

static int pmu_events_table__for_each_event_pmu(const struct pmu_events_table *table,
                                                const struct pmu_table_entry *pmu,
                                                pmu_event_iter_fn fn,
                                                void *data)
{
        int ret;
        struct pmu_event pe = {
                .pmu = &big_c_string[pmu->pmu_name.offset],
        };

        for (uint32_t i = 0; i < pmu->num_entries; i++) {
                decompress_event(pmu->entries[i].offset, &pe);
                if (!pe.name)
                        continue;
                ret = fn(&pe, table, data);
                if (ret)
                        return ret;
        }
        return 0;
}

static int pmu_events_table__find_event_pmu(const struct pmu_events_table *table,
                                            const struct pmu_table_entry *pmu,
                                            const char *name,
                                            pmu_event_iter_fn fn,
                                            void *data)
{
        struct pmu_event pe = {
                .pmu = &big_c_string[pmu->pmu_name.offset],
        };
        int low = 0, high = pmu->num_entries - 1;

        while (low <= high) {
                int cmp, mid = (low + high) / 2;

                decompress_event(pmu->entries[mid].offset, &pe);

                if (!pe.name && !name)
                        goto do_call;

                if (!pe.name && name) {
                        low = mid + 1;
                        continue;
                }
                if (pe.name && !name) {
                        high = mid - 1;
                        continue;
                }

                cmp = strcasecmp(pe.name, name);
                if (cmp < 0) {
                        low = mid + 1;
                        continue;
                }
                if (cmp > 0) {
                        high = mid - 1;
                        continue;
                }
do_call:
                return fn ? fn(&pe, table, data) : 0;
        }
        return -1000;
}

int pmu_events_table__for_each_event(const struct pmu_events_table *table,
                                     struct perf_pmu *pmu,
                                     pmu_event_iter_fn fn,
                                     void *data)
{
        for (size_t i = 0; i < table->num_pmus; i++) {
                const struct pmu_table_entry *table_pmu = &table->pmus[i];
                const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
                int ret;

                if (pmu && !pmu__name_match(pmu, pmu_name))
                        continue;

                ret = pmu_events_table__for_each_event_pmu(table, table_pmu, fn, data);
                if (pmu || ret)
                        return ret;
        }
        return 0;
}

int pmu_events_table__find_event(const struct pmu_events_table *table,
                                 struct perf_pmu *pmu,
                                 const char *name,
                                 pmu_event_iter_fn fn,
                                 void *data)
{
        for (size_t i = 0; i < table->num_pmus; i++) {
                const struct pmu_table_entry *table_pmu = &table->pmus[i];
                const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
                int ret;

                if (!pmu__name_match(pmu, pmu_name))
                        continue;

                ret = pmu_events_table__find_event_pmu(table, table_pmu, name, fn, data);
                if (ret != -1000)
                        return ret;
        }
        return -1000;
}

size_t pmu_events_table__num_events(const struct pmu_events_table *table,
                                    struct perf_pmu *pmu)
{
        size_t count = 0;

        for (size_t i = 0; i < table->num_pmus; i++) {
                const struct pmu_table_entry *table_pmu = &table->pmus[i];
                const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];

                if (pmu__name_match(pmu, pmu_name))
                        count += table_pmu->num_entries;
        }
        return count;
}

static int pmu_metrics_table__for_each_metric_pmu(const struct pmu_metrics_table *table,
                                                  const struct pmu_table_entry *pmu,
                                                  pmu_metric_iter_fn fn,
                                                  void *data)
{
        int ret;
        struct pmu_metric pm = {
                .pmu = &big_c_string[pmu->pmu_name.offset],
        };

        for (uint32_t i = 0; i < pmu->num_entries; i++) {
                decompress_metric(pmu->entries[i].offset, &pm);
                if (!pm.metric_expr)
                        continue;
                ret = fn(&pm, table, data);
                if (ret)
                        return ret;
        }
        return 0;
}

int pmu_metrics_table__for_each_metric(const struct pmu_metrics_table *table,
                                       pmu_metric_iter_fn fn,
                                       void *data)
{
        for (size_t i = 0; i < table->num_pmus; i++) {
                int ret = pmu_metrics_table__for_each_metric_pmu(table, &table->pmus[i],
                                                                 fn, data);

                if (ret)
                        return ret;
        }
        return 0;
}

const struct pmu_events_table *perf_pmu__find_events_table(struct perf_pmu *pmu)
{
        const struct pmu_events_table *table = NULL;
        char *cpuid = perf_pmu__getcpuid(pmu);
        size_t i;

        /* on some platforms which use cpus map, cpuid can be NULL for
         * PMUs other than CORE PMUs.
         */
        if (!cpuid)
                return NULL;

        i = 0;
        for (;;) {
                const struct pmu_events_map *map = &pmu_events_map[i++];
                if (!map->arch)
                        break;

                if (!strcmp_cpuid_str(map->cpuid, cpuid)) {
                        table = &map->event_table;
                        break;
                }
        }
        free(cpuid);
        if (!pmu || !table)
                return table;

        for (i = 0; i < table->num_pmus; i++) {
                const struct pmu_table_entry *table_pmu = &table->pmus[i];
                const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];

                if (pmu__name_match(pmu, pmu_name))
                        return table;
        }
        return NULL;
}

const struct pmu_metrics_table *perf_pmu__find_metrics_table(struct perf_pmu *pmu)
{
        const struct pmu_metrics_table *table = NULL;
        char *cpuid = perf_pmu__getcpuid(pmu);
        int i;

        /* on some platforms which use cpus map, cpuid can be NULL for
         * PMUs other than CORE PMUs.
         */
        if (!cpuid)
                return NULL;

        i = 0;
        for (;;) {
                const struct pmu_events_map *map = &pmu_events_map[i++];
                if (!map->arch)
                        break;

                if (!strcmp_cpuid_str(map->cpuid, cpuid)) {
                        table = &map->metric_table;
                        break;
                }
        }
        free(cpuid);
        return table;
}

const struct pmu_events_table *find_core_events_table(const char *arch, const char *cpuid)
{
        for (const struct pmu_events_map *tables = &pmu_events_map[0];
             tables->arch;
             tables++) {
                if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid))
                        return &tables->event_table;
        }
        return NULL;
}

const struct pmu_metrics_table *find_core_metrics_table(const char *arch, const char *cpuid)
{
        for (const struct pmu_events_map *tables = &pmu_events_map[0];
             tables->arch;
             tables++) {
                if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid))
                        return &tables->metric_table;
        }
        return NULL;
}

int pmu_for_each_core_event(pmu_event_iter_fn fn, void *data)
{
        for (const struct pmu_events_map *tables = &pmu_events_map[0];
             tables->arch;
             tables++) {
                int ret = pmu_events_table__for_each_event(&tables->event_table,
                                                           /*pmu=*/ NULL, fn, data);

                if (ret)
                        return ret;
        }
        return 0;
}

int pmu_for_each_core_metric(pmu_metric_iter_fn fn, void *data)
{
        for (const struct pmu_events_map *tables = &pmu_events_map[0];
             tables->arch;
             tables++) {
                int ret = pmu_metrics_table__for_each_metric(&tables->metric_table, fn, data);

                if (ret)
                        return ret;
        }
        return 0;
}

const struct pmu_events_table *find_sys_events_table(const char *name)
{
        for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
             tables->name;
             tables++) {
                if (!strcmp(tables->name, name))
                        return &tables->event_table;
        }
        return NULL;
}

int pmu_for_each_sys_event(pmu_event_iter_fn fn, void *data)
{
        for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
             tables->name;
             tables++) {
                int ret = pmu_events_table__for_each_event(&tables->event_table,
                                                           /*pmu=*/ NULL, fn, data);

                if (ret)
                        return ret;
        }
        return 0;
}

int pmu_for_each_sys_metric(pmu_metric_iter_fn fn, void *data)
{
        for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
             tables->name;
             tables++) {
                int ret = pmu_metrics_table__for_each_metric(&tables->metric_table, fn, data);

                if (ret)
                        return ret;
        }
        return 0;
}
""")

def print_metricgroups() -> None:
  _args.output_file.write("""
static const int metricgroups[][2] = {
""")
  for mgroup in sorted(_metricgroups):
    description = _metricgroups[mgroup]
    _args.output_file.write(
        f'\t{{ {_bcs.offsets[mgroup]}, {_bcs.offsets[description]} }}, /* {mgroup} => {description} */\n'
    )
  _args.output_file.write("""
};

const char *describe_metricgroup(const char *group)
{
        int low = 0, high = (int)ARRAY_SIZE(metricgroups) - 1;

        while (low <= high) {
                int mid = (low + high) / 2;
                const char *mgroup = &big_c_string[metricgroups[mid][0]];
                int cmp = strcmp(mgroup, group);

                if (cmp == 0) {
                        return &big_c_string[metricgroups[mid][1]];
                } else if (cmp < 0) {
                        low = mid + 1;
                } else {
                        high = mid - 1;
                }
        }
        return NULL;
}
""")

def main() -> None:
  global _args

  def dir_path(path: str) -> str:
    """Validate path is a directory for argparse."""
    if os.path.isdir(path):
      return path
    raise argparse.ArgumentTypeError(f'\'{path}\' is not a valid directory')

  def ftw(path: str, parents: Sequence[str],
          action: Callable[[Sequence[str], os.DirEntry], None]) -> None:
    """Replicate the directory/file walking behavior of C's file tree walk."""
    for item in sorted(os.scandir(path), key=lambda e: e.name):
      if _args.model != 'all' and item.is_dir():
        # Check if the model matches one in _args.model.
        if len(parents) == _args.model.split(',')[0].count('/'):
          # We're testing the correct directory.
          item_path = '/'.join(parents) + ('/' if len(parents) > 0 else '') + item.name
          if 'test' not in item_path and item_path not in _args.model.split(','):
            continue
      action(parents, item)
      if item.is_dir():
        ftw(item.path, parents + [item.name], action)

  ap = argparse.ArgumentParser()
  ap.add_argument('arch', help='Architecture name like x86')
  ap.add_argument('model', help='''Select a model such as skylake to
reduce the code size. Normally set to "all". For architectures like
ARM64 with an implementor/model, the model must include the implementor
such as "arm/cortex-a34".''',
                  default='all')
  ap.add_argument(
      'starting_dir',
      type=dir_path,
      help='Root of tree containing architecture directories containing json files'
  )
  ap.add_argument(
      'output_file', type=argparse.FileType('w', encoding='utf-8'), nargs='?', default=sys.stdout)
  _args = ap.parse_args()

  _args.output_file.write("""
#include <pmu-events/pmu-events.h>
#include "util/header.h"
#include "util/pmu.h"
#include <string.h>
#include <stddef.h>

struct compact_pmu_event {
        int offset;
};

struct pmu_table_entry {
        const struct compact_pmu_event *entries;
        uint32_t num_entries;
        struct compact_pmu_event pmu_name;
};

""")
  archs = []
  for item in os.scandir(_args.starting_dir):
    if not item.is_dir():
      continue
    if item.name == _args.arch or _args.arch == 'all' or item.name == 'test':
      archs.append(item.name)

  if len(archs) < 2:
    raise IOError(f'Missing architecture directory \'{_args.arch}\'')

  archs.sort()
  for arch in archs:
    arch_path = f'{_args.starting_dir}/{arch}'
    preprocess_arch_std_files(arch_path)
    ftw(arch_path, [], preprocess_one_file)

  _bcs.compute()
  _args.output_file.write('static const char *const big_c_string =\n')
  for s in _bcs.big_string:
    _args.output_file.write(s)
  _args.output_file.write(';\n\n')
  for arch in archs:
    arch_path = f'{_args.starting_dir}/{arch}'
    ftw(arch_path, [], process_one_file)
    print_pending_events()
    print_pending_metrics()

  print_mapping_table(archs)
  print_system_mapping_table()
  print_metricgroups()

if __name__ == '__main__':
  main()
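
# Typical invocation (file names are illustrative):
#   python3 <this script> x86 all tools/perf/pmu-events/arch pmu-events.c
# writes big_c_string and the per-model event/metric tables for every x86
# model plus the 'test' architecture to pmu-events.c.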