# Note: this file does not exist in kernel v5.9.
1#!/usr/bin/env python3
2# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
3"""Convert directories of JSON events to C code."""
4import argparse
5import csv
6import json
7import metric
8import os
9import sys
10from typing import (Callable, Dict, Optional, Sequence, Set, Tuple)
11import collections
12
# Global command line arguments; an argparse.Namespace set in main().
_args = None
# List of event tables generated from "/sys" directories.
_sys_event_tables = []
# Map from an event name to an architecture standard
# JsonEvent. Architecture standard events are in json files in the top
# f'{_args.starting_dir}/{_args.arch}' directory.
_arch_std_events = {}
# Track whether an events table is currently being defined and needs closing.
_close_table = False
# Events to write out when the table is closed.
_pending_events = []
# Global BigCString shared by all structures; rebound to a BigCString
# instance right after the class definition below.
_bcs = None
# Order specific JsonEvent attributes will be visited. The generated C
# decompress() walks the same order, so this list, JsonEvent.build_c_string
# and decompress must stay in sync.
_json_event_attributes = [
    # cmp_sevent related attributes.
    'name', 'pmu', 'topic', 'desc', 'metric_name', 'metric_group',
    # Seems useful, put it early.
    'event',
    # Short things in alphabetical order.
    'aggr_mode', 'compat', 'deprecated', 'perpkg', 'unit',
    # Longer things (the last won't be iterated over during decompress).
    'metric_constraint', 'metric_expr', 'long_desc'
]
38
39
def removesuffix(s: str, suffix: str) -> str:
  """Remove the suffix from a string

  The removesuffix function is added to str in Python 3.9. We aim for 3.6
  compatibility and so provide our own function here.
  """
  # Guard against an empty suffix: without it, s[0:-0] would be s[0:0],
  # i.e. '', instead of s. str.removesuffix (PEP 616) returns s unchanged
  # for an empty suffix, and this replica now does too.
  return s[0:-len(suffix)] if suffix and s.endswith(suffix) else s
47
48
def file_name_to_table_name(parents: Sequence[str], dirname: str) -> str:
  """Generate a C table name from directory names.

  The name is 'pme' followed by each parent directory and the final
  directory, joined with underscores; '-' is not a valid C identifier
  character and is mapped to '_'.
  """
  components = ['pme'] + list(parents) + [dirname]
  return '_'.join(components).replace('-', '_')
56
def c_len(s: str) -> int:
  r"""Return the length of s as a C string

  This doesn't handle all escape characters properly. It first assumes
  all \ are for escaping, it then adjusts as it will have over counted
  \\. The code uses \000 rather than \0 as a terminator as an adjacent
  number would be folded into a string of \0 (ie. "\0" + "5" doesn't
  equal a terminator followed by the number 5 but the escape of
  \05). The code adjusts for \000 but not properly for all octal, hex
  or unicode values.
  """
  try:
    utf = s.encode(encoding='utf-8', errors='strict')
  except UnicodeError:
    # Name the offending string before propagating the encode error
    # (was a bare 'except:', which also swallowed KeyboardInterrupt).
    print(f'broken string {s}')
    raise
  # Each lone backslash is an escape (subtract one byte); doubled
  # backslashes were over-counted (add one back); '\000' is a single
  # terminator byte (subtract two of its three digits).
  return len(utf) - utf.count(b'\\') + utf.count(b'\\\\') - (utf.count(b'\\000') * 2)
74
class BigCString:
  """A class to hold many strings concatenated together.

  Generating a large number of stand-alone C strings creates a large
  number of relocations in position independent code. The BigCString
  is a helper for this case. It builds a single string which within it
  are all the other C strings (to avoid memory issues the string
  itself is held as a list of strings). The offsets within the big
  string are recorded and when stored to disk these don't need
  relocation. To reduce the size of the string further, identical
  strings are merged. If a longer string ends-with the same value as a
  shorter string, these entries are also merged.
  """
  # All unique strings added so far.
  strings: Set[str]
  # Pieces of the emitted C string; valid only after compute().
  big_string: Sequence[str]
  # Byte offset of each added string within the big string; valid only
  # after compute().
  offsets: Dict[str, int]

  def __init__(self):
    self.strings = set()

  def add(self, s: str) -> None:
    """Called to add to the big string."""
    self.strings.add(s)

  def compute(self) -> None:
    """Called once all strings are added to compute the string and offsets."""

    folded_strings = {}
    # Determine if two strings can be folded, ie. let 1 string use the
    # end of another. First reverse all strings and sort them.
    sorted_reversed_strings = sorted([x[::-1] for x in self.strings])

    # Strings 'xyz' and 'yz' will now be [ 'zy', 'zyx' ]. Scan forward
    # for each string to see if there is a better candidate to fold it
    # into, in the example rather than using 'yz' we can use 'xyz' at
    # an offset of 1. We record which string can be folded into which
    # in folded_strings, we don't need to record the offset as it is
    # trivially computed from the string lengths.
    for pos, s in enumerate(sorted_reversed_strings):
      best_pos = pos
      # Consecutive sorted neighbors that have s as a prefix (ie. the
      # original strings end with s); the last such neighbor is longest.
      for check_pos in range(pos + 1, len(sorted_reversed_strings)):
        if sorted_reversed_strings[check_pos].startswith(s):
          best_pos = check_pos
        else:
          break
      if pos != best_pos:
        folded_strings[s[::-1]] = sorted_reversed_strings[best_pos][::-1]

    # Compute reverse mappings for debugging.
    fold_into_strings = collections.defaultdict(set)
    for key, val in folded_strings.items():
      if key != val:
        fold_into_strings[val].add(key)

    # big_string_offset is the current location within the C string
    # being appended to - comments, etc. don't count. big_string is
    # the string contents represented as a list. Strings are immutable
    # in Python and so appending to one causes memory issues, while
    # lists are mutable.
    big_string_offset = 0
    self.big_string = []
    self.offsets = {}

    # Emit all strings that aren't folded in a sorted manner.
    # (A dead trailing 'continue' was removed from this loop.)
    for s in sorted(self.strings):
      if s not in folded_strings:
        self.offsets[s] = big_string_offset
        self.big_string.append(f'/* offset={big_string_offset} */ "')
        self.big_string.append(s)
        self.big_string.append('"')
        if s in fold_into_strings:
          self.big_string.append(' /* also: ' + ', '.join(fold_into_strings[s]) + ' */')
        self.big_string.append('\n')
        big_string_offset += c_len(s)

    # Compute the offsets of the folded strings: each is a suffix of
    # its already-emitted target string.
    for s in folded_strings:
      assert s not in self.offsets
      folded_s = folded_strings[s]
      self.offsets[s] = self.offsets[folded_s] + c_len(folded_s) - c_len(s)

_bcs = BigCString()
158
class JsonEvent:
  """Representation of an event loaded from a json file dictionary."""

  def __init__(self, jd: dict):
    """Constructor passed the dictionary of parsed json values."""

    def llx(x: int) -> str:
      """Convert an int to a string similar to a printf modifier of %#llx."""
      return '0' if x == 0 else hex(x)

    def fixdesc(s: str) -> Optional[str]:
      """Fix formatting issue for the desc string."""
      if s is None:
        return None
      # Strip trailing sentence punctuation and escape newline, quote
      # and carriage-return characters so the text can be embedded in a
      # generated C string literal.
      return removesuffix(removesuffix(removesuffix(s, '. '),
                                       '. '), '.').replace('\n', '\\n').replace(
                                           '\"', '\\"').replace('\r', '\\r')

    def convert_aggr_mode(aggr_mode: str) -> Optional[str]:
      """Returns the aggr_mode_class enum value associated with the JSON string."""
      if not aggr_mode:
        return None
      # Values are emitted as strings of the C enum's numeric values.
      # An unknown mode raises KeyError.
      aggr_mode_to_enum = {
          'PerChip': '1',
          'PerCore': '2',
      }
      return aggr_mode_to_enum[aggr_mode]

    def lookup_msr(num: str) -> Optional[str]:
      """Converts the msr number, or first in a list to the appropriate event field."""
      if not num:
        return None
      msrmap = {
          0x3F6: 'ldlat=',
          0x1A6: 'offcore_rsp=',
          0x1A7: 'offcore_rsp=',
          0x3F7: 'frontend=',
      }
      # Only the first number of a comma separated list is used; an
      # unknown MSR raises KeyError.
      return msrmap[int(num.split(',', 1)[0], 0)]

    def real_event(name: str, event: str) -> Optional[str]:
      """Convert well known event names to an event string otherwise use the event argument."""
      fixed = {
          'inst_retired.any': 'event=0xc0,period=2000003',
          'inst_retired.any_p': 'event=0xc0,period=2000003',
          'cpu_clk_unhalted.ref': 'event=0x0,umask=0x03,period=2000003',
          'cpu_clk_unhalted.thread': 'event=0x3c,period=2000003',
          'cpu_clk_unhalted.core': 'event=0x3c,period=2000003',
          'cpu_clk_unhalted.thread_any': 'event=0x3c,any=1,period=2000003',
      }
      if not name:
        return None
      if name.lower() in fixed:
        return fixed[name.lower()]
      return event

    def unit_to_pmu(unit: str) -> Optional[str]:
      """Convert a JSON Unit to Linux PMU name."""
      if not unit:
        return None
      # Comment brought over from jevents.c:
      # it's not realistic to keep adding these, we need something more scalable ...
      table = {
          'CBO': 'uncore_cbox',
          'QPI LL': 'uncore_qpi',
          'SBO': 'uncore_sbox',
          'iMPH-U': 'uncore_arb',
          'CPU-M-CF': 'cpum_cf',
          'CPU-M-SF': 'cpum_sf',
          'PAI-CRYPTO' : 'pai_crypto',
          'UPI LL': 'uncore_upi',
          'hisi_sicl,cpa': 'hisi_sicl,cpa',
          'hisi_sccl,ddrc': 'hisi_sccl,ddrc',
          'hisi_sccl,hha': 'hisi_sccl,hha',
          'hisi_sccl,l3c': 'hisi_sccl,l3c',
          'imx8_ddr': 'imx8_ddr',
          'L3PMC': 'amd_l3',
          'DFPMC': 'amd_df',
          'cpu_core': 'cpu_core',
          'cpu_atom': 'cpu_atom',
      }
      # Units not listed fall back to a lower-cased 'uncore_' prefix.
      return table[unit] if unit in table else f'uncore_{unit.lower()}'

    eventcode = 0
    if 'EventCode' in jd:
      # Only the first of a comma separated list of event codes is used.
      eventcode = int(jd['EventCode'].split(',', 1)[0], 0)
    if 'ExtSel' in jd:
      # ExtSel extends the event code above bit 8.
      eventcode |= int(jd['ExtSel']) << 8
    configcode = int(jd['ConfigCode'], 0) if 'ConfigCode' in jd else None
    self.name = jd['EventName'].lower() if 'EventName' in jd else None
    self.topic = ''  # Set later by read_json_events from the file name.
    self.compat = jd.get('Compat')
    self.desc = fixdesc(jd.get('BriefDescription'))
    self.long_desc = fixdesc(jd.get('PublicDescription'))
    precise = jd.get('PEBS')
    msr = lookup_msr(jd.get('MSRIndex'))
    msrval = jd.get('MSRValue')
    extra_desc = ''
    if 'Data_LA' in jd:
      extra_desc += ' Supports address when precise'
      # The '.' closes the Data_LA sentence before the spec update text.
      if 'Errata' in jd:
        extra_desc += '.'
    if 'Errata' in jd:
      extra_desc += ' Spec update: ' + jd['Errata']
    self.pmu = unit_to_pmu(jd.get('Unit'))
    # NOTE(review): 'filter' shadows the builtin of the same name;
    # harmless here as the builtin isn't used in this scope.
    filter = jd.get('Filter')
    self.unit = jd.get('ScaleUnit')
    self.perpkg = jd.get('PerPkg')
    self.aggr_mode = convert_aggr_mode(jd.get('AggregationMode'))
    self.deprecated = jd.get('Deprecated')
    self.metric_name = jd.get('MetricName')
    self.metric_group = jd.get('MetricGroup')
    self.metric_constraint = jd.get('MetricConstraint')
    self.metric_expr = None
    if 'MetricExpr' in jd:
      # Parse and simplify the metric expression; serialized later in
      # build_c_string.
      self.metric_expr = metric.ParsePerfJson(jd['MetricExpr']).Simplify()

    arch_std = jd.get('ArchStdEvent')
    if precise and self.desc and '(Precise Event)' not in self.desc:
      extra_desc += ' (Must be precise)' if precise == '2' else (' (Precise '
                                                                 'event)')
    # Prefer an explicit ConfigCode over the (possibly extended) EventCode.
    event = f'config={llx(configcode)}' if configcode is not None else f'event={llx(eventcode)}'
    # Optional JSON fields appended to the event string as perf terms.
    event_fields = [
        ('AnyThread', 'any='),
        ('PortMask', 'ch_mask='),
        ('CounterMask', 'cmask='),
        ('EdgeDetect', 'edge='),
        ('FCMask', 'fc_mask='),
        ('Invert', 'inv='),
        ('SampleAfterValue', 'period='),
        ('UMask', 'umask='),
    ]
    for key, value in event_fields:
      if key in jd and jd[key] != '0':
        event += ',' + value + jd[key]
    if filter:
      event += f',{filter}'
    if msr:
      event += f',{msr}{msrval}'
    if self.desc and extra_desc:
      self.desc += extra_desc
    if self.long_desc and extra_desc:
      self.long_desc += extra_desc
    if self.pmu:
      if self.desc and not self.desc.endswith('. '):
        self.desc += '. '
      self.desc = (self.desc if self.desc else '') + ('Unit: ' + self.pmu + ' ')
    if arch_std and arch_std.lower() in _arch_std_events:
      event = _arch_std_events[arch_std.lower()].event
      # Copy from the architecture standard event to self for undefined fields.
      for attr, value in _arch_std_events[arch_std.lower()].__dict__.items():
        if hasattr(self, attr) and not getattr(self, attr):
          setattr(self, attr, value)

    self.event = real_event(self.name, event)

  def __repr__(self) -> str:
    """String representation primarily for debugging."""
    s = '{\n'
    for attr, value in self.__dict__.items():
      # Only attributes with truthy values are shown.
      if value:
        s += f'\t{attr} = {value},\n'
    return s + '}'

  def build_c_string(self) -> str:
    # Concatenate the attributes in _json_event_attributes order, each
    # terminated by a literal '\000' so the generated C decompress()
    # can walk the fields; empty attributes contribute just the
    # terminator.
    s = ''
    for attr in _json_event_attributes:
      x = getattr(self, attr)
      if x and attr == 'metric_expr':
        # Convert parsed metric expressions into a string. Slashes
        # must be doubled in the file.
        x = x.ToPerfJson().replace('\\', '\\\\')
      s += f'{x}\\000' if x else '\\000'
    return s

  def to_c_string(self) -> str:
    """Representation of the event as a C struct initializer."""

    # The initializer holds only the event's offset into the big
    # string, computed earlier by _bcs.compute().
    s = self.build_c_string()
    return f'{{ { _bcs.offsets[s] } }}, /* {s} */\n'
339
340
def read_json_events(path: str, topic: str) -> Sequence[JsonEvent]:
  """Read json events from the specified file.

  Each JSON object is converted into a JsonEvent via the json.load
  object_hook; the given topic is stamped on every returned event.
  Raises whatever json.load raises, after naming the offending file.
  """
  try:
    # Context manager ensures the file is closed even when json.load
    # raises on malformed input (the handle previously leaked).
    with open(path) as json_file:
      result = json.load(json_file, object_hook=JsonEvent)
  except BaseException:
    # Report which file failed before propagating the original error.
    print(f"Exception processing {path}")
    raise
  for event in result:
    event.topic = topic
  return result
352
def preprocess_arch_std_files(archpath: str) -> None:
  """Read in all architecture standard events.

  Scans the top-level json files of archpath and records each named
  event in the global _arch_std_events map, keyed by lower-cased name.
  """
  global _arch_std_events
  for entry in os.scandir(archpath):
    if not entry.is_file() or not entry.name.endswith('.json'):
      continue
    for event in read_json_events(entry.path, topic=''):
      if event.name:
        _arch_std_events[event.name.lower()] = event
361
362
def print_events_table_prefix(tblname: str) -> None:
  """Open a new compact_pmu_event C array named tblname.

  Raises IOError if the previous table was never closed by
  print_events_table_suffix.
  """
  global _close_table
  if _close_table:
    raise IOError('Printing table prefix but last table has no suffix')
  opening = 'static const struct compact_pmu_event ' + tblname + '[] = {\n'
  _args.output_file.write(opening)
  _close_table = True
370
371
def add_events_table_entries(item: os.DirEntry, topic: str) -> None:
  """Queue the events of a json file onto the pending table entries.

  Raises IOError if no table prefix has been emitted yet.
  """
  if not _close_table:
    raise IOError('Table entries missing prefix')
  _pending_events.extend(read_json_events(item.path, topic))
378
379
def print_events_table_suffix() -> None:
  """Close the current events table, if one is open.

  Pending events are sorted (events lacking a description first, then
  by topic, name, pmu and metric name) and written out, then the C
  array is terminated.
  """

  def event_cmp_key(j: JsonEvent) -> Tuple[bool, str, str, str, str]:
    # 'x or ""' maps None attributes to empty strings for comparison.
    return (j.desc is not None, j.topic or '', j.name or '', j.pmu or '',
            j.metric_name or '')

  global _close_table
  global _pending_events

  if not _close_table:
    return

  for pending in sorted(_pending_events, key=event_cmp_key):
    _args.output_file.write(pending.to_c_string())
  _pending_events = []

  _args.output_file.write('};\n\n')
  _close_table = False
403
def get_topic(topic: str) -> str:
  """Derive a topic string from a json file name."""
  # Any '*metrics.json' file collapses to the single 'metrics' topic.
  if topic.endswith('metrics.json'):
    return 'metrics'
  # Otherwise drop the extension and use spaces instead of dashes.
  if topic.endswith('.json'):
    topic = topic[:-len('.json')]
  return topic.replace('-', ' ')
408
def preprocess_one_file(parents: Sequence[str], item: os.DirEntry) -> None:
  """First pass of the tree walk: feed event strings into _bcs."""
  if item.is_dir():
    return

  # Skip files in the base dir and anything nested too deep.
  depth = len(parents)
  if depth == 0 or depth > 4:
    return

  # Only json files are events; anything else (e.g. a readme.txt)
  # is ignored.
  if not item.is_file() or not item.name.endswith('.json'):
    return

  for event in read_json_events(item.path, get_topic(item.name)):
    _bcs.add(event.build_c_string())
427
def process_one_file(parents: Sequence[str], item: os.DirEntry) -> None:
  """Process a JSON file during the main walk."""
  global _sys_event_tables

  def is_leaf_dir(path: str) -> bool:
    # A leaf directory contains no sub-directories.
    return not any(child.is_dir() for child in os.scandir(path))

  # A leaf (model) directory starts a new table: close any open table
  # and emit the prefix of the next one.
  if item.is_dir() and is_leaf_dir(item.path):
    print_events_table_suffix()

    tblname = file_name_to_table_name(parents, item.name)
    if item.name == 'sys':
      _sys_event_tables.append(tblname)
    print_events_table_prefix(tblname)
    return

  # Skip files in the base dir and anything nested too deep.
  depth = len(parents)
  if depth == 0 or depth > 4:
    return

  # Only json files contain events; ignore e.g. a readme.txt.
  if not item.is_file() or not item.name.endswith('.json'):
    return

  add_events_table_entries(item, get_topic(item.name))
459
460
def print_mapping_table(archs: Sequence[str]) -> None:
  """Read the mapfile and generate the struct from cpuid string to event table."""
  _args.output_file.write("""
/* Struct used to make the PMU event table implementation opaque to callers. */
struct pmu_events_table {
        const struct compact_pmu_event *entries;
        size_t length;
};

/*
 * Map a CPU to its table of PMU events. The CPU is identified by the
 * cpuid field, which is an arch-specific identifier for the CPU.
 * The identifier specified in tools/perf/pmu-events/arch/xxx/mapfile
 * must match the get_cpuid_str() in tools/perf/arch/xxx/util/header.c)
 *
 * The cpuid can contain any character other than the comma.
 */
struct pmu_events_map {
        const char *arch;
        const char *cpuid;
        struct pmu_events_table table;
};

/*
 * Global table mapping each known CPU for the architecture to its
 * table of PMU events.
 */
const struct pmu_events_map pmu_events_map[] = {
""")
  for arch in archs:
    if arch == 'test':
      # The test architecture maps straight to the test SoC table.
      _args.output_file.write("""{
\t.arch = "testarch",
\t.cpuid = "testcpu",
\t.table = {
\t.entries = pme_test_soc_cpu,
\t.length = ARRAY_SIZE(pme_test_soc_cpu),
\t}
},
""")
    else:
      with open(f'{_args.starting_dir}/{arch}/mapfile.csv') as csvfile:
        table = csv.reader(csvfile)
        first = True
        for row in table:
          # Skip the first row or any row beginning with #.
          if not first and len(row) > 0 and not row[0].startswith('#'):
            # Column 2 is the model directory name; its table was
            # emitted during process_one_file.
            tblname = file_name_to_table_name([], row[2].replace('/', '_'))
            # Column 0 is the cpuid; escape backslashes for the C string.
            cpuid = row[0].replace('\\', '\\\\')
            _args.output_file.write(f"""{{
\t.arch = "{arch}",
\t.cpuid = "{cpuid}",
\t.table = {{
\t\t.entries = {tblname},
\t\t.length = ARRAY_SIZE({tblname})
\t}}
}},
""")
          first = False

  # Sentinel entry terminating the table.
  _args.output_file.write("""{
\t.arch = 0,
\t.cpuid = 0,
\t.table = { 0, 0 },
}
};
""")
528
529
def print_system_mapping_table() -> None:
  """C struct mapping table array for tables from /sys directories."""
  _args.output_file.write("""
struct pmu_sys_events {
\tconst char *name;
\tstruct pmu_events_table table;
};

static const struct pmu_sys_events pmu_sys_event_tables[] = {
""")
  for tblname in _sys_event_tables:
    _args.output_file.write(f"""\t{{
\t\t.table = {{
\t\t\t.entries = {tblname},
\t\t\t.length = ARRAY_SIZE({tblname})
\t\t}},
\t\t.name = \"{tblname}\",
\t}},
""")
  # Sentinel entry terminating the table.
  _args.output_file.write("""\t{
\t\t.table = { 0, 0 }
\t},
};

static void decompress(int offset, struct pmu_event *pe)
{
\tconst char *p = &big_c_string[offset];
""")
  # Emit one field assignment per attribute, in _json_event_attributes
  # order (must match JsonEvent.build_c_string). An empty field is NULL,
  # otherwise the pointer into big_c_string; then advance past the
  # terminator - except after the last attribute, where no advance is
  # needed.
  for attr in _json_event_attributes:
    _args.output_file.write(f"""
\tpe->{attr} = (*p == '\\0' ? NULL : p);
""")
    if attr == _json_event_attributes[-1]:
      continue
    _args.output_file.write('\twhile (*p++);')
  _args.output_file.write("""}

int pmu_events_table_for_each_event(const struct pmu_events_table *table,
                                    pmu_event_iter_fn fn,
                                    void *data)
{
        for (size_t i = 0; i < table->length; i++) {
                struct pmu_event pe;
                int ret;

                decompress(table->entries[i].offset, &pe);
                ret = fn(&pe, table, data);
                if (ret)
                        return ret;
        }
        return 0;
}

const struct pmu_events_table *perf_pmu__find_table(struct perf_pmu *pmu)
{
        const struct pmu_events_table *table = NULL;
        char *cpuid = perf_pmu__getcpuid(pmu);
        int i;

        /* on some platforms which uses cpus map, cpuid can be NULL for
         * PMUs other than CORE PMUs.
         */
        if (!cpuid)
                return NULL;

        i = 0;
        for (;;) {
                const struct pmu_events_map *map = &pmu_events_map[i++];
                if (!map->arch)
                        break;

                if (!strcmp_cpuid_str(map->cpuid, cpuid)) {
                        table = &map->table;
                        break;
                }
        }
        free(cpuid);
        return table;
}

const struct pmu_events_table *find_core_events_table(const char *arch, const char *cpuid)
{
        for (const struct pmu_events_map *tables = &pmu_events_map[0];
             tables->arch;
             tables++) {
                if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid))
                        return &tables->table;
        }
        return NULL;
}

int pmu_for_each_core_event(pmu_event_iter_fn fn, void *data)
{
        for (const struct pmu_events_map *tables = &pmu_events_map[0];
             tables->arch;
             tables++) {
                int ret = pmu_events_table_for_each_event(&tables->table, fn, data);

                if (ret)
                        return ret;
        }
        return 0;
}

const struct pmu_events_table *find_sys_events_table(const char *name)
{
        for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
             tables->name;
             tables++) {
                if (!strcmp(tables->name, name))
                        return &tables->table;
        }
        return NULL;
}

int pmu_for_each_sys_event(pmu_event_iter_fn fn, void *data)
{
        for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
             tables->name;
             tables++) {
                int ret = pmu_events_table_for_each_event(&tables->table, fn, data);

                if (ret)
                        return ret;
        }
        return 0;
}
""")
658
659
def main() -> None:
  """Parse arguments, walk the event trees and emit the generated C."""
  global _args

  def dir_path(path: str) -> str:
    """Validate path is a directory for argparse."""
    if os.path.isdir(path):
      return path
    raise argparse.ArgumentTypeError(f'\'{path}\' is not a valid directory')

  def ftw(path: str, parents: Sequence[str],
          action: Callable[[Sequence[str], os.DirEntry], None]) -> None:
    """Replicate the directory/file walking behavior of C's file tree walk."""
    for item in os.scandir(path):
      action(parents, item)
      if item.is_dir():
        ftw(item.path, parents + [item.name], action)

  ap = argparse.ArgumentParser()
  ap.add_argument('arch', help='Architecture name like x86')
  ap.add_argument(
      'starting_dir',
      type=dir_path,
      help='Root of tree containing architecture directories containing json files'
  )
  # Output defaults to stdout when no file argument is given.
  ap.add_argument(
      'output_file', type=argparse.FileType('w', encoding='utf-8'), nargs='?', default=sys.stdout)
  _args = ap.parse_args()

  _args.output_file.write("""
#include "pmu-events/pmu-events.h"
#include "util/header.h"
#include "util/pmu.h"
#include <string.h>
#include <stddef.h>

struct compact_pmu_event {
        int offset;
};

""")
  # Select the requested architecture directory; 'test' is always
  # included, and 'all' selects every architecture.
  archs = []
  for item in os.scandir(_args.starting_dir):
    if not item.is_dir():
      continue
    if item.name == _args.arch or _args.arch == 'all' or item.name == 'test':
      archs.append(item.name)

  # 'test' always matches above, so fewer than 2 entries means the
  # requested architecture directory was not found.
  if len(archs) < 2:
    raise IOError(f'Missing architecture directory \'{_args.arch}\'')

  archs.sort()
  # First pass: collect all event strings so the big C string and its
  # offsets can be computed before any table is emitted.
  for arch in archs:
    arch_path = f'{_args.starting_dir}/{arch}'
    preprocess_arch_std_files(arch_path)
    ftw(arch_path, [], preprocess_one_file)

  _bcs.compute()
  _args.output_file.write('static const char *const big_c_string =\n')
  for s in _bcs.big_string:
    _args.output_file.write(s)
  _args.output_file.write(';\n\n')
  # Second pass: emit the per-model event tables.
  for arch in archs:
    arch_path = f'{_args.starting_dir}/{arch}'
    ftw(arch_path, [], process_one_file)
    print_events_table_suffix()

  print_mapping_table(archs)
  print_system_mapping_table()


if __name__ == '__main__':
  main()