mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-12-01 08:04:22 +08:00
b777b3d255
Add a target that generates a log file for running metric_test.py and make this a dependency of generating pmu-events.c. The log output is displayed if the test fails like (the test was modified to make it fail): ``` TEST /tmp/perf/pmu-events/metric_test.log F...... ====================================================================== FAIL: test_Brackets (__main__.TestMetricExpressions) ---------------------------------------------------------------------- Traceback (most recent call last): File "tools/perf/pmu-events/metric_test.py", line 33, in test_Brackets self.assertEqual((a * b + c).ToPerfJson(), 'a * b + d') AssertionError: 'a * b + c' != 'a * b + d' - a * b + c ? ^ + a * b + d ? ^ ---------------------------------------------------------------------- Ran 7 tests in 0.004s FAILED (failures=1) make[3]: *** [pmu-events/Build:32: /tmp/perf/pmu-events/metric_test.log] Error 1 ``` However, normal execution will just show the TEST line. This is roughly modeled on fortify testing in the kernel lib directory. Modify metric_test.py so that it is executable. This is necessary when PYTHON isn't specified in the build, the normal case. Use variables to make the paths to files clearer and more consistent. Committer notes: Add pmu-events/metric_test.log to tools/perf/.gitignore and to the 'clean' target on tools/perf/Makefile.perf. 
Reviewed-by: Kajol Jain <kjain@linux.ibm.com> Signed-off-by: Ian Rogers <irogers@google.com> Cc: Adrian Hunter <adrian.hunter@intel.com> Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com> Cc: Caleb Biggers <caleb.biggers@intel.com> Cc: Florian Fischer <florian.fischer@muhq.space> Cc: Ian Rogers <irogers@google.com> Cc: Ingo Molnar <mingo@redhat.com> Cc: James Clark <james.clark@arm.com> Cc: Jing Zhang <renyu.zj@linux.alibaba.com> Cc: Jiri Olsa <jolsa@kernel.org> Cc: John Garry <john.g.garry@oracle.com> Cc: Kan Liang <kan.liang@linux.intel.com> Cc: Kang Minchul <tegongkang@gmail.com> Cc: Kim Phillips <kim.phillips@amd.com> Cc: Leo Yan <leo.yan@linaro.org> Cc: Mark Rutland <mark.rutland@arm.com> Cc: Mike Leach <mike.leach@linaro.org> Cc: Namhyung Kim <namhyung@kernel.org> Cc: Perry Taylor <perry.taylor@intel.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Ravi Bangoria <ravi.bangoria@amd.com> Cc: Rob Herring <robh@kernel.org> Cc: Sandipan Das <sandipan.das@amd.com> Cc: Stephane Eranian <eranian@google.com> Cc: Will Deacon <will@kernel.org> Cc: Xing Zhengjun <zhengjun.xing@linux.intel.com> Cc: linux-arm-kernel@lists.infradead.org Cc: linuxppc-dev@lists.ozlabs.org Link: https://lore.kernel.org/r/20230126233645.200509-16-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
169 lines
5.8 KiB
Python
Executable File
169 lines
5.8 KiB
Python
Executable File
#!/usr/bin/env python3
|
|
# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
|
|
import unittest
|
|
from metric import Constant
|
|
from metric import Event
|
|
from metric import Expression
|
|
from metric import ParsePerfJson
|
|
from metric import RewriteMetricsInTermsOfOthers
|
|
|
|
|
|
class TestMetricExpressions(unittest.TestCase):
  """Tests for metric.py's expression tree and its perf-json/Python renderers."""

  def test_Operators(self):
    """Each overloaded operator renders as its infix perf-json form."""
    a = Event('a')
    b = Event('b')
    self.assertEqual((a | b).ToPerfJson(), 'a | b')
    self.assertEqual((a ^ b).ToPerfJson(), 'a ^ b')
    self.assertEqual((a & b).ToPerfJson(), 'a & b')
    self.assertEqual((a < b).ToPerfJson(), 'a < b')
    self.assertEqual((a > b).ToPerfJson(), 'a > b')
    self.assertEqual((a + b).ToPerfJson(), 'a + b')
    self.assertEqual((a - b).ToPerfJson(), 'a - b')
    self.assertEqual((a * b).ToPerfJson(), 'a * b')
    self.assertEqual((a / b).ToPerfJson(), 'a / b')
    self.assertEqual((a % b).ToPerfJson(), 'a % b')
    one = Constant(1)
    self.assertEqual((a + one).ToPerfJson(), 'a + 1')

  def test_Brackets(self):
    """Brackets are emitted only where operator precedence requires them."""
    a = Event('a')
    b = Event('b')
    c = Event('c')
    self.assertEqual((a * b + c).ToPerfJson(), 'a * b + c')
    self.assertEqual((a + b * c).ToPerfJson(), 'a + b * c')
    self.assertEqual(((a + a) + a).ToPerfJson(), 'a + a + a')
    self.assertEqual(((a + b) * c).ToPerfJson(), '(a + b) * c')
    self.assertEqual((a + (b * c)).ToPerfJson(), 'a + b * c')
    self.assertEqual(((a / b) * c).ToPerfJson(), 'a / b * c')
    self.assertEqual((a / (b * c)).ToPerfJson(), 'a / (b * c)')

  def test_ParsePerfJson(self):
    """Parsing a metric string and re-rendering it round-trips exactly."""
    # Based on an example of a real metric.
    before = '(a + b + c + d) / (2 * e)'
    after = before
    self.assertEqual(ParsePerfJson(before).ToPerfJson(), after)

    # Parsing should handle events with '-' in their name. Note, in
    # the json file the '\' are doubled to '\\'.
    before = r'topdown\-fe\-bound / topdown\-slots - 1'
    after = before
    self.assertEqual(ParsePerfJson(before).ToPerfJson(), after)

    # Parsing should handle escaped modifiers. Note, in the json file
    # the '\' are doubled to '\\'.
    before = r'arb@event\=0x81\,umask\=0x1@ + arb@event\=0x84\,umask\=0x1@'
    after = before
    self.assertEqual(ParsePerfJson(before).ToPerfJson(), after)

    # Parsing should handle exponents in numbers.
    before = r'a + 1e12 + b'
    after = before
    self.assertEqual(ParsePerfJson(before).ToPerfJson(), after)

  def test_IfElseTests(self):
    """if-else expressions survive the rewrite to Select and back."""
    # if-else needs rewriting to Select and back; the round trip adds
    # enclosing brackets.
    before = r'Event1 if #smt_on else Event2'
    after = f'({before})'
    self.assertEqual(ParsePerfJson(before).ToPerfJson(), after)

    before = r'Event1 if 0 else Event2'
    after = f'({before})'
    self.assertEqual(ParsePerfJson(before).ToPerfJson(), after)

    before = r'Event1 if 1 else Event2'
    after = f'({before})'
    self.assertEqual(ParsePerfJson(before).ToPerfJson(), after)

    # Ensure the select is evaluated last.
    before = r'Event1 + 1 if Event2 < 2 else Event3 + 3'
    after = (r'Select(Event(r"Event1") + Constant(1), Event(r"Event2") < '
             r'Constant(2), Event(r"Event3") + Constant(3))')
    self.assertEqual(ParsePerfJson(before).ToPython(), after)

    before = r'Event1 > 1 if Event2 < 2 else Event3 > 3'
    after = (r'Select(Event(r"Event1") > Constant(1), Event(r"Event2") < '
             r'Constant(2), Event(r"Event3") > Constant(3))')
    self.assertEqual(ParsePerfJson(before).ToPython(), after)

    before = r'min(a + b if c > 1 else c + d, e + f)'
    after = r'min((a + b if c > 1 else c + d), e + f)'
    self.assertEqual(ParsePerfJson(before).ToPerfJson(), after)

    before = r'a if b else c if d else e'
    after = r'(a if b else (c if d else e))'
    self.assertEqual(ParsePerfJson(before).ToPerfJson(), after)

  def test_ToPython(self):
    """ToPython emits code that eval()s back to an equivalent expression."""
    # pylint: disable=eval-used
    # Based on an example of a real metric.
    before = '(a + b + c + d) / (2 * e)'
    py = ParsePerfJson(before).ToPython()
    after = eval(py).ToPerfJson()
    self.assertEqual(before, after)

  def test_Simplify(self):
    """Simplify folds constants and removes identity/absorbing operands."""
    before = '1 + 2 + 3'
    after = '6'
    self.assertEqual(ParsePerfJson(before).Simplify().ToPerfJson(), after)

    before = 'a + 0'
    after = 'a'
    self.assertEqual(ParsePerfJson(before).Simplify().ToPerfJson(), after)

    before = '0 + a'
    after = 'a'
    self.assertEqual(ParsePerfJson(before).Simplify().ToPerfJson(), after)

    before = 'a | 0'
    after = 'a'
    self.assertEqual(ParsePerfJson(before).Simplify().ToPerfJson(), after)

    before = '0 | a'
    after = 'a'
    self.assertEqual(ParsePerfJson(before).Simplify().ToPerfJson(), after)

    before = 'a * 0'
    after = '0'
    self.assertEqual(ParsePerfJson(before).Simplify().ToPerfJson(), after)

    before = '0 * a'
    after = '0'
    self.assertEqual(ParsePerfJson(before).Simplify().ToPerfJson(), after)

    before = 'a * 1'
    after = 'a'
    self.assertEqual(ParsePerfJson(before).Simplify().ToPerfJson(), after)

    before = '1 * a'
    after = 'a'
    self.assertEqual(ParsePerfJson(before).Simplify().ToPerfJson(), after)

    before = 'a if 0 else b'
    after = 'b'
    self.assertEqual(ParsePerfJson(before).Simplify().ToPerfJson(), after)

    before = 'a if 1 else b'
    after = 'a'
    self.assertEqual(ParsePerfJson(before).Simplify().ToPerfJson(), after)

    before = 'a if b else a'
    after = 'a'
    self.assertEqual(ParsePerfJson(before).Simplify().ToPerfJson(), after)

    # Pattern used to add a slots event to metrics that require it. The
    # '0 * SLOTS' term must NOT be folded away by simplification.
    before = '0 * SLOTS'
    after = '0 * SLOTS'
    self.assertEqual(ParsePerfJson(before).Simplify().ToPerfJson(), after)

  def test_RewriteMetricsInTermsOfOthers(self):
    """m2 = a+b+c is a subexpression of m1 = a+b+c+d, so m1 becomes m2+d.

    RewriteMetricsInTermsOfOthers compares subtrees via Equals(); it is
    surfaced through a temporary Expression.__eq__ so that assertEqual on
    the result dict works. The original __eq__ is saved and restored in a
    finally block so a failing assertion cannot leak the patch into other
    tests (the previous code left a clobbered __eq__ behind on failure).
    """
    saved_eq = Expression.__eq__
    Expression.__eq__ = lambda e1, e2: e1.Equals(e2)
    try:
      before = [('m1', ParsePerfJson('a + b + c + d')),
                ('m2', ParsePerfJson('a + b + c'))]
      after = {'m1': ParsePerfJson('m2 + d')}
      self.assertEqual(RewriteMetricsInTermsOfOthers(before), after)
    finally:
      Expression.__eq__ = saved_eq
|
|
|
|
# Allow direct execution (the file is installed executable so the build can
# run it without an explicit PYTHON interpreter being configured).
if __name__ == '__main__':
  unittest.main()