From f82fc3671bc46a8d17673c70d790d69d999f8af6 Mon Sep 17 00:00:00 2001
From: "Thing-han, Lim" <15379156+potsrevennil@users.noreply.github.com>
Date: Thu, 14 Nov 2024 18:07:51 +0800
Subject: [PATCH 1/4] tests: fix referencing self.compile_mode in Tests class

Signed-off-by: Thing-han, Lim <15379156+potsrevennil@users.noreply.github.com>
---
 scripts/lib/mlkem_test.py | 12 ++----------
 1 file changed, 2 insertions(+), 10 deletions(-)

diff --git a/scripts/lib/mlkem_test.py b/scripts/lib/mlkem_test.py
index 66a79c1b3..a4d4263ed 100644
--- a/scripts/lib/mlkem_test.py
+++ b/scripts/lib/mlkem_test.py
@@ -482,9 +482,7 @@ def init_results() -> TypedDict:
             print(f"::endgroup::")

             for k, result in results.items():
-                title = (
-                    "## " + (self._acvp.compile_mode) + " " + (k.capitalize()) + " Tests"
-                )
+                title = "## " + (self.compile_mode) + " " + (k.capitalize()) + " Tests"
                 github_summary(title, f"{TEST_TYPES.ACVP.desc()} encapDecap", result)

                 fail = reduce(lambda acc, c: acc or c, result.values(), fail)
@@ -521,13 +519,7 @@ def init_results() -> TypedDict:
             print(f"::endgroup::")

             for k, result in results.items():
-                title = (
-                    "## "
-                    + (self._acvp.ts[k].compile_mode)
-                    + " "
-                    + (k.capitalize())
-                    + " Tests"
-                )
+                title = "## " + (self.compile_mode) + " " + (k.capitalize()) + " Tests"
                 github_summary(title, f"{TEST_TYPES.ACVP.desc()} keyGen", result)

                 fail = reduce(lambda acc, c: acc or c, result.values(), fail)

From fc308ece7cc5c80da125f3f232ae4fedaa819655 Mon Sep 17 00:00:00 2001
From: "Thing-han, Lim" <15379156+potsrevennil@users.noreply.github.com>
Date: Thu, 14 Nov 2024 18:10:17 +0800
Subject: [PATCH 2/4] move mac_taskpolicy condition handling into _run_bench

Signed-off-by: Thing-han, Lim <15379156+potsrevennil@users.noreply.github.com>
---
 scripts/lib/mlkem_test.py | 21 +++++++++++++--------
 1 file changed, 13 insertions(+), 8 deletions(-)

diff --git a/scripts/lib/mlkem_test.py b/scripts/lib/mlkem_test.py
index a4d4263ed..a8ab00fce 100644
--- a/scripts/lib/mlkem_test.py
+++ b/scripts/lib/mlkem_test.py
@@ -546,7 +546,12 @@ def _acvp(opt: bool):
             exit(1)

     def _run_bench(
-        self, t: Test_Implementations, opt: bool, run_as_root: bool, exec_wrapper: str
+        self,
+        t: Test_Implementations,
+        opt: bool,
+        run_as_root: bool,
+        exec_wrapper: str,
+        mac_taskpolicy,
     ) -> TypedDict:
         cmd_prefix = []
         if run_as_root:
@@ -555,6 +560,13 @@ def _run_bench(
             )
             cmd_prefix.append("sudo")

+        if mac_taskpolicy:
+            if exec_wrapper:
+                logging.error(f"cannot set both --mac-taskpolicy and --exec-wrapper")
+                sys.exit(1)
+            else:
+                exec_wrapper = f"taskpolicy -c {mac_taskpolicy}"
+
         if exec_wrapper:
             logging.info(f"Running with customized wrapper.")
             exec_wrapper = exec_wrapper.split(" ")
@@ -579,13 +591,6 @@ def bench(
             t = self._bench_components

         output = False
-        if mac_taskpolicy:
-            if exec_wrapper:
-                logging.error(f"cannot set both --mac-taskpolicy and --exec-wrapper")
-                sys.exit(1)
-            else:
-                exec_wrapper = f"taskpolicy -c {mac_taskpolicy}"
-
         # NOTE: We haven't yet decided how to output both opt/no-opt benchmark results
         if self.opt.lower() == "all":
             if self.compile:

From 8d49b5f1740801dd0e0436a7c2209c46cb408740 Mon Sep 17 00:00:00 2001
From: "Thing-han, Lim" <15379156+potsrevennil@users.noreply.github.com>
Date: Thu, 14 Nov 2024 18:14:44 +0800
Subject: [PATCH 3/4] tests: clean up some underlying test functions (_run_xxx)

Merge the calls to compile and run_schemes into the
`Test_Implementations.test` function, thus removing the repeated
`if compile ...
if run ...` part in test functions

Signed-off-by: Thing-han, Lim <15379156+potsrevennil@users.noreply.github.com>
---
 scripts/lib/mlkem_test.py | 201 +++++++++++++++++++++-----------
 1 file changed, 109 insertions(+), 92 deletions(-)

diff --git a/scripts/lib/mlkem_test.py b/scripts/lib/mlkem_test.py
index a8ab00fce..fcafc0c63 100644
--- a/scripts/lib/mlkem_test.py
+++ b/scripts/lib/mlkem_test.py
@@ -260,6 +260,25 @@ def run_schemes(
         else:
             return results

+    def test(
+        self,
+        opt: bool,
+        compile: bool,
+        run: bool,
+        extra_make_envs={},
+        extra_make_args=[],
+        actual_proc: Callable[[bytes], str] = None,
+        expect_proc: Callable[[SCHEME, str], tuple[bool, str]] = None,
+        cmd_prefix: [str] = [],
+        extra_args: [str] = [],
+    ) -> TypedDict:
+        if compile:
+            self.compile(opt, extra_make_envs, extra_make_args)
+        if run:
+            return self.run_schemes(
+                opt, actual_proc, expect_proc, cmd_prefix, extra_args
+            )
+

 """
 Underlying functional tests
@@ -275,20 +294,19 @@ def __init__(self, opts: Options):
         self.opt = opts.opt
         self.verbose = opts.verbose

-        self._func = Test_Implementations(TEST_TYPES.MLKEM, copts)
-        self._nistkat = Test_Implementations(TEST_TYPES.NISTKAT, copts)
-        self._kat = Test_Implementations(TEST_TYPES.KAT, copts)
-        self._acvp = Test_Implementations(TEST_TYPES.ACVP, copts)
-        self._bench = Test_Implementations(TEST_TYPES.BENCH, copts)
-        self._bench_components = Test_Implementations(
+        self.__func = Test_Implementations(TEST_TYPES.MLKEM, copts)
+        self.__nistkat = Test_Implementations(TEST_TYPES.NISTKAT, copts)
+        self.__kat = Test_Implementations(TEST_TYPES.KAT, copts)
+        self.__acvp = Test_Implementations(TEST_TYPES.ACVP, copts)
+        self.__bench = Test_Implementations(TEST_TYPES.BENCH, copts)
+        self.__bench_components = Test_Implementations(
             TEST_TYPES.BENCH_COMPONENTS, copts
         )
-
         self.compile_mode = copts.compile_mode()
+
         self.compile = opts.compile
         self.run = opts.run

-    def _run_func(self, opt: bool):
+    def _func(self, opt: bool, compile: bool, run: bool) -> TypedDict:
         """Underlying function for functional test"""

         def expect(scheme: SCHEME, actual: str) -> tuple[bool, str]:
@@ -307,8 +325,10 @@ def expect(scheme: SCHEME, actual: str) -> tuple[bool, str]:
                 f"Failed, expecting {expect}, but getting {actual}" if fail else "",
             )

-        return self._func.run_schemes(
+        return self.__func.test(
             opt,
+            compile,
+            run,
             actual_proc=lambda result: str(result, encoding="utf-8"),
             expect_proc=expect,
         )
@@ -316,23 +336,16 @@ def expect(scheme: SCHEME, actual: str) -> tuple[bool, str]:
     def func(self):
         config_logger(self.verbose)

-        def _func(opt: bool):
-
-            if self.compile:
-                self._func.compile(opt)
-            if self.run:
-                return self._run_func(opt)
-
         fail = False
         if self.opt.lower() == "all" or self.opt.lower() == "no_opt":
-            fail = fail or _func(False)
+            fail = fail or self._func(False, self.compile, self.run)
         if self.opt.lower() == "all" or self.opt.lower() == "opt":
-            fail = fail or _func(True)
+            fail = fail or self._func(True, self.compile, self.run)

         if fail:
             exit(1)

-    def _run_nistkat(self, opt: bool):
+    def _nistkat(self, opt: bool, compile: bool, run: bool) -> TypedDict:
         def expect_proc(scheme: SCHEME, actual: str) -> tuple[bool, str]:
             expect = parse_meta(scheme, "nistkat-sha256")
             fail = expect != actual
@@ -342,8 +355,10 @@ def expect_proc(scheme: SCHEME, actual: str) -> tuple[bool, str]:
                 f"Failed, expecting {expect}, but getting {actual}" if fail else "",
             )

-        return self._nistkat.run_schemes(
+        return self.__nistkat.test(
             opt,
+            compile,
+            run,
             actual_proc=sha256sum,
             expect_proc=expect_proc,
         )
@@ -351,22 +366,16 @@ def expect_proc(scheme: SCHEME, actual: str) -> tuple[bool, str]:
     def nistkat(self):
         config_logger(self.verbose)

-        def _nistkat(opt: bool):
-            if self.compile:
-                self._nistkat.compile(opt)
-            if self.run:
-                return self._run_nistkat(opt)
-
         fail = False
         if self.opt.lower() == "all" or self.opt.lower() == "no_opt":
-            fail = fail or _nistkat(False)
+            fail = fail or self._nistkat(False, self.compile, self.run)
         if self.opt.lower() == "all" or self.opt.lower() == "opt":
-            fail = fail or _nistkat(True)
+            fail = fail or self._nistkat(True, self.compile, self.run)

         if fail:
             exit(1)

-    def _run_kat(self, opt: bool):
+    def _kat(self, opt: bool, compile: bool, run: bool) -> TypedDict:
         def expect_proc(scheme: SCHEME, actual: str) -> tuple[bool, str]:
             expect = parse_meta(scheme, "kat-sha256")
             fail = expect != actual
@@ -376,8 +385,10 @@ def expect_proc(scheme: SCHEME, actual: str) -> tuple[bool, str]:
                 f"Failed, expecting {expect}, but getting {actual}" if fail else "",
             )

-        return self._kat.run_schemes(
+        return self.__kat.test(
             opt,
+            compile,
+            run,
             actual_proc=sha256sum,
             expect_proc=expect_proc,
         )
@@ -385,23 +396,17 @@ def expect_proc(scheme: SCHEME, actual: str) -> tuple[bool, str]:
     def kat(self):
         config_logger(self.verbose)

-        def _kat(opt: bool):
-            if self.compile:
-                self._kat.compile(opt)
-            if self.run:
-                return self._run_kat(opt)
-
         fail = False
         if self.opt.lower() == "all" or self.opt.lower() == "no_opt":
-            fail = fail or _kat(False)
+            fail = fail or self._kat(False, self.compile, self.run)
         if self.opt.lower() == "all" or self.opt.lower() == "opt":
-            fail = fail or _kat(True)
+            fail = fail or self._kat(True, self.compile, self.run)

         if fail:
             exit(1)

-    def _run_acvp(self, opt: bool, acvp_dir: str = "test/acvp_data"):
+    def _run_acvp(self, opt: bool, acvp_dir: str = "test/acvp_data") -> bool:
         acvp_keygen_json = f"{acvp_dir}/acvp_keygen_internalProjection.json"
         acvp_encapDecap_json = f"{acvp_dir}/acvp_encapDecap_internalProjection.json"

@@ -468,7 +473,7 @@ def init_results() -> TypedDict:
                     f"c={tc['c']}",
                 ]

-                rs = self._acvp.run_scheme(
+                rs = self.__acvp.run_scheme(
                     opt,
                     scheme,
                     extra_args=extra_args,
@@ -505,7 +510,7 @@ def init_results() -> TypedDict:
                     f"d={tc['d']}",
                 ]

-                rs = self._acvp.run_scheme(
+                rs = self.__acvp.run_scheme(
                     opt,
                     scheme,
                     extra_args=extra_args,
@@ -526,29 +531,34 @@ def init_results() -> TypedDict:

         return fail

+    def _acvp(
+        self, opt: bool, compile: bool, run: bool, acvp_dir: str = "test/acvp_data/"
+    ) -> bool:
+        if compile:
+            self.__acvp.compile(opt)
+        if run:
+            return self._run_acvp(opt, acvp_dir)
+
     def acvp(self, acvp_dir: str):
         config_logger(self.verbose)

-        def _acvp(opt: bool):
-            if self.compile:
-                self._acvp.compile(opt)
-            if self.run:
-                return self._run_acvp(opt, acvp_dir)
-
         fail = False
         if self.opt.lower() == "all" or self.opt.lower() == "no_opt":
-            fail = fail or _acvp(False)
+            fail = fail or self._acvp(False, self.compile, self.run, acvp_dir)
         if self.opt.lower() == "all" or self.opt.lower() == "opt":
-            fail = fail or _acvp(True)
+            fail = fail or self._acvp(True, self.compile, self.run, acvp_dir)

         if fail:
             exit(1)

-    def _run_bench(
+    def _bench(
         self,
         t: Test_Implementations,
         opt: bool,
+        compile: bool,
+        run: bool,
+        cycles,
         run_as_root: bool,
         exec_wrapper: str,
         mac_taskpolicy,
@@ -572,7 +582,13 @@ def _run_bench(
             exec_wrapper = exec_wrapper.split(" ")
             cmd_prefix = cmd_prefix + exec_wrapper

-        return t.run_schemes(opt, cmd_prefix=cmd_prefix)
+        return t.test(
+            opt,
+            compile,
+            run,
+            extra_make_args=[f"CYCLES={cycles}"],
+            cmd_prefix=cmd_prefix,
+        )

     def bench(
         self,
@@ -586,34 +602,44 @@ def bench(
         config_logger(self.verbose)

         if components is False:
-            t = self._bench
+            t = self.__bench
         else:
-            t = self._bench_components
+            t = self.__bench_components

         output = False

-        # NOTE: We haven't yet decided how to output both opt/no-opt benchmark results
         if self.opt.lower() == "all":
-            if self.compile:
-                t.compile(False, extra_make_args=[f"CYCLES={cycles}"])
-            if self.run:
-                self._run_bench(t, False, run_as_root, exec_wrapper)
-            if self.compile:
-                t.compile(True, extra_make_args=[f"CYCLES={cycles}"])
-            if self.run:
-                resultss = self._run_bench(t, True, run_as_root, exec_wrapper)
+            # NOTE: We haven't yet decided how to output both opt/no-opt benchmark results
+            self._bench(
+                t,
+                False,
+                self.compile,
+                self.run,
+                cycles,
+                run_as_root,
+                exec_wrapper,
+                mac_taskpolicy,
+            )
+            resultss = self._bench(
+                t,
+                True,
+                self.compile,
+                self.run,
+                cycles,
+                run_as_root,
+                exec_wrapper,
+                mac_taskpolicy,
+            )
         else:
-            if self.compile:
-                t.compile(
-                    True if self.opt.lower() == "opt" else False,
-                    extra_make_args=[f"CYCLES={cycles}"],
-                )
-            if self.run:
-                resultss = self._run_bench(
-                    t,
-                    True if self.opt.lower() == "opt" else False,
-                    run_as_root,
-                    exec_wrapper,
-                )
+            resultss = self._bench(
+                t,
+                True if self.opt.lower() == "opt" else False,
+                self.compile,
+                self.run,
+                cycles,
+                run_as_root,
+                exec_wrapper,
+                mac_taskpolicy,
+            )

         if resultss is None:
             exit(0)
@@ -654,33 +680,26 @@ def all(self, func: bool, kat: bool, nistkat: bool, acvp: bool):
         def all(opt: bool):
             code = 0

-            if self.compile:
-                compiles = [
-                    *([self._func.compile] if func else []),
-                    *([self._nistkat.compile] if nistkat else []),
-                    *([self._kat.compile] if kat else []),
-                    *([self._acvp.compile] if acvp else []),
-                ]
+            fs = [
+                *([self._func] if func else []),
+                *([self._nistkat] if nistkat else []),
+                *([self._kat] if kat else []),
+                *([self._acvp] if acvp else []),
+            ]

-                for f in compiles:
+            if self.compile:
+                for f in fs:
                     try:
-                        f(opt)
+                        f(opt, True, False)
                     except SystemExit as e:
                         code = code or e

                     sys.stdout.flush()

             if self.run:
-                runs = [
-                    *([self._run_func] if func else []),
-                    *([self._run_nistkat] if nistkat else []),
-                    *([self._run_kat] if kat else []),
-                    *([self._run_acvp] if acvp else []),
-                ]
-
-                for f in runs:
+                for f in fs:
                     try:
-                        code = code or int(f(opt))
+                        code = code or int(f(opt, False, True))
                     except SystemExit as e:
                         code = code or e

From c909fed458f723f6d6df4943e31f1b808b383ba8 Mon Sep 17 00:00:00 2001
From: "Thing-han, Lim" <15379156+potsrevennil@users.noreply.github.com>
Date: Fri, 15 Nov 2024 15:10:23 +0800
Subject: [PATCH 4/4] Add comments for Test_Implementations methods

Signed-off-by: Thing-han, Lim <15379156+potsrevennil@users.noreply.github.com>
---
 scripts/lib/mlkem_test.py | 32 ++++++++++++++++++++++++++++++++
 1 file changed, 32 insertions(+)

diff --git a/scripts/lib/mlkem_test.py b/scripts/lib/mlkem_test.py
index fcafc0c63..487a12eac 100644
--- a/scripts/lib/mlkem_test.py
+++ b/scripts/lib/mlkem_test.py
@@ -194,6 +194,9 @@ def compile(
         extra_make_envs={},
         extra_make_args=[],
     ):
+        """
+        opt: whether to compile the opt or the non-opt implementation
+        """
         self.ts["opt" if opt else "no_opt"].compile_schemes(
             extra_make_envs,
             extra_make_args,
@@ -208,6 +211,15 @@ def run_scheme(
         prefix: [str] = [],
         extra_args: [str] = [],
     ) -> TypedDict:
+        """
+        opt: whether to run the opt or the non-opt implementation
+        actual_proc: a function for processing the actual test output into a string
+        expect_proc: a function for comparing the actual_proc output with the desired behaviour, which depends on SCHEME
+        prefix: cmd prefix for running the testing binaries
+        extra_args: extra arguments for running the testing binaries
+
+        output: {opt/no_opt: {scheme: result}}
+        """
         k = "opt" if opt else "no_opt"

         results = {}
@@ -226,6 +238,15 @@ def run_schemes(
         cmd_prefix: [str] = [],
         extra_args: [str] = [],
     ) -> TypedDict:
+        """
+        opt: whether to run the opt or the non-opt implementation
+        actual_proc: a function for processing the actual test output into a string
+        expect_proc: a function for comparing the actual_proc output with the desired behaviour, which depends on SCHEME
+        cmd_prefix: cmd prefix for running the testing binaries
+        extra_args: extra arguments for running the testing binaries
+
+        output: {opt/no_opt: {scheme: result}}
+        """
         results = {}

         k = "opt" if opt else "no_opt"
@@ -272,6 +293,17 @@ def test(
         cmd_prefix: [str] = [],
         extra_args: [str] = [],
     ) -> TypedDict:
+        """
+        opt: whether to compile/run the opt or the non-opt implementation
+        compile: compile the test binaries if true
+        run: run the binaries and check that the output is as expected if true
+        actual_proc: a function for processing the actual test output into a string
+        expect_proc: a function for comparing the actual_proc output with the desired behaviour, which depends on SCHEME
+        cmd_prefix: cmd prefix for running the testing binaries
+        extra_args: extra arguments for running the testing binaries
+
+        output: {opt/no_opt: {scheme: result}}
+        """
         if compile:
             self.compile(opt, extra_make_envs, extra_make_args)
         if run:
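For illustration only, and not part of the patch series above: a minimal sketch of how the consolidated Test_Implementations.test entry point from PATCH 3/4 is meant to be driven, assuming an instance built as in Tests.__init__ (for example Test_Implementations(TEST_TYPES.KAT, copts)) and the parse_meta/sha256sum helpers already used by the KAT test in scripts/lib/mlkem_test.py. The wrapper name kat_example is hypothetical.

# Illustrative sketch only (hypothetical wrapper, not part of the patches).
# Assumes Test_Implementations, TEST_TYPES, parse_meta and sha256sum from
# scripts/lib/mlkem_test.py are in scope.
def kat_example(ti, opt: bool):
    def expect_proc(scheme, actual):
        expect = parse_meta(scheme, "kat-sha256")
        fail = expect != actual
        return (
            fail,
            f"Failed, expecting {expect}, but getting {actual}" if fail else "",
        )

    # compile=True builds the test binaries; run=True executes them and returns
    # the per-scheme results ({scheme: result}). With run=False, test() only
    # compiles and returns None, which is how Tests.all() splits the two phases
    # (f(opt, True, False) followed by f(opt, False, True)).
    return ti.test(
        opt,
        True,  # compile
        True,  # run
        actual_proc=sha256sum,
        expect_proc=expect_proc,
    )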