From 2d0d157fffe94b03261c7b9d2aad9fc5a0cf78e7 Mon Sep 17 00:00:00 2001 From: Stephane Odul Date: Fri, 2 Feb 2024 23:33:09 -0800 Subject: [PATCH] Add more typing to the codebase and minor fixes. - Improve maintainability of the codebase while leaving the business logic the same. - Remove more code specific to python 2.7. - Use sets and tuples instead of lists when they are more efficient. - Remove redundant `str()` calls. - Move default_args instantiation to runtime instead of import time. --- green/cmdline.py | 8 +- green/command.py | 41 ++- green/config.py | 273 ++++++++++---------- green/djangorunner.py | 39 +-- green/examples.py | 19 +- green/junit.py | 95 ++++--- green/process.py | 12 +- green/result.py | 107 ++++---- green/suite.py | 3 +- green/test/test_config.py | 506 ++++++++++++++++++++++---------------- green/test/test_junit.py | 6 +- green/test/test_result.py | 8 +- green/test/test_runner.py | 6 +- green/test/test_suite.py | 17 +- 14 files changed, 654 insertions(+), 486 deletions(-) diff --git a/green/cmdline.py b/green/cmdline.py index fd10a99..a6c00dd 100644 --- a/green/cmdline.py +++ b/green/cmdline.py @@ -1,3 +1,5 @@ +"""The green command line entry point.""" + from __future__ import annotations @@ -53,7 +55,8 @@ def _main(argv: Sequence[str] | None, testing: bool) -> int: # Add debug logging for stuff that happened before this point here if config.files_loaded: - debug("Loaded config file(s): {}".format(", ".join(config.files_loaded))) + loaded_files = ", ".join(str(path) for path in config.files_loaded) + debug(f"Loaded config file(s): {loaded_files}") # Discover/Load the test suite if testing: @@ -81,7 +84,7 @@ def _main(argv: Sequence[str] | None, testing: bool) -> int: return int(not result.wasSuccessful()) -def main(argv: Sequence[str] | None = None, testing: bool = False): +def main(argv: Sequence[str] | None = None, testing: bool = False) -> int: # create the temp dir only once (i.e., not while in the recursed call) if not os.environ.get("TMPDIR"): # pragma: nocover try: @@ -97,6 +100,7 @@ def main(argv: Sequence[str] | None = None, testing: bool = False): if os_error.errno == 39: # "Directory not empty" when trying to delete the temp dir can just be a warning print(f"warning: {os_error.strerror}") + return 0 else: raise os_error else: diff --git a/green/command.py b/green/command.py index 1116a1a..f510a1a 100644 --- a/green/command.py +++ b/green/command.py @@ -1,29 +1,43 @@ +"""Registers the green command with setuptools.""" + from __future__ import annotations +import functools import sys +from typing import TYPE_CHECKING from setuptools import Command from green.config import parseArguments from green.cmdline import main +if TYPE_CHECKING: + from argparse import Action + -def get_user_options(): +def get_user_options() -> list[tuple[str, str | None, str | None]]: # When running "python setup.py --help-commands", setup.py will call this # function -- but green isn't actually being called. if "--help-commands" in sys.argv: return [] - r = parseArguments() - options = [] + args = parseArguments() + options: list[tuple[str, str | None, str | None]] = [] - for action in r.store_opt.actions: - names = [str(name.lstrip("-")) for name in action.option_strings] + action: Action + for action in args.store_opt.actions: + names = [name.lstrip("-") for name in action.option_strings] + short_name: str | None if len(names) == 1: - names.insert(0, None) + full_name = names[0] + short_name = None + else: + # TODO: We might want to pick the longer of the two for full_name. 
+ full_name = names[1] + short_name = names[0] if not action.const: - names[1] += "=" - options.append((names[1], names[0], action.help)) + full_name += "=" + options.append((full_name, short_name, action.help)) return options @@ -31,16 +45,19 @@ def get_user_options(): class green(Command): command_name = "green" description = "Run unit tests using green" - user_options = get_user_options() - def initialize_options(self): + @functools.cached_property + def user_options(self) -> list[tuple[str, str | None, str | None]]: + return get_user_options() + + def initialize_options(self) -> None: for name, _, _ in self.user_options: setattr(self, name.replace("-", "_").rstrip("="), None) - def finalize_options(self): + def finalize_options(self) -> None: pass - def run(self): + def run(self) -> None: self.ensure_finalized() if self.distribution.install_requires: diff --git a/green/config.py b/green/config.py index 00b2079..cc3af0b 100644 --- a/green/config.py +++ b/green/config.py @@ -1,68 +1,79 @@ +""" +Handle the command line options and config file parsing. +""" + # We have to use this entire file before we can turn coverage on, so we exclude # it from coverage. We still have tests, though! -from __future__ import annotations +from __future__ import annotations # pragma: no cover import argparse # pragma: no cover import configparser # pragma: no cover -from typing import Sequence - -import coverage # pragma: no cover - -coverage_version = f"Coverage {coverage.__version__}" # pragma: no cover - import copy # pragma: no cover +import functools # pragma: no cover import logging # pragma: no cover +import multiprocessing # pragma: no cover import os # pragma: no cover +import pathlib # pragma: no cover import sys # pragma: no cover import tempfile # pragma: no cover from textwrap import dedent # pragma: no cover -import multiprocessing # pragma: no cover +from typing import Callable, Sequence # pragma: no cover + +import coverage # pragma: no cover + +coverage_version = f"Coverage {coverage.__version__}" # pragma: no cover # Used for debugging output in cmdline, since we can't do debug output here. -files_loaded = [] # pragma: no cover - -# Set the defaults in a re-usable way -default_args = argparse.Namespace( # pragma: no cover - targets=["."], # Not in configs - processes=multiprocessing.cpu_count(), - initializer="", - finalizer="", - maxtasksperchild=None, - termcolor=None, - notermcolor=None, - disable_windows=False, - allow_stdout=False, - quiet_stdout=False, - no_skip_report=False, - no_tracebacks=False, - help=False, # Not in configs - version=False, - logging=False, - debug=0, - verbose=1, - disable_unidecode=False, - failfast=False, - config=None, # Not in configs - file_pattern="test*.py", - test_pattern="*", - junit_report="", - run_coverage=False, - cov_config_file=True, # A string with a special boolean default - quiet_coverage=False, - clear_omit=False, - omit_patterns=None, - include_patterns=None, - minimum_coverage=None, - completion_file=False, - completions=False, - options=False, - # These are not really options, they are added later for convenience - parser=None, - store_opt=None, - # not implemented, but unittest stub in place - warnings="", -) +files_loaded: list[pathlib.Path] = [] # pragma: no cover + + +# TODO: switch to functools.cache after 3.9+ is the minimum supported version. +@functools.lru_cache # pragma: no cover +def get_default_args() -> argparse.Namespace: + """ + Set the defaults in a re-usable way. 
+ """ + return argparse.Namespace( # pragma: no cover + targets=["."], # Not in configs + processes=multiprocessing.cpu_count(), + initializer="", + finalizer="", + maxtasksperchild=None, + termcolor=None, + notermcolor=None, + disable_windows=False, + allow_stdout=False, + quiet_stdout=False, + no_skip_report=False, + no_tracebacks=False, + help=False, # Not in configs + version=False, + logging=False, + debug=0, + verbose=1, + disable_unidecode=False, + failfast=False, + config=None, # Not in configs + file_pattern="test*.py", + test_pattern="*", + junit_report="", + run_coverage=False, + cov_config_file=True, # A string with a special boolean default + quiet_coverage=False, + clear_omit=False, + omit_patterns=None, + include_patterns=None, + minimum_coverage=None, + completion_file=False, + completions=False, + options=False, + # These are not really options, they are added later for convenience + parser=None, + store_opt=None, + # not implemented, but unittest stub in place + warnings="", + ) class StoreOpt: # pragma: no cover @@ -71,11 +82,11 @@ class StoreOpt: # pragma: no cover shell completion scripts. """ - def __init__(self): - self.options = [] - self.actions = [] + def __init__(self) -> None: + self.options: list[str] = [] + self.actions: list[argparse.Action] = [] - def __call__(self, action): + def __call__(self, action: argparse.Action) -> None: self.actions.append(action) self.options.extend(action.option_strings[0:2]) @@ -566,42 +577,29 @@ class ConfigFile: # pragma: no cover """ Filehandle wrapper that adds a "[green]" section to the start of a config file so that users don't actually have to manually add a [green] section. - - Works with configparser versions from both Python 2 and 3 """ - def __init__(self, filepath): - self.first = True - with open(filepath) as f: - self.lines = f.readlines() - - # Python 2.7 (Older dot versions) - def readline(self): - try: - return self.__next__() - except StopIteration: - return "" - - # Python 2.7 (Newer dot versions) - def next(self): - return self.__next__() + def __init__(self, filepath: pathlib.Path) -> None: + self._first = True + self._lines = filepath.read_text().splitlines(keepends=True) - # Python 3 def __iter__(self): return self def __next__(self): - if self.first: - self.first = False + if self._first: + self._first = False return "[green]\n" - if self.lines: - return self.lines.pop(0) + if self._lines: + return self._lines.pop(0) raise StopIteration # Since this must be imported before coverage is started, we get erroneous # reports of not covering this function during our internal coverage tests. -def getConfig(filepath=None): # pragma: no cover +def getConfig( # pragma: no cover + filepath: str | pathlib.Path | None = None, +) -> configparser.ConfigParser: """ Get the Green config file settings. 
@@ -613,47 +611,54 @@ def getConfig(filepath=None): # pragma: no cover """ parser = configparser.ConfigParser() - filepaths = [] + filepaths: list[pathlib.Path] = [] # Lowest priority goes first in the list - home = os.getenv("HOME") - if home: - default_filepath = os.path.join(home, ".green") - if os.path.isfile(default_filepath): + try: + default_filepath = pathlib.Path.home() / ".green" + if default_filepath.is_file(): filepaths.append(default_filepath) + except RuntimeError: + pass # Low priority - env_filepath = os.getenv("GREEN_CONFIG") - if env_filepath and os.path.isfile(env_filepath): - filepaths.append(env_filepath) + green_config_env = os.getenv("GREEN_CONFIG") + if green_config_env: + config_path = pathlib.Path(green_config_env) + if config_path.is_file(): + filepaths.append(config_path) + cwd = pathlib.Path.cwd() # Medium priority for cfg_file in ("setup.cfg", ".green"): - cwd_filepath = os.path.join(os.getcwd(), cfg_file) - if os.path.isfile(cwd_filepath): - filepaths.append(cwd_filepath) + config_path = cwd / cfg_file + if config_path.is_file(): + filepaths.append(config_path) # High priority - if filepath and os.path.isfile(filepath): - filepaths.append(filepath) + if filepath: + config_path = pathlib.Path(filepath) + if config_path.is_file(): + filepaths.append(config_path) if filepaths: global files_loaded files_loaded = filepaths - for filepath in filepaths: + for config_path in filepaths: # Users are expected to put a [green] section # only if they use setup.cfg - if filepath.endswith("setup.cfg"): - with open(filepath) as f: - parser.read_file(f) + if config_path.name == "setup.cfg": + parser.read(config_path) else: - parser.read_file(ConfigFile(filepath)) + parser.read_file(ConfigFile(config_path)) return parser # Since this must be imported before coverage is started, we get erroneous # reports of not covering this function during our internal coverage tests. -def mergeConfig(args, testing=False): # pragma: no cover +def mergeConfig( # pragma: no cover + args: argparse.Namespace, testing: bool = False +) -> argparse.Namespace: """ I take in a namespace created by the ArgumentParser in cmdline.main() and merge in options from configuration files. The config items only replace @@ -667,13 +672,14 @@ def mergeConfig(args, testing=False): # pragma: no cover extended, taking clear-omit into account. cov = coverage object default None """ + default_args = get_default_args() config = getConfig(getattr(args, "config", default_args.config)) new_args = copy.deepcopy(default_args) # Default by default! 
- for name, default_value in dict(default_args._get_kwargs()).items(): + for name, _ in default_args._get_kwargs(): # Config options overwrite default options - config_getter = None - if name in [ + config_getter: Callable | None = None + if name in { "termcolor", "notermcolor", "allow_stdout", @@ -692,17 +698,17 @@ def mergeConfig(args, testing=False): # pragma: no cover "no_tracebacks", "disable_windows", "quiet_coverage", - ]: + }: config_getter = config.getboolean - elif name in [ + elif name in { "processes", "debug", "verbose", "minimum_coverage", "maxtasksperchild", - ]: + }: config_getter = config.getint - elif name in [ + elif name in { "file_pattern", "finalizer", "initializer", @@ -712,11 +718,11 @@ def mergeConfig(args, testing=False): # pragma: no cover "warnings", "test_pattern", "junit_report", - ]: + }: config_getter = config.get - elif name in ["targets", "help", "config"]: + elif name in {"targets", "help", "config"}: pass # Some options only make sense coming on the command-line. - elif name in ["store_opt", "parser"]: + elif name in {"store_opt", "parser"}: pass # These are convenience objects, not actual settings else: raise NotImplementedError(name) @@ -769,32 +775,33 @@ def mergeConfig(args, testing=False): # pragma: no cover # Coverage. We must enable it here because we cannot cover module-level # code after it is imported, and this is the earliest place we can turn on # coverage. - omit_patterns = [ - "*/argparse*", - "*/colorama*", - "*/django/*", - "*/distutils*", # Gets pulled in on Travis-CI CPython - "*/extras*", # pulled in by testtools - "*/linecache2*", # pulled in by testtools - "*/mimeparse*", # pulled in by testtools - "*/mock*", - "*/pbr*", # pulled in by testtools - "*/pkg_resources*", # pulled in by django - "*/pypy*", - "*/pytz*", # pulled in by django - "*/six*", # pulled in by testtools - "*/termstyle*", - "*/test*", - "*/traceback2*", # pulled in by testtools - "*/unittest2*", # pulled in by testtools - "*Python.framework*", # OS X system python - "*site-packages*", # System python for other OS's - "*/dist-packages*", # Resolves issue #259 - tempfile.gettempdir() + "*", - ] + omit_patterns: list[str] if new_args.clear_omit: omit_patterns = [] - + else: + omit_patterns = [ + "*/argparse*", + "*/colorama*", + "*/django/*", + "*/distutils*", # Gets pulled in on Travis-CI CPython + "*/extras*", # pulled in by testtools + "*/linecache2*", # pulled in by testtools + "*/mimeparse*", # pulled in by testtools + "*/mock*", + "*/pbr*", # pulled in by testtools + "*/pkg_resources*", # pulled in by django + "*/pypy*", + "*/pytz*", # pulled in by django + "*/six*", # pulled in by testtools + "*/termstyle*", + "*/test*", + "*/traceback2*", # pulled in by testtools + "*/unittest2*", # pulled in by testtools + "*Python.framework*", # OS X system python + "*site-packages*", # System python for other OS's + "*/dist-packages*", # Resolves issue #259 + tempfile.gettempdir() + "*", + ] if new_args.omit_patterns: omit_patterns.extend(new_args.omit_patterns.split(",")) new_args.omit_patterns = omit_patterns @@ -804,10 +811,10 @@ def mergeConfig(args, testing=False): # pragma: no cover else: new_args.include_patterns = [] - if new_args.quiet_coverage or (type(new_args.cov_config_file) == str): + if new_args.quiet_coverage or isinstance(new_args.cov_config_file, str): new_args.run_coverage = True - if new_args.minimum_coverage != None: + if new_args.minimum_coverage is not None: new_args.run_coverage = True if new_args.run_coverage: diff --git a/green/djangorunner.py 
b/green/djangorunner.py index 437a7f2..c7b5f74 100644 --- a/green/djangorunner.py +++ b/green/djangorunner.py @@ -10,10 +10,10 @@ from __future__ import annotations -from argparse import Namespace +from argparse import ArgumentParser, Namespace import os import sys -from typing import Sequence +from typing import Any, Final, Sequence from green.config import mergeConfig from green.loader import GreenTestLoader @@ -24,11 +24,11 @@ # If we're not being run from an actual django project, set up django config os.environ.setdefault("DJANGO_SETTINGS_MODULE", "green.djangorunner") BASE_DIR = os.path.dirname(os.path.dirname(__file__)) -SECRET_KEY = ")9^_e(=cisybdt4m4+fs+_wb%d$!9mpcoy0um^alvx%gexj#jv" +SECRET_KEY: Final[str] = ")9^_e(=cisybdt4m4+fs+_wb%d$!9mpcoy0um^alvx%gexj#jv" DEBUG = True TEMPLATE_DEBUG = True ALLOWED_HOSTS: Sequence[str] = [] -INSTALLED_APPS = ( +INSTALLED_APPS: Final[Sequence[str]] = ( "django.contrib.admin", "django.contrib.auth", "django.contrib.contenttypes", @@ -37,7 +37,7 @@ "django.contrib.staticfiles", "myapp", ) -MIDDLEWARE_CLASSES = ( +MIDDLEWARE_CLASSES: Final[Sequence[str]] = ( "django.contrib.sessions.middleware.SessionMiddleware", "django.middleware.common.CommonMiddleware", "django.middleware.csrf.CsrfViewMiddleware", @@ -46,24 +46,24 @@ "django.contrib.messages.middleware.MessageMiddleware", "django.middleware.clickjacking.XFrameOptionsMiddleware", ) -ROOT_URLCONF = "myproj.urls" -WSGI_APPLICATION = "myproj.wsgi.application" -DATABASES = { +ROOT_URLCONF: Final[str] = "myproj.urls" +WSGI_APPLICATION: Final[str] = "myproj.wsgi.application" +DATABASES: Final[dict[str, dict[str, str]]] = { "default": { "ENGINE": "django.db.backends.sqlite3", "NAME": os.path.join(BASE_DIR, "db.sqlite3"), } } -LANGUAGE_CODE = "en-us" -TIME_ZONE = "UTC" +LANGUAGE_CODE: Final[str] = "en-us" +TIME_ZONE: Final[str] = "UTC" USE_I18N = True USE_L10N = True USE_TZ = True -STATIC_URL = "/static/" +STATIC_URL: Final[str] = "/static/" # End of django fake config stuff -def django_missing(): +def django_missing() -> None: raise ImportError("No django module installed") @@ -75,13 +75,13 @@ def django_missing(): from django.test.runner import DiscoverRunner class DjangoRunner(DiscoverRunner): - def __init__(self, verbose=-1, **kwargs): + def __init__(self, verbose: int = -1, **kwargs): super().__init__(**kwargs) self.verbose = verbose self.loader = GreenTestLoader() @classmethod - def add_arguments(cls, parser): + def add_arguments(cls, parser: ArgumentParser) -> None: parser.add_argument( "--green-verbosity", action="store", @@ -94,7 +94,14 @@ def add_arguments(cls, parser): ) super().add_arguments(parser) - def run_tests(self, test_labels, extra_tests=None, **kwargs): + # FIXME: extra_tests is not used, we should either use it or update the + # documentation accordingly. + def run_tests( + self, + test_labels: list[str] | tuple[str, ...], + extra_tests: Any = None, + **kwargs: Any, + ): """ Run the unit tests for all the test labels in the provided list. 
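
For context, DiscoverRunner subclasses such as this one are enabled through Django's TEST_RUNNER setting, and the --green-verbosity flag registered in add_arguments() above maps onto the runner's verbose option. A minimal sketch, with the app name hypothetical:

    # settings.py
    TEST_RUNNER = "green.djangorunner.DjangoRunner"

    # Then, from the shell:
    #   python manage.py test myapp --green-verbosity 3
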
@@ -111,7 +118,7 @@ def run_tests(self, test_labels, extra_tests=None, **kwargs): django_db = self.setup_databases() # Green - if type(test_labels) == tuple: + if isinstance(test_labels, tuple): test_labels = list(test_labels) else: raise ValueError("test_labels should be a tuple of strings") diff --git a/green/examples.py b/green/examples.py index 03b790e..8bcbe55 100644 --- a/green/examples.py +++ b/green/examples.py @@ -2,53 +2,54 @@ import sys import unittest +from typing import Final -doctest_modules = ["green.examples"] +doctest_modules: Final[list[str]] = ["green.examples"] class TestStates(unittest.TestCase): - def test0Pass(self): + def test0Pass(self) -> None: """ This test will print output to stdout, and then pass. """ print("Sunshine and daisies") - def test1Fail(self): + def test1Fail(self) -> None: """ This test will print output to stderr, and then fail an assertion. """ sys.stderr.write("Doom and gloom.\n") self.assertTrue(False) - def test2Error(self): + def test2Error(self) -> None: """ An Exception will be raised (and not caught) while running this test. """ raise Exception @unittest.skip("This is the 'reason' portion of the skipped test.") - def test3Skip(self): + def test3Skip(self) -> None: """ This test will be skipped. """ pass @unittest.expectedFailure - def test4ExpectedFailure(self): + def test4ExpectedFailure(self) -> None: """ This test will fail, but we expect it to. """ self.assertEqual(True, False) @unittest.expectedFailure - def test5UnexpectedPass(self): + def test5UnexpectedPass(self) -> None: """ This test will pass, but we expected it to fail! """ pass -def some_function(): +def some_function() -> int: """ This will fail because some_function() does not, in fact, return 100. >>> some_function() @@ -58,7 +59,7 @@ def some_function(): class MyClass: - def my_method(self): + def my_method(self) -> str: """ This will pass. >>> s = MyClass() diff --git a/green/junit.py b/green/junit.py index b705c86..8b735c2 100644 --- a/green/junit.py +++ b/green/junit.py @@ -1,28 +1,47 @@ +"""Classes and methods to generate JUnit XML reports.""" + from __future__ import annotations +from typing import Dict, List, Final, TextIO, Tuple, TYPE_CHECKING, Union + from lxml.etree import Element, tostring as to_xml +if TYPE_CHECKING: + # TypeAlias moved to the typing module after py3.9. + from typing_extensions import TypeAlias + + from green.result import GreenTestResult, ProtoTest, ProtoError + from lxml.etree import _Element + + TestVerdict: TypeAlias = Union[ + Tuple[int, ProtoTest], Tuple[int, ProtoTest, Union[str, ProtoError]] + ] + TestsCollection: TypeAlias = Dict[str, List[TestVerdict]] + + +# TODO: consider using enum.Enum (new in py3.4) for the JUnitDialect and Verdict classes. + class JUnitDialect: """ Hold the name of the elements defined in the JUnit XML schema (for JUnit 4). 
""" - CLASS_NAME = "classname" - ERROR = "error" - ERROR_COUNT = "errors" - FAILURE = "failure" - FAILURE_COUNT = "failures" - NAME = "name" - SKIPPED = "skipped" - SKIPPED_COUNT = "skipped" - SYSTEM_ERR = "system-err" - SYSTEM_OUT = "system-out" - TEST_CASE = "testcase" - TEST_COUNT = "tests" - TEST_SUITE = "testsuite" - TEST_SUITES = "testsuites" - TEST_TIME = "time" + CLASS_NAME: Final[str] = "classname" + ERROR: Final[str] = "error" + ERROR_COUNT: Final[str] = "errors" + FAILURE: Final[str] = "failure" + FAILURE_COUNT: Final[str] = "failures" + NAME: Final[str] = "name" + SKIPPED: Final[str] = "skipped" + SKIPPED_COUNT: Final[str] = "skipped" + SYSTEM_ERR: Final[str] = "system-err" + SYSTEM_OUT: Final[str] = "system-out" + TEST_CASE: Final[str] = "testcase" + TEST_COUNT: Final[str] = "tests" + TEST_SUITE: Final[str] = "testsuite" + TEST_SUITES: Final[str] = "testsuites" + TEST_TIME: Final[str] = "time" class Verdict: @@ -30,10 +49,10 @@ class Verdict: Enumeration of possible test verdicts """ - PASSED = 0 - FAILED = 1 - ERROR = 2 - SKIPPED = 3 + PASSED: Final[int] = 0 + FAILED: Final[int] = 1 + ERROR: Final[int] = 2 + SKIPPED: Final[int] = 3 class JUnitXML: @@ -45,9 +64,13 @@ class JUnitXML: See Option '-j' / '--junit-report' """ - def save_as(self, test_results, destination): + def save_as(self, test_results: GreenTestResult, destination: TextIO) -> None: + """ + Write the JUnit XML report to the given file-like object. + """ xml_root = Element(JUnitDialect.TEST_SUITES) tests_by_class = self._group_tests_by_class(test_results) + suite: list[TestVerdict] for name, suite in tests_by_class.items(): xml_suite = self._convert_suite(test_results, name, suite) xml_root.append(xml_suite) @@ -63,8 +86,10 @@ def save_as(self, test_results, destination): ) destination.write(xml.decode()) - def _group_tests_by_class(self, test_results): - result = {} + def _group_tests_by_class( + self, test_results: GreenTestResult + ) -> dict[str, list[TestVerdict]]: + result: TestsCollection = {} self._add_passing_tests(result, test_results) self._add_failures(result, test_results) self._add_errors(result, test_results) @@ -72,7 +97,9 @@ def _group_tests_by_class(self, test_results): return result @staticmethod - def _add_passing_tests(collection, test_results): + def _add_passing_tests( + collection: TestsCollection, test_results: GreenTestResult + ) -> None: for each_test in test_results.passing: key = JUnitXML._suite_name(each_test) if key not in collection: @@ -80,11 +107,11 @@ def _add_passing_tests(collection, test_results): collection[key].append((Verdict.PASSED, each_test)) @staticmethod - def _suite_name(test): + def _suite_name(test) -> str: return f"{test.module}.{test.class_name}" @staticmethod - def _add_failures(collection, test_results): + def _add_failures(collection: TestsCollection, test_results: GreenTestResult): for each_test, failure in test_results.failures: key = JUnitXML._suite_name(each_test) if key not in collection: @@ -92,7 +119,7 @@ def _add_failures(collection, test_results): collection[key].append((Verdict.FAILED, each_test, failure)) @staticmethod - def _add_errors(collection, test_results): + def _add_errors(collection: TestsCollection, test_results: GreenTestResult): for each_test, error in test_results.errors: key = JUnitXML._suite_name(each_test) if key not in collection: @@ -100,14 +127,16 @@ def _add_errors(collection, test_results): collection[key].append((Verdict.ERROR, each_test, error)) @staticmethod - def _add_skipped_tests(collection, test_results): + def 
_add_skipped_tests(collection: TestsCollection, test_results: GreenTestResult): for each_test, reason in test_results.skipped: key = JUnitXML._suite_name(each_test) if key not in collection: collection[key] = [] collection[key].append((Verdict.SKIPPED, each_test, reason)) - def _convert_suite(self, results, name, suite): + def _convert_suite( + self, results: GreenTestResult, name: str, suite: list[TestVerdict] + ) -> _Element: xml_suite = Element(JUnitDialect.TEST_SUITE) xml_suite.set(JUnitDialect.NAME, name) xml_suite.set(JUnitDialect.TEST_COUNT, str(len(suite))) @@ -131,17 +160,17 @@ def _convert_suite(self, results, name, suite): return xml_suite @staticmethod - def _count_test_with_verdict(verdict, suite): + def _count_test_with_verdict(verdict: int, suite): return sum(1 for entry in suite if entry[0] == verdict) - def _convert_test(self, results, verdict, test, *details): + def _convert_test(self, results, verdict, test, *details) -> _Element: xml_test = Element(JUnitDialect.TEST_CASE) xml_test.set(JUnitDialect.NAME, test.method_name) xml_test.set(JUnitDialect.CLASS_NAME, test.class_name) xml_test.set(JUnitDialect.TEST_TIME, test.test_time) xml_verdict = self._convert_verdict(verdict, test, details) - if verdict: + if xml_verdict is not None: xml_test.append(xml_verdict) if test in results.stdout_output: @@ -156,7 +185,7 @@ def _convert_test(self, results, verdict, test, *details): return xml_test - def _convert_verdict(self, verdict, test, details): + def _convert_verdict(self, verdict: int, test, details) -> _Element | None: if verdict == Verdict.FAILED: failure = Element(JUnitDialect.FAILURE) failure.text = str(details[0]) @@ -172,5 +201,5 @@ def _convert_verdict(self, verdict, test, details): return None @staticmethod - def _suite_time(suite): + def _suite_time(suite) -> float: return sum(float(each_test.test_time) for verdict, each_test, *details in suite) diff --git a/green/process.py b/green/process.py index 1538400..b5f31eb 100644 --- a/green/process.py +++ b/green/process.py @@ -29,7 +29,7 @@ # Super-useful debug function for finding problems in the subprocesses, and it # even works on windows -def ddebug(msg: str, err: ExcInfoType | None = None): # pragma: no cover +def ddebug(msg: str, err: ExcInfoType | None = None) -> None: # pragma: no cover """ err can be an instance of sys.exc_info() -- which is the latest traceback info @@ -48,12 +48,12 @@ class ProcessLogger: instead of having process crashes be silent. """ - def __init__(self, callable: Callable): + def __init__(self, callable: Callable) -> None: self.__callable = callable def __call__(self, *args, **kwargs): try: - result = self.__callable(*args, **kwargs) + return self.__callable(*args, **kwargs) except Exception: # Here we add some debugging help. If multiprocessing's # debugging is on, it will arrange to log the traceback @@ -66,9 +66,6 @@ def __call__(self, *args, **kwargs): # clean up raise - # It was fine, give a normal answer - return result - class LoggingDaemonlessPool(Pool): """ @@ -82,9 +79,6 @@ class LoggingDaemonlessPool(Pool): def Process(ctx, *args, **kwargs): return ctx.Process(daemon=False, *args, **kwargs) - # FIXME: `kwargs={}` is dangerous as the empty dict is declared at import time - # and becomes a shared object between all instances of LoggingDaemonlessPool. - # In short, it is a global variable that is mutable. 
def apply_async( self, func: Callable, diff --git a/green/result.py b/green/result.py index 2055325..d86f5e6 100644 --- a/green/result.py +++ b/green/result.py @@ -1,3 +1,5 @@ +"""Classes and methods to handle test results.""" + from __future__ import annotations import argparse @@ -6,7 +8,7 @@ from shutil import get_terminal_size import time import traceback -from typing import Any, Union, TYPE_CHECKING, Type, Iterable +from typing import Any, Callable, Sequence, TYPE_CHECKING, Union from unittest.result import failfast from unittest import TestCase, TestSuite @@ -244,35 +246,40 @@ class ProtoTestResult(BaseTestResult): I'm the TestResult object for a single unit test run in a process. """ - start_time: float = 0.0 + failfast: bool = False # Because unittest inspects the attribute + finalize_callback_called: bool = False shouldStop: bool = False + start_time: float = 0.0 + test_time: str = "" - def __init__(self, start_callback=None, finalize_callback=None): + def __init__( + self, + start_callback: Callable[[RunnableTestT], None] | None = None, + finalize_callback: Callable[[RunnableTestT], None] | None = None, + ) -> None: super().__init__(None, colors=None) self.start_callback = start_callback self.finalize_callback = finalize_callback - self.finalize_callback_called = False - self.pickle_attrs = [ + self.collectedDurations: list[tuple[str, float]] = [] + self.errors: list[tuple[ProtoTest, ProtoError]] = [] + self.expectedFailures: list[tuple[ProtoTest, ProtoError]] = [] + self.failures: list[tuple[ProtoTest, ProtoError]] = [] + self.passing: list[ProtoTest] = [] + self.skipped: list[tuple[ProtoTest, str]] = [] + self.unexpectedSuccesses: list[ProtoTest] = [] + self.pickle_attrs: Sequence[str] = ( "errors", "expectedFailures", "failures", "passing", - "pickle_attrs", + "pickle_attrs", # TODO: check if pickle_attrs should be pickled. "shouldStop", "skipped", "stderr_errput", "stdout_output", - "unexpectedSuccesses", "test_time", - ] - self.failfast = False # Because unittest inspects the attribute - self.collectedDurations = [] - self.errors = [] - self.expectedFailures = [] - self.failures = [] - self.passing = [] - self.skipped = [] - self.unexpectedSuccesses = [] + "unexpectedSuccesses", + ) self.reinitialize() def reinitialize(self): @@ -300,7 +307,7 @@ def __repr__(self) -> str: # pragma: no cover def __getstate__(self) -> dict[str, Any]: """ - Prevent the callback functions from getting pickled + Prevent the callback functions from getting pickled. """ result_dict = {} for pickle_attr in self.pickle_attrs: @@ -401,8 +408,8 @@ class GreenTestResult(BaseTestResult): Aggregates test results and outputs them to a stream. """ - last_class = "" - last_module = "" + last_class: str = "" + last_module: str = "" first_text_output: str = "" shouldStop: bool = False @@ -415,18 +422,19 @@ def __init__(self, args: argparse.Namespace, stream) -> None: self.failfast = args.failfast self.testsRun: int = 0 # Individual lists - # TODO: add actual types to the lists. 
- self.collectedDurations: list = [] - self.errors: list = [] - self.expectedFailures: list = [] - self.failures: list = [] - self.passing: list = [] - self.skipped: list = [] - self.unexpectedSuccesses: list = [] + self.collectedDurations: list[tuple[str, float]] = [] + self.errors: list[tuple[ProtoTest, ProtoError]] = [] + self.expectedFailures: list[tuple[ProtoTest, ProtoError]] = [] + self.failures: list[tuple[ProtoTest, ProtoError]] = [] + self.passing: list[ProtoTest] = [] + self.skipped: list[tuple[ProtoTest, str]] = [] + self.unexpectedSuccesses: list[ProtoTest] = [] # Combination of all errors and failures - self.all_errors: list = [] + self.all_errors: list[ + tuple[ProtoTest, Callable[[str], str], str, ProtoError] + ] = [] # For exiting non-zero if we don't reach a certain level of coverage - self.coverage_percent = None + self.coverage_percent: int | None = None def __str__(self) -> str: # pragma: no cover return ( @@ -443,8 +451,11 @@ def stop(self) -> None: self.shouldStop = True def tryRecordingStdoutStderr( - self, test: ProtoTest, proto_test_result: ProtoTestResult, err=None - ): + self, + test: ProtoTest, + proto_test_result: ProtoTestResult, + err: ProtoError | None = None, + ) -> None: if proto_test_result.stdout_output.get(test, False): self.recordStdout(test, proto_test_result.stdout_output[test]) if proto_test_result.stderr_errput.get(test, False): @@ -495,6 +506,7 @@ def stopTestRun(self) -> None: """ Called once after all tests have run. """ + # FIXME: stopTime and timeTaken are defined outside __init__. self.stopTime = time.time() self.timeTaken = self.stopTime - self.startTime self.printErrors() @@ -540,7 +552,7 @@ def stopTestRun(self) -> None: ) ) self.stream.writeln() - results = [ + results: tuple[tuple[list, str, Callable[[str], str]], ...] = ( (self.errors, "errors", self.colors.error), (self.expectedFailures, "expected_failures", self.colors.expectedFailure), (self.failures, "failures", self.colors.failing), @@ -551,7 +563,7 @@ def stopTestRun(self) -> None: "unexpected_successes", self.colors.unexpectedSuccess, ), - ] + ) stats = [] for obj_list, name, color_func in results: if obj_list: @@ -564,7 +576,7 @@ def stopTestRun(self) -> None: grade = self.colors.failing("FAILED") self.stream.writeln(f"{grade} ({', '.join(stats)})") - def startTest(self, test): + def startTest(self, test: RunnableTestT) -> None: """ Called before the start of each test. """ @@ -601,12 +613,19 @@ def startTest(self, test): if current_class != self.last_class: self.last_class = current_class - def stopTest(self, test): + def stopTest(self, test: RunnableTestT) -> None: """ Supposed to be called after each test. """ - def _reportOutcome(self, test, outcome_char, color_func, err=None, reason=""): + def _reportOutcome( + self, + test: RunnableTestT, + outcome_char, + color_func: Callable[[str], str], + err=None, + reason: str = "", + ) -> None: self.testsRun += 1 test = proto_test(test) if self.showAll: @@ -632,7 +651,7 @@ def _reportOutcome(self, test, outcome_char, color_func, err=None, reason=""): self.stream.write(color_func(outcome_char)) self.stream.flush() - def addSuccess(self, test, test_time=None): + def addSuccess(self, test: RunnableTestT, test_time=None): """ Called when a test passed. """ @@ -643,7 +662,7 @@ def addSuccess(self, test, test_time=None): self._reportOutcome(test, ".", self.colors.passing) @failfast - def addError(self, test, err, test_time=None): + def addError(self, test: RunnableTestT, err, test_time=None): """ Called when a test raises an exception. 
""" @@ -656,7 +675,7 @@ def addError(self, test, err, test_time=None): self._reportOutcome(test, "E", self.colors.error, err) @failfast - def addFailure(self, test, err, test_time=None): + def addFailure(self, test: RunnableTestT, err, test_time=None): """ Called when a test fails a unittest assertion. """ @@ -676,7 +695,7 @@ def addFailure(self, test, err, test_time=None): self.all_errors.append((test, self.colors.error, "Failure", err)) self._reportOutcome(test, "F", self.colors.failing, err) - def addSkip(self, test, reason, test_time=None): + def addSkip(self, test: RunnableTestT, reason: str, test_time=None): """ Called when a test is skipped. """ @@ -686,7 +705,7 @@ def addSkip(self, test, reason, test_time=None): self.skipped.append((test, reason)) self._reportOutcome(test, "s", self.colors.skipped, reason=reason) - def addExpectedFailure(self, test, err, test_time=None): + def addExpectedFailure(self, test: RunnableTestT, err, test_time=None): """ Called when a test fails, and we expected the failure. """ @@ -697,7 +716,7 @@ def addExpectedFailure(self, test, err, test_time=None): self.expectedFailures.append((test, err)) self._reportOutcome(test, "x", self.colors.expectedFailure, err) - def addUnexpectedSuccess(self, test, test_time=None): + def addUnexpectedSuccess(self, test: RunnableTestT, test_time=None) -> None: """ Called when a test passed, but we expected a failure. """ @@ -707,7 +726,7 @@ def addUnexpectedSuccess(self, test, test_time=None): self.unexpectedSuccesses.append(test) self._reportOutcome(test, "u", self.colors.unexpectedSuccess) - def printErrors(self): + def printErrors(self) -> None: """ Print a list of all tracebacks from errors and failures, as well as captured stdout (even if the test passed, except with quiet_stdout @@ -763,7 +782,7 @@ def printErrors(self): self.displayStdout(test) self.displayStderr(test) - def wasSuccessful(self): + def wasSuccessful(self) -> bool: """ Tells whether or not the overall run was successful. """ diff --git a/green/suite.py b/green/suite.py index 9066087..194202d 100644 --- a/green/suite.py +++ b/green/suite.py @@ -8,7 +8,7 @@ import unittest from io import StringIO -from green.config import default_args +from green.config import get_default_args from green.output import GreenStream @@ -26,6 +26,7 @@ def __init__(self, tests=(), args: argparse.Namespace | None = None) -> None: # You should either set GreenTestSuite.args before instantiation, or # pass args into __init__ self._removed_tests = 0 + default_args = get_default_args() self.allow_stdout = default_args.allow_stdout self.full_test_pattern = "test" + default_args.test_pattern self.customize(args) diff --git a/green/test/test_config.py b/green/test/test_config.py index 6e6c727..09f268f 100644 --- a/green/test/test_config.py +++ b/green/test/test_config.py @@ -1,10 +1,14 @@ +from __future__ import annotations + import configparser import copy +import pathlib from io import StringIO import os import shutil import tempfile import unittest +from typing import Sequence from green import config from green.output import GreenStream @@ -37,25 +41,25 @@ class ModifiedEnvironment: I am a context manager that sets up environment variables for a test case. 
""" - def __init__(self, **kwargs): + def __init__(self, **kwargs: str | None) -> None: self.prev = {} self.excur = kwargs for k in kwargs: self.prev[k] = os.getenv(k) - def __enter__(self): + def __enter__(self) -> None: self.update_environment(self.excur) - def __exit__(self, type, value, traceback): + def __exit__(self, exc_type, exc_value, traceback) -> None: self.update_environment(self.prev) - def update_environment(self, d): - for k in d: - if d[k] is None: - if k in os.environ: - del os.environ[k] + def update_environment(self, env: dict[str, str | None]) -> None: + for key, value in env.items(): + if value is None: + if key in os.environ: + del os.environ[key] else: - os.environ[k] = d[k] + os.environ[key] = value class ConfigBase(unittest.TestCase): @@ -64,24 +68,21 @@ class ConfigBase(unittest.TestCase): in a temporary directory with known values attached to self. """ - def _write_file(self, path, lines): - f = open(path, "w") - f.writelines([x + "\n" for x in lines]) - f.close() + def _write_file(self, path: pathlib.Path, lines: Sequence[str]) -> None: + path.write_text("\n".join(lines) + "\n") def setUp(self): - self.tmpd = tempfile.mkdtemp() - self.addCleanup(shutil.rmtree, self.tmpd) + self.tmpd = tmpd = pathlib.Path(tempfile.mkdtemp()) + self.addCleanup(shutil.rmtree, tmpd) # Set CWD to known empty directory so we don't pick up some other .green # file from the CWD tests are actually run from. - save_cwd = os.getcwd() - self.addCleanup(os.chdir, save_cwd) - cwd_dir = os.path.join(self.tmpd, "cwd") - os.mkdir(cwd_dir) + self.addCleanup(os.chdir, pathlib.Path.cwd()) + cwd_dir = tmpd / "cwd" + cwd_dir.mkdir(exist_ok=True, parents=True) os.chdir(cwd_dir) # This represents the $HOME config file, and doubles for the current # working directory config file if we set CWD to self.tmpd - self.default_filename = os.path.join(self.tmpd, ".green") + self.default_filename = tmpd / ".green" self.default_logging = False self.default_version = False self.default_failfast = True @@ -90,46 +91,46 @@ def setUp(self): self.default_filename, [ "# this is a test config file for green", - f"logging = {str(self.default_logging)}", - f"version = {str(self.default_version)}", + f"logging = {self.default_logging}", + f"version = {self.default_version}", f"omit-patterns = {self.default_filename}", - f"failfast = {str(self.default_failfast)}", - f"termcolor = {str(self.default_termcolor)}", + f"failfast = {self.default_failfast}", + f"termcolor = {self.default_termcolor}", ], ) - self.env_filename = os.path.join(self.tmpd, "green.env") + self.env_filename = tmpd / "green.env" self.env_logging = True self.env_no_skip_report = False self._write_file( self.env_filename, [ "# this is a test config file for green", - f"logging = {str(self.env_logging)}", + f"logging = {self.env_logging}", f"omit-patterns = {self.env_filename}", f"no-skip-report = {self.env_no_skip_report}", ], ) - self.cmd_filename = os.path.join(self.tmpd, "green.cmd") + self.cmd_filename = self.tmpd / "green.cmd" self.cmd_logging = False self.cmd_run_coverage = False self._write_file( self.cmd_filename, [ "# this is a test config file for green", - f"logging = {str(self.cmd_logging)}", + f"logging = {self.cmd_logging}", f"omit-patterns = {self.cmd_filename}", f"run-coverage = {self.cmd_run_coverage}", ], ) - self.setup_filename = os.path.join(cwd_dir, "setup.cfg") + self.setup_filename = cwd_dir / "setup.cfg" self.setup_failfast = False self.setup_verbose = 3 self._write_file( self.setup_filename, [ "[green]", - f"failfast = 
{str(self.setup_failfast)}", - f"verbose = {str(self.setup_verbose)}", + f"failfast = {self.setup_failfast}", + f"verbose = {self.setup_verbose}", ], ) @@ -145,19 +146,27 @@ def test_cmd_env_nodef_nosetup(self): exist, setup.cfg does not exist Result: load --config """ - os.unlink(self.default_filename) - os.remove(self.setup_filename) - with ModifiedEnvironment(GREEN_CONFIG=self.env_filename, HOME=self.tmpd): + self.default_filename.unlink(missing_ok=True) + self.setup_filename.unlink(missing_ok=True) + with ModifiedEnvironment( + GREEN_CONFIG=str(self.env_filename), HOME=str(self.tmpd) + ): cfg = config.getConfig(self.cmd_filename) - ae = self.assertEqual - ar = self.assertRaises - ae(["green"], cfg.sections()) - ae(self.cmd_filename, cfg.get("green", "omit-patterns")) - ae(self.cmd_run_coverage, cfg.getboolean("green", "run-coverage")) - ae(self.cmd_logging, cfg.getboolean("green", "logging")) - ae(self.env_no_skip_report, cfg.getboolean("green", "no-skip-report")) - ar(configparser.NoOptionError, cfg.getboolean, "green", "version") - ar(configparser.NoOptionError, cfg.getboolean, "green", "verbose") + self.assertEqual(["green"], cfg.sections()) + self.assertEqual(str(self.cmd_filename), cfg.get("green", "omit-patterns")) + self.assertEqual( + self.cmd_run_coverage, cfg.getboolean("green", "run-coverage") + ) + self.assertEqual(self.cmd_logging, cfg.getboolean("green", "logging")) + self.assertEqual( + self.env_no_skip_report, cfg.getboolean("green", "no-skip-report") + ) + self.assertRaises( + configparser.NoOptionError, cfg.getboolean, "green", "version" + ) + self.assertRaises( + configparser.NoOptionError, cfg.getboolean, "green", "verbose" + ) def test_cmd_noenv_def_nosetup(self): """ @@ -167,17 +176,21 @@ def test_cmd_noenv_def_nosetup(self): """ os.unlink(self.env_filename) os.remove(self.setup_filename) - with ModifiedEnvironment(GREEN_CONFIG=None, HOME=self.tmpd): + with ModifiedEnvironment(GREEN_CONFIG=None, HOME=str(self.tmpd)): cfg = config.getConfig(self.cmd_filename) - ae = self.assertEqual - ar = self.assertRaises - ae(["green"], cfg.sections()) - ae(self.cmd_filename, cfg.get("green", "omit-patterns")) - ae(self.cmd_run_coverage, cfg.getboolean("green", "run-coverage")) - ae(self.cmd_logging, cfg.getboolean("green", "logging")) - ar(configparser.NoOptionError, cfg.getboolean, "green", "no-skip-report") - ae(self.default_version, cfg.getboolean("green", "version")) - ar(configparser.NoOptionError, cfg.getboolean, "green", "verbose") + self.assertEqual(["green"], cfg.sections()) + self.assertEqual(str(self.cmd_filename), cfg.get("green", "omit-patterns")) + self.assertEqual( + self.cmd_run_coverage, cfg.getboolean("green", "run-coverage") + ) + self.assertEqual(self.cmd_logging, cfg.getboolean("green", "logging")) + self.assertRaises( + configparser.NoOptionError, cfg.getboolean, "green", "no-skip-report" + ) + self.assertEqual(self.default_version, cfg.getboolean("green", "version")) + self.assertRaises( + configparser.NoOptionError, cfg.getboolean, "green", "verbose" + ) def test_cmd_noenv_nodef_nosetup(self): """ @@ -188,17 +201,23 @@ def test_cmd_noenv_nodef_nosetup(self): os.unlink(self.env_filename) os.unlink(self.default_filename) os.remove(self.setup_filename) - with ModifiedEnvironment(GREEN_CONFIG=None, HOME=self.tmpd): + with ModifiedEnvironment(GREEN_CONFIG=None, HOME=str(self.tmpd)): cfg = config.getConfig(self.cmd_filename) - ae = self.assertEqual - ar = self.assertRaises - ae(["green"], cfg.sections()) - ae(self.cmd_filename, cfg.get("green", 
"omit-patterns")) - ae(self.cmd_run_coverage, cfg.getboolean("green", "run-coverage")) - ae(self.cmd_logging, cfg.getboolean("green", "logging")) - ar(configparser.NoOptionError, cfg.getboolean, "green", "no-skip-report") - ar(configparser.NoOptionError, cfg.getboolean, "green", "version") - ar(configparser.NoOptionError, cfg.getboolean, "green", "verbose") + self.assertEqual(["green"], cfg.sections()) + self.assertEqual(str(self.cmd_filename), cfg.get("green", "omit-patterns")) + self.assertEqual( + self.cmd_run_coverage, cfg.getboolean("green", "run-coverage") + ) + self.assertEqual(self.cmd_logging, cfg.getboolean("green", "logging")) + self.assertRaises( + configparser.NoOptionError, cfg.getboolean, "green", "no-skip-report" + ) + self.assertRaises( + configparser.NoOptionError, cfg.getboolean, "green", "version" + ) + self.assertRaises( + configparser.NoOptionError, cfg.getboolean, "green", "verbose" + ) def test_nocmd_env_cwd(self): """ @@ -208,17 +227,25 @@ def test_nocmd_env_cwd(self): os.chdir(self.tmpd) # setUp is already set to restore us to our pre-testing cwd os.unlink(self.cmd_filename) os.remove(self.setup_filename) - with ModifiedEnvironment(GREEN_CONFIG=self.env_filename, HOME=self.tmpd): + with ModifiedEnvironment( + GREEN_CONFIG=str(self.env_filename), HOME=str(self.tmpd) + ): cfg = config.getConfig() - ae = self.assertEqual - ar = self.assertRaises - ae(["green"], cfg.sections()) - ae(self.default_filename, cfg.get("green", "omit-patterns")) - ar(configparser.NoOptionError, cfg.get, "green", "run-coverage") - ae(self.default_logging, cfg.getboolean("green", "logging")) - ae(self.env_no_skip_report, cfg.getboolean("green", "no-skip-report")) - ae(self.default_version, cfg.getboolean("green", "version")) - ar(configparser.NoOptionError, cfg.getint, "green", "verbose") + self.assertEqual(["green"], cfg.sections()) + self.assertEqual( + str(self.default_filename), cfg.get("green", "omit-patterns") + ) + self.assertRaises( + configparser.NoOptionError, cfg.get, "green", "run-coverage" + ) + self.assertEqual(self.default_logging, cfg.getboolean("green", "logging")) + self.assertEqual( + self.env_no_skip_report, cfg.getboolean("green", "no-skip-report") + ) + self.assertEqual(self.default_version, cfg.getboolean("green", "version")) + self.assertRaises( + configparser.NoOptionError, cfg.getint, "green", "verbose" + ) def test_nocmd_env_def_nosetup(self): """ @@ -226,19 +253,25 @@ def test_nocmd_env_def_nosetup(self): setup.cfg does not exist Result: load $GREEN_CONFIG """ - os.unlink(self.cmd_filename) - os.remove(self.setup_filename) - with ModifiedEnvironment(GREEN_CONFIG=self.env_filename, HOME=self.tmpd): + self.cmd_filename.unlink(missing_ok=True) + self.setup_filename.unlink(missing_ok=True) + with ModifiedEnvironment( + GREEN_CONFIG=str(self.env_filename), HOME=str(self.tmpd) + ): cfg = config.getConfig() - ae = self.assertEqual - ar = self.assertRaises - ae(["green"], cfg.sections()) - ae(self.env_filename, cfg.get("green", "omit-patterns")) - ar(configparser.NoOptionError, cfg.get, "green", "run-coverage") - ae(self.env_logging, cfg.getboolean("green", "logging")) - ae(self.env_no_skip_report, cfg.getboolean("green", "no-skip-report")) - ae(self.default_version, cfg.getboolean("green", "version")) - ar(configparser.NoOptionError, cfg.getboolean, "green", "verbose") + self.assertEqual(["green"], cfg.sections()) + self.assertEqual(str(self.env_filename), cfg.get("green", "omit-patterns")) + self.assertRaises( + configparser.NoOptionError, cfg.get, "green", 
"run-coverage" + ) + self.assertEqual(self.env_logging, cfg.getboolean("green", "logging")) + self.assertEqual( + self.env_no_skip_report, cfg.getboolean("green", "no-skip-report") + ) + self.assertEqual(self.default_version, cfg.getboolean("green", "version")) + self.assertRaises( + configparser.NoOptionError, cfg.getboolean, "green", "verbose" + ) def test_nocmd_env_nodef_nosetup(self): """ @@ -246,20 +279,28 @@ def test_nocmd_env_nodef_nosetup(self): exist, setup.cfg does not exist Result: load $GREEN_CONFIG """ - os.unlink(self.cmd_filename) - os.unlink(self.default_filename) - os.remove(self.setup_filename) - with ModifiedEnvironment(GREEN_CONFIG=self.env_filename, HOME=self.tmpd): + self.cmd_filename.unlink(missing_ok=True) + self.default_filename.unlink(missing_ok=True) + self.setup_filename.unlink(missing_ok=True) + with ModifiedEnvironment( + GREEN_CONFIG=str(self.env_filename), HOME=str(self.tmpd) + ): cfg = config.getConfig() - ae = self.assertEqual - ar = self.assertRaises - ae(["green"], cfg.sections()) - ae(self.env_filename, cfg.get("green", "omit-patterns")) - ar(configparser.NoOptionError, cfg.get, "green", "run-coverage") - ae(self.env_logging, cfg.getboolean("green", "logging")) - ae(self.env_no_skip_report, cfg.getboolean("green", "no-skip-report")) - ar(configparser.NoOptionError, cfg.getboolean, "green", "version") - ar(configparser.NoOptionError, cfg.getboolean, "green", "verbose") + self.assertEqual(["green"], cfg.sections()) + self.assertEqual(str(self.env_filename), cfg.get("green", "omit-patterns")) + self.assertRaises( + configparser.NoOptionError, cfg.get, "green", "run-coverage" + ) + self.assertEqual(self.env_logging, cfg.getboolean("green", "logging")) + self.assertEqual( + self.env_no_skip_report, cfg.getboolean("green", "no-skip-report") + ) + self.assertRaises( + configparser.NoOptionError, cfg.getboolean, "green", "version" + ) + self.assertRaises( + configparser.NoOptionError, cfg.getboolean, "green", "verbose" + ) def test_nocmd_noenv_def_nosetup(self): """ @@ -270,17 +311,23 @@ def test_nocmd_noenv_def_nosetup(self): os.unlink(self.cmd_filename) os.unlink(self.env_filename) os.remove(self.setup_filename) - with ModifiedEnvironment(GREEN_CONFIG=None, HOME=self.tmpd): + with ModifiedEnvironment(GREEN_CONFIG=None, HOME=str(self.tmpd)): cfg = config.getConfig() - ae = self.assertEqual - ar = self.assertRaises - ae(["green"], cfg.sections()) - ae(self.default_filename, cfg.get("green", "omit-patterns")) - ar(configparser.NoOptionError, cfg.get, "green", "run-coverage") - ae(self.default_logging, cfg.getboolean("green", "logging")) - ar(configparser.NoOptionError, cfg.getboolean, "green", "no-skip-report") - ae(self.default_version, cfg.getboolean("green", "version")) - ar(configparser.NoOptionError, cfg.getboolean, "green", "verbose") + self.assertEqual(["green"], cfg.sections()) + self.assertEqual( + str(self.default_filename), cfg.get("green", "omit-patterns") + ) + self.assertRaises( + configparser.NoOptionError, cfg.get, "green", "run-coverage" + ) + self.assertEqual(self.default_logging, cfg.getboolean("green", "logging")) + self.assertRaises( + configparser.NoOptionError, cfg.getboolean, "green", "no-skip-report" + ) + self.assertEqual(self.default_version, cfg.getboolean("green", "version")) + self.assertRaises( + configparser.NoOptionError, cfg.getboolean, "green", "verbose" + ) def test_nocmd_noenv_nodef_nosetup(self): """ @@ -292,17 +339,21 @@ def test_nocmd_noenv_nodef_nosetup(self): os.unlink(self.env_filename) os.unlink(self.cmd_filename) 
os.remove(self.setup_filename) - with ModifiedEnvironment(GREEN_CONFIG=None, HOME=self.tmpd): + with ModifiedEnvironment(GREEN_CONFIG=None, HOME=str(self.tmpd)): cfg = config.getConfig() - ae = self.assertEqual - ar = self.assertRaises - ae([], cfg.sections()) - ar(configparser.NoSectionError, cfg.get, "green", "omit-patterns") - ar(configparser.NoSectionError, cfg.get, "green", "run-coverage") - ar(configparser.NoSectionError, cfg.get, "green", "logging") - ar(configparser.NoSectionError, cfg.get, "green", "no-skip-report") - ar(configparser.NoSectionError, cfg.get, "green", "version") - ar(configparser.NoSectionError, cfg.get, "green", "verbose") + self.assertEqual([], cfg.sections()) + self.assertRaises( + configparser.NoSectionError, cfg.get, "green", "omit-patterns" + ) + self.assertRaises( + configparser.NoSectionError, cfg.get, "green", "run-coverage" + ) + self.assertRaises(configparser.NoSectionError, cfg.get, "green", "logging") + self.assertRaises( + configparser.NoSectionError, cfg.get, "green", "no-skip-report" + ) + self.assertRaises(configparser.NoSectionError, cfg.get, "green", "version") + self.assertRaises(configparser.NoSectionError, cfg.get, "green", "verbose") def test_cmd_env_nodef_setup(self): """ @@ -311,18 +362,24 @@ def test_cmd_env_nodef_setup(self): Result: load --config """ os.unlink(self.default_filename) - with ModifiedEnvironment(GREEN_CONFIG=self.env_filename, HOME=self.tmpd): + with ModifiedEnvironment( + GREEN_CONFIG=str(self.env_filename), HOME=str(self.tmpd) + ): cfg = config.getConfig(self.cmd_filename) - ae = self.assertEqual - ar = self.assertRaises - ae(["green"], cfg.sections()) - ae(self.cmd_filename, cfg.get("green", "omit-patterns")) - ae(self.cmd_run_coverage, cfg.getboolean("green", "run-coverage")) - ae(self.cmd_logging, cfg.getboolean("green", "logging")) - ae(self.env_no_skip_report, cfg.getboolean("green", "no-skip-report")) - ae(self.setup_verbose, cfg.getint("green", "verbose")) - ae(self.setup_failfast, cfg.getboolean("green", "failfast")) - ar(configparser.NoOptionError, cfg.getboolean, "green", "version") + self.assertEqual(["green"], cfg.sections()) + self.assertEqual(str(self.cmd_filename), cfg.get("green", "omit-patterns")) + self.assertEqual( + self.cmd_run_coverage, cfg.getboolean("green", "run-coverage") + ) + self.assertEqual(self.cmd_logging, cfg.getboolean("green", "logging")) + self.assertEqual( + self.env_no_skip_report, cfg.getboolean("green", "no-skip-report") + ) + self.assertEqual(self.setup_verbose, cfg.getint("green", "verbose")) + self.assertEqual(self.setup_failfast, cfg.getboolean("green", "failfast")) + self.assertRaises( + configparser.NoOptionError, cfg.getboolean, "green", "version" + ) def test_cmd_noenv_def_setup(self): """ @@ -331,18 +388,20 @@ def test_cmd_noenv_def_setup(self): Result: load --config """ os.unlink(self.env_filename) - with ModifiedEnvironment(GREEN_CONFIG=None, HOME=self.tmpd): + with ModifiedEnvironment(GREEN_CONFIG=None, HOME=str(self.tmpd)): cfg = config.getConfig(self.cmd_filename) - ae = self.assertEqual - ar = self.assertRaises - ae(["green"], cfg.sections()) - ae(self.cmd_filename, cfg.get("green", "omit-patterns")) - ae(self.cmd_run_coverage, cfg.getboolean("green", "run-coverage")) - ae(self.cmd_logging, cfg.getboolean("green", "logging")) - ar(configparser.NoOptionError, cfg.getboolean, "green", "no-skip-report") - ae(self.default_version, cfg.getboolean("green", "version")) - ae(self.setup_verbose, cfg.getint("green", "verbose")) - ae(self.setup_failfast, cfg.getboolean("green", 
"failfast")) + self.assertEqual(["green"], cfg.sections()) + self.assertEqual(str(self.cmd_filename), cfg.get("green", "omit-patterns")) + self.assertEqual( + self.cmd_run_coverage, cfg.getboolean("green", "run-coverage") + ) + self.assertEqual(self.cmd_logging, cfg.getboolean("green", "logging")) + self.assertRaises( + configparser.NoOptionError, cfg.getboolean, "green", "no-skip-report" + ) + self.assertEqual(self.default_version, cfg.getboolean("green", "version")) + self.assertEqual(self.setup_verbose, cfg.getint("green", "verbose")) + self.assertEqual(self.setup_failfast, cfg.getboolean("green", "failfast")) def test_cmd_noenv_nodef_setup(self): """ @@ -352,18 +411,22 @@ def test_cmd_noenv_nodef_setup(self): """ os.unlink(self.env_filename) os.unlink(self.default_filename) - with ModifiedEnvironment(GREEN_CONFIG=None, HOME=self.tmpd): + with ModifiedEnvironment(GREEN_CONFIG=None, HOME=str(self.tmpd)): cfg = config.getConfig(self.cmd_filename) - ae = self.assertEqual - ar = self.assertRaises - ae(["green"], cfg.sections()) - ae(self.cmd_filename, cfg.get("green", "omit-patterns")) - ae(self.cmd_run_coverage, cfg.getboolean("green", "run-coverage")) - ae(self.cmd_logging, cfg.getboolean("green", "logging")) - ar(configparser.NoOptionError, cfg.getboolean, "green", "no-skip-report") - ar(configparser.NoOptionError, cfg.getboolean, "green", "version") - ae(self.setup_verbose, cfg.getint("green", "verbose")) - ae(self.setup_failfast, cfg.getboolean("green", "failfast")) + self.assertEqual(["green"], cfg.sections()) + self.assertEqual(str(self.cmd_filename), cfg.get("green", "omit-patterns")) + self.assertEqual( + self.cmd_run_coverage, cfg.getboolean("green", "run-coverage") + ) + self.assertEqual(self.cmd_logging, cfg.getboolean("green", "logging")) + self.assertRaises( + configparser.NoOptionError, cfg.getboolean, "green", "no-skip-report" + ) + self.assertRaises( + configparser.NoOptionError, cfg.getboolean, "green", "version" + ) + self.assertEqual(self.setup_verbose, cfg.getint("green", "verbose")) + self.assertEqual(self.setup_failfast, cfg.getboolean("green", "failfast")) def test_nocmd_env_def_setup(self): """ @@ -372,18 +435,22 @@ def test_nocmd_env_def_setup(self): Result: load $GREEN_CONFIG """ os.unlink(self.cmd_filename) - with ModifiedEnvironment(GREEN_CONFIG=self.env_filename, HOME=self.tmpd): + with ModifiedEnvironment( + GREEN_CONFIG=str(self.env_filename), HOME=str(self.tmpd) + ): cfg = config.getConfig() - ae = self.assertEqual - ar = self.assertRaises - ae(["green"], cfg.sections()) - ae(self.env_filename, cfg.get("green", "omit-patterns")) - ar(configparser.NoOptionError, cfg.get, "green", "run-coverage") - ae(self.env_logging, cfg.getboolean("green", "logging")) - ae(self.env_no_skip_report, cfg.getboolean("green", "no-skip-report")) - ae(self.default_version, cfg.getboolean("green", "version")) - ae(self.setup_verbose, cfg.getint("green", "verbose")) - ae(self.setup_failfast, cfg.getboolean("green", "failfast")) + self.assertEqual(["green"], cfg.sections()) + self.assertEqual(str(self.env_filename), cfg.get("green", "omit-patterns")) + self.assertRaises( + configparser.NoOptionError, cfg.get, "green", "run-coverage" + ) + self.assertEqual(self.env_logging, cfg.getboolean("green", "logging")) + self.assertEqual( + self.env_no_skip_report, cfg.getboolean("green", "no-skip-report") + ) + self.assertEqual(self.default_version, cfg.getboolean("green", "version")) + self.assertEqual(self.setup_verbose, cfg.getint("green", "verbose")) + self.assertEqual(self.setup_failfast, 
cfg.getboolean("green", "failfast")) def test_nocmd_env_nodef_setup(self): """ @@ -393,18 +460,24 @@ def test_nocmd_env_nodef_setup(self): """ os.unlink(self.cmd_filename) os.unlink(self.default_filename) - with ModifiedEnvironment(GREEN_CONFIG=self.env_filename, HOME=self.tmpd): + with ModifiedEnvironment( + GREEN_CONFIG=str(self.env_filename), HOME=str(self.tmpd) + ): cfg = config.getConfig() - ae = self.assertEqual - ar = self.assertRaises - ae(["green"], cfg.sections()) - ae(self.env_filename, cfg.get("green", "omit-patterns")) - ar(configparser.NoOptionError, cfg.get, "green", "run-coverage") - ae(self.env_logging, cfg.getboolean("green", "logging")) - ae(self.env_no_skip_report, cfg.getboolean("green", "no-skip-report")) - ar(configparser.NoOptionError, cfg.getboolean, "green", "version") - ae(self.setup_verbose, cfg.getint("green", "verbose")) - ae(self.setup_failfast, cfg.getboolean("green", "failfast")) + self.assertEqual(["green"], cfg.sections()) + self.assertEqual(str(self.env_filename), cfg.get("green", "omit-patterns")) + self.assertRaises( + configparser.NoOptionError, cfg.get, "green", "run-coverage" + ) + self.assertEqual(self.env_logging, cfg.getboolean("green", "logging")) + self.assertEqual( + self.env_no_skip_report, cfg.getboolean("green", "no-skip-report") + ) + self.assertRaises( + configparser.NoOptionError, cfg.getboolean, "green", "version" + ) + self.assertEqual(self.setup_verbose, cfg.getint("green", "verbose")) + self.assertEqual(self.setup_failfast, cfg.getboolean("green", "failfast")) def test_nocmd_noenv_def_setup(self): """ @@ -414,18 +487,22 @@ def test_nocmd_noenv_def_setup(self): """ os.unlink(self.cmd_filename) os.unlink(self.env_filename) - with ModifiedEnvironment(GREEN_CONFIG=None, HOME=self.tmpd): + with ModifiedEnvironment(GREEN_CONFIG=None, HOME=str(self.tmpd)): cfg = config.getConfig() - ae = self.assertEqual - ar = self.assertRaises - ae(["green"], cfg.sections()) - ae(self.default_filename, cfg.get("green", "omit-patterns")) - ar(configparser.NoOptionError, cfg.get, "green", "run-coverage") - ae(self.default_logging, cfg.getboolean("green", "logging")) - ar(configparser.NoOptionError, cfg.getboolean, "green", "no-skip-report") - ae(self.default_version, cfg.getboolean("green", "version")) - ae(self.setup_verbose, cfg.getint("green", "verbose")) - ae(self.setup_failfast, cfg.getboolean("green", "failfast")) + self.assertEqual(["green"], cfg.sections()) + self.assertEqual( + str(self.default_filename), cfg.get("green", "omit-patterns") + ) + self.assertRaises( + configparser.NoOptionError, cfg.get, "green", "run-coverage" + ) + self.assertEqual(self.default_logging, cfg.getboolean("green", "logging")) + self.assertRaises( + configparser.NoOptionError, cfg.getboolean, "green", "no-skip-report" + ) + self.assertEqual(self.default_version, cfg.getboolean("green", "version")) + self.assertEqual(self.setup_verbose, cfg.getint("green", "verbose")) + self.assertEqual(self.setup_failfast, cfg.getboolean("green", "failfast")) def test_nocmd_noenv_nodef_setup(self): """ @@ -433,20 +510,24 @@ def test_nocmd_noenv_nodef_setup(self): setup.cfg exists Result: empty config """ - os.unlink(self.default_filename) - os.unlink(self.env_filename) - os.unlink(self.cmd_filename) - with ModifiedEnvironment(GREEN_CONFIG=None, HOME=self.tmpd): + self.default_filename.unlink(missing_ok=True) + self.env_filename.unlink(missing_ok=True) + self.cmd_filename.unlink(missing_ok=True) + with ModifiedEnvironment(GREEN_CONFIG=None, HOME=str(self.tmpd)): cfg = config.getConfig() - 
ae = self.assertEqual
-            ar = self.assertRaises
-            ae(self.setup_verbose, cfg.getint("green", "verbose"))
-            ae(self.setup_failfast, cfg.getboolean("green", "failfast"))
-            ar(configparser.NoOptionError, cfg.get, "green", "omit-patterns")
-            ar(configparser.NoOptionError, cfg.get, "green", "run-coverage")
-            ar(configparser.NoOptionError, cfg.get, "green", "logging")
-            ar(configparser.NoOptionError, cfg.get, "green", "no-skip-report")
-            ar(configparser.NoOptionError, cfg.get, "green", "version")
+            self.assertEqual(self.setup_verbose, cfg.getint("green", "verbose"))
+            self.assertEqual(self.setup_failfast, cfg.getboolean("green", "failfast"))
+            self.assertRaises(
+                configparser.NoOptionError, cfg.get, "green", "omit-patterns"
+            )
+            self.assertRaises(
+                configparser.NoOptionError, cfg.get, "green", "run-coverage"
+            )
+            self.assertRaises(configparser.NoOptionError, cfg.get, "green", "logging")
+            self.assertRaises(
+                configparser.NoOptionError, cfg.get, "green", "no-skip-report"
+            )
+            self.assertRaises(configparser.NoOptionError, cfg.get, "green", "version")
 
 
 class TestMergeConfig(ConfigBase):
@@ -465,8 +546,10 @@ def test_overwrite(self):
         saved_stdout = config.sys.stdout
         config.sys.stdout = gs
         self.addCleanup(setattr, config.sys, "stdout", saved_stdout)
-        with ModifiedEnvironment(GREEN_CONFIG=self.env_filename, HOME=self.tmpd):
-            new_args = copy.deepcopy(config.default_args)
+        with ModifiedEnvironment(
+            GREEN_CONFIG=str(self.env_filename), HOME=str(self.tmpd)
+        ):
+            new_args = copy.deepcopy(config.get_default_args())
             new_args.omit_patterns = "omitstuff"
             new_args.run_coverage = True
 
@@ -488,9 +571,9 @@ def test_no_overwrite(self):
         Default unspecified command-line args do not overwrite config values.
         """
         # This config environment should set logging to True
-        with ModifiedEnvironment(GREEN_CONFIG=self.env_filename, HOME=""):
+        with ModifiedEnvironment(GREEN_CONFIG=str(self.env_filename), HOME=""):
            # The default for logging in arguments is False
-            da = copy.deepcopy(config.default_args)
+            da = copy.deepcopy(config.get_default_args())
             del da.logging
             computed_args = config.mergeConfig(da, testing=True)
             self.assertEqual(computed_args.logging, True)
@@ -499,8 +582,8 @@ def test_specified_command_line(self):
         """
         Specified command-line arguments always overwrite config file values
         """
-        with ModifiedEnvironment(HOME=self.tmpd):
-            new_args = copy.deepcopy(config.default_args)
+        with ModifiedEnvironment(HOME=str(self.tmpd)):
+            new_args = copy.deepcopy(config.get_default_args())
             new_args.failfast = True  # same as config, for sanity
             new_args.logging = True  # different than config, not default
             del new_args.version  # Not in arguments, should get config value
@@ -521,16 +604,18 @@ def test_targets(self):
         args = config.mergeConfig(args)
         self.assertEqual(args.targets, ["target1", "target2"])
 
-    def test_forgotToUpdateMerge(self):
+    def test_forgot_to_update_merge(self):
         """
         mergeConfig raises an exception for unknown cmdline args
         """
-        orig_args = copy.deepcopy(config.default_args)
-        self.addCleanup(setattr, config, "default_args", orig_args)
-        config.default_args.new_option = True
-        new_args = copy.deepcopy(config.default_args)
+        default_args = config.get_default_args()
+        default_args.new_option = True
+        new_args = copy.deepcopy(default_args)
 
-        self.assertRaises(
-            NotImplementedError, config.mergeConfig, new_args, testing=True
-        )
+        try:
+            with self.assertRaises(NotImplementedError):
+                config.mergeConfig(new_args, testing=True)
+        finally:
+            # The mutated namespace is memoized; clear it for later tests.
+            config.get_default_args.cache_clear()
 
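Note: the test_forgot_to_update_merge rewrite above hinges on get_default_args()
being memoized -- mutating the namespace it returns poisons every later caller
until the cache is cleared. A minimal standalone sketch of that contract
(illustrative only; the field set here is trimmed, not green's real defaults):

    import argparse
    import copy
    import functools


    @functools.lru_cache  # functools.cache once Python 3.9+ is the floor
    def get_default_args() -> argparse.Namespace:
        # Built on first call rather than at import time, then cached.
        return argparse.Namespace(logging=False, failfast=False, verbose=1)


    assert get_default_args() is get_default_args()  # one shared object

    # Read-only callers may share the cached namespace; writers must either
    # deepcopy first, as the setUp() methods in these tests do ...
    args = copy.deepcopy(get_default_args())
    args.failfast = True  # safe: the cache is untouched

    # ... or reset the cache afterwards, as the try/finally above does.
    get_default_args().new_option = True
    get_default_args.cache_clear()
    assert not hasattr(get_default_args(), "new_option")
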
diff --git a/green/test/test_junit.py b/green/test/test_junit.py index b2d2fd2..ea7965e 100644 --- a/green/test/test_junit.py +++ b/green/test/test_junit.py @@ -1,4 +1,4 @@ -from green.config import default_args +from green.config import get_default_args from green.output import GreenStream from green.junit import JUnitXML, JUnitDialect, Verdict from green.result import GreenTestResult, ProtoTest, proto_error @@ -23,7 +23,9 @@ def test(module, class_name, method_name): class JUnitXMLReportIsGenerated(TestCase): def setUp(self): self._destination = StringIO() - self._test_results = GreenTestResult(default_args, GreenStream(StringIO())) + self._test_results = GreenTestResult( + get_default_args(), GreenStream(StringIO()) + ) self._test_results.timeTaken = 4.06 self._adapter = JUnitXML() diff --git a/green/test/test_result.py b/green/test/test_result.py index bc21ba1..07739b0 100644 --- a/green/test/test_result.py +++ b/green/test/test_result.py @@ -11,7 +11,7 @@ from unittest.mock import MagicMock, patch import tempfile -from green.config import default_args +from green.config import get_default_args from green.output import Colors, GreenStream from green.result import ( GreenTestResult, @@ -364,7 +364,7 @@ def test_class_or_module_failure(self): class TestGreenTestResult(unittest.TestCase): def setUp(self): - self.args = copy.deepcopy(default_args) + self.args = copy.deepcopy(get_default_args()) self.stream = StringIO() def tearDown(self): @@ -834,7 +834,7 @@ def test_stopTestRun_singular_process_message(self): class TestGreenTestResultAdds(unittest.TestCase): def setUp(self): self.stream = StringIO() - self.args = copy.deepcopy(default_args) + self.args = copy.deepcopy(get_default_args()) self.args.verbose = 0 self.gtr = GreenTestResult(self.args, GreenStream(self.stream)) self.gtr._reportOutcome = MagicMock() @@ -1084,7 +1084,7 @@ def test_wasSuccessful_coverageSucceeds(self): class TestGreenTestRunCoverage(unittest.TestCase): def setUp(self): - self.args = copy.deepcopy(default_args) + self.args = copy.deepcopy(get_default_args()) cov_file = tempfile.NamedTemporaryFile(delete=False) cov_file.close() diff --git a/green/test/test_runner.py b/green/test/test_runner.py index d527ba2..0b66406 100644 --- a/green/test/test_runner.py +++ b/green/test/test_runner.py @@ -12,7 +12,7 @@ from unittest import mock import weakref -from green.config import default_args +from green.config import get_default_args from green.exceptions import InitializerOrFinalizerError from green.loader import GreenTestLoader from green.output import GreenStream @@ -108,7 +108,7 @@ def tearDownClass(cls): cls.startdir = None def setUp(self): - self.args = copy.deepcopy(default_args) + self.args = copy.deepcopy(get_default_args()) self.stream = StringIO() self.tmpdir = tempfile.mkdtemp() self.loader = GreenTestLoader() @@ -284,7 +284,7 @@ def setUp(self): os.chdir(self.container_dir) self.tmpdir = tempfile.mkdtemp(dir=self.container_dir) self.stream = StringIO() - self.args = copy.deepcopy(default_args) + self.args = copy.deepcopy(get_default_args()) self.loader = GreenTestLoader() def tearDown(self): diff --git a/green/test/test_suite.py b/green/test/test_suite.py index 0e9e9d1..a408d24 100644 --- a/green/test/test_suite.py +++ b/green/test/test_suite.py @@ -6,7 +6,7 @@ import unittest from unittest.mock import MagicMock -from green.config import default_args +from green.config import get_default_args from green.loader import GreenTestLoader from green.runner import run from green.suite import GreenTestSuite @@ -23,6 
+23,7 @@ def test_defaultArgs(self): """ Passing in default arguments causes attributes to be set. """ + default_args = get_default_args() gts = GreenTestSuite(args=default_args) self.assertEqual(gts.allow_stdout, default_args.allow_stdout) @@ -31,7 +32,7 @@ def test_shouldStop(self): When result.shouldStop == True, the suite should exit early. """ mock_test = MagicMock() - gts = GreenTestSuite(args=default_args) + gts = GreenTestSuite(args=get_default_args()) gts._tests = (mock_test,) mock_result = MagicMock() mock_result.shouldStop = True @@ -43,7 +44,7 @@ def test_failedModuleSetup(self): """ mock_test = MagicMock() mock_test.__iter__.side_effect = TypeError - gts = GreenTestSuite(args=default_args) + gts = GreenTestSuite(args=get_default_args()) gts._tests = (mock_test,) mock_result = MagicMock() mock_result._moduleSetUpFailed = True @@ -57,7 +58,7 @@ def test_failedModuleTeardown(self): mock_module = MagicMock() mock_test = MagicMock() mock_err = MagicMock() - args = copy.deepcopy(default_args) + args = copy.deepcopy(get_default_args()) gts = GreenTestSuite(args=args) gts._get_previous_module = mock_module mock_result = MagicMock() @@ -75,7 +76,7 @@ def test_addTest_testPattern(self): mock_test._testMethodName = "test_hello" mock_test2 = MagicMock() mock_test2._testMethodName = "test_goodbye" - args = copy.deepcopy(default_args) + args = copy.deepcopy(get_default_args()) args.test_pattern = "_good*" gts = GreenTestSuite(args=args) gts.addTest(mock_test) @@ -100,7 +101,7 @@ def test_skip_in_setUpClass(self): """ If SkipTest is raised in setUpClass, then the test gets skipped """ - gts = GreenTestSuite(args=default_args) + gts = GreenTestSuite(args=get_default_args()) mock_test = MagicMock() mock_result = MagicMock() mock_class = MagicMock() @@ -129,7 +130,7 @@ def tearDownClass(cls): cls.startdir = None def setUp(self): - self.args = copy.deepcopy(default_args) + self.args = copy.deepcopy(get_default_args()) self.stream = StringIO() self.tmpdir = tempfile.mkdtemp() self.loader = GreenTestLoader() @@ -181,7 +182,7 @@ def tearDownClass(cls): cls.startdir = None def setUp(self): - self.args = copy.deepcopy(default_args) + self.args = copy.deepcopy(get_default_args()) self.stream = StringIO() self.tmpdir = tempfile.mkdtemp() self.loader = GreenTestLoader()
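
All of the test-module hunks above follow the same mechanical migration: the
module-level default_args import becomes a call to get_default_args(), wrapped
in copy.deepcopy whenever the test intends to mutate the namespace. A
before/after sketch of that shape (hypothetical field set, not green's full
defaults):

    import argparse
    import copy
    import functools
    import multiprocessing

    # Before: built as a module global, so merely importing the config module
    # paid for the construction (including multiprocessing.cpu_count()).
    default_args = argparse.Namespace(processes=multiprocessing.cpu_count())


    # After: the same construction, deferred to first call and then memoized.
    @functools.lru_cache
    def get_default_args() -> argparse.Namespace:
        return argparse.Namespace(processes=multiprocessing.cpu_count())


    # Test setUp() methods copy instead of aliasing:
    args = copy.deepcopy(get_default_args())  # was: copy.deepcopy(default_args)
    args.processes = 1  # local mutation; the cached namespace is unaffected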