Merge pull request #124 from ceball/clarify_test_and_lint
Clean up testing vs linting concepts, plus misc fixes.
ceball authored May 20, 2020
2 parents d88ba57 + 10bc45a commit b609866
Showing 7 changed files with 101 additions and 70 deletions.
5 changes: 0 additions & 5 deletions nbcelltests/define.py
@@ -13,17 +13,12 @@ class LintType(Enum):
CELLS_PER_NOTEBOOK = 'cells_per_notebook'
FUNCTION_DEFINITIONS = 'function_definitions'
CLASS_DEFINITIONS = 'class_definitions'
CELL_COVERAGE = 'cell_coverage'
LINTER = 'linter'
KERNELSPEC = 'kernelspec'
MAGICS = 'magics'


class TestType(Enum):
LINES_PER_CELL = 'lines_per_cell'
CELLS_PER_NOTEBOOK = 'cells_per_notebook'
FUNCTION_DEFINITIONS = 'function_definitions'
CLASS_DEFINITIONS = 'class_definitions'
CELL_COVERAGE = 'cell_coverage'
CELL_TEST = 'cell_test'

2 changes: 2 additions & 0 deletions nbcelltests/shared.py
@@ -148,4 +148,6 @@ def cell_injected_into_test(test_lines):


def get_coverage(metadata):
if metadata['cell_count'] == 0:
return 0
return 100.0 * metadata['test_count'] / metadata['cell_count']
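
A minimal usage sketch of the guarded function above (the values are illustrative, not taken from the diff): an empty notebook now reports 0 coverage instead of raising ZeroDivisionError.

from nbcelltests.shared import get_coverage

print(get_coverage({'test_count': 1, 'cell_count': 4}))  # 25.0
print(get_coverage({'test_count': 0, 'cell_count': 0}))  # 0 -- no division by zero
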
49 changes: 5 additions & 44 deletions nbcelltests/test.py
@@ -123,24 +123,6 @@ def writeout_lines_per_cell(fp, lines_per_cell, metadata):
fp.write(2 * INDENT + 'assert {lines_in_cell} <= {limit}\n\n'.format(limit=lines_per_cell, lines_in_cell=lines_in_cell))


def writeout_cells_per_notebook(fp, cells_per_notebook, metadata):
if cells_per_notebook:
fp.write(INDENT + 'def test_cells_per_notebook(self):\n')
fp.write(2 * INDENT + 'assert {cells_in_notebook} <= {limit}\n\n'.format(limit=cells_per_notebook, cells_in_notebook=metadata.get('cell_count', -1)))


def writeout_function_definitions(fp, function_definitions, metadata):
if function_definitions:
fp.write(INDENT + 'def test_function_definition_count(self):\n')
fp.write(2 * INDENT + 'assert {functions_in_notebook} <= {limit}\n\n'.format(limit=function_definitions, functions_in_notebook=metadata.get('functions', -1)))


def writeout_class_definitions(fp, class_definitions, metadata):
if class_definitions:
fp.write(INDENT + 'def test_class_definition_count(self):\n')
fp.write(2 * INDENT + 'assert {classes_in_notebook} <= {limit}\n\n'.format(limit=class_definitions, classes_in_notebook=metadata.get('classes', -1)))


def writeout_cell_coverage(fp, cell_coverage, metadata):
if cell_coverage:
fp.write(INDENT + 'def test_cell_coverage(self):\n')
@@ -170,27 +152,15 @@ def run(notebook, rules=None, filename=None):
lines_per_cell = extra_metadata.get('lines_per_cell', -1)
writeout_lines_per_cell(fp, lines_per_cell, extra_metadata)

if 'cells_per_notebook' in extra_metadata:
cells_per_notebook = extra_metadata.get('cells_per_notebook', -1)
writeout_cells_per_notebook(fp, cells_per_notebook, extra_metadata)

if 'function_definitions' in extra_metadata:
function_definitions = extra_metadata.get('function_definitions', -1)
writeout_function_definitions(fp, function_definitions, extra_metadata)

if 'class_definitions' in extra_metadata:
class_definitions = extra_metadata.get('class_definitions', -1)
writeout_class_definitions(fp, class_definitions, extra_metadata)

if 'cell_coverage' in extra_metadata:
cell_coverage = extra_metadata.get('cell_coverage', 0)
cell_coverage = extra_metadata['cell_coverage']
writeout_cell_coverage(fp, cell_coverage, extra_metadata)

return name


def runWithReturn(notebook, executable=None, rules=None):
name = run(notebook)
name = run(notebook, rules=rules)
executable = executable or [sys.executable, '-m', 'pytest', '-v']
argv = executable + [name]
return subprocess.check_output(argv)
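
Because runWithReturn now forwards rules to run, a rule such as cell_coverage is actually written into the generated test file instead of being silently dropped. A hedged usage sketch (the notebook path is assumed, not part of the diff):

import sys
from nbcelltests.test import runWithReturn

# runs pytest on the generated test file and returns its captured output (bytes);
# subprocess.check_output raises CalledProcessError if any generated test fails
out = runWithReturn('example.ipynb',
                    executable=[sys.executable, '-m', 'pytest', '-v'],
                    rules={'cell_coverage': 50})
print(out.decode())
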
@@ -204,7 +174,7 @@ def runWithReport(notebook, executable=None, rules=None, collect_only=False):
tmpd = tempfile.mkdtemp()
py_file = os.path.join(tmpd, os.path.basename(notebook).replace('.ipynb', '.py'))
json_file = os.path.join(tmpd, os.path.basename(notebook).replace('.ipynb', '.json'))
_ = run(notebook, filename=py_file)
_ = run(notebook, filename=py_file, rules=rules)
ret = []
try:
# enable collecting info via json
@@ -234,15 +204,6 @@ def runWithReport(notebook, executable=None, rules=None, collect_only=False):

if 'test_cell_coverage' in node['nodeid']:
ret.append(TestMessage(-1, 'Testing cell coverage', TestType.CELL_COVERAGE, outcome))
elif 'test_cells_per_notebook' in node['nodeid']:
ret.append(TestMessage(-1, 'Testing cells per notebook', TestType.CELLS_PER_NOTEBOOK, outcome))
elif 'test_class_definition_count' in node['nodeid']:
ret.append(TestMessage(-1, 'Testing class definitions per notebook', TestType.CLASS_DEFINITIONS, outcome))
elif 'test_function_definition_count' in node['nodeid']:
ret.append(TestMessage(-1, 'Testing function definitions per notebook', TestType.FUNCTION_DEFINITIONS, outcome))
elif 'test_lines_per_cell_' in node['nodeid']:
cell_no = node['nodeid'].rsplit('_', 1)[-1]
ret.append(TestMessage(int(cell_no) + 1, 'Testing lines per cell', TestType.LINES_PER_CELL, outcome))
elif 'test_cell' in node['nodeid']:
cell_no = node['nodeid'].rsplit('_', 1)[-1]
ret.append(TestMessage(int(cell_no) + 1, 'Testing cell', TestType.CELL_TEST, outcome))
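
For context, a small illustration of how the remaining dispatch turns a pytest nodeid back into the 1-based cell number carried by the TestMessage (the nodeid below is hypothetical, assuming generated methods named test_cell_<n>):

nodeid = 'notebook_test.py::TestNotebook::test_cell_3'  # hypothetical generated test id
cell_no = nodeid.rsplit('_', 1)[-1]                      # '3' -- the 0-based cell index
print(int(cell_no) + 1)                                  # 4 -- the cell number reported
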
@@ -255,7 +216,7 @@ def runWithReport(notebook, executable=None, rules=None, collect_only=False):

def runWithHTMLReturn(notebook, executable=None, rules=None):
'''use pytest self contained html'''
name = run(notebook)
name = run(notebook, rules=rules)
html = name.replace('.py', '.html')
executable = executable or [sys.executable, '-m', 'pytest', '-v']
argv = executable + ['--html=' + html, '--self-contained-html', name]
@@ -268,7 +229,7 @@ def runWithHTMLReturn2(notebook, executable=None, rules=None):
'''use custom return objects'''
ret = ''
executable = executable or [sys.executable, '-m', 'pytest', '-v']
ret_tmp = run(notebook)
ret_tmp = run(notebook, rules=rules)
for test in ret_tmp:
test = test.to_html()
ret += '<p>' + test + '</p>'
File renamed without changes.
18 changes: 1 addition & 17 deletions nbcelltests/tests/test_lint.py
@@ -9,7 +9,7 @@
import os
import pytest

from nbcelltests.lint import lint_lines_per_cell, lint_cells_per_notebook, lint_function_definitions, lint_class_definitions, lint_cell_coverage, lint_kernelspec, lint_magics, run
from nbcelltests.lint import lint_lines_per_cell, lint_cells_per_notebook, lint_function_definitions, lint_class_definitions, lint_kernelspec, lint_magics, run
from nbcelltests.define import LintType

LR = namedtuple("lint_result", ['passed', 'type'])
@@ -71,20 +71,6 @@ def test_lint_class_definitions(max_class_definitions, classes, expected_ret, ex
_verify(ret, passed, expected_ret, expected_pass)


@pytest.mark.parametrize(
"test_count, cell_count, min_cell_coverage, expected_ret, expected_pass", [
(0, 10, 50, [LR(False, LintType.CELL_COVERAGE)], False),
(5, 10, 50, [LR(True, LintType.CELL_COVERAGE)], True),
(0, 10, 0, [LR(True, LintType.CELL_COVERAGE)], True),
(0, 10, -1, [], True),
(0, 0, 0, [], True)
]
)
def test_cell_coverage(test_count, cell_count, min_cell_coverage, expected_ret, expected_pass):
ret, passed = lint_cell_coverage(test_count, cell_count, min_cell_coverage)
_verify(ret, passed, expected_ret, expected_pass)


@pytest.mark.parametrize(
"kernelspec_requirements, kernelspec, expected_ret, expected_pass", [
({'name': 'python3'}, {'name': 'python3',
@@ -156,14 +142,12 @@ def test_magics_lists_sanity():
'cells_per_notebook': 2,
'function_definitions': 0,
'class_definitions': 0,
'cell_coverage': 90,
'kernelspec_requirements':
{'name': 'python3'},
'magics_whitelist': ['matplotlib']}, [LR(True, LintType.LINES_PER_CELL)] * 4 +
[LR(False, LintType.CELLS_PER_NOTEBOOK)] +
[LR(False, LintType.FUNCTION_DEFINITIONS)] +
[LR(False, LintType.CLASS_DEFINITIONS)] +
[LR(False, LintType.CELL_COVERAGE)] +
[LR(True, LintType.KERNELSPEC)] +
[LR(True, LintType.MAGICS)], False)
]
22 changes: 20 additions & 2 deletions nbcelltests/tests/test_shared.py
@@ -7,6 +7,7 @@
#
import os

import pytest
import nbformat

from nbcelltests.shared import extract_extrametadata, get_coverage, is_empty
@@ -15,7 +16,7 @@
BASIC_NB = os.path.join(os.path.dirname(__file__), 'basic.ipynb')
MORE_NB = os.path.join(os.path.dirname(__file__), 'more.ipynb')
MAGICS_NB = os.path.join(os.path.dirname(__file__), 'magics.ipynb')
COVERAGE_NB = os.path.join(os.path.dirname(__file__), 'coverage.ipynb')
COVERAGE_NB = os.path.join(os.path.dirname(__file__), '_cell_coverage.ipynb')


def test_is_empty():
@@ -119,5 +120,22 @@ def test_extract_extrametadata_cell_test_coverage():
assert _metadata(COVERAGE_NB, 'test_count') == 1


def test_get_coverage():
def test_get_coverage_nb():
assert get_coverage(extract_extrametadata(nbformat.read(COVERAGE_NB, 4))) == 25


@pytest.mark.parametrize(
"test_count, cell_count, expected", [
(0, 10, 0),
(5, 10, 50),
(10, 10, 100),
(0, 0, 0),
(10, 0, 0), # this would be an error at test generation time
]
)
def test_get_coverage(test_count, cell_count, expected):
metadata = {
'test_count': test_count,
'cell_count': cell_count
}
assert get_coverage(metadata) == expected
75 changes: 73 additions & 2 deletions nbcelltests/tests/test_test.py
@@ -10,7 +10,7 @@
import sys
import unittest

from nbcelltests.test import run
from nbcelltests.test import run, runWithReturn, runWithReport

# TODO: we should generate the notebooks rather than having them as
# files (same for lint ones). Would also allow for simplification of
@@ -22,7 +22,7 @@
COUNTING = os.path.join(os.path.dirname(__file__), '_cell_counting.ipynb')
NONCODE = os.path.join(os.path.dirname(__file__), '_non_code_cell.ipynb')
SKIPS = os.path.join(os.path.dirname(__file__), '_skips.ipynb')
COVERAGE = os.path.join(os.path.dirname(__file__), 'coverage.ipynb')
COVERAGE = os.path.join(os.path.dirname(__file__), '_cell_coverage.ipynb')

# Hack. We want to test expected behavior in distributed situation,
# which we are doing via pytest --forked.
@@ -420,4 +420,75 @@ def test_coverage(self):
raise ValueError("Cell coverage test should have failed.")


######

# should split this file up - but only after deciding on organization
# of module being tested

# TODO: there's a repeated pattern of cleaning up files generated
# during tests, but address
# https://github.com/jpmorganchase/nbcelltests/issues/125 before
# deciding how to handle that better here.

def test_basic_runWithReturn_pass():
"""Basic check - just that it runs without error"""
generates = os.path.join(os.path.dirname(__file__), "_cell_coverage_test.py")
if os.path.exists(generates):
raise ValueError("Going to generate %s but it already exists." % generates)

try:
_ = runWithReturn(COVERAGE, rules={'cell_coverage': 10})
finally:
try:
os.remove(generates)
except Exception:
pass


def test_basic_runWithReturn_fail():
"""Basic check - just that it fails"""
generates = os.path.join(os.path.dirname(__file__), "_cell_coverage_test.py")
if os.path.exists(generates):
raise ValueError("Going to generate %s but it already exists." % generates)

try:
_ = runWithReturn(COVERAGE, rules={'cell_coverage': 100})
except Exception:
pass # would need to alter run fn or capture output to check more exactly
else:
raise ValueError("coverage check should have failed, but didn't")
finally:
try:
os.remove(generates)
except Exception:
pass


def test_basic_runWithReport_pass():
"""Basic check - just that it runs without error"""
generates = os.path.join(os.path.dirname(__file__), "_cell_coverage_test.py")
if os.path.exists(generates):
raise ValueError("Going to generate %s but it already exists." % generates)

from nbcelltests.define import TestType
try:
ret = runWithReport(COVERAGE, executable=None, rules={'cell_coverage': 10})
finally:
try:
os.remove(generates)
except Exception:
pass

assert len(ret) == 1
assert (ret[0].passed, ret[0].type, ret[0].message) == (1, TestType.CELL_COVERAGE, 'Testing cell coverage')


# def test_basic_runWithReport_fail():
# from nbcelltests.define import TestType
# # TODO it fails here, but it shouldn't, right? we want to be able to report
# ret = runWithReport(COVERAGE, executable=None, rules={'cell_coverage':100})
# assert len(ret) == 1
# assert (ret[0].passed, ret[0].type, ret[0].message) == (False, TestType.CELL_COVERAGE, 'Testing cell coverage')


del _TestCellTests # TODO: either make genuinely abstract, or don't use classes/inheritance at all here (since classes/inheritance are not meaningful here anyway).
