112 changes: 77 additions & 35 deletions gofer/ok.py
@@ -3,16 +3,16 @@
import io
import itertools
import json
import glob
import os
import os.path as op
import random
import string
from contextlib import redirect_stderr, redirect_stdout
from jinja2 import Template
from textwrap import dedent

from .notebook import execute_notebook, _global_anywhere
from .utils import hide_outputs
from .utils import hide_outputs, cd
from pygments import highlight
from pygments.lexers import PythonConsoleLexer
from pygments.formatters import HtmlFormatter
@@ -135,15 +135,17 @@ def from_file(cls, path):


class OKTests:
def __init__(self, test_paths):
def __init__(self, test_paths, cwd=None):
self.paths = test_paths
self.tests = [OKTest.from_file(path) for path in self.paths]
self.cwd = cwd if cwd else os.getcwd()

def run(self, global_environment, include_grade=True):
passed_tests = []
failed_tests = []
for t in self.tests:
passed, hint = t.run(global_environment)
with cd(self.cwd):
passed, hint = t.run(global_environment)
if passed:
passed_tests.append(t)
else:
@@ -207,56 +209,96 @@ def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))


def grade_notebook(notebook_path, tests_glob=None):
"""
Grade a notebook file & return grade
"""
def _already_in_gofer():
try:
# Many notebooks call grade_notebook within themselves. These notebooks are
# then executed by gofer - which will in turn execute grade_notebook again,
# putting us in an infinite loop.
# We use this sentinel to detect and break out of that loop.
_global_anywhere('__GOFER_GRADER__')
# FIXME: Do something else here?
return None
return True
except NameError:
pass
return False

with open(notebook_path) as f:
nb = json.load(f)

def run_nb_tests(nb, cwd,
tests_glob=None,
initial_env=None,
ignore_errors=True):
"""
Run tests on a loaded notebook and return a list of test results

Parameters
----------
nb : dict
Notebook dictionary.
cwd : str
Path in which to run notebook / test code.
tests_glob : None or sequence
Sequence of filenames for tests to run. If None, corresponds to empty
sequence.
initial_env : None or dict
Initial environment for running tests. Defaults to empty.
ignore_errors : {True, False}
If True, ignore errors when executing the notebook.

Returns
-------
test_results : list
List of :class:`OKTestResult` instances.
"""
initial_env = {} if initial_env is None else initial_env
if _already_in_gofer():
# FIXME: Do something else here?
return None

secret = id_generator()
results_array = "check_results_{}".format(secret)
initial_env = {
initial_env.update({
# Set this to prevent recursive executions!
'__GOFER_GRADER__': True,
results_array: []
}
})

global_env = execute_notebook(nb, secret, initial_env, ignore_errors=True)
with cd(cwd):
global_env = execute_notebook(nb,
secret,
initial_env,
ignore_errors=ignore_errors)

test_results = global_env[results_array]

# Check for tests which were not included in the notebook and specified by tests_globs
# Allows instructors to run notebooks with additional tests not accessible to user
if tests_glob:
# unpack list of paths into a single list
tested_set = list(itertools.chain(*[r.paths for r in test_results]))
print(tested_set)
extra_tests = []
for t in sorted(tests_glob):
include = True
for tested in tested_set:
if tested in t: # e.g. if 'tests/q1.py' is in /srv/repo/lab01/tests/q1.py'
include = False
if include:
extra_tests.append(OKTests([t]))
extra_results = [t.run(global_env, include_grade=False) for t in extra_tests]
test_results += extra_results
# Check for tests which were not included in the notebook but were
# specified by tests_glob.
# This allows instructors to run notebooks with additional tests that are
# not accessible to the user.
if tests_glob is None or len(tests_glob) == 0:
return test_results

# unpack list of paths into a single list
tested_set = list(itertools.chain(*[r.paths for r in test_results]))
print(tested_set)
extra_tests = []
for t in sorted(tests_glob):
include = True
for tested in tested_set:
if tested in t: # e.g. 'tests/q1.py' is in '/srv/repo/lab01/tests/q1.py'
include = False
if include:
extra_tests.append(OKTests([t], cwd=cwd))
extra_results = [t.run(global_env, include_grade=False) for t in extra_tests]
return test_results + extra_results


def grade_notebook(notebook_path, tests_glob=None):
"""
Grade a notebook file & return grade
"""
with open(notebook_path) as f:
nb = json.load(f)
test_results = run_nb_tests(nb, op.dirname(notebook_path), tests_glob)
# Avoid a divide-by-zero error if there are no tests.
score = sum([r.grade for r in test_results])/max(len(test_results), 1)

# If within an IPython or Jupyter environment, display hints
display_defined = False
try:
@@ -273,7 +315,7 @@ def grade_notebook(notebook_path, tests_glob=None):
return score


def check(test_file_path, global_env=None):
def check(test_file_path, global_env=None, cwd=None):
"""
Check global_env against the given test_file in OK test format

@@ -286,7 +328,7 @@ def check(test_file_path, global_env=None):

Returns a TestResult object.
"""
tests = OKTests([test_file_path])
tests = OKTests([test_file_path], cwd=cwd)

if global_env is None:
# Get the global env of our callers - one level below us in the stack
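For reference, a minimal sketch of the new run_nb_tests API added above. The paths assume the tests/notebooks/pwd fixture introduced in this PR, the fixture notebook's dependencies (the okpy client) are assumed importable, and the call mirrors tests/test_nb.py below.

```python
import json
from glob import glob

from gofer.ok import run_nb_tests

# Load the notebook as a plain dict, as grade_notebook does above.
with open('tests/notebooks/pwd/pwd.ipynb') as f:
    nb = json.load(f)

# Run the notebook and its tests from the notebook's own directory,
# seeding the environment with the variable the instructor-only test needs.
results = run_nb_tests(nb,
                       cwd='tests/notebooks/pwd',
                       tests_glob=glob('tests/notebooks/pwd/tests/q*.py'),
                       initial_env={'extra_variable': 1})
print([r.grade for r in results])
```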
17 changes: 16 additions & 1 deletion gofer/utils.py
@@ -1,7 +1,10 @@
import os
import sys
from contextlib import contextmanager

from IPython import get_ipython


def flush_inline_matplotlib_plots():
"""
Flush matplotlib plots immediately, rather than asynchronously.
@@ -46,4 +49,16 @@ def hide_outputs():
yield
finally:
flush_inline_matplotlib_plots()
ipy.display_formatter.formatters = old_formatters
ipy.display_formatter.formatters = old_formatters


@contextmanager
def cd(newdir):
""" Context manager to change working directory
"""
prevdir = os.getcwd()
os.chdir(os.path.expanduser(newdir))
try:
yield
finally:
os.chdir(prevdir)
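A minimal sketch of the new cd context manager in use; the directory paths here are hypothetical.

```python
import os

from gofer.utils import cd

print(os.getcwd())                      # e.g. /srv/repo
with cd('tests/notebooks/pwd'):         # change into the notebook's directory
    assert os.path.isfile('pwd.ipynb')  # relative paths now resolve here
print(os.getcwd())                      # restored to /srv/repo, even on error
```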
22 changes: 22 additions & 0 deletions tests/notebooks/pwd/extra_tests/has_var.py
@@ -0,0 +1,22 @@
test = {
'name': 'Presence of variable',
'points': 1,
'suites': [
{
'cases': [
{
'code': r"""
>>> 'extra_variable' in vars()
True
""",
'hidden': False,
'locked': False
},
],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'
}
]
}
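This extra test only passes when extra_variable is present in the graded environment. A sketch of exercising it directly through gofer.ok.check with the new cwd argument, assuming the returned result exposes a grade attribute as the test-suite code suggests:

```python
from gofer.ok import check

# Supply the variable the doctest looks for; without it the case fails.
result = check('tests/notebooks/pwd/extra_tests/has_var.py',
               global_env={'extra_variable': 1},
               cwd='tests/notebooks/pwd')
print(result.grade)  # expected: 1
```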
73 changes: 73 additions & 0 deletions tests/notebooks/pwd/pwd.ipynb
@@ -0,0 +1,73 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from client.api.notebook import Notebook\n",
"ok = Notebook('pwd.ok')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"my_variable = 1"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Just tests if \"my_variable\" is defined.\n",
"ok.grade('q1')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Generate an error, to test ignoring errors in notebook execution."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"1/0"
]
}
],
"metadata": {
"jupytext": {
"split_at_heading": true
},
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.4"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
23 changes: 23 additions & 0 deletions tests/notebooks/pwd/tests/q1.py
@@ -0,0 +1,23 @@
test = {
'name': '',
'points': 1,
'suites': [
{
'cases': [
{
'code': r"""
>>> import os
>>> os.path.isfile('pwd.ipynb')
True
""",
'hidden': False,
'locked': False
},
],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'
}
]
}
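This q1 test passes only when the working directory contains pwd.ipynb, which is exactly what the with cd(self.cwd) change in OKTests.run guarantees. A sketch, again assuming run returns a result with a grade attribute:

```python
from gofer.ok import OKTests

# The test imports os itself, so an empty global environment suffices.
tests = OKTests(['tests/notebooks/pwd/tests/q1.py'],
                cwd='tests/notebooks/pwd')
result = tests.run({}, include_grade=True)
print(result.grade)  # 1 when run from the notebook's directory, else 0
```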
47 changes: 46 additions & 1 deletion tests/test_nb.py
@@ -1,10 +1,16 @@
import os
import os.path as op
import json
import pytest
from glob import glob
from gofer.ok import grade_notebook

import nbformat

from gofer.ok import grade_notebook, run_nb_tests
from gofer.notebook import execute_notebook, _global_anywhere

import pytest

here = os.path.dirname(__file__)

def test_simple_execute():
@@ -102,3 +108,42 @@ def test_grade_notebook():

zero_grade_notebook = os.path.join(here, 'notebooks/grading/zero-grade.ipynb')
assert grade_notebook(zero_grade_notebook, glob(test_paths)) == 0


def test_nb_wd():
# Test that the notebook runs in its own directory.
# The test fails, giving a score of 0, if the notebook does not run there.
test_paths = os.path.join(here, 'notebooks', 'pwd', 'tests', 'q*.py')
pwd_notebook = os.path.join(here, 'notebooks', 'pwd', 'pwd.ipynb')
assert grade_notebook(pwd_notebook, glob(test_paths)) == 1


def test_run_nb_tests():
# Test the API for run_nb_tests.
# The test fails, giving a score of 0, if the notebook does not run in its
# own directory.
pwd_path = op.join(here, 'notebooks', 'pwd')
test_paths = op.join(pwd_path, 'tests', 'q*.py')
tests = glob(test_paths)
pwd_notebook = op.join(pwd_path, 'pwd.ipynb')
nb = nbformat.read(pwd_notebook, nbformat.NO_CONVERT)
test_results = run_nb_tests(nb, pwd_path, tests)
assert len(test_results) == 1
assert test_results[0].grade == 1
# When the directory is incorrect, the test fails.
test_results = run_nb_tests(nb, here, tests)
assert test_results[0].grade == 0
# We can add variables to the environment with initial_env
env_test = op.join(pwd_path, 'extra_tests', 'has_var.py')
# Fails without the initial variable.
test_results = run_nb_tests(nb, pwd_path, [env_test])
assert test_results[0].grade == 0
# Passes with it.
test_results = run_nb_tests(nb, pwd_path, [env_test],
initial_env={'extra_variable': 1}
)
assert test_results[0].grade == 1
# The test notebook also has a cell causing an error.
# By default, testing ignores the error. With ignore_errors=False, the
# error falls through to us.
with pytest.raises(ZeroDivisionError):
test_results = run_nb_tests(nb, pwd_path, tests, ignore_errors=False)