Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,9 @@ RUN = poetry run
test:
$(RUN) python -m unittest tests/test_*py tests/*/test_*py

lint-fix:
$(RUN) tox -e format

# not yet deployed
doctest:
find src docs -type f \( -name "*.rst" -o -name "*.md" -o -name "*.py" \) -print0 | xargs -0 $(RUN) python -m doctest --option ELLIPSIS --option NORMALIZE_WHITESPACE
Expand Down
607 changes: 14 additions & 593 deletions poetry.lock

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ curies = ">=0.6.6"
pronto = ">=2.5.0"
SPARQLWrapper = "*"
SQLAlchemy = ">=1.4.32"
linkml-runtime = ">=1.5.3"
linkml-runtime = "1.9.1-rc1"
linkml-renderer = ">=0.3.0"
networkx = ">=2.7.1"
sssom = "^0.4.4"
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,14 @@
from prefixmaps.io.parser import load_multi_context
from sssom_schema import Mapping

from oaklib.datamodels.obograph import DefinitionPropertyValue, Edge, Graph, Meta, Node, SynonymPropertyValue
from oaklib.datamodels.obograph import (
DefinitionPropertyValue,
Edge,
Graph,
Meta,
Node,
SynonymPropertyValue,
)
from oaklib.datamodels.search import SearchConfiguration
from oaklib.datamodels.text_annotator import TextAnnotation, TextAnnotationConfiguration
from oaklib.datamodels.vocabulary import LABEL_PREDICATE, SEMAPV
Expand Down Expand Up @@ -552,6 +559,7 @@ def entity_alias_map(self, curie: CURIE) -> ALIAS_MAP:
if meta is not None:
for syn in meta.synonyms:
from oaklib.converters.obo_graph_to_rdf_owl_converter import SCOPE_MAP

pred = SCOPE_MAP.get(syn.pred, None)
m[pred].append(syn.val)
return m
Expand Down
38 changes: 19 additions & 19 deletions tests/test_cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -87,7 +87,7 @@ def _out(self, path: Optional[str] = TEST_OUT) -> str:
def test_main_help(self):
result = self.runner.invoke(main, ["--help"])
out = result.stdout
print("STDERR", result.stderr)
logging.info("STDERR", result.stderr)
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Maybe switch to a named logger as suggested here (tutorial or logging cookbook).

To change insert after the import section

logger = logging.getLogger(__name__)

and then replace all logging.debug, logging.info etc. by logger.debug, logger.info etc.

linkml-runtime also does it this way (test_schemaview.py).

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

yep! definitely. not done with this implementation at all, just first pass at getting the output a bit more grok'able so I can see the breaking errors.

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think this refactoring should go in another PR. The changes here are consistent with the rest of the library and I captured the next step we need to take here: #838

self.assertIn("search", out)
self.assertIn("subset", out)
self.assertIn("validate", out)
Expand Down Expand Up @@ -138,8 +138,8 @@ def test_info(self):
main,
["-i", str(input_arg), "info", NUCLEUS, "-o", TEST_OUT, "-D", "x,d"],
)
print("STDERR", result.stdout)
print("STDERR", result.stderr)
logging.info("STDERR", result.stdout)
logging.info("STDERR", result.stderr)
self.assertEqual(0, result.exit_code)
with open(TEST_OUT) as file:
contents = "\n".join(file.readlines())
Expand All @@ -149,8 +149,8 @@ def test_info(self):
result = self.runner.invoke(
main, ["-i", str(input_arg), "info", NUCLEUS, "-o", TEST_OUT, "-D", "x"]
)
print("STDERR", result.stdout)
print("STDERR", result.stderr)
logging.info("STDERR", result.stdout)
logging.info("STDERR", result.stderr)
self.assertEqual(0, result.exit_code)
with open(TEST_OUT) as file:
contents = "\n".join(file.readlines())
Expand Down Expand Up @@ -383,8 +383,8 @@ def test_gap_fill(self):
TEST_OUT,
],
)
print("STDERR", result.stdout)
print("STDERR", result.stderr)
logging.info("STDERR", result.stdout)
logging.info("STDERR", result.stderr)
self.assertEqual(0, result.exit_code)
contents = self._out()
self.assertIn(NUCLEUS, contents)
Expand Down Expand Up @@ -428,7 +428,7 @@ def test_paths(self):
result = self.runner.invoke(main, all_args, catch_exceptions=False)
self.assertEqual(0, result.exit_code)
out = result.stdout
# print(input_arg, case, out)
# logging.info(input_arg, case, out)
self.assertIn(expected, out)
if unexpected:
self.assertNotIn(unexpected, out)
Expand Down Expand Up @@ -610,8 +610,8 @@ def test_dump(self):
cmd.extend(["-c", conf_path])
result = self.runner.invoke(main, cmd)
if result.exit_code != 0:
print("STDOUT", result.stdout)
print("STDERR", result.stderr)
logging.info("STDOUT", result.stdout)
logging.info("STDERR", result.stderr)
self.assertEqual(0, result.exit_code, f"input={input}, output_format={output_format}")
if output_format == "obojson":
obj: obograph.GraphDocument
Expand Down Expand Up @@ -722,7 +722,7 @@ def test_extract(self):
"-O",
output_format,
] + query
# print(cmd)
# logging.info(cmd)
if dangling:
cmd += ["--dangling"]
result = self.runner.invoke(main, cmd)
Expand All @@ -736,7 +736,7 @@ def test_extract(self):
nucleus_node = [n for n in g.nodes if n.lbl == "nucleus"][0]
self.assertTrue(nucleus_node is not None)
# TODO
# print(nucleus_node)
# logging.info(nucleus_node)
# self.assertTrue(nucleus_node.meta.definition.val.startswith("A membrane-bounded organelle"))
elif output_format == "fhirjson":
obj: fhir.CodeSystem
Expand Down Expand Up @@ -1040,8 +1040,8 @@ def test_query(self):

def test_validate_help(self):
result = self.runner.invoke(main, ["validate", "--help"])
print("STDERR", result.stdout)
print("STDERR", result.stderr)
logging.info("STDERR", result.stdout)
logging.info("STDERR", result.stderr)
self.assertEqual(0, result.exit_code)

def test_validate_bad_ontology(self):
Expand Down Expand Up @@ -1179,7 +1179,7 @@ def test_lexmatch_owl(self):
"--no-ensure-strict-prefixes",
],
)
print("STDERR", result.stdout)
logging.info("STDERR", result.stdout)
err = result.stderr
self.assertEqual(0, result.exit_code)
with open(outfile) as stream:
Expand Down Expand Up @@ -1207,7 +1207,7 @@ def test_lexmatch_sqlite(self):
"--no-ensure-strict-prefixes",
],
)
print("STDERR", result.stdout)
logging.info("STDERR", result.stdout)
err = result.stderr
self.assertEqual("", err)
self.assertEqual(0, result.exit_code)
Expand Down Expand Up @@ -1495,7 +1495,7 @@ def test_statistics(self):
args = ["-i", str(input_arg), "statistics", "-o", str(out_path)] + opts
result = self.runner.invoke(main, args)
err = result.stderr
# print(" ".join(args))
# logging.info(" ".join(args))
logging.info(f"ERR={err}")
self.assertEqual(0, result.exit_code)
with open(out_path) as file:
Expand All @@ -1521,7 +1521,7 @@ def test_annotate_file(self):
outfile,
],
)
print("STDERR", result.stdout)
logging.info("STDERR", result.stdout)
err = "\n".join(
[line for line in result.stderr.split("\n") if not line.startswith("WARNING")]
)
Expand Down Expand Up @@ -1550,7 +1550,7 @@ def test_annotate_words(self):
outfile,
],
)
print("STDERR", result.stdout)
logging.info("STDERR", result.stdout)
err = result.stderr
self.assertEqual("", err)
self.assertEqual(0, result.exit_code)
Expand Down
3 changes: 2 additions & 1 deletion tests/test_converters/test_obo_graph_to_cx.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import json
import logging
import unittest

import curies
Expand Down Expand Up @@ -27,7 +28,7 @@ def test_convert(self):
"""Tests parsing then converting to cx document."""
gd: GraphDocument = json_loader.load(str(ONT), target_class=GraphDocument)
doc = self.converter.convert(gd)
print(doc)
logging.info(doc)
with open(OUT, "w", encoding="UTF-8") as f:
json.dump(doc, f)
cxn = create_nice_cx_from_file(OUT)
Expand Down
30 changes: 15 additions & 15 deletions tests/test_implementations/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -464,7 +464,7 @@ def test_skos_mappings(self, oi: MappingProviderInterface):
mappings = [
(m.subject_id, m.predicate_id, m.object_id) for m in oi.sssom_mappings(curies)
]
print(mappings)
logging.info(mappings)
expected = [
("X:0000001", "oio:hasDbXref", "Y:1"),
("X:0000001", "skos:exactMatch", "schema:Person"),
Expand Down Expand Up @@ -873,13 +873,13 @@ def test_chains(self, oi: OboGraphInterface):
]

for chain_query, included, excluded in cases:
print(f"Q={chain_query} I={included} E={excluded}")
logging.info(f"Q={chain_query} I={included} E={excluded}")
chains = list(oi.chains(chain_query))
print(f". RESULTS={chains}")
# print(len(chains))
logging.info(f". RESULTS={chains}")
# logging.info(len(chains))
tuples_chains = []
for chain in chains:
# print(chain)
# logging.info(chain)
tuple_chain = []
for e in chain:
tuple_chain.append((e.sub, e.pred, e.obj))
Expand Down Expand Up @@ -1080,7 +1080,7 @@ def test_merge(self, target: MergeInterface, source: BasicOntologyInterface):
merged_entities = set(target.entities(owl_type=OWL_CLASS))
diff = merged_entities.difference(target_entities.union(source_entities))
for x in diff:
print(x)
logging.info(x)
test.assertCountEqual(target_entities.union(source_entities), merged_entities)
in_both = target_entities.intersection(source_entities)
test.assertIn(CELL, in_both)
Expand Down Expand Up @@ -1184,8 +1184,8 @@ def test_diff(self, oi: DifferInterface, oi_modified: DifferInterface):
n_unexpected += 1
ch.type = type(ch).__name__
for e in expected:
print("Expected not found:")
print(yaml_dumper.dumps(e))
logging.info("Expected not found:")
logging.info(yaml_dumper.dumps(e))
test.assertEqual(0, len(expected), f"Expected changes not found: {expected}")
expected_rev = [
kgcl.NewSynonym(
Expand Down Expand Up @@ -1235,8 +1235,8 @@ def test_diff(self, oi: DifferInterface, oi_modified: DifferInterface):
n_unexpected += 1
ch.type = type(ch).__name__
for e in expected_rev:
print("Expected (reversed) not found:")
print(yaml_dumper.dumps(e))
logging.info("Expected (reversed) not found:")
logging.info(yaml_dumper.dumps(e))
test.assertEqual(0, len(expected_rev), f"Expected changes not found: {expected_rev}")
test.assertEqual(0, n_unexpected, f"Unexpected changes: {n_unexpected}")
# test diff summary
Expand All @@ -1252,7 +1252,7 @@ def test_diff(self, oi: DifferInterface, oi_modified: DifferInterface):
("EdgeDeletion", 5),
]
for typ, expected in cases:
print(typ)
logging.info(typ)
test.assertEqual(expected, residual[typ])

def test_as_obograph(self, oi: OboGraphInterface):
Expand Down Expand Up @@ -1726,7 +1726,7 @@ def test_patcher_obsoletion_chains(self, get_adapter_function: Callable):
continue
# expanded_changes = oi.expand_changes(changes, apply=False)
# for change in expanded_changes:
# print(json_dumper.dumps(change))
# logging.info(json_dumper.dumps(change))
expanded_changes = oi.expand_changes(changes, apply=True)
logging.info(f"Expanded changes: {len(expanded_changes)}")
test.assertGreater(len(expanded_changes), 1)
Expand Down Expand Up @@ -2150,7 +2150,7 @@ def test_information_content_scores(
use_associations=use_associations,
):
m[curie] = score
print(f"{curie} IC= {score}")
logging.info(f"{curie} IC= {score}")
test.assertGreater(len(m), 0)
if use_associations:
# test.assertEqual(m[CELLULAR_COMPONENT], 0.0, "all genes are under cell component")
Expand All @@ -2159,7 +2159,7 @@ def test_information_content_scores(
test.assertLess(m[NUCLEAR_ENVELOPE], 1.0)
# universal root node always has zero information content
for k, v in m.items():
print(f"{k} IC= {v}")
logging.info(f"{k} IC= {v}")
test.assertEqual(m[OWL_THING], 0.0)
for child, parent in posets:
if use_associations:
Expand Down Expand Up @@ -2272,7 +2272,7 @@ def test_annotate_text(self, oi: TextAnnotatorInterface):
anns = sorted(anns, key=lambda x: x.subject_start)
test.assertEqual(n, len(anns))
for i, ann in enumerate(anns):
print(ann)
logging.info(ann)
object_id, object_label, subject_start, subject_end = expected[i]
test.assertEqual(object_id, ann.object_id)
test.assertEqual(object_label, ann.object_label)
Expand Down
2 changes: 1 addition & 1 deletion tests/test_implementations/test_sqldb.py
Original file line number Diff line number Diff line change
Expand Up @@ -611,7 +611,7 @@ def test_create_from_input_specification(self):
def test_integration_create_from_hpo_input_specification(self):
spec = yaml_loader.load(str(CONF_DIR_PATH / "hpoa-input-spec.yaml"), InputSpecification)
oi = get_adapter(spec)
print(oi)
logging.info(oi)

def test_store_associations(self):
shutil.copyfile(DB, MUTABLE_DB)
Expand Down