-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathpyproject.toml
More file actions
101 lines (91 loc) · 3.06 KB
/
pyproject.toml
File metadata and controls
101 lines (91 loc) · 3.06 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
[project]
name = "abbacus-cortex"
version = "0.3.6"
description = "Cognitive knowledge system with formal ontology, reasoning, and intelligence serving"
readme = "README.md"
authors = [
    { name = "grayisnotacolor", email = "74255714+grayisnotacolor@users.noreply.github.com" }
]
license = "MIT"
license-files = ["LICENSE"]
requires-python = ">=3.12"
keywords = ["knowledge-graph", "knowledge-management", "mcp", "reasoning", "ontology", "rdf", "ai", "semantic-search", "llm"]
# NOTE: no "License ::" classifier — PEP 639 deprecates license classifiers
# when an SPDX `license` expression is used (above), and current build
# backends reject the combination.
classifiers = [
    "Development Status :: 4 - Beta",
    "Environment :: Console",
    "Intended Audience :: Developers",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.12",
    "Programming Language :: Python :: 3.13",
    "Topic :: Scientific/Engineering :: Artificial Intelligence",
    "Topic :: Database :: Database Engines/Servers",
    "Typing :: Typed",
]
dependencies = [
    # Graph / Ontology
    "pyoxigraph>=0.4",
    # Web framework
    "fastapi>=0.115",
    "uvicorn[standard]>=0.34",
    # CLI
    "typer>=0.15",
    # Templates
    "jinja2>=3.1",
    # Config
    "python-dotenv>=1.1",
    # Auth
    "bcrypt>=4.2",
    # MCP
    "mcp>=1.6",
    # HTTP client
    "httpx>=0.28",
    # LLM (provider-agnostic: Anthropic, OpenAI, Ollama, etc.)
    # Pinned at >=1.60: bumping to >=1.83.7 (the patched version for current
    # GHSA alerts) downgrades typer 0.24 -> 0.23 and click 8.3 -> 8.1 via the
    # python-dotenv constraint chain, which breaks ~24 CLI tests that rely on
    # click 8.3's CliRunner.Result.stderr behavior. The flagged litellm CVEs
    # are all in litellm's proxy server endpoints, which Cortex never runs.
    # Cortex is local-only and only calls litellm.completion() as a library.
    "litellm>=1.60",
]

[project.urls]
Homepage = "https://github.com/abbacusgroup/Cortex"
Repository = "https://github.com/abbacusgroup/Cortex"
Documentation = "https://github.com/abbacusgroup/Cortex#readme"
Changelog = "https://github.com/abbacusgroup/Cortex/blob/main/CHANGELOG.md"
"Bug Tracker" = "https://github.com/abbacusgroup/Cortex/issues"

[project.optional-dependencies]
embeddings = ["sentence-transformers>=3.4"]

[project.scripts]
cortex = "cortex.cli.main:app"

[dependency-groups]
dev = [
    "pytest>=9.0.3",
    "pytest-asyncio>=0.25",
    # Bundle 9 / F.1 + F.2: xdist for parallel runs (the CI workflow uses
    # ``pytest -n auto``); forked for the test-isolation suite that needs
    # a fresh process per test.
    "pytest-xdist>=3.8",
    "pytest-forked>=1.6",
    # NOTE(review): httpx is already a runtime dependency above; presumably
    # repeated here so the dev group resolves standalone — confirm.
    "httpx>=0.28",
    "ruff>=0.11",
]

[build-system]
requires = ["uv_build>=0.10.12,<1.0.0"]
build-backend = "uv_build"

# Dotted key below is equivalent to a [tool.uv.build-backend] table:
# maps the package module name (src/cortex) for the uv build backend.
[tool.uv]
build-backend.module-name = "cortex"

[tool.pytest.ini_options]
testpaths = ["tests", "benchmarks"]
asyncio_mode = "auto"
pythonpath = ["src"]
markers = [
    "slow: end-to-end tests that spawn subprocesses (skipped in fast loops)",
    "bench: benchmark suite (run with -m bench)",
]

[tool.ruff]
target-version = "py312"
line-length = 100

[tool.ruff.lint]
select = ["B", "C4", "E", "F", "I", "N", "RUF", "SIM", "W", "UP"]