# Clepho Configuration
# Copy to ~/.config/clepho/config.toml
# See docs/configuration.md for full documentation

[database]
# Backend: "sqlite" (default) or "postgresql"
backend = "sqlite"
# SQLite database path (used when backend = "sqlite")
# NOTE: TOML performs no variable expansion; the leading "~" is presumably
# expanded to $HOME by the application — confirm in docs/configuration.md
sqlite_path = "~/.local/share/clepho/clepho.db"
# PostgreSQL connection URL (used when backend = "postgresql")
# Requires building with: cargo build --features postgres
# postgresql_url = "postgresql://user:password@localhost:5432/clepho"
# Connection pool size for PostgreSQL (default: 10)
# pool_size = 10
[llm]
# Provider: lmstudio, ollama, openai, anthropic
provider = "lmstudio"
# Base URL of the provider's API (OpenAI-compatible endpoint for lmstudio)
endpoint = "http://127.0.0.1:1234/v1"
# Model identifier as exposed by the selected provider
model = "gemma-3-4b"
# api_key = "sk-..." # Required for openai/anthropic
# Custom prompt context prepended to image descriptions (global default).
# Use this to steer responses (e.g., shorter output, specific focus).
# custom_prompt = "Keep responses brief, under 50 words. Focus only on the main subject."
#
# Per-folder prompts: When you confirm a Scan, LLM Describe, or Batch LLM action,
# you can edit a per-folder prompt in the confirmation dialog. Per-folder prompts
# are stored in the database and override the global custom_prompt for that directory.
#
# Override the base LLM prompt entirely (replaces the built-in prompt).
# Must include JSON format instruction for structured parsing to work.
# base_prompt = "Describe this photo briefly. Respond with JSON: {\"description\": \"...\", \"tags\": [\"tag1\", \"tag2\"]}"
# Number of concurrent LLM requests for batch processing (default: 4)
# batch_concurrency = 4
# Request structured JSON output from the LLM provider (default: true).
# Uses native JSON mode for OpenAI-compatible and Ollama providers.
# Disable if your model doesn't support JSON mode (e.g. some LM Studio models).
# json_mode = true
[scanner]
# File extensions recognized as images when scanning directories
image_extensions = [
    "jpg", "jpeg", "png", "gif", "webp",
    "heic", "heif", "raw", "cr2", "nef", "arw", "dng",
]
# Perceptual hash similarity threshold (0-256, lower = stricter)
similarity_threshold = 50

[preview]
# Terminal graphics protocol: auto, sixel, kitty, iterm2, halfblocks, none
# ("auto" presumably detects terminal capability at runtime — confirm in docs)
protocol = "auto"
# Preview image size (pixels)
thumbnail_size = 1024
# external_viewer = "feh" # Override system default
[thumbnails]
# Directory where generated thumbnails are cached.
# NOTE: "~" is presumably expanded to $HOME by the application — TOML itself
# performs no expansion.
path = "~/.cache/clepho/thumbnails"
# Thumbnail size in pixels
size = 256
[trash]
# Directory where deleted files are held before permanent removal
path = "~/.local/share/clepho/.trash"
# Retention limits for trashed files
max_age_days = 30
max_size_bytes = 1_073_741_824 # 1 GiB (2^30 bytes)

[schedule]
# When true, check for overdue scheduled items when the application starts
# (see docs/configuration.md for what counts as overdue)
check_overdue_on_startup = true
[keybindings]
# Yazi-compatible defaults - uncomment to customize.
# Each binding is an array of key names; any listed key triggers the action.
# move_down = ["j", "Down"]
# move_up = ["k", "Up"]
# go_parent = ["h", "Left", "Backspace"]
# enter_selected = ["l", "Right", "Enter"]
# yank_files = ["y", "x"]
# paste_files = ["p"]
# delete_files = ["d", "Delete"]
# rename_files = ["r"]
# toggle_hidden = ["."]
# toggle_show_all_files = ["H"]
# find_duplicates = ["u"]
# describe_with_llm = ["i"]
# scan = ["s"]
# semantic_search = ["/"]
# manage_people = ["P"]
# view_trash = ["X"]
# open_gallery = ["A"]
# open_slideshow = ["S"]
# quit = ["q"]
# show_help = ["?"]