-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmodel_params.py
More file actions
60 lines (44 loc) · 1.79 KB
/
model_params.py
File metadata and controls
60 lines (44 loc) · 1.79 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
"""
Model Parameters Example
모델별 파라미터 정보 확인
"""
from beanllm import get_registry
def main():
    """Print capability and parameter details for a sample of registered models.

    Looks up each model in the beanllm registry and reports its provider,
    type, capability flags (streaming / temperature / max-tokens), and the
    per-parameter support table. Models missing from the registry are
    reported and skipped.
    """
    print("=== Model Parameters Check ===\n")
    registry = get_registry()

    # Sample models chosen to cover several providers and the differing
    # parameter conventions between OpenAI generations.
    candidates = [
        "gpt-4o",                      # Standard OpenAI
        "gpt-5-mini",                  # New OpenAI (max_completion_tokens)
        "gpt-5-nano",                  # New OpenAI (no temperature)
        "claude-3-5-sonnet-20241022",  # Claude
        "gemini-2.5-flash",            # Gemini
        "phi3.5",                      # Ollama
    ]

    for name in candidates:
        info = registry.get_model_info(name)

        # Guard clause: unknown model — report and move on.
        if not info:
            print(f"❌ {name}: Not found")
            print()
            continue

        print(f"📦 {name}")
        print(f" Provider: {info.provider}")
        print(f" Type: {info.model_type}")

        # Capability flags rendered as check/cross marks.
        print("\n Capabilities:")
        print(f" Streaming: {'✅' if info.supports_streaming else '❌'}")
        print(f" Temperature: {'✅' if info.supports_temperature else '❌'}")
        if info.supports_temperature:
            print(f" Range: {info.default_temperature}")
        print(f" Max Tokens: {'✅' if info.supports_max_tokens else '❌'}")
        if info.uses_max_completion_tokens:
            # Some newer OpenAI models renamed the token-limit parameter.
            print(" ⚠️ Uses 'max_completion_tokens' instead of 'max_tokens'")
        print(f" Max Value: {info.max_tokens}")

        # Per-parameter support table, with optional free-form notes.
        print("\n Parameters:")
        for param in info.parameters:
            mark = "✅" if param.supported else "❌"
            print(f" {mark} {param.name} ({param.type})")
            if param.notes:
                print(f" Note: {param.notes}")
        print()
if __name__ == "__main__":
main()