Skip to content

Commit 8a00146

Browse files
committed
fix: apply code formatting and linting fixes
- Apply Black code formatting to all Python files - Fix import sorting with isort - Resolve all Flake8 linting issues - Fix MyPy type checking errors - Remove unused imports and variables - Fix line length violations and formatting inconsistencies - Add proper type annotations for global variables - Add test_env to .gitignore
1 parent 7af102c commit 8a00146

13 files changed

Lines changed: 279 additions & 171 deletions

File tree

.gitignore

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -27,3 +27,4 @@ node_modules/
2727
*.tmp
2828
plan.md
2929
metadata.npy
30+
test_env/

app.py

Lines changed: 53 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,9 @@
1-
import streamlit as st
21
import logging
3-
from typing import Dict, List
2+
3+
import streamlit as st
44
from openai import OpenAI
5-
from coderag.config import OPENAI_API_KEY, OPENAI_CHAT_MODEL
5+
6+
from coderag.config import OPENAI_API_KEY
67
from prompt_flow import execute_rag_flow
78

89
# Configure logging for Streamlit
@@ -23,9 +24,7 @@
2324

2425
# Set page config
2526
st.set_page_config(
26-
page_title="CodeRAG: Your Coding Assistant",
27-
page_icon="🤖",
28-
layout="wide"
27+
page_title="CodeRAG: Your Coding Assistant", page_icon="🤖", layout="wide"
2928
)
3029

3130
st.title("🤖 CodeRAG: Your Coding Assistant")
@@ -40,20 +39,20 @@
4039
# Sidebar with controls
4140
with st.sidebar:
4241
st.header("Controls")
43-
42+
4443
if st.button("🗑️ Clear Conversation", type="secondary"):
4544
st.session_state.messages = []
4645
st.session_state.conversation_context = []
4746
st.rerun()
48-
47+
4948
# Status indicators
5049
st.header("Status")
5150
if client:
5251
st.success("✅ OpenAI Connected")
5352
else:
5453
st.error("❌ OpenAI Not Connected")
5554
st.error("Please check your API key in .env file")
56-
55+
5756
# Conversation stats
5857
if st.session_state.messages:
5958
st.info(f"💬 {len(st.session_state.messages)} messages in conversation")
@@ -68,73 +67,100 @@
6867

6968
# Chat input with validation
7069
if not client:
71-
st.warning("⚠️ OpenAI client not available. Please configure your API key to use the assistant.")
70+
st.warning(
71+
"⚠️ OpenAI client not available. Please configure your API key to use "
72+
"the assistant."
73+
)
7274
st.stop()
7375

7476
if prompt := st.chat_input("What is your coding question?", disabled=not client):
7577
# Validate input
7678
if not prompt.strip():
7779
st.warning("Please enter a valid question.")
7880
st.stop()
79-
81+
8082
# Add user message
8183
st.session_state.messages.append({"role": "user", "content": prompt})
8284
# Add to conversation context for better continuity
8385
st.session_state.conversation_context.append(f"User: {prompt}")
84-
86+
8587
with st.chat_message("user"):
8688
st.markdown(prompt)
8789

8890
with st.chat_message("assistant"):
8991
message_placeholder = st.empty()
90-
92+
9193
# Show loading indicator
9294
with st.spinner("🔍 Searching codebase and generating response..."):
9395
try:
9496
# Execute RAG flow with error handling
9597
response = execute_rag_flow(prompt)
96-
98+
9799
# Check if response indicates an error
98-
if response.startswith("Error:") or "error occurred" in response.lower():
100+
if (
101+
response.startswith("Error:")
102+
or "error occurred" in response.lower()
103+
):
99104
message_placeholder.error(response)
100105
else:
101106
message_placeholder.markdown(response)
102-
107+
103108
full_response = response
104-
109+
105110
except Exception as e:
106111
error_message = f"Unexpected error: {str(e)}"
107112
logger.error(f"Streamlit error: {error_message}")
108113
message_placeholder.error(error_message)
109114
full_response = error_message
110115

111116
# Add assistant response to session
112-
st.session_state.messages.append({"role": "assistant", "content": full_response})
117+
st.session_state.messages.append(
118+
{"role": "assistant", "content": full_response}
119+
)
113120
# Add to conversation context
114-
st.session_state.conversation_context.append(f"Assistant: {full_response[:200]}...") # Truncate for context
115-
121+
st.session_state.conversation_context.append(
122+
f"Assistant: {full_response[:200]}..."
123+
) # Truncate for context
124+
116125
# Keep conversation context manageable (last 10 exchanges)
117126
if len(st.session_state.conversation_context) > 20:
118-
st.session_state.conversation_context = st.session_state.conversation_context[-20:]
127+
st.session_state.conversation_context = (
128+
st.session_state.conversation_context[-20:]
129+
)
119130

120131
# Footer with helpful information
121132
if not st.session_state.messages:
122133
st.markdown("---")
123134
st.markdown("### 💡 Tips for better results:")
124-
st.markdown("""
135+
st.markdown(
136+
"""
125137
- Ask specific questions about your code
126138
- Mention file names or functions you're interested in
127139
- Request explanations, improvements, or debugging help
128140
- Ask about code patterns or best practices
129-
""")
130-
141+
"""
142+
)
143+
131144
st.markdown("### 🚀 Example queries:")
132145
col1, col2 = st.columns(2)
133146
with col1:
134147
if st.button("📝 Explain the indexing process"):
135-
st.session_state.messages.append({"role": "user", "content": "Explain how the FAISS indexing works in this codebase"})
148+
st.session_state.messages.append(
149+
{
150+
"role": "user",
151+
"content": "Explain how the FAISS indexing works in this codebase",
152+
}
153+
)
136154
st.rerun()
137155
with col2:
138156
if st.button("🐛 Help debug search issues"):
139-
st.session_state.messages.append({"role": "user", "content": "How can I debug issues with code search not returning results?"})
140-
st.rerun()
157+
st.session_state.messages.append(
158+
{
159+
"role": "user",
160+
"content": (
161+
"How can I debug issues with code search not returning "
162+
"results?"
163+
),
164+
}
165+
)
166+
st.rerun()

coderag/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
# __init__.py
1+
# __init__.py

coderag/config.py

Lines changed: 8 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
import os
2+
23
from dotenv import load_dotenv
34

45
# Load environment variables from the .env file
@@ -7,17 +8,21 @@
78
# === Environment Variables ===
89
# OpenAI API key and model settings (loaded from .env)
910
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
10-
OPENAI_EMBEDDING_MODEL = os.getenv("OPENAI_EMBEDDING_MODEL", "text-embedding-ada-002") # Default to ada-002
11+
OPENAI_EMBEDDING_MODEL = os.getenv(
12+
"OPENAI_EMBEDDING_MODEL", "text-embedding-ada-002"
13+
) # Default to ada-002
1114
OPENAI_CHAT_MODEL = os.getenv("OPENAI_CHAT_MODEL", "gpt-4") # Default to GPT-4
1215

1316
# Embedding dimension (from .env or fallback)
1417
EMBEDDING_DIM = int(os.getenv("EMBEDDING_DIM", 1536)) # Default to 1536 if not in .env
1518

1619
# Project directory (from .env)
17-
WATCHED_DIR = os.getenv("WATCHED_DIR", os.path.join(os.getcwd(), 'CodeRAG'))
20+
WATCHED_DIR = os.getenv("WATCHED_DIR", os.path.join(os.getcwd(), "CodeRAG"))
1821

1922
# Path to FAISS index (from .env or fallback)
20-
FAISS_INDEX_FILE = os.getenv("FAISS_INDEX_FILE", os.path.join(WATCHED_DIR, 'coderag_index.faiss'))
23+
FAISS_INDEX_FILE = os.getenv(
24+
"FAISS_INDEX_FILE", os.path.join(WATCHED_DIR, "coderag_index.faiss")
25+
)
2126

2227
# === Project-Specific Configuration ===
2328
# Define the root directory of the project

coderag/embeddings.py

Lines changed: 13 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,9 @@
11
import logging
22
from typing import Optional
3-
from openai import OpenAI
3+
44
import numpy as np
5+
from openai import OpenAI
6+
57
from coderag.config import OPENAI_API_KEY, OPENAI_EMBEDDING_MODEL
68

79
logger = logging.getLogger(__name__)
@@ -16,36 +18,37 @@
1618
logger.error(f"Failed to initialize OpenAI client: {e}")
1719
client = None
1820

21+
1922
def generate_embeddings(text: str) -> Optional[np.ndarray]:
2023
"""Generate embeddings using OpenAI's embedding API.
21-
24+
2225
Args:
2326
text: The input text to generate embeddings for
24-
27+
2528
Returns:
2629
numpy array of embeddings or None if generation fails
2730
"""
2831
if not client:
2932
logger.error("OpenAI client not initialized")
3033
return None
31-
34+
3235
if not text or not text.strip():
3336
logger.warning("Empty text provided for embedding generation")
3437
return None
35-
38+
3639
try:
3740
logger.debug(f"Generating embeddings for text of length: {len(text)}")
3841
response = client.embeddings.create(
3942
model=OPENAI_EMBEDDING_MODEL,
40-
input=[text.strip()] # Input should be a list of strings
43+
input=[text.strip()], # Input should be a list of strings
4144
)
42-
45+
4346
# Extract the embedding from the response
4447
embeddings = response.data[0].embedding
45-
result = np.array(embeddings).astype('float32').reshape(1, -1)
48+
result = np.array(embeddings).astype("float32").reshape(1, -1)
4649
logger.debug(f"Successfully generated embeddings with shape: {result.shape}")
4750
return result
48-
51+
4952
except Exception as e:
5053
logger.error(f"Failed to generate embeddings: {str(e)}")
51-
return None
54+
return None

0 commit comments

Comments (0)