diff --git a/backend/.env.sample b/backend/.env.sample index 06016ce..20df2fe 100644 --- a/backend/.env.sample +++ b/backend/.env.sample @@ -1,6 +1,6 @@ # Server Configuration Variables -NODE_ENV=... -PORT=... +NODE_ENV=development +PORT=3000 # Firebase Configuration Variables FIREBASE_API_KEY=... @@ -10,30 +10,35 @@ FIREBASE_STORAGE_BUCKET=... FIREBASE_MESSAGING_SENDER_ID=... FIREBASE_APP_ID=... FIREBASE_MEASUREMENT_ID=... +FIREBASE_DATABASE_URL=... -# OAuth +# Google OAuth GOOGLE_CLIENT_ID=... GOOGLE_CLIENT_SECRET=... + +# GitHub OAuth GITHUB_CLIENT_ID=... GITHUB_CLIENT_SECRET=... # Application URLs -FRONTEND_URL=... -BACKEND_URL=... +FRONTEND_URL=http://localhost:3001 +BACKEND_URL=http://localhost:3000 # JWT Settings JWT_SECRET=... -# ALGOLIA Search API +# Algolia Search API ALGOLIA_APP_ID=... ALGOLIA_API_KEY=... ALGOLIA_INDEX_NAME=... -# Stripe Configuration +# Stripe Configuration Variables STRIPE_SECRET_KEY=... STRIPE_WEBHOOK_SECRET=... STRIPE_PRICE_ID=... -STRIPE_TEST_MODE=true +STRIPE_TEST_MODE=... + -# Gemini configuration -GEMINI_API_KEY=... \ No newline at end of file +# AI Configuration Variables +GEMINI_API_KEY=... +TOGETHER_API_KEY=... 
diff --git a/backend/docker-compose.yml b/backend/docker-compose.yml new file mode 100644 index 0000000..8ed53c5 --- /dev/null +++ b/backend/docker-compose.yml @@ -0,0 +1,17 @@ +services: + postgres: + image: postgres:latest + container_name: summarizz_postgres + restart: unless-stopped + environment: + POSTGRES_USER: summarizz + POSTGRES_PASSWORD: summarizz + POSTGRES_DB: summarizz + POSTGRES_INITDB_ARGS: "--encoding=UTF-8 --lc-collate=C --lc-ctype=C" + ports: + - "5433:5432" + volumes: + - postgres_data:/var/lib/postgresql/data + +volumes: + postgres_data: diff --git a/backend/justfile b/backend/justfile new file mode 100644 index 0000000..6f7f2e3 --- /dev/null +++ b/backend/justfile @@ -0,0 +1,91 @@ +export CONFIG_SCRIPT := "./scripts/config.sh" + +alias s := setup +alias b := build +alias c := clean +alias t := test +alias rd := run-dev +alias rp := run-prod + +default: + just --list + +# NOTE: Cleans build artifacts and deletes node_modules +clean: + #!/usr/bin/env bash + source $CONFIG_SCRIPT + + info "Cleaning build artifacts and node_modules..." + items=("node_modules" "dist" "coverage" ".eslintcache" ".cache" ".nyc_output" "package-lock.json" "yarn.lock" "pnpm-lock.yaml") + for item in "${items[@]}"; do + if [[ "$item" == *.* ]]; then + find . -name "$item" -type f -exec rm -f "{}" + 2>/dev/null || true + else + find . -name "$item" -type d -prune -exec rm -rf "{}" + 2>/dev/null || true + fi + done + + +# NOTE: Best to run this before any other recipe to ensure a clean start +setup: clean + #!/usr/bin/env bash + source $CONFIG_SCRIPT + + info "Setting up backend application with provided configuration..." + npm install + +# NOTE: Builds the entire backend application for dev/prod runs +build: + #!/usr/bin/env bash + source $CONFIG_SCRIPT + + info "Building backend application with provided configuration..." 
+ npm run build + +# NOTE: Runs the linter (eslint) to check for standards and code quality +lint: + #!/usr/bin/env bash + source $CONFIG_SCRIPT + + info "Running linter with provided configuration..." + npm run lint + +# NOTE: Runs tests using jest (optionally set --verbose flag) +test arg="default": + #!/usr/bin/env bash + source $CONFIG_SCRIPT + + info "Running backend tests with provided argument: $arg..." + if [ -z "$arg" ]; then \ + arg="default" \ + fi + + if [ "$arg" == "v" ] || [ "$arg" == "-v" ] || [ "$arg" == "--v" ] || [ "$arg" == "verbose" ]; then \ + npm run test:verbose \ + else \ + npm run test \ + fi + +# NOTE: Runs all tasks in a sequence to setup respective backend environment +start arg="default": clean build lint (test arg) + #!/usr/bin/env bash + source $CONFIG_SCRIPT + + info "Starting backend application with provided configuration..." + npm run start + +# NOTE: Runs the respective backend environment in development mode (nodemon for hot reloading) +run-dev: clean build lint + #!/usr/bin/env bash + source $CONFIG_SCRIPT + + info "Running backend application in development mode..." + npm run dev + +# NOTE: Runs the respective backend environment in production mode (without nodemon, should only be used when deploying) +run-prod arg="default": clean build lint (test arg) + #!/usr/bin/env bash + source $CONFIG_SCRIPT + + info "Running backend application in production mode..." 
+ npm run start diff --git a/backend/package.json b/backend/package.json index 3e2815c..68cf460 100644 --- a/backend/package.json +++ b/backend/package.json @@ -36,7 +36,7 @@ "@algolia/client-search": "^5.20.4", "@aws-sdk/client-s3": "^3.816.0", "@aws-sdk/s3-request-presigner": "^3.816.0", - "@google/generative-ai": "^0.24.1", + "@google/genai": "^1.4.0", "@langchain/core": "^0.3.57", "@types/jsonwebtoken": "^9.0.7", "algoliasearch": "^4.24.0", @@ -53,6 +53,7 @@ "helmet": "^8.1.0", "jsonwebtoken": "^9.0.2", "stripe": "^18.1.1", + "together-ai": "^0.16.0", "winston": "^3.17.0", "zod": "^3.24.4" } diff --git a/backend/scripts/config.sh b/backend/scripts/config.sh new file mode 100644 index 0000000..d0beb9b --- /dev/null +++ b/backend/scripts/config.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash + +set -e + +if command -v tput >/dev/null 2>&1; then + COLOR_RESET=$(tput sgr0) + COLOR_RED=$(tput setaf 1) + COLOR_GREEN=$(tput setaf 2) + COLOR_YELLOW=$(tput setaf 3) + COLOR_BLUE=$(tput setaf 4) + COLOR_MAGENTA=$(tput setaf 5) + COLOR_CYAN=$(tput setaf 6) + COLOR_WHITE=$(tput setaf 7) +else + COLOR_RESET="\033[0m" + COLOR_RED="\033[0;31m" + COLOR_GREEN="\033[0;32m" + COLOR_YELLOW="\033[0;33m" + COLOR_BLUE="\033[0;34m" + COLOR_MAGENTA="\033[0;35m" + COLOR_CYAN="\033[0;36m" + COLOR_WHITE="\033[0;37m" +fi + +PREFIX_INFO="${COLOR_BLUE}[INFO]${COLOR_RESET}" +PREFIX_WARN="${COLOR_YELLOW}[WARN]${COLOR_RESET}" +PREFIX_ERROR="${COLOR_RED}[ERROR]${COLOR_RESET}" +PREFIX_SUCCESS="${COLOR_GREEN}[SUCCESS]${COLOR_RESET}" +PREFIX_DEBUG="${COLOR_MAGENTA}[DEBUG]${COLOR_RESET}" + +_get_current_timestamp() { + date "+%Y-%m-%d %H:%M:%S" +} + +info() { + echo "${PREFIX_INFO} [$(_get_current_timestamp)] ${1}" +} + +warn() { + echo "${PREFIX_WARN} [$(_get_current_timestamp)] ${COLOR_YELLOW}${1}${COLOR_RESET}" >&2 +} + +error() { + echo "${PREFIX_ERROR} [$(_get_current_timestamp)] ${COLOR_RED}${1}${COLOR_RESET}" >&2 +} + +success() { + echo "${PREFIX_SUCCESS} [$(_get_current_timestamp)] 
${COLOR_GREEN}${1}${COLOR_RESET}" +} + +debug() { + echo "${PREFIX_DEBUG} [$(_get_current_timestamp)] ${COLOR_MAGENTA}${1}${COLOR_RESET}" +} diff --git a/backend/scripts/deploy_test_pg_db.sh b/backend/scripts/deploy_test_pg_db.sh index 2bfd6e5..08e5847 100644 --- a/backend/scripts/deploy_test_pg_db.sh +++ b/backend/scripts/deploy_test_pg_db.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash echo "Setting up test PostgreSQL database for Summarizz..." diff --git a/backend/scripts/test_data.sql b/backend/scripts/test_data.sql index 2f514da..521af4f 100644 --- a/backend/scripts/test_data.sql +++ b/backend/scripts/test_data.sql @@ -1,4 +1,3 @@ - -- Test data for Summarizz application -- This script populates the database with test data for development purposes @@ -245,9 +244,21 @@ CROSS JOIN (SELECT user_id FROM users ORDER BY random() LIMIT 1) u; -- Update content metrics based on interactions UPDATE content c -SET - likes = (SELECT COUNT(*) FROM user_content_interactions WHERE content_id = c.content_id AND interaction_type = 'like'), - shares = (SELECT COUNT(*) FROM user_content_interactions WHERE content_id = c.content_id AND interaction_type = 'share'); +SET + likes = ( + SELECT COUNT(*) + FROM user_content_interactions + WHERE + content_id = c.content_id + AND interaction_type = 'like' + ), + shares = ( + SELECT COUNT(*) + FROM user_content_interactions + WHERE + content_id = c.content_id + AND interaction_type = 'share' + ); -- Update comment like counts UPDATE comments c diff --git a/backend/src/modules/ai/config/models.ts b/backend/src/modules/ai/config/models.ts index 01b34fa..0639547 100644 --- a/backend/src/modules/ai/config/models.ts +++ b/backend/src/modules/ai/config/models.ts @@ -1,18 +1,107 @@ -import { AIModel, ModelConfig } from '../types'; +import { AIGenerationModel, AIModel, GenerationModelConfig, ModelConfig } from '../types'; +import { HarmBlockMethod, HarmBlockThreshold, HarmCategory } from '@google/genai'; +import { SUMMARY_SYSTEM_PROMPT } 
from './prompts'; -export const MODEL_CONFIGS: Record = { +const MAX_OUTPUT_TOKENS = 1500; + +export const SUMMARIZATION_MODEL_CONFIGS: Record = { [AIModel.Gemini20Flash]: { + safetySettings: [ + { + method: HarmBlockMethod.SEVERITY, + category: HarmCategory.HARM_CATEGORY_HATE_SPEECH, + threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE + }, + { + method: HarmBlockMethod.SEVERITY, + category: HarmCategory.HARM_CATEGORY_HATE_SPEECH, + threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE + }, + { + method: HarmBlockMethod.SEVERITY, + category: HarmCategory.HARM_CATEGORY_HATE_SPEECH, + threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE + }, + { + method: HarmBlockMethod.SEVERITY, + category: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT, + threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE + } + ], + systemInstruction: SUMMARY_SYSTEM_PROMPT, temperature: 0.2, - maxOutputTokens: 1500, + maxOutputTokens: MAX_OUTPUT_TOKENS, topP: 1, - frequencyPenalty: 0.1, - presencePenalty: 0.1, }, [AIModel.Gemini15Flash]: { + safetySettings: [ + { + method: HarmBlockMethod.SEVERITY, + category: HarmCategory.HARM_CATEGORY_HATE_SPEECH, + threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE + }, + { + method: HarmBlockMethod.SEVERITY, + category: HarmCategory.HARM_CATEGORY_HATE_SPEECH, + threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE + }, + { + method: HarmBlockMethod.SEVERITY, + category: HarmCategory.HARM_CATEGORY_HATE_SPEECH, + threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE + }, + { + method: HarmBlockMethod.SEVERITY, + category: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT, + threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE + } + ], + systemInstruction: SUMMARY_SYSTEM_PROMPT, + temperature: 0.2, + maxOutputTokens: MAX_OUTPUT_TOKENS, + topP: 1, + }, + [AIModel.Gemini15FlashLite]: { + safetySettings: [ + { + method: HarmBlockMethod.SEVERITY, + category: HarmCategory.HARM_CATEGORY_HATE_SPEECH, + threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE + }, + { + method: HarmBlockMethod.SEVERITY, 
+ category: HarmCategory.HARM_CATEGORY_HATE_SPEECH, + threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE + }, + { + method: HarmBlockMethod.SEVERITY, + category: HarmCategory.HARM_CATEGORY_HATE_SPEECH, + threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE + }, + { + method: HarmBlockMethod.SEVERITY, + category: HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT, + threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE + } + ], + systemInstruction: SUMMARY_SYSTEM_PROMPT, temperature: 0.2, - maxOutputTokens: 1500, + maxOutputTokens: MAX_OUTPUT_TOKENS, topP: 1, - frequencyPenalty: 0.1, - presencePenalty: 0.1, + } +}; + +export const IMAGE_GENERATION_MODEL_CONFIGS: Record = { + [AIGenerationModel.Gemini20FlashImageGenPreview]: { + + }, + [AIGenerationModel.TogetherFlux1SchnellFree]: { + + }, + [AIGenerationModel.TogetherFlux1Schnell]: { + }, + [AIGenerationModel.TogetherFlux1Dev]: { + + } }; diff --git a/backend/src/modules/ai/config/prompts.ts b/backend/src/modules/ai/config/prompts.ts index 182378a..354b478 100644 --- a/backend/src/modules/ai/config/prompts.ts +++ b/backend/src/modules/ai/config/prompts.ts @@ -1,32 +1,149 @@ -export const SUMMARY_SYSTEM_PROMPT = `You are an expert summarization model trained to create concise yet informative summaries. -Follow these guidelines strictly: - -1. FORMAT: - - Provide exactly TWO paragraphs - - Each paragraph should have 3-5 sentences - - First paragraph: Main topic and context - - Second paragraph: Key findings and implications - -2. STYLE: - - Use clear, direct language - - Maintain formal tone - - Include specific details when relevant - - Avoid technical jargon unless necessary - -3. AVOID: - - Personal opinions - - Redundant information - - Tangential details - - Vague statements - -4. 
ENSURE: - - Logical flow between sentences - - Proper transition words - - Start with most important information - - End with significant implications`; - -export const SUMMARY_USER_PROMPT = `Analyze the following text and create a two-paragraph summary following the guidelines: +export const SUMMARY_SYSTEM_PROMPT = ` +You are an expert AI summarization assistant. Your role is to analyze +any text and produce high-quality, structured summaries that capture +essential information in a clear, concise and coherent manner. + +Transform the provided text into exactly TWO well-structured paragraphs +that comprehensively cover the main content while maintaining readability +and logical flow. + + + +CRITICAL: You MUST follow this exact structure: +- Output exactly 2 paragraphs, no more, no less +- Paragraph 1: 3-5 sentences covering main topic, context, and primary subject matter +- Paragraph 2: 3-5 sentences covering key findings, conclusions, implications, or outcomes +- Each sentence must be substantive and contribute unique information +- No bullet points, lists, or sub-headings in your response + + + +- Use clear, direct, and professional language +- Maintain formal but accessible tone throughout +- Include specific details, data, and examples when they enhance understanding +- Prioritize concrete information over abstract concepts +- Use active voice when possible +- Employ smooth transitions between sentences and paragraphs + + + +- Do NOT include personal opinions, interpretations, or subjective commentary +- Do NOT repeat the same information in different words +- Do NOT include tangential details that don't serve the core narrative +- Do NOT use vague phrases like "the text discusses" or "it is mentioned that" +- Do NOT exceed the two-paragraph limit under any circumstances +- Do NOT use technical jargon without providing context + + + +- Begin with the most critical and impactful information +- Ensure logical progression from general context to specific details 
+- Use transitional phrases to create smooth flow between ideas +- End with the most significant implications, conclusions, or actionable insights +- Verify that each paragraph serves a distinct purpose in the overall summary +- Maintain coherence so the summary reads as a unified piece of writing +`; + +export const SUMMARY_USER_PROMPT = ` +Please analyze the following text and create a comprehensive two-paragraph summary +that strictly adheres to the system guidelines provided. + + {text} + + + +Apply all formatting requirements, writing style guidelines, and quality standards +from the system prompt. Ensure your response contains exactly two paragraphs with 3-5 +substantive sentences each, maintaining logical flow and covering both context/main +topics in paragraph 1 and key findings/outcomes in paragraph 2. +`; + +/** + * TODO: Refactor this prompt later to be more structured and clear. + * + * Similar to the image generation prompt below, this one is also a bit + * scuffed in terms of how it handles user tiers. It should be handled + * in the service layer, not passed to the prompt for the AI to figure + * out whether to generate a high or low resolution image. + */ +export const IMAGE_GENERATION_SYSTEM_PROMPT = ` +You are an expert AI image generation assistant. Your role is to create +high-quality, visually compelling images that accurately represent user +descriptions while adhering to content guidelines and technical specifications. + + +Transform textual descriptions into detailed, coherent visual representations +that capture the essence, mood, and specific elements described by the user +while maintaining artistic quality and technical excellence. 
+ + + +CRITICAL: You MUST follow these guidelines: +- Generate images that directly correspond to the provided description +- Maintain consistent artistic style and quality throughout the image +- Ensure all visual elements are coherent and properly integrated +- Respect aspect ratios and composition principles +- Include all specified subjects, objects, or elements mentioned in the description +- Apply appropriate lighting, color schemes, and atmospheric effects as described + + + +- Use balanced composition with clear focal points +- Apply appropriate perspective and depth to create visual interest +- Maintain consistent lighting sources and shadows throughout the scene +- Choose color palettes that enhance the mood and theme of the description +- Ensure visual elements are proportionally accurate and realistic (unless stylization is specified) +- Create smooth transitions between different elements in the composition + + + +- Do NOT generate inappropriate, offensive, or harmful content +- Do NOT include copyrighted characters, logos, or trademarked elements +- Do NOT create images that could be used to mislead or deceive +- Do NOT generate content depicting violence, illegal activities, or explicit material +- Do NOT include recognizable faces of real people without explicit permission +- Do NOT create images that violate platform content policies + + + +- Prioritize visual clarity and detail appropriate to the requested resolution +- Ensure all elements are well-defined and properly rendered +- Maintain consistent art style throughout the entire image +- Apply professional-level composition techniques (rule of thirds, leading lines, etc.) 
+- Optimize contrast and saturation for visual impact +- Verify that the final image effectively communicates the intended concept + + + +- Generate images at the resolution appropriate for the user's subscription tier +- Maintain proper aspect ratios as specified or use standard ratios when not specified +- Ensure image quality is optimized for the intended use case +- Apply appropriate compression and formatting for web delivery +- Consider performance implications for different device types and connection speeds +`; + +/** + * TODO: Refactor this prompt later to be more structured and clear. + * + * At this moment, its pretty scuffed passing over whether the user is a paid or free user. + * That should be handled in the service layer, not passed to the prompt for the AI to figure + * out whether to generate a high or low resolution images. + */ +export const IMAGE_GENERATION_USER_PROMPT = ` +Please generate an image based on the following description and user requirements. + + +{description} + + + +{userTier} + -Remember to maintain clarity and conciseness while capturing all essential information.`; + +Create an image that accurately represents the provided description. If the user is a paid user, +generate a high-resolution image with enhanced quality and detail. If the user is a free user, +generate a standard resolution image. Ensure the image is appropriate and follows content guidelines. 
+`; diff --git a/backend/src/modules/ai/controllers/generation.controller.ts b/backend/src/modules/ai/controllers/generation.controller.ts new file mode 100644 index 0000000..f043b10 --- /dev/null +++ b/backend/src/modules/ai/controllers/generation.controller.ts @@ -0,0 +1,29 @@ +import { Request, Response, NextFunction } from 'express'; +import { ImageGenerationService } from '../services/generation.service'; +import { createSuccessResponse } from '../../../shared/utils/response'; +import { logger } from '../../../shared/utils/logger'; +import { ImageGenerationResponse } from '../types'; + +export class ImageGenerationController { + private service: ImageGenerationService; + + constructor() { + this.service = new ImageGenerationService(); + } + + generateImageSingle = async (req: Request, res: Response, next: NextFunction) => { + try { + return []; + } catch (error) { + next(error); + } + }; + + generateImageMultiple = async (req: Request, res: Response, next: NextFunction) => { + try { + return []; + } catch (error) { + next(error); + } + }; +} \ No newline at end of file diff --git a/backend/src/modules/ai/controllers/summarization.controller.ts b/backend/src/modules/ai/controllers/summarization.controller.ts index e31be90..7b1acce 100644 --- a/backend/src/modules/ai/controllers/summarization.controller.ts +++ b/backend/src/modules/ai/controllers/summarization.controller.ts @@ -13,7 +13,6 @@ export class SummarizationController { summarize = async (req: Request, res: Response, next: NextFunction) => { try { - // Validation is handled by middleware const requestData: SummarizationRequest = req.body; const result = await this.service.summarize(requestData); @@ -29,5 +28,20 @@ export class SummarizationController { } }; - // Additional endpoints can be added here + summarizeAsStream = async (req: Request, res: Response, next: NextFunction) => { + try { + const requestData: SummarizationRequest = req.body; + const stream = await 
this.service.summarizeStream(requestData); + + logger.info('Successfully generated summary stream', { + textLength: requestData.text.length, + processingTime: stream.metadata.processingTime, + model: stream.metadata.model, + }); + + res.json(createSuccessResponse(stream)); + } catch (error) { + next(error); + } + } } diff --git a/backend/src/modules/ai/routes/generation.routes.ts b/backend/src/modules/ai/routes/generation.routes.ts new file mode 100644 index 0000000..95881c7 --- /dev/null +++ b/backend/src/modules/ai/routes/generation.routes.ts @@ -0,0 +1,21 @@ +import { Router } from 'express'; +import { ImageGenerationController } from '../controllers/generation.controller'; +import { validateRequest } from '../../../shared/middleware/validation.middleware'; +import { LowerTierImageGenerationRequestSchema, HigherTierImageGenerationRequestSchema } from '../types'; + +const imageGenerationRouter = Router(); +const imageGenerationController = new ImageGenerationController(); + +imageGenerationRouter.post( + '/generate/single', + validateRequest(LowerTierImageGenerationRequestSchema), + imageGenerationController.generateImageSingle +); + +imageGenerationRouter.post( + '/generate/multiple', + validateRequest(HigherTierImageGenerationRequestSchema), + imageGenerationController.generateImageMultiple +); + +export default imageGenerationRouter; diff --git a/backend/src/modules/ai/routes/summarization.routes.ts b/backend/src/modules/ai/routes/summarization.routes.ts index e0319f0..c044f23 100644 --- a/backend/src/modules/ai/routes/summarization.routes.ts +++ b/backend/src/modules/ai/routes/summarization.routes.ts @@ -3,13 +3,19 @@ import { SummarizationController } from '../controllers/summarization.controller import { validateRequest } from '../../../shared/middleware/validation.middleware'; import { SummarizationRequestSchema } from '../types'; -const router = Router(); +const summarizationRouter = Router(); const summarizationController = new SummarizationController(); 
-router.post( - '/summarize', - validateRequest(SummarizationRequestSchema), - summarizationController.summarize +summarizationRouter.post( + '/summarize', + validateRequest(SummarizationRequestSchema), + summarizationController.summarize ); -export default router; +summarizationRouter.post( + '/summarize/stream', + validateRequest(SummarizationRequestSchema), + summarizationController.summarizeAsStream +); + +export default summarizationRouter; diff --git a/backend/src/modules/ai/services/generation.service.ts b/backend/src/modules/ai/services/generation.service.ts new file mode 100644 index 0000000..3c30747 --- /dev/null +++ b/backend/src/modules/ai/services/generation.service.ts @@ -0,0 +1,55 @@ +import { AppError } from '../../../shared/errors'; + +import { HigherTierImageGenerationRequest, LowerTierImageGenerationRequest, LowerTierImageGenerationResponse } from '../types'; +import { AIProvider, checkEnvironmentVariables } from '../utils/ai.utils'; +import { ImageGenerationStrategy } from '../strategy/generation.strategy'; + +const APP_ERROR_SOURCE = 'image.generation.service'; + +export class ImageGenerationService { + private strategy: ImageGenerationStrategy; + + constructor(strategy?: ImageGenerationStrategy) { + checkEnvironmentVariables(APP_ERROR_SOURCE, AIProvider.GEMINI); + checkEnvironmentVariables(APP_ERROR_SOURCE, AIProvider.TOGETHER); + + if (!strategy) { + throw new AppError( + 400, + 'Invalid strategy provided. Please provide a valid ImageGenerationStrategy instance.', + APP_ERROR_SOURCE + ); + } + this.strategy = strategy; + } + + getStrategy = (): ImageGenerationStrategy => { + return this.strategy; + } + + setStrategy = (strategy: ImageGenerationStrategy): boolean => { + if (!strategy) { + throw new AppError( + 400, + 'Invalid strategy provided. 
Please provide a valid ImageGenerationStrategy instance.', + APP_ERROR_SOURCE + ); + } + + this.strategy = strategy; + return true; + } + + async generate( + request: LowerTierImageGenerationRequest | HigherTierImageGenerationRequest + ): Promise { + if (!request) { + throw new AppError( + 400, + 'Invalid request provided. Please provide a valid image generation request.', + APP_ERROR_SOURCE + ); + } + return this.strategy.generate(request); + } +} diff --git a/backend/src/modules/ai/services/imagen/google.service.ts b/backend/src/modules/ai/services/imagen/google.service.ts new file mode 100644 index 0000000..ede7780 --- /dev/null +++ b/backend/src/modules/ai/services/imagen/google.service.ts @@ -0,0 +1,101 @@ +import { getEncoding, Tiktoken } from 'js-tiktoken'; + +import { GoogleGenAI } from '@google/genai'; +import { PromptTemplate } from '@langchain/core/prompts'; +import Together from 'together-ai'; + +import { AppError } from '../../../shared/errors'; +import { env } from '../../../shared/config/environment'; +import { logger } from '../../../shared/utils/logger'; + +import { AIGenerationModel, HigherTierImageGenerationRequest, LowerTierImageGenerationResponse } from '../types'; +import { IMAGE_GENERATION_USER_PROMPT } from '../config/prompts'; +import { IMAGE_GENERATION_MODEL_CONFIGS } from '../config/models'; +import { AIProvider, checkEnvironmentVariables } from '../utils/ai.utils'; +import { ImageGenerationStrategy } from '../strategy/generation.strategy'; + +const APP_ERROR_SOURCE = 'image.google.generation.service'; + +export class GeminiImageGenerationService implements ImageGenerationStrategy { + private readonly defaultModelType: AIGenerationModel; + private prompt: PromptTemplate; + private encoding: Tiktoken; + + constructor() { + checkEnvironmentVariables(APP_ERROR_SOURCE, AIProvider.GOOGLE); + + this.defaultModelType = AIGenerationModel.Gemini20FlashImageGenPreview; + this.prompt = PromptTemplate.fromTemplate(IMAGE_GENERATION_USER_PROMPT); + 
this.encoding = getEncoding('cl100k_base'); + } + + setupGoogleGenAIClient(): GoogleGenAI { + const genAI = new GoogleGenAI({ + apiKey: process.env.GEMINI_API_KEY!, + vertexai: process.env.GOOGLE_USE_VERTEX_AI === 'true' + }); + + if (!genAI) { + throw new AppError( + 500, + 'Failed to initialize Google GenAI client. Please check your environment variables.', + APP_ERROR_SOURCE + ); + } + + return genAI; + } + + async generate( + request: LowerTierImageGenerationResponse | HigherTierImageGenerationRequest + ): Promise { + const startTime = Date.now(); + const modelType = request.options?.model || this.defaultModelType; + + try { + const genAI = this.setupGoogleGenAIClient(); + const formattedPrompt = await this.prompt.format({ + text: request.text, + }); + + const result = await genAI.models.generateImages({ + model: this.defaultModelType, + prompt: formattedPrompt, + config: { + ...IMAGE_GENERATION_MODEL_CONFIGS[modelType], + } + }); + + const imageUrl = result?.generatedImages?.[0]?.image?.imageBytes; + if (!imageUrl) { + throw new AppError( + 500, + 'Image generation was unsuccessful, there was no image returned for the request.', + APP_ERROR_SOURCE + ); + } + + const endTime = Date.now(); + const tokenCount = this.encoding.encode(formattedPrompt).length; + const response: LowerTierImageGenerationResponse = {}; + + // TODO: Finish implementation for generate method in GeminiImageGenerationService + + } catch (error) { + if (error instanceof AppError) { + throw error; + } else { + logger.error( + APP_ERROR_SOURCE, + 'An unexpected error occurred during image generation.', + error + ); + throw new AppError( + 500, + 'An unexpected error occurred during image generation. 
Please try again later.', + APP_ERROR_SOURCE + ); + } + } + } +} diff --git a/backend/src/modules/ai/services/imagen/index.ts b/backend/src/modules/ai/services/imagen/index.ts new file mode 100644 index 0000000..8c5d553 --- /dev/null +++ b/backend/src/modules/ai/services/imagen/index.ts @@ -0,0 +1,2 @@ +export * from './google.service'; +export * from './together.service'; diff --git a/backend/src/modules/ai/services/imagen/together.service.ts b/backend/src/modules/ai/services/imagen/together.service.ts new file mode 100644 index 0000000..5af97a2 --- /dev/null +++ b/backend/src/modules/ai/services/imagen/together.service.ts @@ -0,0 +1,75 @@ +import { getEncoding, Tiktoken } from 'js-tiktoken'; + +import { GoogleGenAI } from '@google/genai'; +import { PromptTemplate } from '@langchain/core/prompts'; +import Together from 'together-ai'; + +import { AppError } from '../../../shared/errors'; +import { env } from '../../../shared/config/environment'; +import { logger } from '../../../shared/utils/logger'; + +import { AIGenerationModel, HigherTierImageGenerationRequest, LowerTierImageGenerationResponse } from '../types'; +import { IMAGE_GENERATION_USER_PROMPT } from '../config/prompts'; +import { IMAGE_GENERATION_MODEL_CONFIGS } from '../config/models'; +import { AIProvider, checkEnvironmentVariables } from '../utils/ai.utils'; +import { ImageGenerationStrategy } from '../strategy/generation.strategy'; +import { LowerTierImageGenerationRequest } from '../../types'; + +const APP_ERROR_SOURCE = 'image.together.generation.service'; + +export class TogetherImageGenerationService implements ImageGenerationStrategy { + private readonly defaultModelType: AIGenerationModel; + private prompt: PromptTemplate; + private encoding: Tiktoken; + + constructor() { + checkEnvironmentVariables(APP_ERROR_SOURCE, AIProvider.TOGETHER); + + this.defaultModelType = AIGenerationModel.TogetherVQGAN; + this.prompt = PromptTemplate.fromTemplate(IMAGE_GENERATION_USER_PROMPT); + this.encoding = 
getEncoding('cl100k_base'); + } + + setupTogetherAIClient(): Together { + const togetherAI = new Together({ + apiKey: env.ai.togetherKey, + baseURL: env.ai.togetherBaseUrl, + }); + + if (!togetherAI) { + throw new AppError( + 500, + 'Failed to initialize Together AI client. Please check your environment variables.', + APP_ERROR_SOURCE + ); + } + + return togetherAI; + } + + async generate( + request: LowerTierImageGenerationRequest | HigherTierImageGenerationRequest + ): Promise { + const startTime = Date.now(); + const modelType = request.options?.model || this.defaultModelType; + + try { + /** + * TODO: Implement Together AI Image Generation Logc + */ + return {}; + + } catch (error) { + logger.error(`Error generating image using model ${modelType}`, { error }); + if (error instanceof AppError) { + throw error; + } + + throw new AppError( + 500, + 'Failed to generate image. Please try again later.', + APP_ERROR_SOURCE + ); + } + } +} diff --git a/backend/src/modules/ai/services/index.ts b/backend/src/modules/ai/services/index.ts new file mode 100644 index 0000000..6403e49 --- /dev/null +++ b/backend/src/modules/ai/services/index.ts @@ -0,0 +1,2 @@ +export * from './summarization.service'; +export * from './generation.service'; diff --git a/backend/src/modules/ai/services/summarization.service.ts b/backend/src/modules/ai/services/summarization.service.ts index 50a4a89..ad4ec32 100644 --- a/backend/src/modules/ai/services/summarization.service.ts +++ b/backend/src/modules/ai/services/summarization.service.ts @@ -1,71 +1,194 @@ -import {GenerativeModel, GoogleGenerativeAI} from "@google/generative-ai"; +import { getEncoding, Tiktoken } from 'js-tiktoken'; + +import { GoogleGenAI, Modality } from '@google/genai'; import { PromptTemplate } from '@langchain/core/prompts'; -import { AIModel, SummarizationRequest, SummarizationResponse } from '../types'; -import { SUMMARY_SYSTEM_PROMPT, SUMMARY_USER_PROMPT } from '../config/prompts'; -import { MODEL_CONFIGS } from 
'../config/models'; + import { AppError } from '../../../shared/errors'; import { logger } from '../../../shared/utils/logger'; -import { env } from '../../../shared/config/environment'; -import { getEncoding } from 'js-tiktoken'; +import { SUMMARY_USER_PROMPT } from '../config/prompts'; +import { SUMMARIZATION_MODEL_CONFIGS } from '../config/models'; +import { AIModel, SummarizationRequest, SummarizationResponse, SummarizationStreamResponse } from '../types'; +import { checkEnvironmentVariables } from '../utils/ai.utils'; + +const APP_ERROR_SOURCE = 'summarization.service'; export class SummarizationService { private readonly defaultModelType: AIModel; private prompt: PromptTemplate; + private encoding: Tiktoken; constructor() { - // Validate Gemini API key - if (!env.ai.geminiKey) { - throw new AppError(500, 'Gemini API key not found in environment variables. Please set GEMINI_API_KEY.', 'summarization.service'); - } + checkEnvironmentVariables(APP_ERROR_SOURCE); this.defaultModelType = AIModel.Gemini20Flash; this.prompt = PromptTemplate.fromTemplate(SUMMARY_USER_PROMPT); + this.encoding = getEncoding('cl100k_base'); + } + + setupGenAIClient(): GoogleGenAI { + const genAI = new GoogleGenAI({ + apiKey: process.env.GEMINI_API_KEY!, + vertexai: process.env.GOOGLE_USE_VERTEX_AI === 'true' + }); + + if (!genAI) { + throw new AppError( + 500, + 'Failed to initialize Google GenAI client. 
Please check your environment variables.', + APP_ERROR_SOURCE + ); + } + + return genAI; + } + + calculateTokenUsage(formattedPrompt: string, summary: string): number { + const promptTokens = this.encoding.encode(formattedPrompt).length; + const summaryTokens = this.encoding.encode(summary).length; + const tokenCount = promptTokens + summaryTokens; + + return tokenCount; } async summarize(request: SummarizationRequest): Promise { const startTime = Date.now(); const modelType = request.options?.model || this.defaultModelType; - const genAI = new GoogleGenerativeAI(env.ai.geminiKey!); - const model = genAI.getGenerativeModel({ - model: modelType, - systemInstruction: SUMMARY_SYSTEM_PROMPT, - generationConfig: MODEL_CONFIGS[modelType], - }); - try { - // Format the prompt with system and user messages combined + const genAI = this.setupGenAIClient(); const formattedPrompt = await this.prompt.format({ text: request.text, }); - // Call the model - const result = await model.generateContent(formattedPrompt); - const summary = result.response.text(); + /** + * NOTE: + * Generate content using genAI (from above) with proper + * configurations (GenerateContentParameters). To view the + * full parameters, I referred to the source code + * (ctrl-left click on generateContent method). 
+ * + * GenerateContentParameters: { + * model: string; + * contents: ContentListUnion; + * config?: GenerateContentConfig; + * } + **/ + const result = await genAI.models.generateContent({ + model: this.defaultModelType, + contents: formattedPrompt, + config: { + ...SUMMARIZATION_MODEL_CONFIGS[modelType], + } + }); + + const summary = result.text; + + if (!summary) { + throw new AppError( + 500, + 'Failed to generate summary, the model returned an empty response which was unexpected.', + APP_ERROR_SOURCE + ); + } const endTime = Date.now(); const processingTime = endTime - startTime; - // Calculate token usage - const encoding = getEncoding('cl100k_base'); - const promptTokens = encoding.encode(formattedPrompt).length; - const summaryTokens = encoding.encode(summary).length; - const tokenCount = promptTokens + summaryTokens; + // NOTE: Calculate the token usage for the summary response + const tokenCount = this.calculateTokenUsage(formattedPrompt, summary); return { summary, + metadata: { + model: modelType, + processingTime: processingTime, + tokenCount: tokenCount + } + }; + } catch (error) { + logger.error('Error occurred in summarization service', { error }); + if (error instanceof AppError) { + throw error; + } + + throw new AppError( + 500, + 'An error occurred while generating a summary, please try again.', + APP_ERROR_SOURCE + ); + } + } + + async summarizeStream(request: SummarizationRequest): Promise { + const startTime = Date.now(); + const modelType = request.options?.model || this.defaultModelType; + + try { + const genAI = this.setupGenAIClient(); + const formattedPrompt = await this.prompt.format({ + text: request.text, + }); + + // NOTE: Call the model to generate the content stream + const result = await genAI.models.generateContentStream({ + model: modelType, + contents: formattedPrompt, + config: { + ...SUMMARIZATION_MODEL_CONFIGS[modelType], + responseModalities: [Modality.TEXT] + } + }); + + if (!result) { + throw new AppError( + 500, + 
'Failed to generate summary stream, the model returned an empty response which was unexpected.', + APP_ERROR_SOURCE + ); + } + + const stream = []; + for await (const chunk of result) { + const summaryChunk = chunk.text; + + if (!summaryChunk) { + throw new AppError( + 500, + 'Failed to generate summary stream, the model returned an empty response which was unexpected.', + APP_ERROR_SOURCE + ); + } + + logger.debug('Received chunk from summarization stream', { summaryChunk }); + stream.push(summaryChunk); + } + + const endTime = Date.now(); + const processingTime = endTime - startTime; + + // NOTE: Calculate the token usage for the summary response + const tokenCount = this.calculateTokenUsage(formattedPrompt, stream.join('')); + + return { + stream, metadata: { model: modelType, processingTime, tokenCount } }; + } catch (error) { - logger.error('Error in summarization service', { error }); + logger.error('Error occurred in summarization service', { error }); if (error instanceof AppError) { throw error; } - throw new AppError(500, 'Failed to generate summary', 'summarization.service'); + + throw new AppError( + 500, + 'An error occurred while generating a summary, please try again.', + APP_ERROR_SOURCE + ); } } } \ No newline at end of file diff --git a/backend/src/modules/ai/strategy/generation.strategy.ts b/backend/src/modules/ai/strategy/generation.strategy.ts new file mode 100644 index 0000000..780938a --- /dev/null +++ b/backend/src/modules/ai/strategy/generation.strategy.ts @@ -0,0 +1,13 @@ +import { + HigherTierImageGenerationRequest, + HigherTierImageGenerationResponse, + LowerTierImageGenerationRequest, + LowerTierImageGenerationResponse +} from "../types"; + + +export interface ImageGenerationStrategy { + generate( + request: LowerTierImageGenerationRequest | HigherTierImageGenerationRequest + ): Promise; +} diff --git a/backend/src/modules/ai/types/generation.types.ts b/backend/src/modules/ai/types/generation.types.ts new file mode 100644 index 
0000000..c18c78c --- /dev/null +++ b/backend/src/modules/ai/types/generation.types.ts @@ -0,0 +1,77 @@ +import { z } from 'zod'; +import { AIGenerationModel, } from './models.types'; + +// Define the schema for the generation request body +export const LowerTierImageGenerationRequestSchema = z.object({ + body: z.object({ + prompt: z.string().min(1, 'Prompt must not be empty.').max(1000, 'Provided prompt is too long.'), + options: z.object({ + model: z.nativeEnum(AIGenerationModel).optional(), + width: z.number().positive().optional(), + height: z.number().positive().optional(), + steps: z.number().int().positive().optional(), + disable_safety_checker: z.boolean().optional(), + }).optional(), + }), + query: z.object({}).optional(), + params: z.object({}).optional(), +}); + +export const HigherTierImageGenerationRequestSchema = z.object({ + body: z.object({ + prompt: z.string().min(1, 'Prompt must not be empty.').max(10000, 'Provided prompt is too long.'), + options: z.object({ + model: z.nativeEnum(AIGenerationModel).optional(), + width: z.number().positive().optional(), + height: z.number().positive().optional(), + steps: z.number().int().positive().optional(), + disable_safety_checker: z.boolean().optional(), + }).optional(), + }), + query: z.object({}).optional(), + params: z.object({}).optional(), +}); + +// NOTE: Lower tier image generation using Imagen 3/Gemini 2.0 Flash (Image Generation) models +// TODO: Implement interface LowerTierImageGenerationRequest +export interface LowerTierImageGenerationRequest { + prompt: string; +} + +// TODO: Implement interface LowerTierImageGenerationResponse +export interface LowerTierImageGenerationResponse { + prompt: string; +} + +// NOTE: Higher tier image generation using FLUX.1-dev/FLUX.1-schnell-free models +// TODO: Refactor implementation HigherTierImageGenerationRequest +export interface HigherTierImageGenerationRequest { + prompt: string; + options?: { + model?: AIGenerationModel; + width?: number; + height?: 
number; + steps?: number; + disable_safety_checker?: boolean; + }; +} + +// TODO: Refactor implementation HigherTierImageGenerationResponse +export interface HigherTierImageGenerationResponse { + imageUrl: string; + metadata: { + model: string; + processingTime: number; + tokenCount: number; + }; +} + +export interface ImageGenerationError { + code: string; + message: string; + details?: unknown; +} + +/** + * export interface ExpensivelyHighForAbsolutelyNoReasonOtherThanToShowOffWeAreAbleToMakeHighQualityImagesFromTheirAPIImageGenerationRequest {} + */ diff --git a/backend/src/modules/ai/types/index.ts b/backend/src/modules/ai/types/index.ts index ac5a521..c8f3fc9 100644 --- a/backend/src/modules/ai/types/index.ts +++ b/backend/src/modules/ai/types/index.ts @@ -1,2 +1,3 @@ export * from './summarization.types'; export * from './models.types'; +export * from './generation.types'; diff --git a/backend/src/modules/ai/types/models.types.ts b/backend/src/modules/ai/types/models.types.ts index fa25be8..e37be4f 100644 --- a/backend/src/modules/ai/types/models.types.ts +++ b/backend/src/modules/ai/types/models.types.ts @@ -1,12 +1,59 @@ +import { HarmBlockMethod, HarmBlockThreshold, HarmCategory } from "@google/genai"; + export enum AIModel { Gemini20Flash = 'gemini-2.0-flash', Gemini15Flash = 'gemini-1.5-flash', + Gemini15FlashLite = 'gemini-1.5-flash-lite', +} + +export enum AIGenerationModel { + Gemini20FlashImageGenPreview = 'gemini-2.0-flash-preview-image-generation', + TogetherFlux1Dev = 'black-forest-labs/FLUX.1-dev', + TogetherFlux1Schnell = 'black-forest-labs/FLUX.1-schnell', + TogetherFlux1SchnellFree = 'black-forest-labs/FLUX.1-schnell-Free', +} + +export interface SafetySetting { + method?: HarmBlockMethod; + category: HarmCategory; + threshold: HarmBlockThreshold; +} + +export interface ThinkingConfig { + includeThoughts?: boolean; + thinkingBudget?: number; +} + +export interface HttpOptions { + baseUrl?: string; + apiVersion?: string; + headers?: Record; + 
timeout?: number; } export interface ModelConfig { - temperature: number; - maxOutputTokens: number; - topP: number; - frequencyPenalty: number; - presencePenalty: number; -} \ No newline at end of file + safetySettings?: SafetySetting[]; + systemInstruction?: string; + temperature?: number; + topP?: number; + topK?: number; + maxOutputTokens?: number; + stopSequences?: string[]; + thinkingConfig?: ThinkingConfig; + httpOptions?: HttpOptions; +} + +export interface ImageLora { + path?: string; + scale: number; +} + +export interface GenerationModelConfig { + steps?: number; + width?: number; + height?: number; + n?: number; + responseFormat?: string; + image_loras?: ImageLora[]; + disableSafetyChecker?: boolean; +} diff --git a/backend/src/modules/ai/types/summarization.types.ts b/backend/src/modules/ai/types/summarization.types.ts index d60256d..2447346 100644 --- a/backend/src/modules/ai/types/summarization.types.ts +++ b/backend/src/modules/ai/types/summarization.types.ts @@ -1,10 +1,10 @@ import { z } from 'zod'; import { AIModel } from './models.types'; -// Define the schema for the request body +// Define the schema for the summarization request body export const SummarizationRequestSchema = z.object({ body: z.object({ - text: z.string().min(1, 'Text cannot be empty').max(10000, 'Text is too long'), + text: z.string().min(1, 'Text must not be empty.').max(10000, 'Provided text is too long.'), options: z.object({ maxLength: z.number().positive().optional(), format: z.enum(['concise', 'detailed']).optional(), @@ -33,6 +33,25 @@ export interface SummarizationResponse { }; } +export interface SummarizationStreamResponse { + stream: string[]; + metadata: { + model: string; + processingTime: number; + tokenCount: number; + } +} + +export interface SummarizationStreamChunk { + summary: string; + isComplete?: boolean; + metadata?: { + model: string; + processingTime: number; + tokenCount: number; + }; +} + export interface SummarizationError { code: string; message: 
string; diff --git a/backend/src/modules/ai/utils/ai.utils.ts b/backend/src/modules/ai/utils/ai.utils.ts new file mode 100644 index 0000000..0bff2fe --- /dev/null +++ b/backend/src/modules/ai/utils/ai.utils.ts @@ -0,0 +1,82 @@ +import { env } from '../../../shared/config/environment'; +import { AppError } from '../../../shared/errors'; + +export enum AIProvider { + GEMINI = 'gemini', + TOGETHER = 'together' +} + +export function checkGeminiEnvironmentVariables(source: string): void { + if (!env.ai.geminiKey) { + throw new AppError( + 500, + 'Gemini API key not found in environment variables, to prevent this please set GEMINI_API_KEY.', + source + ); + } + + if (!env.ai.googleUseVertexAI) { + throw new AppError( + 500, + 'Google Use Vertex AI not found in environment variables, to prevent this please set GOOGLE_USE_VERTEX_AI.', + source + ); + } + + /** + * NOTE: Uncomment these if we want to use Google Project ID and Location for SummarizationService | ImageGenerationService. + * + * if (!env.ai.googleProjectId) { + * throw new AppError( + * 500, + * 'Google Project ID not found in environment variables, to prevent this please set GOOGLE_PROJECT_ID.', + * source + * ); + * } + * + * if (!env.ai.googleLocation) { + * throw new AppError( + * 500, + * 'Google Location not found in environment variables, to prevent this please set GOOGLE_LOCATION.', + * source + * ); + * } + **/ +} + +export function checkTogetherEnvironmentVariables(source: string): void { + if (!env.ai.togetherKey) { + throw new AppError( + 500, + 'Together AI API key not found in environment variables, to prevent this please set TOGETHER_API_KEY.', + source + ); + } + + if (!env.ai.togetherBaseUrl) { + throw new AppError( + 500, + 'Together AI Base URL not found in environment variables, to prevent this please set TOGETHER_BASE_URL.', + source + ); + } +} + +export function checkEnvironmentVariables(source: string, provider: AIProvider = AIProvider.GEMINI): void { + switch (provider) { + case 
AIProvider.GEMINI: + checkGeminiEnvironmentVariables(source); + break; + + case AIProvider.TOGETHER: + checkTogetherEnvironmentVariables(source); + break; + + default: + throw new AppError( + 500, + `Unknown AI provider: ${provider}. Please check your environment variables.`, + source + ); + } +} diff --git a/backend/src/shared/config/environment.ts b/backend/src/shared/config/environment.ts index 6935871..dbfc0f3 100644 --- a/backend/src/shared/config/environment.ts +++ b/backend/src/shared/config/environment.ts @@ -2,6 +2,7 @@ import dotenv from 'dotenv'; import path from 'path'; import fs from 'fs'; import { StringValue } from 'ms'; +import { logger } from '../utils/logger'; // Possible locations for .env file @@ -13,10 +14,10 @@ const possibleEnvPaths = [ // Find the first existing .env file const envPath = possibleEnvPaths.find(fs.existsSync); -console.log('Loading environment from:', envPath); +logger.info(`Using environment file (if found): ${envPath || 'none'}`); if (!envPath) { - throw new Error('Failed to find .env file in expected locations.'); + throw new Error('Failed to find .env file in expected location(s).'); } const result = dotenv.config({ path: envPath }); @@ -42,9 +43,12 @@ export const env = { }, ai: { geminiKey: process.env.GEMINI_API_KEY, + googleUseVertexAI: process.env.GOOGLE_USE_VERTEX_AI === 'true', langchainKey: process.env.LANGCHAIN_API_KEY, langchainTracing: process.env.LANGCHAIN_TRACING_V3, openrouterKey: process.env.OPENROUTER_API_KEY, + togetherKey: process.env.TOGETHER_API_KEY, + togetherBaseUrl: process.env.TOGETHER_BASE_URL }, firebase: { apiKey: process.env.FIREBASE_API_KEY,