diff --git a/.gitignore b/.gitignore index e69de29..2eea525 100644 --- a/.gitignore +++ b/.gitignore @@ -0,0 +1 @@ +.env \ No newline at end of file diff --git a/server/clustering/cluster_scoring.py b/server/clustering/cluster_scoring.py index b5ec4be..9dd9f5d 100644 --- a/server/clustering/cluster_scoring.py +++ b/server/clustering/cluster_scoring.py @@ -1,26 +1,87 @@ from typing import Dict, List def compute_entity_similarity(a: Dict, b: Dict) -> float: + """ + Compute entity similarity between two articles with primary/secondary importance weighting. + + Args: + a: Entity dict with primary_subject, secondary_subject, primary_orgs, secondary_orgs, primary_event, secondary_event + b: Entity dict with same structure + + Returns: + Similarity score (0.0 to 1.0+) + """ score = 0.0 - if a["subject"] and a["subject"] == b["subject"]: + # Primary subject match (highest weight) + if a.get("primary_subject") and a["primary_subject"] == b.get("primary_subject"): score += 1.0 + # Secondary subject match with lower weight + elif a.get("secondary_subject") and a["secondary_subject"] == b.get("secondary_subject"): + score += 0.3 + # Cross-match (primary vs secondary) + elif (a.get("primary_subject") and a["primary_subject"] == b.get("secondary_subject")) or \ + (a.get("secondary_subject") and a["secondary_subject"] == b.get("primary_subject")): + score += 0.2 - if a["event"] and a["event"] == b["event"]: + # Primary event match (high weight) + if a.get("primary_event") and a["primary_event"] == b.get("primary_event"): score += 0.5 + # Secondary event match (lower weight) + elif a.get("secondary_event") and a["secondary_event"] == b.get("secondary_event"): + score += 0.2 + # Cross-match + elif (a.get("primary_event") and a["primary_event"] == b.get("secondary_event")) or \ + (a.get("secondary_event") and a["secondary_event"] == b.get("primary_event")): + score += 0.15 - orgs_a = set(a.get("orgs", [])) - orgs_b = set(b.get("orgs", [])) + # Organization matching with 
primary/secondary distinction + primary_orgs_a = set(a.get("primary_orgs", [])) + primary_orgs_b = set(b.get("primary_orgs", [])) + secondary_orgs_a = set(a.get("secondary_orgs", [])) + secondary_orgs_b = set(b.get("secondary_orgs", [])) - if orgs_a and orgs_b: - score += 0.2 * len(orgs_a & orgs_b) + # Primary org matches (higher weight) + if primary_orgs_a and primary_orgs_b: + score += 0.3 * len(primary_orgs_a & primary_orgs_b) + + # Secondary org matches (lower weight) + if secondary_orgs_a and secondary_orgs_b: + score += 0.1 * len(secondary_orgs_a & secondary_orgs_b) + + # Cross-org matches (primary <-> secondary) + if primary_orgs_a and secondary_orgs_b: + score += 0.1 * len(primary_orgs_a & secondary_orgs_b) + if secondary_orgs_a and primary_orgs_b: + score += 0.1 * len(secondary_orgs_a & primary_orgs_b) return score def compute_final_score( semantic_score: float, entity_score: float, - w_sem: float = 0.6, + cross_score: float = 0.5, + w_sem: float = 0.3, w_ent: float = 0.4, + w_cross: float = 0.3, ) -> float: - return w_sem * semantic_score + w_ent * entity_score + """ + Compute final clustering score combining multiple signals. 
+ + Args: + semantic_score: Embedding-based similarity (0.0-1.0) + entity_score: Entity matching score (0.0-1.0+) + cross_score: Cross-encoder score (0.0-1.0) + w_sem: Weight for semantic similarity + w_ent: Weight for entity similarity + w_cross: Weight for cross-encoder score + + Returns: + Final combined score + """ + # Normalize entity score to [0, 1] range + normalized_entity = min(entity_score / 2.0, 1.0) + + return (w_sem * semantic_score + + w_ent * normalized_entity + + w_cross * cross_score) diff --git a/server/cross_encoder.py b/server/cross_encoder.py new file mode 100644 index 0000000..16a48f2 --- /dev/null +++ b/server/cross_encoder.py @@ -0,0 +1,124 @@ +import logging +from typing import Dict, List, Any, Tuple +from sentence_transformers import CrossEncoder +import numpy as np + +logger = logging.getLogger(__name__) + + +class CrossEncoderManager: + """Manages cross-encoder for computing semantic relevance scores between articles.""" + + def __init__(self, model_name: str = "cross-encoder/ms-marco-MiniLM-L-6-v2"): + """ + Initialize the cross encoder. + + Args: + model_name: HuggingFace model identifier for cross-encoder + Default: ms-marco-MiniLM-L-6-v2 (efficient and accurate for relevance) + """ + self.model_name = model_name + try: + self.model = CrossEncoder(model_name) + logger.info(f"Cross-encoder loaded: {model_name}") + except Exception as e: + logger.error(f"Failed to load cross-encoder: {e}") + self.model = None + + def compute_relevance_score( + self, + query_article: Dict[str, Any], + candidate_article: Dict[str, Any] + ) -> float: + """ + Compute semantic relevance score between two articles. 
+ + Args: + query_article: Source article dict with title, description, full_content + candidate_article: Target article dict for comparison + + Returns: + Relevance score between 0 and 1 + """ + if self.model is None: + logger.warning("Cross-encoder model not loaded, returning 0.5") + return 0.5 + + try: + query_text = self._build_article_text(query_article) + candidate_text = self._build_article_text(candidate_article) + scores = self.model.predict([ + [query_text, candidate_text] + ]) + relevance_score = self._sigmoid(scores[0]) + + return float(relevance_score) + + except Exception as e: + logger.error(f"Error computing relevance score: {e}") + return 0.5 + + def compute_batch_relevance_scores( + self, + query_article: Dict[str, Any], + candidate_articles: List[Dict[str, Any]] + ) -> List[float]: + """ + Compute relevance scores between one query article and multiple candidates. + + Args: + query_article: Source article + candidate_articles: List of candidate articles + + Returns: + List of relevance scores + """ + if self.model is None or not candidate_articles: + return [0.5] * len(candidate_articles) + + try: + query_text = self._build_article_text(query_article) + + pairs = [ + [query_text, self._build_article_text(candidate)] + for candidate in candidate_articles + ] + scores = self.model.predict(pairs) + normalized_scores = [float(self._sigmoid(score)) for score in scores] + + return normalized_scores + + except Exception as e: + logger.error(f"Error computing batch relevance scores: {e}") + return [0.5] * len(candidate_articles) + + def _build_article_text(self, article: Dict[str, Any]) -> str: + """ + Build a text representation of an article for cross-encoder. 
+ + Args: + article: Article dictionary + + Returns: + Combined text of title and description + """ + title = article.get("title", "").strip() + description = article.get("description", "").strip() + + if title and description: + return f"{title} {description}" + elif title: + return title + elif description: + return description + else: + return "" + + @staticmethod + def _sigmoid(x: float) -> float: + """Apply sigmoid function to normalize cross-encoder output.""" + import math + try: + return 1.0 / (1.0 + math.exp(-x)) + except OverflowError: + return 0.0 if x < 0 else 1.0 diff --git a/server/database.py b/server/database.py index bf8ca42..d95f7ea 100644 --- a/server/database.py +++ b/server/database.py @@ -18,12 +18,14 @@ SEMANTIC_SIMILARITY_THRESHOLD, FINAL_SCORE_THRESHOLD, ) +from cross_encoder import CrossEncoderManager class DatabaseManager: def __init__(self, db_url: str, embedding_dimension: int = 1536): self.db_url = db_url self.embedding_dimension = embedding_dimension + self.cross_encoder = CrossEncoderManager() self.setup_database() @contextmanager @@ -60,9 +62,12 @@ def setup_database(self): item_type TEXT, created_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP, updated_at TIMESTAMPTZ DEFAULT CURRENT_TIMESTAMP, - subject TEXT, - organization_list JSONB, - event_type TEXT, + primary_subject TEXT, + secondary_subject TEXT, + primary_organizations JSONB, + secondary_organizations JSONB, + primary_event_type TEXT, + secondary_event_type TEXT, cluster_id INTEGER ) """ @@ -85,84 +90,143 @@ def setup_database(self): def assign_cluster_with_similarity( self, article_id: str, - subject: str, - orgs: List[str], - event: str, + primary_subject: str, + secondary_subject: str, + primary_orgs: List[str], + secondary_orgs: List[str], + primary_event: str, + secondary_event: str, + article_data: Optional[Dict[str, Any]] = None, ): + """ + Assign article to a cluster using semantic similarity (cross-encoder) and entity matching. 
+ + Args: + article_id: The article ID + primary_subject: Primary subject extracted by LLM + secondary_subject: Secondary subject extracted by LLM + primary_orgs: Primary organizations from LLM + secondary_orgs: Secondary organizations from LLM + primary_event: Primary event type from LLM + secondary_event: Secondary event type from LLM + article_data: Full article data for cross-encoder (title, description, etc.) + """ with self.get_connection() as conn: cur = conn.cursor() - cur.execute( - """ - SELECT embedding FROM embeddings WHERE article_id = %s - """, - (article_id,) - ) - row = cur.fetchone() - if not row: - return - - article_embedding = row["embedding"] + # Get article data if not provided + if article_data is None: + cur.execute( + """ + SELECT id, title, description, full_content + FROM articles WHERE id = %s + """, + (article_id,) + ) + article_data = cur.fetchone() + if not article_data: + return + # Fetch candidate articles from clusters cur.execute( """ - SELECT a.cluster_id, a.subject, a.organization_list, a.event_type, - 1 - (e.embedding <=> %s) AS semantic_similarity + SELECT a.id, a.cluster_id, a.title, a.description, + a.primary_subject, a.secondary_subject, + a.primary_event_type, a.secondary_event_type, + a.primary_organizations, a.secondary_organizations, + 1 - (e.embedding <=> + (SELECT embedding FROM embeddings WHERE article_id = %s) + ) AS embedding_similarity FROM articles a JOIN embeddings e ON a.id = e.article_id WHERE a.cluster_id IS NOT NULL - AND (a.subject = %s OR a.event_type = %s) + AND a.id != %s + ORDER BY embedding_similarity DESC + LIMIT 20 """, - (article_embedding, subject, event), + (article_id, article_id), ) - best_cluster_id = None - best_score = 0.0 - - for row in cur.fetchall(): - semantic_score = row["semantic_similarity"] - - if semantic_score < SEMANTIC_SIMILARITY_THRESHOLD: - continue - - entity_score = compute_entity_similarity( - { - "subject": subject, - "orgs": orgs, - "event": event, - }, - { - "subject": 
row["subject"], - "orgs": row["organization_list"] or [], - "event": row["event_type"], - }, - ) - - final_score = compute_final_score(semantic_score, entity_score) - - if final_score > best_score: - best_score = final_score - best_cluster_id = row["cluster_id"] - - if best_cluster_id is None or best_score < FINAL_SCORE_THRESHOLD: + candidate_rows = cur.fetchall() + if not candidate_rows: + # No clusters exist yet, create new cluster cur.execute("SELECT COALESCE(MAX(cluster_id), 0) + 1 AS next_id FROM articles") best_cluster_id = cur.fetchone()["next_id"] + else: + best_cluster_id = None + best_score = 0.0 + + # Compute cross-encoder scores for all candidates + cross_encoder_scores = self.cross_encoder.compute_batch_relevance_scores( + article_data, + candidate_rows + ) + for row, cross_score in zip(candidate_rows, cross_encoder_scores): + # Skip if embedding similarity is too low + embedding_sim = row["embedding_similarity"] + if embedding_sim < SEMANTIC_SIMILARITY_THRESHOLD: + continue + + # Compute entity similarity with priority to primary entities + entity_score = compute_entity_similarity( + { + "primary_subject": primary_subject, + "secondary_subject": secondary_subject, + "primary_orgs": primary_orgs, + "secondary_orgs": secondary_orgs, + "primary_event": primary_event, + "secondary_event": secondary_event, + }, + { + "primary_subject": row["primary_subject"], + "secondary_subject": row["secondary_subject"], + "primary_orgs": row["primary_organizations"] or [], + "secondary_orgs": row["secondary_organizations"] or [], + "primary_event": row["primary_event_type"], + "secondary_event": row["secondary_event_type"], + }, + ) + + # Combine scores: embedding (0.3), cross-encoder (0.3), entity (0.4) + final_score = ( + 0.3 * embedding_sim + + 0.3 * cross_score + + 0.4 * entity_score + ) + + if final_score > best_score: + best_score = final_score + best_cluster_id = row["cluster_id"] + + # Check if score meets threshold + if best_score < FINAL_SCORE_THRESHOLD: + 
cur.execute("SELECT COALESCE(MAX(cluster_id), 0) + 1 AS next_id FROM articles") + best_cluster_id = cur.fetchone()["next_id"] + + # Update article with cluster assignment and entities cur.execute( """ UPDATE articles - SET subject = %s, - organization_list = %s::jsonb, - event_type = %s, + SET primary_subject = %s, + secondary_subject = %s, + primary_organizations = %s::jsonb, + secondary_organizations = %s::jsonb, + primary_event_type = %s, + secondary_event_type = %s, cluster_id = %s, updated_at = CURRENT_TIMESTAMP WHERE id = %s """, ( - subject, - json.dumps(orgs), - event, + primary_subject, + secondary_subject, + json.dumps(primary_orgs), + json.dumps(secondary_orgs), + primary_event, + secondary_event, best_cluster_id, article_id, ), ) + diff --git a/server/entity_llm_processor.py b/server/entity_llm_processor.py index fd5f777..b8dad8c 100644 --- a/server/entity_llm_processor.py +++ b/server/entity_llm_processor.py @@ -12,8 +12,27 @@ def __init__(self, model: str = "gpt-4o-mini"): self.system_prompt = ( "You are an expert technical analysis system. Your task is to extract " "key entities from a given technical article text and return them in " - "JSON format. Focus on high-level subjects, involved organizations, " - "and the type of event the article describes." + "JSON format.\n\n" + "CRITICAL: Distinguish between PRIMARY and SECONDARY sources:\n" + "- PRIMARY: Information that originates DIRECTLY from the source (e.g., official announcement, " + "company blog, official paper, GitHub repo by the organization itself)\n" + "- SECONDARY: Information that is REPORTED BY A THIRD PARTY (e.g., news article, analysis, " + "commentary about someone else's work)\n\n" + "Extract and categorize entities as follows:\n" + "1. For SUBJECTS/TOPICS:\n" + " - primary_subject: The main subject directly communicated by the source organization\n" + " - secondary_subject: Subjects mentioned but not the primary focus of communication\n\n" + "2. 
For ORGANIZATIONS:\n" + " - primary_organizations: Organizations that are the SOURCE/AUTHOR of the information\n" + " - secondary_organizations: Organizations mentioned/discussed but not the source\n\n" + "3. For EVENT TYPES:\n" + " - primary_event_type: The main event being announced/reported by the source\n" + " - secondary_event_type: Related events or context mentioned secondarily\n\n" + "EXAMPLES:\n" + "- If article is 'Google announces new Gemini model': primary_org=['Google']\n" + "- If article is 'Le Monde reports on Google\\'s new model': primary_org=['Le Monde'] as reporter, " + "secondary_org=['Google'] as subject\n\n" + "Return results in JSON format with clear separation between primary and secondary entities." ) def process(self, article: Dict, db_manager: Any) -> bool: @@ -36,11 +55,25 @@ def process(self, article: Dict, db_manager: Any) -> bool: data = json.loads(response.choices[0].message.content) + # Extract primary and secondary entities + primary_subject = data.get("primary_subject", "") + secondary_subject = data.get("secondary_subject", "") + + primary_orgs = data.get("primary_organizations", []) + secondary_orgs = data.get("secondary_organizations", []) + + primary_event = data.get("primary_event_type", "") + secondary_event = data.get("secondary_event_type", "") + db_manager.assign_cluster_with_similarity( article_id=article_id, - subject=data.get("subject", ""), - orgs=data.get("organization_list", []), - event=data.get("event_type", "") + primary_subject=primary_subject, + secondary_subject=secondary_subject, + primary_orgs=primary_orgs, + secondary_orgs=secondary_orgs, + primary_event=primary_event, + secondary_event=secondary_event, + article_data=article # Pass full article for cross-encoder ) return True diff --git a/server/examples_new_features.py b/server/examples_new_features.py new file mode 100644 index 0000000..c55e1a4 --- /dev/null +++ b/server/examples_new_features.py @@ -0,0 +1,238 @@ +""" +Examples demonstrating the new 
primary/secondary entity extraction and cross-encoder usage. +""" + +import os +import json +from typing import Dict, List + +from cross_encoder import CrossEncoderManager +from entity_llm_processor import EntityLLMProcessor +from database import DatabaseManager + + +def example_cross_encoder_relevance(): + """ + Example: Using Cross Encoder to compute semantic relevance between articles. + + The cross encoder is more efficient than computing embeddings for many pairs + and provides direct relevance scores optimized for ranking/clustering tasks. + """ + print("\n" + "="*60) + print("EXAMPLE: Cross-Encoder Relevance Scoring") + print("="*60) + + # Initialize cross encoder + ce_manager = CrossEncoderManager() + + # Sample articles + article1 = { + "id": "arxiv_001", + "title": "Efficient Transformer Architectures with Flash Attention", + "description": "New optimization techniques for transformer models", + "full_content": "This paper presents Flash Attention, an I/O-aware attention mechanism..." + } + + article2 = { + "id": "github_001", + "title": "OpenAI Releases GPT-4 Turbo Model", + "description": "Announcement of new capabilities in GPT-4 Turbo", + "full_content": "OpenAI has released GPT-4 Turbo with improved performance..." + } + + article3 = { + "id": "arxiv_002", + "title": "Attention Mechanisms and Transformer Optimization", + "description": "Survey of efficiency improvements in attention", + "full_content": "This comprehensive survey covers attention optimization techniques..." + } + + # Compute relevance scores + print("\nComputing relevance scores:") + print(f"\n1. Article1 vs Article2 (different topics):") + score_1_2 = ce_manager.compute_relevance_score(article1, article2) + print(f" Relevance: {score_1_2:.3f}") + + print(f"\n2. 
Article1 vs Article3 (similar topics - attention/transformers):") + score_1_3 = ce_manager.compute_relevance_score(article1, article3) + print(f" Relevance: {score_1_3:.3f}") + + print("\nāœ“ Article1 and Article3 should have higher relevance (same topic)") + print(f" Difference: {score_1_3 - score_1_2:.3f}") + + +def example_primary_secondary_extraction(): + """ + Example: Using enhanced LLM processor with primary/secondary entity extraction. + + The new system distinguishes between: + - PRIMARY entities: Central focus of the article + - SECONDARY entities: Supporting or contextual information + + This allows more nuanced clustering and relevance matching. + """ + print("\n" + "="*60) + print("EXAMPLE: Primary/Secondary Entity Extraction") + print("="*60) + + processor = EntityLLMProcessor(model="gpt-4o-mini") + + # Example article + article = { + "id": "arxiv_123", + "title": "OpenAI Releases GPT-4 with Multimodal Capabilities", + "description": "OpenAI announces GPT-4, a new multimodal AI model with vision understanding", + "full_content": ( + "OpenAI has released GPT-4, their most advanced model to date. " + "GPT-4 can process both text and images, setting a new standard for AI capabilities. " + "The model was trained using RLHF with input from domain experts. " + "Anthropic's Claude model also supports multimodal inputs, showing industry convergence." 
+ ) + } + + print(f"\nProcessing article: {article['title']}") + print("\nExpected extraction:") + print(" PRIMARY subject: 'Large Language Models' or 'GPT-4'") + print(" SECONDARY subject: 'Multimodal AI' or 'Vision Understanding'") + print(" PRIMARY organizations: ['OpenAI']") + print(" SECONDARY organizations: ['Anthropic']") + print(" PRIMARY event: 'Model Release'") + print(" SECONDARY event: 'Industry Convergence'") + + print("\nNote: In production, this would be called on actual article processing") + print(" and would integrate with the database clustering system.") + + +def example_clustering_with_primary_secondary(): + """ + Example: How primary/secondary entities improve clustering. + + The clustering algorithm now considers: + 1. Embedding similarity (from vector database) - 30% weight + 2. Cross-encoder relevance score - 30% weight + 3. Entity matching with primary/secondary distinction - 40% weight + + This creates more meaningful clusters with better article grouping. + """ + print("\n" + "="*60) + print("EXAMPLE: Clustering with Primary/Secondary Entities") + print("="*60) + + print("\nClustering Flow:") + print("1. Article enters the system") + print("2. LLM extracts PRIMARY and SECONDARY entities") + print("3. Embedding is computed via text-embedding-3-small") + print("4. Candidate articles are fetched (vector similarity)") + print("5. For each candidate:") + print(" a) Embedding similarity: cosine distance (30%)") + print(" b) Cross-encoder score: semantic relevance (30%)") + print(" c) Entity matching: primary > secondary (40%)") + print("6. Best matching cluster is selected") + print("7. 
If no good match: create new cluster") + + print("\nEntity Matching Weights:") + print(" PRIMARY subject match : +1.0") + print(" SECONDARY subject match : +0.3") + print(" PRIMARY event match : +0.5") + print(" SECONDARY event match : +0.2") + print(" PRIMARY organizations match : +0.3 per org") + print(" SECONDARY organizations match: +0.1 per org") + + print("\nāœ“ Primary entities weighted higher = more focused clustering") + + +def example_database_schema(): + """ + Show the updated database schema with primary/secondary support. + """ + print("\n" + "="*60) + print("DATABASE SCHEMA - Articles Table") + print("="*60) + + schema = { + "articles": { + "columns": { + "id": "TEXT PRIMARY KEY", + "source_site": "TEXT", + "title": "TEXT", + "description": "TEXT", + "full_content": "TEXT", + "primary_subject": "TEXT [NEW]", + "secondary_subject": "TEXT [NEW]", + "primary_organizations": "JSONB [NEW] (array of strings)", + "secondary_organizations": "JSONB [NEW] (array of strings)", + "primary_event_type": "TEXT [NEW]", + "secondary_event_type": "TEXT [NEW]", + "cluster_id": "INTEGER", + "created_at": "TIMESTAMPTZ", + "updated_at": "TIMESTAMPTZ", + } + } + } + + print("\nNew columns for entity extraction:") + for col in ["primary_subject", "secondary_subject", "primary_organizations", + "secondary_organizations", "primary_event_type", "secondary_event_type"]: + print(f" āœ“ {col}") + + print("\nāœ“ Old columns removed: subject, organization_list, event_type") + + +def example_llm_prompt(): + """ + Show the new LLM system prompt for entity extraction. + """ + print("\n" + "="*60) + print("LLM SYSTEM PROMPT - Enhanced with Primary/Secondary") + print("="*60) + + prompt = """You are an expert technical analysis system. Your task is to extract +key entities from a given technical article text and return them in JSON format. + +Extract and categorize entities as follows: +1. PRIMARY entities: Main subjects/topics that are the central focus of the article +2. 
SECONDARY entities: Supporting topics, related subjects, or contextual information + +For organizations and event types, also use primary/secondary classification: +- PRIMARY organizations: Directly involved or central to the article +- SECONDARY organizations: Mentioned but peripheral to the main narrative +- PRIMARY event type: The main event/announcement being discussed +- SECONDARY event types: Related or background events + +Return results in JSON format with clear separation between primary and secondary entities.""" + + print(prompt) + + print("\n\nExpected JSON output structure:") + expected_output = { + "primary_subject": "Main topic of the article", + "secondary_subject": "Supporting or related topic", + "primary_organizations": ["Org1", "Org2"], + "secondary_organizations": ["Org3"], + "primary_event_type": "Main event (e.g., 'Model Release', 'Acquisition')", + "secondary_event_type": "Related event or context" + } + print(json.dumps(expected_output, indent=2)) + + +def main(): + """Run all examples.""" + print("\n" + "="*60) + print("TECHNICAL WATCH SERVER - NEW FEATURES EXAMPLES") + print("="*60) + + # Note: Uncomment to run examples that require API keys + # example_cross_encoder_relevance() + # example_primary_secondary_extraction() + + example_clustering_with_primary_secondary() + example_database_schema() + example_llm_prompt() + + print("\n" + "="*60) + print("For production usage, check main.py watch/backfill modes") + print("="*60) + + +if __name__ == "__main__": + main() diff --git a/server/requirements.txt b/server/requirements.txt index cfc0203..5f8243c 100644 --- a/server/requirements.txt +++ b/server/requirements.txt @@ -6,4 +6,5 @@ beautifulsoup4==4.12.2 PyPDF2==3.0.1 openai==1.54.0 psycopg[binary]==3.2.1 -pgvector==0.2.5 +pgvector==0.2.5 +sentence-transformers==3.0.1 \ No newline at end of file