# AIModel.py
import pickle

import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, LSTM, Dense
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import train_test_split
# Preprocess data: tokenize the question/answer pairs and pad to a fixed length
def preprocess_data(conversations, max_len=20):
    questions = [q for q, _ in conversations]
    answers = [a for _, a in conversations]

    # Fit one tokenizer on questions and answers so they share a vocabulary
    tokenizer = Tokenizer()
    tokenizer.fit_on_texts(questions + answers)

    X = tokenizer.texts_to_sequences(questions)
    y = tokenizer.texts_to_sequences(answers)

    # Pad sequences to ensure uniform input size
    X = pad_sequences(X, maxlen=max_len, padding='post')
    y = pad_sequences(y, maxlen=max_len, padding='post')
    return X, y, tokenizer
# Build the RNN model: predict one answer token per input timestep
def build_rnn_model(vocab_size, max_len):
    model = Sequential([
        Embedding(input_dim=vocab_size, output_dim=128, input_length=max_len),
        # return_sequences=True so the LSTM emits an output at every timestep;
        # the targets are whole padded sequences of shape (batch, max_len)
        LSTM(128, return_sequences=True),
        # Dense acts on the last axis, yielding (batch, max_len, vocab_size)
        Dense(vocab_size, activation='softmax')
    ])
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model
# Toy training data: (question, answer) pairs
conversations = [
    ("How are you?", "I am fine."),
    ("What is your name?", "I am an AI assistant."),
    ("Tell me a joke.", "Why did the chicken cross the road? To get to the other side."),
    # Add more training data as needed
]
# Preprocessing
max_len = 20 # Set maximum length for padding
X, y, tokenizer = preprocess_data(conversations, max_len)
vocab_size = len(tokenizer.word_index) + 1  # +1 accounts for the padding index 0
# Split data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# Build and train the RNN model
model = build_rnn_model(vocab_size, max_len)
model.fit(X_train, y_train, epochs=10, batch_size=32, validation_data=(X_test, y_test))
# Save the model and tokenizer for later reuse
model.save('ai_assistant_rnn_model.h5')
with open('tokenizer.pickle', 'wb') as f:
    pickle.dump(tokenizer, f)
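
# Usage sketch (an added example, not part of the original script): reload the
# saved artifacts and decode a reply by taking the argmax token at each
# timestep. The helper name `generate_reply` is illustrative only.
from tensorflow.keras.models import load_model

reloaded_model = load_model('ai_assistant_rnn_model.h5')
with open('tokenizer.pickle', 'rb') as f:
    reloaded_tokenizer = pickle.load(f)

def generate_reply(text, max_len=20):
    seq = pad_sequences(reloaded_tokenizer.texts_to_sequences([text]),
                        maxlen=max_len, padding='post')
    probs = reloaded_model.predict(seq)      # shape: (1, max_len, vocab_size)
    ids = np.argmax(probs, axis=-1)[0]       # greedy choice at each timestep
    index_word = {i: w for w, i in reloaded_tokenizer.word_index.items()}
    return ' '.join(index_word[i] for i in ids if i != 0)  # skip padding (id 0)

print(generate_reply("How are you?"))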