Version 2 with functions included
keras_model_functions.py  ADDED  (+59 -0)
@@ -0,0 +1,59 @@
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense, Dropout
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping


def huber_loss(y_true, y_pred, delta=1.0):
    """
    Custom Huber loss: quadratic for errors within `delta`,
    linear beyond it, which makes training robust to outliers.
    """
    error = y_true - y_pred
    is_small_error = tf.abs(error) <= delta
    small_error_loss = 0.5 * tf.square(error)
    big_error_loss = delta * (tf.abs(error) - 0.5 * delta)
    return tf.where(is_small_error, small_error_loss, big_error_loss)


def create_sequences(data, time_steps):
    """
    Create input/output sequences: each sample is a window of
    `time_steps` rows of `data`, and its target is the first
    feature of the row immediately after the window.
    """
    X, y = [], []
    for i in range(len(data) - time_steps):
        X.append(data[i:(i + time_steps), :])
        y.append(data[i + time_steps, 0])
    return np.array(X), np.array(y)


def build_lstm_model(time_steps, input_size):
    """
    Build the improved LSTM model: two stacked LSTM layers with
    dropout, followed by two dense layers.
    """
    model = Sequential([
        LSTM(100, return_sequences=True, input_shape=(time_steps, input_size)),
        Dropout(0.2),
        LSTM(100),
        Dropout(0.2),
        Dense(50),
        Dense(1)
    ])

    optimizer = Adam(learning_rate=0.001)
    model.compile(optimizer=optimizer, loss=huber_loss)
    return model


def train_model(model, X_train, y_train, X_val, y_val, epochs=200, batch_size=32):
    """
    Train the LSTM model, stopping early once validation loss
    stops improving and restoring the best weights.
    """
    early_stopping = EarlyStopping(monitor='val_loss', patience=10, restore_best_weights=True)

    history = model.fit(
        X_train, y_train,
        batch_size=batch_size,
        epochs=epochs,
        validation_data=(X_val, y_val),
        callbacks=[early_stopping],
        verbose=1
    )

    return history
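For reference, a minimal usage sketch of the functions above. The array `scaled_data`, the 60-step window, and the 80/20 chronological split are illustrative assumptions, not part of the committed file; in practice `scaled_data` would be a pre-scaled 2-D array with the prediction target in column 0.

import numpy as np

TIME_STEPS = 60  # assumed window length for this sketch

# Placeholder for a pre-scaled dataset: 500 rows, 3 features,
# with the target series in column 0 (illustrative only).
scaled_data = np.random.rand(500, 3).astype("float32")

X, y = create_sequences(scaled_data, TIME_STEPS)

# Chronological 80/20 train/validation split (assumed ratio).
split = int(len(X) * 0.8)
X_train, X_val = X[:split], X[split:]
y_train, y_val = y[:split], y[split:]

model = build_lstm_model(TIME_STEPS, scaled_data.shape[1])
history = train_model(model, X_train, y_train, X_val, y_val)

# EarlyStopping restored the best weights, so the trained model
# can be used for prediction directly.
predictions = model.predict(X_val)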