File size: 4,620 Bytes
d6d1455
 
 
 
63df486
 
d6d1455
 
63df486
d6d1455
 
63df486
d6d1455
 
 
 
 
 
 
63df486
d6d1455
 
 
 
 
 
 
63df486
d6d1455
 
 
 
 
 
63df486
 
 
 
 
 
 
 
 
 
d6d1455
 
 
63df486
 
 
 
 
 
d6d1455
 
 
 
 
 
 
 
 
 
 
 
 
 
63df486
d6d1455
 
 
63df486
d6d1455
 
 
 
 
 
 
 
 
63df486
d6d1455
 
 
63df486
d6d1455
 
 
 
 
 
 
 
 
 
63df486
 
 
 
 
d6d1455
63df486
 
d6d1455
 
63df486
d6d1455
 
 
 
 
63df486
d6d1455
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM, Embedding
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.callbacks import EarlyStopping
import gradio as gr

# Bootstrap history of observed outcomes (string labels).
data = [
    "Double big 12", "Single big 11", "Single big 13", "Double big 12", "Double small 10",
    "Double big 12", "Double big 12", "Single small 7", "Single small 5", "Single small 9",
    "Single big 13", "Double small 8", "Single small 5", "Double big 14", "Single big 11",
    "Double big 14", "Single big 17", "Triple 9", "Double small 6", "Single big 13",
    "Double big 14", "Double small 8", "Double small 8", "Single big 13", "Single small 9",
    "Double small 8", "Double small 8", "Single big 12", "Double small 8", "Double big 14",
    "Double small 10", "Single big 13", "Single big 11", "Double big 14", "Double big 14"
]

# Map each distinct label string to an integer class id.
encoder = LabelEncoder()
encoded_data = encoder.fit_transform(data)

# Slide a fixed-size window over the encoded history: each window of
# `sequence_length` ids is an input, the id right after it is the target.
sequence_length = 5
window_count = len(encoded_data) - sequence_length
X = [encoded_data[i:i + sequence_length] for i in range(window_count)]
y = [encoded_data[i + sequence_length] for i in range(window_count)]

X = np.array(X)
y = to_categorical(y, num_classes=len(encoder.classes_))

# Build the model
def build_model(vocab_size, sequence_length):
    """Create and compile the next-outcome LSTM classifier.

    Args:
        vocab_size: number of distinct outcome labels (size of the output layer).
        sequence_length: length of each input window of encoded labels.

    Returns:
        A compiled Keras ``Sequential`` model (categorical cross-entropy,
        Adam with lr=0.001, accuracy metric).
    """
    model = Sequential()
    model.add(Embedding(vocab_size, 50, input_length=sequence_length))
    model.add(LSTM(100))
    model.add(Dense(vocab_size, activation='softmax'))
    model.compile(
        loss='categorical_crossentropy',
        optimizer=Adam(learning_rate=0.001),
        metrics=['accuracy'],
    )
    return model

# Build the model and run the initial fit on the bootstrap data.
vocab_size = len(encoder.classes_)
model = build_model(vocab_size, sequence_length)

# Stop once validation loss plateaus for 10 epochs and keep the best weights.
stopper = EarlyStopping(monitor='val_loss', patience=10, restore_best_weights=True)
history = model.fit(X, y, epochs=100, validation_split=0.2, callbacks=[stopper], verbose=0)

def predict_next(model, data, sequence_length, encoder):
    """Predict the label that follows the last ``sequence_length`` outcomes.

    Args:
        model: trained model whose ``predict`` maps an encoded window of shape
            ``(1, sequence_length)`` to per-class scores.
        data: full list of outcome strings observed so far.
        sequence_length: window size the model was trained with.
        encoder: fitted ``LabelEncoder`` for string <-> id conversion.

    Returns:
        The predicted next outcome as its original string label.
    """
    window = encoder.transform(data[-sequence_length:])
    batch = np.array(window).reshape((1, sequence_length))
    scores = model.predict(batch)
    best_class = np.argmax(scores)
    return encoder.inverse_transform([best_class])[0]

def update_data(data, new_outcome):
    """Append ``new_outcome`` to ``data`` in place and return the same list."""
    data += [new_outcome]
    return data

def retrain_model(model, X, y, epochs=10):
    """Fit ``model`` on a refreshed training set for a short run.

    Early stopping on validation loss (patience 5, best weights restored)
    keeps a brief incremental update from degrading the model.

    Returns:
        The same model instance after training.
    """
    stopper = EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)
    model.fit(X, y, epochs=epochs, validation_split=0.2, callbacks=[stopper], verbose=0)
    return model

def gradio_predict(outcome):
    """Gradio handler: record ``outcome`` and predict the next one.

    Appends the user-supplied outcome to the global history and returns a
    display string — either the prediction or an error message for unknown
    labels / insufficient history.
    """
    global data, model

    # Reject labels the encoder has never seen; transform() would raise.
    if outcome not in encoder.classes_:
        return "Invalid outcome. Please try again."

    data = update_data(data, outcome)

    # Need at least one full window to feed the model.
    if len(data) < sequence_length:
        return "Not enough data to make a prediction."

    predicted = predict_next(model, data, sequence_length, encoder)
    return f'Predicted next outcome: {predicted}'

def gradio_update(actual_next):
    """Gradio handler: record the actual outcome and retrain the model.

    Appends ``actual_next`` to the global history, rebuilds the full
    (window, target) training set from that history, and runs a short
    retraining pass with early stopping.

    Returns:
        A status string for display in the UI.
    """
    global data, X, y, model

    # Reject labels the encoder has never seen; transform() would raise.
    if actual_next not in encoder.classes_:
        return "Invalid outcome. Please try again."

    data = update_data(data, actual_next)

    # Need at least one complete (window, target) pair to train on.
    if len(data) < sequence_length + 1:
        return "Not enough data to update the model."

    # Encode the whole history once and slice windows out of it, instead of
    # calling encoder.transform() separately for every window (the previous
    # version did one transform per window, i.e. O(n) transform calls per
    # button click).
    encoded = encoder.transform(data)
    new_X = [encoded[i:i + sequence_length]
             for i in range(len(encoded) - sequence_length)]
    new_y = [encoded[i + sequence_length]
             for i in range(len(encoded) - sequence_length)]

    X = np.array(new_X)
    y = to_categorical(new_y, num_classes=len(encoder.classes_))

    # Retrain on the refreshed training set.
    model = retrain_model(model, X, y, epochs=10)

    return "Model updated with new data."

# Gradio interface
# Two-row layout: the first row records an outcome and shows the model's
# prediction for the next one; the second row records the actual next
# outcome and triggers an incremental retrain.
with gr.Blocks() as demo:
    gr.Markdown("## Outcome Prediction Model")
    with gr.Row():
        outcome_input = gr.Textbox(label="Current Outcome")
        predict_button = gr.Button("Predict Next")
        predicted_output = gr.Textbox(label="Predicted Next Outcome")
    with gr.Row():
        actual_input = gr.Textbox(label="Actual Next Outcome")
        update_button = gr.Button("Update Model")
        update_output = gr.Textbox(label="Update Status")

    # Wire each button to its handler; handlers return the display string.
    predict_button.click(gradio_predict, inputs=outcome_input, outputs=predicted_output)
    update_button.click(gradio_update, inputs=actual_input, outputs=update_output)

# NOTE(review): launch() blocks; module-level side effect is intentional here.
demo.launch()