import gradio as gr
import pickle
import random
import numpy as np

with open('models.pickle', 'rb') as f:
    models = pickle.load(f)
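
# Assumed (not verified here) layout of models.pickle, inferred from how it is
# indexed in sentence_builder below: a list indexed by prompt type
# (1 = NSFW, 2 = SFW, 0 = BOTH/default), where each entry is a
# (prompt_model, negative_model) pair. Each model appears to be a k-gram
# Markov chain stored as a dict mapping a comma-joined context string to a
# dict of {next_token: probability}, with probabilities summing to 1 so they
# can be fed directly to np.random.choice(..., p=...). A minimal sketch:
#
#   models = [
#       (
#           {"1girl, solo": {"long hair": 0.7, "smile": 0.3}},  # prompt model
#           {"lowres, bad anatomy": {"blurry": 1.0}},           # negative model
#       ),
#       ...
#   ]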

LORA_TOKEN = ''  # '<|>LORA_TOKEN<|>'
NOT_SPLIT_TOKEN = '<|>NOT_SPLIT_TOKEN<|>'

def sample_next(ctx: str, model, k):
    ctx = ', '.join(ctx.split(', ')[-k:])
    if model.get(ctx) is None:
        # Fallback for an unseen context: return the last token of a random context key
        random_key = random.choice(list(model.keys()))
        return random_key.split(', ')[-1]
    possible_tokens = list(model[ctx].keys())
    probs = list(model[ctx].values())
    return np.random.choice(possible_tokens, p=probs)
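
# Illustrative only: with a hypothetical 2-gram model, sample_next keeps the
# last k tokens of the context and draws the next tag from the stored
# distribution, e.g.
#
#   toy_model = {"1girl, solo": {"long hair": 0.7, "smile": 0.3}}
#   sample_next("masterpiece, 1girl, solo", toy_model, k=2)
#   # -> "long hair" (p=0.7) or "smile" (p=0.3)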

def generateText(model, minLen=100, size=5, user_idea=None):
    keys = list(model.keys())
    k = len(random.choice(keys).split(', '))
    
    # If user provides an idea, use it as the starting point; otherwise, choose randomly
    if user_idea and user_idea.strip():
        starting_sent = user_idea.strip()
        # Ensure the starting sentence is compatible with the model's context format
        starting_sent = starting_sent.replace(', ', NOT_SPLIT_TOKEN)
    else:
        starting_sent = random.choice(keys)
    
    sentence = starting_sent
    ctx = ', '.join(starting_sent.split(', ')[-k:]) if ', ' in starting_sent else starting_sent
    
    while True:
        next_prediction = sample_next(ctx, model, k)
        sentence += f", {next_prediction}"
        ctx = ', '.join(sentence.split(', ')[-k:])
        if '\n' in sentence:
            break
    
    sentence = sentence.replace(NOT_SPLIT_TOKEN, ', ')
    prompt = sentence.split('\n')[0]
    
    # Regenerate if the prompt is shorter than the requested minimum length
    if len(prompt) < minLen:
        return generateText(model, minLen, size=size, user_idea=user_idea)
    
    size = size - 1
    if size == 0:
        return [prompt]
    
    output = [prompt]
    for _ in range(size):
        # Generate additional prompts without user_idea to maintain diversity
        new_prompt = generateText(model, minLen, size=1)[0]
        output.append(new_prompt)
    
    return output
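
# Illustrative only: generateText walks the chain until it samples a token
# containing '\n', keeps the text before the newline, and retries whenever the
# result is shorter than minLen. A call such as generateText(models[0][0],
# minLen=100, size=2) would return a list of two comma-separated tag strings.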

def sentence_builder(quantity, minLen, Type, negative, user_idea):
    if Type == "NSFW":
        idx = 1
    elif Type == "SFW":
        idx = 2
    else:
        idx = 0
    model = models[idx]
    output = ""
    for i in range(quantity):
        # Pass user_idea only for the first prompt if provided
        prompt = generateText(model[0], minLen=minLen, size=1, user_idea=user_idea if i == 0 else None)[0]
        output += f"PROMPT:  {prompt}\n\n"
        if negative:
            negative_prompt = generateText(model[1], minLen=minLen, size=1)[0]
            output += f"NEGATIVE PROMPT:  {negative_prompt}\n"
        output += "----------------------------------------------------------------\n\n\n"
    
    return output[:-3]

ui = gr.Interface(
    sentence_builder,
    [
        gr.Slider(1, 10, value=4, label="Count", info="Number of prompts to generate (1-10)", step=1),
        gr.Slider(100, 1000, value=300, label="minLen", info="Minimum prompt length in characters (100-1000)", step=50),
        gr.Radio(["NSFW", "SFW", "BOTH"], value="BOTH", label="TYPE", info="NSFW stands for NOT SAFE FOR WORK; pick which kind of prompts to generate"),
        gr.Checkbox(label="Negative Prompt", info="Also generate a negative prompt for each prompt"),
        gr.Textbox(label="Your Idea", placeholder="Optional: comma-separated tags to seed the first prompt")
    ],
    "text"
)

if __name__ == "__main__":
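    # Note: plain launch() serves the app locally; Gradio can also create a
    # temporary public link with ui.launch(share=True).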
    ui.launch()