File size: 6,598 Bytes
99ec0c6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
import gradio as gr
import re

# Set page title and description
# UI copy rendered at the top of the Gradio page (markdown + inline HTML).
title = "πŸ’– Pickup Line Generator"
description = """
<div style="text-align: center; max-width: 650px; margin: 0 auto;">
  <div>
    <p>Generate fun, clever, or cringey pickup lines using SmolLM-135M! Select a vibe and click generate to get started! 😏</p>
  </div>
</div>
"""

# Load model and tokenizer
print("Loading SmolLM-135M model...")
# Hugging Face Hub id of the 135M-parameter SmolLM base model.
MODEL_NAME = "HuggingFaceTB/SmolLM-135M"

# Check for CUDA availability
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using device: {device}")

# Load the model and tokenizer
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
# Set pad_token to eos_token to handle padding
# NOTE(review): presumably SmolLM's tokenizer ships without a dedicated pad
# token, so eos is reused — confirm against the model card.
tokenizer.pad_token = tokenizer.eos_token
# Module-level globals `tokenizer`, `model`, and `device` are read directly
# by generate_pickup_line() below.
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME).to(device)

print(f"Model loaded successfully! Memory footprint: {model.get_memory_footprint() / 1e6:.2f} MB")

def get_vibe_guidance(vibe):
    """Return a few-shot prompt snippet steering generation toward *vibe*.

    Args:
        vibe: One of "romantic", "cheesy", "nerdy", "cringe", "flirty".
            Any other value falls back to a generic instruction built
            from the vibe name itself.

    Returns:
        A prompt string ending in an instruction for the model to
        complete (one worked example per known vibe).
    """
    vibe_patterns = {
        "romantic": """Generate a romantic and sweet pickup line that's genuine and heartfelt.
Example: 
Input: Generate a romantic pickup line
Output: Are you a magician? Because whenever I look at you, everyone else disappears. ❀️

Now generate a romantic pickup line: """,

        "cheesy": """Generate a super cheesy and over-the-top pickup line.
Example:
Input: Generate a cheesy pickup line
Output: Are you a parking ticket? Because you've got FINE written all over you! 😏

Now generate a cheesy pickup line: """,

        "nerdy": """Generate a nerdy, science-themed pickup line.
Example:
Input: Generate a nerdy pickup line
Output: Are you made of copper and tellurium? Because you're Cu-Te! πŸ”¬

Now generate a nerdy pickup line: """,

        "cringe": """Generate the most cringey and over-the-top pickup line imaginable.
Example:
Input: Generate a cringe pickup line
Output: Are you a dictionary? Because you're adding meaning to my life! πŸ“š

Now generate a cringe pickup line: """,

        "flirty": """Generate a bold and flirty pickup line.
Example:
Input: Generate a flirty pickup line
Output: Is your name Google? Because you've got everything I've been searching for! 😏

Now generate a flirty pickup line: """
    }
    # Fix: the old fallback was the truncated fragment
    # "Generate a pickup line with a ", which produced a broken prompt for
    # any vibe not in the dict. Build a complete instruction instead,
    # matching the closing line of the known-vibe templates.
    return vibe_patterns.get(vibe, f"Now generate a {vibe} pickup line: ")

def generate_pickup_line(vibe):
    """Generate a pickup line matching the selected vibe.

    Args:
        vibe: One of "romantic", "cheesy", "nerdy", "cringe", "flirty"
            (anything else gets the generic fallback prompt and no emoji).

    Returns:
        The generated pickup line with a vibe-matching emoji appended,
        or just the emoji suffix if every sampling attempt came back empty.
    """
    # Build the few-shot prompt from the vibe-specific guidance.
    vibe_guide = get_vibe_guidance(vibe)
    prompt = f"""Instructions: Generate a pickup line with a {vibe} vibe.
{vibe_guide}"""

    # Tokenize with an explicit attention mask: pad_token == eos_token here,
    # so generate() needs the mask to distinguish padding from real eos.
    encoded_input = tokenizer.encode_plus(
        prompt,
        return_tensors="pt",
        padding=True,
        return_attention_mask=True,
    )
    input_ids = encoded_input["input_ids"].to(device)
    attention_mask = encoded_input["attention_mask"].to(device)

    # Emoji suffix per vibe (table instead of an if/elif chain).
    vibe_emojis = {
        "romantic": " ❀️",
        "cheesy": " 😏",
        "nerdy": " πŸ”¬",
        "cringe": " πŸ˜‚",
        "flirty": " πŸ’‹",
    }

    # Fix: the original loop broke unconditionally after the first
    # iteration, so its advertised "generate multiple responses and pick
    # the best one" never happened. Now we actually retry, keeping the
    # first non-empty line.
    num_tries = 3
    response = ""
    for _ in range(num_tries):
        with torch.no_grad():
            outputs = model.generate(
                input_ids,
                attention_mask=attention_mask,
                max_new_tokens=100,
                do_sample=True,
                temperature=0.8,
                top_p=0.92,
                top_k=50,
                pad_token_id=tokenizer.eos_token_id,
                eos_token_id=tokenizer.eos_token_id,
            )

        full_response = tokenizer.decode(outputs[0], skip_special_tokens=True)

        # Strip the echoed prompt so only newly generated text remains.
        if full_response.startswith(prompt):
            response = full_response[len(prompt):].strip()
        else:
            response = full_response.replace(prompt, "").strip()

        # Keep only the first line, dropping any re-emitted instructions.
        for marker in ["Instructions:", "Generate a pickup line:", "\n"]:
            if marker in response:
                response = response.split(marker, 1)[0].strip()

        if response:
            break  # got a usable line; stop sampling

    return response + vibe_emojis.get(vibe, "")

# Create custom CSS
# Pink-themed styling injected into the Gradio page: page background tint,
# cursive page title, and a gradient button with a hover "lift" effect.
# (!important is needed to override Gradio's theme defaults.)
custom_css = """
.gradio-container {
    background-color: #fef6f9 !important;
}
.title {
    font-family: 'Lobster', cursive !important;
    color: #ff69b4 !important;
}
.button {
    background: linear-gradient(45deg, #ff69b4, #ff1493) !important;
    color: white !important;
    border: none !important;
    transition: all 0.3s ease !important;
}
.button:hover {
    transform: translateY(-2px);
    box-shadow: 0 4px 8px rgba(255, 105, 180, 0.3);
}
"""

# Create the Gradio interface: vibe picker + generate button on the left,
# output textbox + copy button on the right.
with gr.Blocks(theme="soft", css=custom_css) as demo:
    gr.Markdown(f"# {title}")
    gr.Markdown(description)
    
    with gr.Row():
        with gr.Column():
            vibe_dropdown = gr.Dropdown(
                choices=[
                    "romantic",
                    "cheesy",
                    "nerdy",
                    "cringe",
                    "flirty"
                ],
                label="Pick a vibe",
                value="romantic"
            )
            generate_btn = gr.Button("Generate Line", elem_classes="button")
        
        with gr.Column():
            output = gr.Textbox(
                label="Your pickup line",
                lines=3,
                interactive=False
            )
            copy_btn = gr.Button("πŸ“‹ Copy to Clipboard", elem_classes="button")
    
    # Example inputs
    gr.Examples(
        examples=[
            ["romantic"],
            ["cheesy"],
            ["nerdy"],
            ["cringe"],
            ["flirty"]
        ],
        inputs=[vibe_dropdown]
    )
    
    generate_btn.click(
        fn=generate_pickup_line,
        inputs=[vibe_dropdown],
        outputs=output
    )
    
    # Fix: copy_btn was created but never wired to a handler, so the
    # "Copy to Clipboard" button did nothing. fn=None with a js callback
    # runs entirely client-side, which is required for clipboard access.
    # NOTE(review): `js=` is the Gradio 4.x keyword; on Gradio 3.x the
    # parameter is `_js=` — confirm against the pinned gradio version.
    copy_btn.click(
        fn=None,
        inputs=output,
        outputs=None,
        js="(text) => { navigator.clipboard.writeText(text); }"
    )
    
    # Footer
    gr.Markdown("""
    <div style="text-align: center; margin-top: 20px; color: #666;">
        Built by Nath with SmolLM πŸ”₯
    </div>
    """)

# Launch the app only when executed as a script (not when imported).
if __name__ == "__main__":
    # NOTE(review): share=True publishes a temporary public gradio.live
    # URL — confirm this is intended for the deployment environment.
    demo.launch(share=True)  # Set share=False if you don't want to create a public link