Spaces:
Sleeping
Sleeping
Ready for Hugging Face Spaces deployment
Browse files- README.md +97 -0
- model_server.py +142 -0
- pickup-line-generator/.gitignore +5 -12
- pickup-line-generator/app/api/generate/route.ts +10 -15
- pickup-line-generator/next.config.js +1 -6
README.md
ADDED
@@ -0,0 +1,97 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Pickup Line Generator
|
2 |
+
|
3 |
+
A modern web application that generates fun, clever, or cringey pickup lines using the SmolLM-135M model. Built with Next.js and Hugging Face's Inference API.
|
4 |
+
|
5 |
+
## Features
|
6 |
+
|
7 |
+
- Generate pickup lines with different vibes:
|
8 |
+
- π Romantic
|
9 |
+
- π Cheesy
|
10 |
+
- π¬ Nerdy
|
11 |
+
- π Cringe
|
12 |
+
- π Flirty
|
13 |
+
- Modern, responsive UI with beautiful animations
|
14 |
+
- One-click copy to clipboard
|
15 |
+
- Powered by SmolLM-135M model
|
16 |
+
|
17 |
+
## Tech Stack
|
18 |
+
|
19 |
+
- **Frontend**: Next.js 13+ with App Router
|
20 |
+
- **Styling**: TailwindCSS
|
21 |
+
- **Model**: SmolLM-135M (via Hugging Face Inference API)
|
22 |
+
|
23 |
+
## Getting Started
|
24 |
+
|
25 |
+
### Prerequisites
|
26 |
+
|
27 |
+
- Node.js 18+
|
28 |
+
- Python 3.8+
|
29 |
+
|
30 |
+
### Installation
|
31 |
+
|
32 |
+
1. Clone the repository:
|
33 |
+
```bash
|
34 |
+
git clone <your-repo-url>
|
35 |
+
cd pickup-line-generator
|
36 |
+
```
|
37 |
+
|
38 |
+
2. Install dependencies:
|
39 |
+
```bash
|
40 |
+
npm install
|
41 |
+
```
|
42 |
+
|
43 |
+
3. Run the development server:
|
44 |
+
```bash
|
45 |
+
npm run dev
|
46 |
+
```
|
47 |
+
|
48 |
+
Visit `http://localhost:3000` to see the application.
|
49 |
+
|
50 |
+
## Deployment to Hugging Face Spaces
|
51 |
+
|
52 |
+
1. Push your code to GitHub
|
53 |
+
2. Create a new Space on Hugging Face:
|
54 |
+
- Choose "Next.js" as the SDK
|
55 |
+
- Connect your GitHub repository
|
56 |
+
- The environment variables are already configured in the repository
|
57 |
+
|
58 |
+
The application will automatically build and deploy.
|
59 |
+
|
60 |
+
## Project Structure
|
61 |
+
|
62 |
+
```
|
63 |
+
pickup-line-generator/
|
64 |
+
βββ app/
|
65 |
+
β βββ page.tsx # Main page component
|
66 |
+
β βββ layout.tsx # Root layout
|
67 |
+
β βββ globals.css # Global styles
|
68 |
+
β βββ api/
|
69 |
+
β βββ generate/ # API route for generation
|
70 |
+
β βββ route.ts
|
71 |
+
βββ lib/
|
72 |
+
β βββ model.ts # Model integration
|
73 |
+
βββ public/ # Static assets
|
74 |
+
```
|
75 |
+
|
76 |
+
## Example Pickup Lines
|
77 |
+
|
78 |
+
Here are some examples of what the generator can create:
|
79 |
+
|
80 |
+
- π **Romantic**: "Are you a magician? Because whenever I look at you, everyone else disappears."
|
81 |
+
- π¬ **Nerdy**: "Are you made of copper and tellurium? Because you're Cu-Te!"
|
82 |
+
- π **Cheesy**: "Are you a parking ticket? Because you've got FINE written all over you!"
|
83 |
+
- π **Cringe**: "Are you a dictionary? Because you're adding meaning to my life!"
|
84 |
+
- π **Flirty**: "Is your name Google? Because you've got everything I've been searching for!"
|
85 |
+
|
86 |
+
## Contributing
|
87 |
+
|
88 |
+
Contributions are welcome! Please feel free to submit a Pull Request.
|
89 |
+
|
90 |
+
## License
|
91 |
+
|
92 |
+
This project is licensed under the MIT License - see the LICENSE file for details.
|
93 |
+
|
94 |
+
## Acknowledgments
|
95 |
+
|
96 |
+
- Built using SmolLM by Hugging Face
|
97 |
+
- Deployed on Hugging Face Spaces
|
model_server.py
ADDED
@@ -0,0 +1,142 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

app = FastAPI()

# Add CORS middleware so the Next.js frontend (served from another origin)
# can call this API from the browser.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # In production, replace with your Vercel domain
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Load model and tokenizer once at import time so every request reuses the
# same in-memory model (first start downloads the weights from the Hub).
print("Loading SmolLM-135M model...")
MODEL_NAME = "HuggingFaceTB/SmolLM-135M"

# Check for CUDA availability; fall back to CPU (the 135M model is small
# enough to run on CPU).
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using device: {device}")

# Load the model and tokenizer.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
# NOTE(review): presumably this tokenizer ships without a pad token, so the
# EOS token is reused for padding — confirm against the model card.
tokenizer.pad_token = tokenizer.eos_token
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME).to(device)

print(f"Model loaded successfully! Memory footprint: {model.get_memory_footprint() / 1e6:.2f} MB")
|
32 |
+
|
33 |
+
class VibeRequest(BaseModel):
|
34 |
+
vibe: str
|
35 |
+
|
36 |
+
def get_vibe_guidance(vibe: str) -> str:
    """Return a few-shot prompt for the requested vibe.

    Each known vibe maps to an instruction plus one worked example
    (input/output pair) to steer the small model. Unknown vibes fall back
    to a generic, complete prompt that still mentions the requested vibe.

    Args:
        vibe: Vibe keyword, e.g. "romantic", "cheesy", "nerdy", "cringe",
            "flirty"; any other string gets the generic fallback.

    Returns:
        The prompt text, ending with a cue for the model to continue.
    """
    vibe_patterns = {
        "romantic": """Generate a romantic and sweet pickup line that's genuine and heartfelt.
Example:
Input: Generate a romantic pickup line
Output: Are you a magician? Because whenever I look at you, everyone else disappears. β€οΈ

Now generate a romantic pickup line: """,

        "cheesy": """Generate a super cheesy and over-the-top pickup line.
Example:
Input: Generate a cheesy pickup line
Output: Are you a parking ticket? Because you've got FINE written all over you! π

Now generate a cheesy pickup line: """,

        "nerdy": """Generate a nerdy, science-themed pickup line.
Example:
Input: Generate a nerdy pickup line
Output: Are you made of copper and tellurium? Because you're Cu-Te! π¬

Now generate a nerdy pickup line: """,

        "cringe": """Generate the most cringey and over-the-top pickup line imaginable.
Example:
Input: Generate a cringe pickup line
Output: Are you a dictionary? Because you're adding meaning to my life! π

Now generate a cringe pickup line: """,

        "flirty": """Generate a bold and flirty pickup line.
Example:
Input: Generate a flirty pickup line
Output: Is your name Google? Because you've got everything I've been searching for! π

Now generate a flirty pickup line: """
    }
    # Fix: the previous fallback was the truncated fragment
    # "Generate a pickup line with a " — a dangling prompt that never named
    # the vibe. Build a complete instruction instead.
    return vibe_patterns.get(vibe, f"Generate a pickup line with a {vibe} vibe: ")
75 |
+
|
76 |
+
@app.post("/generate")
|
77 |
+
async def generate_pickup_line(request: VibeRequest):
|
78 |
+
try:
|
79 |
+
vibe = request.vibe
|
80 |
+
vibe_guide = get_vibe_guidance(vibe)
|
81 |
+
|
82 |
+
# Create the prompt
|
83 |
+
prompt = f"""Instructions: Generate a pickup line with a {vibe} vibe.
|
84 |
+
{vibe_guide}"""
|
85 |
+
|
86 |
+
# Prepare inputs
|
87 |
+
encoded_input = tokenizer.encode_plus(
|
88 |
+
prompt,
|
89 |
+
return_tensors="pt",
|
90 |
+
padding=True,
|
91 |
+
return_attention_mask=True
|
92 |
+
)
|
93 |
+
input_ids = encoded_input["input_ids"].to(device)
|
94 |
+
attention_mask = encoded_input["attention_mask"].to(device)
|
95 |
+
|
96 |
+
# Generate response
|
97 |
+
with torch.no_grad():
|
98 |
+
outputs = model.generate(
|
99 |
+
input_ids,
|
100 |
+
attention_mask=attention_mask,
|
101 |
+
max_new_tokens=100,
|
102 |
+
do_sample=True,
|
103 |
+
temperature=0.8,
|
104 |
+
top_p=0.92,
|
105 |
+
top_k=50,
|
106 |
+
pad_token_id=tokenizer.eos_token_id,
|
107 |
+
eos_token_id=tokenizer.eos_token_id,
|
108 |
+
)
|
109 |
+
|
110 |
+
# Get the full generated text
|
111 |
+
full_response = tokenizer.decode(outputs[0], skip_special_tokens=True)
|
112 |
+
|
113 |
+
# Extract just the pickup line
|
114 |
+
if full_response.startswith(prompt):
|
115 |
+
response = full_response[len(prompt):].strip()
|
116 |
+
else:
|
117 |
+
response = full_response.replace(prompt, "").strip()
|
118 |
+
|
119 |
+
# Clean up the response
|
120 |
+
for marker in ["Instructions:", "Generate a pickup line:", "\n"]:
|
121 |
+
if marker in response:
|
122 |
+
response = response.split(marker, 1)[0].strip()
|
123 |
+
|
124 |
+
# Add appropriate emoji based on vibe
|
125 |
+
if vibe == "romantic":
|
126 |
+
response += " β€οΈ"
|
127 |
+
elif vibe == "cheesy":
|
128 |
+
response += " π"
|
129 |
+
elif vibe == "nerdy":
|
130 |
+
response += " π¬"
|
131 |
+
elif vibe == "cringe":
|
132 |
+
response += " π"
|
133 |
+
elif vibe == "flirty":
|
134 |
+
response += " π"
|
135 |
+
|
136 |
+
return {"pickupLine": response}
|
137 |
+
except Exception as e:
|
138 |
+
raise HTTPException(status_code=500, detail=str(e))
|
139 |
+
|
140 |
+
if __name__ == "__main__":
    # Local/dev entry point; in deployment the app is typically served by
    # an external uvicorn/gunicorn process instead.
    import uvicorn
    # Bind to all interfaces so the server is reachable from inside a
    # container; port 8000 is the conventional FastAPI default.
    uvicorn.run(app, host="0.0.0.0", port=8000)
|
pickup-line-generator/.gitignore
CHANGED
@@ -22,12 +22,10 @@ npm-debug.log*
|
|
22 |
yarn-debug.log*
|
23 |
yarn-error.log*
|
24 |
|
25 |
-
#
|
26 |
-
.env*.local
|
27 |
.env
|
28 |
-
|
29 |
-
|
30 |
-
.vercel
|
31 |
|
32 |
# typescript
|
33 |
*.tsbuildinfo
|
@@ -48,7 +46,7 @@ next-env.d.ts
|
|
48 |
ehthumbs.db
|
49 |
Thumbs.db
|
50 |
|
51 |
-
# Python virtual environment
|
52 |
venv/
|
53 |
env/
|
54 |
ENV/
|
@@ -80,9 +78,4 @@ logs
|
|
80 |
*.tgz
|
81 |
|
82 |
# Yarn Integrity file
|
83 |
-
.yarn-integrity
|
84 |
-
|
85 |
-
# dotenv environment variable files
|
86 |
-
.env.development.local
|
87 |
-
.env.test.local
|
88 |
-
.env.production.local
|
|
|
22 |
yarn-debug.log*
|
23 |
yarn-error.log*
|
24 |
|
25 |
+
# environment variables
|
|
|
26 |
.env
|
27 |
+
.env.*
|
28 |
+
!.env.example
|
|
|
29 |
|
30 |
# typescript
|
31 |
*.tsbuildinfo
|
|
|
46 |
ehthumbs.db
|
47 |
Thumbs.db
|
48 |
|
49 |
+
# Python virtual environment
|
50 |
venv/
|
51 |
env/
|
52 |
ENV/
|
|
|
78 |
*.tgz
|
79 |
|
80 |
# Yarn Integrity file
|
81 |
+
.yarn-integrity
|
|
|
|
|
|
|
|
|
|
pickup-line-generator/app/api/generate/route.ts
CHANGED
@@ -1,26 +1,21 @@
|
|
1 |
import { NextResponse } from 'next/server';
|
|
|
2 |
|
3 |
export async function POST(request: Request) {
|
4 |
try {
|
5 |
-
const
|
6 |
|
7 |
-
|
8 |
-
|
9 |
-
|
10 |
-
|
11 |
-
|
12 |
-
},
|
13 |
-
body: JSON.stringify(body),
|
14 |
-
});
|
15 |
-
|
16 |
-
if (!response.ok) {
|
17 |
-
throw new Error('Failed to generate pickup line');
|
18 |
}
|
19 |
|
20 |
-
const
|
21 |
-
return NextResponse.json(
|
22 |
} catch (error) {
|
23 |
-
console.error('Error
|
24 |
return NextResponse.json(
|
25 |
{ error: 'Failed to generate pickup line' },
|
26 |
{ status: 500 }
|
|
|
1 |
import { NextResponse } from 'next/server';
|
2 |
+
import { generatePickupLine } from '@/lib/model';
|
3 |
|
4 |
export async function POST(request: Request) {
|
5 |
try {
|
6 |
+
const { vibe } = await request.json();
|
7 |
|
8 |
+
if (!vibe) {
|
9 |
+
return NextResponse.json(
|
10 |
+
{ error: 'Vibe is required' },
|
11 |
+
{ status: 400 }
|
12 |
+
);
|
|
|
|
|
|
|
|
|
|
|
|
|
13 |
}
|
14 |
|
15 |
+
const pickupLine = await generatePickupLine(vibe);
|
16 |
+
return NextResponse.json({ pickupLine });
|
17 |
} catch (error) {
|
18 |
+
console.error('Error:', error);
|
19 |
return NextResponse.json(
|
20 |
{ error: 'Failed to generate pickup line' },
|
21 |
{ status: 500 }
|
pickup-line-generator/next.config.js
CHANGED
@@ -1,11 +1,6 @@
|
|
1 |
/** @type {import('next').NextConfig} */
|
2 |
const nextConfig = {
|
3 |
-
output: 'standalone'
|
4 |
-
reactStrictMode: true,
|
5 |
-
swcMinify: true,
|
6 |
-
experimental: {
|
7 |
-
serverActions: true,
|
8 |
-
},
|
9 |
}
|
10 |
|
11 |
module.exports = nextConfig
|
|
|
1 |
/** @type {import('next').NextConfig} */
const nextConfig = {
  // 'standalone' makes `next build` emit a self-contained server bundle
  // (.next/standalone), which is what container-based hosts such as
  // Hugging Face Spaces run.
  output: 'standalone'
}

module.exports = nextConfig
|