natgluons committed on
Commit 177b6ad · 1 Parent(s): ed07d7f

Ready for Hugging Face Spaces deployment

README.md ADDED
@@ -0,0 +1,97 @@
+# 💖 Pickup Line Generator
+
+A modern web application that generates fun, clever, or cringey pickup lines using the SmolLM-135M model. Built with Next.js and Hugging Face's Inference API.
+
+## 🌟 Features
+
+- Generate pickup lines with different vibes:
+  - 💝 Romantic
+  - 😏 Cheesy
+  - 🔬 Nerdy
+  - 😂 Cringe
+  - 💋 Flirty
+- Modern, responsive UI with beautiful animations
+- One-click copy to clipboard
+- Powered by the SmolLM-135M model
+
+## 🛠️ Tech Stack
+
+- **Frontend**: Next.js 13+ with App Router
+- **Styling**: TailwindCSS
+- **Model**: SmolLM-135M (via Hugging Face Inference API)
+
+## 🚀 Getting Started
+
+### Prerequisites
+
+- Node.js 18+
+- Python 3.8+
+
+### Installation
+
+1. Clone the repository:
+```bash
+git clone <your-repo-url>
+cd pickup-line-generator
+```
+
+2. Install dependencies:
+```bash
+npm install
+```
+
+3. Run the development server:
+```bash
+npm run dev
+```
+
+Visit `http://localhost:3000` to see the application.
+
+## 🌐 Deployment to Hugging Face Spaces
+
+1. Push your code to GitHub
+2. Create a new Space on Hugging Face:
+   - Choose "Next.js" as the SDK
+   - Connect your GitHub repository
+   - The environment variables are already configured in the repository
+
+The application will automatically build and deploy.
+
+## 📁 Project Structure
+
+```
+pickup-line-generator/
+├── app/
+│   ├── page.tsx          # Main page component
+│   ├── layout.tsx        # Root layout
+│   ├── globals.css       # Global styles
+│   └── api/
+│       └── generate/     # API route for generation
+│           └── route.ts
+├── lib/
+│   └── model.ts          # Model integration
+└── public/               # Static assets
+```
+
+## 🎯 Example Pickup Lines
+
+Here are some examples of what the generator can create:
+
+- 💝 **Romantic**: "Are you a magician? Because whenever I look at you, everyone else disappears."
+- 🔬 **Nerdy**: "Are you made of copper and tellurium? Because you're Cu-Te!"
+- 😏 **Cheesy**: "Are you a parking ticket? Because you've got FINE written all over you!"
+- 😂 **Cringe**: "Are you a dictionary? Because you're adding meaning to my life!"
+- 💋 **Flirty**: "Is your name Google? Because you've got everything I've been searching for!"
+
+## 🤝 Contributing
+
+Contributions are welcome! Please feel free to submit a Pull Request.
+
+## 📝 License
+
+This project is licensed under the MIT License - see the LICENSE file for details.
+
+## 🙏 Acknowledgments
+
+- Built using SmolLM by Hugging Face
+- Deployed on Hugging Face Spaces
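
The README's Getting Started steps end with a running dev server. As a quick sanity check of the generate flow, here is a minimal sketch of calling the app's API route from a script, assuming the default dev port 3000 and the `{ vibe }` / `{ pickupLine }` JSON contract defined in `app/api/generate/route.ts` (changed later in this commit):

```python
import requests

# Ask the Next.js API route for a pickup line with a given vibe
resp = requests.post(
    "http://localhost:3000/api/generate",
    json={"vibe": "romantic"},  # any vibe listed in the Features section
    timeout=60,
)
resp.raise_for_status()
print(resp.json()["pickupLine"])
```
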
model_server.py ADDED
@@ -0,0 +1,142 @@
+from fastapi import FastAPI, HTTPException
+from fastapi.middleware.cors import CORSMiddleware
+from pydantic import BaseModel
+import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+app = FastAPI()
+
+# Add CORS middleware
+app.add_middleware(
+    CORSMiddleware,
+    allow_origins=["*"],  # In production, restrict this to your frontend's domain
+    allow_credentials=True,
+    allow_methods=["*"],
+    allow_headers=["*"],
+)
+
+# Load model and tokenizer
+print("Loading SmolLM-135M model...")
+MODEL_NAME = "HuggingFaceTB/SmolLM-135M"
+
+# Check for CUDA availability
+device = "cuda" if torch.cuda.is_available() else "cpu"
+print(f"Using device: {device}")
+
+# Load the model and tokenizer
+tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
+tokenizer.pad_token = tokenizer.eos_token
+model = AutoModelForCausalLM.from_pretrained(MODEL_NAME).to(device)
+
+print(f"Model loaded successfully! Memory footprint: {model.get_memory_footprint() / 1e6:.2f} MB")
+
+class VibeRequest(BaseModel):
+    vibe: str
+
+def get_vibe_guidance(vibe: str) -> str:
+    """Get specific guidance for each vibe, with a one-shot example."""
+    vibe_patterns = {
+        "romantic": """Generate a romantic and sweet pickup line that's genuine and heartfelt.
+Example:
+Input: Generate a romantic pickup line
+Output: Are you a magician? Because whenever I look at you, everyone else disappears. ❤️
+
+Now generate a romantic pickup line: """,
+
+        "cheesy": """Generate a super cheesy and over-the-top pickup line.
+Example:
+Input: Generate a cheesy pickup line
+Output: Are you a parking ticket? Because you've got FINE written all over you! 😏
+
+Now generate a cheesy pickup line: """,
+
+        "nerdy": """Generate a nerdy, science-themed pickup line.
+Example:
+Input: Generate a nerdy pickup line
+Output: Are you made of copper and tellurium? Because you're Cu-Te! 🔬
+
+Now generate a nerdy pickup line: """,
+
+        "cringe": """Generate the most cringey and over-the-top pickup line imaginable.
+Example:
+Input: Generate a cringe pickup line
+Output: Are you a dictionary? Because you're adding meaning to my life! 📚
+
+Now generate a cringe pickup line: """,
+
+        "flirty": """Generate a bold and flirty pickup line.
+Example:
+Input: Generate a flirty pickup line
+Output: Is your name Google? Because you've got everything I've been searching for! 😏
+
+Now generate a flirty pickup line: """
+    }
+    # Fallback prompt for unrecognized vibes
+    return vibe_patterns.get(vibe, f"Now generate a {vibe} pickup line: ")
+
+@app.post("/generate")
+async def generate_pickup_line(request: VibeRequest):
+    try:
+        vibe = request.vibe
+        vibe_guide = get_vibe_guidance(vibe)
+
+        # Create the prompt
+        prompt = f"""Instructions: Generate a pickup line with a {vibe} vibe.
+{vibe_guide}"""
+
+        # Prepare inputs
+        encoded_input = tokenizer.encode_plus(
+            prompt,
+            return_tensors="pt",
+            padding=True,
+            return_attention_mask=True
+        )
+        input_ids = encoded_input["input_ids"].to(device)
+        attention_mask = encoded_input["attention_mask"].to(device)
+
+        # Generate response
+        with torch.no_grad():
+            outputs = model.generate(
+                input_ids,
+                attention_mask=attention_mask,
+                max_new_tokens=100,
+                do_sample=True,
+                temperature=0.8,
+                top_p=0.92,
+                top_k=50,
+                pad_token_id=tokenizer.eos_token_id,
+                eos_token_id=tokenizer.eos_token_id,
+            )
+
+        # Get the full generated text
+        full_response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+        # Extract just the pickup line
+        if full_response.startswith(prompt):
+            response = full_response[len(prompt):].strip()
+        else:
+            response = full_response.replace(prompt, "").strip()
+
+        # Clean up the response: keep only the text before any stray marker
+        for marker in ["Instructions:", "Generate a pickup line:", "\n"]:
+            if marker in response:
+                response = response.split(marker, 1)[0].strip()
+
+        # Add appropriate emoji based on vibe
+        if vibe == "romantic":
+            response += " ❤️"
+        elif vibe == "cheesy":
+            response += " 😏"
+        elif vibe == "nerdy":
+            response += " 🔬"
+        elif vibe == "cringe":
+            response += " 😂"
+        elif vibe == "flirty":
+            response += " 💋"
+
+        return {"pickupLine": response}
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=str(e))
+
+if __name__ == "__main__":
+    import uvicorn
+    uvicorn.run(app, host="0.0.0.0", port=8000)
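
Since `model_server.py` is a plain FastAPI app, the `/generate` endpoint can be exercised in-process without starting uvicorn. A minimal sketch using FastAPI's test client, assuming the file above is importable as `model_server`:

```python
from fastapi.testclient import TestClient

from model_server import app  # assumes the file above is saved as model_server.py

client = TestClient(app)

# POST a vibe and read back the generated line
resp = client.post("/generate", json={"vibe": "nerdy"})
assert resp.status_code == 200
print(resp.json()["pickupLine"])
```

Note that importing the module loads SmolLM-135M immediately, so even this in-process check pays the model load cost once.
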
pickup-line-generator/.gitignore CHANGED
@@ -22,12 +22,10 @@ npm-debug.log*
 yarn-debug.log*
 yarn-error.log*
 
-# local env files
-.env*.local
+# environment variables
 .env
-
-# vercel
-.vercel
+.env.*
+!.env.example
 
 # typescript
 *.tsbuildinfo
@@ -48,7 +46,7 @@ next-env.d.ts
 ehthumbs.db
 Thumbs.db
 
-# Python virtual environment (if you're using Python for the model)
+# Python virtual environment
 venv/
 env/
 ENV/
@@ -80,9 +78,4 @@ logs
 *.tgz
 
 # Yarn Integrity file
-.yarn-integrity
-
-# dotenv environment variable files
-.env.development.local
-.env.test.local
-.env.production.local
+.yarn-integrity
pickup-line-generator/app/api/generate/route.ts CHANGED
@@ -1,26 +1,21 @@
 import { NextResponse } from 'next/server';
+import { generatePickupLine } from '@/lib/model';
 
 export async function POST(request: Request) {
   try {
-    const body = await request.json();
+    const { vibe } = await request.json();
 
-    // Forward the request to our model server
-    const response = await fetch(`${process.env.NEXT_PUBLIC_API_URL}/generate`, {
-      method: 'POST',
-      headers: {
-        'Content-Type': 'application/json',
-      },
-      body: JSON.stringify(body),
-    });
-
-    if (!response.ok) {
-      throw new Error('Failed to generate pickup line');
+    if (!vibe) {
+      return NextResponse.json(
+        { error: 'Vibe is required' },
+        { status: 400 }
+      );
     }
 
-    const data = await response.json();
-    return NextResponse.json(data);
+    const pickupLine = await generatePickupLine(vibe);
+    return NextResponse.json({ pickupLine });
   } catch (error) {
-    console.error('Error generating pickup line:', error);
+    console.error('Error:', error);
     return NextResponse.json(
       { error: 'Failed to generate pickup line' },
       { status: 500 }
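
`lib/model.ts`, which now backs this route, is not part of this diff. For reference only, an equivalent `generatePickupLine` against the Hugging Face Inference API, sketched in Python with `huggingface_hub` (the sampling parameters mirror `model_server.py`; the prompt shape is an assumption, not the file's actual contents):

```python
from huggingface_hub import InferenceClient

# Reads the API token from the HF_TOKEN environment variable by default
client = InferenceClient(model="HuggingFaceTB/SmolLM-135M")

def generate_pickup_line(vibe: str) -> str:
    # Hypothetical prompt; lib/model.ts may build it differently
    prompt = f"Instructions: Generate a pickup line with a {vibe} vibe.\n"
    return client.text_generation(
        prompt,
        max_new_tokens=100,  # same generation budget as model_server.py
        temperature=0.8,
        top_p=0.92,
    ).strip()
```
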
pickup-line-generator/next.config.js CHANGED
@@ -1,11 +1,6 @@
 /** @type {import('next').NextConfig} */
 const nextConfig = {
-  output: 'standalone',
-  reactStrictMode: true,
-  swcMinify: true,
-  experimental: {
-    serverActions: true,
-  },
+  output: 'standalone'
 }
 
 module.exports = nextConfig