gstranger committed on
Commit
dba807a
·
1 Parent(s): f6e532b

change space for hw5

Browse files
Files changed (1) hide show
  1. app.py +48 -24
app.py CHANGED
@@ -10,8 +10,13 @@ from diffusers import DiffusionPipeline
10
  device = "cuda" if torch.cuda.is_available() else "cpu"
11
  # model_repo_id = "stabilityai/sdxl-turbo" # Replace to the model you would like to use
12
  model_repo_id = "CompVis/stable-diffusion-v1-4"
13
- model_dropdown = ['stabilityai/sdxl-turbo', 'CompVis/stable-diffusion-v1-4' ]
14
 
 
 
 
 
 
15
 
16
 
17
  if torch.cuda.is_available():
@@ -28,6 +33,7 @@ MAX_IMAGE_SIZE = 1024
28
 
29
  # @spaces.GPU #[uncomment to use ZeroGPU]
30
  def infer(
 
31
  prompt,
32
  negative_prompt,
33
  randomize_seed,
@@ -44,7 +50,12 @@ def infer(
44
 
45
  generator = torch.Generator().manual_seed(seed)
46
 
47
- pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
 
 
 
 
 
48
  pipe = pipe.to(device)
49
 
50
  image = pipe(
@@ -60,11 +71,9 @@ def infer(
60
  return image, seed
61
 
62
 
63
-
64
  examples = [
65
- "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
66
- "An astronaut riding a green horse",
67
- "A delicious ceviche cheesecake slice",
68
  ]
69
 
70
  css = """
@@ -76,7 +85,22 @@ css = """
76
 
77
  with gr.Blocks(css=css) as demo:
78
  with gr.Column(elem_id="col-container"):
79
- gr.Markdown(" # Text-to-Image SemaSci Template")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
80
 
81
  with gr.Row():
82
  prompt = gr.Text(
@@ -92,22 +116,22 @@ with gr.Blocks(css=css) as demo:
92
  result = gr.Image(label="Result", show_label=False)
93
 
94
  with gr.Accordion("Advanced Settings", open=False):
95
- # model_repo_id = gr.Text(
96
- # label="Model Id",
97
- # max_lines=1,
98
- # placeholder="Choose model",
99
- # visible=True,
100
- # value=model_repo_id,
101
- # )
102
- model_repo_id = gr.Dropdown(
103
- label="Model Id",
104
- choices=model_dropdown,
105
- info="Choose model",
106
- visible=True,
107
- allow_custom_value=True,
108
- value=model_repo_id,
109
- )
110
-
111
  negative_prompt = gr.Text(
112
  label="Negative prompt",
113
  max_lines=1,
@@ -164,6 +188,7 @@ with gr.Blocks(css=css) as demo:
164
  triggers=[run_button.click, prompt.submit],
165
  fn=infer,
166
  inputs=[
 
167
  prompt,
168
  negative_prompt,
169
  randomize_seed,
@@ -179,4 +204,3 @@ with gr.Blocks(css=css) as demo:
179
 
180
  if __name__ == "__main__":
181
  demo.launch()
182
-
 
10
  device = "cuda" if torch.cuda.is_available() else "cpu"
11
  # model_repo_id = "stabilityai/sdxl-turbo" # Replace to the model you would like to use
12
  model_repo_id = "CompVis/stable-diffusion-v1-4"
13
+ model_dropdown = ["stabilityai/sdxl-turbo", "CompVis/stable-diffusion-v1-4"]
14
 
15
+ models = [
16
+ "gstranger/kawaiicat-lora-1.4",
17
+ "CompVis/stable-diffusion-v1-4",
18
+ "stabilityai/sdxl-turbo",
19
+ ]
20
 
21
 
22
  if torch.cuda.is_available():
 
33
 
34
  # @spaces.GPU #[uncomment to use ZeroGPU]
35
  def infer(
36
+ model_id,
37
  prompt,
38
  negative_prompt,
39
  randomize_seed,
 
50
 
51
  generator = torch.Generator().manual_seed(seed)
52
 
53
+ pipe = DiffusionPipeline.from_pretrained(
54
+ model_id,
55
+ torch_dtype=torch_dtype,
56
+ requires_safety_checker=False,
57
+ safety_checker=None,
58
+ )
59
  pipe = pipe.to(device)
60
 
61
  image = pipe(
 
71
  return image, seed
72
 
73
 
 
74
  examples = [
75
+ "kawaiicat. The cat is sitting. The cat's tail is curled up at the end. The cat is pleased and is enjoying its time.",
76
+ "kawaiicat. The cat is sitting upright. The cat is eating some noodles with the chopsticks from a green bowl, which it's holding in his hands.",
 
77
  ]
78
 
79
  css = """
 
85
 
86
  with gr.Blocks(css=css) as demo:
87
  with gr.Column(elem_id="col-container"):
88
+ gr.Markdown(" # Text-to-Image kawaiicat Stickers")
89
+ with gr.Row():
90
+ # Dropdown to select the model from Hugging Face
91
+ model_id = gr.Dropdown(
92
+ label="Model",
93
+ choices=models,
94
+ value=models[0], # Default model
95
+ )
96
+
97
+ lora_scale = gr.Slider(
98
+ label="LORA Scale",
99
+ minimum=0,
100
+ maximum=1,
101
+ step=0.01,
102
+ value=1,
103
+ )
104
 
105
  with gr.Row():
106
  prompt = gr.Text(
 
116
  result = gr.Image(label="Result", show_label=False)
117
 
118
  with gr.Accordion("Advanced Settings", open=False):
119
+ # model_repo_id = gr.Text(
120
+ # label="Model Id",
121
+ # max_lines=1,
122
+ # placeholder="Choose model",
123
+ # visible=True,
124
+ # value=model_repo_id,
125
+ # )
126
+ # model_id = gr.Dropdown(
127
+ # label="Model Id",
128
+ # choices=models,
129
+ # info="Choose model",
130
+ # visible=True,
131
+ # allow_custom_value=True,
132
+ # value=models,
133
+ # )
134
+
135
  negative_prompt = gr.Text(
136
  label="Negative prompt",
137
  max_lines=1,
 
188
  triggers=[run_button.click, prompt.submit],
189
  fn=infer,
190
  inputs=[
191
+ model_id,
192
  prompt,
193
  negative_prompt,
194
  randomize_seed,
 
204
 
205
  if __name__ == "__main__":
206
  demo.launch()