awacke1 committed
Commit a32d7a4 · verified · 1 Parent(s): f88b464

Update app.py: wrap story generation in a try/except and add console logging so runtime errors surface in the UI

Files changed (1)
  1. app.py +47 -33
app.py CHANGED
@@ -64,51 +64,65 @@ def generate_stories(prompt: str) -> list[str]:
     """
     Generates 10 story outlines from the loaded model based on the user's prompt.
     """
-    # If the model failed to load, display the error in all output boxes.
+    print("--- Button clicked. Attempting to generate stories... ---")
+
+    # If the model failed to load during startup, display that error.
     if model_error:
-        error_message = f"**Model failed to load.**\n\nPlease check the console logs for details.\n\n**Error:**\n`{str(model_error)}`"
+        error_message = f"**Model failed to load during startup.**\n\nPlease check the console logs for details.\n\n**Error:**\n`{str(model_error)}`"
+        print(f"Returning startup error: {error_message}")
         return [error_message] * 10
 
     if not prompt:
         # Return a list of 10 empty strings to clear the outputs
         return [""] * 10
 
-    # A clear, instructive prompt format that works well with models like Phi-2.
-    story_prompt = f"""Instruct: Create a short story outline based on this idea: "{prompt}"
+    try:
+        # A clear, instructive prompt format that works well with models like Phi-2.
+        story_prompt = f"""Instruct: Create a short story outline based on this idea: "{prompt}"
 The outline should have three parts: a dramatic hook, a concise ballad, and a satisfying finale. Use emojis.
 Output:
 ### 🎬 The Hook
 """
 
-    # Parameters for the pipeline to generate 10 diverse results.
-    params = {
-        "max_new_tokens": 250,
-        "num_return_sequences": 10,
-        "do_sample": True,
-        "temperature": 0.9,
-        "top_k": 50,
-        "top_p": 0.95,
-        "pad_token_id": generator.tokenizer.eos_token_id
-    }
-
-    # Generate 10 different story variations
-    outputs = generator(story_prompt, **params)
-
-    # Extract the generated text.
-    stories = []
-    for out in outputs:
-        # The model will generate the prompt plus the continuation. We extract just the new part.
-        full_text = out['generated_text']
-        # Add back the part of the prompt we want to see in the output
-        story_start = "### 🎬 The Hook\n"
-        generated_part = full_text.split(story_start)[-1]
-        stories.append(story_start + generated_part)
-
-    # Ensure we return exactly 10 stories, padding if necessary.
-    while len(stories) < 10:
-        stories.append("Failed to generate a story for this slot.")
-
-    return stories
+        # Parameters for the pipeline to generate 10 diverse results.
+        params = {
+            "max_new_tokens": 250,
+            "num_return_sequences": 10,
+            "do_sample": True,
+            "temperature": 0.9,
+            "top_k": 50,
+            "top_p": 0.95,
+            "pad_token_id": generator.tokenizer.eos_token_id
+        }
+
+        print("Generating text with the model...")
+        # Generate 10 different story variations
+        outputs = generator(story_prompt, **params)
+        print("✅ Text generation complete.")
+
+        # Extract the generated text.
+        stories = []
+        for out in outputs:
+            # The model will generate the prompt plus the continuation. We extract just the new part.
+            full_text = out['generated_text']
+            # Add back the part of the prompt we want to see in the output
+            story_start = "### 🎬 The Hook\n"
+            generated_part = full_text.split(story_start)[-1]
+            stories.append(story_start + generated_part)
+
+        # Ensure we return exactly 10 stories, padding if necessary.
+        while len(stories) < 10:
+            stories.append("Failed to generate a story for this slot.")
+
+        return stories
+
+    except Exception as e:
+        # Catch any errors that happen DURING generation and display them in the UI.
+        print(f"--- 🚨 Error during story generation ---")
+        print(f"Error: {e}")
+        runtime_error_message = f"**An error occurred during story generation.**\n\nPlease check the console logs for details.\n\n**Error:**\n`{str(e)}`"
+        return [runtime_error_message] * 10
+
 
 # --- Gradio Interface ---
 with gr.Blocks(theme=gr.themes.Soft(), css=".gradio-container {max-width: 95% !important;}") as demo:
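
Note: generate_stories reads two module-level names that this hunk never defines: generator (a transformers text-generation pipeline, since the code calls generator.tokenizer.eos_token_id) and model_error. A minimal sketch of the startup block the diff assumes, where the exact model id is an assumption suggested only by the "Phi-2" comment:

# Hypothetical startup block assumed earlier in app.py (not part of this
# commit): it defines the `generator` pipeline and the `model_error` flag
# that generate_stories() checks before generating.
from transformers import pipeline

generator = None
model_error = None
try:
    # The diff's comment mentions Phi-2, so microsoft/phi-2 is a plausible
    # model id; this exact id is an assumption, not taken from the commit.
    generator = pipeline("text-generation", model="microsoft/phi-2")
except Exception as e:
    # Capturing the exception lets generate_stories() report it in the UI.
    model_error = e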
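
The function always returns a 10-element list, which Gradio fans out across ten output components. An illustrative sketch of the wiring inside the gr.Blocks context; the component names and labels here are hypothetical, not from the commit:

# Illustrative Blocks wiring: Gradio maps each element of the returned
# 10-item list onto one output component, which is why generate_stories()
# pads its result to exactly 10 strings.
import gradio as gr

with gr.Blocks(theme=gr.themes.Soft()) as demo:
    prompt_box = gr.Textbox(label="Story idea")
    generate_btn = gr.Button("Generate 10 story outlines")
    story_boxes = [gr.Markdown() for _ in range(10)]
    generate_btn.click(fn=generate_stories, inputs=prompt_box, outputs=story_boxes)

demo.launch()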