Leo8613 committed on
Commit
f16c710
·
verified ·
1 Parent(s): 7b524eb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +32 -18
app.py CHANGED
@@ -1,36 +1,50 @@
1
  import gradio as gr
2
  from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
3
 
4
- # Charger le modèle et le tokenizer
5
  tokenizer = AutoTokenizer.from_pretrained("unsloth/Llama-3.2-1B")
6
  model = AutoModelForCausalLM.from_pretrained("unsloth/Llama-3.2-1B")
7
 
8
- # Utiliser une pipeline pour la génération de texte
9
  text_gen_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)
10
 
11
- # Fonction de génération de texte
12
- def generate_text(prompt, max_length=50):
13
- generated_text = text_gen_pipeline(prompt, max_length=max_length, num_return_sequences=1)
 
 
 
 
 
14
  return generated_text[0]['generated_text']
15
 
16
- # Interface Gradio
17
  with gr.Blocks() as demo:
18
- gr.Markdown("## Génération de texte avec Llama 3.2 - 1B")
19
 
20
- # Entrée du texte par l'utilisateur
21
- prompt_input = gr.Textbox(label="Entrée (prompt)", placeholder="Entrez votre texte ici...")
22
 
23
- # Slider pour la longueur maximale de la génération
24
- max_length_input = gr.Slider(minimum=10, maximum=200, value=50, step=10, label="Longueur maximale")
25
 
26
- # Zone de sortie pour le texte généré
27
- output_text = gr.Textbox(label="Texte généré")
28
 
29
- # Bouton de soumission
30
- generate_button = gr.Button("Générer")
31
 
32
- # Action sur le bouton
33
- generate_button.click(generate_text, inputs=[prompt_input, max_length_input], outputs=output_text)
 
 
 
 
 
 
 
 
 
34
 
35
- # Lancer l'application
36
  demo.launch()
 
1
  import gradio as gr
2
  from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
3
 
4
# Single source of truth for the checkpoint id, so the tokenizer and the
# model can never drift out of sync.
MODEL_NAME = "unsloth/Llama-3.2-1B"

# Load the tokenizer and model (downloads the weights on first run).
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)

# Wrap them in a text-generation pipeline for convenient inference.
text_gen_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)
10
 
11
# Text generation function with sampling controls.
def generate_text(prompt, max_length=50, temperature=0.7, top_p=0.9, top_k=50):
    """Generate a completion for *prompt* with the Llama 3.2 pipeline.

    Args:
        prompt: Input text to continue.
        max_length: Total output length in tokens (prompt included).
        temperature: Sampling temperature; higher values are more random.
        top_p: Nucleus-sampling probability mass.
        top_k: Number of highest-probability tokens considered per step.

    Returns:
        The generated text as a string (the prompt is included, per the
        pipeline's default behavior).
    """
    # do_sample=True is required here: without it the pipeline decodes
    # greedily and silently ignores temperature / top_p / top_k, so the
    # UI sliders would have no effect.
    generated_text = text_gen_pipeline(
        prompt,
        max_length=max_length,
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        do_sample=True,
        num_return_sequences=1,
    )
    return generated_text[0]['generated_text']
20
 
21
# --- Gradio interface ---------------------------------------------------
with gr.Blocks() as demo:
    gr.Markdown("## Text Generation with Llama 3.2 - 1B")

    # Prompt entry.
    user_prompt = gr.Textbox(label="Input (Prompt)", placeholder="Enter your prompt here...")

    # Generation controls: length plus the three sampling knobs.
    max_len_slider = gr.Slider(minimum=10, maximum=200, value=50, step=10, label="Maximum Length")
    temperature_slider = gr.Slider(minimum=0.1, maximum=1.0, value=0.7, step=0.1, label="Temperature (creativity)")
    top_p_slider = gr.Slider(minimum=0.1, maximum=1.0, value=0.9, step=0.1, label="Top-p (nucleus sampling)")
    top_k_slider = gr.Slider(minimum=1, maximum=100, value=50, step=1, label="Top-k (sampling diversity)")

    # Result display.
    result_box = gr.Textbox(label="Generated Text")

    # Wire the button to the generation function.
    run_button = gr.Button("Generate")
    run_button.click(
        generate_text,
        inputs=[user_prompt, max_len_slider, temperature_slider, top_p_slider, top_k_slider],
        outputs=result_box,
    )

# Start the Gradio app server.
demo.launch()