#!/usr/bin/env python
# coding: utf-8
# ### Keywords to Title Generator
# - https://huggingface.co/EnglishVoice/t5-base-keywords-to-headline?text=diabetic+diet+plan
# - Apache 2.0
# In[2]:
import torch
from transformers import T5ForConditionalGeneration,T5Tokenizer
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = T5ForConditionalGeneration.from_pretrained("EnglishVoice/t5-base-keywords-to-headline")
tokenizer = T5Tokenizer.from_pretrained("EnglishVoice/t5-base-keywords-to-headline", clean_up_tokenization_spaces=True, legacy=False)
model = model.to(device)
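# In[ ]:
# Optional check (illustrative, not part of the original app): confirm which device the
# model was moved to and roughly how many parameters it has.
print(device, sum(p.numel() for p in model.parameters()))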
# In[5]:
def title_gen(keywords):
    text = "headline: " + keywords
    encoding = tokenizer.encode_plus(text, return_tensors="pt")
    input_ids = encoding["input_ids"].to(device)
    attention_masks = encoding["attention_mask"].to(device)
    beam_outputs = model.generate(
        input_ids = input_ids,
        attention_mask = attention_masks,
        max_new_tokens = 30,
        do_sample = True,
        num_return_sequences = 5,
        temperature = 1.2,
        #num_beams = 20,
        #num_beam_groups = 20,
        #diversity_penalty = 0.8,
        no_repeat_ngram_size = 3,
        penalty_alpha = 0.8,
        #early_stopping = True,
        top_k = 15,
        #top_p = 0.60,
    )
    titles = ""
    for i in range(len(beam_outputs)):
        result = tokenizer.decode(beam_outputs[i], skip_special_tokens=True)
        titles += f"<h3>{result}</h3>"  # Collect all titles; each <h3> block renders on its own line
    return titles
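# In[ ]:
# Quick sanity check, a minimal sketch assuming the model/tokenizer cells above have run;
# the sample keywords are illustrative, taken from the model card's example query.
print(title_gen("diabetic, diet plan"))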
# In[1]:
import gradio as gr
# In[ ]:
iface = gr.Interface(fn=title_gen,
                     inputs=[gr.Textbox(label="Paste 2 or more keywords separated by a comma.", lines=1)],
                     outputs=[gr.HTML(label="Titles:")],
                     title="AI Keywords to Title Generator",
                     description="Turn keywords into creative title suggestions",
                     article="<div align=left><h1>AI Creative Title Generator</h1><li>With just keywords, generate a list of creative titles.</li><li>Click on Submit to generate more creative and diverse titles.</li><p>AI Model:<br><li>T5 Model trained on a dataset of titles and related keywords</li><li>Original model id: EnglishVoice/t5-base-keywords-to-headline by English Voice AI Labs</li></p><p>Default parameter details:<br><li>Temperature = 1.2, no_repeat_ngram_size=3, top_k = 15, penalty_alpha = 0.8, max_new_tokens = 30</li></div>",
                     flagging_mode='never'
                     )
iface.launch()
# In[ ]:
'''
# Disabled sketch: a four-button panel for changing generation parameters with one click
# (a wired-up version of this idea is sketched after this block).
def fn(text):
    return "Hello gradio!"

with gr.Blocks() as demo:
    with gr.Row(variant='compact') as PanelRow1:  # first row: top
        with gr.Column(scale=0, min_width=180) as PanelCol5:
            gr.HTML("")
        with gr.Column(scale=0) as PanelCol4:
            submit = gr.Button("Temp++", scale=0)
        with gr.Column(scale=1) as PanelCol5:
            gr.HTML("")
    with gr.Row(variant='compact') as PanelRow2:  # 2nd row: left, right, middle
        with gr.Column(min_width=100) as PanelCol1:
            submit = gr.Button("Contrastive")
        with gr.Column(min_width=100) as PanelCol2:
            submit = gr.Button("Re-generate")
        with gr.Column(min_width=100) as PanelCol3:
            submit = gr.Button("Diversity Beam")
        with gr.Column(min_width=100) as PanelCol5:
            gr.HTML("")
        with gr.Column(min_width=100) as PanelCol5:
            gr.HTML("")
        with gr.Column(scale=0) as PanelCol5:
            gr.HTML("")
    with gr.Row(variant='compact') as PanelRow3:  # last row: down
        with gr.Column(scale=0, min_width=180) as PanelCol7:
            gr.HTML("")
        with gr.Column(scale=1) as PanelCol6:
            submit = gr.Button("Temp--", scale=0)
        with gr.Column(scale=0) as PanelCol5:
            gr.HTML("")

demo.launch()
'''
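# In[ ]:
# Hedged sketch (not part of the original app): how the "Re-generate" button from the
# disabled panel above could be wired to title_gen with gr.Blocks. The component names
# (kw_box, out_html, regen_btn) are illustrative assumptions.
with gr.Blocks() as panel_demo:
    kw_box = gr.Textbox(label="Keywords", lines=1)
    out_html = gr.HTML(label="Titles:")
    regen_btn = gr.Button("Re-generate")
    # Each click re-runs generation; since do_sample=True, every click yields new titles.
    regen_btn.click(fn=title_gen, inputs=kw_box, outputs=out_html)
#panel_demo.launch()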
# In[164]:
import gc
gc.collect()  # reclaim memory once the demo is no longer needed
# In[166]:
gr.close_all()  # shut down any Gradio servers still running in this process