BugZoid committed
Commit 3251884 · verified · 1 Parent(s): 90e51aa

Update app.py

Files changed (1):
  1. app.py +131 -55
app.py CHANGED
@@ -1,18 +1,36 @@
import tweepy
from transformers import pipeline, GPT2LMHeadModel, GPT2Tokenizer
import os
-
-

# Twitter authentication for reading
-
client = tweepy.Client(
    bearer_token=os.getenv('TWITTER_BEARER_TOKEN')
)

-
# Twitter authentication for posting
-
auth = tweepy.OAuth1UserHandler(
    os.getenv('TWITTER_API_KEY'),
    os.getenv('TWITTER_API_SECRET_KEY'),
@@ -22,58 +40,116 @@ auth = tweepy.OAuth1UserHandler(

api = tweepy.API(auth)

- # Collect tweets
- query = 'BBB25 -filter:retweets'
- tweets = client.search_recent_tweets(query=query, lang='pt', max_results=100)
-
- # Sentiment analysis
- sentiment_pipeline = pipeline('sentiment-analysis', model='cardiffnlp/twitter-xlm-roberta-base-sentiment')
-
- sentiments = []
- for tweet in tweets.data:
-     result = sentiment_pipeline(tweet.text)
-     sentiments.append(result[0]['label'])
-
- # Compute the ratios
- positive = sentiments.count('positive')
- negative = sentiments.count('negative')
- total = len(sentiments)
-
- positive_ratio = positive / total
- negative_ratio = negative / total
-
- # Generate the message with AI
- tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
- model = GPT2LMHeadModel.from_pretrained('gpt2')
-
- if positive_ratio > 0.6:
-     prompt = "Write an exciting tweet about BBB25 with a positive tone in Portuguese."
- elif negative_ratio > 0.6:
-     prompt = "Write an informative tweet about BBB25 with a neutral tone in Portuguese."
- else:
-     prompt = "Write a buzzing tweet about BBB25 with an engaging tone in Portuguese."

- input_ids = tokenizer.encode(prompt, return_tensors='pt')
-
- # Generate text with a token limit meant to correspond to 280 characters
- outputs = model.generate(input_ids, max_length=25, do_sample=True)
- generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
-
- # Limit the tweet to 280 characters
- generated_text = generated_text[:280]
-
- # Post to Twitter
try:
-     api.update_status(status=generated_text)
-     print(f"Postado: {generated_text}")
- except Exception as e:
-     print(f"Erro ao postar: {e}")
-
- # Logging (optional)
- with open('posting_log.txt', 'a') as f:
-     f.write(f"Positive Ratio: {positive_ratio}, Negative Ratio: {negative_ratio}, Posted: {generated_text}\n")

-

# Footer
st.markdown("---")
@@ -84,4 +160,4 @@ st.markdown(
    </div>
    """,
    unsafe_allow_html=True
- )
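A side note on the removed generation code: the comment says the token limit corresponds to 280 characters, but max_length=25 caps the whole sequence at 25 GPT-2 tokens, prompt included, which is usually far short of 280 characters. Below is a minimal sketch of how the cap could be written explicitly, assuming the same 'gpt2' checkpoint; the 60-token budget is illustrative and not taken from the commit.

    from transformers import GPT2LMHeadModel, GPT2Tokenizer

    # Illustrative only, not part of the commit.
    tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
    model = GPT2LMHeadModel.from_pretrained('gpt2')
    prompt = "Write an exciting tweet about BBB25 with a positive tone in Portuguese."

    input_ids = tokenizer.encode(prompt, return_tensors='pt')
    outputs = model.generate(
        input_ids,
        max_new_tokens=60,  # counts only newly generated tokens, not the prompt
        do_sample=True,
        pad_token_id=tokenizer.eos_token_id,
    )
    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    generated_text = generated_text[:280]  # enforce Twitter's 280-character limit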
 
Updated app.py after this commit (added lines are marked with +):

import tweepy
from transformers import pipeline, GPT2LMHeadModel, GPT2Tokenizer
import os
+ import streamlit as st
+ from datetime import datetime
+
+ # Check the environment variables
+ required_vars = [
+     'TWITTER_API_KEY',
+     'TWITTER_API_SECRET_KEY',
+     'TWITTER_ACCESS_TOKEN',
+     'TWITTER_ACCESS_TOKEN_SECRET',
+     'TWITTER_BEARER_TOKEN'
+ ]
+
+ # Initial check of the environment variables
+ missing_vars = []
+ for var in required_vars:
+     if os.getenv(var) is None:
+         missing_vars.append(var)
+         print(f"Erro: A variável de ambiente '{var}' não está definida.")
+     else:
+         print(f"{var} carregada com sucesso.")
+
+ if missing_vars:
+     raise ValueError(f"As seguintes variáveis de ambiente são necessárias: {', '.join(missing_vars)}")

# Twitter authentication for reading
client = tweepy.Client(
    bearer_token=os.getenv('TWITTER_BEARER_TOKEN')
)

# Twitter authentication for posting
auth = tweepy.OAuth1UserHandler(
    os.getenv('TWITTER_API_KEY'),
    os.getenv('TWITTER_API_SECRET_KEY'),

api = tweepy.API(auth)

+ # Query and tweet-field configuration
+ query = 'BBB25 -filter:retweets lang:pt -is:reply'
+ tweet_fields = ['text', 'created_at', 'lang', 'public_metrics']

try:
+     # Fetch tweets with the specified fields
+     tweets = client.search_recent_tweets(
+         query=query,
+         max_results=100,
+         tweet_fields=tweet_fields
+     )

+     if not tweets.data:
+         print("Nenhum tweet encontrado")
+         st.error("Nenhum tweet encontrado para análise")
+         st.stop()
+
+     # Sentiment analysis
+     sentiment_pipeline = pipeline(
+         'sentiment-analysis',
+         model='cardiffnlp/twitter-xlm-roberta-base-sentiment'
+     )
+
+     sentiments = []
+     for tweet in tweets.data:
+         # Extra check to make sure the tweets really are in Portuguese
+         if hasattr(tweet, 'lang') and tweet.lang == 'pt':
+             result = sentiment_pipeline(tweet.text)
+             sentiments.append(result[0]['label'])
+
+     # Compute the ratios
+     if sentiments:
+         positive = sentiments.count('positive')
+         negative = sentiments.count('negative')
+         neutral = sentiments.count('neutral')
+         total = len(sentiments)
+
+         positive_ratio = positive / total
+         negative_ratio = negative / total
+         neutral_ratio = neutral / total
+
+         # Generate the message with AI
+         tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
+         model = GPT2LMHeadModel.from_pretrained('gpt2')
+
+         if positive_ratio > 0.6:
+             prompt = "Write an exciting tweet about BBB25 with a positive tone in Portuguese."
+         elif negative_ratio > 0.6:
+             prompt = "Write an informative tweet about BBB25 with a neutral tone in Portuguese."
+         else:
+             prompt = "Write a buzzing tweet about BBB25 with an engaging tone in Portuguese."
+
+         # Generate text
+         input_ids = tokenizer.encode(prompt, return_tensors='pt')
+         outputs = model.generate(
+             input_ids,
+             max_length=25,
+             do_sample=True,
+             pad_token_id=tokenizer.eos_token_id
+         )
+
+         generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
+         generated_text = generated_text[:280]  # Limit to 280 characters
+
+         try:
+             # Post to Twitter
+             api.update_status(status=generated_text)
+             print(f"Tweet postado com sucesso: {generated_text}")
+
+             # Streamlit interface
+             st.title("Análise de Sentimentos - BBB25")
+
+             # Show the statistics
+             col1, col2, col3 = st.columns(3)
+             with col1:
+                 st.metric("Sentimento Positivo", f"{positive_ratio:.1%}")
+             with col2:
+                 st.metric("Sentimento Neutro", f"{neutral_ratio:.1%}")
+             with col3:
+                 st.metric("Sentimento Negativo", f"{negative_ratio:.1%}")
+
+             # Show the generated tweet
+             st.subheader("Tweet Gerado e Postado")
+             st.write(generated_text)
+
+             # Logging
+             log_entry = {
+                 'timestamp': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
+                 'positive_ratio': positive_ratio,
+                 'negative_ratio': negative_ratio,
+                 'neutral_ratio': neutral_ratio,
+                 'tweet': generated_text
+             }
+
+             with open('posting_log.txt', 'a') as f:
+                 f.write(f"{str(log_entry)}\n")
+
+         except Exception as e:
+             st.error(f"Erro ao postar tweet: {str(e)}")
+             print(f"Erro ao postar: {e}")
+
+ except tweepy.errors.BadRequest as e:
+     st.error(f"Erro na requisição ao Twitter: {str(e)}")
+     print(f"Erro na requisição: {str(e)}")
+ except tweepy.errors.TweepyException as e:
+     st.error(f"Erro do Tweepy: {str(e)}")
+     print(f"Erro do Tweepy: {str(e)}")
+ except Exception as e:
+     st.error(f"Erro inesperado: {str(e)}")
+     print(f"Erro inesperado: {str(e)}")

# Footer
st.markdown("---")

    </div>
    """,
    unsafe_allow_html=True
+ )
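Both the old and the new code still post with api.update_status, which goes through the Twitter API v1.1 statuses/update endpoint; on the newer Free/Basic access tiers that endpoint commonly returns a 403. A minimal sketch of the v2 equivalent with Tweepy's Client.create_tweet, reusing the same environment variables, is shown below; the helper name is hypothetical and not part of the commit.

    import os
    import tweepy

    # Hypothetical helper, not from the commit: post through the v2 endpoint.
    def post_tweet_v2(text: str) -> None:
        client = tweepy.Client(
            consumer_key=os.getenv('TWITTER_API_KEY'),
            consumer_secret=os.getenv('TWITTER_API_SECRET_KEY'),
            access_token=os.getenv('TWITTER_ACCESS_TOKEN'),
            access_token_secret=os.getenv('TWITTER_ACCESS_TOKEN_SECRET'),
        )
        client.create_tweet(text=text[:280])  # trim defensively to 280 characters

If the app adopted this, the call would replace api.update_status(status=generated_text) inside the inner try block.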