SaiKumar1627 committed
Commit afc89a3 · verified · 1 Parent(s): 6734e1b

Update deliverable2.py

Files changed (1):
  1. deliverable2.py +36 -36
deliverable2.py CHANGED
@@ -79,39 +79,39 @@ class URLValidator:
 
         return " ".join(reasons) if reasons else "This source is highly credible and relevant."
 
-def rate_url_validity(user_query: str, url: str):
-    """ Main function to evaluate the validity of a webpage. """
-    content = fetch_page_content(url)  # ✅ Add a properly indented block here
-
-    # Handle errors
-    if "Error" in content:
-        return {"Validation Error": content}
-
-    domain_trust = get_domain_trust(url, content)
-    similarity_score = compute_similarity_score(user_query, content)
-    fact_check_score = check_facts(content)
-    bias_score = detect_bias(content)
-
-    final_score = (
-        (0.3 * domain_trust) +
-        (0.3 * similarity_score) +
-        (0.2 * fact_check_score) +
-        (0.2 * bias_score)
-    )
-
-    stars, icon = get_star_rating(final_score)
-    explanation = generate_explanation(domain_trust, similarity_score, fact_check_score, bias_score, final_score)
-
-    return {
-        "raw_score": {
-            "Domain Trust": domain_trust,
-            "Content Relevance": similarity_score,
-            "Fact-Check Score": fact_check_score,
-            "Bias Score": bias_score,
-            "Final Validity Score": final_score
-        },
-        "stars": {
-            "icon": icon
-        },
-        "explanation": explanation
-    }
+    def rate_url_validity(self, user_query: str, url: str):
+        """ Main function to evaluate the validity of a webpage. """
+        content = self.fetch_page_content(url)  # ✅ Properly indented and referenced with self
+
+        # Handle errors
+        if "Error" in content:
+            return {"Validation Error": content}
+
+        domain_trust = self.get_domain_trust(url, content)
+        similarity_score = self.compute_similarity_score(user_query, content)
+        fact_check_score = self.check_facts(content)
+        bias_score = self.detect_bias(content)
+
+        final_score = (
+            (0.3 * domain_trust) +
+            (0.3 * similarity_score) +
+            (0.2 * fact_check_score) +
+            (0.2 * bias_score)
+        )
+
+        stars, icon = self.get_star_rating(final_score)
+        explanation = self.generate_explanation(domain_trust, similarity_score, fact_check_score, bias_score, final_score)
+
+        return {
+            "raw_score": {
+                "Domain Trust": domain_trust,
+                "Content Relevance": similarity_score,
+                "Fact-Check Score": fact_check_score,
+                "Bias Score": bias_score,
+                "Final Validity Score": final_score
+            },
+            "stars": {
+                "icon": icon
+            },
+            "explanation": explanation
+        }
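Besides adding self to the signature, the commit re-indents the whole block so rate_url_validity lives inside URLValidator and routes fetch_page_content and the scoring helpers through the instance. Below is a minimal usage sketch, assuming URLValidator() can be constructed with no arguments and that the file is importable as deliverable2; neither is shown in this diff, and the query and URL are made-up examples. Note that the numeric stars value computed by get_star_rating is discarded; only the icon is returned.

# Hypothetical usage sketch -- the no-argument constructor and the
# example query/URL are assumptions, not part of this commit.
from deliverable2 import URLValidator

validator = URLValidator()
result = validator.rate_url_validity(
    user_query="Is climate change accelerating?",
    url="https://www.nature.com/articles/example",
)

if "Validation Error" in result:
    # fetch_page_content returned an error string
    print(result["Validation Error"])
else:
    scores = result["raw_score"]
    # Final score is the weighted sum computed in the method above:
    # 0.3*trust + 0.3*relevance + 0.2*fact-check + 0.2*bias
    print(f"Final Validity Score: {scores['Final Validity Score']}")
    print(f"Rating icon: {result['stars']['icon']}")
    print(result["explanation"])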