Sameercodes committed
Commit af5a6cb · verified · 1 Parent(s): a2fc562

Delete app.py

Files changed (1)
  1. app.py +0 -196
app.py DELETED
@@ -1,196 +0,0 @@
- # pip install selenium  (shell command; install the dependency before running)
- import time
- import random
- import re
- from datetime import datetime
- import pandas as pd
- from selenium import webdriver
- from selenium.webdriver.common.by import By
- from selenium.webdriver.chrome.options import Options
- from selenium.webdriver.chrome.service import Service
- from selenium.webdriver.support.ui import WebDriverWait
- from selenium.webdriver.support import expected_conditions as EC
- import gradio as gr
-
- def scrape_amazon(search_term, pincode, num_pages=5):
-     # Note: pincode is stored in the output rows but never applied to the Amazon session.
-     options = Options()
-     options.add_argument('--headless')
-     options.add_argument('--disable-blink-features=AutomationControlled')
-     options.add_argument('--disable-gpu')
-     options.add_argument('--no-sandbox')
-
-     driver = webdriver.Chrome(service=Service(), options=options)
-
-     all_products = []
-     seen_titles = set()
-
-     for page in range(1, num_pages + 1):
-         url = f"https://www.amazon.in/s?k={search_term}&page={page}&crid=2M096C61O4MLT&sprefix={search_term},aps,283"
-         driver.get(url)
-
-         time.sleep(random.uniform(3, 5))  # Let the page load
-
-         # Scroll down to load dynamic content
-         driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
-         time.sleep(random.uniform(2, 4))
-
-         products = driver.find_elements(By.XPATH, "//div[@data-component-type='s-search-result']")
-         print(f"Scraping page {page}, found {len(products)} products...")
-
-         for product in products:
-             # Title Extraction (also used to de-duplicate products across pages)
-             try:
-                 title_elem = product.find_element(By.XPATH, ".//h2//span")
-                 title = title_elem.text.strip()
-             except:
-                 title = "No Title"
-
-             if title in seen_titles:
-                 continue
-             seen_titles.add(title)
-
-             # Link Extraction
-             try:
-                 link_elem = product.find_element(By.XPATH, ".//a[@class='a-link-normal s-no-outline']")
-                 link = link_elem.get_attribute('href')
-                 if link and link.startswith("/"):
-                     link = "https://www.amazon.in" + link  # was amazon.com; the scrape targets amazon.in
-             except:
-                 link = "No Link"
-
-             # Selling Price Extraction
-             try:
-                 price_elem = product.find_element(By.XPATH, ".//span[@class='a-price-whole']")
-                 selling_price = price_elem.text.replace(',', '').strip()
-             except:
-                 try:
-                     price_elem = product.find_element(By.XPATH, ".//span[@class='a-offscreen']")
-                     selling_price = price_elem.text.replace('₹', '').replace(',', '').strip()
-                 except:
-                     selling_price = "No Price"
-
-             # MRP Extraction (falls back to the selling price when no strike-through price exists)
-             try:
-                 mrp_elem = product.find_element(By.XPATH, ".//span[@class='a-price a-text-price']//span[@class='a-offscreen']")
-                 mrp = mrp_elem.text.replace('₹', '').replace(',', '').strip()
-             except:
-                 mrp = selling_price
-
-             # Discount Extraction
-             try:
-                 if selling_price != "No Price" and mrp != "No Price":
-                     discount_percent = round(100 * (float(mrp) - float(selling_price)) / float(mrp), 2)
-                 else:
-                     discount_percent = 0.0
-             except:
-                 discount_percent = 0.0
-
-             # Grammage Extraction (pack size such as "500 g" or "1 kg", parsed from the title)
-             try:
-                 grammage_match = re.search(r'(\d+\.?\d*\s?(ml|g|kg|l))', title.lower())
-                 grammage = grammage_match.group(0) if grammage_match else "No Grammage"
-             except:
-                 grammage = "No Grammage"
-
-             # Deal Tags Extraction
-             try:
-                 badge = product.find_element(By.XPATH, ".//div[contains(@class, 'a-color-secondary')]//span[contains(translate(text(), 'ABCDEFGHIJKLMNOPQRSTUVWXYZ', 'abcdefghijklmnopqrstuvwxyz'), 'deal') or contains(translate(text(), 'ABCDEFGHIJKLMNOPQRSTUVWXYZ', 'abcdefghijklmnopqrstuvwxyz'), 'coupon') or contains(translate(text(), 'ABCDEFGHIJKLMNOPQRSTUVWXYZ', 'abcdefghijklmnopqrstuvwxyz'), 'save') or contains(translate(text(), 'ABCDEFGHIJKLMNOPQRSTUVWXYZ', 'abcdefghijklmnopqrstuvwxyz'), 'limited')]")
-                 deal_tag = badge.text.strip()
-             except:
-                 deal_tag = "No Deal"
-
-             # Quantity Bought Extraction
-             try:
-                 qty = product.find_element(By.XPATH, ".//span[contains(text(),'bought in past month')]").text.strip()
-             except:
-                 qty = "No data"
-
-             # Rating Extraction
-             try:
-                 rating_elem = product.find_element(By.XPATH, ".//span[contains(@aria-label,'out of 5 stars')]")
-                 rating = rating_elem.get_attribute("aria-label").split()[0]
-             except:
-                 rating = "No Rating"
-
-             # Reviews Extraction
-             try:
-                 reviews = product.find_element(By.XPATH, ".//a[contains(@aria-label,'ratings')]/span").text.strip()
-             except:
-                 reviews = "No Reviews"
-
-             # Ad / Not Ad Detection
-             try:
-                 ad_elem = product.find_element(By.XPATH, ".//span[contains(@class, 'puis-sponsored-label-text') and contains(text(), 'Sponsored')]")
-                 ad_status = "Ad"
-             except:
-                 ad_status = "Not Ad"
-
-             # Compile product info
-             product_data = {
-                 'Title': title,
-                 'Grammage': grammage,
-                 'Selling Price': selling_price,
-                 'MRP': mrp,
-                 'Discount %': discount_percent,
-                 'Deal Tags': deal_tag,
-                 'Quantity Bought': qty,
-                 'Rating': rating,
-                 'Reviews': reviews,
-                 'Link': link,
-                 'Ad/Not Ad': ad_status,
-                 'Date': datetime.now().strftime("%d-%m-%Y"),
-                 'Search Term': search_term,
-                 'Pincode': pincode,
-                 'Category': search_term,
-             }
-
-             all_products.append(product_data)
-
-         time.sleep(random.uniform(2, 4))  # Pause between pages
-
-     driver.quit()
-
-     # Create DataFrame
-     df = pd.DataFrame(all_products)
-
-     # Save outputs
-     today_date = datetime.now().strftime("%Y-%m-%d")
-     filename_base = f"{search_term}_scrape_{today_date}"
-
-     excel_path = f"{filename_base}.xlsx"
-     csv_path = f"{filename_base}.csv"
-     json_path = f"{filename_base}.json"
-
-     df.to_excel(excel_path, index=False)
-     df.to_csv(csv_path, index=False)
-     df.to_json(json_path, orient="records", lines=True)
-
-     return excel_path, csv_path, json_path
-
-
- def scrape_and_return_files(product_name, pincode, num_pages):
-     excel_path, csv_path, json_path = scrape_amazon(product_name, pincode, int(num_pages))
-     return excel_path, csv_path, json_path
-
-
- with gr.Blocks() as demo:
-     gr.Markdown("## 🛒 Amazon Scraper")
-
-     with gr.Row():
-         product_name = gr.Textbox(label="Product Name", placeholder="e.g., atta")
-         pincode = gr.Textbox(label="Pincode", placeholder="e.g., 400076")
-         num_pages = gr.Number(label="Number of Pages", value=2)
-
-     scrape_button = gr.Button("Scrape Amazon!")
-
-     output_excel = gr.File(label="Download Excel (.xlsx)")
-     output_csv = gr.File(label="Download CSV (.csv)")
-     output_json = gr.File(label="Download JSON (.json)")
-
-     scrape_button.click(
-         scrape_and_return_files,
-         inputs=[product_name, pincode, num_pages],
-         outputs=[output_excel, output_csv, output_json]
-     )
-
- demo.launch(share=True)
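
For reference, the deleted script could also be driven without the Gradio UI by calling scrape_amazon directly. A minimal sketch, assuming app.py is restored locally and Chrome with a matching chromedriver is installed (the search term and pincode below are just the UI's placeholder examples):

# Minimal sketch: call the deleted scraper directly, bypassing the Gradio UI.
# Assumes app.py has been restored and is importable from the working directory.
from app import scrape_amazon

# Note: pincode is only recorded in the output rows; the script never applies it
# to the amazon.in session, so results are not location-specific.
excel_path, csv_path, json_path = scrape_amazon("atta", "400076", num_pages=2)
print(excel_path, csv_path, json_path)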