Sameercodes committed
Commit e92b112 · verified · 1 Parent(s): bdbe8e1

Update app.py

Files changed (1):
app.py +30 -167
app.py CHANGED
@@ -1,177 +1,40 @@
- import time
- import random
- import re
- from datetime import datetime
- import pandas as pd
  import gradio as gr
- from selenium import webdriver
- from selenium.webdriver.common.by import By
- from selenium.webdriver.chrome.options import Options
- from selenium.webdriver.chrome.service import Service
-
- def scrape_amazon(search_term, pincode, num_pages=5):
-     options = Options()
-     options.add_argument('--headless')
-     options.add_argument('--disable-blink-features=AutomationControlled')
-     options.add_argument('--disable-gpu')
-     options.add_argument('--no-sandbox')
-
-     driver = webdriver.Chrome(service=Service(), options=options)
-
-     all_products = []
-     seen_titles = set()
-
-     for page in range(1, num_pages + 1):
-         url = f"https://www.amazon.in/s?k={search_term}&page={page}&crid=2M096C61O4MLT&sprefix={search_term},aps,283"
-         driver.get(url)
-
-         time.sleep(random.uniform(3, 5))  # Let page load
-
-         # Scroll down to load dynamic content
-         driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
-         time.sleep(random.uniform(2, 4))
-
-         products = driver.find_elements(By.XPATH, "//div[@data-component-type='s-search-result']")
-         print(f"Scraping page {page}, found {len(products)} products...")
-
-         for product in products:
-             try:
-                 title_elem = product.find_element(By.XPATH, ".//h2//span")
-                 title = title_elem.text.strip()
-             except:
-                 title = "No Title"
-
-             if title in seen_titles:
-                 continue
-             seen_titles.add(title)
-
-             try:
-                 link_elem = product.find_element(By.XPATH, ".//a[@class='a-link-normal s-no-outline']")
-                 link = link_elem.get_attribute('href')
-             except:
-                 link = "No Link"
-
-             try:
-                 price_elem = product.find_element(By.XPATH, ".//span[@class='a-price-whole']")
-                 selling_price = price_elem.text.replace(',', '').strip()
-             except:
-                 try:
-                     price_elem = product.find_element(By.XPATH, ".//span[@class='a-offscreen']")
-                     selling_price = price_elem.text.replace('₹', '').replace(',', '').strip()
-                 except:
-                     selling_price = "No Price"
-
-             try:
-                 mrp_elem = product.find_element(By.XPATH, ".//span[@class='a-price a-text-price' and @data-a-strike='true']//span[@class='a-offscreen']")
-                 raw_price = mrp_elem.get_attribute("textContent")
-                 mrp = raw_price.replace('₹', '').replace(',', '').strip()
-             except:
-                 mrp = "No Price"
-
-             try:
-                 if selling_price != "No Price" and mrp != "No Price":
-                     discount_percent = round(100 * (float(mrp) - float(selling_price)) / float(mrp), 2)
-                 else:
-                     discount_percent = 0.0
-             except:
-                 discount_percent = 0.0
-
-             try:
-                 grammage_match = re.search(r'(\d+\.?\d*\s?(ml|g|kg|l))', title.lower())
-                 grammage = grammage_match.group(0) if grammage_match else "No Grammage"
-             except:
-                 grammage = "No Grammage"
-
-             try:
-                 badge = product.find_element(By.XPATH, ".//div[contains(@class, 'a-color-secondary')]//span[contains(translate(text(), 'ABCDEFGHIJKLMNOPQRSTUVWXYZ', 'abcdefghijklmnopqrstuvwxyz'), 'deal') or contains(translate(text(), 'ABCDEFGHIJKLMNOPQRSTUVWXYZ', 'abcdefghijklmnopqrstuvwxyz'), 'coupon') or contains(translate(text(), 'ABCDEFGHIJKLMNOPQRSTUVWXYZ', 'abcdefghijklmnopqrstuvwxyz'), 'save') or contains(translate(text(), 'ABCDEFGHIJKLMNOPQRSTUVWXYZ', 'abcdefghijklmnopqrstuvwxyz'), 'limited')]")
-                 deal_tag = badge.text.strip()
-             except:
-                 deal_tag = "No Deal"
-
-             try:
-                 qty = product.find_element(By.XPATH, ".//span[contains(text(),'bought in past month')]").text.strip()
-             except:
-                 qty = "No data"
-
-             try:
-                 rating_elem = product.find_element(By.XPATH, ".//span[@class='a-icon-alt']")
-                 rating = rating_elem.get_attribute("textContent").split()[0]
-             except:
-                 rating = "No Rating"
-
-             try:
-                 reviews = product.find_element(By.XPATH, ".//a[contains(@aria-label,'ratings')]/span").text.strip()
-             except:
-                 reviews = "No Reviews"
-
-             try:
-                 ad_elem = product.find_element(By.XPATH, ".//span[contains(@class, 'a-color-secondary') and contains(text(), 'Sponsored')]")
-                 ad_status = "Ad"
-             except:
-                 ad_status = "Not Ad"
-
-             product_data = {
-                 'Title': title,
-                 'Grammage': grammage,
-                 'Selling Price': selling_price,
-                 'MRP': mrp,
-                 'Discount %': discount_percent,
-                 'Deal Tags': deal_tag,
-                 'Quantity Bought': qty,
-                 'Rating': rating,
-                 'Reviews': reviews,
-                 'Link': link,
-                 'Ad/Not Ad': ad_status,
-                 'Date': datetime.now().strftime("%d-%m-%Y"),
-                 'Search Term': search_term,
-                 'Pincode': pincode,
-                 'Category': search_term,
-             }
-
-             all_products.append(product_data)
-
-         time.sleep(random.uniform(2, 4))  # Pause between pages
-
-     driver.quit()
-
-     df = pd.DataFrame(all_products)
-
-     today_date = datetime.now().strftime("%Y-%m-%d")
-     filename_base = f"{search_term}_scrape_{today_date}"
-
-     excel_path = f"{filename_base}.xlsx"
-     csv_path = f"{filename_base}.csv"
-     json_path = f"{filename_base}.json"
-
-     df.to_excel(excel_path, index=False)
-     df.to_csv(csv_path, index=False)
-     df.to_json(json_path, orient="records", lines=True)
-
-     return excel_path, csv_path, json_path, df
-
- ### Now the Gradio interface
-
- def gradio_interface(search_term, pincode, num_pages):
-     excel_path, csv_path, json_path, df = scrape_amazon(search_term, pincode, int(num_pages))
-     return df, excel_path, csv_path, json_path
-
- # Gradio App
- app = gr.Interface(
-     fn=gradio_interface,
-     inputs=[
-         gr.Textbox(label="Search Term"),
-         gr.Textbox(label="Pincode"),
-         gr.Slider(minimum=1, maximum=10, step=1, value=2, label="Number of Pages to Scrape")
-     ],
-     outputs=[
-         gr.Dataframe(label="Scraped Data"),
-         gr.File(label="Excel File"),
-         gr.File(label="CSV File"),
-         gr.File(label="JSON File"),
-     ],
-     title="🛒 Amazon.in Product Scraper",
-     description="Enter a search term, pincode, and number of pages. Download the results as Excel/CSV/JSON.",
- )
-
- if __name__ == "__main__":
-     app.launch()
+ from scraper import scrape_amazon_interface
+
+ with gr.Blocks(theme="default") as demo:
+     gr.Markdown(
+         "<h1 style='text-align: center; color: orange;'>🛒 Amazon.in Scraper</h1>"
+         "<p style='text-align: center;'>Scrape product details based on your search term!</p>"
+     )
+
+     with gr.Row():
+         with gr.Column(scale=1):
+             search_term = gr.Textbox(label="🔍 Search Term", placeholder="e.g., Atta", value="")
+             pincode = gr.Textbox(label="📍 Pincode", placeholder="e.g., 400076", value="")
+             num_pages = gr.Slider(label="📄 Number of Pages to Scrape", minimum=1, maximum=10, step=1, value=1)
+
+             submit_btn = gr.Button("Submit", variant="primary")
+             clear_btn = gr.Button("Clear", variant="secondary")
+
+         with gr.Column(scale=2):
+             output_file = gr.File(label="⬇️ Scraped Excel File Download")
+
+     def run_scraper(search_term, pincode, num_pages):
+         excel_path = scrape_amazon_interface(search_term, pincode, num_pages)
+         return excel_path
+
+     submit_btn.click(
+         run_scraper,
+         inputs=[search_term, pincode, num_pages],
+         outputs=[output_file]
+     )
+
+     clear_btn.click(
+         lambda: ("", "", 1, None),
+         inputs=[],
+         outputs=[search_term, pincode, num_pages, output_file]
+     )
+
+ demo.launch(share=True)
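Note: the rewritten app.py imports scrape_amazon_interface from a scraper module that is not part of this commit. As a reading aid, below is a minimal sketch of what that helper might look like, assuming the scrape_amazon function deleted from app.py above was moved into scraper.py largely unchanged; the signature and body are inferred from the old code and the new call site, not taken from the actual module.

scraper.py (hypothetical sketch):

    # scraper.py - hypothetical sketch, not included in this commit.
    # Assumes scrape_amazon() (removed from app.py above) now lives here
    # unchanged, along with its Selenium/pandas imports.

    def scrape_amazon_interface(search_term, pincode, num_pages):
        # gr.Slider may deliver the page count as a float, so coerce it,
        # as the old gradio_interface wrapper did with int(num_pages).
        excel_path, csv_path, json_path, df = scrape_amazon(search_term, pincode, int(num_pages))
        # The new Blocks UI exposes a single gr.File output, so only the
        # Excel path is returned; gr.File accepts a filepath string.
        return excel_path

If that assumption holds, the commit's +30/-167 reshuffle is a straightforward split: the Selenium scraping logic moves out of app.py into scraper.py, and app.py keeps only the Gradio Blocks UI, trading the old gr.Interface with Excel/CSV/JSON downloads for a single Excel download.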