Sameercodes committed on
Commit 462263d · verified · 1 Parent(s): 8573458

Delete Scraper.py

Files changed (1)
  1. Scraper.py +0 -150
Scraper.py DELETED
@@ -1,150 +0,0 @@
- import time
- import random
- import re
- from datetime import datetime
- import pandas as pd
- from selenium import webdriver
- from selenium.webdriver.common.by import By
- from selenium.webdriver.chrome.options import Options
- from selenium.webdriver.chrome.service import Service
- from selenium.webdriver.support.ui import WebDriverWait
- from selenium.webdriver.support import expected_conditions as EC
-
- def scrape_amazon(search_term, pincode, num_pages=5):
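-     # Headless Chrome; disabling the AutomationControlled Blink feature keeps
-     # navigator.webdriver from advertising that the browser is automated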
-     options = Options()
-     options.add_argument('--headless')
-     options.add_argument('--disable-blink-features=AutomationControlled')
-     options.add_argument('--disable-gpu')
-     options.add_argument('--no-sandbox')
-
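-     # An empty Service() lets Selenium Manager (Selenium 4.6+) locate or fetch a matching chromedriver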
-     driver = webdriver.Chrome(service=Service(), options=options)
-
-     all_products = []
-     seen_titles = set()
-
-     for page in range(1, num_pages + 1):
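-         # Search terms containing '&' or '#' should be URL-encoded (e.g. urllib.parse.quote_plus) before interpolation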
-         url = f"https://www.amazon.in/s?k={search_term}&page={page}"
-         driver.get(url)
-
-         time.sleep(random.uniform(3, 5))
-
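-         # Scroll to the bottom so lazily loaded result cards render before they are collected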
-         driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
-         time.sleep(random.uniform(2, 4))
-
-         products = driver.find_elements(By.XPATH, "//div[@data-component-type='s-search-result']")
-         print(f"Scraping page {page}, found {len(products)} products...")
-
-         for product in products:
-             try:
-                 title_elem = product.find_element(By.XPATH, ".//h2//span")
-                 title = title_elem.text.strip()
-             except Exception:
-                 title = "No Title"
-
-             if title in seen_titles:
-                 continue
-             seen_titles.add(title)
-
-             try:
-                 link_elem = product.find_element(By.XPATH, ".//a[@class='a-link-normal s-no-outline']")
-                 link = link_elem.get_attribute('href')
-                 if link and link.startswith("/"):
-                     link = "https://www.amazon.in" + link
-             except Exception:
-                 link = "No Link"
-
-             try:
-                 price_elem = product.find_element(By.XPATH, ".//span[@class='a-price-whole']")
-                 selling_price = price_elem.text.replace(',', '').strip()
-             except Exception:
-                 try:
-                     price_elem = product.find_element(By.XPATH, ".//span[@class='a-offscreen']")
-                     selling_price = price_elem.text.replace('₹', '').replace(',', '').strip()
-                 except Exception:
-                     selling_price = "No Price"
-
-             try:
-                 mrp_elem = product.find_element(By.XPATH, ".//span[@class='a-price a-text-price']//span[@class='a-offscreen']")
-                 mrp = mrp_elem.get_attribute("textContent").replace('₹', '').replace(',', '').strip()
-             except Exception:
-                 mrp = "No Price"
-
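-             # Discount % = 100 * (MRP - selling price) / MRP, e.g. MRP 200 sold at 150 gives 25.0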
-             try:
-                 if selling_price != "No Price" and mrp != "No Price":
-                     discount_percent = round(100 * (float(mrp) - float(selling_price)) / float(mrp), 2)
-                 else:
-                     discount_percent = 0.0
-             except (ValueError, ZeroDivisionError):
-                 discount_percent = 0.0
-
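-             # Pull the pack size out of the title, e.g. "500 g", "1.5 l", "250ml";
-             # the trailing \b keeps the "g" in words like "great" from matching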
-             try:
-                 grammage_match = re.search(r'(\d+\.?\d*\s?(ml|g|kg|l))\b', title.lower())
-                 grammage = grammage_match.group(0) if grammage_match else "No Grammage"
-             except Exception:
-                 grammage = "No Grammage"
-
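-             # XPath 1.0 has no lower-case(), so translate() folds the badge text to
-             # lowercase before matching 'deal' / 'coupon' / 'save' / 'limited'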
-             try:
-                 badge = product.find_element(By.XPATH, ".//div[contains(@class, 'a-color-secondary')]//span[contains(translate(text(), 'ABCDEFGHIJKLMNOPQRSTUVWXYZ', 'abcdefghijklmnopqrstuvwxyz'), 'deal') or contains(translate(text(), 'ABCDEFGHIJKLMNOPQRSTUVWXYZ', 'abcdefghijklmnopqrstuvwxyz'), 'coupon') or contains(translate(text(), 'ABCDEFGHIJKLMNOPQRSTUVWXYZ', 'abcdefghijklmnopqrstuvwxyz'), 'save') or contains(translate(text(), 'ABCDEFGHIJKLMNOPQRSTUVWXYZ', 'abcdefghijklmnopqrstuvwxyz'), 'limited')]")
-                 deal_tag = badge.text.strip()
-             except Exception:
-                 deal_tag = "No Deal"
-
-             try:
-                 qty = product.find_element(By.XPATH, ".//span[contains(text(),'bought in past month')]").text.strip()
-             except Exception:
-                 qty = "No data"
-
-             try:
-                 rating_elem = product.find_element(By.XPATH, ".//span[@class='a-icon-alt']")
-                 rating = rating_elem.get_attribute("textContent").split()[0]
-             except Exception:
-                 rating = "No Rating"
-
-             try:
-                 reviews = product.find_element(By.XPATH, ".//a[contains(@aria-label,'ratings')]/span").text.strip()
-             except Exception:
-                 reviews = "No Reviews"
-
-             try:
-                 product.find_element(By.XPATH, ".//span[contains(@class, 'a-color-secondary') and contains(text(), 'Sponsored')]")
-                 ad_status = "Ad"
-             except Exception:
-                 ad_status = "Not Ad"
-
-             product_data = {
-                 'Title': title,
-                 'Grammage': grammage,
-                 'Selling Price': selling_price,
-                 'MRP': mrp,
-                 'Discount %': discount_percent,
-                 'Deal Tags': deal_tag,
-                 'Quantity Bought': qty,
-                 'Rating': rating,
-                 'Reviews': reviews,
-                 'Link': link,
-                 'Ad/Not Ad': ad_status,
-                 'Date': datetime.now().strftime("%d-%m-%Y"),
-                 'Search Term': search_term,
-                 'Pincode': pincode,
-                 'Category': search_term,
-             }
-
-             all_products.append(product_data)
-
-         time.sleep(random.uniform(2, 4))
-
-     driver.quit()
-
-     df = pd.DataFrame(all_products)
-
-     today_date = datetime.now().strftime("%Y-%m-%d")
-     filename_base = f"{search_term}_scrape_{today_date}.xlsx"
-     df.to_excel(filename_base, index=False)
-
-     print(f"\nSaved: {filename_base}")
-     return filename_base
-
-
- # Interface function for Gradio
- def scrape_amazon_interface(search_term, pincode, num_pages):
-     # Gradio number widgets can deliver floats, so coerce before range() uses it
-     return scrape_amazon(search_term, pincode, int(num_pages))
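
The deleted wrapper existed so the scraper could be mounted in a Gradio app. A minimal wiring sketch of how it was likely consumed (hypothetical: the component choices and labels are illustrative, and it assumes gradio is installed and Scraper.py is importable):

import gradio as gr
from Scraper import scrape_amazon_interface  # the deleted module, kept here for illustration

demo = gr.Interface(
    fn=scrape_amazon_interface,
    inputs=[
        gr.Textbox(label="Search term"),
        gr.Textbox(label="Pincode"),
        gr.Slider(1, 10, value=5, step=1, label="Pages to scrape"),
    ],
    outputs=gr.File(label="Scraped .xlsx"),  # the function returns a file path
)

if __name__ == "__main__":
    demo.launch()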