Sameercodes committed on
Commit
bdbe8e1
·
verified ·
1 Parent(s): 0d162a0

Update Scrapper.py

Browse files
Files changed (1) hide show
  1. Scrapper.py +148 -0
Scrapper.py CHANGED
@@ -0,0 +1,148 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import random
import re
import time
from datetime import datetime
from urllib.parse import quote_plus

import pandas as pd
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
10
+
11
def scrape_amazon(search_term, pincode, num_pages=5):
    """Scrape Amazon.in search results for *search_term* and save them to Excel.

    Launches a headless Chrome browser, walks up to *num_pages* result pages,
    extracts per-product details (title, grammage, prices, discount, deal tag,
    purchase volume, rating, reviews, link, ad status) and writes one row per
    unique title to ``{search_term}_scrape_{YYYY-MM-DD}.xlsx``.

    Args:
        search_term: Query text typed into the Amazon search box. Also recorded
            in the output as both 'Search Term' and 'Category'.
        pincode: Delivery pincode recorded in the output rows.
            NOTE(review): it is only stored, never applied to the browser
            session — prices reflect Amazon's default location. Confirm this
            is intended.
        num_pages: Maximum number of result pages to visit (default 5).

    Returns:
        Path of the Excel file that was written.
    """
    options = Options()
    options.add_argument('--headless')
    options.add_argument('--disable-blink-features=AutomationControlled')
    options.add_argument('--disable-gpu')
    options.add_argument('--no-sandbox')
    options.add_argument('--disable-dev-shm-usage')
    options.add_argument('--window-size=1920,1080')

    driver = webdriver.Chrome(service=Service(), options=options)

    all_products = []
    seen_titles = set()

    try:
        for page in range(1, num_pages + 1):
            # quote_plus: raw spaces/special characters in the query would
            # otherwise produce a malformed URL.
            url = f"https://www.amazon.in/s?k={quote_plus(search_term)}&page={page}"
            driver.get(url)

            # Randomized delays + a scroll to the bottom reduce the chance of
            # bot detection and let lazy-loaded results render.
            time.sleep(random.uniform(3, 5))
            driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
            time.sleep(random.uniform(2, 4))

            products = driver.find_elements(By.XPATH, "//div[@data-component-type='s-search-result']")
            print(f"Scraping page {page}, found {len(products)} products...")

            for product in products:
                row = _extract_product(product, search_term, pincode)
                # Amazon repeats items across pages; keep first occurrence only.
                if row['Title'] in seen_titles:
                    continue
                seen_titles.add(row['Title'])
                all_products.append(row)

            time.sleep(random.uniform(2, 4))
    finally:
        # Always release the browser, even if a page blows up mid-scrape —
        # otherwise the headless Chrome process leaks.
        driver.quit()

    df = pd.DataFrame(all_products)
    today_date = datetime.now().strftime("%Y-%m-%d")
    excel_path = f"{search_term}_scrape_{today_date}.xlsx"
    df.to_excel(excel_path, index=False)

    return excel_path


def _text_or(default, root, xpath):
    """Return the stripped .text of the first *xpath* match under *root*, or *default*."""
    try:
        return root.find_element(By.XPATH, xpath).text.strip()
    except NoSuchElementException:
        return default


def _extract_prices(product):
    """Return (selling_price, mrp) as digit strings, or "No Price" sentinels."""
    # Primary price widget; fall back to the offscreen (screen-reader) price.
    selling_price = _text_or(None, product, ".//span[@class='a-price-whole']")
    if selling_price is not None:
        selling_price = selling_price.replace(',', '')
    else:
        selling_price = _text_or(None, product, ".//span[@class='a-offscreen']")
        if selling_price is not None:
            selling_price = selling_price.replace('₹', '').replace(',', '')
        else:
            selling_price = "No Price"

    # MRP (struck-through list price); textContent because the node is hidden.
    try:
        raw_mrp = product.find_element(
            By.XPATH,
            ".//span[@class='a-price a-text-price']//span[@class='a-offscreen']",
        ).get_attribute("textContent")
        mrp = raw_mrp.replace('₹', '').replace(',', '').strip()
    except NoSuchElementException:
        mrp = "No Price"

    return selling_price, mrp


def _extract_product(product, search_term, pincode):
    """Build one output row (dict) from a single search-result card element."""
    title = _text_or("No Title", product, ".//h2//span")

    try:
        link = product.find_element(
            By.XPATH, ".//a[@class='a-link-normal s-no-outline']"
        ).get_attribute('href')
        if link and link.startswith("/"):
            link = "https://www.amazon.in" + link
    except NoSuchElementException:
        link = "No Link"

    selling_price, mrp = _extract_prices(product)

    # Discount only computable when both prices parsed; guard against
    # non-numeric leftovers and a zero MRP.
    try:
        if selling_price != "No Price" and mrp != "No Price":
            discount_percent = round(
                100 * (float(mrp) - float(selling_price)) / float(mrp), 2
            )
        else:
            discount_percent = 0.0
    except (ValueError, ZeroDivisionError):
        discount_percent = 0.0

    # Pack size pulled from the title text, e.g. "500 ml" / "1.5kg".
    grammage_match = re.search(r'(\d+\.?\d*\s?(ml|g|kg|l))', title.lower())
    grammage = grammage_match.group(0) if grammage_match else "No Grammage"

    # Promotional badge: any secondary-color span mentioning deal/coupon/save/limited
    # (case-insensitive via the XPath 1.0 translate() trick).
    try:
        badge = product.find_element(By.XPATH, ".//div[contains(@class, 'a-color-secondary')]//span[contains(translate(text(), 'ABCDEFGHIJKLMNOPQRSTUVWXYZ', 'abcdefghijklmnopqrstuvwxyz'), 'deal') or contains(translate(text(), 'ABCDEFGHIJKLMNOPQRSTUVWXYZ', 'abcdefghijklmnopqrstuvwxyz'), 'coupon') or contains(translate(text(), 'ABCDEFGHIJKLMNOPQRSTUVWXYZ', 'abcdefghijklmnopqrstuvwxyz'), 'save') or contains(translate(text(), 'ABCDEFGHIJKLMNOPQRSTUVWXYZ', 'abcdefghijklmnopqrstuvwxyz'), 'limited')]")
        deal_tag = badge.text.strip()
    except NoSuchElementException:
        deal_tag = "No Deal"

    qty = _text_or("No data", product, ".//span[contains(text(),'bought in past month')]")

    # Rating text lives in a hidden alt span ("4.3 out of 5 stars").
    try:
        rating = product.find_element(
            By.XPATH, ".//span[@class='a-icon-alt']"
        ).get_attribute("textContent").split()[0]
    except NoSuchElementException:
        rating = "No Rating"

    reviews = _text_or("No Reviews", product, ".//a[contains(@aria-label,'ratings')]/span")

    # Presence of the 'Sponsored' label marks the listing as an ad.
    try:
        product.find_element(By.XPATH, ".//span[contains(@class, 'a-color-secondary') and contains(text(), 'Sponsored')]")
        ad_status = "Ad"
    except NoSuchElementException:
        ad_status = "Not Ad"

    return {
        'Title': title,
        'Grammage': grammage,
        'Selling Price': selling_price,
        'MRP': mrp,
        'Discount %': discount_percent,
        'Deal Tags': deal_tag,
        'Quantity Bought': qty,
        'Rating': rating,
        'Reviews': reviews,
        'Link': link,
        'Ad/Not Ad': ad_status,
        'Date': datetime.now().strftime("%d-%m-%Y"),
        'Search Term': search_term,
        'Pincode': pincode,
        'Category': search_term,
    }
145
+
146
def scrape_amazon_interface(search_term, pincode, num_pages):
    """UI-facing wrapper: delegate straight to scrape_amazon and return the Excel path."""
    return scrape_amazon(search_term, pincode, num_pages)