Sameercodes committed (verified)
Commit 8573458 · Parent(s): 7be35b2

Update Scraper.py

Files changed (1):
  1. Scraper.py +14 -12
Scraper.py CHANGED
@@ -7,6 +7,8 @@ from selenium import webdriver
 from selenium.webdriver.common.by import By
 from selenium.webdriver.chrome.options import Options
 from selenium.webdriver.chrome.service import Service
+from selenium.webdriver.support.ui import WebDriverWait
+from selenium.webdriver.support import expected_conditions as EC
 
 def scrape_amazon(search_term, pincode, num_pages=5):
     options = Options()
@@ -14,8 +16,6 @@ def scrape_amazon(search_term, pincode, num_pages=5):
     options.add_argument('--disable-blink-features=AutomationControlled')
     options.add_argument('--disable-gpu')
     options.add_argument('--no-sandbox')
-    options.add_argument('--disable-dev-shm-usage')
-    options.add_argument('--window-size=1920,1080')
 
     driver = webdriver.Chrome(service=Service(), options=options)
 
@@ -27,6 +27,7 @@ def scrape_amazon(search_term, pincode, num_pages=5):
         driver.get(url)
 
         time.sleep(random.uniform(3, 5))
+
         driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
         time.sleep(random.uniform(2, 4))
 
@@ -54,7 +55,7 @@ def scrape_amazon(search_term, pincode, num_pages=5):
 
             try:
                 price_elem = product.find_element(By.XPATH, ".//span[@class='a-price-whole']")
-                selling_price = price_elem.text.replace(',', '').strip()
+                selling_price = (price_elem.text).replace(',', '').strip()
             except:
                 try:
                     price_elem = product.find_element(By.XPATH, ".//span[@class='a-offscreen']")
@@ -64,8 +65,7 @@ def scrape_amazon(search_term, pincode, num_pages=5):
 
            try:
                 mrp_elem = product.find_element(By.XPATH, ".//span[@class='a-price a-text-price']//span[@class='a-offscreen']")
-                raw_price = mrp_elem.get_attribute("textContent")
-                mrp = raw_price.replace('₹', '').replace(',', '').strip()
+                mrp = mrp_elem.get_attribute("textContent").replace('₹', '').replace(',', '').strip()
             except:
                 mrp = "No Price"
 
@@ -106,7 +106,7 @@ def scrape_amazon(search_term, pincode, num_pages=5):
                 reviews = "No Reviews"
 
             try:
-                product.find_element(By.XPATH, ".//span[contains(@class, 'a-color-secondary') and contains(text(), 'Sponsored')]")
+                ad_elem = product.find_element(By.XPATH, ".//span[contains(@class, 'a-color-secondary') and contains(text(), 'Sponsored')]")
                 ad_status = "Ad"
             except:
                 ad_status = "Not Ad"
@@ -136,13 +136,15 @@ def scrape_amazon(search_term, pincode, num_pages=5):
     driver.quit()
 
     df = pd.DataFrame(all_products)
+
     today_date = datetime.now().strftime("%Y-%m-%d")
-    filename_base = f"{search_term}_scrape_{today_date}"
-    excel_path = f"{filename_base}.xlsx"
-    df.to_excel(excel_path, index=False)
+    filename_base = f"{search_term}_scrape_{today_date}.xlsx"
+    df.to_excel(filename_base, index=False)
+
+    print(f"\nSaved: {filename_base}")
+    return filename_base
 
-    return excel_path
 
+# Interface function for Gradio
 def scrape_amazon_interface(search_term, pincode, num_pages):
-    excel_path = scrape_amazon(search_term, pincode, num_pages)
-    return excel_path
+    return scrape_amazon(search_term, pincode, num_pages)
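
Note that the two imports added in the first hunk (WebDriverWait and expected_conditions) are not referenced in any of the hunks above. A minimal sketch of how they could replace the fixed time.sleep() pauses after driver.get(url); the result-card XPath and the 15-second timeout are illustrative assumptions, not part of this commit:

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

def wait_for_results(driver, timeout=15):
    # Hypothetical helper: block until at least one search-result card is
    # present instead of sleeping for a fixed interval; raises
    # TimeoutException if nothing appears within `timeout` seconds.
    return WebDriverWait(driver, timeout).until(
        EC.presence_of_all_elements_located(
            (By.XPATH, "//div[@data-component-type='s-search-result']")
        )
    )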
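
The new "# Interface function for Gradio" comment suggests scrape_amazon_interface is wired into a Gradio app elsewhere in the repo. One possible wiring that consumes the returned .xlsx path; the component choices and labels are assumptions for illustration:

import gradio as gr

from Scraper import scrape_amazon_interface  # this module

demo = gr.Interface(
    fn=scrape_amazon_interface,
    inputs=[
        gr.Textbox(label="Search term"),
        gr.Textbox(label="Pincode"),
        gr.Number(value=5, precision=0, label="Pages"),
    ],
    outputs=gr.File(label="Scraped Excel file"),  # receives the returned file path
)

if __name__ == "__main__":
    demo.launch()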