import time
from io import BytesIO

from bs4 import BeautifulSoup
from PIL import Image
from selenium import webdriver
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait


def take_webdata(url):
    """Open the given URL in headless Chrome and return (screenshot, page title)."""
    options = webdriver.ChromeOptions()
    options.add_argument('--headless')
    options.add_argument('--no-sandbox')
    options.add_argument('--disable-dev-shm-usage')
    wd = None
    page_title = ""
    try:
        wd = webdriver.Chrome(options=options)
        wd.set_window_size(1080, 720)  # Adjust the window size here
        wd.get(url)
        wd.implicitly_wait(5)
        # Get the page title and capture the rendered page
        page_title = wd.title
        screenshot = wd.get_screenshot_as_png()
    except WebDriverException:
        # Return a 1x1 placeholder image if the page could not be loaded
        return Image.new('RGB', (1, 1)), page_title
    finally:
        if wd:
            wd.quit()
    return Image.open(BytesIO(screenshot)), page_title


def scrape_vehicle(page_source):
    """Parse the result page and return (vehicle data dict, list of tax detail rows)."""
    soup = BeautifulSoup(page_source, "html.parser")

    # Key/value pairs from the vehicle data table (label | separator | value)
    data_kendaraan = {}
    table = soup.find("table")
    if table:
        for row in table.find_all("tr"):
            cells = row.find_all("td")
            if len(cells) >= 3:
                key = cells[0].get_text(strip=True).lower().replace(".", "").replace(" ", "_")
                value = cells[2].get_text(strip=True)
                data_kendaraan[key] = value

    # Tax breakdown rows from the "det_pkb" section
    rincians = []
    rincian_div = soup.find("div", id="det_pkb")
    if rincian_div:
        rows = rincian_div.find_all("div", class_="row")
        for row in rows[1:]:  # the first row is the header
            cols = row.find_all("p")
            if len(cols) >= 3:
                rincian = {
                    "pokok": cols[0].get_text(strip=True),
                    "denda": cols[1].get_text(strip=True),
                    "total": cols[2].get_text(strip=True),
                }
                rincian["jenis"] = cols[3].get_text(strip=True).upper() if len(cols) > 3 else ""
                # Drop empty fields; skip the row entirely if nothing remains
                rincian = {k: v for k, v in rincian.items() if v}
                if rincian:
                    rincians.append(rincian)

    return data_kendaraan, rincians


def get_vehicle_info(plate_number: str):
    """Look up a plate number on jambisamsat.net and return (screenshot, page title)."""
    # Configure headless Chrome
    options = webdriver.ChromeOptions()
    options.add_argument("--headless")
    options.add_argument("--disable-gpu")
    options.add_argument("--no-sandbox")
    driver = webdriver.Chrome(options=options)
    page_title = ""
    try:
        driver.get("https://www.jambisamsat.net/infopkb.html")
        time.sleep(1)
        WebDriverWait(driver, 10).until(
            EC.presence_of_element_located((By.ID, "no_polisi"))
        )

        # Fill in the plate number and submit the form
        input_field = driver.find_element(By.ID, "no_polisi")
        input_field.clear()
        input_field.send_keys(plate_number)
        submit_button = driver.find_element(By.CSS_SELECTOR, 'button.btn.btn-primary[type="submit"]')
        submit_button.click()

        # Wait for the result page to load
        WebDriverWait(driver, 10).until(
            EC.url_contains("infopkb.php")
        )
        driver.implicitly_wait(3)

        # Resize the window to the full document height so the screenshot captures the whole page
        scroll_height = driver.execute_script("return document.body.scrollHeight")
        driver.set_window_size(1920, scroll_height + 200)
        time.sleep(1)

        data_kendaraan, rincian = scrape_vehicle(driver.page_source)
        print(data_kendaraan, rincian)

        page_title = driver.title
        screenshot = driver.get_screenshot_as_png()
        return Image.open(BytesIO(screenshot)), page_title
    except WebDriverException:
        # Return a 1x1 placeholder image if anything in the lookup failed
        return Image.new('RGB', (1, 1)), page_title
    finally:
        driver.quit()
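

# Minimal usage sketch, assuming the module is run as a standalone script.
# The plate number and output filename below are hypothetical placeholders.
if __name__ == "__main__":
    img, title = get_vehicle_info("BH 1234 XY")  # hypothetical plate number
    print(title)
    img.save("infopkb_result.png")  # hypothetical output path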