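"""Selenium helpers for scraping vehicle tax (PKB) information from jambisamsat.net.

Functions:
    take_webdata(url): load a URL in headless Chrome and return (screenshot, page title).
    scrape_vehicle(driver): parse the vehicle detail table and the "det_pkb" payment rows.
    get_vehicle_info(plate_number): submit a plate number on infopkb.html and return
        (full-page screenshot, page title).
"""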
from selenium import webdriver
from selenium.common.exceptions import WebDriverException
from PIL import Image
from io import BytesIO
import time
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC


def take_webdata(url):
    options = webdriver.ChromeOptions()
    options.add_argument('--headless')
    options.add_argument('--no-sandbox')
    options.add_argument('--disable-dev-shm-usage')
    wd = None
    page_title = ""
    try:
        wd = webdriver.Chrome(options=options)
        wd.set_window_size(1080, 720)  # Adjust the window size here
        wd.get(url)
        wd.implicitly_wait(5)
        # Get the page title
        page_title = wd.title
        screenshot = wd.get_screenshot_as_png()
    except WebDriverException:
        # Return a 1x1 placeholder image if the page could not be loaded
        return Image.new('RGB', (1, 1)), page_title
    finally:
        if wd:
            wd.quit()
    return Image.open(BytesIO(screenshot)), page_title


def scrape_vehicle(driver):
    data_kendaraan = {}
    try:
        # Key/value pairs live in three-column table rows: label, separator, value
        rows = driver.find_elements(By.CSS_SELECTOR, "table tr")
        for row in rows:
            cols = row.find_elements(By.TAG_NAME, "td")
            if len(cols) >= 3:
                key = cols[0].text.strip().lower().replace(".", "").replace(" ", "_")
                value = cols[2].text.strip()
                data_kendaraan[key] = value
    except Exception as e:
        print("Failed to parse vehicle table:", e)

    rincians = []
    try:
        container = driver.find_element(By.ID, "det_pkb")
        rows = container.find_elements(By.CLASS_NAME, "row")
        for row in rows[1:]:  # skip header row
            cols = row.find_elements(By.TAG_NAME, "p")
            if len(cols) >= 3:
                rincian = {
                    "pokok": cols[0].text.strip(),
                    "denda": cols[1].text.strip(),
                    "total": cols[2].text.strip(),
                }
                if len(cols) > 3:
                    rincian["jenis"] = cols[3].text.strip().upper()
                rincians.append(rincian)
    except Exception as e:
        print("Failed to parse det_pkb:", e)
    return data_kendaraan, rincians


def get_vehicle_info(plate_number: str):
    # Configure headless Chrome
    options = webdriver.ChromeOptions()
    options.add_argument("--headless")
    options.add_argument("--disable-gpu")
    options.add_argument("--no-sandbox")
    # No explicit chromedriver path: Selenium 4.6+ resolves the driver automatically
    driver = webdriver.Chrome(options=options)
    page_title = ""
    try:
        driver.get("https://www.jambisamsat.net/infopkb.html")
        time.sleep(1)
        WebDriverWait(driver, 10).until(
            EC.presence_of_element_located((By.ID, "no_polisi"))
        )
        input_field = driver.find_element(By.ID, "no_polisi")
        input_field.clear()
        input_field.send_keys(plate_number)
        submit_button = driver.find_element(By.CSS_SELECTOR, 'button.btn.btn-primary[type="submit"]')
        submit_button.click()
        # Wait for the results page to load
        WebDriverWait(driver, 10).until(
            EC.url_contains("infopkb.php")
        )
        driver.implicitly_wait(3)
        # Resize the window to the full document height so the screenshot
        # captures the whole page
        scroll_height = driver.execute_script("return document.body.scrollHeight")
        driver.set_window_size(1920, scroll_height + 200)
        time.sleep(1)
        data_kendaraan, rincian = scrape_vehicle(driver)
        print(data_kendaraan, rincian)
        page_title = driver.title
        screenshot = driver.get_screenshot_as_png()
        return Image.open(BytesIO(screenshot)), page_title
    except WebDriverException:
        # Return a 1x1 placeholder image if the browser or page fails
        return Image.new('RGB', (1, 1)), page_title
    finally:
        driver.quit()
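

# Usage sketch (an assumption, not part of the original module): run this file
# directly to fetch data for one plate. The plate number and output path below
# are hypothetical placeholders.
if __name__ == "__main__":
    image, title = get_vehicle_info("BH 1234 AB")  # hypothetical plate number
    print("Page title:", title)
    image.save("result.png")  # hypothetical output path for the screenshot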