# Earlier version (kept commented out): scrapes Google search results and
# extracts e-mail addresses and phone numbers from the top hits.

# from fastapi import FastAPI
# from playwright.async_api import async_playwright, TimeoutError
# import re

# app = FastAPI()

# async def scrape_google(query: str):
#     url = f"https://www.google.com/search?q={query}"
#     async with async_playwright() as pw:
#         browser = await pw.chromium.launch(headless=True)
#         context = await browser.new_context()
#         page = await context.new_page()

#         await page.goto(url, wait_until="domcontentloaded", timeout=60000)
#         try:
#             await page.wait_for_selector("div#search", timeout=10000)
#         except TimeoutError:
#             pass  # Results container never appeared; fall through and try anyway.

#         # Collect result links: each result title is an <h3> inside an <a>.
#         links = []
#         for h in await page.query_selector_all("h3"):
#             try:
#                 handle = await h.evaluate_handle("e => e.closest('a')")
#                 a = handle.as_element()  # JSHandle -> ElementHandle (None if no ancestor <a>)
#                 if a is None:
#                     continue
#                 href = await a.get_attribute("href")
#                 if not href:
#                     continue
#                 title = await h.inner_text()
#                 links.append({"title": title, "link": href})
#             except Exception:
#                 continue

#         # Visit the top 5 results and pull contact details out of the raw HTML.
#         results = []
#         for item in links[:5]:
#             try:
#                 await page.goto(item["link"], wait_until="domcontentloaded", timeout=30000)
#             except Exception:
#                 continue  # Skip pages that fail to load instead of aborting the run.
#             html = await page.content()
#             emails = re.findall(r"[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,}", html)
#             phones = re.findall(r"\+?\d[\d\s\-/]{7,}\d", html)
#             results.append({
#                 **item,
#                 "emails": list(set(emails))[:2],
#                 "phones": list(set(phones))[:2]
#             })

#         await browser.close()
#     return results

# @app.get("/search")
# async def search(query: str):
#     data = await scrape_google(query.replace(" ", "+"))
#     return {"query": query, "results": data}







from fastapi import FastAPI
from playwright.async_api import async_playwright, TimeoutError

app = FastAPI()

async def scrape_full_page(url: str):
    async with async_playwright() as pw:
        browser = await pw.chromium.launch(headless=True)
        context = await browser.new_context()
        page = await context.new_page()
        
        await page.goto(url, wait_until="domcontentloaded", timeout=60000)
        try:
            await page.wait_for_selector("body", timeout=10000)
        except TimeoutError:
            pass  # Body never became ready; continue with whatever has loaded.

        html = await page.content()
        # Extract headings & paragraphs as structured JSON
        items = await page.evaluate("""
        () => {
            const data = [];
            document.querySelectorAll('h1,h2,h3,h4,h5,h6,p').forEach(el => {
                data.push({ tag: el.tagName.toLowerCase(), text: el.innerText.trim() });
            });
            return data;
        }
        """)
        await browser.close()
    return {"html": html, "content": items}

@app.get("/scrape")
async def scrape(url: str):
    """
    Fetches the full page and returns:
    - raw HTML
    - an array of objects: { tag: 'h1'|'p'|..., text: '...' }
    """
    result = await scrape_full_page(url)
    return result
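

# --- Usage sketch ---
# Assumptions: this file is saved as main.py and served with uvicorn; the host,
# port, and URL below are placeholders.
#
#   uvicorn main:app --reload
#   curl "http://127.0.0.1:8000/scrape?url=https://example.com"
#
# Or from Python with httpx (hypothetical client-side snippet, not part of the app):
#
#   import asyncio, httpx
#
#   async def demo():
#       async with httpx.AsyncClient(timeout=90) as client:
#           r = await client.get("http://127.0.0.1:8000/scrape",
#                                params={"url": "https://example.com"})
#           print(r.json()["content"][:3])  # first few heading/paragraph items
#
#   asyncio.run(demo())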