import * as cheerio from "cheerio";
import { ScrapingBeeClient } from "scrapingbee";
import { extractMetadata } from "./utils/metadata";
import dotenv from "dotenv";
import { Document, PageOptions } from "../../lib/entities";
import { parseMarkdown } from "../../lib/html-to-markdown";
import { excludeNonMainTags } from "./utils/excludeTags";
import { urlSpecificParams } from "./utils/custom/website_params";

dotenv.config();
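
/**
 * Builds the request parameters for a scrape: sensible defaults for timeout
 * and wait_browser, plus any site-specific overrides registered in
 * urlSpecificParams (keyed by hostname without the "www." prefix).
 */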
export async function generateRequestParams(
  url: string,
  wait_browser: string = "domcontentloaded",
  timeout: number = 15000
): Promise<any> {
  const defaultParams = {
    url: url,
    params: { timeout: timeout, wait_browser: wait_browser },
    headers: { "ScrapingService-Request": "TRUE" },
  };

  try {
    const urlKey = new URL(url).hostname.replace(/^www\./, "");
    if (urlSpecificParams.hasOwnProperty(urlKey)) {
      return { ...defaultParams, ...urlSpecificParams[urlKey] };
    } else {
      return defaultParams;
    }
  } catch (error) {
    console.error(`Error generating URL key: ${error}`);
    return defaultParams;
  }
}
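
/**
 * Placeholder for the custom firecrawl scraper; returns an empty string until
 * that scraper is merged into this repository.
 */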
export async function scrapWithCustomFirecrawl(
  url: string,
  options?: any
): Promise<string> {
  try {
    // TODO: merge the custom firecrawl scraper into mono-repo when ready
    return "";
  } catch (error) {
    console.error(`Error scraping with custom firecrawl-scraper: ${error}`);
    return "";
  }
}
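
/**
 * Scrapes a URL through the ScrapingBee API and returns the decoded response
 * body, or an empty string on failure.
 */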
export async function scrapWithScrapingBee(
  url: string,
  wait_browser: string = "domcontentloaded",
  timeout: number = 15000
): Promise<string> {
  try {
    const client = new ScrapingBeeClient(process.env.SCRAPING_BEE_API_KEY);
    const clientParams = await generateRequestParams(
      url,
      wait_browser,
      timeout
    );

    const response = await client.get(clientParams);

    if (response.status !== 200 && response.status !== 404) {
      console.error(
        `Scraping bee error in ${url} with status code ${response.status}`
      );
      return "";
    }
    const decoder = new TextDecoder();
    const text = decoder.decode(response.data);
    return text;
  } catch (error) {
    console.error(`Error scraping with Scraping Bee: ${error}`);
    return "";
  }
}
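
/**
 * Scrapes a URL through the self-hosted Playwright microservice and returns
 * the rendered page HTML, or an empty string on failure.
 */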
export async function scrapWithPlaywright(url: string): Promise<string> {
  try {
    const reqParams = await generateRequestParams(url);
    const wait_playwright = reqParams["params"]?.wait ?? 0;

    const response = await fetch(process.env.PLAYWRIGHT_MICROSERVICE_URL, {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
      },
      body: JSON.stringify({ url: url, wait: wait_playwright }),
    });

    if (!response.ok) {
      console.error(
        `Error fetching w/ playwright server -> URL: ${url} with status: ${response.status}`
      );
      return "";
    }

    const data = await response.json();
    const html = data.content;
    return html ?? "";
  } catch (error) {
    console.error(`Error scraping with Playwright: ${error}`);
    return "";
  }
}
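
/**
 * Scrapes a single URL, trying each available scraper in order until one
 * returns enough content, and builds a Document with markdown, optional raw
 * HTML, and page metadata.
 */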
export async function scrapSingleUrl(
  urlToScrap: string,
  pageOptions: PageOptions = { onlyMainContent: true, includeHtml: false }
): Promise<Document> {
  urlToScrap = urlToScrap.trim();

  const removeUnwantedElements = (html: string, pageOptions: PageOptions) => {
    const soup = cheerio.load(html);
    soup("script, style, iframe, noscript, meta, head").remove();
    if (pageOptions.onlyMainContent) {
      // remove any other tags that are not in the main content
      excludeNonMainTags.forEach((tag) => {
        soup(tag).remove();
      });
    }
    return soup.html();
  };
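
  // Runs a single scraping method and returns a [markdown, rawHtml] tuple;
  // empty strings signal that the caller should fall back to the next method.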
  const attemptScraping = async (
    url: string,
    method:
      | "firecrawl-scraper"
      | "scrapingBee"
      | "playwright"
      | "scrapingBeeLoad"
      | "fetch"
  ) => {
    let text = "";
    switch (method) {
      case "firecrawl-scraper":
        text = await scrapWithCustomFirecrawl(url);
        break;
      case "scrapingBee":
        if (process.env.SCRAPING_BEE_API_KEY) {
          text = await scrapWithScrapingBee(
            url,
            "domcontentloaded",
            pageOptions.fallback === false ? 7000 : 15000
          );
        }
        break;
      case "playwright":
        if (process.env.PLAYWRIGHT_MICROSERVICE_URL) {
          text = await scrapWithPlaywright(url);
        }
        break;
      case "scrapingBeeLoad":
        if (process.env.SCRAPING_BEE_API_KEY) {
          text = await scrapWithScrapingBee(url, "networkidle2");
        }
        break;
      case "fetch":
        try {
          const response = await fetch(url);
          if (!response.ok) {
            console.error(
              `Error fetching URL: ${url} with status: ${response.status}`
            );
            return ["", ""];
          }
          text = await response.text();
        } catch (error) {
          console.error(`Error scraping URL: ${error}`);
          return ["", ""];
        }
        break;
    }

    // TODO: add an option to return markdown or structured/extracted content
    let cleanedHtml = removeUnwantedElements(text, pageOptions);

    return [await parseMarkdown(cleanedHtml), text];
  };

  try {
    let [text, html] = ["", ""];
    let urlKey = urlToScrap;
    try {
      urlKey = new URL(urlToScrap).hostname.replace(/^www\./, "");
    } catch (error) {
      console.error(`Invalid URL key, trying: ${urlToScrap}`);
    }
    const defaultScraper = urlSpecificParams[urlKey]?.defaultScraper ?? "";
    const scrapersInOrder = defaultScraper
      ? [
          defaultScraper,
          "scrapingBee",
          "playwright",
          "scrapingBeeLoad",
          "fetch",
        ]
      : ["scrapingBee", "playwright", "scrapingBeeLoad", "fetch"];

    for (const scraper of scrapersInOrder) {
      [text, html] = await attemptScraping(urlToScrap, scraper);
      if (text && text.length >= 100) break;
      console.log(`${scraper} returned insufficient content, falling back`);
    }

    if (!text) {
      throw new Error(`All scraping methods failed for URL: ${urlToScrap}`);
    }

    const soup = cheerio.load(html);
    const metadata = extractMetadata(soup, urlToScrap);
    const document: Document = {
      content: text,
      markdown: text,
      html: pageOptions.includeHtml ? html : undefined,
      metadata: { ...metadata, sourceURL: urlToScrap },
    };

    return document;
  } catch (error) {
    console.error(`Error: ${error} - Failed to fetch URL: ${urlToScrap}`);
    return {
      content: "",
      markdown: "",
      html: "",
      metadata: { sourceURL: urlToScrap },
    } as Document;
  }
}
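
/*
 * Example usage (a minimal sketch, not part of the module): assumes this file
 * is importable as "./single_url" and that SCRAPING_BEE_API_KEY and/or
 * PLAYWRIGHT_MICROSERVICE_URL are configured so at least one scraper can run.
 *
 *   import { scrapSingleUrl } from "./single_url";
 *
 *   const doc = await scrapSingleUrl("https://example.com", {
 *     onlyMainContent: true,
 *     includeHtml: true,
 *   });
 *   console.log(doc.markdown);
 */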