import * as cheerio from "cheerio";
import { ScrapingBeeClient } from "scrapingbee";
import { extractMetadata } from "./utils/metadata";
import dotenv from "dotenv";
import { Document, PageOptions } from "../../lib/entities";
import { parseMarkdown } from "../../lib/html-to-markdown";
import { excludeNonMainTags } from "./utils/excludeTags";
import { urlSpecificParams } from "./utils/custom/website_params";
import { fetchAndProcessPdf } from "./utils/pdfProcessor";

dotenv.config();
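
// Scraping backends available to this module; getScrapingFallbackOrder filters
// this list down to the ones whose environment variables are configured.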
const baseScrapers = [
  "fire-engine",
  "scrapingBee",
  "playwright",
  "scrapingBeeLoad",
  "fetch",
] as const;
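
/**
 * Build the default request parameters for a scraping call and merge in any
 * URL-specific overrides defined in urlSpecificParams.
 */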
export async function generateRequestParams(
  url: string,
  wait_browser: string = "domcontentloaded",
  timeout: number = 15000
): Promise<any> {
  const defaultParams = {
    url: url,
    params: { timeout: timeout, wait_browser: wait_browser },
    headers: { "ScrapingService-Request": "TRUE" },
  };

  try {
    const urlKey = new URL(url).hostname.replace(/^www\./, "");
    if (urlSpecificParams.hasOwnProperty(urlKey)) {
      return { ...defaultParams, ...urlSpecificParams[urlKey] };
    } else {
      return defaultParams;
    }
  } catch (error) {
    console.error(`Error generating URL key: ${error}`);
    return defaultParams;
  }
}
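
/**
 * Scrape a URL with the Fire Engine service, optionally waiting before capture
 * and optionally taking a screenshot.
 * Returns an [html, screenshot] tuple; both entries are empty strings on failure.
 */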
export async function scrapWithFireEngine(
  url: string,
  waitFor: number = 0,
  screenshot: boolean = false,
  options?: any
): Promise<[string, string]> {
  try {
    const reqParams = await generateRequestParams(url);
    // If the user has passed a wait parameter in the request, use that
    const waitParam = reqParams["params"]?.wait ?? waitFor;
    const screenshotParam = reqParams["params"]?.screenshot ?? screenshot;
    console.log(`[Fire-Engine] Scraping ${url} with wait: ${waitParam}`);

    const response = await fetch(process.env.FIRE_ENGINE_BETA_URL + "/scrape", {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
      },
      body: JSON.stringify({ url: url, wait: waitParam, screenshot: screenshotParam }),
    });

    if (!response.ok) {
      console.error(
        `[Fire-Engine] Error fetching url: ${url} with status: ${response.status}`
      );
      return ["", ""];
    }

    const contentType = response.headers.get("content-type");
    if (contentType && contentType.includes("application/pdf")) {
      return [await fetchAndProcessPdf(url), ""];
    } else {
      const data = await response.json();
      const html = data.content;
      const screenshot = data.screenshot;
      return [html ?? "", screenshot ?? ""];
    }
  } catch (error) {
    console.error(`[Fire-Engine][c] Error fetching url: ${url} -> ${error}`);
    return ["", ""];
  }
}
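
/**
 * Scrape a URL with the ScrapingBee API.
 * Returns the page content as text, or an empty string on failure.
 */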
export async function scrapWithScrapingBee(
  url: string,
  wait_browser: string = "domcontentloaded",
  timeout: number = 15000
): Promise<string> {
  try {
    const client = new ScrapingBeeClient(process.env.SCRAPING_BEE_API_KEY);
    const clientParams = await generateRequestParams(
      url,
      wait_browser,
      timeout
    );

    const response = await client.get(clientParams);
    if (response.status !== 200 && response.status !== 404) {
      console.error(
        `[ScrapingBee] Error fetching url: ${url} with status code ${response.status}`
      );
      return "";
    }

    const contentType = response.headers["content-type"];
    if (contentType && contentType.includes("application/pdf")) {
      return fetchAndProcessPdf(url);
    } else {
      const decoder = new TextDecoder();
      const text = decoder.decode(response.data);
      return text;
    }
  } catch (error) {
    console.error(`[ScrapingBee][c] Error fetching url: ${url} -> ${error}`);
    return "";
  }
}
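
/**
 * Scrape a URL through the Playwright microservice, optionally waiting before
 * the page content is captured.
 * Returns the rendered HTML, or an empty string on failure.
 */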
export async function scrapWithPlaywright(url: string, waitFor: number = 0): Promise<string> {
  try {
    const reqParams = await generateRequestParams(url);
    // If the user has passed a wait parameter in the request, use that
    const waitParam = reqParams["params"]?.wait ?? waitFor;

    const response = await fetch(process.env.PLAYWRIGHT_MICROSERVICE_URL, {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
      },
      body: JSON.stringify({ url: url, wait: waitParam }),
    });

    if (!response.ok) {
      console.error(
        `[Playwright] Error fetching url: ${url} with status: ${response.status}`
      );
      return "";
    }

    const contentType = response.headers.get("content-type");
    if (contentType && contentType.includes("application/pdf")) {
      return fetchAndProcessPdf(url);
    } else {
      const data = await response.json();
      const html = data.content;
      return html ?? "";
    }
  } catch (error) {
    console.error(`[Playwright][c] Error fetching url: ${url} -> ${error}`);
    return "";
  }
}
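
/**
 * Scrape a URL with a plain HTTP fetch (no JavaScript rendering).
 * Returns the response body as text, or an empty string on failure.
 */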
export async function scrapWithFetch(url: string): Promise<string> {
  try {
    const response = await fetch(url);
    if (!response.ok) {
      console.error(
        `[Fetch] Error fetching url: ${url} with status: ${response.status}`
      );
      return "";
    }

    const contentType = response.headers.get("content-type");
    if (contentType && contentType.includes("application/pdf")) {
      return fetchAndProcessPdf(url);
    } else {
      const text = await response.text();
      return text;
    }
  } catch (error) {
    console.error(`[Fetch][c] Error fetching url: ${url} -> ${error}`);
    return "";
  }
}

/**
 * Get the order of scrapers to be used for scraping a URL.
 * If the user doesn't have the env vars set for a specific scraper, it is removed from the order.
 * @param defaultScraper The default scraper to use if the URL does not have a specific scraper order defined
 * @param isWaitPresent Whether the request specifies a wait time (moves the browser-based scrapers to the front)
 * @param isScreenshotPresent Whether the request asks for a screenshot (moves the browser-based scrapers to the front)
 * @returns The order of scrapers to be used for scraping a URL
 */
function getScrapingFallbackOrder(
  defaultScraper?: string,
  isWaitPresent: boolean = false,
  isScreenshotPresent: boolean = false
) {
  const availableScrapers = baseScrapers.filter((scraper) => {
    switch (scraper) {
      case "scrapingBee":
      case "scrapingBeeLoad":
        return !!process.env.SCRAPING_BEE_API_KEY;
      case "fire-engine":
        return !!process.env.FIRE_ENGINE_BETA_URL;
      case "playwright":
        return !!process.env.PLAYWRIGHT_MICROSERVICE_URL;
      default:
        return true;
    }
  });

  let defaultOrder = ["scrapingBee", "fire-engine", "playwright", "scrapingBeeLoad", "fetch"];
  // Prefer the browser-based scrapers when a wait or screenshot is requested.
  if (isWaitPresent || isScreenshotPresent) {
    defaultOrder = [
      "fire-engine",
      "playwright",
      ...defaultOrder.filter((scraper) => scraper !== "fire-engine" && scraper !== "playwright"),
    ];
  }

  const filteredDefaultOrder = defaultOrder.filter((scraper: typeof baseScrapers[number]) =>
    availableScrapers.includes(scraper)
  );
  const uniqueScrapers = new Set(
    defaultScraper
      ? [defaultScraper, ...filteredDefaultOrder, ...availableScrapers]
      : [...filteredDefaultOrder, ...availableScrapers]
  );
  const scrapersInOrder = Array.from(uniqueScrapers);
  console.log(`Scrapers in order: ${scrapersInOrder}`);
  return scrapersInOrder as typeof baseScrapers[number][];
}
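
/**
 * Check the scraped HTML for markers that need special handling (currently
 * pages that include a readme-deploy meta tag) and re-scrape them with
 * Fire Engine using a 1000ms wait.
 * Returns an [html, screenshot] tuple, or null if no special case applies.
 */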
async function handleCustomScraping(text: string, url: string): Promise<[string, string] | null> {
  if (text.includes('<meta name="readme-deploy"')) {
    console.log(`Special use case detected for ${url}, using Fire Engine with wait time 1000ms`);
    return await scrapWithFireEngine(url, 1000);
  }
  return null;
}
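
/**
 * Scrape a single URL, trying each available scraper in fallback order until
 * one returns enough content, then convert the result to markdown.
 * @param urlToScrap The URL to scrape
 * @param pageOptions Options controlling main-content extraction, HTML inclusion, wait time and screenshots
 * @param existingHtml HTML already fetched by the crawler, reused when it is long enough
 * @returns A Document with markdown content, optional raw HTML and metadata
 */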
export async function scrapSingleUrl(
  urlToScrap: string,
  pageOptions: PageOptions = { onlyMainContent: true, includeHtml: false, waitFor: 0, screenshot: false },
  existingHtml: string = ""
): Promise<Document> {
  urlToScrap = urlToScrap.trim();

  const removeUnwantedElements = (html: string, pageOptions: PageOptions) => {
    const soup = cheerio.load(html);
    soup("script, style, iframe, noscript, meta, head").remove();
    if (pageOptions.onlyMainContent) {
      // remove any other tags that are not in the main content
      excludeNonMainTags.forEach((tag) => {
        soup(tag).remove();
      });
    }
    return soup.html();
  };

  const attemptScraping = async (
    url: string,
    method: typeof baseScrapers[number]
  ) => {
    let text = "";
    let screenshot = "";
    switch (method) {
      case "fire-engine":
        if (process.env.FIRE_ENGINE_BETA_URL) {
          console.log(`Scraping ${url} with Fire Engine`);
          [text, screenshot] = await scrapWithFireEngine(url, pageOptions.waitFor, pageOptions.screenshot);
        }
        break;
      case "scrapingBee":
        if (process.env.SCRAPING_BEE_API_KEY) {
          text = await scrapWithScrapingBee(
            url,
            "domcontentloaded",
            pageOptions.fallback === false ? 7000 : 15000
          );
        }
        break;
      case "playwright":
        if (process.env.PLAYWRIGHT_MICROSERVICE_URL) {
          text = await scrapWithPlaywright(url, pageOptions.waitFor);
        }
        break;
      case "scrapingBeeLoad":
        if (process.env.SCRAPING_BEE_API_KEY) {
          text = await scrapWithScrapingBee(url, "networkidle2");
        }
        break;
      case "fetch":
        text = await scrapWithFetch(url);
        break;
    }

    // Check for custom scraping conditions
    const customScrapedContent = await handleCustomScraping(text, url);
    if (customScrapedContent) {
      text = customScrapedContent[0];
      screenshot = customScrapedContent[1];
    }

    // TODO: add an option to return markdown or structured/extracted content
    let cleanedHtml = removeUnwantedElements(text, pageOptions);
    return [await parseMarkdown(cleanedHtml), text, screenshot];
  };

  try {
    let [text, html, screenshot] = ["", "", ""];
    let urlKey = urlToScrap;
    try {
      urlKey = new URL(urlToScrap).hostname.replace(/^www\./, "");
    } catch (error) {
      console.error(`Invalid URL key, falling back to the full URL: ${urlToScrap}`);
    }
    const defaultScraper = urlSpecificParams[urlKey]?.defaultScraper ?? "";
    const scrapersInOrder = getScrapingFallbackOrder(
      defaultScraper,
      pageOptions && pageOptions.waitFor && pageOptions.waitFor > 0,
      pageOptions && pageOptions.screenshot && pageOptions.screenshot === true
    );

    for (const scraper of scrapersInOrder) {
      // If there is HTML already fetched by the crawler, use it instead of re-scraping
      if (existingHtml && existingHtml.trim().length >= 100) {
        let cleanedHtml = removeUnwantedElements(existingHtml, pageOptions);
        text = await parseMarkdown(cleanedHtml);
        html = existingHtml;
        break;
      }
      [text, html, screenshot] = await attemptScraping(urlToScrap, scraper);
      if (text && text.trim().length >= 100) break;
      const nextScraperIndex = scrapersInOrder.indexOf(scraper) + 1;
      if (nextScraperIndex < scrapersInOrder.length) {
        console.info(`Falling back to ${scrapersInOrder[nextScraperIndex]}`);
      }
    }

    if (!text) {
      throw new Error(`All scraping methods failed for URL: ${urlToScrap}`);
    }

    const soup = cheerio.load(html);
    const metadata = extractMetadata(soup, urlToScrap);

    let document: Document;
    if (screenshot && screenshot.length > 0) {
      document = {
        content: text,
        markdown: text,
        html: pageOptions.includeHtml ? html : undefined,
        metadata: { ...metadata, screenshot_base64: screenshot, sourceURL: urlToScrap },
      };
    } else {
      document = {
        content: text,
        markdown: text,
        html: pageOptions.includeHtml ? html : undefined,
        metadata: { ...metadata, sourceURL: urlToScrap },
      };
    }

    return document;
  } catch (error) {
    console.error(`Error: ${error} - Failed to fetch URL: ${urlToScrap}`);
    return {
      content: "",
      markdown: "",
      html: "",
      metadata: { sourceURL: urlToScrap },
    } as Document;
  }
}