import * as cheerio from "cheerio";
import { ScrapingBeeClient } from "scrapingbee";
import { extractMetadata } from "./utils/metadata";
import dotenv from "dotenv";
import { Document, PageOptions, FireEngineResponse, ExtractorOptions } from "../../lib/entities";
import { parseMarkdown } from "../../lib/html-to-markdown";
import { urlSpecificParams } from "./utils/custom/website_params";
import { fetchAndProcessPdf } from "./utils/pdfProcessor";
import { handleCustomScraping } from "./custom/handleCustomScraping";
import { removeUnwantedElements } from "./utils/removeUnwantedElements";
import axios from "axios";
import { logScrape } from "../../services/logging/scrape_log";
dotenv.config();
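
// Scraping backends supported by this module; whether each one is usable
// depends on the env vars checked in getScrapingFallbackOrder.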
const baseScrapers = [
"fire-engine",
"scrapingBee",
"playwright",
"scrapingBeeLoad",
"fetch",
] as const;
const universalTimeout = 15000; // Base request timeout in milliseconds; per-request wait times are added on top
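
/**
 * Builds the request parameters for a scrape, merging any URL-specific
 * overrides from urlSpecificParams into the defaults.
 * @param url The URL to scrape
 * @param wait_browser The browser wait condition (defaults to "domcontentloaded")
 * @param timeout The request timeout in milliseconds
 * @returns The merged request parameters
 */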
export async function generateRequestParams(
url: string,
wait_browser: string = "domcontentloaded",
timeout: number = 15000
): Promise<any> {
const defaultParams = {
url: url,
params: { timeout: timeout, wait_browser: wait_browser },
headers: { "ScrapingService-Request": "TRUE" },
};
try {
const urlKey = new URL(url).hostname.replace(/^www\./, "");
if (urlSpecificParams.hasOwnProperty(urlKey)) {
return { ...defaultParams, ...urlSpecificParams[urlKey] };
} else {
return defaultParams;
}
} catch (error) {
console.error(`Error generating URL key: ${error}`);
return defaultParams;
}
}
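
/**
 * Scrapes a URL with the Fire-Engine service (FIRE_ENGINE_BETA_URL).
 * Supports an optional wait time, screenshot capture, and custom headers,
 * hands PDF responses to fetchAndProcessPdf, and logs every attempt.
 */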
export async function scrapWithFireEngine({
url,
waitFor = 0,
screenshot = false,
pageOptions = { parsePDF: true },
headers,
options,
}: {
url: string;
waitFor?: number;
screenshot?: boolean;
pageOptions?: { scrollXPaths?: string[]; parsePDF?: boolean };
headers?: Record<string, string>;
options?: any;
}): Promise<FireEngineResponse> {
const logParams = {
url,
scraper: "fire-engine",
success: false,
response_code: null,
time_taken_seconds: null,
error_message: "",
html: "",
startTime: Date.now(),
};
try {
const reqParams = await generateRequestParams(url);
const waitParam = reqParams["params"]?.wait ?? waitFor;
const screenshotParam = reqParams["params"]?.screenshot ?? screenshot;
console.log(
`[Fire-Engine] Scraping ${url} with wait: ${waitParam} and screenshot: ${screenshotParam}`
);
const response = await axios.post(
process.env.FIRE_ENGINE_BETA_URL + "/scrape",
{
url: url,
wait: waitParam,
screenshot: screenshotParam,
headers: headers,
pageOptions: pageOptions,
},
{
headers: {
"Content-Type": "application/json",
},
timeout: universalTimeout + waitParam,
}
);
if (response.status !== 200) {
console.error(
`[Fire-Engine] Error fetching url: ${url} with status: ${response.status}`
);
logParams.error_message = response.data?.pageError;
logParams.response_code = response.data?.pageStatusCode;
return {
html: "",
screenshot: "",
pageStatusCode: response.data?.pageStatusCode,
pageError: response.data?.pageError,
};
}
const contentType = response.headers["content-type"];
if (contentType && contentType.includes("application/pdf")) {
const { content, pageStatusCode, pageError } = await fetchAndProcessPdf(
url,
pageOptions?.parsePDF
);
logParams.success = true;
// We shouldn't need to log the PDF content here, I believe
return { html: content, screenshot: "", pageStatusCode, pageError };
} else {
const data = response.data;
logParams.success = (data.pageStatusCode >= 200 && data.pageStatusCode < 300) || data.pageStatusCode === 404;
logParams.html = data.content ?? "";
logParams.response_code = data.pageStatusCode;
logParams.error_message = data.pageError;
return {
html: data.content ?? "",
screenshot: data.screenshot ?? "",
pageStatusCode: data.pageStatusCode,
pageError: data.pageError,
};
}
} catch (error) {
if (error.code === "ECONNABORTED") {
console.log(`[Fire-Engine] Request timed out for ${url}`);
logParams.error_message = "Request timed out";
} else {
console.error(`[Fire-Engine][c] Error fetching url: ${url} -> ${error}`);
logParams.error_message = error.message || error;
2024-06-10 16:57:31 -07:00
}
return { html: "", screenshot: "" };
} finally {
const endTime = Date.now();
const time_taken_seconds = (endTime - logParams.startTime) / 1000;
await logScrape({
url: logParams.url,
scraper: logParams.scraper,
success: logParams.success,
response_code: logParams.response_code,
time_taken_seconds,
error_message: logParams.error_message,
html: logParams.html,
});
}
}
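
/**
 * Scrapes a URL with ScrapingBee. Attempts made with wait_browser
 * "networkidle2" are logged as the "scrapingBeeLoad" scraper; PDF responses
 * are handed to fetchAndProcessPdf.
 */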
export async function scrapWithScrapingBee(
url: string,
wait_browser: string = "domcontentloaded",
timeout: number = universalTimeout,
pageOptions: { parsePDF?: boolean } = { parsePDF: true }
): Promise<{ content: string; pageStatusCode?: number; pageError?: string }> {
const logParams = {
url,
scraper: wait_browser === "networkidle2" ? "scrapingBeeLoad" : "scrapingBee",
success: false,
response_code: null,
time_taken_seconds: null,
error_message: "",
html: "",
startTime: Date.now(),
};
try {
const client = new ScrapingBeeClient(process.env.SCRAPING_BEE_API_KEY);
const clientParams = await generateRequestParams(
url,
wait_browser,
timeout
);
const response = await client.get({
...clientParams,
params: {
...clientParams.params,
transparent_status_code: "True",
},
});
const contentType = response.headers["content-type"];
if (contentType && contentType.includes("application/pdf")) {
logParams.success = true;
const { content, pageStatusCode, pageError } = await fetchAndProcessPdf(url, pageOptions?.parsePDF);
return { content, pageStatusCode, pageError };
} else {
let text = "";
try {
const decoder = new TextDecoder();
text = decoder.decode(response.data);
logParams.success = true;
} catch (decodeError) {
console.error(
`[ScrapingBee][c] Error decoding response data for url: ${url} -> ${decodeError}`
);
logParams.error_message = decodeError.message || decodeError;
}
logParams.response_code = response.status;
logParams.html = text;
logParams.success = (response.status >= 200 && response.status < 300) || response.status === 404;
logParams.error_message = response.statusText != "OK" ? response.statusText : undefined;
return {
content: text,
pageStatusCode: response.status,
pageError:
response.statusText != "OK" ? response.statusText : undefined,
};
}
} catch (error) {
console.error(`[ScrapingBee][c] Error fetching url: ${url} -> ${error}`);
logParams.error_message = error.message || error;
logParams.response_code = error.response?.status;
return {
content: "",
pageStatusCode: error.response?.status,
pageError: error.response?.statusText,
};
} finally {
const endTime = Date.now();
logParams.time_taken_seconds = (endTime - logParams.startTime) / 1000;
await logScrape(logParams);
}
}
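
/**
 * Scrapes a URL via the Playwright microservice (PLAYWRIGHT_MICROSERVICE_URL),
 * which is expected to return JSON containing content, pageStatusCode and
 * pageError.
 */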
export async function scrapWithPlaywright(
url: string,
waitFor: number = 0,
headers?: Record<string, string>,
pageOptions: { parsePDF?: boolean } = { parsePDF: true }
): Promise<{ content: string; pageStatusCode?: number; pageError?: string }> {
const logParams = {
url,
scraper: "playwright",
success: false,
response_code: null,
time_taken_seconds: null,
error_message: "",
html: "",
startTime: Date.now(),
};
try {
const reqParams = await generateRequestParams(url);
// If the user has passed a wait parameter in the request, use that
const waitParam = reqParams["params"]?.wait ?? waitFor;
const response = await axios.post(
process.env.PLAYWRIGHT_MICROSERVICE_URL,
{
url: url,
wait_after_load: waitParam,
headers: headers,
},
{
headers: {
"Content-Type": "application/json",
},
timeout: universalTimeout + waitParam, // Add waitParam to timeout to account for the wait time
transformResponse: [(data) => data], // Prevent axios from parsing JSON automatically
}
);
if (response.status !== 200) {
console.error(
`[Playwright] Error fetching url: ${url} with status: ${response.status}`
);
logParams.error_message = response.data?.pageError;
logParams.response_code = response.data?.pageStatusCode;
return {
content: "",
pageStatusCode: response.data?.pageStatusCode,
pageError: response.data?.pageError,
};
}
const contentType = response.headers["content-type"];
if (contentType && contentType.includes("application/pdf")) {
logParams.success = true;
return await fetchAndProcessPdf(url, pageOptions?.parsePDF);
} else {
const textData = response.data;
try {
const data = JSON.parse(textData);
const html = data.content;
logParams.success = true;
logParams.html = html;
logParams.response_code = data.pageStatusCode;
logParams.error_message = data.pageError;
return {
content: html ?? "",
pageStatusCode: data.pageStatusCode,
pageError: data.pageError,
};
} catch (jsonError) {
logParams.error_message = jsonError.message || jsonError;
console.error(
`[Playwright] Error parsing JSON response for url: ${url} -> ${jsonError}`
);
return { content: "" };
}
}
} catch (error) {
if (error.code === "ECONNABORTED") {
logParams.error_message = "Request timed out";
console.log(`[Playwright] Request timed out for ${url}`);
} else {
logParams.error_message = error.message || error;
console.error(`[Playwright] Error fetching url: ${url} -> ${error}`);
}
return { content: "" };
} finally {
const endTime = Date.now();
logParams.time_taken_seconds = (endTime - logParams.startTime) / 1000;
await logScrape(logParams);
}
}
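
/**
 * Scrapes a URL with a plain axios GET request; the simplest backend and the
 * last entry in the fallback order.
 */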
export async function scrapWithFetch(
url: string,
pageOptions: { parsePDF?: boolean } = { parsePDF: true }
): Promise<{ content: string; pageStatusCode?: number; pageError?: string }> {
const logParams = {
url,
scraper: "fetch",
success: false,
response_code: null,
time_taken_seconds: null,
error_message: "",
html: "",
startTime: Date.now(),
};
try {
const response = await axios.get(url, {
headers: {
"Content-Type": "application/json",
},
timeout: universalTimeout,
transformResponse: [(data) => data], // Prevent axios from parsing JSON automatically
});
if (response.status !== 200) {
console.error(
`[Axios] Error fetching url: ${url} with status: ${response.status}`
);
logParams.error_message = response.statusText;
logParams.response_code = response.status;
return {
content: "",
pageStatusCode: response.status,
pageError: response.statusText,
};
}
const contentType = response.headers["content-type"];
if (contentType && contentType.includes("application/pdf")) {
logParams.success = true;
return await fetchAndProcessPdf(url, pageOptions?.parsePDF);
} else {
const text = response.data;
const result = { content: text, pageStatusCode: 200 };
logParams.success = true;
logParams.html = text;
logParams.response_code = 200;
logParams.error_message = null;
return result;
}
} catch (error) {
if (error.code === "ECONNABORTED") {
logParams.error_message = "Request timed out";
console.log(`[Axios] Request timed out for ${url}`);
} else {
logParams.error_message = error.message || error;
console.error(`[Axios] Error fetching url: ${url} -> ${error}`);
}
return { content: "" };
} finally {
const endTime = Date.now();
logParams.time_taken_seconds = (endTime - logParams.startTime) / 1000;
await logScrape(logParams);
}
}
/**
 * Get the order of scrapers to be used for scraping a URL.
 * If the env vars required by a specific scraper are not set, it is removed from the order.
 * @param defaultScraper The default scraper to use if the URL does not have a specific scraper order defined
 * @param isWaitPresent Whether the request specifies a wait time
 * @param isScreenshotPresent Whether the request asks for a screenshot
 * @param isHeadersPresent Whether the request supplies custom headers
 * @returns The order of scrapers to be used for scraping a URL
 */
function getScrapingFallbackOrder(
defaultScraper?: string,
isWaitPresent: boolean = false,
isScreenshotPresent: boolean = false,
isHeadersPresent: boolean = false
) {
const availableScrapers = baseScrapers.filter((scraper) => {
switch (scraper) {
case "scrapingBee":
case "scrapingBeeLoad":
return !!process.env.SCRAPING_BEE_API_KEY;
case "fire-engine":
return !!process.env.FIRE_ENGINE_BETA_URL;
case "playwright":
return !!process.env.PLAYWRIGHT_MICROSERVICE_URL;
default:
return true;
}
});
let defaultOrder = [
"scrapingBee",
"fire-engine",
"playwright",
"scrapingBeeLoad",
"fetch",
];
if (isWaitPresent || isScreenshotPresent || isHeadersPresent) {
defaultOrder = [
"fire-engine",
"playwright",
...defaultOrder.filter(
(scraper) => scraper !== "fire-engine" && scraper !== "playwright"
),
];
}
const filteredDefaultOrder = defaultOrder.filter(
(scraper: (typeof baseScrapers)[number]) =>
availableScrapers.includes(scraper)
);
const uniqueScrapers = new Set(
defaultScraper
? [defaultScraper, ...filteredDefaultOrder, ...availableScrapers]
: [...filteredDefaultOrder, ...availableScrapers]
);
const scrapersInOrder = Array.from(uniqueScrapers);
return scrapersInOrder as (typeof baseScrapers)[number][];
}
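
/**
 * Scrapes a single URL, trying each available scraper in fallback order until
 * one returns enough content (or a 404), then converts the HTML to markdown
 * and attaches page metadata.
 *
 * Illustrative usage, relying on the default options:
 *   const doc = await scrapSingleUrl("https://example.com");
 */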
export async function scrapSingleUrl(
urlToScrap: string,
pageOptions: PageOptions = {
onlyMainContent: true,
includeHtml: false,
includeRawHtml: false,
waitFor: 0,
screenshot: false,
headers: undefined,
},
extractorOptions: ExtractorOptions = {
mode: "llm-extraction-from-markdown"
},
existingHtml: string = ""
): Promise<Document> {
urlToScrap = urlToScrap.trim();
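
// Attempts a scrape with a single backend and normalizes the result into
// text, screenshot, and status metadata.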
const attemptScraping = async (
url: string,
method: (typeof baseScrapers)[number]
) => {
let scraperResponse: {
text: string;
screenshot: string;
metadata: { pageStatusCode?: number; pageError?: string | null };
} = { text: "", screenshot: "", metadata: {} };
let screenshot = "";
switch (method) {
case "fire-engine":
if (process.env.FIRE_ENGINE_BETA_URL) {
console.log(`Scraping ${url} with Fire Engine`);
const response = await scrapWithFireEngine({
url,
waitFor: pageOptions.waitFor,
screenshot: pageOptions.screenshot,
pageOptions: pageOptions,
headers: pageOptions.headers,
});
scraperResponse.text = response.html;
scraperResponse.screenshot = response.screenshot;
scraperResponse.metadata.pageStatusCode = response.pageStatusCode;
scraperResponse.metadata.pageError = response.pageError;
}
break;
case "scrapingBee":
if (process.env.SCRAPING_BEE_API_KEY) {
const response = await scrapWithScrapingBee(
url,
"domcontentloaded",
pageOptions.fallback === false ? 7000 : 15000
);
scraperResponse.text = response.content;
scraperResponse.metadata.pageStatusCode = response.pageStatusCode;
scraperResponse.metadata.pageError = response.pageError;
}
break;
case "playwright":
if (process.env.PLAYWRIGHT_MICROSERVICE_URL) {
const response = await scrapWithPlaywright(
url,
pageOptions.waitFor,
pageOptions.headers
);
scraperResponse.text = response.content;
scraperResponse.metadata.pageStatusCode = response.pageStatusCode;
scraperResponse.metadata.pageError = response.pageError;
}
break;
case "scrapingBeeLoad":
if (process.env.SCRAPING_BEE_API_KEY) {
const response = await scrapWithScrapingBee(url, "networkidle2");
scraperResponse.text = response.content;
scraperResponse.metadata.pageStatusCode = response.pageStatusCode;
scraperResponse.metadata.pageError = response.pageError;
}
break;
case "fetch":
const response = await scrapWithFetch(url);
scraperResponse.text = response.content;
scraperResponse.metadata.pageStatusCode = response.pageStatusCode;
scraperResponse.metadata.pageError = response.pageError;
break;
}
let customScrapedContent: FireEngineResponse | null = null;
// Check for custom scraping conditions
const customScraperResult = await handleCustomScraping(
scraperResponse.text,
url
);
if (customScraperResult) {
switch (customScraperResult.scraper) {
case "fire-engine":
customScrapedContent = await scrapWithFireEngine({
url: customScraperResult.url,
waitFor: customScraperResult.waitAfterLoad,
screenshot: false,
pageOptions: customScraperResult.pageOptions,
});
if (screenshot) {
customScrapedContent.screenshot = screenshot;
}
break;
case "pdf":
const { content, pageStatusCode, pageError } =
await fetchAndProcessPdf(
customScraperResult.url,
pageOptions?.parsePDF
);
customScrapedContent = {
html: content,
screenshot,
pageStatusCode,
pageError,
};
break;
}
}
if (customScrapedContent) {
scraperResponse.text = customScrapedContent.html;
screenshot = customScrapedContent.screenshot;
}
//* TODO: add an option to return markdown or structured/extracted content
let cleanedHtml = removeUnwantedElements(scraperResponse.text, pageOptions);
return {
text: await parseMarkdown(cleanedHtml),
html: cleanedHtml,
rawHtml: scraperResponse.text,
screenshot: scraperResponse.screenshot,
pageStatusCode: scraperResponse.metadata.pageStatusCode,
pageError: scraperResponse.metadata.pageError || undefined,
};
};
let { text, html, rawHtml, screenshot, pageStatusCode, pageError } = {
text: "",
html: "",
rawHtml: "",
screenshot: "",
pageStatusCode: 200,
pageError: undefined,
};
try {
let urlKey = urlToScrap;
try {
urlKey = new URL(urlToScrap).hostname.replace(/^www\./, "");
} catch (error) {
console.error(`Invalid URL key, using the raw URL instead: ${urlToScrap}`);
}
const defaultScraper = urlSpecificParams[urlKey]?.defaultScraper ?? "";
const scrapersInOrder = getScrapingFallbackOrder(
defaultScraper,
pageOptions && pageOptions.waitFor && pageOptions.waitFor > 0,
pageOptions && pageOptions.screenshot && pageOptions.screenshot === true,
pageOptions && pageOptions.headers && pageOptions.headers !== undefined
);
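// Try each scraper in order, falling back until one returns enough content
// or the page is a 404.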
for (const scraper of scrapersInOrder) {
// If HTML from the crawler already exists, use it
if (existingHtml && existingHtml.trim().length >= 100) {
let cleanedHtml = removeUnwantedElements(existingHtml, pageOptions);
text = await parseMarkdown(cleanedHtml);
html = cleanedHtml;
break;
}
const attempt = await attemptScraping(urlToScrap, scraper);
text = attempt.text ?? "";
html = attempt.html ?? "";
rawHtml = attempt.rawHtml ?? "";
screenshot = attempt.screenshot ?? "";
if (attempt.pageStatusCode) {
pageStatusCode = attempt.pageStatusCode;
}
if (attempt.pageError && attempt.pageStatusCode >= 400) {
pageError = attempt.pageError;
} else if (attempt.pageStatusCode < 400) {
pageError = undefined;
}
if (text && text.trim().length >= 100) break;
if (pageStatusCode && pageStatusCode == 404) break;
const nextScraperIndex = scrapersInOrder.indexOf(scraper) + 1;
if (nextScraperIndex < scrapersInOrder.length) {
console.info(`Falling back to ${scrapersInOrder[nextScraperIndex]}`);
}
}
if (!text) {
throw new Error(`All scraping methods failed for URL: ${urlToScrap}`);
}
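// Parse the scraped HTML and extract page metadata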
const soup = cheerio.load(rawHtml);
const metadata = extractMetadata(soup, urlToScrap);
let document: Document;
if (screenshot && screenshot.length > 0) {
document = {
content: text,
markdown: text,
html: pageOptions.includeHtml ? html : undefined,
rawHtml: pageOptions.includeRawHtml || extractorOptions.mode === "llm-extraction-from-raw-html" ? rawHtml : undefined,
metadata: {
...metadata,
screenshot: screenshot,
sourceURL: urlToScrap,
pageStatusCode: pageStatusCode,
pageError: pageError,
},
};
} else {
document = {
content: text,
markdown: text,
html: pageOptions.includeHtml ? html : undefined,
rawHtml: pageOptions.includeRawHtml || extractorOptions.mode === "llm-extraction-from-raw-html" ? rawHtml : undefined,
metadata: {
...metadata,
sourceURL: urlToScrap,
pageStatusCode: pageStatusCode,
pageError: pageError,
},
};
}
return document;
} catch (error) {
console.error(`Error: ${error} - Failed to fetch URL: ${urlToScrap}`);
return {
content: "",
markdown: "",
html: "",
metadata: {
sourceURL: urlToScrap,
pageStatusCode: pageStatusCode,
pageError: pageError,
},
} as Document;
}
}