import * as cheerio from "cheerio";
import { ScrapingBeeClient } from "scrapingbee";
import { extractMetadata } from "./utils/metadata";
import dotenv from "dotenv";
import { Document, PageOptions, FireEngineResponse } from "../../lib/entities";
import { parseMarkdown } from "../../lib/html-to-markdown";
import { excludeNonMainTags } from "./utils/excludeTags";
import { urlSpecificParams } from "./utils/custom/website_params";
import { fetchAndProcessPdf } from "./utils/pdfProcessor";
import { handleCustomScraping } from "./custom/handleCustomScraping";
import axios from "axios";

dotenv.config();

const baseScrapers = [
  "fire-engine",
  "scrapingBee",
  "playwright",
  "scrapingBeeLoad",
  "fetch",
] as const;

const universalTimeout = 15000;
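
/**
 * Builds the request parameters for a scraping service call, merging any
 * site-specific overrides from urlSpecificParams (keyed by hostname).
 * @param url The URL to scrape
 * @param wait_browser Browser wait condition passed to the scraping service
 * @param timeout Request timeout in milliseconds
 * @returns The merged request parameters
 */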
export async function generateRequestParams(
  url: string,
  wait_browser: string = "domcontentloaded",
  timeout: number = 15000
): Promise<any> {
  const defaultParams = {
    url: url,
    params: { timeout: timeout, wait_browser: wait_browser },
    headers: { "ScrapingService-Request": "TRUE" },
  };

  try {
    const urlKey = new URL(url).hostname.replace(/^www\./, "");
    if (urlSpecificParams.hasOwnProperty(urlKey)) {
      return { ...defaultParams, ...urlSpecificParams[urlKey] };
    } else {
      return defaultParams;
    }
  } catch (error) {
    console.error(`Error generating URL key: ${error}`);
    return defaultParams;
  }
}
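
/**
 * Scrapes a URL with the Fire-Engine service.
 * @param url The URL to scrape
 * @param waitFor Milliseconds to wait after load before capturing the page
 * @param screenshot Whether to also capture a screenshot
 * @param pageOptions Extra page options (e.g. scrollXPaths)
 * @param headers Custom headers to forward with the request
 * @param options Additional options (currently unused)
 * @returns The scraped HTML and screenshot (empty strings on failure)
 */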
export async function scrapWithFireEngine(
  url: string,
  waitFor: number = 0,
  screenshot: boolean = false,
  pageOptions: { scrollXPaths?: string[] } = {},
  headers?: Record<string, string>,
  options?: any
): Promise<FireEngineResponse> {
  try {
    const reqParams = await generateRequestParams(url);
    // If the user has passed a wait parameter in the request, use that
    const waitParam = reqParams["params"]?.wait ?? waitFor;
    const screenshotParam = reqParams["params"]?.screenshot ?? screenshot;
    console.log(
      `[Fire-Engine] Scraping ${url} with wait: ${waitParam} and screenshot: ${screenshotParam}`
    );

    const response = await axios.post(
      process.env.FIRE_ENGINE_BETA_URL + "/scrape",
      {
        url: url,
        wait: waitParam,
        screenshot: screenshotParam,
        headers: headers,
        pageOptions: pageOptions,
      },
      {
        headers: {
          "Content-Type": "application/json",
        },
        timeout: universalTimeout + waitParam
      }
    );

    if (response.status !== 200) {
      console.error(
        `[Fire-Engine] Error fetching url: ${url} with status: ${response.status}`
      );
      return { html: "", screenshot: "" };
    }

    const contentType = response.headers["content-type"];
    if (contentType && contentType.includes("application/pdf")) {
      return { html: await fetchAndProcessPdf(url), screenshot: "" };
    } else {
      const data = response.data;
      const html = data.content;
      const screenshot = data.screenshot;
      return { html: html ?? "", screenshot: screenshot ?? "" };
    }
  } catch (error) {
    if (error.code === 'ECONNABORTED') {
      console.log(`[Fire-Engine] Request timed out for ${url}`);
    } else {
      console.error(`[Fire-Engine][c] Error fetching url: ${url} -> ${error}`);
    }
    return { html: "", screenshot: "" };
  }
}
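
/**
 * Scrapes a URL with ScrapingBee.
 * @param url The URL to scrape
 * @param wait_browser Browser wait condition (e.g. "domcontentloaded", "networkidle2")
 * @param timeout Request timeout in milliseconds
 * @returns The page content as a string (empty string on failure)
 */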
export async function scrapWithScrapingBee(
  url: string,
  wait_browser: string = "domcontentloaded",
  timeout: number = universalTimeout
): Promise<string> {
  try {
    const client = new ScrapingBeeClient(process.env.SCRAPING_BEE_API_KEY);
    const clientParams = await generateRequestParams(
      url,
      wait_browser,
      timeout
    );

    const response = await client.get(clientParams);

    if (response.status !== 200 && response.status !== 404) {
      console.error(
        `[ScrapingBee] Error fetching url: ${url} with status code ${response.status}`
      );
      return "";
    }

    const contentType = response.headers["content-type"];
    if (contentType && contentType.includes("application/pdf")) {
      return fetchAndProcessPdf(url);
    } else {
      const decoder = new TextDecoder();
      const text = decoder.decode(response.data);
      return text;
    }
  } catch (error) {
    console.error(`[ScrapingBee][c] Error fetching url: ${url} -> ${error}`);
    return "";
  }
}
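
/**
 * Scrapes a URL with the Playwright microservice.
 * @param url The URL to scrape
 * @param waitFor Milliseconds to wait after the page loads
 * @param headers Custom headers to forward with the request
 * @returns The page HTML as a string (empty string on failure)
 */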
export async function scrapWithPlaywright(
  url: string,
  waitFor: number = 0,
  headers?: Record<string, string>
): Promise<string> {
  try {
    const reqParams = await generateRequestParams(url);
    // If the user has passed a wait parameter in the request, use that
    const waitParam = reqParams["params"]?.wait ?? waitFor;

    const response = await axios.post(process.env.PLAYWRIGHT_MICROSERVICE_URL, {
      url: url,
      wait_after_load: waitParam,
      headers: headers,
    }, {
      headers: {
        "Content-Type": "application/json",
      },
      timeout: universalTimeout + waitParam, // Add waitParam to timeout to account for the wait time
      transformResponse: [(data) => data] // Prevent axios from parsing JSON automatically
    });

    if (response.status !== 200) {
      console.error(
        `[Playwright] Error fetching url: ${url} with status: ${response.status}`
      );
      return "";
    }

    const contentType = response.headers["content-type"];
    if (contentType && contentType.includes("application/pdf")) {
      return fetchAndProcessPdf(url);
    } else {
      const textData = response.data;
      try {
        const data = JSON.parse(textData);
        const html = data.content;
        return html ?? "";
      } catch (jsonError) {
        console.error(`[Playwright] Error parsing JSON response for url: ${url} -> ${jsonError}`);
        return "";
      }
    }
  } catch (error) {
    if (error.code === 'ECONNABORTED') {
      console.log(`[Playwright] Request timed out for ${url}`);
    } else {
      console.error(`[Playwright] Error fetching url: ${url} -> ${error}`);
    }
    return "";
  }
}
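
/**
 * Scrapes a URL with a plain HTTP GET request via axios.
 * @param url The URL to scrape
 * @returns The response body as a string (empty string on failure)
 */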
export async function scrapWithFetch(url: string): Promise<string> {
  try {
    const response = await axios.get(url, {
      headers: {
        "Content-Type": "application/json",
      },
      timeout: universalTimeout,
      transformResponse: [(data) => data] // Prevent axios from parsing JSON automatically
    });

    if (response.status !== 200) {
      console.error(
        `[Axios] Error fetching url: ${url} with status: ${response.status}`
      );
      return "";
    }

    const contentType = response.headers["content-type"];
    if (contentType && contentType.includes("application/pdf")) {
      return fetchAndProcessPdf(url);
    } else {
      const text = response.data;
      return text;
    }
  } catch (error) {
    if (error.code === 'ECONNABORTED') {
      console.log(`[Axios] Request timed out for ${url}`);
    } else {
      console.error(`[Axios] Error fetching url: ${url} -> ${error}`);
    }
    return "";
  }
}

/**
 * Get the order of scrapers to be used for scraping a URL.
 * If the user doesn't have envs set for a specific scraper, it will be removed from the order.
 * @param defaultScraper The default scraper to use if the URL does not have a specific scraper order defined
 * @param isWaitPresent Whether the request specifies a wait time
 * @param isScreenshotPresent Whether the request asks for a screenshot
 * @param isHeadersPresent Whether the request passes custom headers
 * @returns The order of scrapers to be used for scraping a URL
 */
function getScrapingFallbackOrder(
  defaultScraper?: string,
  isWaitPresent: boolean = false,
  isScreenshotPresent: boolean = false,
  isHeadersPresent: boolean = false
) {
  const availableScrapers = baseScrapers.filter((scraper) => {
    switch (scraper) {
      case "scrapingBee":
      case "scrapingBeeLoad":
        return !!process.env.SCRAPING_BEE_API_KEY;
      case "fire-engine":
        return !!process.env.FIRE_ENGINE_BETA_URL;
      case "playwright":
        return !!process.env.PLAYWRIGHT_MICROSERVICE_URL;
      default:
        return true;
    }
  });

  let defaultOrder = [
    "scrapingBee",
    "fire-engine",
    "playwright",
    "scrapingBeeLoad",
    "fetch",
  ];

  // Prioritize scrapers that support wait, screenshot, and custom headers
  if (isWaitPresent || isScreenshotPresent || isHeadersPresent) {
    defaultOrder = [
      "fire-engine",
      "playwright",
      ...defaultOrder.filter(
        (scraper) => scraper !== "fire-engine" && scraper !== "playwright"
      ),
    ];
  }

  const filteredDefaultOrder = defaultOrder.filter(
    (scraper: (typeof baseScrapers)[number]) =>
      availableScrapers.includes(scraper)
  );
  const uniqueScrapers = new Set(
    defaultScraper
      ? [defaultScraper, ...filteredDefaultOrder, ...availableScrapers]
      : [...filteredDefaultOrder, ...availableScrapers]
  );
  const scrapersInOrder = Array.from(uniqueScrapers);
  return scrapersInOrder as (typeof baseScrapers)[number][];
}
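
/**
 * Scrapes a single URL: tries each available scraper in fallback order until one
 * returns enough content, then converts the result to markdown.
 *
 * Example (illustrative only):
 *   const doc = await scrapSingleUrl("https://example.com");
 *   console.log(doc.markdown);
 *
 * @param urlToScrap The URL to scrape
 * @param pageOptions Page options (main-content filtering, HTML inclusion, wait time, screenshot, custom headers)
 * @param existingHtml HTML already fetched by the crawler, reused when long enough
 * @returns The scraped Document (content, markdown, optional html, and metadata)
 */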
export async function scrapSingleUrl(
  urlToScrap: string,
  pageOptions: PageOptions = {
    onlyMainContent: true,
    includeHtml: false,
    waitFor: 0,
    screenshot: false,
    headers: undefined
  },
  existingHtml: string = ""
): Promise<Document> {
  urlToScrap = urlToScrap.trim();

  const removeUnwantedElements = (html: string, pageOptions: PageOptions) => {
    const soup = cheerio.load(html);
    soup("script, style, iframe, noscript, meta, head").remove();
    if (pageOptions.onlyMainContent) {
      // remove any other tags that are not in the main content
      excludeNonMainTags.forEach((tag) => {
        soup(tag).remove();
      });
    }
    return soup.html();
  };
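
  // Runs a single scraper, applies any custom scraping rules, strips unwanted
  // elements, and returns [markdown, rawHtml, screenshot].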
  const attemptScraping = async (
    url: string,
    method: (typeof baseScrapers)[number]
  ) => {
    let text = "";
    let screenshot = "";
    switch (method) {
      case "fire-engine":
        if (process.env.FIRE_ENGINE_BETA_URL) {
          console.log(`Scraping ${url} with Fire Engine`);
          const response = await scrapWithFireEngine(
            url,
            pageOptions.waitFor,
            pageOptions.screenshot,
            {},
            pageOptions.headers
          );
          text = response.html;
          screenshot = response.screenshot;
        }
        break;
      case "scrapingBee":
        if (process.env.SCRAPING_BEE_API_KEY) {
          text = await scrapWithScrapingBee(
            url,
            "domcontentloaded",
            pageOptions.fallback === false ? 7000 : 15000
          );
        }
        break;
      case "playwright":
        if (process.env.PLAYWRIGHT_MICROSERVICE_URL) {
          text = await scrapWithPlaywright(url, pageOptions.waitFor, pageOptions.headers);
        }
        break;
      case "scrapingBeeLoad":
        if (process.env.SCRAPING_BEE_API_KEY) {
          text = await scrapWithScrapingBee(url, "networkidle2");
        }
        break;
      case "fetch":
        text = await scrapWithFetch(url);
        break;
    }

    let customScrapedContent: FireEngineResponse | null = null;

    // Check for custom scraping conditions
    const customScraperResult = await handleCustomScraping(text, url);

    if (customScraperResult) {
      switch (customScraperResult.scraper) {
        case "fire-engine":
          customScrapedContent = await scrapWithFireEngine(customScraperResult.url, customScraperResult.waitAfterLoad, false, customScraperResult.pageOptions);
          if (screenshot) {
            customScrapedContent.screenshot = screenshot;
          }
          break;
        case "pdf":
          customScrapedContent = { html: await fetchAndProcessPdf(customScraperResult.url), screenshot };
          break;
      }
    }

    if (customScrapedContent) {
      text = customScrapedContent.html;
      screenshot = customScrapedContent.screenshot;
    }

    //* TODO: add an option to return markdown or structured/extracted content
    let cleanedHtml = removeUnwantedElements(text, pageOptions);

    return [await parseMarkdown(cleanedHtml), text, screenshot];
  };

  try {
    let [text, html, screenshot] = ["", "", ""];
    let urlKey = urlToScrap;
    try {
      urlKey = new URL(urlToScrap).hostname.replace(/^www\./, "");
    } catch (error) {
      console.error(`Invalid URL key, trying: ${urlToScrap}`);
    }
    const defaultScraper = urlSpecificParams[urlKey]?.defaultScraper ?? "";
    const scrapersInOrder = getScrapingFallbackOrder(
      defaultScraper,
      pageOptions && pageOptions.waitFor && pageOptions.waitFor > 0,
      pageOptions && pageOptions.screenshot && pageOptions.screenshot === true,
      pageOptions && pageOptions.headers && pageOptions.headers !== undefined
    );
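
    // Try each scraper in order until one yields enough content (>= 100 characters)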
    for (const scraper of scrapersInOrder) {
      // If there is already text from the crawler, use it
      if (existingHtml && existingHtml.trim().length >= 100) {
        let cleanedHtml = removeUnwantedElements(existingHtml, pageOptions);
        text = await parseMarkdown(cleanedHtml);
        html = existingHtml;
        break;
      }
      [text, html, screenshot] = await attemptScraping(urlToScrap, scraper);
      if (text && text.trim().length >= 100) break;
      const nextScraperIndex = scrapersInOrder.indexOf(scraper) + 1;
      if (nextScraperIndex < scrapersInOrder.length) {
        console.info(`Falling back to ${scrapersInOrder[nextScraperIndex]}`);
      }
    }

    if (!text) {
      throw new Error(`All scraping methods failed for URL: ${urlToScrap}`);
    }

    const soup = cheerio.load(html);
    const metadata = extractMetadata(soup, urlToScrap);

    let document: Document;
    if (screenshot && screenshot.length > 0) {
      document = {
        content: text,
        markdown: text,
        html: pageOptions.includeHtml ? html : undefined,
        metadata: {
          ...metadata,
          screenshot: screenshot,
          sourceURL: urlToScrap,
        },
      };
    } else {
      document = {
        content: text,
        markdown: text,
        html: pageOptions.includeHtml ? html : undefined,
        metadata: { ...metadata, sourceURL: urlToScrap },
      };
    }

    return document;
  } catch (error) {
    console.error(`Error: ${error} - Failed to fetch URL: ${urlToScrap}`);
    return {
      content: "",
      markdown: "",
      html: "",
      metadata: { sourceURL: urlToScrap },
    } as Document;
  }
}