import * as cheerio from "cheerio";
import { ScrapingBeeClient } from "scrapingbee";
import { extractMetadata } from "./utils/metadata";
import dotenv from "dotenv";
import { Document, PageOptions } from "../../lib/entities";
import { parseMarkdown } from "../../lib/html-to-markdown";
import { excludeNonMainTags } from "./utils/excludeTags";
dotenv.config();
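
/**
 * Placeholder for the custom Firecrawl scraper; see the TODO below.
 */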
export async function scrapWithCustomFirecrawl(
url: string,
options?: any
): Promise<string> {
try {
// TODO: merge the custom firecrawl scraper into mono-repo when ready
return "";
} catch (error) {
console.error(`Error scraping with custom firecrawl-scraper: ${error}`);
return "";
}
}
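
/**
 * Scrapes a URL through the ScrapingBee API.
 * Returns the response body as a string, or an empty string on failure.
 */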
export async function scrapWithScrapingBee(
url: string,
wait_browser: string = "domcontentloaded",
timeout: number = 15000
): Promise<string> {
try {
const client = new ScrapingBeeClient(process.env.SCRAPING_BEE_API_KEY);
const response = await client.get({
url: url,
params: { timeout: timeout, wait_browser: wait_browser },
headers: { "ScrapingService-Request": "TRUE" },
});
if (response.status !== 200 && response.status !== 404) {
console.error(
`Scraping bee error in ${url} with status code ${response.status}`
);
return "";
}
const decoder = new TextDecoder();
const text = decoder.decode(response.data);
return text;
} catch (error) {
console.error(`Error scraping with Scraping Bee: ${error}`);
return "";
}
}
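
/**
 * Scrapes a URL via the Playwright microservice.
 * Returns the rendered HTML, or an empty string on failure.
 */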
export async function scrapWithPlaywright(url: string): Promise<string> {
try {
const response = await fetch(process.env.PLAYWRIGHT_MICROSERVICE_URL, {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({ url: url }),
});
if (!response.ok) {
console.error(
`Error fetching w/ playwright server -> URL: ${url} with status: ${response.status}`
);
return "";
}
const data = await response.json();
const html = data.content;
return html ?? "";
} catch (error) {
console.error(`Error scraping with Playwright: ${error}`);
return "";
}
}
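
/**
 * Scrapes a single URL, cascading through the available scraping methods
 * until one returns enough content, and converts the result to markdown.
 */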
export async function scrapSingleUrl(
urlToScrap: string,
toMarkdown: boolean = true,
pageOptions: PageOptions = { onlyMainContent: true }
): Promise<Document> {
console.log(`Scraping URL: ${urlToScrap}`);
urlToScrap = urlToScrap.trim();
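
// Strip tags that never hold page content; when onlyMainContent is set,
// also remove everything outside the main content area.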
const removeUnwantedElements = (html: string, pageOptions: PageOptions) => {
const soup = cheerio.load(html);
soup("script, style, iframe, noscript, meta, head").remove();
if (pageOptions.onlyMainContent) {
// remove any other tags that are not in the main content
excludeNonMainTags.forEach((tag) => {
soup(tag).remove();
});
}
return soup.html();
};
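
// Runs a single scraping method and returns [markdown, rawHtml].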
const attemptScraping = async (
url: string,
method:
| "firecrawl-scraper"
| "scrapingBee"
| "playwright"
| "scrapingBeeLoad"
| "fetch"
) => {
let text = "";
switch (method) {
case "firecrawl-scraper":
text = await scrapWithCustomFirecrawl(url);
break;
case "scrapingBee":
if (process.env.SCRAPING_BEE_API_KEY) {
// Use a shorter 7s timeout when fallback is disabled, 15s otherwise
text = await scrapWithScrapingBee(url, "domcontentloaded", pageOptions.fallback === false ? 7000 : 15000);
}
break;
case "playwright":
if (process.env.PLAYWRIGHT_MICROSERVICE_URL) {
text = await scrapWithPlaywright(url);
}
break;
case "scrapingBeeLoad":
if (process.env.SCRAPING_BEE_API_KEY) {
text = await scrapWithScrapingBee(url, "networkidle2");
}
break;
case "fetch":
try {
const response = await fetch(url);
if (!response.ok) {
console.error(
`Error fetching URL: ${url} with status: ${response.status}`
);
return "";
}
text = await response.text();
} catch (error) {
console.error(`Error scraping URL: ${error}`);
return "";
}
break;
}
const cleanedHtml = removeUnwantedElements(text, pageOptions);

return [await parseMarkdown(cleanedHtml), text];
};
try {
// TODO: comment this out once we're ready to merge firecrawl-scraper into the mono-repo
// let [text, html] = await attemptScraping(urlToScrap, 'firecrawl-scraper');
// if (!text || text.length < 100) {
// console.log("Falling back to scraping bee load");
// [text, html] = await attemptScraping(urlToScrap, 'scrapingBeeLoad');
// }
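// Try ScrapingBee first, then fall back to Playwright, a full ScrapingBee
// page load, and finally a plain fetch whenever the result is empty or too short.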
let [text, html] = await attemptScraping(urlToScrap, "scrapingBee");
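
// When fallback is explicitly disabled, return the first attempt's result as-is.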
if (pageOptions.fallback === false) {
const soup = cheerio.load(html);
const metadata = extractMetadata(soup, urlToScrap);
return {
content: text,
markdown: text,
metadata: { ...metadata, sourceURL: urlToScrap },
} as Document;
}
if (!text || text.length < 100) {
console.log("Falling back to playwright");
[text, html] = await attemptScraping(urlToScrap, "playwright");
}
if (!text || text.length < 100) {
console.log("Falling back to scraping bee load");
[text, html] = await attemptScraping(urlToScrap, "scrapingBeeLoad");
}
if (!text || text.length < 100) {
console.log("Falling back to fetch");
[text, html] = await attemptScraping(urlToScrap, "fetch");
}
const soup = cheerio.load(html);
const metadata = extractMetadata(soup, urlToScrap);
return {
content: text,
markdown: text,
metadata: { ...metadata, sourceURL: urlToScrap },
} as Document;
} catch (error) {
console.error(`Error: ${error} - Failed to fetch URL: ${urlToScrap}`);
return {
content: "",
markdown: "",
metadata: { sourceURL: urlToScrap },
} as Document;
}
}