import { Request, Response } from "express";
import { authenticateUser } from "./auth";
import { RateLimiterMode } from "../../../src/types";
import { isUrlBlocked } from "../../../src/scraper/WebScraper/utils/blocklist";
import { Logger } from "../../../src/lib/logger";
import { checkAndUpdateURL } from "../../../src/lib/validateUrl";
import { MapRequest, MapResponse } from "./types";
export async function mapController(req: Request<{}, MapResponse, MapRequest>, res: Response<MapResponse>) {
2024-08-06 15:24:45 -03:00
// expected req.body
// req.body = {
// url: string
2024-08-15 21:51:59 +02:00
// crawlerOptions:
2024-08-06 15:24:45 -03:00
// }
try {
const { success, team_id, error, status } = await authenticateUser(
req,
res,
RateLimiterMode.Crawl
);
if (!success) {
2024-08-15 21:51:59 +02:00
return res.status(status).json({ success: false, error });
2024-08-06 15:24:45 -03:00
}
// if (req.headers["x-idempotency-key"]) {
// const isIdempotencyValid = await validateIdempotencyKey(req);
// if (!isIdempotencyValid) {
// return res.status(409).json({ error: "Idempotency key already used" });
// }
// try {
// createIdempotencyKey(req);
// } catch (error) {
// Logger.error(error);
// return res.status(500).json({ error: error.message });
// }
// }
// const { success: creditsCheckSuccess, message: creditsCheckMessage } =
// await checkTeamCredits(team_id, 1);
// if (!creditsCheckSuccess) {
// return res.status(402).json({ error: "Insufficient credits" });
// }
let url = req.body.url;
if (!url) {
2024-08-15 21:51:59 +02:00
return res.status(400).json({ success: false, error: "Url is required" });
2024-08-06 15:24:45 -03:00
}
if (isUrlBlocked(url)) {
return res
.status(403)
.json({
2024-08-15 21:51:59 +02:00
success: false,
2024-08-06 15:24:45 -03:00
error:
"Firecrawl currently does not support social media scraping due to policy restrictions. We're actively working on building support for it.",
});
}
try {
2024-08-15 21:51:59 +02:00
url = checkAndUpdateURL(url).url;
2024-08-06 15:24:45 -03:00
} catch (error) {
2024-08-15 21:51:59 +02:00
return res.status(400).json({ success: false, error: 'Invalid Url' });
2024-08-06 15:24:45 -03:00
}
2024-08-15 21:51:59 +02:00
return res.status(200).json({ success: true, links: [ "test1", "test2" ] });
2024-08-06 15:24:45 -03:00
// const mode = req.body.mode ?? "crawl";
// const crawlerOptions = { ...defaultCrawlerOptions, ...req.body.crawlerOptions };
// const pageOptions = { ...defaultCrawlPageOptions, ...req.body.pageOptions };
// if (mode === "single_urls" && !url.includes(",")) { // NOTE: do we need this?
// try {
// const a = new WebScraperDataProvider();
// await a.setOptions({
// jobId: uuidv4(),
// mode: "single_urls",
// urls: [url],
// crawlerOptions: { ...crawlerOptions, returnOnlyUrls: true },
// pageOptions: pageOptions,
// });
// const docs = await a.getDocuments(false, (progress) => {
// job.progress({
// current: progress.current,
// total: progress.total,
// current_step: "SCRAPING",
// current_url: progress.currentDocumentUrl,
// });
// });
// return res.json({
// success: true,
// documents: docs,
// });
// } catch (error) {
// Logger.error(error);
// return res.status(500).json({ error: error.message });
// }
// }
// const job = await addWebScraperJob({
// url: url,
// mode: mode ?? "crawl", // fix for single urls not working
// crawlerOptions: crawlerOptions,
// team_id: team_id,
// pageOptions: pageOptions,
// origin: req.body.origin ?? defaultOrigin,
// });
// await logCrawl(job.id.toString(), team_id);
// res.json({ jobId: job.id });
} catch (error) {
Logger.error(error);
2024-08-15 21:51:59 +02:00
return res.status(500).json({ success: false, error: error.message });
2024-08-06 15:24:45 -03:00
}
}