All files / src/handlers / crawl-handlers.ts

Statements: 93.58% (248/265)
Branches:   87.41% (243/278)
Functions:  100%   (11/11)
Lines:      95.92% (212/221)

import { BaseHandler } from './base-handler.js';
import { BatchCrawlOptions, CrawlResultItem, AdvancedCrawlConfig, CrawlEndpointResponse } from '../types.js';
import * as fs from 'fs/promises';
import * as path from 'path';
import * as os from 'os';
 
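/**
 * Handlers for crawl-related MCP tools: batch crawling, smart content-type
 * detection, recursive same-domain crawling, sitemap parsing, and a fully
 * configurable single-URL crawl.
 */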
export class CrawlHandlers extends BaseHandler {
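  /**
   * Crawls multiple URLs in a single /crawl request and reports per-URL
   * success or failure. `remove_images` is mapped to an exclude_tags list
   * and `bypass_cache` to cache_mode BYPASS.
   */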
  async batchCrawl(options: BatchCrawlOptions) {
    try {
      // Build crawler config if needed
      const crawler_config: Record<string, unknown> = {};
 
      // Handle remove_images by using exclude_tags
      if (options.remove_images) {
        crawler_config.exclude_tags = ['img', 'picture', 'svg'];
      }
 
      if (options.bypass_cache) {
        crawler_config.cache_mode = 'BYPASS';
      }
 
      const response = await this.axiosClient.post('/crawl', {
        urls: options.urls,
        max_concurrent: options.max_concurrent,
        crawler_config: Object.keys(crawler_config).length > 0 ? crawler_config : undefined,
      });
 
      const results = response.data.results || [];
 
      return {
        content: [
          {
            type: 'text',
            text: `Batch crawl completed. Processed ${results.length} URLs:\n\n${results
              .map(
                (r: CrawlResultItem, i: number) => `${i + 1}. ${options.urls[i]}: ${r.success ? 'Success' : 'Failed'}`,
              )
              .join('\n')}`,
          },
        ],
      };
    } catch (error) {
      throw this.formatError(error, 'batch crawl');
    }
  }
 
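  /**
   * Detects the content type of a URL (sitemap, RSS, XML, JSON, plain text,
   * or HTML) from the URL and an optional HEAD request, crawls it, and, when
   * follow_links is set, also crawls links found in sitemap/RSS/XML content.
   */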
  async smartCrawl(options: { url: string; max_depth?: number; follow_links?: boolean; bypass_cache?: boolean }) {
    try {
      // First, try to detect the content type from URL or HEAD request
      let contentType = '';
      try {
        const headResponse = await this.axiosClient.head(options.url);
        contentType = headResponse.headers['content-type'] || '';
      } catch {
        // If HEAD request fails, continue anyway - we'll detect from the crawl response
        console.debug('HEAD request failed, will detect content type from response');
      }
 
      let detectedType = 'html';
      if (options.url.includes('sitemap') || options.url.endsWith('.xml')) {
        detectedType = 'sitemap';
      } else if (options.url.includes('rss') || options.url.includes('feed')) {
        detectedType = 'rss';
      } else if (contentType.includes('text/plain') || options.url.endsWith('.txt')) {
        detectedType = 'text';
      } else if (contentType.includes('application/xml') || contentType.includes('text/xml')) {
        detectedType = 'xml';
      } else if (contentType.includes('application/json')) {
        detectedType = 'json';
      }
 
      // Crawl without the unsupported 'strategy' parameter
      const response = await this.axiosClient.post('/crawl', {
        urls: [options.url],
        crawler_config: {
          cache_mode: options.bypass_cache ? 'BYPASS' : 'ENABLED',
        },
        browser_config: {
          headless: true,
          browser_type: 'chromium',
        },
      });
 
      const results = response.data.results || [];
      const result = results[0] || {};
 
      // Handle follow_links for sitemaps and RSS feeds
      if (options.follow_links && (detectedType === 'sitemap' || detectedType === 'rss' || detectedType === 'xml')) {
        // Extract URLs from the content: <loc> entries (sitemaps), <link> entries (RSS), and href attributes
        const urlPattern = /<loc>(.*?)<\/loc>|<link[^>]*>(.*?)<\/link>|href=["']([^"']+)["']/gi;
        const content = result.markdown || result.html || '';
        const foundUrls: string[] = [];
        let match;
 
        while ((match = urlPattern.exec(content)) !== null) {
          const url = match[1] || match[2] || match[3];
          if (url && url.startsWith('http')) {
            foundUrls.push(url);
          }
        }
 
        if (foundUrls.length > 0) {
          // Limit to first 10 URLs to avoid overwhelming the system
          const urlsToFollow = foundUrls.slice(0, Math.min(10, options.max_depth || 10));
 
          // Crawl the found URLs
          await this.axiosClient.post('/crawl', {
            urls: urlsToFollow,
            max_concurrent: 3,
            bypass_cache: options.bypass_cache,
          });
 
          return {
            content: [
              {
                type: 'text',
                text: `Smart crawl detected content type: ${detectedType}\n\nMain content:\n${result.markdown?.raw_markdown || result.html || 'No content extracted'}\n\n---\nFollowed ${urlsToFollow.length} links:\n${urlsToFollow.map((url, i) => `${i + 1}. ${url}`).join('\n')}`,
              },
              ...(result.metadata
                ? [
                    {
                      type: 'text',
                      text: `\n\n---\nMetadata:\n${JSON.stringify(result.metadata, null, 2)}`,
                    },
                  ]
                : []),
            ],
          };
        }
      }
 
      return {
        content: [
          {
            type: 'text',
            text: `Smart crawl detected content type: ${detectedType}\n\n${result.markdown?.raw_markdown || result.html || 'No content extracted'}`,
          },
          ...(result.metadata
            ? [
                {
                  type: 'text',
                  text: `\n\n---\nMetadata:\n${JSON.stringify(result.metadata, null, 2)}`,
                },
              ]
            : []),
        ],
      };
    } catch (error) {
      throw this.formatError(error, 'smart crawl');
    }
  }
 
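  /**
   * Breadth-first crawl starting from a URL, following only internal
   * (same-hostname) links up to max_depth and max_pages, with optional
   * include/exclude URL patterns.
   */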
  async crawlRecursive(options: {
    url: string;
    max_depth?: number;
    max_pages?: number;
    include_pattern?: string;
    exclude_pattern?: string;
  }) {
    try {
      const startUrl = new URL(options.url);
      const visited = new Set<string>();
      const toVisit: Array<{ url: string; depth: number }> = [{ url: options.url, depth: 0 }];
      const results: Array<{ url: string; content: string; internal_links_found: number; depth: number }> = [];
      let maxDepthReached = 0;
 
      const includeRegex = options.include_pattern ? new RegExp(options.include_pattern) : null;
      const excludeRegex = options.exclude_pattern ? new RegExp(options.exclude_pattern) : null;
 
      const maxDepth = options.max_depth !== undefined ? options.max_depth : 3;
      const maxPages = options.max_pages || 50;
 
      while (toVisit.length > 0 && results.length < maxPages) {
        const current = toVisit.shift();
        if (!current || visited.has(current.url) || current.depth > maxDepth) {
          continue;
        }
 
        visited.add(current.url);
 
        try {
          // Check URL patterns
          if (excludeRegex && excludeRegex.test(current.url)) continue;
          if (includeRegex && !includeRegex.test(current.url)) continue;
 
          // Crawl the page using the crawl endpoint to get links
          const response = await this.axiosClient.post('/crawl', {
            urls: [current.url],
            crawler_config: {
              cache_mode: 'BYPASS',
            },
          });
 
          const crawlResults = response.data.results || [response.data];
          const result: CrawlResultItem = crawlResults[0];
 
          if (result && result.success) {
            const markdownContent = result.markdown?.fit_markdown || result.markdown?.raw_markdown || '';
            const internalLinksCount = result.links?.internal?.length || 0;
            maxDepthReached = Math.max(maxDepthReached, current.depth);
            results.push({
              url: current.url,
              content: markdownContent,
              internal_links_found: internalLinksCount,
              depth: current.depth,
            });
 
            // Add internal links to crawl queue
            if (current.depth < maxDepth && result.links?.internal) {
              for (const linkObj of result.links.internal) {
                const linkUrl = linkObj.href || linkObj;
                try {
                  const absoluteUrl = new URL(linkUrl, current.url).toString();
                  if (!visited.has(absoluteUrl) && new URL(absoluteUrl).hostname === startUrl.hostname) {
                    toVisit.push({ url: absoluteUrl, depth: current.depth + 1 });
                  }
                } catch (e) {
                  // Skip invalid URLs
                  console.debug('Invalid URL:', e);
                }
              }
            }
          }
        } catch (error) {
          // Log but continue crawling other pages
          console.error(`Failed to crawl ${current.url}:`, error instanceof Error ? error.message : error);
        }
      }
 
      // Prepare the output text
      let outputText = `Recursive crawl completed:\n\nPages crawled: ${results.length}\nStarting URL: ${options.url}\n`;
 
      if (results.length > 0) {
        outputText += `Max depth reached: ${maxDepthReached} (limit: ${maxDepth})\n\nNote: Only internal links (same domain) are followed during recursive crawling.\n\nPages found:\n${results.map((r) => `- [Depth ${r.depth}] ${r.url}\n  Content: ${r.content.length} chars\n  Internal links found: ${r.internal_links_found}`).join('\n')}`;
      } else {
        outputText += `\nNo pages could be crawled. This might be due to:\n- The starting URL returned an error\n- No internal links were found\n- All discovered links were filtered out by include/exclude patterns`;
      }
 
      return {
        content: [
          {
            type: 'text',
            text: outputText,
          },
        ],
      };
    } catch (error) {
      throw this.formatError(error, 'crawl recursively');
    }
  }
 
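  /**
   * Fetches a sitemap directly over HTTP and extracts its <loc> URLs,
   * optionally filtered by a regular expression.
   */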
  async parseSitemap(options: { url: string; filter_pattern?: string }) {
    try {
      // Fetch the sitemap directly (not through Crawl4AI server)
      const axios = (await import('axios')).default;
      const response = await axios.get(options.url, {
        timeout: 30000,
        headers: {
          'User-Agent': 'Mozilla/5.0 (compatible; MCP-Crawl4AI/1.0)',
        },
      });
      const sitemapContent = response.data;
 
      // Parse XML content - simple regex approach for basic sitemaps
      const urlMatches = sitemapContent.match(/<loc>(.*?)<\/loc>/g) || [];
      const urls = urlMatches.map((match: string) => match.replace(/<\/?loc>/g, ''));
 
      // Apply filter if provided
      let filteredUrls = urls;
      if (options.filter_pattern) {
        const filterRegex = new RegExp(options.filter_pattern);
        filteredUrls = urls.filter((url: string) => filterRegex.test(url));
      }
 
      return {
        content: [
          {
            type: 'text',
            text: `Sitemap parsed successfully:\n\nTotal URLs found: ${urls.length}\nFiltered URLs: ${filteredUrls.length}\n\nURLs:\n${filteredUrls.slice(0, 100).join('\n')}${filteredUrls.length > 100 ? '\n... and ' + (filteredUrls.length - 100) + ' more' : ''}`,
          },
        ],
      };
    } catch (error) {
      throw this.formatError(error, 'parse sitemap');
    }
  }
 
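  /**
   * Single-URL crawl with full browser and crawler configuration: proxy,
   * session reuse, JavaScript execution, screenshots (optionally saved to
   * disk), PDF export, metadata, links, and JS execution results.
   */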
  async crawl(options: Record<string, unknown>) {
    try {
      // Ensure options is an object
      if (!options || typeof options !== 'object') {
        throw new Error('crawl requires options object with at least a url parameter');
      }
 
      // Build browser_config
      const browser_config: Record<string, unknown> = {
        headless: true, // Always true as noted
      };
 
      if (options.browser_type) browser_config.browser_type = options.browser_type;
      if (options.viewport_width) browser_config.viewport_width = options.viewport_width;
      if (options.viewport_height) browser_config.viewport_height = options.viewport_height;
      if (options.user_agent) browser_config.user_agent = options.user_agent;
      if (options.headers) browser_config.headers = options.headers;
      if (options.cookies) browser_config.cookies = options.cookies;
 
      // Handle proxy configuration
      if (options.proxy_server) {
        browser_config.proxy_config = {
          server: options.proxy_server,
          username: options.proxy_username,
          password: options.proxy_password,
        };
      }
 
      // Build crawler_config
      const crawler_config: Record<string, unknown> = {};
 
      // Content filtering
      if (options.word_count_threshold !== undefined)
        crawler_config.word_count_threshold = options.word_count_threshold;
      if (options.excluded_tags) crawler_config.excluded_tags = options.excluded_tags;
      if (options.remove_overlay_elements) crawler_config.remove_overlay_elements = options.remove_overlay_elements;
 
      // JavaScript execution
      if (options.js_code !== undefined && options.js_code !== null) {
        // If js_code is an array, join it with newlines for the server
        crawler_config.js_code = Array.isArray(options.js_code) ? options.js_code.join('\n') : options.js_code;
      } else if (options.js_code === null) {
        // If js_code is explicitly null, throw a helpful error
        throw new Error('js_code parameter is null. Please provide JavaScript code as a string or array of strings.');
      }
      if (options.wait_for) crawler_config.wait_for = options.wait_for;
      if (options.wait_for_timeout) crawler_config.wait_for_timeout = options.wait_for_timeout;
 
      // Dynamic content
      if (options.delay_before_scroll) crawler_config.delay_before_scroll = options.delay_before_scroll;
      if (options.scroll_delay) crawler_config.scroll_delay = options.scroll_delay;
 
      // Content processing
      if (options.process_iframes) crawler_config.process_iframes = options.process_iframes;
      if (options.exclude_external_links) crawler_config.exclude_external_links = options.exclude_external_links;
 
      // Export options
      if (options.screenshot) crawler_config.screenshot = options.screenshot;
      if (options.pdf) crawler_config.pdf = options.pdf;
 
      // Session and cache
      if (options.session_id) {
        crawler_config.session_id = options.session_id;
        // Update session last_used time
        const session = this.sessions.get(String(options.session_id));
        if (session) {
          session.last_used = new Date();
        }
      }
      if (options.cache_mode) crawler_config.cache_mode = String(options.cache_mode).toLowerCase();
 
      // Performance
      if (options.timeout) crawler_config.timeout = options.timeout;
      if (options.verbose) crawler_config.verbose = options.verbose;
 
      // Additional crawler parameters
      if (options.wait_until) crawler_config.wait_until = options.wait_until;
      if (options.page_timeout) crawler_config.page_timeout = options.page_timeout;
      if (options.wait_for_images) crawler_config.wait_for_images = options.wait_for_images;
      if (options.ignore_body_visibility) crawler_config.ignore_body_visibility = options.ignore_body_visibility;
      if (options.scan_full_page) crawler_config.scan_full_page = options.scan_full_page;
      if (options.remove_forms) crawler_config.remove_forms = options.remove_forms;
      if (options.keep_data_attributes) crawler_config.keep_data_attributes = options.keep_data_attributes;
      if (options.excluded_selector) crawler_config.excluded_selector = options.excluded_selector;
      if (options.only_text) crawler_config.only_text = options.only_text;
 
      // Media handling
      if (options.image_description_min_word_threshold !== undefined)
        crawler_config.image_description_min_word_threshold = options.image_description_min_word_threshold;
      if (options.image_score_threshold !== undefined)
        crawler_config.image_score_threshold = options.image_score_threshold;
      if (options.exclude_external_images) crawler_config.exclude_external_images = options.exclude_external_images;
      if (options.screenshot_wait_for !== undefined) crawler_config.screenshot_wait_for = options.screenshot_wait_for;
 
      // Link filtering
      if (options.exclude_social_media_links)
        crawler_config.exclude_social_media_links = options.exclude_social_media_links;
      if (options.exclude_domains) crawler_config.exclude_domains = options.exclude_domains;
 
      // Page interaction
      if (options.js_only) crawler_config.js_only = options.js_only;
      if (options.simulate_user) crawler_config.simulate_user = options.simulate_user;
      if (options.override_navigator) crawler_config.override_navigator = options.override_navigator;
      if (options.magic) crawler_config.magic = options.magic;
 
      // Virtual scroll
      if (options.virtual_scroll_config) crawler_config.virtual_scroll_config = options.virtual_scroll_config;
 
      // Cache control
      if (options.cache_mode) crawler_config.cache_mode = options.cache_mode;
 
      // Other
      if (options.log_console) crawler_config.log_console = options.log_console;
      if (options.capture_mhtml) crawler_config.capture_mhtml = options.capture_mhtml;
 
      // Call service with proper configuration
      const crawlConfig: AdvancedCrawlConfig = {
        url: options.url ? String(options.url) : undefined,
        crawler_config,
      };
 
      // Only include browser_config if we're not using a session
      if (!options.session_id) {
        crawlConfig.browser_config = browser_config;
      }
 
      const response: CrawlEndpointResponse = await this.service.crawl(crawlConfig);
 
      // Validate response structure
      if (!response || !response.results || response.results.length === 0) {
        throw new Error('Invalid response from server: no results received');
      }
 
      const result: CrawlResultItem = response.results[0];
 
      // Build response content
      const content = [];
 
      // Main content - use markdown.raw_markdown as primary content
      let mainContent = 'No content extracted';
 
      if (result.extracted_content) {
        // Handle extraction results which might be objects or strings
        if (typeof result.extracted_content === 'string') {
          mainContent = result.extracted_content;
        } else if (typeof result.extracted_content === 'object') {
          mainContent = JSON.stringify(result.extracted_content, null, 2);
        }
      } else if (result.markdown?.raw_markdown) {
        mainContent = result.markdown.raw_markdown;
      } else if (result.html) {
        mainContent = result.html;
      } else if (result.fit_html) {
        mainContent = result.fit_html;
      }
 
      content.push({
        type: 'text',
        text: mainContent,
      });
 
      // Screenshot if available
      if (result.screenshot) {
        // Save to local directory if requested
        let savedFilePath: string | undefined;
        if (options.screenshot_directory && typeof options.screenshot_directory === 'string') {
          try {
            // Resolve home directory path
            let screenshotDir = options.screenshot_directory;
            if (screenshotDir.startsWith('~')) {
              const homedir = os.homedir();
              screenshotDir = path.join(homedir, screenshotDir.slice(1));
            }
 
            // Check if user provided a file path instead of directory
            if (screenshotDir.endsWith('.png') || screenshotDir.endsWith('.jpg')) {
              console.warn(
                `Warning: screenshot_directory should be a directory path, not a file path. Using parent directory.`,
              );
              screenshotDir = path.dirname(screenshotDir);
            }
 
            // Ensure directory exists
            await fs.mkdir(screenshotDir, { recursive: true });
 
            // Generate filename from URL and timestamp
            const url = new URL(String(options.url));
            const hostname = url.hostname.replace(/[^a-z0-9]/gi, '-');
            const timestamp = new Date().toISOString().replace(/[:.]/g, '-').slice(0, -5);
            const filename = `${hostname}-${timestamp}.png`;
 
            savedFilePath = path.join(screenshotDir, filename);
 
            // Convert base64 to buffer and save
            const buffer = Buffer.from(result.screenshot, 'base64');
            await fs.writeFile(savedFilePath, buffer);
          } catch (saveError) {
            // Log error but don't fail the operation
            console.error('Failed to save screenshot locally:', saveError);
          }
        }
 
        // If saved locally and screenshot is large (>800KB), don't return the base64 data
        const screenshotSize = Buffer.from(result.screenshot, 'base64').length;
        const shouldReturnImage = !savedFilePath || screenshotSize < 800 * 1024; // 800KB threshold
 
        if (shouldReturnImage) {
          content.push({
            type: 'image',
            data: result.screenshot,
            mimeType: 'image/png',
          });
        }
 
        if (savedFilePath) {
          const sizeInfo = !shouldReturnImage
            ? ` (${Math.round(screenshotSize / 1024)}KB - too large to display inline)`
            : '';
          content.push({
            type: 'text',
            text: `\n---\nScreenshot saved to: ${savedFilePath}${sizeInfo}`,
          });
        }
      }
 
      // PDF if available
      if (result.pdf) {
        content.push({
          type: 'resource',
          resource: {
            uri: `data:application/pdf;name=${encodeURIComponent(new URL(String(options.url)).hostname)}.pdf;base64,${result.pdf}`,
            mimeType: 'application/pdf',
            blob: result.pdf,
          },
        });
      }
 
      // Metadata
      if (result.metadata) {
        content.push({
          type: 'text',
          text: `\n---\nMetadata: ${JSON.stringify(result.metadata, null, 2)}`,
        });
      }
 
      // Links
      if (result.links && (result.links.internal.length > 0 || result.links.external.length > 0)) {
        content.push({
          type: 'text',
          text: `\n---\nLinks: Internal: ${result.links.internal.length}, External: ${result.links.external.length}`,
        });
      }
 
      // JS execution results if available
      if (result.js_execution_result && result.js_execution_result.results.length > 0) {
        const jsResults = result.js_execution_result.results
          .map((res: unknown, idx: number) => {
            return `Result ${idx + 1}: ${JSON.stringify(res, null, 2)}`;
          })
          .join('\n');
        content.push({
          type: 'text',
          text: `\n---\nJavaScript Execution Results:\n${jsResults}`,
        });
      }
 
      return { content };
    } catch (error) {
      throw this.formatError(error, 'crawl');
    }
  }
}