docs: Add guide "HttpCrawler with custom parser" #1622
Open
Mantisus wants to merge 18 commits into apify:master from Mantisus:custom-http-parser
Changes from all commits (18 commits):
e5aff86 add docs "HttpCrawler with custom parser" (Mantisus)
247ef79 fix (Mantisus)
758424b add `AbstractHttpCrawler` section (Mantisus)
7a9e092 del extra file (Mantisus)
a895901 add AdaptivePlaywrightCrawler example (Mantisus)
5844f4b Update docs/guides/code_examples/crawler_custom_parser/selectolax_ada… (Mantisus)
7fe669e integrate to HTTP crawlers guide (Mantisus)
2bc5967 Update pyproject.toml (Mantisus)
2b1f41f Update docs/guides/http_crawlers.mdx (Mantisus)
08ee00c Update docs/guides/http_crawlers.mdx (Mantisus)
8195397 Update docs/guides/http_crawlers.mdx (Mantisus)
a5be06d Update docs/guides/http_crawlers.mdx (Mantisus)
e22346b Update docs/guides/http_crawlers.mdx (Mantisus)
3cfacf0 Update docs/guides/http_crawlers.mdx (Mantisus)
ac427cc Update docs/guides/http_crawlers.mdx (Mantisus)
bbc0157 Merge branch 'apify:master' into custom-http-parser (Mantisus)
09c6624 Resolve (Mantisus)
24ca257 replace match to item (Mantisus)
Empty file.
New file (63 additions): HttpCrawler with Selectolax (Lexbor backend) as a custom parser.

```python
import asyncio

from pydantic import ValidationError
from selectolax.lexbor import LexborHTMLParser
from yarl import URL

from crawlee import Request
from crawlee.crawlers import HttpCrawler, HttpCrawlingContext


async def main() -> None:
    crawler = HttpCrawler(
        max_request_retries=1,
        max_requests_per_crawl=10,
    )

    @crawler.router.default_handler
    async def request_handler(context: HttpCrawlingContext) -> None:
        context.log.info(f'Processing {context.request.url} ...')

        # Parse the HTML content using Selectolax with the Lexbor backend.
        parsed_html = LexborHTMLParser(await context.http_response.read())

        # Extract data from the page.
        data = {
            'url': context.request.url,
            'title': parsed_html.css_first('title').text(),
            'h1s': [h1.text() for h1 in parsed_html.css('h1')],
            'h2s': [h2.text() for h2 in parsed_html.css('h2')],
            'h3s': [h3.text() for h3 in parsed_html.css('h3')],
        }
        await context.push_data(data)

        # CSS selector matching only valid href attributes.
        links_selector = (
            'a[href]:not([href^="#"]):not([href^="javascript:"]):not([href^="mailto:"])'
        )
        base_url = URL(context.request.url)
        extracted_requests = []

        # Extract links.
        for item in parsed_html.css(links_selector):
            href = item.attributes.get('href')
            if not href:
                continue

            # Convert relative URLs to absolute if needed.
            url = str(base_url.join(URL(href)))
            try:
                request = Request.from_url(url)
            except ValidationError as exc:
                context.log.warning(f'Skipping invalid URL "{url}": {exc}')
                continue
            extracted_requests.append(request)

        # Add extracted requests to the queue with the same-domain strategy.
        await context.add_requests(extracted_requests, strategy='same-domain')

    await crawler.run(['https://crawlee.dev'])


if __name__ == '__main__':
    asyncio.run(main())
```
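The handler resolves relative hrefs against the request URL with yarl before building each `Request`. A minimal standalone illustration of that join behavior (the URLs here are only illustrative):

```python
from yarl import URL

base = URL('https://crawlee.dev/docs/guides/')

# Relative paths resolve against the base URL.
print(base.join(URL('http-crawlers')))  # https://crawlee.dev/docs/guides/http-crawlers
# Root-relative paths replace the whole path.
print(base.join(URL('/api')))           # https://crawlee.dev/api
# Absolute URLs pass through unchanged.
print(base.join(URL('https://apify.com/')))  # https://apify.com/
```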
New file (61 additions): the same crawler built on lxml, with XPath 1.0 link extraction.

```python
import asyncio

from lxml import html
from pydantic import ValidationError

from crawlee import Request
from crawlee.crawlers import HttpCrawler, HttpCrawlingContext


async def main() -> None:
    crawler = HttpCrawler(
        max_request_retries=1,
        max_requests_per_crawl=10,
    )

    @crawler.router.default_handler
    async def request_handler(context: HttpCrawlingContext) -> None:
        context.log.info(f'Processing {context.request.url} ...')

        # Parse the HTML content using lxml.
        parsed_html = html.fromstring(await context.http_response.read())

        # Extract data from the page.
        data = {
            'url': context.request.url,
            'title': parsed_html.findtext('.//title'),
            'h1s': [h1.text_content() for h1 in parsed_html.findall('.//h1')],
            'h2s': [h2.text_content() for h2 in parsed_html.findall('.//h2')],
            'h3s': [h3.text_content() for h3 in parsed_html.findall('.//h3')],
        }
        await context.push_data(data)

        # Convert relative URLs to absolute before extracting links.
        parsed_html.make_links_absolute(context.request.url, resolve_base_href=True)

        # XPath 1.0 selector for extracting valid href attributes.
        links_xpath = (
            '//a/@href[not(starts-with(., "#")) '
            'and not(starts-with(., "javascript:")) '
            'and not(starts-with(., "mailto:"))]'
        )

        extracted_requests = []

        # Extract links.
        for url in parsed_html.xpath(links_xpath):
            try:
                request = Request.from_url(url)
            except ValidationError as exc:
                context.log.warning(f'Skipping invalid URL "{url}": {exc}')
                continue
            extracted_requests.append(request)

        # Add extracted requests to the queue with the same-domain strategy.
        await context.add_requests(extracted_requests, strategy='same-domain')

    await crawler.run(['https://crawlee.dev'])


if __name__ == '__main__':
    asyncio.run(main())
```
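Note the design difference from the Selectolax version: instead of joining URLs one by one, the lxml example calls `make_links_absolute`, which rewrites every link in the tree in place. A small self-contained illustration (the markup is made up):

```python
from lxml import html

doc = html.fromstring('<body><a href="/docs">Docs</a> <a href="guide">Guide</a></body>')
doc.make_links_absolute('https://crawlee.dev/examples/', resolve_base_href=True)

# Both hrefs are now absolute.
print([a.get('href') for a in doc.findall('.//a')])
# ['https://crawlee.dev/docs', 'https://crawlee.dev/examples/guide']
```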
docs/guides/code_examples/http_crawlers/lxml_saxonche_parser.py (new file, 77 additions):

```python
import asyncio

from lxml import html
from pydantic import ValidationError
from saxonche import PySaxonProcessor

from crawlee import Request
from crawlee.crawlers import HttpCrawler, HttpCrawlingContext


async def main() -> None:
    crawler = HttpCrawler(
        max_request_retries=1,
        max_requests_per_crawl=10,
    )

    # Create the Saxon processor once and reuse it across requests.
    saxon_proc = PySaxonProcessor(license=False)
    xpath_proc = saxon_proc.new_xpath_processor()

    @crawler.router.default_handler
    async def request_handler(context: HttpCrawlingContext) -> None:
        context.log.info(f'Processing {context.request.url} ...')

        # Parse HTML with lxml.
        parsed_html = html.fromstring(await context.http_response.read())
        # Convert relative URLs to absolute before extracting links.
        parsed_html.make_links_absolute(context.request.url, resolve_base_href=True)
        # Serialize the parsed HTML to XML for Saxon processing.
        xml = html.tostring(parsed_html, encoding='unicode', method='xml')
        # Parse the XML with Saxon.
        parsed_xml = saxon_proc.parse_xml(xml_text=xml)
        # Set the parsed document as the context for XPath evaluation.
        xpath_proc.set_context(xdm_item=parsed_xml)

        # Extract data using the XPath 2.0 string() function.
        data = {
            'url': context.request.url,
            'title': xpath_proc.evaluate_single('.//title/string()'),
            'h1s': [str(h) for h in (xpath_proc.evaluate('//h1/string()') or [])],
            'h2s': [str(h) for h in (xpath_proc.evaluate('//h2/string()') or [])],
            'h3s': [str(h) for h in (xpath_proc.evaluate('//h3/string()') or [])],
        }
        await context.push_data(data)

        # XPath 2.0 with distinct-values() to deduplicate links and strip fragments.
        links_xpath = """
        distinct-values(
            for $href in //a/@href[
                not(starts-with(., "#"))
                and not(starts-with(., "javascript:"))
                and not(starts-with(., "mailto:"))
            ]
            return replace($href, "#.*$", "")
        )
        """

        extracted_requests = []

        # Extract links.
        for item in xpath_proc.evaluate(links_xpath) or []:
            url = item.string_value
            try:
                request = Request.from_url(url)
            except ValidationError as exc:
                context.log.warning(f'Skipping invalid URL "{url}": {exc}')
                continue
            extracted_requests.append(request)

        # Add extracted requests to the queue with the same-domain strategy.
        await context.add_requests(extracted_requests, strategy='same-domain')

    await crawler.run(['https://crawlee.dev'])


if __name__ == '__main__':
    asyncio.run(main())
```
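Two design choices are worth noting here. The `PySaxonProcessor` and its XPath processor are created once, outside the handler, and captured by the closure, so the relatively expensive Saxon initialization is not repeated per request. And Saxon is involved at all because lxml (libxml2) only supports XPath 1.0; `distinct-values()` and `replace()` used above are XPath 2.0 features.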
New file (64 additions): the PyQuery variant with jQuery-style selectors.

```python
import asyncio

from pydantic import ValidationError
from pyquery import PyQuery
from yarl import URL

from crawlee import Request
from crawlee.crawlers import HttpCrawler, HttpCrawlingContext


async def main() -> None:
    crawler = HttpCrawler(
        max_request_retries=1,
        max_requests_per_crawl=10,
    )

    @crawler.router.default_handler
    async def request_handler(context: HttpCrawlingContext) -> None:
        context.log.info(f'Processing {context.request.url} ...')

        # Parse the HTML content using PyQuery.
        parsed_html = PyQuery(await context.http_response.read())

        # Extract data using jQuery-style selectors.
        data = {
            'url': context.request.url,
            'title': parsed_html('title').text(),
            'h1s': [h1.text() for h1 in parsed_html('h1').items()],
            'h2s': [h2.text() for h2 in parsed_html('h2').items()],
            'h3s': [h3.text() for h3 in parsed_html('h3').items()],
        }
        await context.push_data(data)

        # CSS selector matching only valid href attributes.
        links_selector = (
            'a[href]:not([href^="#"]):not([href^="javascript:"]):not([href^="mailto:"])'
        )
        base_url = URL(context.request.url)

        extracted_requests = []

        # Extract links.
        for item in parsed_html(links_selector).items():
            href = item.attr('href')
            if not href:
                continue

            # Convert relative URLs to absolute if needed.
            url = str(base_url.join(URL(str(href))))
            try:
                request = Request.from_url(url)
            except ValidationError as exc:
                context.log.warning(f'Skipping invalid URL "{url}": {exc}')
                continue
            extracted_requests.append(request)

        # Add extracted requests to the queue with the same-domain strategy.
        await context.add_requests(extracted_requests, strategy='same-domain')

    await crawler.run(['https://crawlee.dev'])


if __name__ == '__main__':
    asyncio.run(main())
```
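The `.items()` calls matter here: iterating a PyQuery result directly yields raw lxml elements, while `.items()` yields PyQuery-wrapped ones, which is what makes the `h1.text()` and `item.attr('href')` calls above work.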
docs/guides/code_examples/http_crawlers/scrapling_parser.py (new file, 74 additions):

```python
import asyncio

from pydantic import ValidationError
from scrapling.parser import Selector
from yarl import URL

from crawlee import Request
from crawlee.crawlers import HttpCrawler, HttpCrawlingContext


async def main() -> None:
    crawler = HttpCrawler(
        max_request_retries=1,
        max_requests_per_crawl=10,
    )

    @crawler.router.default_handler
    async def request_handler(context: HttpCrawlingContext) -> None:
        context.log.info(f'Processing {context.request.url} ...')

        # Parse the HTML content using Scrapling.
        page = Selector(await context.http_response.read(), url=context.request.url)

        # Extract data using XPath selectors, with the .get_all_text() method
        # for full text content.
        title_el = page.xpath_first('//title')
        data = {
            'url': context.request.url,
            'title': title_el.text if isinstance(title_el, Selector) else title_el,
            'h1s': [
                h1.get_all_text() if isinstance(h1, Selector) else h1
                for h1 in page.xpath('//h1')
            ],
            'h2s': [
                h2.get_all_text() if isinstance(h2, Selector) else h2
                for h2 in page.xpath('//h2')
            ],
            'h3s': [
                h3.get_all_text() if isinstance(h3, Selector) else h3
                for h3 in page.xpath('//h3')
            ],
        }
        await context.push_data(data)

        # CSS selector matching only valid href attributes.
        links_selector = (
            'a[href]:not([href^="#"]):not([href^="javascript:"]):not([href^="mailto:"])'
        )
        base_url = URL(context.request.url)
        extracted_requests = []

        # Extract links.
        for item in page.css(links_selector):
            href = item.attrib.get('href') if isinstance(item, Selector) else None
            if not href:
                continue

            # Convert relative URLs to absolute if needed.
            url = str(base_url.join(URL(href)))
            try:
                request = Request.from_url(url)
            except ValidationError as exc:
                context.log.warning(f'Skipping invalid URL "{url}": {exc}')
                continue
            extracted_requests.append(request)

        # Add extracted requests to the queue with the same-domain strategy.
        await context.add_requests(extracted_requests, strategy='same-domain')

    await crawler.run(['https://crawlee.dev'])


if __name__ == '__main__':
    asyncio.run(main())
```
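The repeated `isinstance(..., Selector)` guards look redundant, but they defend against Scrapling queries returning plain text values rather than element nodes; non-`Selector` results are passed through as-is (or skipped, in the link loop) instead of failing on a missing method.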
docs/guides/code_examples/http_crawlers/selectolax_adaptive_run.py (new file, 38 additions):

```python
import asyncio

from crawlee.crawlers import (
    AdaptivePlaywrightCrawler,
    AdaptivePlaywrightCrawlerStatisticState,
    AdaptivePlaywrightCrawlingContext,
)
from crawlee.statistics import Statistics

from .selectolax_parser import SelectolaxLexborParser


async def main() -> None:
    crawler: AdaptivePlaywrightCrawler = AdaptivePlaywrightCrawler(
        max_requests_per_crawl=10,
        # Use the custom Selectolax parser for static content parsing.
        static_parser=SelectolaxLexborParser(),
        # Set up statistics with AdaptivePlaywrightCrawlerStatisticState.
        statistics=Statistics(state_model=AdaptivePlaywrightCrawlerStatisticState),
    )

    @crawler.router.default_handler
    async def handle_request(context: AdaptivePlaywrightCrawlingContext) -> None:
        context.log.info(f'Processing {context.request.url} ...')
        data = {
            'url': context.request.url,
            'title': await context.query_selector_one('title'),
        }

        await context.push_data(data)

        await context.enqueue_links()

    await crawler.run(['https://crawlee.dev/'])


if __name__ == '__main__':
    asyncio.run(main())
```
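The `SelectolaxLexborParser` imported above comes from a sibling `selectolax_parser.py` module added by this PR but not shown in this diff. As a rough outline of what implementing it involves, here is a hedged sketch against Crawlee's `AbstractHttpParser`; the import path, the exact abstract method set, and the signatures are assumptions and should be checked against the installed Crawlee version:

```python
from __future__ import annotations

from typing import TYPE_CHECKING

from selectolax.lexbor import LexborHTMLParser, LexborNode

# Assumption: import path as used internally by Crawlee's own parsers;
# it may differ between Crawlee versions.
from crawlee.crawlers._abstract_http import AbstractHttpParser

if TYPE_CHECKING:
    from collections.abc import Iterable, Sequence

    from crawlee.http_clients import HttpResponse


class SelectolaxLexborParser(AbstractHttpParser[LexborHTMLParser, LexborNode]):
    """Sketch of a Selectolax (Lexbor) static parser; signatures are assumptions."""

    async def parse(self, response: HttpResponse) -> LexborHTMLParser:
        # Build the parse tree from the raw response body.
        return LexborHTMLParser(await response.read())

    async def parse_text(self, text: str) -> LexborHTMLParser:
        return LexborHTMLParser(text)

    async def select(
        self, parsed_content: LexborHTMLParser, selector: str
    ) -> Sequence[LexborNode]:
        return tuple(parsed_content.css(selector))

    def is_matching_selector(
        self, parsed_content: LexborHTMLParser, selector: str
    ) -> bool:
        # Cheap existence check for a selector in the parsed document.
        return parsed_content.css_first(selector) is not None

    def find_links(
        self, parsed_content: LexborHTMLParser, selector: str
    ) -> Iterable[str]:
        # Collect href attributes for enqueue_links().
        return [
            href
            for node in parsed_content.css(selector)
            if (href := node.attributes.get('href'))
        ]
```

With a parser like this plugged in as `static_parser`, the adaptive crawler can handle static responses with Lexbor and fall back to Playwright only when rendering is needed.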