Initial commit
.gitignore (vendored, Normal file, 176 lines added)
@@ -0,0 +1,176 @@
# Created by https://www.toptal.com/developers/gitignore/api/python
# Edit at https://www.toptal.com/developers/gitignore?templates=python

### Python ###
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/

### Python Patch ###
# Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration
poetry.toml

# ruff
.ruff_cache/

# LSP config files
pyrightconfig.json

# End of https://www.toptal.com/developers/gitignore/api/python
code/ep2023-scrapy.py (Normal file, 14 lines added)
@@ -0,0 +1,14 @@
import scrapy


class EuroPython2023Spider(scrapy.Spider):
    name = "europython"

    start_urls = [
        "https://ep2023.europython.eu/sessions",
        "https://ep2023.europython.eu/tutorials",
    ]

    def parse(self, response):
        for session in response.css("h2 a::text").getall():
            yield {"title": session}
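Since this spider is self-contained, it can be tried without creating a Scrapy project; for example (the output file name is arbitrary):

    scrapy runspider code/ep2023-scrapy.py -o sessions.json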
code/exercise-1.py (Normal file, 22 lines added)
@@ -0,0 +1,22 @@
# Exercise 1

# Target: https://quotes.toscrape.com/

# On this page, you will find a collection of quotes along with their respective
# authors. Each quote is accompanied by a link that directs you to a dedicated
# page providing additional details about the author, the quote itself, and a list of associated tags.

# Your task is to extract all of this information and export it into a JSON lines file.

# TIP: your parse method can be used to yield items or schedule new requests for later processing.
import scrapy


class QuotesSpider(scrapy.Spider):
    name = "quotes"
    allowed_domains = ["quotes.toscrape.com"]
    start_urls = ["https://quotes.toscrape.com"]

    def parse(self, response):
        # TODO
        ...
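Note (not part of the commit): one way the TODO above could be filled in. The selectors and the spider name below are assumptions about the quotes.toscrape.com markup, so treat this as a sketch rather than the reference solution; running the spider with -o quotes.jsonl then produces the requested JSON lines export.

import scrapy


class QuotesSolutionSpider(scrapy.Spider):
    # Hypothetical name; the stub above uses "quotes".
    name = "quotes_solution"
    allowed_domains = ["quotes.toscrape.com"]
    start_urls = ["https://quotes.toscrape.com"]

    def parse(self, response):
        # Yield one request per quote so the author page can be scraped too,
        # carrying the data collected so far via cb_kwargs.
        for quote in response.css("div.quote"):
            yield response.follow(
                quote.css("span a::attr(href)").get(),
                callback=self.parse_author,
                cb_kwargs={
                    "text": quote.css("span.text::text").get(),
                    "tags": quote.css("div.tags a.tag::text").getall(),
                },
            )
        # Schedule the next listing page, if there is one.
        next_page = response.css("li.next a::attr(href)").get()
        if next_page:
            yield response.follow(next_page, callback=self.parse)

    def parse_author(self, response, text, tags):
        yield {
            "quote": text,
            "tags": tags,
            "author": response.css("h3.author-title::text").get(),
            "born": response.css("span.author-born-date::text").get(),
        }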
code/exercise-2.py (Normal file, 26 lines added)
@@ -0,0 +1,26 @@
# Exercise 2

# Target: https://quotes.toscrape.com/scroll

# There has been another modification to the layout. Our quotes page
# now features an infinite scroll functionality, meaning that new
# content is dynamically loaded as you reach the bottom of the page.

# TIP: To understand this behavior, open your browser and access
# our target page. Press F12 to open the developer tools and
# select the "Network" tab. Observe what occurs in the network
# requests when you navigate to the end of the page.
import scrapy


class QuotesScrollSpider(scrapy.Spider):
    name = "quotes_scroll"
    allowed_domains = ["quotes.toscrape.com"]

    def start_requests(self):
        # TODO
        ...

    def parse(self, response):
        # TODO
        ...
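Note (not part of the commit): the Network tab shows the scroll page fetching its quotes from a JSON endpoint, so one possible solution is to query that endpoint directly, much as the quote spider in code/monitoring/monitoring/spiders/quote.py later in this commit does. A minimal sketch along those lines:

import scrapy


class QuotesScrollSolutionSpider(scrapy.Spider):
    # Hypothetical name; the stub above uses "quotes_scroll".
    name = "quotes_scroll_solution"
    allowed_domains = ["quotes.toscrape.com"]
    api_url = "https://quotes.toscrape.com/api/quotes?page={page}"

    def start_requests(self):
        yield scrapy.Request(self.api_url.format(page=1))

    def parse(self, response):
        data = response.json()
        for quote in data.get("quotes", []):
            yield {
                "quote": quote.get("text"),
                "author": quote.get("author", {}).get("name"),
                "tags": quote.get("tags"),
            }
        # Keep paging until the API reports there is no next page.
        if data.get("has_next"):
            yield scrapy.Request(self.api_url.format(page=data["page"] + 1))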
code/exercise-3.py (Normal file, 21 lines added)
@@ -0,0 +1,21 @@
# Exercise 3

# Target: https://quotes.toscrape.com/js/

# The spider you created in the first exercise has ceased to function.
# Although no errors are evident in the logs, the spider is not returning any data.

# TIP: To troubleshoot, open your browser and navigate to our target page.
# Press Ctrl+U (View Page Source) to inspect the HTML content of the page.
import json
import scrapy


class QuotesJSSpider(scrapy.Spider):
    name = "quotes_js"
    allowed_domains = ["quotes.toscrape.com"]
    start_urls = ["https://quotes.toscrape.com/js/"]

    def parse(self, response):
        # TODO
        ...
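Note (not part of the commit): on the /js/ page the quotes are embedded in a JavaScript "var data = [...]" block instead of rendered HTML, which is why a CSS-based spider returns nothing. A sketch of one possible fix, assuming that script layout (the regular expression below is an assumption about the page source):

import json
import re

import scrapy


class QuotesJSSolutionSpider(scrapy.Spider):
    # Hypothetical name; the stub above uses "quotes_js".
    name = "quotes_js_solution"
    allowed_domains = ["quotes.toscrape.com"]
    start_urls = ["https://quotes.toscrape.com/js/"]

    def parse(self, response):
        # Pull the embedded "var data = [...]" array out of the page source
        # and parse it as JSON instead of relying on rendered HTML.
        match = re.search(r"var data = (\[.*?\]);", response.text, re.DOTALL)
        if not match:
            return
        for quote in json.loads(match.group(1)):
            yield {
                "quote": quote.get("text"),
                "author": quote.get("author", {}).get("name"),
                "tags": quote.get("tags"),
            }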
code/exercise-4.py (Normal file, 25 lines added)
@@ -0,0 +1,25 @@
import scrapy


class QuotesViewStateSpider(scrapy.Spider):
    name = "quotes_viewstate"
    allowed_domains = ["quotes.toscrape.com"]
    start_urls = ["http://quotes.toscrape.com/search.aspx"]

    def parse(self, response):
        authors = response.css("#author option::attr(value)").getall()

        form_data = {
            # TODO
        }
        for author in authors:
            yield scrapy.FormRequest(
                response.urljoin(response.css("form::attr(action)").get()),
                callback=self.parse_author_tags,
                formdata=form_data,
                cb_kwargs={"author": author}
            )

    def parse_author_tags(self, response, author):
        # TODO
        ...
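Note (not part of the commit): a sketch of what the missing pieces could look like. search.aspx is an ASP.NET form, so the hidden __VIEWSTATE value has to be posted back along with the selected author; the field names and the #tag selector below are assumptions about the page's markup.

import scrapy


class QuotesViewStateSolutionSpider(scrapy.Spider):
    # Hypothetical name; the stub above uses "quotes_viewstate".
    name = "quotes_viewstate_solution"
    allowed_domains = ["quotes.toscrape.com"]
    start_urls = ["http://quotes.toscrape.com/search.aspx"]

    def parse(self, response):
        # Grab the hidden ASP.NET view state once and echo it back with
        # every form submission.
        viewstate = response.css("#__VIEWSTATE::attr(value)").get()
        action = response.urljoin(response.css("form::attr(action)").get())
        for author in response.css("#author option::attr(value)").getall():
            if not author:
                continue  # skip an empty placeholder option, if present
            yield scrapy.FormRequest(
                action,
                formdata={"author": author, "__VIEWSTATE": viewstate},
                callback=self.parse_author_tags,
                cb_kwargs={"author": author},
            )

    def parse_author_tags(self, response, author):
        yield {
            "author": author,
            "tags": response.css("#tag option::attr(value)").getall(),
        }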
code/groups-requests.py (Normal file, 13 lines added)
@@ -0,0 +1,13 @@
import requests
from parsel import Selector


start_urls = [
    "http://python.org.br",
]
for url in start_urls:
    response = requests.get(url)
    content = Selector(text=response.text)

    for group in content.css("h4.card-title::text").getall():
        print(group)
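This one is a plain script rather than a Scrapy spider; with requests and parsel installed it can be run directly and prints the group names it finds:

    python code/groups-requests.py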
code/monitoring/monitoring/__init__.py (Normal file, empty)
code/monitoring/monitoring/items.py (Normal file, 12 lines added)
@@ -0,0 +1,12 @@
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html

import scrapy


class MonitoringItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    pass
code/monitoring/monitoring/middlewares.py (Normal file, 102 lines added)
@@ -0,0 +1,102 @@
# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html

# useful for handling different item types with a single interface
from itemadapter import ItemAdapter, is_item
from scrapy import signals


class MonitoringSpiderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.

        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.

        # Must return an iterable of Request, or item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.

        # Should return either None or an iterable of Request or item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn’t have a response associated.

        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info("Spider opened: %s" % spider.name)


class MonitoringDownloaderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.

        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.

        # Must either;
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.

        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info("Spider opened: %s" % spider.name)
code/monitoring/monitoring/monitors.py (Normal file, 24 lines added)
@@ -0,0 +1,24 @@
from spidermon import Monitor, MonitorSuite, monitors
from spidermon.contrib.actions.reports.files import CreateFileReport


@monitors.name("Item count")
class ItemCountMonitor(Monitor):
    @monitors.name("Minimum number of items")
    def test_minimum_number_of_items(self):
        item_extracted = getattr(self.data.stats, "item_scraped_count", 0)
        minimum_threshold = 10

        msg = "Extracted less than {} items".format(minimum_threshold)
        self.assertTrue(item_extracted >= minimum_threshold, msg=msg)


class SpiderCloseMonitorSuite(MonitorSuite):

    monitors = [
        ItemCountMonitor,
    ]

    monitors_finished_actions = [
        CreateFileReport,
    ]
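This suite only runs once Spidermon is enabled. The sketch below mirrors the commented-out block at the end of code/monitoring/monitoring/settings.py in this commit and shows the settings that would wire it up:

# Spidermon activation, mirroring the commented block in settings.py.
SPIDERMON_ENABLED = True

EXTENSIONS = {
    "spidermon.contrib.scrapy.extensions.Spidermon": 500,
}

SPIDERMON_SPIDER_CLOSE_MONITORS = ("monitoring.monitors.SpiderCloseMonitorSuite",)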
code/monitoring/monitoring/pipelines.py (Normal file, 13 lines added)
@@ -0,0 +1,13 @@
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter


class MonitoringPipeline:
    def process_item(self, item, spider):
        return item
code/monitoring/monitoring/settings.py (Normal file, 106 lines added)
@@ -0,0 +1,106 @@
# Scrapy settings for monitoring project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = "monitoring"

SPIDER_MODULES = ["monitoring.spiders"]
NEWSPIDER_MODULE = "monitoring.spiders"


# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = "monitoring (+http://www.yourdomain.com)"

# Obey robots.txt rules
ROBOTSTXT_OBEY = True

# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
# COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False

# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
#     "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
#     "Accept-Language": "en",
# }

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
#     "monitoring.middlewares.MonitoringSpiderMiddleware": 543,
# }

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# DOWNLOADER_MIDDLEWARES = {
#     "monitoring.middlewares.MonitoringDownloaderMiddleware": 543,
# }

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
#     "scrapy.extensions.telnet.TelnetConsole": None,
# }

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# ITEM_PIPELINES = {
#     "monitoring.pipelines.MonitoringPipeline": 300,
# }

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = "httpcache"
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = "scrapy.extensions.httpcache.FilesystemCacheStorage"

# Set settings whose default value is deprecated to a future-proof value
REQUEST_FINGERPRINTER_IMPLEMENTATION = "2.7"
TWISTED_REACTOR = "twisted.internet.asyncioreactor.AsyncioSelectorReactor"
FEED_EXPORT_ENCODING = "utf-8"

# Monitoring
# SPIDERMON_ENABLED = True

# EXTENSIONS = {
#     "spidermon.contrib.scrapy.extensions.Spidermon": 500,
# }

# SPIDERMON_SPIDER_CLOSE_MONITORS = ("monitoring.monitors.SpiderCloseMonitorSuite",)

# SPIDERMON_REPORT_TEMPLATE = "reports/email/monitors/result.jinja"
# SPIDERMON_REPORT_CONTEXT = {"report_title": "Spidermon File Report"}
# SPIDERMON_REPORT_FILENAME = "my_report.html"
code/monitoring/monitoring/spiders/__init__.py (Normal file, 4 lines added)
@@ -0,0 +1,4 @@
# This package will contain the spiders of your Scrapy project
#
# Please refer to the documentation for information on how to create and manage
# your spiders.
code/monitoring/monitoring/spiders/quote.py (Normal file, 30 lines added)
@@ -0,0 +1,30 @@
import scrapy


class QuoteSpider(scrapy.Spider):
    name = "quote"
    allowed_domains = ["quotes.toscrape.com"]
    api_url = "https://quotes.toscrape.com/api/quotes?page={page}"

    def start_requests(self):
        yield scrapy.Request(self.api_url.format(page=1))

    def parse(self, response):
        data = response.json()
        current_page = data.get("page")

        for quote in data.get("quotes"):
            yield {
                "quote": quote.get("text"),
                "author": quote.get("author").get("name"),
                "author_url": response.urljoin(
                    quote.get("author").get("goodreads_link")
                ),
                "tags": quote.get("tags"),
            }

        if data.get("has_next"):
            next_page = current_page + 1
            yield scrapy.Request(
                self.api_url.format(page=next_page),
            )
code/monitoring/scrapy.cfg (Normal file, 11 lines added)
@@ -0,0 +1,11 @@
# Automatically created by: scrapy startproject
#
# For more information about the [deploy] section see:
# https://scrapyd.readthedocs.io/en/latest/deploy.html

[settings]
default = monitoring.settings

[deploy]
#url = http://localhost:6800/
project = monitoring
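With the project configuration in place, the quote spider above can be run from the code/monitoring directory; the feed file name is just an example:

    cd code/monitoring
    scrapy crawl quote -o quotes.jsonl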
code/parsing-data-css.py (Normal file, 18 lines added)
@@ -0,0 +1,18 @@
import scrapy


class EuroPython2023Spider(scrapy.Spider):
    name = "europython"

    start_urls = [
        "https://ep2023.europython.eu/sessions",
        "https://ep2023.europython.eu/tutorials",
    ]

    def parse(self, response):
        sessions = response.css(".mt-12")
        for session in sessions:
            yield {
                "title": session.css("h2 a::text").get(),
                "presenter": session.css("p a::text").get(),
            }
code/parsing-data-mixed.py (Normal file, 18 lines added)
@@ -0,0 +1,18 @@
import scrapy


class EuroPython2023Spider(scrapy.Spider):
    name = "europython"

    start_urls = [
        "https://ep2023.europython.eu/sessions",
        "https://ep2023.europython.eu/tutorials",
    ]

    def parse(self, response):
        sessions = response.css(".mt-12")
        for session in sessions:
            yield {
                "title": session.xpath("./h2/a/text()").get(),
                "presenter": session.xpath("./p/a/text()").get(),
            }
code/parsing-data-xpath.py (Normal file, 18 lines added)
@@ -0,0 +1,18 @@
import scrapy


class EuroPython2023Spider(scrapy.Spider):
    name = "europython"

    start_urls = [
        "https://ep2023.europython.eu/sessions",
        "https://ep2023.europython.eu/tutorials",
    ]

    def parse(self, response):
        sessions = response.xpath("//div[contains(@class, 'mt-12')]")
        for session in sessions:
            yield {
                "title": session.xpath("./h2/a/text()").get(),
                "presenter": session.xpath("./p/a/text()").get(),
            }
code/quotes-playwright.py (Normal file, 37 lines added)
@@ -0,0 +1,37 @@
import scrapy
from scrapy_playwright.page import PageMethod


class QuotesPlaywrightSpider(scrapy.Spider):
    name = "quotes-playwright"
    custom_settings = {
        "DOWNLOAD_HANDLERS": {
            "http": "scrapy_playwright.handler.ScrapyPlaywrightDownloadHandler",
            "https": "scrapy_playwright.handler.ScrapyPlaywrightDownloadHandler",
        },
        "TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
    }

    def start_requests(self):
        yield scrapy.Request(
            url="http://quotes.toscrape.com/scroll",
            meta=dict(
                playwright=True,
                playwright_include_page=True,
                playwright_page_methods=[
                    PageMethod("wait_for_selector", "div.quote"),
                    PageMethod(
                        "evaluate", "window.scrollBy(0, document.body.scrollHeight)"
                    ),
                    PageMethod(
                        "wait_for_selector", "div.quote:nth-child(11)"
                    ),
                ],
            ),
        )

    async def parse(self, response):
        page = response.meta["playwright_page"]
        await page.screenshot(path="quotes.png", full_page=True)
        await page.close()
        return {"quote_count": len(response.css("div.quote"))}
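Note: scrapy-playwright drives a real browser, so besides installing the package the Playwright browser binaries need to be downloaded once (Chromium is its default browser type):

    playwright install chromium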
code/requirements (Normal file, 6 lines added)
@@ -0,0 +1,6 @@
requests
scrapy
scrapy-playwright
spidermon
ipython
ipdb
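The dependencies can be installed with pip, pointing at this file (note it has no .txt extension):

    pip install -r code/requirements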
presentation/images/cat_keyboard.gif (Normal file, binary, 134 KiB)
presentation/images/exercise-1-page.png (Normal file, binary, 84 KiB)
presentation/images/exercise-1-sc.png (Normal file, binary, 164 KiB)
presentation/images/exercise-2-network.png (Normal file, binary, 137 KiB)
presentation/images/exercise-2-scroll.gif (Normal file, binary, 8.6 MiB)
presentation/images/exercise-2-url.png (Normal file, binary, 113 KiB)
presentation/images/exercise-3-js.png (Normal file, binary, 99 KiB)
presentation/images/exercise-4-form-1.png (Normal file, binary, 126 KiB)
presentation/images/exercise-4-form-2.png (Normal file, binary, 262 KiB)
presentation/images/exercise-4-form-3.png (Normal file, binary, 74 KiB)
presentation/images/exercise-4-page.png (Normal file, binary, 73 KiB)
presentation/images/foto-perfil-quadrada.png (Normal file, binary, 37 KiB)
presentation/images/scrapy_architecture_02.png (Normal file, binary, 69 KiB)
presentation/images/scrapylogo.png (Normal file, binary, 13 KiB)