Initial commit - Data scraper for Jundiaí events

Renne Rocha 2024-12-03 07:57:52 -03:00
commit d7f6b31b53
15 changed files with 2035 additions and 0 deletions

182
.gitignore vendored Normal file

@@ -0,0 +1,182 @@
# Created by https://www.toptal.com/developers/gitignore/api/python
# Edit at https://www.toptal.com/developers/gitignore?templates=python
### Python ###
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
### Python Patch ###
# Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration
poetry.toml
# ruff
.ruff_cache/
# LSP config files
pyrightconfig.json
# End of https://www.toptal.com/developers/gitignore/api/python
# Generated using ignr.py - github.com/Antrikshy/ignr.py
.~lock*
*.swp
*.csv

20
.pre-commit-config.yaml Normal file

@@ -0,0 +1,20 @@
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks
repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v5.0.0
    hooks:
      - id: trailing-whitespace
      - id: end-of-file-fixer
      - id: check-yaml
      - id: check-added-large-files
  - repo: https://github.com/pycqa/isort
    rev: 5.13.2
    hooks:
      - id: isort
        name: isort (python)
  - repo: https://github.com/psf/black-pre-commit-mirror
    rev: 24.10.0
    hooks:
      - id: black
        language_version: python3.11

1
.python-version Normal file

@@ -0,0 +1 @@
3.13.0

BIN
data_collector/agenda.db Normal file

Binary file not shown.

12
data_collector/data_collector/items.py Normal file

@@ -0,0 +1,12 @@
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy


class DataCollectorItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    pass
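The generated items.py is left as a stub, and the spider further down yields plain dicts instead. If typed items were wanted, a minimal sketch mirroring the fields the spider and pipeline pass around could look like this (the class name is illustrative and not part of this commit):

import scrapy


class EventItem(scrapy.Item):
    # Illustrative only: these fields mirror the dict keys produced by
    # the cultura_jundiai spider and consumed by DataCollectorPipeline.
    title = scrapy.Field()
    start_datetime = scrapy.Field()
    end_datetime = scrapy.Field()
    url = scrapy.Field()
    location = scrapy.Field()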

102
data_collector/data_collector/middlewares.py Normal file

@@ -0,0 +1,102 @@
# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter, is_item
from scrapy import signals


class DataCollectorSpiderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.
        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.
        # Must return an iterable of Request, or item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.
        # Should return either None or an iterable of Request or item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.
        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info("Spider opened: %s" % spider.name)


class DataCollectorDownloaderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.
        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.
        # Must either;
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.
        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info("Spider opened: %s" % spider.name)
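Both classes are the unmodified scrapy startproject templates, and neither is enabled in settings.py. As a concrete illustration of the contract the comments describe, a minimal downloader middleware might look like the sketch below (the class name and header value are made up for the example):

class ExampleHeaderMiddleware:
    # Hypothetical example: add a default header to every outgoing request
    # and return None so processing continues down the middleware chain.
    def process_request(self, request, spider):
        request.headers.setdefault("Accept-Language", "pt-BR")
        return None

Enabling such a class would be a matter of listing it under DOWNLOADER_MIDDLEWARES in settings.py, as the commented-out block there shows.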

31
data_collector/data_collector/models.py Normal file

@@ -0,0 +1,31 @@
import peewee

from data_collector import settings


def get_db():
    return peewee.SqliteDatabase(settings.DATABASE)


class Event(peewee.Model):
    title = peewee.CharField()
    start_datetime = peewee.DateTimeField()
    end_datetime = peewee.DateTimeField()
    url = peewee.CharField()
    location = peewee.CharField()
    processed = peewee.BooleanField()

    class Meta:
        database = get_db()
        indexes = ((("title", "start_datetime", "end_datetime", "url"), True),)


def initialize_db():
    db = get_db()
    db.connect(reuse_if_open=True)
    db.create_tables(
        [
            Event,
        ]
    )
    return db
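A short usage sketch, assuming the project package is importable and the SQLite file named in settings exists; it shows how downstream code could pick up events that have not yet been marked as processed:

from data_collector.models import Event, initialize_db

db = initialize_db()
# Events stored by the pipeline but not yet handled downstream
for event in Event.select().where(Event.processed == False):
    print(event.title, event.start_datetime, event.url)
db.close()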

30
data_collector/data_collector/pipelines.py Normal file

@@ -0,0 +1,30 @@
import peewee
from itemadapter import ItemAdapter

from data_collector.models import Event, initialize_db


class DataCollectorPipeline:
    def open_spider(self, spider):
        self.db = initialize_db()

    def process_item(self, item, spider):
        event = Event(
            title=item["title"],
            start_datetime=item["start_datetime"],
            end_datetime=item["end_datetime"],
            url=item["url"],
            location=item["location"],
            processed=False,
        )
        try:
            event.save()
        except peewee.IntegrityError:
            # Event was added before
            pass
        return item

    def close_spider(self, spider):
        self.db.close()
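Catching peewee.IntegrityError is what makes repeated crawls idempotent: the unique index declared in Event.Meta rejects a second insert of the same (title, start_datetime, end_datetime, url) combination. A small sketch of that behaviour, with illustrative values rather than real scraped data:

from datetime import datetime

import peewee

from data_collector.models import Event, initialize_db

initialize_db()
fields = dict(
    title="Example concert",  # illustrative data only
    start_datetime=datetime(2024, 12, 10, 20, 0),
    end_datetime=datetime(2024, 12, 10, 22, 0),
    url="https://cultura.jundiai.sp.gov.br/example",
    location="Example venue",
    processed=False,
)
Event(**fields).save()
try:
    Event(**fields).save()
except peewee.IntegrityError:
    print("duplicate ignored, as in DataCollectorPipeline.process_item")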

94
data_collector/data_collector/settings.py Normal file

@@ -0,0 +1,94 @@
# Scrapy settings for data_collector project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = "data_collector"
SPIDER_MODULES = ["data_collector.spiders"]
NEWSPIDER_MODULE = "data_collector.spiders"
# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = "data_collector (+http://www.yourdomain.com)"
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
# COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False
# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
# "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
# "Accept-Language": "en",
# }
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
# "data_collector.middlewares.DataCollectorSpiderMiddleware": 543,
# }
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# DOWNLOADER_MIDDLEWARES = {
# "data_collector.middlewares.DataCollectorDownloaderMiddleware": 543,
# }
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
# "scrapy.extensions.telnet.TelnetConsole": None,
# }
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    "data_collector.pipelines.DataCollectorPipeline": 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = "httpcache"
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = "scrapy.extensions.httpcache.FilesystemCacheStorage"
# Set settings whose default value is deprecated to a future-proof value
TWISTED_REACTOR = "twisted.internet.asyncioreactor.AsyncioSelectorReactor"
FEED_EXPORT_ENCODING = "utf-8"
DATABASE = "agenda.db"
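DATABASE is a project-specific setting rather than a built-in Scrapy one; models.py reads it by importing this module directly. For reference, the same value is also reachable through Scrapy's settings API at runtime, for example from a pipeline's from_crawler hook. The class below is a hypothetical sketch and not part of this commit:

class SettingsAwarePipeline:
    # Hypothetical sketch: pull the custom DATABASE setting from the
    # crawler settings instead of importing data_collector.settings.
    def __init__(self, database_path):
        self.database_path = database_path

    @classmethod
    def from_crawler(cls, crawler):
        return cls(database_path=crawler.settings.get("DATABASE"))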

4
data_collector/data_collector/spiders/__init__.py Normal file

@@ -0,0 +1,4 @@
# This package will contain the spiders of your Scrapy project
#
# Please refer to the documentation for information on how to create and manage
# your spiders.

36
data_collector/data_collector/spiders/cultura_jundiai.py Normal file

@@ -0,0 +1,36 @@
import re
from datetime import datetime

import scrapy


class CulturaJundiaiSpider(scrapy.Spider):
    name = "cultura_jundiai"
    allowed_domains = ["cultura.jundiai.sp.gov.br"]
    start_urls = ["https://cultura.jundiai.sp.gov.br/agenda-cultural/"]
    date_regex = r"(?P<date>\d{2}\/\d{2}\/\d{4}).*(?P<start_time>\d{2}:\d{2}).*(?P<end_time>\d{2}:\d{2})"

    def parse(self, response):
        for event in response.css(".noticia-lista"):
            event_date = event.css(".data-lista p::text").get()
            date_search = re.search(self.date_regex, event_date)
            if date_search:
                date = date_search.groupdict()["date"]
                start_time = date_search.groupdict()["start_time"]
                start_datetime = datetime.strptime(
                    f"{date} {start_time}", "%d/%m/%Y %H:%M"
                )
                end_time = date_search.groupdict()["end_time"]
                end_datetime = datetime.strptime(f"{date} {end_time}", "%d/%m/%Y %H:%M")
                yield {
                    "title": event.css(".titulo-lista::text").get(),
                    "start_datetime": start_datetime,
                    "end_datetime": end_datetime,
                    "url": event.css("a::attr(href)").get(),
                    "location": event.css(".resumo-lista::text").get(),
                }

        for page_url in response.css("#paginacao a::attr(href)").getall():
            yield scrapy.Request(page_url)
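A quick self-contained check of how date_regex and the strptime calls cooperate, using a made-up listing string (the real markup of the agenda page may of course differ):

import re
from datetime import datetime

date_regex = (
    r"(?P<date>\d{2}\/\d{2}\/\d{4}).*"
    r"(?P<start_time>\d{2}:\d{2}).*(?P<end_time>\d{2}:\d{2})"
)
sample = "10/12/2024 das 20:00 às 22:00"  # illustrative text only
parts = re.search(date_regex, sample).groupdict()
start = datetime.strptime(f"{parts['date']} {parts['start_time']}", "%d/%m/%Y %H:%M")
end = datetime.strptime(f"{parts['date']} {parts['end_time']}", "%d/%m/%Y %H:%M")
print(start, end)  # 2024-12-10 20:00:00 2024-12-10 22:00:00

The pagination loop at the end can safely re-yield every page link it sees because Scrapy's default duplicate-request filter drops URLs that were already scheduled.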

11
data_collector/scrapy.cfg Normal file

@@ -0,0 +1,11 @@
# Automatically created by: scrapy startproject
#
# For more information about the [deploy] section see:
# https://scrapyd.readthedocs.io/en/latest/deploy.html
[settings]
default = data_collector.settings
[deploy]
#url = http://localhost:6800/
project = data_collector

1483
poetry.lock generated Normal file

File diff suppressed because it is too large

29
pyproject.toml Normal file

@@ -0,0 +1,29 @@
[tool.poetry]
name = "agenda"
version = "0.1.0"
description = ""
authors = ["Renne Rocha <renne@rocha.dev.br>"]
license = "GNU GPLv3"
readme = "README.md"

[tool.poetry.dependencies]
python = "^3.13"
scrapy = "^2.12.0"
peewee = "^3.17.8"

[tool.poetry.group.dev.dependencies]
ipython = "^8.30.0"
ipdb = "^0.13.13"
black = "^24.10.0"
pre-commit = "^4.0.1"

[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"

[tool.isort]
known_first_party = ["data_collector"]
known_third_party = ["scrapy"]
profile = "black"
sections = ["FUTURE", "STDLIB", "THIRDPARTY", "FIRSTPARTY", "LOCALFOLDER"]