# kuow-mastodon-bot/kuow_fetcher.py
# Scrapes the KUOW homepage story grid and posts new/updated articles to
# Mastodon, tracking what has been seen in a local SQLite database.

# Standard library
import json
from datetime import datetime
from typing import Optional

# Third party
import requests
import yaml
from bs4 import BeautifulSoup
from mastodon import Mastodon
from sqlalchemy import create_engine, select
from sqlalchemy.exc import NoResultFound
from sqlalchemy.orm import DeclarativeBase, Mapped, Session, mapped_column


class Base(DeclarativeBase):
    """Declarative base shared by this bot's ORM models."""
class KuowStory(Base):
    """A KUOW article this bot has seen.

    Keyed by KUOW's own pageview story id; stores the article's last
    modification time and, once posted, the Mastodon status id so the
    same story is not posted twice.
    """

    __tablename__ = "kuow_stories"

    # KUOW's internal story id, scraped from the article page's
    # "pageview_story" <script> tag.
    pageview_story_id: Mapped[str] = mapped_column(primary_key=True, unique=True)
    # Secondary id scraped from the "dfp_targeting" <script> tag (data-key="id").
    dfp_targeting_id: Mapped[str] = mapped_column()
    # Site-relative link (appended to the KUOW base URL); used to match
    # previously-seen articles on each scrape.
    article_link: Mapped[str] = mapped_column()
    # dateModified from the article's schema.org block, falling back to the
    # meta published time or the scrape time.
    last_updated_time: Mapped[datetime] = mapped_column()
    # Mastodon status id for the post made for this story; None until posted.
    post_id: Mapped[Optional[str]] = mapped_column()

    def __repr__(self) -> str:
        return f"KuowStory(pageview_story_id={self.pageview_story_id!r}, dfp_targeting_id={self.dfp_targeting_id!r}, article_link={self.article_link!r}, last_updated_time={self.last_updated_time!r}, post_id={self.post_id!r})"
# SQLite database file in the working directory; create_all is a no-op when
# the table already exists, so this is safe to run on every invocation.
engine = create_engine("sqlite:///kuow.db")
Base.metadata.create_all(engine)
kuow_base_url = "https://www.kuow.org"
# Homepage story-grid fragment: first page, 12 stories per page.
url = (
    kuow_base_url
    + "/fragments?name=story_grid&source=homepage&view_id=1&page=1&per_page=12"
)

# Load Mastodon credentials from config.yml. Use a context manager so the
# file handle is closed promptly (the original left it to the GC).
with open("config.yml") as config_file:
    config = yaml.safe_load(config_file)

mastodon = Mastodon(
    client_id=config["mastodon"]["client_id"],
    client_secret=config["mastodon"]["client_secret"],
    access_token=config["mastodon"]["access_token"],
    api_base_url=config["mastodon"]["api_base_url"],
)
# Fetch the homepage story grid and pull out the story title spans.
# A timeout keeps the bot from hanging forever on a stalled connection.
kuow_response = requests.get(url, timeout=30)
soup = BeautifulSoup(kuow_response.content, "html.parser")

articles = soup.find_all("span", class_="txt")

# Reverse articles, so that if multiple new ones have been found, they'll be
# posted in order of when published
articles.reverse()
with Session(engine) as session:
    for article in articles:
        article_link = article.find("a").attrs["href"]
        print("Checking {}".format(article_link))

        # Look up the article by its (site-relative) link.
        lookup_statement = select(KuowStory).where(
            KuowStory.article_link == article_link
        )
        lookup_result = session.scalars(lookup_statement)
        is_new_article = False

        article_lookup = requests.get(kuow_base_url + article_link, timeout=30)
        article_soup = BeautifulSoup(article_lookup.content, "html.parser")

        # Prefer the article's schema.org JSON-LD dateModified as the last
        # updated time. Narrow exceptions replace the original bare except:
        # AttributeError (no script tag -> .text on None), json decode errors
        # (subclass of ValueError), missing keys, bad ISO dates, and the
        # @context assertion.
        try:
            schema_org_scriptblock = article_soup.find(
                "script", {"type": "application/ld+json"}
            )
            schema_org = json.loads(schema_org_scriptblock.text)
            assert schema_org["@context"] == "http://schema.org"
            last_updated_time = datetime.fromisoformat(schema_org["dateModified"])
        except (AttributeError, KeyError, TypeError, ValueError, AssertionError):
            print(
                "Could not find or load schema.org data for this post, looking up the meta published time"
            )
            try:
                last_updated_time = datetime.fromisoformat(
                    article_soup.find(
                        "meta", attrs={"property": "article:published_time"}
                    )["content"]
                )
            except (TypeError, KeyError, ValueError):
                print("Could not find or load the meta published time for this post")
                # NOTE(review): this fallback is naive while fromisoformat may
                # return an aware datetime; the .astimezone() comparison below
                # then treats stored naive values as local time — confirm the
                # intended timezone handling.
                last_updated_time = datetime.now()

        try:
            article_record = lookup_result.one()
            # Only process existing articles if the last updated time doesn't match
            process_article = (
                article_record.last_updated_time.astimezone() != last_updated_time
            )
        except NoResultFound:
            # Is a new article, or at least one that doesn't match based on the link
            article_record = KuowStory()
            process_article = True
            is_new_article = True

        if process_article:
            print("Processing {}".format(article_link))

            # Scrape KUOW's ids out of the page's tracking <script> tags.
            # find() returning None raises TypeError on subscripting.
            try:
                article_record.pageview_story_id = article_soup.find(
                    "script", {"class": "pageview_story"}
                )["data-id"]
                article_record.dfp_targeting_id = article_soup.find(
                    "script", {"class": "dfp_targeting", "data-key": "id"}
                )["data-value"]
            except (TypeError, KeyError):
                print("Could not find or load IDs for this post")

            try:
                tags = article_soup.find(
                    "script", {"class": "dfp_targeting", "data-key": "tags"}
                )["data-value"].split("|")
            except (TypeError, KeyError):
                print("Could not find or load any tags for this article")
                tags = []

            # Turn "some tag" into " #SomeTag" for the post body.
            additional_tag_string = ""
            for tag in tags:
                # TODO: Do a check for tags in the config file that we don't want to tag posts with
                additional_tag_string += " #{}".format(tag.title().replace(" ", ""))

            # Post only if we have a description and haven't posted before.
            # Broad Exception (not bare except) because the Mastodon client
            # raises its own library-specific errors.
            try:
                article_description = (
                    (article_soup.find("meta", attrs={"property": "description"}))
                    .attrs["content"]
                    .strip()
                )
                if not article_record.post_id:
                    print("Posting to Mastodon")
                    mastodon_post_result = mastodon.status_post(
                        status=article_description
                        + "\n"
                        + kuow_base_url
                        + article_link
                        + "\n#KUOW #News{}".format(additional_tag_string),
                        visibility="public",
                    )
                    article_record.post_id = mastodon_post_result["id"]
                else:
                    print("Article has already been posted")
            except Exception:
                print("Could not load a description/post this article")

            article_record.article_link = article_link
            article_record.last_updated_time = last_updated_time
            if is_new_article:
                session.add(article_record)
            # Commit per article so a failure partway through the grid does
            # not lose earlier progress.
            session.commit()