2022-06-22 18:11:22 +00:00
|
|
|
import mimetypes
|
|
|
|
import re
|
2022-06-29 19:38:13 +00:00
|
|
|
from typing import Any
|
2022-06-22 18:11:22 +00:00
|
|
|
from urllib.parse import urlparse
|
|
|
|
|
|
|
|
import httpx
|
|
|
|
from bs4 import BeautifulSoup # type: ignore
|
2022-08-11 20:14:11 +00:00
|
|
|
from loguru import logger
|
2022-06-22 18:11:22 +00:00
|
|
|
from pydantic import BaseModel
|
|
|
|
|
2022-08-04 15:36:21 +00:00
|
|
|
from app import ap_object
|
2022-06-22 18:11:22 +00:00
|
|
|
from app import config
|
2022-08-04 15:36:21 +00:00
|
|
|
from app.actor import LOCAL_ACTOR
|
|
|
|
from app.actor import fetch_actor
|
|
|
|
from app.database import AsyncSession
|
|
|
|
from app.models import InboxObject
|
|
|
|
from app.models import OutboxObject
|
2022-06-22 19:15:07 +00:00
|
|
|
from app.utils.url import is_url_valid
|
2022-08-04 17:11:14 +00:00
|
|
|
from app.utils.url import make_abs
|
2022-06-22 18:11:22 +00:00
|
|
|
|
|
|
|
|
|
|
|
class OpenGraphMeta(BaseModel):
    """Open Graph metadata scraped from an external web page.

    ``image`` and ``description`` are optional since many pages only
    provide the basic tags; ``site_name`` falls back to the page's
    hostname when no ``og:site_name`` is present.
    """

    url: str
    title: str
    image: str | None
    description: str | None
    site_name: str
|
2022-06-22 18:11:22 +00:00
|
|
|
|
|
|
|
|
2022-08-02 20:22:15 +00:00
|
|
|
def _scrap_og_meta(url: str, html: str) -> OpenGraphMeta | None:
    """Extract Open Graph metadata from an HTML page.

    ``og:*`` meta tags take precedence over the fallback values derived
    from the page itself (``<title>``, the URL's hostname).

    Returns ``None`` when no usable title can be found (neither an
    ``og:title`` meta tag nor a ``<title>`` element).
    """
    soup = BeautifulSoup(html, "html5lib")
    # Collect every <meta property="og:..."> tag from the <head>.
    ogs = {
        og.attrs["property"]: og.attrs.get("content")
        for og in soup.html.head.findAll(property=re.compile(r"^og"))
    }
    # Some pages have no <title>; leave the title unset and rely on
    # og:title below instead of crashing on `.text`.
    title_tag = soup.find("title")
    raw: dict[str, Any] = {
        "url": url,
        "title": title_tag.text if title_tag else None,
        "image": None,
        "description": None,
        "site_name": urlparse(url).hostname,
    }
    # Override the defaults with any og:* values that are present.
    for field in OpenGraphMeta.__fields__.keys():
        og_field = f"og:{field}"
        if ogs.get(og_field):
            raw[field] = ogs.get(og_field, None)

    # Without a title there is nothing worth rendering.
    if not raw["title"]:
        return None

    # og:url/og:image may be relative; resolve them against the page URL.
    for maybe_rel in {"url", "image"}:
        if u := raw.get(maybe_rel):
            raw[maybe_rel] = make_abs(u, url)

    return OpenGraphMeta.parse_obj(raw)
|
|
|
|
|
|
|
|
|
2022-08-04 15:36:21 +00:00
|
|
|
async def external_urls(
    db_session: AsyncSession,
    ro: ap_object.RemoteObject | OutboxObject | InboxObject,
) -> set[str]:
    """Return the external URLs linked from a note's content.

    Excluded from the result:
      - links back to the note's own host,
      - links to media (image/video/audio mimetypes),
      - hashtag/mention hrefs (including the mentioned actors' `url`
        and `ap_id`, resolved via ``fetch_actor``).
    """
    note_host = urlparse(ro.ap_id).hostname

    # Hrefs coming from tags (hashtags/mentions) are collected so they
    # can be subtracted from the scraped links at the end.
    tags_hrefs = set()
    for tag in ro.tags:
        # Guard on href so a malformed Mention tag without one cannot
        # raise a KeyError below.
        if tag_href := tag.get("href"):
            tags_hrefs.add(tag_href)
            if tag.get("type") == "Mention":
                if tag_href != LOCAL_ACTOR.ap_id:
                    mentioned_actor = await fetch_actor(db_session, tag_href)
                    tags_hrefs.add(mentioned_actor.url)
                    tags_hrefs.add(mentioned_actor.ap_id)
                else:
                    tags_hrefs.add(LOCAL_ACTOR.ap_id)
                    tags_hrefs.add(LOCAL_ACTOR.url)

    urls = set()
    if ro.content:
        soup = BeautifulSoup(ro.content, "html5lib")
        for link in soup.find_all("a"):
            h = link.get("href")
            # Skip anchors without an href (urlparse/guess_type would
            # otherwise blow up on None).
            if not h:
                continue
            ph = urlparse(h)
            mimetype, _ = mimetypes.guess_type(h)
            if (
                ph.scheme in {"http", "https"}
                and ph.hostname != note_host
                and is_url_valid(h)
                and (
                    not mimetype
                    or mimetype.split("/")[0] not in ["image", "video", "audio"]
                )
            ):
                urls.add(h)

    return urls - tags_hrefs
|
2022-06-22 18:11:22 +00:00
|
|
|
|
|
|
|
|
2022-06-29 19:38:13 +00:00
|
|
|
async def _og_meta_from_url(url: str) -> OpenGraphMeta | None:
    """Fetch *url* and scrape its Open Graph metadata.

    Returns ``None`` for non-HTML responses or when scraping fails.
    Raises ``httpx.HTTPError`` on request failures or non-2xx statuses.
    """
    async with httpx.AsyncClient() as client:
        resp = await client.get(
            url,
            headers={
                "User-Agent": config.USER_AGENT,
            },
            follow_redirects=True,
        )

    resp.raise_for_status()

    # Only HTML pages can carry OG meta tags.
    content_type = resp.headers.get("content-type")
    if not content_type or not content_type.startswith("text/html"):
        return None

    try:
        return _scrap_og_meta(url, resp.text)
    except Exception:
        # Best-effort: a broken page should not break note processing.
        logger.info(f"Failed to scrap OG meta for {url}")
        return None
|
2022-06-22 18:11:22 +00:00
|
|
|
|
|
|
|
|
2022-08-04 15:36:21 +00:00
|
|
|
async def og_meta_from_note(
    db_session: AsyncSession,
    ro: ap_object.RemoteObject,
) -> list[dict[str, Any]]:
    """Collect Open Graph metadata for every external URL in a note.

    URLs that fail with an HTTP error are silently skipped
    (best-effort enrichment).
    """
    results: list[dict[str, Any]] = []
    for external_url in await external_urls(db_session, ro):
        try:
            meta = await _og_meta_from_url(external_url)
        except httpx.HTTPError:
            continue
        if meta:
            results.append(meta.dict())
    return results
|