From 9b5e301bf754fe0ac551a847a397fb1b9c2b0a36 Mon Sep 17 00:00:00 2001
From: Frederick Yin
Date: Wed, 29 Nov 2023 11:20:05 -0500
Subject: atom: do not include headerlink

---
 atom/atom.py | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/atom/atom.py b/atom/atom.py
index 3a77d5f..c6e38d2 100755
--- a/atom/atom.py
+++ b/atom/atom.py
@@ -74,7 +74,9 @@ def add_comic(num, dryrun=False):
             "url": f"https://fkfd.me/comics/{info['num']}",
             "title": "Comic: " + info["title"],
             "date": utc_date(),
-            "html": '<figure><img src="{0}"/><figcaption>{1}</figcaption></figure>'.format(info["img"], info["alt"]),
+            "html": '<figure><img src="{0}"/><figcaption>{1}</figcaption></figure><p>{2}</p>'.format(
+                info["img"], info["transcript"], info["alt"]
+            ),
         },
         dryrun=dryrun,
     )
@@ -82,6 +84,7 @@
 
 def add_blogpost(path, dryrun=False):
     # example path: "shitpost/flat_egg", "projects/byseekel"
+    path = path.strip("/")
     try:
         f = open(BLOG_DIR + f"/site/{path}/index.html")
     except FileNotFoundError:
@@ -92,13 +95,16 @@
     f.close()
     soup = BeautifulSoup(html, "html.parser")
     main = soup.find(role="main")
-    url = f"https://fkfd.me/{path.strip('/')}/"  # trailing slash necessary
+    url = f"https://fkfd.me/{path}/"  # trailing slash necessary
 
     # convert all relative paths to absolute URLs
     for img in main.find_all("img"):
         img["src"] = urlparse.urljoin(url, img["src"])
 
     for a in main.find_all("a"):
+        if "headerlink" in a["class"]:  # remove "¶"
+            a.decompose()
+            continue
         a["href"] = urlparse.urljoin(url, a["href"])
 
     add_entry(
--
cgit v1.2.3