author     Solderpunk <solderpunk@sdf.org>   2020-03-17 13:09:51 +0100
committer  Solderpunk <solderpunk@sdf.org>   2020-03-17 13:09:51 +0100
commit     63f7622d4c0e6f4a90068009ff4b93412a141901
tree       38b2f50d9040dd7ecf7cd1b15aba7d7b9d545739
parent     b5cc680fbc9f3cd821f23df4cf4b736b43e79379
Extract feed title from index.gmi if it exists and is world readable.
-rw-r--r--  gemfeed.py  44
1 file changed, 28 insertions(+), 16 deletions(-)
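
The change teaches gemfeed to take its default feed title from the capsule's own index page rather than a fixed string. A rough, self-contained sketch of the resolution order the commit introduces; default_title() below condenses the new get_feed_title() plus the argparse wiring shown in the diff, and every other name is illustrative, not part of the patch:

    import argparse
    import os
    import stat

    FALLBACK = "Just another Gemini feed"   # the hard-coded default from the patch

    def default_title():
        # Condensed re-statement of the new get_feed_title(): use the first
        # heading of a world-readable index file, otherwise the fallback.
        for ext in ("gmi", "gem", "gemini"):
            index = "index.{}".format(ext)
            if not os.path.exists(index):
                continue
            if not os.stat(index).st_mode & stat.S_IROTH:
                continue
            with open(index) as fp:
                for line in fp:
                    if line.startswith("#"):
                        return line.lstrip("#").strip()
            return FALLBACK   # index found, but it contains no heading
        return FALLBACK       # no world-readable index file at all

    parser = argparse.ArgumentParser()
    parser.add_argument('-t', '--title', type=str, default=default_title())
    print(parser.parse_args().title)   # -t wins, then the index heading, then the fallback
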
diff --git a/gemfeed.py b/gemfeed.py
index bae4870..f0e9aa9 100644
--- a/gemfeed.py
+++ b/gemfeed.py
@@ -8,6 +8,19 @@ import urllib.parse
 from feedgen.feed import FeedGenerator
+def is_world_readable(filename):
+    st = os.stat(filename)
+    return st.st_mode & stat.S_IROTH
+
+def get_feed_title():
+    default = "Just another Gemini feed"
+    for extension in ("gmi", "gem", "gemini"):
+        filename = "index.{}".format(extension)
+        print(filename)
+        if os.path.exists(filename) and is_world_readable(filename):
+            return extract_first_heading(filename, default)
+    return default
+
 def find_files():
     files = []
     for extension in ("gmi", "gem", "gemini"):
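
The two helpers above drive the new behaviour: is_world_readable tests the "other read" permission bit, and get_feed_title walks the usual index filenames looking for one it is allowed to publish from. A small self-contained sketch of how the permission check behaves (the helper is copied from the hunk; the temporary file and chmod calls are invented for illustration):

    import os
    import stat
    import tempfile

    def is_world_readable(filename):
        # Same check as the patch: is the "other read" (o+r) bit set?
        st = os.stat(filename)
        return st.st_mode & stat.S_IROTH

    # Throwaway file, purely to show how the permission bit drives the result.
    with tempfile.NamedTemporaryFile(suffix=".gmi", delete=False) as tmp:
        path = tmp.name

    os.chmod(path, 0o640)                  # readable by owner and group only
    print(bool(is_world_readable(path)))   # False -> get_feed_title() skips this file
    os.chmod(path, 0o644)                  # world readable
    print(bool(is_world_readable(path)))   # True  -> its first heading may become the title
    os.unlink(path)
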
@@ -15,12 +28,16 @@ def find_files():
         index = "index.{}".format(extension)
         if index in files:
             files.remove(index)
-    world_readable = []
-    for filename in files:
-        st = os.stat(filename)
-        if st.st_mode & stat.S_IROTH:
-            world_readable.append(filename)
-    return world_readable
+    return [f for f in files if is_world_readable(f)]
+
+def extract_first_heading(filename, default=""):
+    with open(filename) as fp:
+        for line in fp:
+            if line.startswith("#"):
+                while line[0] == "#":
+                    line = line[1:]
+                return line.strip()
+    return default
 def populate_entry_from_file(filename, base_url, entry):
     url = urljoin(base_url, filename)
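
extract_first_heading is the other half: it returns the first gemtext heading with its leading # markers stripped, or the caller's default when the file has none, and find_files now reuses is_world_readable through a list comprehension instead of the removed loop. A short demonstration under throwaway files (the helper is copied from the hunk; write_tmp and the file contents are invented):

    import os
    import tempfile

    def extract_first_heading(filename, default=""):
        # Copied from the hunk above.
        with open(filename) as fp:
            for line in fp:
                if line.startswith("#"):
                    while line[0] == "#":
                        line = line[1:]
                    return line.strip()
        return default

    def write_tmp(text):
        # Helper for this sketch only.
        fd, path = tempfile.mkstemp(suffix=".gmi", text=True)
        with os.fdopen(fd, "w") as fp:
            fp.write(text)
        return path

    with_heading = write_tmp("## Pondering the Gemini\nWelcome aboard.\n")
    no_heading = write_tmp("Plain gemtext body, no heading at all.\n")

    print(extract_first_heading(with_heading, "fallback"))   # Pondering the Gemini
    print(extract_first_heading(no_heading, "fallback"))     # fallback

    os.unlink(with_heading)
    os.unlink(no_heading)
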
@@ -29,15 +46,7 @@ def populate_entry_from_file(filename, base_url, entry):
     updated = os.path.getctime(filename)
     updated = datetime.datetime.fromtimestamp(updated, tz=datetime.timezone.utc)
     entry.updated(updated)
-    with open(filename) as fp:
-        for line in fp:
-            if line.startswith("#"):
-                while line[0] == "#":
-                    line = line[1:]
-                title = line.strip()
-                break
-        else:
-            title = filename
+    title = extract_first_heading(filename, filename)
     entry.title(title)
 def urljoin(base, url):
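
In populate_entry_from_file the old inline for/else block collapses into a single call; passing the filename as the default keeps the old fallback of titling an entry after its own filename when it has no heading. A quick check of that equivalence (the helper is the same one added above; the file is a throwaway):

    import os
    import tempfile

    def extract_first_heading(filename, default=""):
        with open(filename) as fp:
            for line in fp:
                if line.startswith("#"):
                    while line[0] == "#":
                        line = line[1:]
                    return line.strip()
        return default

    fd, path = tempfile.mkstemp(suffix=".gmi", text=True)
    with os.fdopen(fd, "w") as fp:
        fp.write("No heading in this post, just body text.\n")

    # Old for/else fallback and the new one-liner agree:
    # no heading -> the filename itself becomes the entry title.
    print(extract_first_heading(path, path) == path)   # True
    os.unlink(path)
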
@@ -51,6 +60,9 @@ def urljoin(base, url):
 def main():
+    # Get default title from index page, if there is one
+    feed_title = get_feed_title()
+
     # Parse arguments
     parser = argparse.ArgumentParser(description='Generate an Atom feed for Gemini content.')
     parser.add_argument('-a', '--author', dest='author', type=str,
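
Finally, main() computes the title before the argument parser is built. The ordering matters because the value passed as default= is evaluated when add_argument is called, so get_feed_title() must already have run; it also means the index file is read on every invocation, even when -t is supplied. A minimal illustration of the pattern (the names here are invented, only the ordering mirrors the patch):

    import argparse

    computed_default = "Pondering the Gemini"   # stand-in for get_feed_title()

    parser = argparse.ArgumentParser()
    parser.add_argument('-t', '--title', type=str, default=computed_default)

    print(parser.parse_args([]).title)               # Pondering the Gemini
    print(parser.parse_args(['-t', 'Other']).title)  # Other
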
@@ -64,7 +76,7 @@ def main():
     parser.add_argument('-s', '--subtitle', dest='subtitle', type=str,
         help='feed subtitle')
     parser.add_argument('-t', '--title', dest='title', type=str,
-        default="A Gemini feed", help='feed title')
+        default=feed_title, help='feed title')
     args = parser.parse_args()
     # Normalise base URL