diff --git a/pg-py/pa.py b/pg-py/pa.py
index 8f3e455..f324e28 100644
--- a/pg-py/pa.py
+++ b/pg-py/pa.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python3
"""
PaPy (Page with Python): a Python static site generator
- v1.2
+ v1.3
"""
import re
@@ -24,13 +24,14 @@ PAGINATION = "numbers" # "numbers" or "direction"
ENABLE_SEARCH = False
GENERATE_ARCHIVE = "yes" # "yes" or "no"
ARCHIVE_URL = "archive" # the URL of the archive
+ TAGS_PAGE_TITLE = "tags and categories"
# Global caches
CSS_VAR_PATTERN = re.compile(r'(var\(--[^)]+\))')
HEAD_TAG_PATTERN = re.compile(r'<head>.*?</head>', re.DOTALL)
HASHTAG_PATTERN = re.compile(r'\{\{\s*hashtag:([^}]+)\s*\}\}')
FILTER_PATTERN = re.compile(r'\{([^}|]+)(?:\|([^:]+)(?::([^}]+))?)\}')
- MARKDOWN_EXTENSIONS = ["fenced_code", "tables", "footnotes", "extra"]
+ MARKDOWN_EXTENSIONS = ["fenced_code", "tables", "footnotes", "extra", "smarty"]
md_converter = markdown.Markdown(extensions=MARKDOWN_EXTENSIONS)
@@ -39,7 +40,7 @@ def apply_filter(value, filter_name, filter_arg=None):
try:
if '+' in value and ':' not in value[-5:]:
value = value[:-2] + ':' + value[-2:]
-
+
date_obj = datetime.fromisoformat(value)
if filter_arg:
format_str = filter_arg.replace('%20', ' ')
@@ -57,7 +58,7 @@ def process_filters(content, context):
filtered_value = apply_filter(context[var_name], filter_name, filter_arg)
return str(filtered_value)
return match.group(0)
-
+
return FILTER_PATTERN.sub(replace_filter, content)
@@ -144,38 +145,38 @@ def load_template_parts(theme_dir: Path):
def render_template(template_content, context):
content = template_content
-
+
while True:
for_match = re.search(r'{%\s*for\s+(\w+)\s+in\s+(\w+)\s*%}(.*?){%\s*endfor\s*%}', content, re.DOTALL)
if not for_match:
break
-
+
item_var, list_var, loop_content = for_match.groups()
if list_var in context and isinstance(context[list_var], list) and context[list_var]:
rendered_loop = ''
for item in context[list_var]:
item_content = loop_content
item_content = process_filters(item_content, item if isinstance(item, dict) else {item_var: item})
-
+
if isinstance(item, dict):
for key, value in item.items():
placeholder = f'{{{item_var}.{key}}}'
item_content = item_content.replace(placeholder, str(value))
else:
item_content = item_content.replace(f'{{{item_var}}}', str(item))
-
+
item_content = process_filters(item_content, item if isinstance(item, dict) else {item_var: item})
rendered_loop += item_content
content = content.replace(for_match.group(0), rendered_loop)
else:
content = content.replace(for_match.group(0), '')
-
+
for key, value in context.items():
if isinstance(value, str):
content = content.replace(f'{{{key}}}', value)
content = process_filters(content, context)
-
+
return content
@@ -263,7 +264,7 @@ def process_markdown_file(md_path: Path, templates, output_dir: Path, is_post=Fa
tag_name = match.group(1).strip()
inline_tags.append(tag_name)
cleaned_content = cleaned_content.replace(
- match.group(0),
+ match.group(0),
f'<a href="/tags/{tag_name.lower()}/">#{tag_name}</a>'
)
@@ -279,7 +280,7 @@ def process_markdown_file(md_path: Path, templates, output_dir: Path, is_post=Fa
})
markdown_content = md_converter.reset().convert(cleaned_content)
-
+
caption_data = [caption] if caption else []
tags_html = ''
@@ -327,20 +328,19 @@ def process_markdown_file(md_path: Path, templates, output_dir: Path, is_post=Fa
def generate_post_navigation(current_post, all_posts):
if not all_posts:
return ''
-
+
sorted_posts = sorted(all_posts, key=lambda x: x['date'])
-
current_index = None
for i, post in enumerate(sorted_posts):
if post['url_path'] == current_post['url_path']:
current_index = i
break
-
+
if current_index is None:
return ''
-
+
navigation_items = []
-
+
# Previous post (newer)
if current_index > 0:
prev_post = sorted_posts[current_index - 1]
@@ -350,8 +350,8 @@ def generate_post_navigation(current_post, all_posts):
'title': prev_post['title'],
'label': '↩ Previous Post'
})
-
- # Next post (older)
+
+ # Next post (older)
if current_index < len(sorted_posts) - 1:
next_post = sorted_posts[current_index + 1]
navigation_items.append({
@@ -360,7 +360,7 @@ def generate_post_navigation(current_post, all_posts):
'title': next_post['title'],
'label': 'Next Post ↪'
})
-
+
return navigation_items
@@ -371,7 +371,7 @@ def generate_index_page(templates, posts, output_dir: Path, page_num=1, is_pagin
title = SITE_TITLE
if tag_name:
- title = f"Posts tagged: {tag_name}"
+ title = f"Posts tagged: #{tag_name}"
elif is_pagination:
title = f"Page {page_num} - {SITE_TITLE}"
@@ -405,7 +405,7 @@ def generate_index_page(templates, posts, output_dir: Path, page_num=1, is_pagin
else:
pagination_html += f'<span class="older"><a href="/page/{page_num+1}/">Older ↪</a></span>'
pagination_html += '</div>'
-
+
description = "Blog post archive" if is_pagination else SITE_DESC
if tag_name:
description = f"Posts tagged {tag_name}"
@@ -425,11 +425,11 @@ def generate_index_page(templates, posts, output_dir: Path, page_num=1, is_pagin
template_to_use = templates['tags'] if tag_name else templates['main']
content = render_template(template_to_use, context)
-
+
description = "Blog post archive" if is_pagination else SITE_DESC
if tag_name:
description = f"Posts tagged {tag_name}"
-
+
page_url = SITE_URL
if is_pagination and page_num > 1:
page_url = f"{SITE_URL}/page/{page_num}/"
@@ -457,9 +457,7 @@ def generate_index_page(templates, posts, output_dir: Path, page_num=1, is_pagin
def generate_archive_page(templates, posts, output_dir: Path):
title = f"Archive - {SITE_TITLE}"
description = f"Complete post archive - {SITE_DESC}"
-
archive_template = templates['archive']
-
context = {
'title': title,
'description': description,
@@ -476,24 +474,29 @@ def generate_archive_page(templates, posts, output_dir: Path):
content = render_template(archive_template, context)
page_url = f"{SITE_URL}/{ARCHIVE_URL}/"
full_html = generate_html_page(templates, title, content, description, page_url, "", "website")
-
output_path = output_dir / ARCHIVE_URL / 'index.html'
write_html_file(output_path, full_html)
return output_path
def generate_tags_index(templates, all_tags, output_dir: Path):
- lines = ['<main>\n<h1>Tags</h1>', '<ul>']
- for tag, count in sorted(all_tags.items(), key=lambda x: x[0].lower()):
- lines.append(f'<li><a href="/tags/{tag.lower()}/">{tag}</a> ({count})</li>')
- lines.append('</ul>\n</main>\n')
-
- content = "\n".join(lines)
- title = f"Tags - {SITE_TITLE}"
- description = f"All tags on {SITE_TITLE}"
+ tags_as_posts = []
+ for tag in sorted(all_tags.keys(), key=lambda x: x.lower()):
+ tags_as_posts.append({
+ 'title': tag,
+ 'url': f'/tags/{tag.lower()}/'
+ })
+
+ context = {
+ 'title': f"{TAGS_PAGE_TITLE}",
+ 'description': f"All tags and categories on {SITE_TITLE}",
+ 'pagination': '',
+ 'posts': tags_as_posts
+ }
+
+ content = render_template(templates['tags'], context)
page_url = f"{SITE_URL}/tags/"
-
- full_html = generate_html_page(templates, title, content, description, page_url, "", "website")
+ full_html = generate_html_page(templates, context['title'], content, context['description'], page_url, "", "website")
output_path = output_dir / 'tags' / 'index.html'
write_html_file(output_path, full_html)
return output_path
@@ -521,21 +524,19 @@ def generate_rss_feed(posts, output_dir: Path):
date_str = post['date']
if re.search(r'[+-]\d{4}$', date_str):
date_str = date_str[:-2] + ':' + date_str[-2:]
-
+
pub_date = datetime.fromisoformat(date_str)
ET.SubElement(item, 'pubDate').text = format_datetime(pub_date)
-
+
if 'html_content' in post:
desc_elem = ET.SubElement(item, 'description')
desc_elem.text = post['html_content']
tree = ET.ElementTree(rss)
ET.indent(tree, space=" ", level=0)
-
xml_content = '<?xml version="1.0" encoding="utf-8" standalone="yes" ?>\n'
rss_content = ET.tostring(rss, encoding='unicode', method='xml')
xml_content += rss_content
-
(output_dir / 'index.xml').write_text(xml_content, encoding='utf-8')
@@ -563,19 +564,19 @@ def generate_search_index(posts, output_dir: Path):
# Optional feature - search with lunr.js
if not ENABLE_SEARCH:
return
-
+
search_data = []
for post in posts:
import re
plain_content = re.sub('<[^<]+?>', '', post.get('html_content', ''))
plain_content = plain_content.replace('&nbsp;', ' ').replace('&amp;', '&')
-
+
search_data.append({
'title': post['title'],
'content': plain_content,
'url': post['url']
})
-
+
import json
json_output = json.dumps(search_data, indent=2)
(output_dir / 'search-index.json').write_text(json_output, encoding='utf-8')
@@ -612,7 +613,7 @@ def main():
page_info = process_markdown_file(md_file, templates, public_dir, is_post=True)
all_pages.append(page_info)
post_pages.append(page_info)
-
+
for tag in page_info['tags']:
all_tags_set.add(tag)
tag_counts[tag] = tag_counts.get(tag, 0) + 1
@@ -633,7 +634,7 @@ def main():
{"url": tag.lower(), "label": tag}
for tag in post.get('front_matter_tags', [])
]
-
+
full_html = generate_html_page(
templates, post['title'], post['html_content'],
post['description'], post['url'], post.get('image', ''),
@@ -642,7 +643,7 @@ def main():
tags=tags_with_links
)
write_html_file(Path(post['path']), full_html)
-
+
index_page = generate_index_page(templates, post_pages, public_dir, page_num=1, is_pagination=False)
all_pages.append({'url': f"{SITE_URL}/", 'path': str(index_page), 'date': datetime.now().replace(microsecond=0).isoformat()})
@@ -658,14 +659,14 @@ def main():
if all_tags_set:
tags_index_path = generate_tags_index(templates, tag_counts, public_dir)
all_pages.append({'url': f"{SITE_URL}/tags/", 'path': str(tags_index_path), 'date': datetime.now().replace(microsecond=0).isoformat()})
-
+
for tag in all_tags_set:
tagged_posts = [post for post in post_pages if tag in post['tags']]
tagged_posts.sort(key=lambda x: x.get('date', ''), reverse=True)
tag_page_path = generate_index_page(templates, tagged_posts, public_dir, tag_name=tag)
all_pages.append({'url': f"{SITE_URL}/tags/{tag.lower()}/", 'path': str(tag_page_path), 'date': datetime.now().replace(microsecond=0).isoformat()})
-
+
tag_total_pages = (len(tagged_posts) + POSTS_PER_PAGE - 1) // POSTS_PER_PAGE
for page_num in range(2, tag_total_pages + 1):
tag_pagination_path = generate_index_page(templates, tagged_posts, public_dir, page_num=page_num, is_pagination=True, tag_name=tag)