Plain is headed towards 1.0! Subscribe for development updates →

import os
from html.parser import HTMLParser
from urllib.parse import urlparse, urlunparse

import mistune
from pygments import highlight
from pygments.formatters import html
from pygments.lexers import get_lexer_by_name
from pygments.util import ClassNotFound

from plain.urls import reverse
from plain.utils.text import slugify
 12
 13
 14class PagesRenderer(mistune.HTMLRenderer):
 15    def __init__(self, current_page_path, pages_registry, **kwargs):
 16        super().__init__(**kwargs)
 17        self.current_page_path = current_page_path
 18        self.pages_registry = pages_registry
 19
 20    def link(self, text, url, title=None):
 21        """Convert relative markdown links to proper page URLs."""
 22        # Check if it's a relative link (starts with ./ or ../, or is just a filename)
 23        is_relative = url.startswith(("./", "../")) or (
 24            not url.startswith(("http://", "https://", "/", "#")) and ":" not in url
 25        )
 26
 27        if is_relative:
 28            # Parse URL to extract components
 29            parsed_url = urlparse(url)
 30
 31            # Resolve relative to current page's directory using just the path component
 32            current_dir = os.path.dirname(self.current_page_path)
 33            resolved_path = os.path.normpath(os.path.join(current_dir, parsed_url.path))
 34            page = self.pages_registry.get_page_from_path(resolved_path)
 35
 36            # Get the primary URL name for link conversion
 37            url_name = page.get_url_name()
 38            if url_name:
 39                base_url = reverse(f"pages:{url_name}")
 40                # Reconstruct URL with preserved query params and fragment
 41                url = urlunparse(
 42                    (
 43                        parsed_url.scheme,  # scheme (empty for relative)
 44                        parsed_url.netloc,  # netloc (empty for relative)
 45                        base_url,  # path (our converted URL)
 46                        parsed_url.params,  # params
 47                        parsed_url.query,  # query
 48                        parsed_url.fragment,  # fragment
 49                    )
 50                )
 51
 52        return super().link(text, url, title)
 53
 54    def heading(self, text, level, **attrs):
 55        """Automatically add an ID to headings if one is not provided."""
 56
 57        if "id" not in attrs:
 58            inner_text = get_inner_text(text)
 59            inner_text = inner_text.replace(
 60                ".", "-"
 61            )  # Replace dots with hyphens (slugify won't)
 62            attrs["id"] = slugify(inner_text)
 63
 64        return super().heading(text, level, **attrs)
 65
 66    def block_code(self, code, info=None):
 67        """Highlight code blocks using Pygments."""
 68
 69        if info:
 70            lexer = get_lexer_by_name(info, stripall=True)
 71            formatter = html.HtmlFormatter(wrapcode=True)
 72            return highlight(code, lexer, formatter)
 73
 74        return "<pre><code>" + mistune.escape(code) + "</code></pre>"
 75
 76
 77def render_markdown(content, current_page_path):
 78    from .registry import pages_registry
 79
 80    renderer = PagesRenderer(
 81        current_page_path=current_page_path, pages_registry=pages_registry, escape=False
 82    )
 83    markdown = mistune.create_markdown(
 84        renderer=renderer, plugins=["strikethrough", "table"]
 85    )
 86    return markdown(content)
 87
 88
 89class InnerTextParser(HTMLParser):
 90    def __init__(self):
 91        super().__init__()
 92        self.text_content = []
 93
 94    def handle_data(self, data):
 95        # Collect all text data
 96        self.text_content.append(data.strip())
 97
 98
 99def get_inner_text(html_content):
100    parser = InnerTextParser()
101    parser.feed(html_content)
102    return " ".join([text for text in parser.text_content if text])