#!/usr/bin/env python3
# encoding: utf-8
# Copyright (C) 2020 Denis 'GNUtoo' Carikli <GNUtoo@cyberdimension.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.

# How to use:
# - Download the release notes from WordPress. If they are not published yet,
#   you can still open the preview and save the page with the browser's
#   "save as" function.
# - Point this program at the resulting HTML file and it will generate a text
#   version of it on standard output.
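#
# A typical invocation might look like the sketch below; the input file name
# is only an illustrative placeholder, not a real release page:
#
#   ./release_notes.py replicant-release-preview.html > release_notes.txt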

import sys

from bs4 import BeautifulSoup, FeatureNotFound
from html2text import config, HTML2Text

try:
    # wrapwrite() has been removed in more recent
    # versions of python-html2text. See commit
    # b361467894fb277563b4547ec9d4df49f5e0c6e3
    # (b361467 Remove support for Python ≤ 3.4)
    # in https://github.com/Alir3z4/html2text.git
    from html2text.utils import wrapwrite
except ImportError:
    wrapwrite = None


def usage(progname):
    print("Usage: {} path/to/file.html".format(progname), file=sys.stderr)
    sys.exit(1)

if len(sys.argv) != 2:
    usage(sys.argv[0])

html_file_path = sys.argv[1]

with open(html_file_path) as html_file:
    try:
        # html5lib is tried first: for some reason the lxml parser isn't
        # found with python-beautifulsoup4 4.9.3-3.0 on Parabola, and an
        # html5 parser is probably a better choice anyway as the Replicant
        # blog (now?) uses the html doctype and the theme seems to include
        # an html5.js file for the IE 9 browser.
        soup = BeautifulSoup(html_file, features="html5lib").article
    except FeatureNotFound:
        try:
            # Rewind in case the first attempt already consumed the file.
            html_file.seek(0)
            soup = BeautifulSoup(html_file, features="lxml").article
        except FeatureNotFound:
            print("Cannot find html5lib or lxml parsers", file=sys.stderr)
            sys.exit(1)

if soup is None:
    print("Cannot find an <article> element in {}".format(html_file_path),
          file=sys.stderr)
    sys.exit(1)

# Format the output to follow mail conventions, but make sure that links are
# not split across two lines
config.INLINE_LINKS = False
config.PROTECT_LINKS = True
config.WRAP_LIST_ITEMS = True
config.BODY_WIDTH = 70
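
# With INLINE_LINKS disabled, html2text emits reference-style links, so the
# generated text looks roughly like the sketch below (the sentence and the
# URL are made up for illustration):
#
#   The images are available on the [download page][1] along with their
#   signatures.
#
#   [1]: https://example.com/images/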

parser = HTML2Text()
output = parser.handle(soup.decode())
if wrapwrite is not None:
    # Older html2text versions provide wrapwrite(), which writes the text to
    # stdout and takes care of the output encoding.
    wrapwrite(output)
else:
    sys.stdout.write(output)