#!/usr/bin/env python3
# encoding: utf-8
# Copyright (C) 2020 Denis 'GNUtoo' Carikli
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.

# How to use:
# - Download the release notes from Wordpress. If they are not published yet,
#   you can still click on preview and use save-as on the preview page.
# - Point this program to the resulting html file; it will generate a text
#   version of it.
import sys

from bs4 import BeautifulSoup, FeatureNotFound
from html2text import config, HTML2Text

try:
    # wrapwrite has been removed in more recent versions of
    # python-html2text. See commit
    # b361467894fb277563b4547ec9d4df49f5e0c6e3
    # (b361467 Remove support for Python ≤ 3.4)
    # in https://github.com/Alir3z4/html2text.git
    from html2text.utils import wrapwrite
except ImportError:
    wrapwrite = None


def usage(progname):
    print("{} path/to/file.html".format(progname))
    sys.exit(1)


if len(sys.argv) != 2:
    usage(sys.argv[0])

html_file_path = sys.argv[1]

with open(html_file_path) as html_file:
    try:
        soup = BeautifulSoup(html_file, features="html5lib").article
    except FeatureNotFound:
        try:
            # For some reason the lxml parser isn't found with
            # python-beautifulsoup4 4.9.3-3.0 on Parabola. It's
            # probably better to use an html5 parser anyway as the
            # Replicant blog (now?) uses the html doctype and the
            # theme seems to include an html5.js file for the IE 9
            # browser.
            soup = BeautifulSoup(html_file, features="lxml").article
        except FeatureNotFound:
            print("Cannot find html5lib or lxml parsers")
            sys.exit(1)

if soup is None:
    print("Cannot find an <article> element in {}".format(html_file_path))
    sys.exit(1)

# Format the output to be compatible with mail conventions but make sure that
# the links are not split between two lines.
config.INLINE_LINKS = False
config.PROTECT_LINKS = True
config.WRAP_LIST_ITEMS = True
config.BODY_WIDTH = 70

parser = HTML2Text()

if wrapwrite is not None:
    wrapwrite(parser.handle(soup.decode()))
else:
    # Newer python-html2text releases no longer ship wrapwrite, so write
    # the converted text to stdout directly instead.
    sys.stdout.write(parser.handle(soup.decode()))
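
# Example invocation (a sketch only; the script and file names below are
# placeholders, not anything the script itself requires). The converted text
# is written to stdout, so it is usually redirected into a file:
#
#   ./release-notes-to-text.py saved-release-notes.html > release-notes.txt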