Script 'mail_helper' called by obssrc
Hello community,
here is the log from the commit of package python-nbconvert for
openSUSE:Factory checked in at 2023-06-21 22:39:16
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/python-nbconvert (Old)
and /work/SRC/openSUSE:Factory/.python-nbconvert.new.15902 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Package is "python-nbconvert"
Wed Jun 21 22:39:16 2023 rev:24 rq:1094160 version:7.6.0
Changes:
--------
--- /work/SRC/openSUSE:Factory/python-nbconvert/python-nbconvert.changes 2023-06-12 15:27:36.995465594 +0200
+++ /work/SRC/openSUSE:Factory/.python-nbconvert.new.15902/python-nbconvert.changes 2023-06-21 22:40:14.990512243 +0200
@@ -1,0 +2,11 @@
+Tue Jun 20 18:19:24 UTC 2023 - Ben Greiner <[email protected]>
+
+- Update to 7.6.0
+ * Update to Mistune v3 #1820 (@TiagodePAlves)
+- Release 7.5.0
+ * Add mermaidjs 10.2.3 #1957 (@bollwyvl)
+ * Fix pdf conversion with explicitly relative paths #2005
+ (@tuncbkose)
+ * Ensure TEXINPUTS is an absolute path #2002 (@tuncbkose)
+
+-------------------------------------------------------------------
Old:
----
nbconvert-7.4.0.tar.gz
New:
----
nbconvert-7.6.0.tar.gz
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Other differences:
------------------
++++++ python-nbconvert.spec ++++++
--- /var/tmp/diff_new_pack.VxTfC2/_old 2023-06-21 22:40:15.554515637 +0200
+++ /var/tmp/diff_new_pack.VxTfC2/_new 2023-06-21 22:40:15.562515685 +0200
@@ -29,12 +29,11 @@
%else
%bcond_with libalternatives
%endif
-# avoid rewriting
-%define python3dist python3dist
-# 7.4.0 gets abbreviated by pythondistdeps
-%define shortversion 7.4
+
+# 7.6.0 gets abbreviated by pythondistdeps
+%define shortversion 7.6
Name: python-nbconvert%{psuffix}
-Version: 7.4.0
+Version: 7.6.0
Release: 0
Summary: Conversion of Jupyter Notebooks
License: BSD-3-Clause AND MIT
@@ -51,17 +50,17 @@
Requires: python-MarkupSafe >= 2.0
Requires: python-Pygments >= 2.4.1
Requires: python-beautifulsoup4
-Requires: python-bleach
Requires: python-defusedxml
Requires: python-jupyter-core >= 4.7
Requires: python-jupyterlab-pygments
Requires: python-nbclient >= 0.5
-Requires: python-nbformat >= 5.1
+Requires: python-nbformat >= 5.7
Requires: python-packaging
Requires: python-pandocfilters >= 1.4.1
Requires: python-tinycss2
-Requires: python-traitlets >= 5.0
-Requires: (python-mistune >= 2.0.3 with python-mistune < 3)
+Requires: python-traitlets >= 5.1
+Requires: (python-bleach without python-bleach = 5.0.0)
+Requires: (python-mistune >= 2.0.3 with python-mistune < 4)
Recommends: pandoc
Recommends: python-tornado >= 6.1
Suggests: %{name}-latex
@@ -97,7 +96,7 @@
Summary: Conversion of Jupyter Notebooks
Requires: jupyter-ipykernel
Requires: jupyter-jupyter-core
-Requires: %python3dist(nbconvert) = %{shortversion}
+Requires: python3dist(nbconvert) = %{shortversion}
Conflicts: python3-jupyter_nbconvert < 5.5.0
%description -n jupyter-nbconvert
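
The new rich (boolean) Requires above mirror the upstream Python constraints bleach!=5.0.0 and mistune>=2.0.3,<4 (see the pyproject.toml diff further down). A minimal illustrative Python sketch of those same constraints, not part of the package, using python-packaging; the version literals are examples only:

    from packaging.specifiers import SpecifierSet
    from packaging.version import Version

    bleach_ok = SpecifierSet("!=5.0.0")        # any bleach except the excluded 5.0.0
    mistune_ok = SpecifierSet(">=2.0.3,<4")    # Mistune v3 is now accepted

    assert Version("5.0.1") in bleach_ok
    assert Version("5.0.0") not in bleach_ok
    assert Version("3.0.0") in mistune_ok
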
++++++ nbconvert-7.4.0.tar.gz -> nbconvert-7.6.0.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/nbconvert-7.4.0/.github/workflows/tests.yml new/nbconvert-7.6.0/.github/workflows/tests.yml
--- old/nbconvert-7.4.0/.github/workflows/tests.yml 2020-02-02 01:00:00.000000000 +0100
+++ new/nbconvert-7.6.0/.github/workflows/tests.yml 2020-02-02 01:00:00.000000000 +0100
@@ -42,7 +42,7 @@
sudo apt-get install xvfb x11-utils libxkbcommon-x11-0
libxcb-xinerama0 python3-pyqt5
# pandoc is not up to date in the ubuntu repos, so we install
directly
- wget
https://github.com/jgm/pandoc/releases/download/2.14.2/pandoc-2.14.2-1-amd64.deb
&& sudo dpkg -i pandoc-2.14.2-1-amd64.deb
+ wget
https://github.com/jgm/pandoc/releases/download/3.1.2/pandoc-3.1.2-1-amd64.deb
&& sudo dpkg -i pandoc-3.1.2-1-amd64.deb
- name: Run tests on Linux
if: ${{ startsWith(runner.os, 'linux') }}
@@ -110,10 +110,18 @@
with:
dependency_type: minimum
only_create_file: 1
- - name: Run the unit tests
+ - name: Install dependencies
+ run: |
+ sudo apt-get update
+ sudo apt-get install texlive-plain-generic inkscape texlive-xetex
latexmk
+ sudo apt-get install xvfb x11-utils libxkbcommon-x11-0
libxcb-xinerama0 python3-pyqt5
+
+ # pandoc is not up to date in the ubuntu repos, so we install
directly
+ wget
https://github.com/jgm/pandoc/releases/download/2.14.2/pandoc-2.14.2-1-amd64.deb
&& sudo dpkg -i pandoc-2.14.2-1-amd64.deb
+
+ - name: Run tests
run: |
- export NBFORMAT_VALIDATOR=jsonschema
- hatch run test:nowarn || hatch run test:nowarn --lf
+ xvfb-run --auto-servernum hatch run test:nowarn || xvfb-run
--auto-servernum hatch run test:nowarn --lf
test_prereleases:
name: Test Prereleases
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/nbconvert-7.4.0/.pre-commit-config.yaml new/nbconvert-7.6.0/.pre-commit-config.yaml
--- old/nbconvert-7.4.0/.pre-commit-config.yaml 2020-02-02 01:00:00.000000000 +0100
+++ new/nbconvert-7.6.0/.pre-commit-config.yaml 2020-02-02 01:00:00.000000000 +0100
@@ -19,7 +19,7 @@
- id: trailing-whitespace
- repo: https://github.com/python-jsonschema/check-jsonschema
- rev: 0.22.0
+ rev: 0.23.1
hooks:
- id: check-github-workflows
@@ -36,7 +36,7 @@
- id: black
- repo: https://github.com/charliermarsh/ruff-pre-commit
- rev: v0.0.263
+ rev: v0.0.270
hooks:
- id: ruff
args: ["--fix"]
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/nbconvert-7.4.0/CHANGELOG.md new/nbconvert-7.6.0/CHANGELOG.md
--- old/nbconvert-7.4.0/CHANGELOG.md 2020-02-02 01:00:00.000000000 +0100
+++ new/nbconvert-7.6.0/CHANGELOG.md 2020-02-02 01:00:00.000000000 +0100
@@ -2,6 +2,46 @@
<!-- <START NEW CHANGELOG ENTRY> -->
+## 7.6.0
+
+([Full
Changelog](https://github.com/jupyter/nbconvert/compare/v7.5.0...60af6d897c083444586829c636f278d84ae81962))
+
+### Maintenance and upkeep improvements
+
+- Update to Mistune v3 [#1820](https://github.com/jupyter/nbconvert/pull/1820)
([@TiagodePAlves](https://github.com/TiagodePAlves))
+
+### Contributors to this release
+
+([GitHub contributors page for this
release](https://github.com/jupyter/nbconvert/graphs/contributors?from=2023-06-13&to=2023-06-19&type=c))
+
+[@blink1073](https://github.com/search?q=repo%3Ajupyter%2Fnbconvert+involves%3Ablink1073+updated%3A2023-06-13..2023-06-19&type=Issues)
|
[@kloczek](https://github.com/search?q=repo%3Ajupyter%2Fnbconvert+involves%3Akloczek+updated%3A2023-06-13..2023-06-19&type=Issues)
|
[@TiagodePAlves](https://github.com/search?q=repo%3Ajupyter%2Fnbconvert+involves%3ATiagodePAlves+updated%3A2023-06-13..2023-06-19&type=Issues)
+
+<!-- <END NEW CHANGELOG ENTRY> -->
+
+## 7.5.0
+
+([Full
Changelog](https://github.com/jupyter/nbconvert/compare/v7.4.0...3dd3a67bf16474042efac25519ef257d708a8d7b))
+
+### Enhancements made
+
+- Add mermaidjs 10.2.3 [#1957](https://github.com/jupyter/nbconvert/pull/1957)
([@bollwyvl](https://github.com/bollwyvl))
+
+### Bugs fixed
+
+- Fix pdf conversion with explicitly relative paths
[#2005](https://github.com/jupyter/nbconvert/pull/2005)
([@tuncbkose](https://github.com/tuncbkose))
+- Ensure TEXINPUTS is an absolute path
[#2002](https://github.com/jupyter/nbconvert/pull/2002)
([@tuncbkose](https://github.com/tuncbkose))
+
+### Maintenance and upkeep improvements
+
+- bump pandoc max version
[#1997](https://github.com/jupyter/nbconvert/pull/1997)
([@tuncbkose](https://github.com/tuncbkose))
+- exclude bleach 5.0.0 from dependencies resolution
[#1990](https://github.com/jupyter/nbconvert/pull/1990)
([@karlicoss](https://github.com/karlicoss))
+
+### Contributors to this release
+
+([GitHub contributors page for this
release](https://github.com/jupyter/nbconvert/graphs/contributors?from=2023-05-08&to=2023-06-13&type=c))
+
+[@blink1073](https://github.com/search?q=repo%3Ajupyter%2Fnbconvert+involves%3Ablink1073+updated%3A2023-05-08..2023-06-13&type=Issues)
|
[@bollwyvl](https://github.com/search?q=repo%3Ajupyter%2Fnbconvert+involves%3Abollwyvl+updated%3A2023-05-08..2023-06-13&type=Issues)
|
[@karlicoss](https://github.com/search?q=repo%3Ajupyter%2Fnbconvert+involves%3Akarlicoss+updated%3A2023-05-08..2023-06-13&type=Issues)
|
[@pre-commit-ci](https://github.com/search?q=repo%3Ajupyter%2Fnbconvert+involves%3Apre-commit-ci+updated%3A2023-05-08..2023-06-13&type=Issues)
|
[@tuncbkose](https://github.com/search?q=repo%3Ajupyter%2Fnbconvert+involves%3Atuncbkose+updated%3A2023-05-08..2023-06-13&type=Issues)
+
## 7.4.0
([Full
Changelog](https://github.com/jupyter/nbconvert/compare/v7.3.1...32fcf7b26462f5d51d577f8beda9d49cd3a0f441))
@@ -28,8 +68,6 @@
[@blink1073](https://github.com/search?q=repo%3Ajupyter%2Fnbconvert+involves%3Ablink1073+updated%3A2023-04-10..2023-05-08&type=Issues)
|
[@krassowski](https://github.com/search?q=repo%3Ajupyter%2Fnbconvert+involves%3Akrassowski+updated%3A2023-04-10..2023-05-08&type=Issues)
|
[@pre-commit-ci](https://github.com/search?q=repo%3Ajupyter%2Fnbconvert+involves%3Apre-commit-ci+updated%3A2023-04-10..2023-05-08&type=Issues)
|
[@tuncbkose](https://github.com/search?q=repo%3Ajupyter%2Fnbconvert+involves%3Atuncbkose+updated%3A2023-04-10..2023-05-08&type=Issues)
-<!-- <END NEW CHANGELOG ENTRY> -->
-
## 7.3.1
([Full
Changelog](https://github.com/jupyter/nbconvert/compare/v7.3.0...3860152ecea3d9833540eebe279ff603b3d47cea))
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/nbconvert-7.4.0/PKG-INFO new/nbconvert-7.6.0/PKG-INFO
--- old/nbconvert-7.4.0/PKG-INFO 2020-02-02 01:00:00.000000000 +0100
+++ new/nbconvert-7.6.0/PKG-INFO 2020-02-02 01:00:00.000000000 +0100
@@ -1,6 +1,6 @@
Metadata-Version: 2.1
Name: nbconvert
-Version: 7.4.0
+Version: 7.6.0
Summary: Converting Jupyter Notebooks
Project-URL: Homepage, https://jupyter.org
Author-email: Jupyter Development Team <[email protected]>
@@ -45,21 +45,21 @@
Classifier: Programming Language :: Python :: 3
Requires-Python: >=3.7
Requires-Dist: beautifulsoup4
-Requires-Dist: bleach
+Requires-Dist: bleach!=5.0.0
Requires-Dist: defusedxml
Requires-Dist: importlib-metadata>=3.6; python_version < '3.10'
Requires-Dist: jinja2>=3.0
Requires-Dist: jupyter-core>=4.7
Requires-Dist: jupyterlab-pygments
Requires-Dist: markupsafe>=2.0
-Requires-Dist: mistune<3,>=2.0.3
+Requires-Dist: mistune<4,>=2.0.3
Requires-Dist: nbclient>=0.5.0
-Requires-Dist: nbformat>=5.1
+Requires-Dist: nbformat>=5.7
Requires-Dist: packaging
Requires-Dist: pandocfilters>=1.4.1
Requires-Dist: pygments>=2.4.1
Requires-Dist: tinycss2
-Requires-Dist: traitlets>=5.0
+Requires-Dist: traitlets>=5.1
Provides-Extra: all
Requires-Dist: nbconvert[docs,qtpdf,serve,test,webpdf]; extra == 'all'
Provides-Extra: docs
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/nbconvert-7.4.0/nbconvert/_version.py new/nbconvert-7.6.0/nbconvert/_version.py
--- old/nbconvert-7.4.0/nbconvert/_version.py 2020-02-02 01:00:00.000000000 +0100
+++ new/nbconvert-7.6.0/nbconvert/_version.py 2020-02-02 01:00:00.000000000 +0100
@@ -3,7 +3,7 @@
from typing import List
# Version string must appear intact for versioning
-__version__ = "7.4.0"
+__version__ = "7.6.0"
# Build up version_info tuple for backwards compatibility
pattern = r"(?P<major>\d+).(?P<minor>\d+).(?P<patch>\d+)(?P<rest>.*)"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/nbconvert-7.4.0/nbconvert/exporters/html.py new/nbconvert-7.6.0/nbconvert/exporters/html.py
--- old/nbconvert-7.4.0/nbconvert/exporters/html.py 2020-02-02 01:00:00.000000000 +0100
+++ new/nbconvert-7.6.0/nbconvert/exporters/html.py 2020-02-02 01:00:00.000000000 +0100
@@ -121,6 +121,15 @@
""",
).tag(config=True)
+ mermaid_js_url = Unicode(
+
"https://cdnjs.cloudflare.com/ajax/libs/mermaid/10.2.3/mermaid.esm.min.mjs",
+ help="""
+ URL to load MermaidJS from.
+
+ Defaults to loading from cdnjs.
+ """,
+ )
+
jquery_url = Unicode(
"https://cdnjs.cloudflare.com/ajax/libs/jquery/2.0.3/jquery.min.js",
help="""
@@ -303,6 +312,7 @@
resources["include_url"] = resources_include_url
resources["require_js_url"] = self.require_js_url
resources["mathjax_url"] = self.mathjax_url
+ resources["mermaid_js_url"] = self.mermaid_js_url
resources["jquery_url"] = self.jquery_url
resources["jupyter_widgets_base_url"] = self.jupyter_widgets_base_url
resources["widget_renderer_url"] = self.widget_renderer_url
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/nbconvert-7.4.0/nbconvert/exporters/latex.py new/nbconvert-7.6.0/nbconvert/exporters/latex.py
--- old/nbconvert-7.4.0/nbconvert/exporters/latex.py 2020-02-02 01:00:00.000000000 +0100
+++ new/nbconvert-7.6.0/nbconvert/exporters/latex.py 2020-02-02 01:00:00.000000000 +0100
@@ -3,12 +3,14 @@
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
+import os
from traitlets import default
from traitlets.config import Config
from nbconvert.filters.filter_links import resolve_references
from nbconvert.filters.highlight import Highlight2Latex
+from nbconvert.filters.pandoc import ConvertExplicitlyRelativePaths
from .templateexporter import TemplateExporter
@@ -77,6 +79,16 @@
)
self.register_filter("highlight_code", highlight_code)
+ # Need to make sure explicit relative paths are visible to latex for
pdf conversion
+ # https://github.com/jupyter/nbconvert/issues/1998
+ nb_path = resources.get("metadata", {}).get("path") if resources else
None
+ texinputs = os.path.abspath(nb_path) if nb_path else os.getcwd()
+ convert_explicitly_relative_paths = self.filters.get(
+ "convert_explicitly_relative_paths",
+ ConvertExplicitlyRelativePaths(texinputs=texinputs, parent=self),
+ )
+ self.register_filter("convert_explicitly_relative_paths",
convert_explicitly_relative_paths)
+
return super().from_notebook_node(nb, resources, **kw)
def _create_environment(self):
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/nbconvert-7.4.0/nbconvert/exporters/pdf.py new/nbconvert-7.6.0/nbconvert/exporters/pdf.py
--- old/nbconvert-7.4.0/nbconvert/exporters/pdf.py 2020-02-02 01:00:00.000000000 +0100
+++ new/nbconvert-7.6.0/nbconvert/exporters/pdf.py 2020-02-02 01:00:00.000000000 +0100
@@ -186,7 +186,7 @@
latex, resources = super().from_notebook_node(nb, resources=resources,
**kw)
# set texinputs directory, so that local files will be found
if resources and resources.get("metadata", {}).get("path"):
- self.texinputs = resources["metadata"]["path"]
+ self.texinputs = os.path.abspath(resources["metadata"]["path"])
else:
self.texinputs = os.getcwd()
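
The pdf.py change makes texinputs absolute because the LaTeX run happens in a temporary build directory, where a relative TEXINPUTS entry no longer points at the notebook's folder. A minimal sketch of the idea with assumed paths, not nbconvert's exact code:

    import os

    nb_path = "notebooks/report"              # assumed resources["metadata"]["path"]
    texinputs = os.path.abspath(nb_path)      # e.g. /home/user/project/notebooks/report
    # The trailing separator keeps TeX's default search path in place.
    env = dict(os.environ, TEXINPUTS=texinputs + os.pathsep)
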
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/nbconvert-7.4.0/nbconvert/exporters/tests/test_pdf.py new/nbconvert-7.6.0/nbconvert/exporters/tests/test_pdf.py
--- old/nbconvert-7.4.0/nbconvert/exporters/tests/test_pdf.py 2020-02-02 01:00:00.000000000 +0100
+++ new/nbconvert-7.6.0/nbconvert/exporters/tests/test_pdf.py 2020-02-02 01:00:00.000000000 +0100
@@ -8,6 +8,7 @@
from tempfile import TemporaryDirectory
from ...tests.utils import onlyif_cmds_exist
+from ...utils import _contextlib_chdir
from ..pdf import PDFExporter
from .base import ExportersTestsBase
@@ -39,3 +40,24 @@
assert len(output) > 0
# all temporary file should be cleaned up
assert {file_name} == set(os.listdir(td))
+
+ @onlyif_cmds_exist("xelatex", "pandoc")
+ def test_texinputs(self):
+ """
+ Is TEXINPUTS set properly when we are converting
+ - in the same directory, and
+ - in a different directory?
+ """
+ with TemporaryDirectory() as td, _contextlib_chdir.chdir(td):
+ os.mkdir("folder")
+ file_name = os.path.basename(self._get_notebook())
+ nb1 = os.path.join(td, file_name)
+ nb2 = os.path.join(td, "folder", file_name)
+ ex1 = self.exporter_class(latex_count=1) # type:ignore
+ ex2 = self.exporter_class(latex_count=1) # type:ignore
+ shutil.copy(self._get_notebook(), nb1)
+ shutil.copy(self._get_notebook(), nb2)
+ _ = ex1.from_filename(nb1)
+ _ = ex2.from_filename(nb2)
+ assert ex1.texinputs == os.path.abspath(".")
+ assert ex2.texinputs == os.path.abspath("./folder")
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/nbconvert-7.4.0/nbconvert/filters/markdown.py new/nbconvert-7.6.0/nbconvert/filters/markdown.py
--- old/nbconvert-7.4.0/nbconvert/filters/markdown.py 2020-02-02 01:00:00.000000000 +0100
+++ new/nbconvert-7.6.0/nbconvert/filters/markdown.py 2020-02-02 01:00:00.000000000 +0100
@@ -11,13 +11,14 @@
try:
from .markdown_mistune import markdown2html_mistune
+
except ImportError as e:
- # store in variable for Python 3
_mistune_import_error = e
- def markdown2html_mistune(source):
+ def markdown2html_mistune(source: str) -> str:
"""mistune is unavailable, raise ImportError"""
- raise ImportError("markdown2html requires mistune: %s" %
_mistune_import_error)
+ msg = f"markdown2html requires mistune: {_mistune_import_error}"
+ raise ImportError(msg)
from .pandoc import convert_pandoc
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/nbconvert-7.4.0/nbconvert/filters/markdown_mistune.py new/nbconvert-7.6.0/nbconvert/filters/markdown_mistune.py
--- old/nbconvert-7.4.0/nbconvert/filters/markdown_mistune.py 2020-02-02 01:00:00.000000000 +0100
+++ new/nbconvert-7.6.0/nbconvert/filters/markdown_mistune.py 2020-02-02 01:00:00.000000000 +0100
@@ -9,143 +9,260 @@
import base64
import mimetypes
import os
-import re
-from functools import partial
from html import escape
+from typing import Any, Callable, Dict, Iterable, Match, Optional, Tuple
import bs4
-from mistune import PLUGINS, BlockParser, HTMLRenderer, InlineParser, Markdown
# type:ignore
from pygments import highlight
from pygments.formatters import HtmlFormatter
+from pygments.lexer import Lexer
from pygments.lexers import get_lexer_by_name
from pygments.util import ClassNotFound
from nbconvert.filters.strings import add_anchor
-html_escape = partial(escape, quote=False)
-
+try: # for Mistune >= 3.0
+ from mistune import (
+ BlockParser,
+ BlockState,
+ HTMLRenderer,
+ InlineParser,
+ InlineState,
+ Markdown,
+ import_plugin,
+ )
-class InvalidNotebook(Exception): # noqa
- """An invalid notebook model."""
+ MISTUNE_V3 = True
- pass
+except ImportError: # for Mistune >= 2.0
+ import re
+ from mistune import ( # type: ignore[attr-defined]
+ PLUGINS,
+ BlockParser,
+ HTMLRenderer,
+ InlineParser,
+ Markdown,
+ )
-class MathBlockParser(BlockParser):
- """This acts as a pass-through to the MathInlineParser. It is needed in
- order to avoid other block level rules splitting math sections apart.
- """
+ MISTUNE_V3 = False
- MULTILINE_MATH = re.compile(
- r"(?<!\\)[$]{2}.*?(?<!\\)[$]{2}|"
- r"\\\\\[.*?\\\\\]|"
- r"\\begin\{([a-z]*\*?)\}.*?\\end\{\1\}",
- re.DOTALL,
- )
+ def import_plugin(name: str) -> 'MarkdownPlugin': # type: ignore[misc]
+ """Simple implementation of Mistune V3's import_plugin for V2."""
+ return PLUGINS[name] # type: ignore[no-any-return]
- RULE_NAMES = ("multiline_math", *BlockParser.RULE_NAMES)
- # Regex for header that doesn't require space after '#'
- AXT_HEADING = re.compile(r" {0,3}(#{1,6})(?!#+)(?:
*\n+|([^\n]*?)(?:\n+|\s+?#+\s*\n+))")
+class InvalidNotebook(Exception): # noqa
+ """An invalid notebook model."""
- def parse_multiline_math(self, m, state):
- """Pass token through mutiline math."""
- return {"type": "multiline_math", "text": m.group(0)}
+ pass
-def _dotall(pattern):
- """Make the '.' special character match any character inside the pattern,
including a newline.
+def _dotall(pattern: str) -> str:
+ """Makes the '.' special character match any character inside the pattern,
including a newline.
- This is implemented with the inline flag `(?s:...)` and is equivalent to
using `re.DOTALL` when
- it is the only pattern used. It is necessary since `mistune>=2.0.0`, where
the pattern is passed
- to the undocumented `re.Scanner`.
+ This is implemented with the inline flag `(?s:...)` and is equivalent to
using `re.DOTALL`.
+ It is useful for LaTeX environments, where line breaks may be present.
"""
return f"(?s:{pattern})"
-class MathInlineParser(InlineParser):
- r"""This interprets the content of LaTeX style math objects.
+if MISTUNE_V3: # Parsers for Mistune >= 3.0.0
- In particular this grabs ``$$...$$``, ``\\[...\\]``, ``\\(...\\)``,
``$...$``,
- and ``\begin{foo}...\end{foo}`` styles for declaring mathematics. It strips
- delimiters from all these varieties, and extracts the type of environment
- in the last case (``foo`` in this example).
- """
- BLOCK_MATH_TEX = _dotall(r"(?<!\\)\$\$(.*?)(?<!\\)\$\$")
- BLOCK_MATH_LATEX = _dotall(r"(?<!\\)\\\\\[(.*?)(?<!\\)\\\\\]")
- INLINE_MATH_TEX = _dotall(r"(?<![$\\])\$(.+?)(?<![$\\])\$")
- INLINE_MATH_LATEX = _dotall(r"(?<!\\)\\\\\((.*?)(?<!\\)\\\\\)")
- LATEX_ENVIRONMENT = _dotall(r"\\begin\{([a-z]*\*?)\}(.*?)\\end\{\1\}")
-
- # The order is important here
- RULE_NAMES = (
- "block_math_tex",
- "block_math_latex",
- "inline_math_tex",
- "inline_math_latex",
- "latex_environment",
- *InlineParser.RULE_NAMES,
- )
-
- def parse_block_math_tex(self, m, state):
- """Parse block text math."""
- # sometimes the Scanner keeps the final '$$', so we use the
- # full matched string and remove the math markers
- text = m.group(0)[2:-2]
- return "block_math", text
-
- def parse_block_math_latex(self, m, state):
- """Parse block latex math ."""
- text = m.group(1)
- return "block_math", text
-
- def parse_inline_math_tex(self, m, state):
- """Parse inline tex math."""
- text = m.group(1)
- return "inline_math", text
-
- def parse_inline_math_latex(self, m, state):
- """Parse inline latex math."""
- text = m.group(1)
- return "inline_math", text
-
- def parse_latex_environment(self, m, state):
- """Parse a latex environment."""
- name, text = m.group(1), m.group(2)
- return "latex_environment", name, text
+ class MathBlockParser(BlockParser):
+ """This acts as a pass-through to the MathInlineParser. It is needed in
+ order to avoid other block level rules splitting math sections apart.
+
+ It works by matching each multiline math environment as a single
paragraph,
+ so that other rules don't think each section is its own paragraph.
Inline
+ is ignored here.
+ """
+ AXT_HEADING_WITHOUT_LEADING_SPACES = (
+ r"^ {0,3}(?P<axt_1>#{1,6})(?!#+)(?P<axt_2>[ \t]*(.*?)?)$"
+ )
+
+ MULTILINE_MATH = _dotall(
+ # Display math mode, old TeX delimiter: $$ \sqrt{2} $$
+ r"(?<!\\)[$]{2}.*?(?<!\\)[$]{2}"
+ "|"
+ # Display math mode, new LaTeX delimiter: \[ \sqrt{2} \]
+ r"\\\\\[.*?\\\\\]"
+ "|"
+ # LaTeX environment: \begin{equation} \sqrt{2} \end{equation}
+
r"\\begin\{(?P<math_env_name>[a-z]*\*?)\}.*?\\end\{(?P=math_env_name)\}"
+ )
+
+ SPECIFICATION = {
+ **BlockParser.SPECIFICATION,
+ "axt_heading": AXT_HEADING_WITHOUT_LEADING_SPACES,
+ "multiline_math": MULTILINE_MATH,
+ }
+
+ # Multiline math must be searched before other rules
+ DEFAULT_RULES: Tuple[str, ...] = ("multiline_math",
*BlockParser.DEFAULT_RULES) # type: ignore[assignment]
+
+ def parse_multiline_math(self, m: Match[str], state: BlockState) ->
int:
+ """Send mutiline math as a single paragraph to MathInlineParser."""
+ matched_text = m[0]
+ state.add_paragraph(matched_text)
+ return m.end()
+
+ class MathInlineParser(InlineParser):
+ r"""This interprets the content of LaTeX style math objects.
+
+ In particular this grabs ``$$...$$``, ``\\[...\\]``, ``\\(...\\)``,
``$...$``,
+ and ``\begin{foo}...\end{foo}`` styles for declaring mathematics. It
strips
+ delimiters from all these varieties, and extracts the type of
environment
+ in the last case (``foo`` in this example).
+ """
-class MarkdownWithMath(Markdown):
- """Markdown text with math enabled."""
+ # Display math mode, using older TeX delimiter: $$ \pi $$
+ BLOCK_MATH_TEX =
_dotall(r"(?<!\\)\$\$(?P<math_block_tex>.*?)(?<!\\)\$\$")
+ # Display math mode, using newer LaTeX delimiter: \[ \pi \]
+ BLOCK_MATH_LATEX =
_dotall(r"(?<!\\)\\\\\[(?P<math_block_latex>.*?)(?<!\\)\\\\\]")
+ # Inline math mode, using older TeX delimiter: $ \pi $ (cannot be
empty!)
+ INLINE_MATH_TEX =
_dotall(r"(?<![$\\])\$(?P<math_inline_tex>.+?)(?<![$\\])\$")
+ # Inline math mode, using newer LaTeX delimiter: \( \pi \)
+ INLINE_MATH_LATEX =
_dotall(r"(?<!\\)\\\\\((?P<math_inline_latex>.*?)(?<!\\)\\\\\)")
+ # LaTeX math environment: \begin{equation} \pi \end{equation}
+ LATEX_ENVIRONMENT = _dotall(
+ r"\\begin\{(?P<math_env_name>[a-z]*\*?)\}"
+ r"(?P<math_env_body>.*?)"
+ r"\\end\{(?P=math_env_name)\}"
+ )
+
+ SPECIFICATION = {
+ **InlineParser.SPECIFICATION,
+ "block_math_tex": BLOCK_MATH_TEX,
+ "block_math_latex": BLOCK_MATH_LATEX,
+ "inline_math_tex": INLINE_MATH_TEX,
+ "inline_math_latex": INLINE_MATH_LATEX,
+ "latex_environment": LATEX_ENVIRONMENT,
+ }
+
+ # Block math must be matched first, and all math must come before text
+ DEFAULT_RULES: Tuple[str, ...] = (
+ "block_math_tex",
+ "block_math_latex",
+ "inline_math_tex",
+ "inline_math_latex",
+ "latex_environment",
+ *InlineParser.DEFAULT_RULES,
+ ) # type: ignore[assignment]
+
+ def parse_block_math_tex(self, m: Match[str], state: InlineState) ->
int:
+ """Parse older TeX-style display math."""
+ body = m.group("math_block_tex")
+ state.append_token({"type": "block_math", "raw": body})
+ return m.end()
+
+ def parse_block_math_latex(self, m: Match[str], state: InlineState) ->
int:
+ """Parse newer LaTeX-style display math."""
+ body = m.group("math_block_latex")
+ state.append_token({"type": "block_math", "raw": body})
+ return m.end()
+
+ def parse_inline_math_tex(self, m: Match[str], state: InlineState) ->
int:
+ """Parse older TeX-style inline math."""
+ body = m.group("math_inline_tex")
+ state.append_token({"type": "inline_math", "raw": body})
+ return m.end()
+
+ def parse_inline_math_latex(self, m: Match[str], state: InlineState)
-> int:
+ """Parse newer LaTeX-style inline math."""
+ body = m.group("math_inline_latex")
+ state.append_token({"type": "inline_math", "raw": body})
+ return m.end()
+
+ def parse_latex_environment(self, m: Match[str], state: InlineState)
-> int:
+ """Parse a latex environment."""
+ attrs = {"name": m.group("math_env_name"), "body":
m.group("math_env_body")}
+ state.append_token({"type": "latex_environment", "attrs": attrs})
+ return m.end()
+
+else: # Parsers for Mistune >= 2.0.0 < 3.0.0
+
+ class MathBlockParser(BlockParser): # type: ignore[no-redef]
+ """This acts as a pass-through to the MathInlineParser. It is needed in
+ order to avoid other block level rules splitting math sections apart.
+ """
- def __init__(self, renderer, block=None, inline=None, plugins=None):
- """Initialize the parser."""
- if block is None:
- block = MathBlockParser()
- if inline is None:
- inline = MathInlineParser(renderer, hard_wrap=False)
- if plugins is None:
- plugins = [
- # "abbr",
- # 'footnotes',
- "strikethrough",
- "table",
- "url",
- "task_lists",
- "def_list",
- ]
- _plugins = []
- for p in plugins:
- if isinstance(p, str):
- _plugins.append(PLUGINS[p])
- else:
- _plugins.append(p)
- plugins = _plugins
- super().__init__(renderer, block, inline, plugins)
+ MULTILINE_MATH = re.compile(
+ # Display math mode, old TeX delimiter: $$ \sqrt{2} $$
+ r"(?<!\\)[$]{2}.*?(?<!\\)[$]{2}|"
+ # Display math mode, new LaTeX delimiter: \[ \sqrt{2} \]
+ r"\\\\\[.*?\\\\\]|"
+ # LaTeX environment: \begin{equation} \sqrt{2} \end{equation}
+ r"\\begin\{([a-z]*\*?)\}.*?\\end\{\1\}",
+ re.DOTALL,
+ )
+
+ # Regex for header that doesn't require space after '#'
+ AXT_HEADING = re.compile(r" {0,3}(#{1,6})(?!#+)(?:
*\n+|([^\n]*?)(?:\n+|\s+?#+\s*\n+))")
+
+ # Multiline math must be searched before other rules
+ RULE_NAMES = ("multiline_math", *BlockParser.RULE_NAMES) # type:
ignore
+
+ def parse_multiline_math(self, m: Match[str], state: Any) -> Dict[str,
str]:
+ """Pass token through mutiline math."""
+ return {"type": "multiline_math", "text": m.group(0)}
+
+ class MathInlineParser(InlineParser): # type: ignore[no-redef]
+ r"""This interprets the content of LaTeX style math objects.
+
+ In particular this grabs ``$$...$$``, ``\\[...\\]``, ``\\(...\\)``,
``$...$``,
+ and ``\begin{foo}...\end{foo}`` styles for declaring mathematics. It
strips
+ delimiters from all these varieties, and extracts the type of
environment
+ in the last case (``foo`` in this example).
+ """
- def render(self, s):
- """Compatibility method with `mistune==0.8.4`."""
- return self.parse(s)
+ # Display math mode, using older TeX delimiter: $$ \pi $$
+ BLOCK_MATH_TEX = _dotall(r"(?<!\\)\$\$(.*?)(?<!\\)\$\$")
+ # Display math mode, using newer LaTeX delimiter: \[ \pi \]
+ BLOCK_MATH_LATEX = _dotall(r"(?<!\\)\\\\\[(.*?)(?<!\\)\\\\\]")
+ # Inline math mode, using older TeX delimiter: $ \pi $ (cannot be
empty!)
+ INLINE_MATH_TEX = _dotall(r"(?<![$\\])\$(.+?)(?<![$\\])\$")
+ # Inline math mode, using newer LaTeX delimiter: \( \pi \)
+ INLINE_MATH_LATEX = _dotall(r"(?<!\\)\\\\\((.*?)(?<!\\)\\\\\)")
+ # LaTeX math environment: \begin{equation} \pi \end{equation}
+ LATEX_ENVIRONMENT = _dotall(r"\\begin\{([a-z]*\*?)\}(.*?)\\end\{\1\}")
+
+ RULE_NAMES = (
+ "block_math_tex",
+ "block_math_latex",
+ "inline_math_tex",
+ "inline_math_latex",
+ "latex_environment",
+ *InlineParser.RULE_NAMES, # type: ignore
+ )
+
+ def parse_block_math_tex(self, m: Match[str], state: Any) ->
Tuple[str, str]:
+ """Parse block text math."""
+ # sometimes the Scanner keeps the final '$$', so we use the
+ # full matched string and remove the math markers
+ text = m.group(0)[2:-2]
+ return "block_math", text
+
+ def parse_block_math_latex(self, m: Match[str], state: Any) ->
Tuple[str, str]:
+ """Parse block latex math ."""
+ text = m.group(1)
+ return "block_math", text
+
+ def parse_inline_math_tex(self, m: Match[str], state: Any) ->
Tuple[str, str]:
+ """Parse inline tex math."""
+ text = m.group(1)
+ return "inline_math", text
+
+ def parse_inline_math_latex(self, m: Match[str], state: Any) ->
Tuple[str, str]:
+ """Parse inline latex math."""
+ text = m.group(1)
+ return "inline_math", text
+
+ def parse_latex_environment(self, m: Match[str], state: Any) ->
Tuple[str, str, str]:
+ """Parse a latex environment."""
+ name, text = m.group(1), m.group(2)
+ return "latex_environment", name, text
class IPythonRenderer(HTMLRenderer):
@@ -153,13 +270,13 @@
def __init__( # noqa
self,
- escape=True,
- allow_harmful_protocols=True,
- embed_images=False,
- exclude_anchor_links=False,
- anchor_link_text="¶",
- path="",
- attachments=None,
+ escape: bool = True,
+ allow_harmful_protocols: bool = True,
+ embed_images: bool = False,
+ exclude_anchor_links: bool = False,
+ anchor_link_text: str = "¶",
+ path: str = "",
+ attachments: Optional[Dict[str, Dict[str, str]]] = None,
):
"""Initialize the renderer."""
super().__init__(escape, allow_harmful_protocols)
@@ -172,75 +289,101 @@
else:
self.attachments = {}
- def block_code(self, code, info=None):
+ def block_code(self, code: str, info: Optional[str] = None) -> str:
"""Handle block code."""
- lang = ""
- lexer = None
+ lang: Optional[str] = ""
+ lexer: Optional[Lexer] = None
+
if info:
+ if info.startswith("mermaid"):
+ return self.block_mermaidjs(code)
+
try:
- lang = info.strip().split(None, 1)[0]
+ lang = info.strip().split(maxsplit=1)[0]
lexer = get_lexer_by_name(lang, stripall=True)
except ClassNotFound:
- code = lang + "\n" + code
- lang = None # type:ignore
+ code = f"{lang}\n{code}"
+ lang = None
if not lang:
- return super().block_code(code)
+ return super().block_code(code, info=info)
formatter = HtmlFormatter()
return highlight(code, lexer, formatter)
- def block_html(self, html):
+ def block_mermaidjs(self, code: str) -> str:
+ """Handle mermaid syntax."""
+ return (
+ """<div class="jp-Mermaid"><pre class="mermaid">\n"""
+ f"""{code.strip()}"""
+ """\n</pre></div>"""
+ )
+
+ def block_html(self, html: str) -> str:
"""Handle block html."""
if self.embed_images:
html = self._html_embed_images(html)
return super().block_html(html)
- def inline_html(self, html):
+ def inline_html(self, html: str) -> str:
"""Handle inline html."""
if self.embed_images:
html = self._html_embed_images(html)
return super().inline_html(html)
- def heading(self, text, level):
+ def heading(self, text: str, level: int, **attrs: Dict[str, Any]) -> str:
"""Handle a heading."""
- html = super().heading(text, level)
+ html = super().heading(text, level, **attrs)
if self.exclude_anchor_links:
return html
- return add_anchor(html, anchor_link_text=self.anchor_link_text)
+ return str(add_anchor(html, anchor_link_text=self.anchor_link_text))
- def escape_html(self, text):
+ def escape_html(self, text: str) -> str:
"""Escape html content."""
- return html_escape(text)
+ return escape(text, quote=False)
- def multiline_math(self, text):
- """Handle mulitline math."""
- return text
-
- def block_math(self, text):
+ def block_math(self, body: str) -> str:
"""Handle block math."""
- return f"$${self.escape_html(text)}$$"
+ return f"$${self.escape_html(body)}$$"
+
+ def multiline_math(self, text: str) -> str:
+ """Handle mulitline math for older mistune versions."""
+ return text
- def latex_environment(self, name, text):
+ def latex_environment(self, name: str, body: str) -> str:
"""Handle a latex environment."""
- name, text = self.escape_html(name), self.escape_html(text)
- return f"\\begin{{{name}}}{text}\\end{{{name}}}"
+ name, body = self.escape_html(name), self.escape_html(body)
+ return f"\\begin{{{name}}}{body}\\end{{{name}}}"
- def inline_math(self, text):
+ def inline_math(self, body: str) -> str:
"""Handle inline math."""
- return f"${self.escape_html(text)}$"
+ return f"${self.escape_html(body)}$"
- def image(self, src, text, title):
+ def image(self, text: str, url: str, title: Optional[str] = None) -> str:
"""Rendering a image with title and text.
- :param src: source link of the image.
:param text: alt text of the image.
+ :param url: source link of the image.
:param title: title text of the image.
+
+ :note: The parameters `text` and `url` are swapped in older versions
+ of mistune.
+ """
+ if MISTUNE_V3:
+ url = self._embed_image_or_attachment(url)
+ else: # for mistune v2, the first argument is the URL
+ text = self._embed_image_or_attachment(text)
+
+ return super().image(text, url, title)
+
+ def _embed_image_or_attachment(self, src: str) -> str:
+ """Embed an image or attachment, depending on the configuration.
+ If neither is possible, returns the original URL.
"""
- attachment_prefix = "attachment:"
+ attachment_prefix = "attachment:"
if src.startswith(attachment_prefix):
name = src[len(attachment_prefix) :]
@@ -250,25 +393,22 @@
attachment = self.attachments[name]
# we choose vector over raster, and lossless over lossy
- preferred_mime_types = ["image/svg+xml", "image/png", "image/jpeg"]
- for preferred_mime_type in preferred_mime_types:
- if preferred_mime_type in attachment:
- break
- else: # otherwise we choose the first mimetype we can find
- preferred_mime_type = list(attachment.keys())[0]
- mime_type = preferred_mime_type
- data = attachment[mime_type]
- src = "data:" + mime_type + ";base64," + data
+ preferred_mime_types = ("image/svg+xml", "image/png", "image/jpeg")
+ for mime_type in preferred_mime_types:
+ if mime_type in attachment:
+ return f"data:{mime_type};base64,{attachment[mime_type]}"
+ # otherwise we choose the first mimetype we can find
+ default_mime_type = tuple(attachment.keys())[0]
+ return
f"data:{default_mime_type};base64,{attachment[default_mime_type]}"
elif self.embed_images:
base64_url = self._src_to_base64(src)
-
if base64_url is not None:
- src = base64_url
+ return base64_url
- return super().image(src, text, title)
+ return src
- def _src_to_base64(self, src):
+ def _src_to_base64(self, src: str) -> Optional[str]:
"""Turn the source file into a base64 url.
:param src: source link of the file.
@@ -280,30 +420,72 @@
return None
with open(src_path, "rb") as fobj:
- mime_type = mimetypes.guess_type(src_path)[0]
+ mime_type, _ = mimetypes.guess_type(src_path)
base64_data = base64.b64encode(fobj.read())
base64_str = base64_data.replace(b"\n", b"").decode("ascii")
return f"data:{mime_type};base64,{base64_str}"
- def _html_embed_images(self, html):
+ def _html_embed_images(self, html: str) -> str:
parsed_html = bs4.BeautifulSoup(html, features="html.parser")
- imgs = parsed_html.find_all("img")
+ imgs: bs4.ResultSet[bs4.Tag] = parsed_html.find_all("img")
# Replace img tags's sources by base64 dataurls
for img in imgs:
- if "src" not in img.attrs:
+ src = img.attrs.get("src")
+ if src is None:
continue
base64_url = self._src_to_base64(img.attrs["src"])
-
if base64_url is not None:
img.attrs["src"] = base64_url
return str(parsed_html)
-def markdown2html_mistune(source):
+# Represents an already imported plugin for Mistune
+MarkdownPlugin = Callable[[Markdown], None]
+
+
+class MarkdownWithMath(Markdown):
+ """Markdown text with math enabled."""
+
+ DEFAULT_PLUGINS = (
+ # "abbr", (see https://github.com/jupyter/nbconvert/pull/1853)
+ # "footnotes",
+ "strikethrough",
+ "table",
+ "url",
+ "task_lists",
+ "def_list",
+ )
+
+ def __init__(
+ self,
+ renderer: HTMLRenderer,
+ block: Optional[BlockParser] = None,
+ inline: Optional[InlineParser] = None,
+ plugins: Optional[Iterable[MarkdownPlugin]] = None,
+ ):
+ """Initialize the parser."""
+ if block is None:
+ block = MathBlockParser()
+ if inline is None:
+ if MISTUNE_V3:
+ inline = MathInlineParser(hard_wrap=False)
+ else:
+ inline = MathInlineParser(renderer, hard_wrap=False) # type:
ignore
+ if plugins is None:
+ plugins = (import_plugin(p) for p in self.DEFAULT_PLUGINS)
+
+ super().__init__(renderer, block, inline, plugins)
+
+ def render(self, source: str) -> str:
+ """Render the HTML output for a Markdown source."""
+ return str(super().__call__(source))
+
+
+def markdown2html_mistune(source: str) -> str:
"""Convert a markdown string to HTML using mistune"""
return
MarkdownWithMath(renderer=IPythonRenderer(escape=False)).render(source)
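
The rewritten filter keeps one public entry point, markdown2html_mistune, working on both Mistune 2 and 3. A quick sanity-check sketch; exact HTML output may differ slightly between Mistune versions:

    from nbconvert.filters.markdown_mistune import markdown2html_mistune

    # Inline math is passed through with its delimiters for MathJax to render.
    print(markdown2html_mistune(r"Euler: $e^{i\pi} + 1 = 0$"))

    # A fenced mermaid block is wrapped for the new MermaidJS template support:
    # <div class="jp-Mermaid"><pre class="mermaid"> ... </pre></div>
    fence = "`" * 3
    print(markdown2html_mistune(f"{fence}mermaid\nflowchart LR\n  a --> b\n{fence}"))
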
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/nbconvert-7.4.0/nbconvert/filters/pandoc.py new/nbconvert-7.6.0/nbconvert/filters/pandoc.py
--- old/nbconvert-7.4.0/nbconvert/filters/pandoc.py 2020-02-02 01:00:00.000000000 +0100
+++ new/nbconvert-7.6.0/nbconvert/filters/pandoc.py 2020-02-02 01:00:00.000000000 +0100
@@ -1,4 +1,12 @@
-"""Convert between any two formats using pandoc."""
+"""
+Convert between any two formats using pandoc,
+and related filters
+"""
+import os
+
+from pandocfilters import Image, applyJSONFilters # type:ignore
+
+from nbconvert.utils.base import NbConvertBase
from nbconvert.utils.pandoc import pandoc
@@ -23,3 +31,48 @@
Output as returned by pandoc.
"""
return pandoc(source, from_format, to_format, extra_args=extra_args)
+
+
+# When converting to pdf, explicitly relative references
+# like "./" and "../" doesn't work with TEXINPUTS.
+# So we need to convert them to absolute paths.
+# See https://github.com/jupyter/nbconvert/issues/1998
+class ConvertExplicitlyRelativePaths(NbConvertBase):
+ """A converter that handles relative path references."""
+
+ def __init__(self, texinputs=None, **kwargs):
+ """Initialize the converter."""
+ # texinputs should be the directory of the notebook file
+ self.nb_dir = os.path.abspath(texinputs) if texinputs else ""
+ self.ancestor_dirs = self.nb_dir.split("/")
+ super().__init__(**kwargs)
+
+ def __call__(self, source):
+ """Invoke the converter."""
+ # If this is not set for some reason, we can't do anything,
+ if self.nb_dir:
+ return applyJSONFilters([self.action], source)
+ return source
+
+ def action(self, key, value, frmt, meta):
+ """Perform the action."""
+ # Convert explicitly relative paths:
+ # ./path -> path (This should be visible to the latex engine since
TEXINPUTS already has .)
+ # ../path -> /abs_path
+ # assuming all relative references are at the start of a given path
+ if key == "Image":
+ # Image seems to have this composition, according to
https://github.com/jgm/pandoc-types
+ attr, caption, [filename, typedef] = value
+
+ if filename[:2] == "./":
+ filename = filename[2:]
+ elif filename[:3] == "../":
+ n_up = 0
+ while filename[:3] == "../":
+ n_up += 1
+ filename = filename[3:]
+ ancestors = "/".join(self.ancestor_dirs[:-n_up]) + "/"
+ filename = ancestors + filename
+ return Image(attr, caption, [filename, typedef])
+ # If not image, return "no change"
+ return None
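
For orientation, a standalone sketch of the path rewrite that ConvertExplicitlyRelativePaths applies to pandoc Image targets, assuming a POSIX layout and a notebook directory of /home/user/src (the same cases the new test file below exercises); this is an illustration, not the filter itself:

    def rewrite(path, nb_dir="/home/user/src"):
        # "./x" -> "x": TEXINPUTS already covers the notebook directory itself.
        if path.startswith("./"):
            return path[2:]
        # "../x" (possibly repeated) -> absolute path above the notebook directory.
        parts = nb_dir.split("/")
        n_up = 0
        while path.startswith("../"):
            n_up += 1
            path = path[3:]
        return "/".join(parts[:-n_up]) + "/" + path if n_up else path

    assert rewrite("./images/im.png") == "images/im.png"
    assert rewrite("../im.png") == "/home/user/im.png"
    assert rewrite("../../images/im.png") == "/home/images/im.png"
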
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/nbconvert-7.4.0/nbconvert/filters/tests/test_markdown.py new/nbconvert-7.6.0/nbconvert/filters/tests/test_markdown.py
--- old/nbconvert-7.4.0/nbconvert/filters/tests/test_markdown.py 2020-02-02 01:00:00.000000000 +0100
+++ new/nbconvert-7.6.0/nbconvert/filters/tests/test_markdown.py 2020-02-02 01:00:00.000000000 +0100
@@ -247,6 +247,19 @@
tokens[index],
)
+ def test_mermaid_markdown(self):
+ code = """flowchart LR
+ chicken --> egg --> chicken"""
+ case = f"""```mermaid\n {code}\n```"""
+
+ output_check = (
+ """<div class="jp-Mermaid"><pre class="mermaid">\n"""
+ f"""{code.strip()}"""
+ """\n</pre></div>"""
+ )
+
+ self._try_markdown(markdown2html, case, output_check)
+
def _try_markdown(self, method, test, tokens):
results = method(test)
if isinstance(tokens, (str,)):
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/nbconvert-7.4.0/nbconvert/filters/tests/test_pandoc.py new/nbconvert-7.6.0/nbconvert/filters/tests/test_pandoc.py
--- old/nbconvert-7.4.0/nbconvert/filters/tests/test_pandoc.py 1970-01-01 01:00:00.000000000 +0100
+++ new/nbconvert-7.6.0/nbconvert/filters/tests/test_pandoc.py 2020-02-02 01:00:00.000000000 +0100
@@ -0,0 +1,59 @@
+"""
+Module with tests for Pandoc filters
+"""
+
+# Copyright (c) Jupyter Development Team.
+# Distributed under the terms of the Modified BSD License.
+
+import json
+
+from ...tests.base import TestsBase
+from ...tests.utils import onlyif_cmds_exist
+from ..pandoc import ConvertExplicitlyRelativePaths, convert_pandoc
+
+
+class TestPandocFilters(TestsBase):
+ @onlyif_cmds_exist("pandoc")
+ def test_convert_explicitly_relative_paths(self):
+ """
+ Do the image links in a markdown file located in dir get processed
correctly?
+ """
+ inp_dir = "/home/user/src"
+ fltr = ConvertExplicitlyRelativePaths(texinputs=inp_dir)
+
+ # pairs of input, expected
+ tests = {
+ # TEXINPUTS is enough, abs_path not needed
+ "im.png": "im.png",
+ "./im.png": "im.png",
+ "./images/im.png": "images/im.png",
+ # TEXINPUTS is not enough, abs_path needed
+ "../im.png": "/home/user/im.png",
+ "../images/im.png": "/home/user/images/im.png",
+ "../../images/im.png": "/home/images/im.png",
+ }
+
+ # this shouldn't be modified by the filter
+ # since it is a code block inside markdown,
+ # not an image link itself
+ fake = """
+ ```
+ \\includegraphics{../fake.png}
+ ```
+ """
+
+ # convert to markdown image
+ def foo(filename):
+ return f""
+
+ # create input markdown and convert to pandoc json
+ inp = convert_pandoc(fake + "\n".join(map(foo, tests.keys())),
"markdown", "json")
+ expected = convert_pandoc(fake + "\n".join(map(foo, tests.values())),
"markdown", "json")
+ # Do this to fix string formatting
+ expected = json.dumps(json.loads(expected))
+ self.assertEqual(expected, fltr(inp))
+
+ def test_convert_explicitly_relative_paths_no_texinputs(self):
+ # no texinputs should lead to just returning
+ fltr = ConvertExplicitlyRelativePaths(texinputs="")
+ self.assertEqual("test", fltr("test"))
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/nbconvert-7.4.0/nbconvert/utils/pandoc.py new/nbconvert-7.6.0/nbconvert/utils/pandoc.py
--- old/nbconvert-7.4.0/nbconvert/utils/pandoc.py 2020-02-02 01:00:00.000000000 +0100
+++ new/nbconvert-7.6.0/nbconvert/utils/pandoc.py 2020-02-02 01:00:00.000000000 +0100
@@ -13,8 +13,8 @@
from .exceptions import ConversionException
-_minimal_version = "1.12.1"
-_maximal_version = "3.0.0"
+_minimal_version = "2.14.2"
+_maximal_version = "4.0.0"
def pandoc(source, fmt, to, extra_args=None, encoding="utf-8"):
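
With the bounds above, nbconvert now expects a pandoc version of at least 2.14.2 and below 4.0.0. A small check sketch; it assumes the existing helpers in nbconvert.utils.pandoc behave as described:

    from nbconvert.utils.pandoc import check_pandoc_version, get_pandoc_version

    print(get_pandoc_version())   # e.g. 3.1.2, the version the CI workflow above installs
    check_pandoc_version()        # warns when the installed pandoc falls outside the range
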
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/nbconvert-7.4.0/pyproject.toml new/nbconvert-7.6.0/pyproject.toml
--- old/nbconvert-7.4.0/pyproject.toml 2020-02-02 01:00:00.000000000 +0100
+++ new/nbconvert-7.6.0/pyproject.toml 2020-02-02 01:00:00.000000000 +0100
@@ -21,21 +21,21 @@
requires-python = ">=3.7"
dependencies = [
"beautifulsoup4",
- "bleach",
+ "bleach!=5.0.0",
"defusedxml",
"importlib_metadata>=3.6;python_version<\"3.10\"",
"jinja2>=3.0",
"jupyter_core>=4.7",
"jupyterlab_pygments",
"MarkupSafe>=2.0",
- "mistune>=2.0.3,<3",
+ "mistune>=2.0.3,<4",
"nbclient>=0.5.0",
- "nbformat>=5.1",
+ "nbformat>=5.7",
"packaging",
"pandocfilters>=1.4.1",
"pygments>=2.4.1",
"tinycss2",
- "traitlets>=5.0",
+ "traitlets>=5.1",
]
dynamic = ["version"]
@@ -129,7 +129,7 @@
"black[jupyter]==23.3.0",
"mdformat>0.7",
"mdformat-gfm>=0.3.5",
- "ruff==0.0.263"
+ "ruff==0.0.270"
]
detached = true
[tool.hatch.envs.lint.scripts]
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/nbconvert-7.4.0/share/templates/classic/index.html.j2 new/nbconvert-7.6.0/share/templates/classic/index.html.j2
--- old/nbconvert-7.4.0/share/templates/classic/index.html.j2 2020-02-02 01:00:00.000000000 +0100
+++ new/nbconvert-7.6.0/share/templates/classic/index.html.j2 2020-02-02 01:00:00.000000000 +0100
@@ -19,6 +19,12 @@
{%- block html_head_js_requirejs -%}
<script src="{{ resources.require_js_url }}"></script>
{%- endblock html_head_js_requirejs -%}
+{%- block html_head_js_mermaidjs -%}
+<script type="module">
+ import mermaid from '{{ resources.mermaid_js_url }}';
+ mermaid.initialize({ startOnLoad: true });
+</script>
+{%- endblock html_head_js_mermaidjs -%}
{%- endblock html_head_js -%}
{% block jupyter_widgets %}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/nbconvert-7.4.0/share/templates/lab/index.html.j2 new/nbconvert-7.6.0/share/templates/lab/index.html.j2
--- old/nbconvert-7.4.0/share/templates/lab/index.html.j2 2020-02-02 01:00:00.000000000 +0100
+++ new/nbconvert-7.6.0/share/templates/lab/index.html.j2 2020-02-02 01:00:00.000000000 +0100
@@ -1,5 +1,6 @@
{%- extends 'base.html.j2' -%}
{% from 'mathjax.html.j2' import mathjax %}
+{% from 'mermaidjs.html.j2' import mermaid_js %}
{% from 'jupyter_widgets.html.j2' import jupyter_widgets %}
{%- block header -%}
@@ -149,6 +150,10 @@
{{ mathjax(resources.mathjax_url) }}
{%- endblock html_head_js_mathjax -%}
+{%- block html_head_js_mermaidjs -%}
+{{ mermaid_js(resources.mermaid_js_url) }}
+{%- endblock html_head_js_mermaidjs -%}
+
{%- block html_head_css -%}
{%- endblock html_head_css -%}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/nbconvert-7.4.0/share/templates/lab/mermaidjs.html.j2 new/nbconvert-7.6.0/share/templates/lab/mermaidjs.html.j2
--- old/nbconvert-7.4.0/share/templates/lab/mermaidjs.html.j2 1970-01-01 01:00:00.000000000 +0100
+++ new/nbconvert-7.6.0/share/templates/lab/mermaidjs.html.j2 2020-02-02 01:00:00.000000000 +0100
@@ -0,0 +1,114 @@
+{%- macro mermaid_js(
+url="https://cdnjs.cloudflare.com/ajax/libs/mermaid/10.0.2/mermaid.esm.min.mjs"
+) -%}
+<script type="module">
+ document.addEventListener("DOMContentLoaded", async () => {
+ const diagrams = document.querySelectorAll(".jp-Mermaid > pre.mermaid");
+ // do not load mermaidjs if not needed
+ if (!diagrams.length) {
+ return;
+ }
+ const mermaid = (await import("{{ url }}")).default;
+
+ mermaid.initialize({
+ maxTextSize: 100000,
+ startOnLoad: false,
+ fontFamily: window
+ .getComputedStyle(document.body)
+ .getPropertyValue("--jp-ui-font-family"),
+ theme: document.querySelector("body[data-jp-theme-light='true']")
+ ? "default"
+ : "dark",
+ });
+
+ let _nextMermaidId = 0;
+
+ function makeMermaidImage(svg) {
+ const img = document.createElement('img');
+ const maxWidth = svg.match(/max-width: (\d+)/);
+ if (maxWidth && maxWidth[1]) {
+ const width = parseInt(maxWidth[1]);
+ if (width && !Number.isNaN(width) && Number.isFinite(width)) {
+ img.width = width;
+ }
+ }
+ img.setAttribute('src', `data:image/svg+xml,${encodeURIComponent(svg)}`);
+ return img;
+ }
+
+ async function makeMermaidError(text) {
+ let errorMessage = '';
+ try {
+ await mermaid.parse(text);
+ } catch (err) {
+ errorMessage = `${err}`;
+ }
+
+ const result = document.createElement('details');
+ const summary = document.createElement('summary');
+ const pre = document.createElement('pre');
+ const code = document.createElement('code');
+ code.innerText = text;
+ pre.appendChild(code);
+ summary.appendChild(pre);
+ result.appendChild(summary);
+
+ const warning = document.createElement('pre');
+ warning.innerText = errorMessage;
+ result.appendChild(warning);
+ return result;
+ }
+
+ async function renderOneMarmaid(src) {
+ const id = `jp-mermaid-${_nextMermaidId++}`;
+ const parent = src.parentNode;
+ let raw = src.textContent.trim();
+ const el = document.createElement("div");
+ el.style.visibility = "hidden";
+ document.body.appendChild(el);
+ let result = null;
+ try {
+ const { svg } = await mermaid.render(id, raw, el);
+ result = makeMermaidImage(svg);
+ } catch (err) {
+ parent.classList.add("jp-mod-warning");
+ result = await makeMermaidError(raw);
+ } finally {
+ el.remove();
+ }
+ parent.classList.add("jp-RenderedMermaid");
+ parent.appendChild(result);
+ }
+
+ void Promise.all([...diagrams].map(renderOneMarmaid));
+ });
+</script>
+<style>
+ .jp-RenderedMarkdown .jp-Mermaid:not(.jp-RenderedMermaid) {
+ display: none;
+ }
+ .jp-RenderedMarkdown .jp-RenderedMermaid.jp-mod-warning {
+ width: auto;
+ padding: 10px;
+ border: var(--jp-border-width) solid var(--jp-warn-color2);
+ border-radius: var(--jp-border-radius);
+ color: var(--jp-ui-font-color1);
+ font-size: var(--jp-ui-font-size1);
+ white-space: pre-wrap;
+ word-wrap: break-word;
+ }
+ .jp-RenderedMarkdown .jp-RenderedMermaid.jp-mod-warning details > pre {
+ margin-top: 1em;
+ }
+ .jp-RenderedMarkdown .jp-RenderedMermaid.jp-mod-warning summary {
+ color: var(--jp-warn-color2);
+ }
+ .jp-RenderedMarkdown .jp-RenderedMermaid.jp-mod-warning summary > pre {
+ display: inline-block;
+ }
+ .jp-RenderedMermaid > .mermaid {
+ display: none;
+ }
+</style>
+<!-- End of mermaid configuration -->
+{%- endmacro %}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/nbconvert-7.4.0/share/templates/latex/base.tex.j2 new/nbconvert-7.6.0/share/templates/latex/base.tex.j2
--- old/nbconvert-7.4.0/share/templates/latex/base.tex.j2 2020-02-02 01:00:00.000000000 +0100
+++ new/nbconvert-7.6.0/share/templates/latex/base.tex.j2 2020-02-02 01:00:00.000000000 +0100
@@ -89,6 +89,7 @@
\usepackage[inline]{enumitem} % IRkernel/repr support (it uses the
enumerate* environment)
\usepackage[normalem]{ulem} % ulem is needed to support strikethroughs
(\sout)
% normalem makes italics be italics, not
underlines
+ \usepackage{soul} % strikethrough (\st) support for pandoc >= 3.0.0
\usepackage{mathrsfs}
((* endblock packages *))
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/nbconvert-7.4.0/share/templates/latex/document_contents.tex.j2 new/nbconvert-7.6.0/share/templates/latex/document_contents.tex.j2
--- old/nbconvert-7.4.0/share/templates/latex/document_contents.tex.j2 2020-02-02 01:00:00.000000000 +0100
+++ new/nbconvert-7.6.0/share/templates/latex/document_contents.tex.j2 2020-02-02 01:00:00.000000000 +0100
@@ -65,7 +65,7 @@
% Render markdown
((* block markdowncell scoped *))
- ((( cell.source | citation2latex | strip_files_prefix |
convert_pandoc('markdown+tex_math_double_backslash', 'json',extra_args=[]) |
resolve_references | convert_pandoc('json','latex'))))
+ ((( cell.source | citation2latex | strip_files_prefix |
convert_pandoc('markdown+tex_math_double_backslash', 'json',extra_args=[]) |
resolve_references | convert_explicitly_relative_paths |
convert_pandoc('json','latex'))))
((* endblock markdowncell *))
% Don't display unknown types
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/nbconvert-7.4.0/share/templates/reveal/index.html.j2 new/nbconvert-7.6.0/share/templates/reveal/index.html.j2
--- old/nbconvert-7.4.0/share/templates/reveal/index.html.j2 2020-02-02 01:00:00.000000000 +0100
+++ new/nbconvert-7.6.0/share/templates/reveal/index.html.j2 2020-02-02 01:00:00.000000000 +0100
@@ -30,6 +30,12 @@
{%- block html_head_js_requirejs -%}
<script src="{{ resources.require_js_url }}"></script>
{%- endblock html_head_js_requirejs -%}
+{%- block html_head_js_mermaidjs -%}
+<script type="module">
+ import mermaid from '{{ resources.mermaid_js_url }}';
+ mermaid.initialize({ startOnLoad: true });
+</script>
+{%- endblock html_head_js_mermaidjs -%}
{%- endblock html_head_js -%}
{% block jupyter_widgets %}