diff --git a/go.mod b/go.mod
index 2f81c9b16a..5d22b82745 100644
--- a/go.mod
+++ b/go.mod
@@ -79,11 +79,9 @@ require (
 	github.com/prometheus/procfs v0.0.4 // indirect
 	github.com/quasoft/websspi v1.0.0
 	github.com/remyoudompheng/bigfft v0.0.0-20190321074620-2f0d2b0e0001 // indirect
-	github.com/russross/blackfriday/v2 v2.0.1
 	github.com/satori/go.uuid v1.2.0
 	github.com/sergi/go-diff v1.0.0
 	github.com/shurcooL/httpfs v0.0.0-20190527155220-6a4d4a70508b // indirect
-	github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect
 	github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd
 	github.com/steveyen/gtreap v0.0.0-20150807155958-0abe01ef9be2 // indirect
 	github.com/stretchr/testify v1.4.0
@@ -95,6 +93,7 @@ require (
 	github.com/unknwon/paginater v0.0.0-20151104151617-7748a72e0141
 	github.com/urfave/cli v1.20.0
 	github.com/yohcop/openid-go v0.0.0-20160914080427-2c050d2dae53
+	github.com/yuin/goldmark v1.1.19
 	go.etcd.io/bbolt v1.3.3 // indirect
 	golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876
 	golang.org/x/net v0.0.0-20191101175033-0deb6923b6d9
diff --git a/go.sum b/go.sum
index a6f65167f5..247630d47d 100644
--- a/go.sum
+++ b/go.sum
@@ -462,16 +462,12 @@ github.com/remyoudompheng/bigfft v0.0.0-20190321074620-2f0d2b0e0001/go.mod h1:qq
 github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
 github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
 github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
-github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
-github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
 github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
 github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
 github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
 github.com/shurcooL/httpfs v0.0.0-20190527155220-6a4d4a70508b h1:4kg1wyftSKxLtnPAvcRWakIPpokB9w780/KwrNLnfPA=
 github.com/shurcooL/httpfs v0.0.0-20190527155220-6a4d4a70508b/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
-github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
-github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
 github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd h1:ug7PpSOB5RBPK1Kg6qskGBoP3Vnj/aNYFTznWvlkGo0=
 github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
 github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726/go.mod h1:3yhqj7WBBfRhbBlzyOC3gUxftwsU0u8gqevxwIHQpMw=
@@ -550,6 +546,8 @@ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q
 github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
 github.com/yohcop/openid-go v0.0.0-20160914080427-2c050d2dae53 h1:HsIQ6yAjfjQ3IxPGrTusxp6Qxn92gNVq2x5CbvQvx3w=
 github.com/yohcop/openid-go v0.0.0-20160914080427-2c050d2dae53/go.mod h1:f6elajwZV+xceiaqgRL090YzLEDGSbqr3poGL3ZgXYo=
+github.com/yuin/goldmark v1.1.19 h1:0s2/60x0XsFCXHeFut+F3azDVAAyIMyUfJRbRexiTYs=
+github.com/yuin/goldmark v1.1.19/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs=
 github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0=
 go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
diff --git a/modules/markup/common/footnote.go b/modules/markup/common/footnote.go
new file mode 100644
index 0000000000..ad4cd7f2e1
--- /dev/null
+++ b/modules/markup/common/footnote.go
@@ -0,0 +1,507 @@
+// Copyright 2019 Yusuke Inuzuka
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+// Most of what follows is a subtly changed version of github.com/yuin/goldmark/extension/footnote.go
+
+package common
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"strconv"
+	"unicode"
+
+	"github.com/yuin/goldmark"
+	"github.com/yuin/goldmark/ast"
+	"github.com/yuin/goldmark/parser"
+	"github.com/yuin/goldmark/renderer"
+	"github.com/yuin/goldmark/renderer/html"
+	"github.com/yuin/goldmark/text"
+	"github.com/yuin/goldmark/util"
+)
+
+// CleanValue will clean a value to make it safe to be an id
+// This function is quite different from the original goldmark function
+// and more closely matches the output from the shurcooL sanitizer
+// In particular Unicode letters and numbers are a lot more than a-zA-Z0-9...
+func CleanValue(value []byte) []byte {
+	value = bytes.TrimSpace(value)
+	rs := bytes.Runes(value)
+	result := make([]rune, 0, len(rs))
+	needsDash := false
+	for _, r := range rs {
+		switch {
+		case unicode.IsLetter(r) || unicode.IsNumber(r):
+			if needsDash && len(result) > 0 {
+				result = append(result, '-')
+			}
+			needsDash = false
+			result = append(result, unicode.ToLower(r))
+		default:
+			needsDash = true
+		}
+	}
+	return []byte(string(result))
+}
+
+// Most of what follows is a subtly changed version of github.com/yuin/goldmark/extension/footnote.go
+
+// A FootnoteLink struct represents a link to a footnote of Markdown
+// (PHP Markdown Extra) text.
+type FootnoteLink struct {
+	ast.BaseInline
+	Index int
+	Name  []byte
+}
+
+// Dump implements Node.Dump.
+func (n *FootnoteLink) Dump(source []byte, level int) {
+	m := map[string]string{}
+	m["Index"] = fmt.Sprintf("%v", n.Index)
+	m["Name"] = fmt.Sprintf("%v", n.Name)
+	ast.DumpHelper(n, source, level, m, nil)
+}
+
+// KindFootnoteLink is a NodeKind of the FootnoteLink node.
+var KindFootnoteLink = ast.NewNodeKind("GiteaFootnoteLink")
+
+// Kind implements Node.Kind.
+func (n *FootnoteLink) Kind() ast.NodeKind {
+	return KindFootnoteLink
+}
+
+// NewFootnoteLink returns a new FootnoteLink node.
+func NewFootnoteLink(index int, name []byte) *FootnoteLink {
+	return &FootnoteLink{
+		Index: index,
+		Name:  name,
+	}
+}
+
+// A FootnoteBackLink struct represents a link to a footnote of Markdown
+// (PHP Markdown Extra) text.
+type FootnoteBackLink struct {
+	ast.BaseInline
+	Index int
+	Name  []byte
+}
+
+// Dump implements Node.Dump.
+func (n *FootnoteBackLink) Dump(source []byte, level int) {
+	m := map[string]string{}
+	m["Index"] = fmt.Sprintf("%v", n.Index)
+	m["Name"] = fmt.Sprintf("%v", n.Name)
+	ast.DumpHelper(n, source, level, m, nil)
+}
+
+// KindFootnoteBackLink is a NodeKind of the FootnoteBackLink node.
+var KindFootnoteBackLink = ast.NewNodeKind("GiteaFootnoteBackLink")
+
+// Kind implements Node.Kind.
+func (n *FootnoteBackLink) Kind() ast.NodeKind {
+	return KindFootnoteBackLink
+}
+
+// NewFootnoteBackLink returns a new FootnoteBackLink node.
+func NewFootnoteBackLink(index int, name []byte) *FootnoteBackLink {
+	return &FootnoteBackLink{
+		Index: index,
+		Name:  name,
+	}
+}
+
+// A Footnote struct represents a footnote of Markdown
+// (PHP Markdown Extra) text.
+type Footnote struct {
+	ast.BaseBlock
+	Ref   []byte
+	Index int
+	Name  []byte
+}
+
+// Dump implements Node.Dump.
+func (n *Footnote) Dump(source []byte, level int) {
+	m := map[string]string{}
+	m["Index"] = fmt.Sprintf("%v", n.Index)
+	m["Ref"] = fmt.Sprintf("%s", n.Ref)
+	m["Name"] = fmt.Sprintf("%v", n.Name)
+	ast.DumpHelper(n, source, level, m, nil)
+}
+
+// KindFootnote is a NodeKind of the Footnote node.
+var KindFootnote = ast.NewNodeKind("GiteaFootnote")
+
+// Kind implements Node.Kind.
+func (n *Footnote) Kind() ast.NodeKind {
+	return KindFootnote
+}
+
+// NewFootnote returns a new Footnote node.
+func NewFootnote(ref []byte) *Footnote {
+	return &Footnote{
+		Ref:   ref,
+		Index: -1,
+		Name:  ref,
+	}
+}
+
+// A FootnoteList struct represents footnotes of Markdown
+// (PHP Markdown Extra) text.
+type FootnoteList struct {
+	ast.BaseBlock
+	Count int
+}
+
+// Dump implements Node.Dump.
+func (n *FootnoteList) Dump(source []byte, level int) {
+	m := map[string]string{}
+	m["Count"] = fmt.Sprintf("%v", n.Count)
+	ast.DumpHelper(n, source, level, m, nil)
+}
+
+// KindFootnoteList is a NodeKind of the FootnoteList node.
+var KindFootnoteList = ast.NewNodeKind("GiteaFootnoteList")
+
+// Kind implements Node.Kind.
+func (n *FootnoteList) Kind() ast.NodeKind {
+	return KindFootnoteList
+}
+
+// NewFootnoteList returns a new FootnoteList node.
+func NewFootnoteList() *FootnoteList {
+	return &FootnoteList{
+		Count: 0,
+	}
+}
+
+var footnoteListKey = parser.NewContextKey()
+
+type footnoteBlockParser struct {
+}
+
+var defaultFootnoteBlockParser = &footnoteBlockParser{}
+
+// NewFootnoteBlockParser returns a new parser.BlockParser that can parse
+// footnotes of the Markdown(PHP Markdown Extra) text.
+func NewFootnoteBlockParser() parser.BlockParser {
+	return defaultFootnoteBlockParser
+}
+
+func (b *footnoteBlockParser) Trigger() []byte {
+	return []byte{'['}
+}
+
+func (b *footnoteBlockParser) Open(parent ast.Node, reader text.Reader, pc parser.Context) (ast.Node, parser.State) {
+	line, segment := reader.PeekLine()
+	pos := pc.BlockOffset()
+	if pos < 0 || line[pos] != '[' {
+		return nil, parser.NoChildren
+	}
+	pos++
+	if pos > len(line)-1 || line[pos] != '^' {
+		return nil, parser.NoChildren
+	}
+	open := pos + 1
+	closes := 0
+	closure := util.FindClosure(line[pos+1:], '[', ']', false, false)
+	closes = pos + 1 + closure
+	next := closes + 1
+	if closure > -1 {
+		if next >= len(line) || line[next] != ':' {
+			return nil, parser.NoChildren
+		}
+	} else {
+		return nil, parser.NoChildren
+	}
+	padding := segment.Padding
+	label := reader.Value(text.NewSegment(segment.Start+open-padding, segment.Start+closes-padding))
+	if util.IsBlank(label) {
+		return nil, parser.NoChildren
+	}
+	item := NewFootnote(label)
+
+	pos = next + 1 - padding
+	if pos >= len(line) {
+		reader.Advance(pos)
+		return item, parser.NoChildren
+	}
+	reader.AdvanceAndSetPadding(pos, padding)
+	return item, parser.HasChildren
+}
+
+func (b *footnoteBlockParser) Continue(node ast.Node, reader text.Reader, pc parser.Context) parser.State {
+	line, _ := reader.PeekLine()
+	if util.IsBlank(line) {
+		return parser.Continue | parser.HasChildren
+	}
+	childpos, padding := util.IndentPosition(line, reader.LineOffset(), 4)
+	if childpos < 0 {
+		return parser.Close
+	}
+	reader.AdvanceAndSetPadding(childpos, padding)
+	return parser.Continue | parser.HasChildren
+}
+
+func (b *footnoteBlockParser) Close(node ast.Node, reader text.Reader, pc parser.Context) {
+	var list *FootnoteList
+	if tlist := pc.Get(footnoteListKey); tlist != nil {
+		list = tlist.(*FootnoteList)
+	} else {
+		list = NewFootnoteList()
+		pc.Set(footnoteListKey, list)
+		node.Parent().InsertBefore(node.Parent(), node, list)
+	}
+	node.Parent().RemoveChild(node.Parent(), node)
+	list.AppendChild(list, node)
+}
+
+func (b *footnoteBlockParser) CanInterruptParagraph() bool {
+	return true
+}
+
+func (b *footnoteBlockParser) CanAcceptIndentedLine() bool {
+	return false
+}
+
+type footnoteParser struct {
+}
+
+var defaultFootnoteParser = &footnoteParser{}
+
+// NewFootnoteParser returns a new parser.InlineParser that can parse
+// footnote links of the Markdown(PHP Markdown Extra) text.
+func NewFootnoteParser() parser.InlineParser {
+	return defaultFootnoteParser
+}
+
+func (s *footnoteParser) Trigger() []byte {
+	// footnote syntax probably conflict with the image syntax.
+	// So we need trigger this parser with '!'.
+	return []byte{'!', '['}
+}
+
+func (s *footnoteParser) Parse(parent ast.Node, block text.Reader, pc parser.Context) ast.Node {
+	line, segment := block.PeekLine()
+	pos := 1
+	if len(line) > 0 && line[0] == '!' {
+		pos++
+	}
+	if pos >= len(line) || line[pos] != '^' {
+		return nil
+	}
+	pos++
+	if pos >= len(line) {
+		return nil
+	}
+	open := pos
+	closure := util.FindClosure(line[pos:], '[', ']', false, false)
+	if closure < 0 {
+		return nil
+	}
+	closes := pos + closure
+	value := block.Value(text.NewSegment(segment.Start+open, segment.Start+closes))
+	block.Advance(closes + 1)
+
+	var list *FootnoteList
+	if tlist := pc.Get(footnoteListKey); tlist != nil {
+		list = tlist.(*FootnoteList)
+	}
+	if list == nil {
+		return nil
+	}
+	index := 0
+	name := []byte{}
+	for def := list.FirstChild(); def != nil; def = def.NextSibling() {
+		d := def.(*Footnote)
+		if bytes.Equal(d.Ref, value) {
+			if d.Index < 0 {
+				list.Count++
+				d.Index = list.Count
+				val := CleanValue(d.Name)
+				if len(val) == 0 {
+					val = []byte(strconv.Itoa(d.Index))
+				}
+				d.Name = pc.IDs().Generate(val, KindFootnote)
+			}
+			index = d.Index
+			name = d.Name
+			break
+		}
+	}
+	if index == 0 {
+		return nil
+	}
+
+	return NewFootnoteLink(index, name)
+}
+
+type footnoteASTTransformer struct {
+}
+
+var defaultFootnoteASTTransformer = &footnoteASTTransformer{}
+
+// NewFootnoteASTTransformer returns a new parser.ASTTransformer that
+// insert a footnote list to the last of the document.
+func NewFootnoteASTTransformer() parser.ASTTransformer {
+	return defaultFootnoteASTTransformer
+}
+
+func (a *footnoteASTTransformer) Transform(node *ast.Document, reader text.Reader, pc parser.Context) {
+	var list *FootnoteList
+	if tlist := pc.Get(footnoteListKey); tlist != nil {
+		list = tlist.(*FootnoteList)
+	} else {
+		return
+	}
+	pc.Set(footnoteListKey, nil)
+	for footnote := list.FirstChild(); footnote != nil; {
+		var container ast.Node = footnote
+		next := footnote.NextSibling()
+		if fc := container.LastChild(); fc != nil && ast.IsParagraph(fc) {
+			container = fc
+		}
+		footnoteNode := footnote.(*Footnote)
+		index := footnoteNode.Index
+		name := footnoteNode.Name
+		if index < 0 {
+			list.RemoveChild(list, footnote)
+		} else {
+			container.AppendChild(container, NewFootnoteBackLink(index, name))
+		}
+		footnote = next
+	}
+	list.SortChildren(func(n1, n2 ast.Node) int {
+		if n1.(*Footnote).Index < n2.(*Footnote).Index {
+			return -1
+		}
+		return 1
+	})
+	if list.Count <= 0 {
+		list.Parent().RemoveChild(list.Parent(), list)
+		return
+	}
+
+	node.AppendChild(node, list)
+}
+
+// FootnoteHTMLRenderer is a renderer.NodeRenderer implementation that
+// renders FootnoteLink nodes.
+type FootnoteHTMLRenderer struct {
+	html.Config
+}
+
+// NewFootnoteHTMLRenderer returns a new FootnoteHTMLRenderer.
+func NewFootnoteHTMLRenderer(opts ...html.Option) renderer.NodeRenderer {
+	r := &FootnoteHTMLRenderer{
+		Config: html.NewConfig(),
+	}
+	for _, opt := range opts {
+		opt.SetHTMLOption(&r.Config)
+	}
+	return r
+}
+
+// RegisterFuncs implements renderer.NodeRenderer.RegisterFuncs.
+func (r *FootnoteHTMLRenderer) RegisterFuncs(reg renderer.NodeRendererFuncRegisterer) {
+	reg.Register(KindFootnoteLink, r.renderFootnoteLink)
+	reg.Register(KindFootnoteBackLink, r.renderFootnoteBackLink)
+	reg.Register(KindFootnote, r.renderFootnote)
+	reg.Register(KindFootnoteList, r.renderFootnoteList)
+}
+
+func (r *FootnoteHTMLRenderer) renderFootnoteLink(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
+	if entering {
+		n := node.(*FootnoteLink)
+		n.Dump(source, 0)
+		is := strconv.Itoa(n.Index)
+		_, _ = w.WriteString(`<sup id="fnref:`)
+		_, _ = w.Write(n.Name)
+		_, _ = w.WriteString(`"><a href="#fn:`)
+		_, _ = w.Write(n.Name)
+		_, _ = w.WriteString(`" class="footnote-ref" role="doc-noteref">`)
+		_, _ = w.WriteString(is)
+		_, _ = w.WriteString(`</a></sup>`)
+	}
+	return ast.WalkContinue, nil
+}
+
+func (r *FootnoteHTMLRenderer) renderFootnoteBackLink(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
+	if entering {
+		n := node.(*FootnoteBackLink)
+		fmt.Fprintf(os.Stdout, "source:\n%s\n", string(n.Text(source)))
+		_, _ = w.WriteString(` <a href="#fnref:`)
+		_, _ = w.Write(n.Name)
+		_, _ = w.WriteString(`" class="footnote-backref" role="doc-backlink">&#x21a9;&#xfe0e;</a>`)
+	}
+	return ast.WalkContinue, nil
+}
+
+func (r *FootnoteHTMLRenderer) renderFootnote(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
+	n := node.(*Footnote)
+	if entering {
+		fmt.Fprintf(os.Stdout, "source:\n%s\n", string(n.Text(source)))
+		_, _ = w.WriteString(`<li id="fn:`)
+		_, _ = w.Write(n.Name)
+		_, _ = w.WriteString(`" role="doc-endnote">`)
+	} else {
+		_, _ = w.WriteString("</li>\n")
+	}
+	return ast.WalkContinue, nil
+}
+
+func (r *FootnoteHTMLRenderer) renderFootnoteList(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
+	tag := "div"
+	if entering {
+		_, _ = w.WriteString("<")
+		_, _ = w.WriteString(tag)
+		_, _ = w.WriteString(` class="footnotes" role="doc-endnotes"`)
+		if node.Attributes() != nil {
+			html.RenderAttributes(w, node, html.GlobalAttributeFilter)
+		}
+		_ = w.WriteByte('>')
+		if r.Config.XHTML {
+			_, _ = w.WriteString("\n<hr />\n")
+		} else {
+			_, _ = w.WriteString("\n<hr>\n")
+		}
+		_, _ = w.WriteString("<ol>\n")
+	} else {
+		_, _ = w.WriteString("</ol>\n")
+		_, _ = w.WriteString("</")
+		_, _ = w.WriteString(tag)
+		_, _ = w.WriteString(">\n")
+	}
+	return ast.WalkContinue, nil
+}
+
+type footnoteExtension struct{}
+
+// FootnoteExtension represents the Gitea Footnote
+var FootnoteExtension = &footnoteExtension{}
+
+// Extend extends the markdown converter with the Gitea Footnote parser
+func (e *footnoteExtension) Extend(m goldmark.Markdown) {
+	m.Parser().AddOptions(
+		parser.WithBlockParsers(
+			util.Prioritized(NewFootnoteBlockParser(), 999),
+		),
+		parser.WithInlineParsers(
+			util.Prioritized(NewFootnoteParser(), 101),
+		),
+		parser.WithASTTransformers(
+			util.Prioritized(NewFootnoteASTTransformer(), 999),
+		),
+	)
+	m.Renderer().AddOptions(renderer.WithNodeRenderers(
+		util.Prioritized(NewFootnoteHTMLRenderer(), 500),
+	))
+}
diff --git a/modules/markup/common/html.go b/modules/markup/common/html.go
new file mode 100644
index 0000000000..3a47686f1e
--- /dev/null
+++ b/modules/markup/common/html.go
@@ -0,0 +1,19 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package common
+
+import (
+	"mvdan.cc/xurls/v2"
+)
+
+var (
+	// NOTE: All below regex matching do not perform any extra validation.
+	// Thus a link is produced even if the linked entity does not exist.
+	// While fast, this is also incorrect and lead to false positives.
+	// TODO: fix invalid linking issue
+
+	// LinkRegex is a regexp matching a valid link
+	LinkRegex, _ = xurls.StrictMatchingScheme("https?://")
+)
diff --git a/modules/markup/common/linkify.go b/modules/markup/common/linkify.go
new file mode 100644
index 0000000000..6ae70fba34
--- /dev/null
+++ b/modules/markup/common/linkify.go
@@ -0,0 +1,156 @@
+// Copyright 2019 Yusuke Inuzuka
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+// Most of this file is a subtly changed version of github.com/yuin/goldmark/extension/linkify.go
+
+package common
+
+import (
+	"bytes"
+	"regexp"
+
+	"github.com/yuin/goldmark"
+	"github.com/yuin/goldmark/ast"
+	"github.com/yuin/goldmark/parser"
+	"github.com/yuin/goldmark/text"
+	"github.com/yuin/goldmark/util"
+)
+
+var wwwURLRegxp = regexp.MustCompile(`^www\.[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}((?:/|[#?])[-a-zA-Z0-9@:%_\+.~#!?&//=\(\);,'">\^{}\[\]` + "`" + `]*)?`)
+
+type linkifyParser struct {
+}
+
+var defaultLinkifyParser = &linkifyParser{}
+
+// NewLinkifyParser return a new InlineParser can parse
+// text that seems like a URL.
+func NewLinkifyParser() parser.InlineParser {
+	return defaultLinkifyParser
+}
+
+func (s *linkifyParser) Trigger() []byte {
+	// ' ' indicates any white spaces and a line head
+	return []byte{' ', '*', '_', '~', '('}
+}
+
+var protoHTTP = []byte("http:")
+var protoHTTPS = []byte("https:")
+var protoFTP = []byte("ftp:")
+var domainWWW = []byte("www.")
+
+func (s *linkifyParser) Parse(parent ast.Node, block text.Reader, pc parser.Context) ast.Node {
+	if pc.IsInLinkLabel() {
+		return nil
+	}
+	line, segment := block.PeekLine()
+	consumes := 0
+	start := segment.Start
+	c := line[0]
+	// advance if current position is not a line head.
+	if c == ' ' || c == '*' || c == '_' || c == '~' || c == '(' {
+		consumes++
+		start++
+		line = line[1:]
+	}
+
+	var m []int
+	var protocol []byte
+	var typ ast.AutoLinkType = ast.AutoLinkURL
+	if bytes.HasPrefix(line, protoHTTP) || bytes.HasPrefix(line, protoHTTPS) || bytes.HasPrefix(line, protoFTP) {
+		m = LinkRegex.FindSubmatchIndex(line)
+	}
+	if m == nil && bytes.HasPrefix(line, domainWWW) {
+		m = wwwURLRegxp.FindSubmatchIndex(line)
+		protocol = []byte("http")
+	}
+	if m != nil {
+		lastChar := line[m[1]-1]
+		if lastChar == '.' {
+			m[1]--
+		} else if lastChar == ')' {
+			closing := 0
+			for i := m[1] - 1; i >= m[0]; i-- {
+				if line[i] == ')' {
+					closing++
+				} else if line[i] == '(' {
+					closing--
+				}
+			}
+			if closing > 0 {
+				m[1] -= closing
+			}
+		} else if lastChar == ';' {
+			i := m[1] - 2
+			for ; i >= m[0]; i-- {
+				if util.IsAlphaNumeric(line[i]) {
+					continue
+				}
+				break
+			}
+			if i != m[1]-2 {
+				if line[i] == '&' {
+					m[1] -= m[1] - i
+				}
+			}
+		}
+	}
+	if m == nil {
+		if len(line) > 0 && util.IsPunct(line[0]) {
+			return nil
+		}
+		typ = ast.AutoLinkEmail
+		stop := util.FindEmailIndex(line)
+		if stop < 0 {
+			return nil
+		}
+		at := bytes.IndexByte(line, '@')
+		m = []int{0, stop, at, stop - 1}
+		if m == nil || bytes.IndexByte(line[m[2]:m[3]], '.') < 0 {
+			return nil
+		}
+		lastChar := line[m[1]-1]
+		if lastChar == '.' {
+			m[1]--
+		}
+		if m[1] < len(line) {
+			nextChar := line[m[1]]
+			if nextChar == '-' || nextChar == '_' {
+				return nil
+			}
+		}
+	}
+	if m == nil {
+		return nil
+	}
+	if consumes != 0 {
+		s := segment.WithStop(segment.Start + 1)
+		ast.MergeOrAppendTextSegment(parent, s)
+	}
+	consumes += m[1]
+	block.Advance(consumes)
+	n := ast.NewTextSegment(text.NewSegment(start, start+m[1]))
+	link := ast.NewAutoLink(typ, n)
+	link.Protocol = protocol
+	return link
+}
+
+func (s *linkifyParser) CloseBlock(parent ast.Node, pc parser.Context) {
+	// nothing to do
+}
+
+type linkify struct {
+}
+
+// Linkify is an extension that allow you to parse text that seems like a URL.
+var Linkify = &linkify{}
+
+func (e *linkify) Extend(m goldmark.Markdown) {
+	m.Parser().AddOptions(
+		parser.WithInlineParsers(
+			util.Prioritized(NewLinkifyParser(), 999),
+		),
+	)
+}
diff --git a/modules/markup/html.go b/modules/markup/html.go
index b10da40fc1..2c6773bce4 100644
--- a/modules/markup/html.go
+++ b/modules/markup/html.go
@@ -15,6 +15,7 @@ import (
 	"code.gitea.io/gitea/modules/base"
 	"code.gitea.io/gitea/modules/git"
 	"code.gitea.io/gitea/modules/log"
+	"code.gitea.io/gitea/modules/markup/common"
 	"code.gitea.io/gitea/modules/references"
 	"code.gitea.io/gitea/modules/setting"
 	"code.gitea.io/gitea/modules/util"
@@ -57,8 +58,6 @@ var (
 	//   https://html.spec.whatwg.org/multipage/input.html#e-mail-state-(type%3Demail)
 	emailRegex = regexp.MustCompile("(?:\\s|^|\\(|\\[)([a-zA-Z0-9.!#$%&'*+\\/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\\.[a-zA-Z0-9]{2,}(?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)+)(?:\\s|$|\\)|\\]|\\.(\\s|$))")
 
-	linkRegex, _ = xurls.StrictMatchingScheme("https?://")
-
 	// blackfriday extensions create IDs like fn:user-content-footnote
 	blackfridayExtRegex = regexp.MustCompile(`[^:]*:user-content-`)
 )
@@ -118,7 +117,7 @@ func CustomLinkURLSchemes(schemes []string) {
 		}
 		withAuth = append(withAuth, s)
 	}
-	linkRegex, _ = xurls.StrictMatchingScheme(strings.Join(withAuth, "|"))
+	common.LinkRegex, _ = xurls.StrictMatchingScheme(strings.Join(withAuth, "|"))
 }
 
 // IsSameDomain checks if given url string has the same hostname as current Gitea instance
@@ -509,6 +508,12 @@ func shortLinkProcessorFull(ctx *postProcessCtx, node *html.Node, noLink bool) {
 				(strings.HasPrefix(val, "‘") && strings.HasSuffix(val, "’")) {
 				const lenQuote = len("‘")
 				val = val[lenQuote : len(val)-lenQuote]
+			} else if (strings.HasPrefix(val, "\"") && strings.HasSuffix(val, "\"")) ||
+				(strings.HasPrefix(val, "'") && strings.HasSuffix(val, "'")) {
+				val = val[1 : len(val)-1]
+			} else if strings.HasPrefix(val, "'") && strings.HasSuffix(val, "’") {
+				const lenQuote = len("‘")
+				val = val[1 : len(val)-lenQuote]
 			}
 			props[key] = val
 		}
@@ -803,7 +808,7 @@ func emailAddressProcessor(ctx *postProcessCtx, node *html.Node) {
 // linkProcessor creates links for any HTTP or HTTPS URL not captured by
 // markdown.
 func linkProcessor(ctx *postProcessCtx, node *html.Node) {
-	m := linkRegex.FindStringIndex(node.Data)
+	m := common.LinkRegex.FindStringIndex(node.Data)
 	if m == nil {
 		return
 	}
@@ -832,7 +837,7 @@ func genDefaultLinkProcessor(defaultLink string) processor {
 
 // descriptionLinkProcessor creates links for DescriptionHTML
 func descriptionLinkProcessor(ctx *postProcessCtx, node *html.Node) {
-	m := linkRegex.FindStringIndex(node.Data)
+	m := common.LinkRegex.FindStringIndex(node.Data)
 	if m == nil {
 		return
 	}
diff --git a/modules/markup/html_test.go b/modules/markup/html_test.go
index 07747e97e1..91ef320b40 100644
--- a/modules/markup/html_test.go
+++ b/modules/markup/html_test.go
@@ -323,6 +323,6 @@ func TestRender_ShortLinks(t *testing.T) {
 		`
`)
 	test(
 		"[[foobar]]
",
-		`[[foobar]]
`,
-		`[[foobar]]
`)
+		`[[foobar]]
`,
+		`[[foobar]]
`)
 }
diff --git a/modules/markup/markdown/goldmark.go b/modules/markup/markdown/goldmark.go
new file mode 100644
index 0000000000..2a2a9dce6a
--- /dev/null
+++ b/modules/markup/markdown/goldmark.go
@@ -0,0 +1,178 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package markdown
+
+import (
+	"bytes"
+	"fmt"
+	"strings"
+
+	"code.gitea.io/gitea/modules/markup"
+	"code.gitea.io/gitea/modules/markup/common"
+	giteautil "code.gitea.io/gitea/modules/util"
+
+	"github.com/yuin/goldmark/ast"
+	east "github.com/yuin/goldmark/extension/ast"
+	"github.com/yuin/goldmark/parser"
+	"github.com/yuin/goldmark/renderer"
+	"github.com/yuin/goldmark/renderer/html"
+	"github.com/yuin/goldmark/text"
+	"github.com/yuin/goldmark/util"
+)
+
+var byteMailto = []byte("mailto:")
+
+// GiteaASTTransformer is a default transformer of the goldmark tree.
+type GiteaASTTransformer struct{}
+
+// Transform transforms the given AST tree.
+func (g *GiteaASTTransformer) Transform(node *ast.Document, reader text.Reader, pc parser.Context) {
+	_ = ast.Walk(node, func(n ast.Node, entering bool) (ast.WalkStatus, error) {
+		if !entering {
+			return ast.WalkContinue, nil
+		}
+
+		switch v := n.(type) {
+		case *ast.Image:
+			// Images need two things:
+			//
+			// 1. Their src needs to munged to be a real value
+			// 2. If they're not wrapped with a link they need a link wrapper
+
+			// Check if the destination is a real link
+			link := v.Destination
+			if len(link) > 0 && !markup.IsLink(link) {
+				prefix := pc.Get(urlPrefixKey).(string)
+				if pc.Get(isWikiKey).(bool) {
+					prefix = giteautil.URLJoin(prefix, "wiki", "raw")
+				}
+				prefix = strings.Replace(prefix, "/src/", "/media/", 1)
+
+				lnk := string(link)
+				lnk = giteautil.URLJoin(prefix, lnk)
+				lnk = strings.Replace(lnk, " ", "+", -1)
+				link = []byte(lnk)
+			}
+			v.Destination = link
+
+			parent := n.Parent()
+			// Create a link around image only if parent is not already a link
+			if _, ok := parent.(*ast.Link); !ok && parent != nil {
+				wrap := ast.NewLink()
+				wrap.Destination = link
+				wrap.Title = v.Title
+				parent.ReplaceChild(parent, n, wrap)
+				wrap.AppendChild(wrap, n)
+			}
+		case *ast.Link:
+			// Links need their href to munged to be a real value
+			link := v.Destination
+			if len(link) > 0 && !markup.IsLink(link) &&
+				link[0] != '#' && !bytes.HasPrefix(link, byteMailto) {
+				// special case: this is not a link, a hash link or a mailto:, so it's a
+				// relative URL
+				lnk := string(link)
+				if pc.Get(isWikiKey).(bool) {
+					lnk = giteautil.URLJoin("wiki", lnk)
+				}
+				link = []byte(giteautil.URLJoin(pc.Get(urlPrefixKey).(string), lnk))
+			}
+			v.Destination = link
+		}
+		return ast.WalkContinue, nil
+	})
+}
+
+type prefixedIDs struct {
+	values map[string]bool
+}
+
+// Generate generates a new element id.
+func (p *prefixedIDs) Generate(value []byte, kind ast.NodeKind) []byte {
+	dft := []byte("id")
+	if kind == ast.KindHeading {
+		dft = []byte("heading")
+	}
+	return p.GenerateWithDefault(value, dft)
+}
+
+// GenerateWithDefault generates a new element id, falling back to dft when the cleaned value is empty.
+func (p *prefixedIDs) GenerateWithDefault(value []byte, dft []byte) []byte {
+	result := common.CleanValue(value)
+	if len(result) == 0 {
+		result = dft
+	}
+	if !bytes.HasPrefix(result, []byte("user-content-")) {
+		result = append([]byte("user-content-"), result...)
+	}
+	if _, ok := p.values[util.BytesToReadOnlyString(result)]; !ok {
+		p.values[util.BytesToReadOnlyString(result)] = true
+		return result
+	}
+	for i := 1; ; i++ {
+		newResult := fmt.Sprintf("%s-%d", result, i)
+		if _, ok := p.values[newResult]; !ok {
+			p.values[newResult] = true
+			return []byte(newResult)
+		}
+	}
+}
+
+// Put puts a given element id to the used ids table.
+func (p *prefixedIDs) Put(value []byte) {
+	p.values[util.BytesToReadOnlyString(value)] = true
+}
+
+func newPrefixedIDs() *prefixedIDs {
+	return &prefixedIDs{
+		values: map[string]bool{},
+	}
+}
+
+// NewTaskCheckBoxHTMLRenderer creates a TaskCheckBoxHTMLRenderer to render tasklists
+// in the gitea form.
+func NewTaskCheckBoxHTMLRenderer(opts ...html.Option) renderer.NodeRenderer {
+	r := &TaskCheckBoxHTMLRenderer{
+		Config: html.NewConfig(),
+	}
+	for _, opt := range opts {
+		opt.SetHTMLOption(&r.Config)
+	}
+	return r
+}
+
+// TaskCheckBoxHTMLRenderer is a renderer.NodeRenderer implementation that
+// renders checkboxes in list items.
+// Overrides the default goldmark one to present the gitea format
+type TaskCheckBoxHTMLRenderer struct {
+	html.Config
+}
+
+// RegisterFuncs implements renderer.NodeRenderer.RegisterFuncs.
+func (r *TaskCheckBoxHTMLRenderer) RegisterFuncs(reg renderer.NodeRendererFuncRegisterer) {
+	reg.Register(east.KindTaskCheckBox, r.renderTaskCheckBox)
+}
+
+func (r *TaskCheckBoxHTMLRenderer) renderTaskCheckBox(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
+	if !entering {
+		return ast.WalkContinue, nil
+	}
+	n := node.(*east.TaskCheckBox)
+
+	end := ">"
+	if r.XHTML {
+		end = " />"
+	}
+	var err error
+	if n.IsChecked {
+		_, err = w.WriteString(`<span class="ui fitted disabled checkbox checked"><input type="checkbox" disabled="disabled"` + end + `<label` + end + `</span>`)
+	} else {
+		_, err = w.WriteString(`<span class="ui fitted disabled checkbox"><input type="checkbox" disabled="disabled"` + end + `<label` + end + `</span>`)
+	}
+	if err != nil {
+		return ast.WalkStop, err
+	}
+	return ast.WalkContinue, nil
+}
diff --git a/modules/markup/markdown/markdown.go b/modules/markup/markdown/markdown.go
index f1e44a8fbc..5230fca4dc 100644
--- a/modules/markup/markdown/markdown.go
+++ b/modules/markup/markdown/markdown.go
@@ -7,161 +7,83 @@ package markdown
 
 import (
 	"bytes"
-	"io"
-	"strings"
+	"sync"
 
+	"code.gitea.io/gitea/modules/log"
 	"code.gitea.io/gitea/modules/markup"
+	"code.gitea.io/gitea/modules/markup/common"
 	"code.gitea.io/gitea/modules/setting"
-	"code.gitea.io/gitea/modules/util"
+	giteautil "code.gitea.io/gitea/modules/util"
 
-	"github.com/russross/blackfriday/v2"
+	"github.com/yuin/goldmark"
+	"github.com/yuin/goldmark/extension"
+	"github.com/yuin/goldmark/parser"
+	"github.com/yuin/goldmark/renderer"
+	"github.com/yuin/goldmark/renderer/html"
+	"github.com/yuin/goldmark/util"
 )
 
-// Renderer is a extended version of underlying render object.
-type Renderer struct {
-	blackfriday.Renderer
-	URLPrefix string
-	IsWiki    bool
+var converter goldmark.Markdown
+var once = sync.Once{}
+
+var urlPrefixKey = parser.NewContextKey()
+var isWikiKey = parser.NewContextKey()
+
+// NewGiteaParseContext creates a parser.Context with the gitea context set
+func NewGiteaParseContext(urlPrefix string, isWiki bool) parser.Context {
+	pc := parser.NewContext(parser.WithIDs(newPrefixedIDs()))
+	pc.Set(urlPrefixKey, urlPrefix)
+	pc.Set(isWikiKey, isWiki)
+	return pc
 }
 
-var byteMailto = []byte("mailto:")
-
-var htmlEscaper = [256][]byte{
-	'&': []byte("&"),
-	'<': []byte("<"),
-	'>': []byte(">"),
-	'"': []byte("""),
-}
-
-func escapeHTML(w io.Writer, s []byte) {
-	var start, end int
-	for end < len(s) {
-		escSeq := htmlEscaper[s[end]]
-		if escSeq != nil {
-			_, _ = w.Write(s[start:end])
-			_, _ = w.Write(escSeq)
-			start = end + 1
-		}
-		end++
-	}
-	if start < len(s) && end <= len(s) {
-		_, _ = w.Write(s[start:end])
-	}
-}
-
-// RenderNode is a default renderer of a single node of a syntax tree. For
-// block nodes it will be called twice: first time with entering=true, second
-// time with entering=false, so that it could know when it's working on an open
-// tag and when on close. It writes the result to w.
-//
-// The return value is a way to tell the calling walker to adjust its walk
-// pattern: e.g. it can terminate the traversal by returning Terminate. Or it
-// can ask the walker to skip a subtree of this node by returning SkipChildren.
-// The typical behavior is to return GoToNext, which asks for the usual
-// traversal to the next node.
-func (r *Renderer) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
-	switch node.Type {
-	case blackfriday.Image:
-		prefix := r.URLPrefix
-		if r.IsWiki {
-			prefix = util.URLJoin(prefix, "wiki", "raw")
-		}
-		prefix = strings.Replace(prefix, "/src/", "/media/", 1)
-		link := node.LinkData.Destination
-		if len(link) > 0 && !markup.IsLink(link) {
-			lnk := string(link)
-			lnk = util.URLJoin(prefix, lnk)
-			lnk = strings.Replace(lnk, " ", "+", -1)
-			link = []byte(lnk)
-		}
-		node.LinkData.Destination = link
-		// Render link around image only if parent is not link already
-		if node.Parent != nil && node.Parent.Type != blackfriday.Link {
-			if entering {
-				_, _ = w.Write([]byte(``))
-				return r.Renderer.RenderNode(w, node, entering)
-			}
-			s := r.Renderer.RenderNode(w, node, entering)
-			_, _ = w.Write([]byte(``))
-			return s
-		}
-		return r.Renderer.RenderNode(w, node, entering)
-	case blackfriday.Link:
-		// special case: this is not a link, a hash link or a mailto:, so it's a
-		// relative URL
-		link := node.LinkData.Destination
-		if len(link) > 0 && !markup.IsLink(link) &&
-			link[0] != '#' && !bytes.HasPrefix(link, byteMailto) &&
-			node.LinkData.Footnote == nil {
-			lnk := string(link)
-			if r.IsWiki {
-				lnk = util.URLJoin("wiki", lnk)
-			}
-			link = []byte(util.URLJoin(r.URLPrefix, lnk))
-		}
-		node.LinkData.Destination = link
-		return r.Renderer.RenderNode(w, node, entering)
-	case blackfriday.Text:
-		isListItem := false
-		for n := node.Parent; n != nil; n = n.Parent {
-			if n.Type == blackfriday.Item {
-				isListItem = true
-				break
-			}
-		}
-		if isListItem {
-			text := node.Literal
-			switch {
-			case bytes.HasPrefix(text, []byte("[ ] ")):
-				_, _ = w.Write([]byte(``))
-				text = text[3:]
-			case bytes.HasPrefix(text, []byte("[x] ")):
-				_, _ = w.Write([]byte(``))
-				text = text[3:]
-			}
-			node.Literal = text
-		}
-	}
-	return r.Renderer.RenderNode(w, node, entering)
-}
-
-const (
-	blackfridayExtensions = 0 |
-		blackfriday.NoIntraEmphasis |
-		blackfriday.Tables |
-		blackfriday.FencedCode |
-		blackfriday.Strikethrough |
-		blackfriday.NoEmptyLineBeforeBlock |
-		blackfriday.DefinitionLists |
-		blackfriday.Footnotes |
-		blackfriday.HeadingIDs |
-		blackfriday.AutoHeadingIDs
-	blackfridayHTMLFlags = 0 |
-		blackfriday.Smartypants
-)
-
 // RenderRaw renders Markdown to HTML without handling special links.
 func RenderRaw(body []byte, urlPrefix string, wikiMarkdown bool) []byte {
-	renderer := &Renderer{
-		Renderer: blackfriday.NewHTMLRenderer(blackfriday.HTMLRendererParameters{
-			Flags:                blackfridayHTMLFlags,
-			FootnoteAnchorPrefix: "user-content-",
-			HeadingIDPrefix:      "user-content-",
-		}),
-		URLPrefix: urlPrefix,
-		IsWiki:    wikiMarkdown,
+	once.Do(func() {
+		converter = goldmark.New(
+			goldmark.WithExtensions(extension.Table,
+				extension.Strikethrough,
+				extension.TaskList,
+				extension.DefinitionList,
+				common.FootnoteExtension,
+				extension.NewTypographer(
+					extension.WithTypographicSubstitutions(extension.TypographicSubstitutions{
+						extension.EnDash: nil,
+						extension.EmDash: nil,
+					}),
+				),
+			),
+			goldmark.WithParserOptions(
+				parser.WithAttribute(),
+				parser.WithAutoHeadingID(),
+				parser.WithASTTransformers(
+					util.Prioritized(&GiteaASTTransformer{}, 10000),
+				),
+			),
+			goldmark.WithRendererOptions(
+				html.WithUnsafe(),
+			),
+		)
+
+		// Override the original Tasklist renderer!
+		converter.Renderer().AddOptions(
+			renderer.WithNodeRenderers(
+				util.Prioritized(NewTaskCheckBoxHTMLRenderer(), 1000),
+			),
+		)
+
+		if setting.Markdown.EnableHardLineBreak {
+			converter.Renderer().AddOptions(html.WithHardWraps())
+		}
+	})
+
+	pc := NewGiteaParseContext(urlPrefix, wikiMarkdown)
+	var buf bytes.Buffer
+	if err := converter.Convert(giteautil.NormalizeEOL(body), &buf, parser.WithContext(pc)); err != nil {
+		log.Error("Unable to render: %v", err)
 	}
 
-	exts := blackfridayExtensions
-	if setting.Markdown.EnableHardLineBreak {
-		exts |= blackfriday.HardLineBreak
-	}
-
-	// Need to normalize EOL to UNIX LF to have consistent results in rendering
-	body = blackfriday.Run(util.NormalizeEOL(body), blackfriday.WithRenderer(renderer), blackfriday.WithExtensions(exts))
-	return markup.SanitizeBytes(body)
+	return markup.SanitizeReader(&buf).Bytes()
 }
 
 var (
@@ -174,8 +96,7 @@ func init() {
 }
 
 // Parser implements markup.Parser
-type Parser struct {
-}
+type Parser struct{}
 
 // Name implements markup.Parser
 func (Parser) Name() string {
diff --git a/modules/markup/markdown/markdown_test.go b/modules/markup/markdown/markdown_test.go
index e3156a657b..53772ee441 100644
--- a/modules/markup/markdown/markdown_test.go
+++ b/modules/markup/markdown/markdown_test.go
@@ -98,16 +98,12 @@ func TestRender_Images(t *testing.T) {
 func testAnswers(baseURLContent, baseURLImages string) []string {
 	return []string{
 		`Wiki! Enjoy :)
-
 
-
 See commit 65f1bf27bc
-
 Ideas and codes
-
 
 `,
 		`What is Wine Staging?
-
 Wine Staging on website wine-staging.com.
-
 Quick Links
-
 Here are some links to the most important topics. You can find the full list of pages at the sidebar.
-
 
 
 
@@ -131,7 +123,6 @@ func testAnswers(baseURLContent, baseURLImages string) []string {
 | Installation | 
 
 
-
 
 
   | 
@@ -141,20 +132,15 @@ func testAnswers(baseURLContent, baseURLImages string) []string {
 
 `,
 		`Excelsior JET allows you to create native executables for Windows, Linux and Mac OS X.
-
 
 - Package your libGDX application
 

 
 - Perform a test run by hitting the Run! button.
 

 
 
-
 More tests
-
 (from https://www.markdownguide.org/extended-syntax/)
-
 Definition list
-
 
 - First Term
 
 - This is the definition of the first term.
 
@@ -162,27 +148,21 @@ func testAnswers(baseURLContent, baseURLImages string) []string {
 - This is one definition of the second term.
 
 - This is another definition of the second term.
 
 
-
 
-
 Here is a simple footnote,1 and here is a longer one.2
-
 
-
 
-
 
-- This is the first footnote.
 
-
-Here is one with multiple paragraphs and code.
-
+- 
+
This is the first footnote. ↩︎
+ 
+- 
+
Here is one with multiple paragraphs and code.
 Indent paragraphs to include them in the footnote.
-
 { my code }
-
-Add as many paragraphs as you like.
 
+Add as many paragraphs as you like. ↩︎
+
 
-
 
 
 `,
 	}
@@ -299,15 +279,15 @@ func TestRender_RenderParagraphs(t *testing.T) {
 	test := func(t *testing.T, str string, cnt int) {
 		unix := []byte(str)
 		res := string(RenderRaw(unix, "", false))
-		assert.Equal(t, strings.Count(res, "Wiki! Enjoy :)
-
 
 - Links, Language bindings, Engine bindings
 
 - Tips
 
@@ -88,13 +87,9 @@ Here are some links to the most important topics. You can find the full list of
 `,
 		// rendered
 		`What is Wine Staging?
-
 Wine Staging on website wine-staging.com.
-
 Quick Links
-
 Here are some links to the most important topics. You can find the full list of pages at the sidebar.
-
 Configuration
 
 `,
diff --git a/vendor/github.com/russross/blackfriday/v2/.gitignore b/vendor/github.com/russross/blackfriday/v2/.gitignore
deleted file mode 100644
index 75623dcccb..0000000000
--- a/vendor/github.com/russross/blackfriday/v2/.gitignore
+++ /dev/null
@@ -1,8 +0,0 @@
-*.out
-*.swp
-*.8
-*.6
-_obj
-_test*
-markdown
-tags
diff --git a/vendor/github.com/russross/blackfriday/v2/.travis.yml b/vendor/github.com/russross/blackfriday/v2/.travis.yml
deleted file mode 100644
index b0b525a5a8..0000000000
--- a/vendor/github.com/russross/blackfriday/v2/.travis.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-sudo: false
-language: go
-go:
-  - "1.10.x"
-  - "1.11.x"
-  - tip
-matrix:
-  fast_finish: true
-  allow_failures:
-    - go: tip
-install:
-  - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
-script:
-  - go get -t -v ./...
-  - diff -u <(echo -n) <(gofmt -d -s .)
-  - go tool vet .
-  - go test -v ./...
diff --git a/vendor/github.com/russross/blackfriday/v2/LICENSE.txt b/vendor/github.com/russross/blackfriday/v2/LICENSE.txt
deleted file mode 100644
index 2885af3602..0000000000
--- a/vendor/github.com/russross/blackfriday/v2/LICENSE.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-Blackfriday is distributed under the Simplified BSD License:
-
-> Copyright © 2011 Russ Ross
-> All rights reserved.
->
-> Redistribution and use in source and binary forms, with or without
-> modification, are permitted provided that the following conditions
-> are met:
->
-> 1.  Redistributions of source code must retain the above copyright
->     notice, this list of conditions and the following disclaimer.
->
-> 2.  Redistributions in binary form must reproduce the above
->     copyright notice, this list of conditions and the following
->     disclaimer in the documentation and/or other materials provided with
->     the distribution.
->
-> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-> "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-> LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-> FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-> COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-> INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-> BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-> LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-> CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-> LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
-> ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-> POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/russross/blackfriday/v2/README.md b/vendor/github.com/russross/blackfriday/v2/README.md
deleted file mode 100644
index d5a8649bd5..0000000000
--- a/vendor/github.com/russross/blackfriday/v2/README.md
+++ /dev/null
@@ -1,291 +0,0 @@
-Blackfriday [](https://travis-ci.org/russross/blackfriday)
-===========
-
-Blackfriday is a [Markdown][1] processor implemented in [Go][2]. It
-is paranoid about its input (so you can safely feed it user-supplied
-data), it is fast, it supports common extensions (tables, smart
-punctuation substitutions, etc.), and it is safe for all utf-8
-(unicode) input.
-
-HTML output is currently supported, along with Smartypants
-extensions.
-
-It started as a translation from C of [Sundown][3].
-
-
-Installation
-------------
-
-Blackfriday is compatible with any modern Go release. With Go 1.7 and git
-installed:
-
-    go get gopkg.in/russross/blackfriday.v2
-
-will download, compile, and install the package into your `$GOPATH`
-directory hierarchy. Alternatively, you can achieve the same if you
-import it into a project:
-
-    import "gopkg.in/russross/blackfriday.v2"
-
-and `go get` without parameters.
-
-
-Versions
---------
-
-Currently maintained and recommended version of Blackfriday is `v2`. It's being
-developed on its own branch: https://github.com/russross/blackfriday/tree/v2 and the
-documentation is available at
-https://godoc.org/gopkg.in/russross/blackfriday.v2.
-
-It is `go get`-able via via [gopkg.in][6] at `gopkg.in/russross/blackfriday.v2`,
-but we highly recommend using package management tool like [dep][7] or
-[Glide][8] and make use of semantic versioning. With package management you
-should import `github.com/russross/blackfriday` and specify that you're using
-version 2.0.0.
-
-Version 2 offers a number of improvements over v1:
-
-* Cleaned up API
-* A separate call to [`Parse`][4], which produces an abstract syntax tree for
-  the document
-* Latest bug fixes
-* Flexibility to easily add your own rendering extensions
-
-Potential drawbacks:
-
-* Our benchmarks show v2 to be slightly slower than v1. Currently in the
-  ballpark of around 15%.
-* API breakage. If you can't afford modifying your code to adhere to the new API
-  and don't care too much about the new features, v2 is probably not for you.
-* Several bug fixes are trailing behind and still need to be forward-ported to
-  v2. See issue [#348](https://github.com/russross/blackfriday/issues/348) for
-  tracking.
-
-Usage
------
-
-For the most sensible markdown processing, it is as simple as getting your input
-into a byte slice and calling:
-
-```go
-output := blackfriday.Run(input)
-```
-
-Your input will be parsed and the output rendered with a set of most popular
-extensions enabled. If you want the most basic feature set, corresponding with
-the bare Markdown specification, use:
-
-```go
-output := blackfriday.Run(input, blackfriday.WithNoExtensions())
-```
-
-### Sanitize untrusted content
-
-Blackfriday itself does nothing to protect against malicious content. If you are
-dealing with user-supplied markdown, we recommend running Blackfriday's output
-through HTML sanitizer such as [Bluemonday][5].
-
-Here's an example of simple usage of Blackfriday together with Bluemonday:
-
-```go
-import (
-    "github.com/microcosm-cc/bluemonday"
-    "github.com/russross/blackfriday"
-)
-
-// ...
-unsafe := blackfriday.Run(input)
-html := bluemonday.UGCPolicy().SanitizeBytes(unsafe)
-```
-
-### Custom options
-
-If you want to customize the set of options, use `blackfriday.WithExtensions`,
-`blackfriday.WithRenderer` and `blackfriday.WithRefOverride`.
-
-You can also check out `blackfriday-tool` for a more complete example
-of how to use it. Download and install it using:
-
-    go get github.com/russross/blackfriday-tool
-
-This is a simple command-line tool that allows you to process a
-markdown file using a standalone program.  You can also browse the
-source directly on github if you are just looking for some example
-code:
-
-* 
-
-Note that if you have not already done so, installing
-`blackfriday-tool` will be sufficient to download and install
-blackfriday in addition to the tool itself. The tool binary will be
-installed in `$GOPATH/bin`.  This is a statically-linked binary that
-can be copied to wherever you need it without worrying about
-dependencies and library versions.
-
-
-Features
---------
-
-All features of Sundown are supported, including:
-
-*   **Compatibility**. The Markdown v1.0.3 test suite passes with
-    the `--tidy` option.  Without `--tidy`, the differences are
-    mostly in whitespace and entity escaping, where blackfriday is
-    more consistent and cleaner.
-
-*   **Common extensions**, including table support, fenced code
-    blocks, autolinks, strikethroughs, non-strict emphasis, etc.
-
-*   **Safety**. Blackfriday is paranoid when parsing, making it safe
-    to feed untrusted user input without fear of bad things
-    happening. The test suite stress tests this and there are no
-    known inputs that make it crash.  If you find one, please let me
-    know and send me the input that does it.
-
-    NOTE: "safety" in this context means *runtime safety only*. In order to
-    protect yourself against JavaScript injection in untrusted content, see
-    [this example](https://github.com/russross/blackfriday#sanitize-untrusted-content).
-
-*   **Fast processing**. It is fast enough to render on-demand in
-    most web applications without having to cache the output.
-
-*   **Thread safety**. You can run multiple parsers in different
-    goroutines without ill effect. There is no dependence on global
-    shared state.
-
-*   **Minimal dependencies**. Blackfriday only depends on standard
-    library packages in Go. The source code is pretty
-    self-contained, so it is easy to add to any project, including
-    Google App Engine projects.
-
-*   **Standards compliant**. Output successfully validates using the
-    W3C validation tool for HTML 4.01 and XHTML 1.0 Transitional.
-
-
-Extensions
-----------
-
-In addition to the standard markdown syntax, this package
-implements the following extensions:
-
-*   **Intra-word emphasis supression**. The `_` character is
-    commonly used inside words when discussing code, so having
-    markdown interpret it as an emphasis command is usually the
-    wrong thing. Blackfriday lets you treat all emphasis markers as
-    normal characters when they occur inside a word.
-
-*   **Tables**. Tables can be created by drawing them in the input
-    using a simple syntax:
-
-    ```
-    Name    | Age
-    --------|------
-    Bob     | 27
-    Alice   | 23
-    ```
-
-*   **Fenced code blocks**. In addition to the normal 4-space
-    indentation to mark code blocks, you can explicitly mark them
-    and supply a language (to make syntax highlighting simple). Just
-    mark it like this:
-
-        ```go
-        func getTrue() bool {
-            return true
-        }
-        ```
-
-    You can use 3 or more backticks to mark the beginning of the
-    block, and the same number to mark the end of the block.
-
-*   **Definition lists**. A simple definition list is made of a single-line
-    term followed by a colon and the definition for that term.
-
-        Cat
-        : Fluffy animal everyone likes
-
-        Internet
-        : Vector of transmission for pictures of cats
-
-    Terms must be separated from the previous definition by a blank line.
-
-*   **Footnotes**. A marker in the text that will become a superscript number;
-    a footnote definition that will be placed in a list of footnotes at the
-    end of the document. A footnote looks like this:
-
-        This is a footnote.[^1]
-
-        [^1]: the footnote text.
-
-*   **Autolinking**. Blackfriday can find URLs that have not been
-    explicitly marked as links and turn them into links.
-
-*   **Strikethrough**. Use two tildes (`~~`) to mark text that
-    should be crossed out.
-
-*   **Hard line breaks**. With this extension enabled newlines in the input
-    translate into line breaks in the output. This extension is off by default.
-
-*   **Smart quotes**. Smartypants-style punctuation substitution is
-    supported, turning normal double- and single-quote marks into
-    curly quotes, etc.
-
-*   **LaTeX-style dash parsing** is an additional option, where `--`
-    is translated into `–`, and `---` is translated into
-    `—`. This differs from most smartypants processors, which
-    turn a single hyphen into an ndash and a double hyphen into an
-    mdash.
-
-*   **Smart fractions**, where anything that looks like a fraction
-    is translated into suitable HTML (instead of just a few special
-    cases like most smartypant processors). For example, `4/5`
-    becomes `4⁄5`, which renders as
-    4⁄5.
-
-
-Other renderers
----------------
-
-Blackfriday is structured to allow alternative rendering engines. Here
-are a few of note:
-
-*   [github_flavored_markdown](https://godoc.org/github.com/shurcooL/github_flavored_markdown):
-    provides a GitHub Flavored Markdown renderer with fenced code block
-    highlighting, clickable heading anchor links.
-
-    It's not customizable, and its goal is to produce HTML output
-    equivalent to the [GitHub Markdown API endpoint](https://developer.github.com/v3/markdown/#render-a-markdown-document-in-raw-mode),
-    except the rendering is performed locally.
-
-*   [markdownfmt](https://github.com/shurcooL/markdownfmt): like gofmt,
-    but for markdown.
-
-*   [LaTeX output](https://github.com/Ambrevar/Blackfriday-LaTeX):
-    renders output as LaTeX.
-
-*   [Blackfriday-Confluence](https://github.com/kentaro-m/blackfriday-confluence): provides a [Confluence Wiki Markup](https://confluence.atlassian.com/doc/confluence-wiki-markup-251003035.html) renderer.
-
-
-Todo
-----
-
-*   More unit testing
-*   Improve unicode support. It does not understand all unicode
-    rules (about what constitutes a letter, a punctuation symbol,
-    etc.), so it may fail to detect word boundaries correctly in
-    some instances. It is safe on all utf-8 input.
-
-
-License
--------
-
-[Blackfriday is distributed under the Simplified BSD License](LICENSE.txt)
-
-
-   [1]: https://daringfireball.net/projects/markdown/ "Markdown"
-   [2]: https://golang.org/ "Go Language"
-   [3]: https://github.com/vmg/sundown "Sundown"
-   [4]: https://godoc.org/gopkg.in/russross/blackfriday.v2#Parse "Parse func"
-   [5]: https://github.com/microcosm-cc/bluemonday "Bluemonday"
-   [6]: https://labix.org/gopkg.in "gopkg.in"
diff --git a/vendor/github.com/russross/blackfriday/v2/block.go b/vendor/github.com/russross/blackfriday/v2/block.go
deleted file mode 100644
index b8607474e5..0000000000
--- a/vendor/github.com/russross/blackfriday/v2/block.go
+++ /dev/null
@@ -1,1590 +0,0 @@
-//
-// Blackfriday Markdown Processor
-// Available at http://github.com/russross/blackfriday
-//
-// Copyright © 2011 Russ Ross .
-// Distributed under the Simplified BSD License.
-// See README.md for details.
-//
-
-//
-// Functions to parse block-level elements.
-//
-
-package blackfriday
-
-import (
-	"bytes"
-	"html"
-	"regexp"
-	"strings"
-
-	"github.com/shurcooL/sanitized_anchor_name"
-)
-
-const (
-	charEntity = "&(?:#x[a-f0-9]{1,8}|#[0-9]{1,8}|[a-z][a-z0-9]{1,31});"
-	escapable  = "[!\"#$%&'()*+,./:;<=>?@[\\\\\\]^_`{|}~-]"
-)
-
-var (
-	reBackslashOrAmp      = regexp.MustCompile("[\\&]")
-	reEntityOrEscapedChar = regexp.MustCompile("(?i)\\\\" + escapable + "|" + charEntity)
-)
-
-// Parse block-level data.
-// Note: this function and many that it calls assume that
-// the input buffer ends with a newline.
-func (p *Markdown) block(data []byte) {
-	// this is called recursively: enforce a maximum depth
-	if p.nesting >= p.maxNesting {
-		return
-	}
-	p.nesting++
-
-	// parse out one block-level construct at a time
-	for len(data) > 0 {
-		// prefixed heading:
-		//
-		// # Heading 1
-		// ## Heading 2
-		// ...
-		// ###### Heading 6
-		if p.isPrefixHeading(data) {
-			data = data[p.prefixHeading(data):]
-			continue
-		}
-
-		// block of preformatted HTML:
-		//
-		// 
-		//     ...
-		// 
-		if data[0] == '<' {
-			if i := p.html(data, true); i > 0 {
-				data = data[i:]
-				continue
-			}
-		}
-
-		// title block
-		//
-		// % stuff
-		// % more stuff
-		// % even more stuff
-		if p.extensions&Titleblock != 0 {
-			if data[0] == '%' {
-				if i := p.titleBlock(data, true); i > 0 {
-					data = data[i:]
-					continue
-				}
-			}
-		}
-
-		// blank lines.  note: returns the # of bytes to skip
-		if i := p.isEmpty(data); i > 0 {
-			data = data[i:]
-			continue
-		}
-
-		// indented code block:
-		//
-		//     func max(a, b int) int {
-		//         if a > b {
-		//             return a
-		//         }
-		//         return b
-		//      }
-		if p.codePrefix(data) > 0 {
-			data = data[p.code(data):]
-			continue
-		}
-
-		// fenced code block:
-		//
-		// ``` go
-		// func fact(n int) int {
-		//     if n <= 1 {
-		//         return n
-		//     }
-		//     return n * fact(n-1)
-		// }
-		// ```
-		if p.extensions&FencedCode != 0 {
-			if i := p.fencedCodeBlock(data, true); i > 0 {
-				data = data[i:]
-				continue
-			}
-		}
-
-		// horizontal rule:
-		//
-		// ------
-		// or
-		// ******
-		// or
-		// ______
-		if p.isHRule(data) {
-			p.addBlock(HorizontalRule, nil)
-			var i int
-			for i = 0; i < len(data) && data[i] != '\n'; i++ {
-			}
-			data = data[i:]
-			continue
-		}
-
-		// block quote:
-		//
-		// > A big quote I found somewhere
-		// > on the web
-		if p.quotePrefix(data) > 0 {
-			data = data[p.quote(data):]
-			continue
-		}
-
-		// table:
-		//
-		// Name  | Age | Phone
-		// ------|-----|---------
-		// Bob   | 31  | 555-1234
-		// Alice | 27  | 555-4321
-		if p.extensions&Tables != 0 {
-			if i := p.table(data); i > 0 {
-				data = data[i:]
-				continue
-			}
-		}
-
-		// an itemized/unordered list:
-		//
-		// * Item 1
-		// * Item 2
-		//
-		// also works with + or -
-		if p.uliPrefix(data) > 0 {
-			data = data[p.list(data, 0):]
-			continue
-		}
-
-		// a numbered/ordered list:
-		//
-		// 1. Item 1
-		// 2. Item 2
-		if p.oliPrefix(data) > 0 {
-			data = data[p.list(data, ListTypeOrdered):]
-			continue
-		}
-
-		// definition lists:
-		//
-		// Term 1
-		// :   Definition a
-		// :   Definition b
-		//
-		// Term 2
-		// :   Definition c
-		if p.extensions&DefinitionLists != 0 {
-			if p.dliPrefix(data) > 0 {
-				data = data[p.list(data, ListTypeDefinition):]
-				continue
-			}
-		}
-
-		// anything else must look like a normal paragraph
-		// note: this finds underlined headings, too
-		data = data[p.paragraph(data):]
-	}
-
-	p.nesting--
-}
-
-func (p *Markdown) addBlock(typ NodeType, content []byte) *Node {
-	p.closeUnmatchedBlocks()
-	container := p.addChild(typ, 0)
-	container.content = content
-	return container
-}
-
-func (p *Markdown) isPrefixHeading(data []byte) bool {
-	if data[0] != '#' {
-		return false
-	}
-
-	if p.extensions&SpaceHeadings != 0 {
-		level := 0
-		for level < 6 && level < len(data) && data[level] == '#' {
-			level++
-		}
-		if level == len(data) || data[level] != ' ' {
-			return false
-		}
-	}
-	return true
-}
-
-func (p *Markdown) prefixHeading(data []byte) int {
-	level := 0
-	for level < 6 && level < len(data) && data[level] == '#' {
-		level++
-	}
-	i := skipChar(data, level, ' ')
-	end := skipUntilChar(data, i, '\n')
-	skip := end
-	id := ""
-	if p.extensions&HeadingIDs != 0 {
-		j, k := 0, 0
-		// find start/end of heading id
-		for j = i; j < end-1 && (data[j] != '{' || data[j+1] != '#'); j++ {
-		}
-		for k = j + 1; k < end && data[k] != '}'; k++ {
-		}
-		// extract heading id iff found
-		if j < end && k < end {
-			id = string(data[j+2 : k])
-			end = j
-			skip = k + 1
-			for end > 0 && data[end-1] == ' ' {
-				end--
-			}
-		}
-	}
-	for end > 0 && data[end-1] == '#' {
-		if isBackslashEscaped(data, end-1) {
-			break
-		}
-		end--
-	}
-	for end > 0 && data[end-1] == ' ' {
-		end--
-	}
-	if end > i {
-		if id == "" && p.extensions&AutoHeadingIDs != 0 {
-			id = sanitized_anchor_name.Create(string(data[i:end]))
-		}
-		block := p.addBlock(Heading, data[i:end])
-		block.HeadingID = id
-		block.Level = level
-	}
-	return skip
-}
-
-func (p *Markdown) isUnderlinedHeading(data []byte) int {
-	// test of level 1 heading
-	if data[0] == '=' {
-		i := skipChar(data, 1, '=')
-		i = skipChar(data, i, ' ')
-		if i < len(data) && data[i] == '\n' {
-			return 1
-		}
-		return 0
-	}
-
-	// test of level 2 heading
-	if data[0] == '-' {
-		i := skipChar(data, 1, '-')
-		i = skipChar(data, i, ' ')
-		if i < len(data) && data[i] == '\n' {
-			return 2
-		}
-		return 0
-	}
-
-	return 0
-}
-
-func (p *Markdown) titleBlock(data []byte, doRender bool) int {
-	if data[0] != '%' {
-		return 0
-	}
-	splitData := bytes.Split(data, []byte("\n"))
-	var i int
-	for idx, b := range splitData {
-		if !bytes.HasPrefix(b, []byte("%")) {
-			i = idx // - 1
-			break
-		}
-	}
-
-	data = bytes.Join(splitData[0:i], []byte("\n"))
-	consumed := len(data)
-	data = bytes.TrimPrefix(data, []byte("% "))
-	data = bytes.Replace(data, []byte("\n% "), []byte("\n"), -1)
-	block := p.addBlock(Heading, data)
-	block.Level = 1
-	block.IsTitleblock = true
-
-	return consumed
-}
-
-func (p *Markdown) html(data []byte, doRender bool) int {
-	var i, j int
-
-	// identify the opening tag
-	if data[0] != '<' {
-		return 0
-	}
-	curtag, tagfound := p.htmlFindTag(data[1:])
-
-	// handle special cases
-	if !tagfound {
-		// check for an HTML comment
-		if size := p.htmlComment(data, doRender); size > 0 {
-			return size
-		}
-
-		// check for an 
 tag
-		if size := p.htmlHr(data, doRender); size > 0 {
-			return size
-		}
-
-		// no special case recognized
-		return 0
-	}
-
-	// look for an unindented matching closing tag
-	// followed by a blank line
-	found := false
-	/*
-		closetag := []byte("\n" + curtag + ">")
-		j = len(curtag) + 1
-		for !found {
-			// scan for a closing tag at the beginning of a line
-			if skip := bytes.Index(data[j:], closetag); skip >= 0 {
-				j += skip + len(closetag)
-			} else {
-				break
-			}
-
-			// see if it is the only thing on the line
-			if skip := p.isEmpty(data[j:]); skip > 0 {
-				// see if it is followed by a blank line/eof
-				j += skip
-				if j >= len(data) {
-					found = true
-					i = j
-				} else {
-					if skip := p.isEmpty(data[j:]); skip > 0 {
-						j += skip
-						found = true
-						i = j
-					}
-				}
-			}
-		}
-	*/
-
-	// if not found, try a second pass looking for indented match
-	// but not if tag is "ins" or "del" (following original Markdown.pl)
-	if !found && curtag != "ins" && curtag != "del" {
-		i = 1
-		for i < len(data) {
-			i++
-			for i < len(data) && !(data[i-1] == '<' && data[i] == '/') {
-				i++
-			}
-
-			if i+2+len(curtag) >= len(data) {
-				break
-			}
-
-			j = p.htmlFindEnd(curtag, data[i-1:])
-
-			if j > 0 {
-				i += j - 1
-				found = true
-				break
-			}
-		}
-	}
-
-	if !found {
-		return 0
-	}
-
-	// the end of the block has been found
-	if doRender {
-		// trim newlines
-		end := i
-		for end > 0 && data[end-1] == '\n' {
-			end--
-		}
-		finalizeHTMLBlock(p.addBlock(HTMLBlock, data[:end]))
-	}
-
-	return i
-}
-
-func finalizeHTMLBlock(block *Node) {
-	block.Literal = block.content
-	block.content = nil
-}
-
-// HTML comment, lax form
-func (p *Markdown) htmlComment(data []byte, doRender bool) int {
-	i := p.inlineHTMLComment(data)
-	// needs to end with a blank line
-	if j := p.isEmpty(data[i:]); j > 0 {
-		size := i + j
-		if doRender {
-			// trim trailing newlines
-			end := size
-			for end > 0 && data[end-1] == '\n' {
-				end--
-			}
-			block := p.addBlock(HTMLBlock, data[:end])
-			finalizeHTMLBlock(block)
-		}
-		return size
-	}
-	return 0
-}
-
-// HR, which is the only self-closing block tag considered
-func (p *Markdown) htmlHr(data []byte, doRender bool) int {
-	if len(data) < 4 {
-		return 0
-	}
-	if data[0] != '<' || (data[1] != 'h' && data[1] != 'H') || (data[2] != 'r' && data[2] != 'R') {
-		return 0
-	}
-	if data[3] != ' ' && data[3] != '/' && data[3] != '>' {
-		// not an 
 tag after all; at least not a valid one
-		return 0
-	}
-	i := 3
-	for i < len(data) && data[i] != '>' && data[i] != '\n' {
-		i++
-	}
-	if i < len(data) && data[i] == '>' {
-		i++
-		if j := p.isEmpty(data[i:]); j > 0 {
-			size := i + j
-			if doRender {
-				// trim newlines
-				end := size
-				for end > 0 && data[end-1] == '\n' {
-					end--
-				}
-				finalizeHTMLBlock(p.addBlock(HTMLBlock, data[:end]))
-			}
-			return size
-		}
-	}
-	return 0
-}
-
-func (p *Markdown) htmlFindTag(data []byte) (string, bool) {
-	i := 0
-	for i < len(data) && isalnum(data[i]) {
-		i++
-	}
-	key := string(data[:i])
-	if _, ok := blockTags[key]; ok {
-		return key, true
-	}
-	return "", false
-}
-
-func (p *Markdown) htmlFindEnd(tag string, data []byte) int {
-	// assume data[0] == '<' && data[1] == '/' already tested
-	if tag == "hr" {
-		return 2
-	}
-	// check if tag is a match
-	closetag := []byte("" + tag + ">")
-	if !bytes.HasPrefix(data, closetag) {
-		return 0
-	}
-	i := len(closetag)
-
-	// check that the rest of the line is blank
-	skip := 0
-	if skip = p.isEmpty(data[i:]); skip == 0 {
-		return 0
-	}
-	i += skip
-	skip = 0
-
-	if i >= len(data) {
-		return i
-	}
-
-	if p.extensions&LaxHTMLBlocks != 0 {
-		return i
-	}
-	if skip = p.isEmpty(data[i:]); skip == 0 {
-		// following line must be blank
-		return 0
-	}
-
-	return i + skip
-}
-
-func (*Markdown) isEmpty(data []byte) int {
-	// it is okay to call isEmpty on an empty buffer
-	if len(data) == 0 {
-		return 0
-	}
-
-	var i int
-	for i = 0; i < len(data) && data[i] != '\n'; i++ {
-		if data[i] != ' ' && data[i] != '\t' {
-			return 0
-		}
-	}
-	if i < len(data) && data[i] == '\n' {
-		i++
-	}
-	return i
-}
-
-func (*Markdown) isHRule(data []byte) bool {
-	i := 0
-
-	// skip up to three spaces
-	for i < 3 && data[i] == ' ' {
-		i++
-	}
-
-	// look at the hrule char
-	if data[i] != '*' && data[i] != '-' && data[i] != '_' {
-		return false
-	}
-	c := data[i]
-
-	// the whole line must be the char or whitespace
-	n := 0
-	for i < len(data) && data[i] != '\n' {
-		switch {
-		case data[i] == c:
-			n++
-		case data[i] != ' ':
-			return false
-		}
-		i++
-	}
-
-	return n >= 3
-}
-
-// isFenceLine checks if there's a fence line (e.g., ``` or ``` go) at the beginning of data,
-// and returns the end index if so, or 0 otherwise. It also returns the marker found.
-// If info is not nil, it gets set to the syntax specified in the fence line.
-func isFenceLine(data []byte, info *string, oldmarker string) (end int, marker string) {
-	i, size := 0, 0
-
-	// skip up to three spaces
-	for i < len(data) && i < 3 && data[i] == ' ' {
-		i++
-	}
-
-	// check for the marker characters: ~ or `
-	if i >= len(data) {
-		return 0, ""
-	}
-	if data[i] != '~' && data[i] != '`' {
-		return 0, ""
-	}
-
-	c := data[i]
-
-	// the whole line must be the same char or whitespace
-	for i < len(data) && data[i] == c {
-		size++
-		i++
-	}
-
-	// the marker char must occur at least 3 times
-	if size < 3 {
-		return 0, ""
-	}
-	marker = string(data[i-size : i])
-
-	// if this is the end marker, it must match the beginning marker
-	if oldmarker != "" && marker != oldmarker {
-		return 0, ""
-	}
-
-	// TODO(shurcooL): It's probably a good idea to simplify the 2 code paths here
-	// into one, always get the info string, and discard it if the caller doesn't care.
-	if info != nil {
-		infoLength := 0
-		i = skipChar(data, i, ' ')
-
-		if i >= len(data) {
-			if i == len(data) {
-				return i, marker
-			}
-			return 0, ""
-		}
-
-		infoStart := i
-
-		if data[i] == '{' {
-			i++
-			infoStart++
-
-			for i < len(data) && data[i] != '}' && data[i] != '\n' {
-				infoLength++
-				i++
-			}
-
-			if i >= len(data) || data[i] != '}' {
-				return 0, ""
-			}
-
-			// strip all whitespace at the beginning and the end
-			// of the {} block
-			for infoLength > 0 && isspace(data[infoStart]) {
-				infoStart++
-				infoLength--
-			}
-
-			for infoLength > 0 && isspace(data[infoStart+infoLength-1]) {
-				infoLength--
-			}
-			i++
-			i = skipChar(data, i, ' ')
-		} else {
-			for i < len(data) && !isverticalspace(data[i]) {
-				infoLength++
-				i++
-			}
-		}
-
-		*info = strings.TrimSpace(string(data[infoStart : infoStart+infoLength]))
-	}
-
-	if i == len(data) {
-		return i, marker
-	}
-	if i > len(data) || data[i] != '\n' {
-		return 0, ""
-	}
-	return i + 1, marker // Take newline into account.
-}
-
-// fencedCodeBlock returns the end index if data contains a fenced code block at the beginning,
-// or 0 otherwise. It writes to out if doRender is true, otherwise it has no side effects.
-// If doRender is true, a final newline is mandatory to recognize the fenced code block.
-func (p *Markdown) fencedCodeBlock(data []byte, doRender bool) int {
-	var info string
-	beg, marker := isFenceLine(data, &info, "")
-	if beg == 0 || beg >= len(data) {
-		return 0
-	}
-
-	var work bytes.Buffer
-	work.Write([]byte(info))
-	work.WriteByte('\n')
-
-	for {
-		// safe to assume beg < len(data)
-
-		// check for the end of the code block
-		fenceEnd, _ := isFenceLine(data[beg:], nil, marker)
-		if fenceEnd != 0 {
-			beg += fenceEnd
-			break
-		}
-
-		// copy the current line
-		end := skipUntilChar(data, beg, '\n') + 1
-
-		// did we reach the end of the buffer without a closing marker?
-		if end >= len(data) {
-			return 0
-		}
-
-		// verbatim copy to the working buffer
-		if doRender {
-			work.Write(data[beg:end])
-		}
-		beg = end
-	}
-
-	if doRender {
-		block := p.addBlock(CodeBlock, work.Bytes()) // TODO: get rid of temp buffer
-		block.IsFenced = true
-		finalizeCodeBlock(block)
-	}
-
-	return beg
-}
-
-func unescapeChar(str []byte) []byte {
-	if str[0] == '\\' {
-		return []byte{str[1]}
-	}
-	return []byte(html.UnescapeString(string(str)))
-}
-
-func unescapeString(str []byte) []byte {
-	if reBackslashOrAmp.Match(str) {
-		return reEntityOrEscapedChar.ReplaceAllFunc(str, unescapeChar)
-	}
-	return str
-}
-
-func finalizeCodeBlock(block *Node) {
-	if block.IsFenced {
-		newlinePos := bytes.IndexByte(block.content, '\n')
-		firstLine := block.content[:newlinePos]
-		rest := block.content[newlinePos+1:]
-		block.Info = unescapeString(bytes.Trim(firstLine, "\n"))
-		block.Literal = rest
-	} else {
-		block.Literal = block.content
-	}
-	block.content = nil
-}
-
-func (p *Markdown) table(data []byte) int {
-	table := p.addBlock(Table, nil)
-	i, columns := p.tableHeader(data)
-	if i == 0 {
-		p.tip = table.Parent
-		table.Unlink()
-		return 0
-	}
-
-	p.addBlock(TableBody, nil)
-
-	for i < len(data) {
-		pipes, rowStart := 0, i
-		for ; i < len(data) && data[i] != '\n'; i++ {
-			if data[i] == '|' {
-				pipes++
-			}
-		}
-
-		if pipes == 0 {
-			i = rowStart
-			break
-		}
-
-		// include the newline in data sent to tableRow
-		if i < len(data) && data[i] == '\n' {
-			i++
-		}
-		p.tableRow(data[rowStart:i], columns, false)
-	}
-
-	return i
-}
-
-// check if the specified position is preceded by an odd number of backslashes
-func isBackslashEscaped(data []byte, i int) bool {
-	backslashes := 0
-	for i-backslashes-1 >= 0 && data[i-backslashes-1] == '\\' {
-		backslashes++
-	}
-	return backslashes&1 == 1
-}
-
-func (p *Markdown) tableHeader(data []byte) (size int, columns []CellAlignFlags) {
-	i := 0
-	colCount := 1
-	for i = 0; i < len(data) && data[i] != '\n'; i++ {
-		if data[i] == '|' && !isBackslashEscaped(data, i) {
-			colCount++
-		}
-	}
-
-	// doesn't look like a table header
-	if colCount == 1 {
-		return
-	}
-
-	// include the newline in the data sent to tableRow
-	j := i
-	if j < len(data) && data[j] == '\n' {
-		j++
-	}
-	header := data[:j]
-
-	// column count ignores pipes at beginning or end of line
-	if data[0] == '|' {
-		colCount--
-	}
-	if i > 2 && data[i-1] == '|' && !isBackslashEscaped(data, i-1) {
-		colCount--
-	}
-
-	columns = make([]CellAlignFlags, colCount)
-
-	// move on to the header underline
-	i++
-	if i >= len(data) {
-		return
-	}
-
-	if data[i] == '|' && !isBackslashEscaped(data, i) {
-		i++
-	}
-	i = skipChar(data, i, ' ')
-
-	// each column header is of form: / *:?-+:? *|/ with # dashes + # colons >= 3
-	// and trailing | optional on last column
-	col := 0
-	for i < len(data) && data[i] != '\n' {
-		dashes := 0
-
-		if data[i] == ':' {
-			i++
-			columns[col] |= TableAlignmentLeft
-			dashes++
-		}
-		for i < len(data) && data[i] == '-' {
-			i++
-			dashes++
-		}
-		if i < len(data) && data[i] == ':' {
-			i++
-			columns[col] |= TableAlignmentRight
-			dashes++
-		}
-		for i < len(data) && data[i] == ' ' {
-			i++
-		}
-		if i == len(data) {
-			return
-		}
-		// end of column test is messy
-		switch {
-		case dashes < 3:
-			// not a valid column
-			return
-
-		case data[i] == '|' && !isBackslashEscaped(data, i):
-			// marker found, now skip past trailing whitespace
-			col++
-			i++
-			for i < len(data) && data[i] == ' ' {
-				i++
-			}
-
-			// trailing junk found after last column
-			if col >= colCount && i < len(data) && data[i] != '\n' {
-				return
-			}
-
-		case (data[i] != '|' || isBackslashEscaped(data, i)) && col+1 < colCount:
-			// something else found where marker was required
-			return
-
-		case data[i] == '\n':
-			// marker is optional for the last column
-			col++
-
-		default:
-			// trailing junk found after last column
-			return
-		}
-	}
-	if col != colCount {
-		return
-	}
-
-	p.addBlock(TableHead, nil)
-	p.tableRow(header, columns, true)
-	size = i
-	if size < len(data) && data[size] == '\n' {
-		size++
-	}
-	return
-}
-
-func (p *Markdown) tableRow(data []byte, columns []CellAlignFlags, header bool) {
-	p.addBlock(TableRow, nil)
-	i, col := 0, 0
-
-	if data[i] == '|' && !isBackslashEscaped(data, i) {
-		i++
-	}
-
-	for col = 0; col < len(columns) && i < len(data); col++ {
-		for i < len(data) && data[i] == ' ' {
-			i++
-		}
-
-		cellStart := i
-
-		for i < len(data) && (data[i] != '|' || isBackslashEscaped(data, i)) && data[i] != '\n' {
-			i++
-		}
-
-		cellEnd := i
-
-		// skip the end-of-cell marker, possibly taking us past end of buffer
-		i++
-
-		for cellEnd > cellStart && cellEnd-1 < len(data) && data[cellEnd-1] == ' ' {
-			cellEnd--
-		}
-
-		cell := p.addBlock(TableCell, data[cellStart:cellEnd])
-		cell.IsHeader = header
-		cell.Align = columns[col]
-	}
-
-	// pad it out with empty columns to get the right number
-	for ; col < len(columns); col++ {
-		cell := p.addBlock(TableCell, nil)
-		cell.IsHeader = header
-		cell.Align = columns[col]
-	}
-
-	// silently ignore rows with too many cells
-}
-
-// returns blockquote prefix length
-func (p *Markdown) quotePrefix(data []byte) int {
-	i := 0
-	for i < 3 && i < len(data) && data[i] == ' ' {
-		i++
-	}
-	if i < len(data) && data[i] == '>' {
-		if i+1 < len(data) && data[i+1] == ' ' {
-			return i + 2
-		}
-		return i + 1
-	}
-	return 0
-}
-
-// blockquote ends with at least one blank line
-// followed by something without a blockquote prefix
-func (p *Markdown) terminateBlockquote(data []byte, beg, end int) bool {
-	if p.isEmpty(data[beg:]) <= 0 {
-		return false
-	}
-	if end >= len(data) {
-		return true
-	}
-	return p.quotePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0
-}
-
-// parse a blockquote fragment
-func (p *Markdown) quote(data []byte) int {
-	block := p.addBlock(BlockQuote, nil)
-	var raw bytes.Buffer
-	beg, end := 0, 0
-	for beg < len(data) {
-		end = beg
-		// Step over whole lines, collecting them. While doing that, check for
-		// fenced code and if one's found, incorporate it altogether,
-		// irregardless of any contents inside it
-		for end < len(data) && data[end] != '\n' {
-			if p.extensions&FencedCode != 0 {
-				if i := p.fencedCodeBlock(data[end:], false); i > 0 {
-					// -1 to compensate for the extra end++ after the loop:
-					end += i - 1
-					break
-				}
-			}
-			end++
-		}
-		if end < len(data) && data[end] == '\n' {
-			end++
-		}
-		if pre := p.quotePrefix(data[beg:]); pre > 0 {
-			// skip the prefix
-			beg += pre
-		} else if p.terminateBlockquote(data, beg, end) {
-			break
-		}
-		// this line is part of the blockquote
-		raw.Write(data[beg:end])
-		beg = end
-	}
-	p.block(raw.Bytes())
-	p.finalize(block)
-	return end
-}
-
-// returns prefix length for block code
-func (p *Markdown) codePrefix(data []byte) int {
-	if len(data) >= 1 && data[0] == '\t' {
-		return 1
-	}
-	if len(data) >= 4 && data[0] == ' ' && data[1] == ' ' && data[2] == ' ' && data[3] == ' ' {
-		return 4
-	}
-	return 0
-}
-
-func (p *Markdown) code(data []byte) int {
-	var work bytes.Buffer
-
-	i := 0
-	for i < len(data) {
-		beg := i
-		for i < len(data) && data[i] != '\n' {
-			i++
-		}
-		if i < len(data) && data[i] == '\n' {
-			i++
-		}
-
-		blankline := p.isEmpty(data[beg:i]) > 0
-		if pre := p.codePrefix(data[beg:i]); pre > 0 {
-			beg += pre
-		} else if !blankline {
-			// non-empty, non-prefixed line breaks the pre
-			i = beg
-			break
-		}
-
-		// verbatim copy to the working buffer
-		if blankline {
-			work.WriteByte('\n')
-		} else {
-			work.Write(data[beg:i])
-		}
-	}
-
-	// trim all the \n off the end of work
-	workbytes := work.Bytes()
-	eol := len(workbytes)
-	for eol > 0 && workbytes[eol-1] == '\n' {
-		eol--
-	}
-	if eol != len(workbytes) {
-		work.Truncate(eol)
-	}
-
-	work.WriteByte('\n')
-
-	block := p.addBlock(CodeBlock, work.Bytes()) // TODO: get rid of temp buffer
-	block.IsFenced = false
-	finalizeCodeBlock(block)
-
-	return i
-}
-
-// returns unordered list item prefix
-func (p *Markdown) uliPrefix(data []byte) int {
-	i := 0
-	// start with up to 3 spaces
-	for i < len(data) && i < 3 && data[i] == ' ' {
-		i++
-	}
-	if i >= len(data)-1 {
-		return 0
-	}
-	// need one of {'*', '+', '-'} followed by a space or a tab
-	if (data[i] != '*' && data[i] != '+' && data[i] != '-') ||
-		(data[i+1] != ' ' && data[i+1] != '\t') {
-		return 0
-	}
-	return i + 2
-}
-
-// returns ordered list item prefix
-func (p *Markdown) oliPrefix(data []byte) int {
-	i := 0
-
-	// start with up to 3 spaces
-	for i < 3 && i < len(data) && data[i] == ' ' {
-		i++
-	}
-
-	// count the digits
-	start := i
-	for i < len(data) && data[i] >= '0' && data[i] <= '9' {
-		i++
-	}
-	if start == i || i >= len(data)-1 {
-		return 0
-	}
-
-	// we need >= 1 digits followed by a dot and a space or a tab
-	if data[i] != '.' || !(data[i+1] == ' ' || data[i+1] == '\t') {
-		return 0
-	}
-	return i + 2
-}
-
-// returns definition list item prefix
-func (p *Markdown) dliPrefix(data []byte) int {
-	if len(data) < 2 {
-		return 0
-	}
-	i := 0
-	// need a ':' followed by a space or a tab
-	if data[i] != ':' || !(data[i+1] == ' ' || data[i+1] == '\t') {
-		return 0
-	}
-	for i < len(data) && data[i] == ' ' {
-		i++
-	}
-	return i + 2
-}
-
-// parse ordered or unordered list block
-func (p *Markdown) list(data []byte, flags ListType) int {
-	i := 0
-	flags |= ListItemBeginningOfList
-	block := p.addBlock(List, nil)
-	block.ListFlags = flags
-	block.Tight = true
-
-	for i < len(data) {
-		skip := p.listItem(data[i:], &flags)
-		if flags&ListItemContainsBlock != 0 {
-			block.ListData.Tight = false
-		}
-		i += skip
-		if skip == 0 || flags&ListItemEndOfList != 0 {
-			break
-		}
-		flags &= ^ListItemBeginningOfList
-	}
-
-	above := block.Parent
-	finalizeList(block)
-	p.tip = above
-	return i
-}
-
-// Returns true if the list item is not the same type as its parent list
-func (p *Markdown) listTypeChanged(data []byte, flags *ListType) bool {
-	if p.dliPrefix(data) > 0 && *flags&ListTypeDefinition == 0 {
-		return true
-	} else if p.oliPrefix(data) > 0 && *flags&ListTypeOrdered == 0 {
-		return true
-	} else if p.uliPrefix(data) > 0 && (*flags&ListTypeOrdered != 0 || *flags&ListTypeDefinition != 0) {
-		return true
-	}
-	return false
-}
-
-// Returns true if block ends with a blank line, descending if needed
-// into lists and sublists.
-func endsWithBlankLine(block *Node) bool {
-	// TODO: figure this out. Always false now.
-	for block != nil {
-		//if block.lastLineBlank {
-		//return true
-		//}
-		t := block.Type
-		if t == List || t == Item {
-			block = block.LastChild
-		} else {
-			break
-		}
-	}
-	return false
-}
-
-func finalizeList(block *Node) {
-	block.open = false
-	item := block.FirstChild
-	for item != nil {
-		// check for non-final list item ending with blank line:
-		if endsWithBlankLine(item) && item.Next != nil {
-			block.ListData.Tight = false
-			break
-		}
-		// recurse into children of list item, to see if there are spaces
-		// between any of them:
-		subItem := item.FirstChild
-		for subItem != nil {
-			if endsWithBlankLine(subItem) && (item.Next != nil || subItem.Next != nil) {
-				block.ListData.Tight = false
-				break
-			}
-			subItem = subItem.Next
-		}
-		item = item.Next
-	}
-}
-
-// Parse a single list item.
-// Assumes initial prefix is already removed if this is a sublist.
-func (p *Markdown) listItem(data []byte, flags *ListType) int {
-	// keep track of the indentation of the first line
-	itemIndent := 0
-	if data[0] == '\t' {
-		itemIndent += 4
-	} else {
-		for itemIndent < 3 && data[itemIndent] == ' ' {
-			itemIndent++
-		}
-	}
-
-	var bulletChar byte = '*'
-	i := p.uliPrefix(data)
-	if i == 0 {
-		i = p.oliPrefix(data)
-	} else {
-		bulletChar = data[i-2]
-	}
-	if i == 0 {
-		i = p.dliPrefix(data)
-		// reset definition term flag
-		if i > 0 {
-			*flags &= ^ListTypeTerm
-		}
-	}
-	if i == 0 {
-		// if in definition list, set term flag and continue
-		if *flags&ListTypeDefinition != 0 {
-			*flags |= ListTypeTerm
-		} else {
-			return 0
-		}
-	}
-
-	// skip leading whitespace on first line
-	for i < len(data) && data[i] == ' ' {
-		i++
-	}
-
-	// find the end of the line
-	line := i
-	for i > 0 && i < len(data) && data[i-1] != '\n' {
-		i++
-	}
-
-	// get working buffer
-	var raw bytes.Buffer
-
-	// put the first line into the working buffer
-	raw.Write(data[line:i])
-	line = i
-
-	// process the following lines
-	containsBlankLine := false
-	sublist := 0
-	codeBlockMarker := ""
-
-gatherlines:
-	for line < len(data) {
-		i++
-
-		// find the end of this line
-		for i < len(data) && data[i-1] != '\n' {
-			i++
-		}
-
-		// if it is an empty line, guess that it is part of this item
-		// and move on to the next line
-		if p.isEmpty(data[line:i]) > 0 {
-			containsBlankLine = true
-			line = i
-			continue
-		}
-
-		// calculate the indentation
-		indent := 0
-		indentIndex := 0
-		if data[line] == '\t' {
-			indentIndex++
-			indent += 4
-		} else {
-			for indent < 4 && line+indent < i && data[line+indent] == ' ' {
-				indent++
-				indentIndex++
-			}
-		}
-
-		chunk := data[line+indentIndex : i]
-
-		if p.extensions&FencedCode != 0 {
-			// determine if in or out of codeblock
-			// if in codeblock, ignore normal list processing
-			_, marker := isFenceLine(chunk, nil, codeBlockMarker)
-			if marker != "" {
-				if codeBlockMarker == "" {
-					// start of codeblock
-					codeBlockMarker = marker
-				} else {
-					// end of codeblock.
-					codeBlockMarker = ""
-				}
-			}
-			// we are in a codeblock, write line, and continue
-			if codeBlockMarker != "" || marker != "" {
-				raw.Write(data[line+indentIndex : i])
-				line = i
-				continue gatherlines
-			}
-		}
-
-		// evaluate how this line fits in
-		switch {
-		// is this a nested list item?
-		case (p.uliPrefix(chunk) > 0 && !p.isHRule(chunk)) ||
-			p.oliPrefix(chunk) > 0 ||
-			p.dliPrefix(chunk) > 0:
-
-			// to be a nested list, it must be indented more
-			// if not, it is either a different kind of list
-			// or the next item in the same list
-			if indent <= itemIndent {
-				if p.listTypeChanged(chunk, flags) {
-					*flags |= ListItemEndOfList
-				} else if containsBlankLine {
-					*flags |= ListItemContainsBlock
-				}
-
-				break gatherlines
-			}
-
-			if containsBlankLine {
-				*flags |= ListItemContainsBlock
-			}
-
-			// is this the first item in the nested list?
-			if sublist == 0 {
-				sublist = raw.Len()
-			}
-
-		// is this a nested prefix heading?
-		case p.isPrefixHeading(chunk):
-			// if the heading is not indented, it is not nested in the list
-			// and thus ends the list
-			if containsBlankLine && indent < 4 {
-				*flags |= ListItemEndOfList
-				break gatherlines
-			}
-			*flags |= ListItemContainsBlock
-
-		// anything following an empty line is only part
-		// of this item if it is indented 4 spaces
-		// (regardless of the indentation of the beginning of the item)
-		case containsBlankLine && indent < 4:
-			if *flags&ListTypeDefinition != 0 && i < len(data)-1 {
-				// is the next item still a part of this list?
-				next := i
-				for next < len(data) && data[next] != '\n' {
-					next++
-				}
-				for next < len(data)-1 && data[next] == '\n' {
-					next++
-				}
-				if i < len(data)-1 && data[i] != ':' && data[next] != ':' {
-					*flags |= ListItemEndOfList
-				}
-			} else {
-				*flags |= ListItemEndOfList
-			}
-			break gatherlines
-
-		// a blank line means this should be parsed as a block
-		case containsBlankLine:
-			raw.WriteByte('\n')
-			*flags |= ListItemContainsBlock
-		}
-
-		// if this line was preceded by one or more blanks,
-		// re-introduce the blank into the buffer
-		if containsBlankLine {
-			containsBlankLine = false
-			raw.WriteByte('\n')
-		}
-
-		// add the line into the working buffer without prefix
-		raw.Write(data[line+indentIndex : i])
-
-		line = i
-	}
-
-	rawBytes := raw.Bytes()
-
-	block := p.addBlock(Item, nil)
-	block.ListFlags = *flags
-	block.Tight = false
-	block.BulletChar = bulletChar
-	block.Delimiter = '.' // Only '.' is possible in Markdown, but ')' will also be possible in CommonMark
-
-	// render the contents of the list item
-	if *flags&ListItemContainsBlock != 0 && *flags&ListTypeTerm == 0 {
-		// intermediate render of block item, except for definition term
-		if sublist > 0 {
-			p.block(rawBytes[:sublist])
-			p.block(rawBytes[sublist:])
-		} else {
-			p.block(rawBytes)
-		}
-	} else {
-		// intermediate render of inline item
-		if sublist > 0 {
-			child := p.addChild(Paragraph, 0)
-			child.content = rawBytes[:sublist]
-			p.block(rawBytes[sublist:])
-		} else {
-			child := p.addChild(Paragraph, 0)
-			child.content = rawBytes
-		}
-	}
-	return line
-}
-
-// render a single paragraph that has already been parsed out
-func (p *Markdown) renderParagraph(data []byte) {
-	if len(data) == 0 {
-		return
-	}
-
-	// trim leading spaces
-	beg := 0
-	for data[beg] == ' ' {
-		beg++
-	}
-
-	end := len(data)
-	// trim trailing newline
-	if data[len(data)-1] == '\n' {
-		end--
-	}
-
-	// trim trailing spaces
-	for end > beg && data[end-1] == ' ' {
-		end--
-	}
-
-	p.addBlock(Paragraph, data[beg:end])
-}
-
-func (p *Markdown) paragraph(data []byte) int {
-	// prev: index of 1st char of previous line
-	// line: index of 1st char of current line
-	// i: index of cursor/end of current line
-	var prev, line, i int
-	tabSize := TabSizeDefault
-	if p.extensions&TabSizeEight != 0 {
-		tabSize = TabSizeDouble
-	}
-	// keep going until we find something to mark the end of the paragraph
-	for i < len(data) {
-		// mark the beginning of the current line
-		prev = line
-		current := data[i:]
-		line = i
-
-		// did we find a reference or a footnote? If so, end a paragraph
-		// preceding it and report that we have consumed up to the end of that
-		// reference:
-		if refEnd := isReference(p, current, tabSize); refEnd > 0 {
-			p.renderParagraph(data[:i])
-			return i + refEnd
-		}
-
-		// did we find a blank line marking the end of the paragraph?
-		if n := p.isEmpty(current); n > 0 {
-			// did this blank line followed by a definition list item?
-			if p.extensions&DefinitionLists != 0 {
-				if i < len(data)-1 && data[i+1] == ':' {
-					return p.list(data[prev:], ListTypeDefinition)
-				}
-			}
-
-			p.renderParagraph(data[:i])
-			return i + n
-		}
-
-		// an underline under some text marks a heading, so our paragraph ended on prev line
-		if i > 0 {
-			if level := p.isUnderlinedHeading(current); level > 0 {
-				// render the paragraph
-				p.renderParagraph(data[:prev])
-
-				// ignore leading and trailing whitespace
-				eol := i - 1
-				for prev < eol && data[prev] == ' ' {
-					prev++
-				}
-				for eol > prev && data[eol-1] == ' ' {
-					eol--
-				}
-
-				id := ""
-				if p.extensions&AutoHeadingIDs != 0 {
-					id = sanitized_anchor_name.Create(string(data[prev:eol]))
-				}
-
-				block := p.addBlock(Heading, data[prev:eol])
-				block.Level = level
-				block.HeadingID = id
-
-				// find the end of the underline
-				for i < len(data) && data[i] != '\n' {
-					i++
-				}
-				return i
-			}
-		}
-
-		// if the next line starts a block of HTML, then the paragraph ends here
-		if p.extensions&LaxHTMLBlocks != 0 {
-			if data[i] == '<' && p.html(current, false) > 0 {
-				// rewind to before the HTML block
-				p.renderParagraph(data[:i])
-				return i
-			}
-		}
-
-		// if there's a prefixed heading or a horizontal rule after this, paragraph is over
-		if p.isPrefixHeading(current) || p.isHRule(current) {
-			p.renderParagraph(data[:i])
-			return i
-		}
-
-		// if there's a fenced code block, paragraph is over
-		if p.extensions&FencedCode != 0 {
-			if p.fencedCodeBlock(current, false) > 0 {
-				p.renderParagraph(data[:i])
-				return i
-			}
-		}
-
-		// if there's a definition list item, prev line is a definition term
-		if p.extensions&DefinitionLists != 0 {
-			if p.dliPrefix(current) != 0 {
-				ret := p.list(data[prev:], ListTypeDefinition)
-				return ret
-			}
-		}
-
-		// if there's a list after this, paragraph is over
-		if p.extensions&NoEmptyLineBeforeBlock != 0 {
-			if p.uliPrefix(current) != 0 ||
-				p.oliPrefix(current) != 0 ||
-				p.quotePrefix(current) != 0 ||
-				p.codePrefix(current) != 0 {
-				p.renderParagraph(data[:i])
-				return i
-			}
-		}
-
-		// otherwise, scan to the beginning of the next line
-		nl := bytes.IndexByte(data[i:], '\n')
-		if nl >= 0 {
-			i += nl + 1
-		} else {
-			i += len(data[i:])
-		}
-	}
-
-	p.renderParagraph(data[:i])
-	return i
-}
-
-func skipChar(data []byte, start int, char byte) int {
-	i := start
-	for i < len(data) && data[i] == char {
-		i++
-	}
-	return i
-}
-
-func skipUntilChar(text []byte, start int, char byte) int {
-	i := start
-	for i < len(text) && text[i] != char {
-		i++
-	}
-	return i
-}
diff --git a/vendor/github.com/russross/blackfriday/v2/doc.go b/vendor/github.com/russross/blackfriday/v2/doc.go
deleted file mode 100644
index 5b3fa9876a..0000000000
--- a/vendor/github.com/russross/blackfriday/v2/doc.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// Package blackfriday is a markdown processor.
-//
-// It translates plain text with simple formatting rules into an AST, which can
-// then be further processed to HTML (provided by Blackfriday itself) or other
-// formats (provided by the community).
-//
-// The simplest way to invoke Blackfriday is to call the Run function. It will
-// take a text input and produce a text output in HTML (or other format).
-//
-// A slightly more sophisticated way to use Blackfriday is to create a Markdown
-// processor and to call Parse, which returns a syntax tree for the input
-// document. You can leverage Blackfriday's parsing for content extraction from
-// markdown documents. You can assign a custom renderer and set various options
-// to the Markdown processor.
-//
-// If you're interested in calling Blackfriday from command line, see
-// https://github.com/russross/blackfriday-tool.
-package blackfriday
diff --git a/vendor/github.com/russross/blackfriday/v2/esc.go b/vendor/github.com/russross/blackfriday/v2/esc.go
deleted file mode 100644
index 6385f27cb6..0000000000
--- a/vendor/github.com/russross/blackfriday/v2/esc.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package blackfriday
-
-import (
-	"html"
-	"io"
-)
-
-var htmlEscaper = [256][]byte{
-	'&': []byte("&"),
-	'<': []byte("<"),
-	'>': []byte(">"),
-	'"': []byte("""),
-}
-
-func escapeHTML(w io.Writer, s []byte) {
-	var start, end int
-	for end < len(s) {
-		escSeq := htmlEscaper[s[end]]
-		if escSeq != nil {
-			w.Write(s[start:end])
-			w.Write(escSeq)
-			start = end + 1
-		}
-		end++
-	}
-	if start < len(s) && end <= len(s) {
-		w.Write(s[start:end])
-	}
-}
-
-func escLink(w io.Writer, text []byte) {
-	unesc := html.UnescapeString(string(text))
-	escapeHTML(w, []byte(unesc))
-}
diff --git a/vendor/github.com/russross/blackfriday/v2/go.mod b/vendor/github.com/russross/blackfriday/v2/go.mod
deleted file mode 100644
index 620b74e0ac..0000000000
--- a/vendor/github.com/russross/blackfriday/v2/go.mod
+++ /dev/null
@@ -1 +0,0 @@
-module github.com/russross/blackfriday/v2
diff --git a/vendor/github.com/russross/blackfriday/v2/html.go b/vendor/github.com/russross/blackfriday/v2/html.go
deleted file mode 100644
index 284c87184f..0000000000
--- a/vendor/github.com/russross/blackfriday/v2/html.go
+++ /dev/null
@@ -1,949 +0,0 @@
-//
-// Blackfriday Markdown Processor
-// Available at http://github.com/russross/blackfriday
-//
-// Copyright © 2011 Russ Ross .
-// Distributed under the Simplified BSD License.
-// See README.md for details.
-//
-
-//
-//
-// HTML rendering backend
-//
-//
-
-package blackfriday
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-	"regexp"
-	"strings"
-)
-
-// HTMLFlags control optional behavior of HTML renderer.
-type HTMLFlags int
-
-// HTML renderer configuration options.
-const (
-	HTMLFlagsNone           HTMLFlags = 0
-	SkipHTML                HTMLFlags = 1 << iota // Skip preformatted HTML blocks
-	SkipImages                                    // Skip embedded images
-	SkipLinks                                     // Skip all links
-	Safelink                                      // Only link to trusted protocols
-	NofollowLinks                                 // Only link with rel="nofollow"
-	NoreferrerLinks                               // Only link with rel="noreferrer"
-	NoopenerLinks                                 // Only link with rel="noopener"
-	HrefTargetBlank                               // Add a blank target
-	CompletePage                                  // Generate a complete HTML page
-	UseXHTML                                      // Generate XHTML output instead of HTML
-	FootnoteReturnLinks                           // Generate a link at the end of a footnote to return to the source
-	Smartypants                                   // Enable smart punctuation substitutions
-	SmartypantsFractions                          // Enable smart fractions (with Smartypants)
-	SmartypantsDashes                             // Enable smart dashes (with Smartypants)
-	SmartypantsLatexDashes                        // Enable LaTeX-style dashes (with Smartypants)
-	SmartypantsAngledQuotes                       // Enable angled double quotes (with Smartypants) for double quotes rendering
-	SmartypantsQuotesNBSP                         // Enable « French guillemets » (with Smartypants)
-	TOC                                           // Generate a table of contents
-)
-
-var (
-	htmlTagRe = regexp.MustCompile("(?i)^" + htmlTag)
-)
-
-const (
-	htmlTag = "(?:" + openTag + "|" + closeTag + "|" + htmlComment + "|" +
-		processingInstruction + "|" + declaration + "|" + cdata + ")"
-	closeTag              = "" + tagName + "\\s*[>]"
-	openTag               = "<" + tagName + attribute + "*" + "\\s*/?>"
-	attribute             = "(?:" + "\\s+" + attributeName + attributeValueSpec + "?)"
-	attributeValue        = "(?:" + unquotedValue + "|" + singleQuotedValue + "|" + doubleQuotedValue + ")"
-	attributeValueSpec    = "(?:" + "\\s*=" + "\\s*" + attributeValue + ")"
-	attributeName         = "[a-zA-Z_:][a-zA-Z0-9:._-]*"
-	cdata                 = ""
-	declaration           = "]*>"
-	doubleQuotedValue     = "\"[^\"]*\""
-	htmlComment           = "|"
-	processingInstruction = "[<][?].*?[?][>]"
-	singleQuotedValue     = "'[^']*'"
-	tagName               = "[A-Za-z][A-Za-z0-9-]*"
-	unquotedValue         = "[^\"'=<>`\\x00-\\x20]+"
-)
-
-// HTMLRendererParameters is a collection of supplementary parameters tweaking
-// the behavior of various parts of HTML renderer.
-type HTMLRendererParameters struct {
-	// Prepend this text to each relative URL.
-	AbsolutePrefix string
-	// Add this text to each footnote anchor, to ensure uniqueness.
-	FootnoteAnchorPrefix string
-	// Show this text inside the  tag for a footnote return link, if the
-	// HTML_FOOTNOTE_RETURN_LINKS flag is enabled. If blank, the string
-	// [return] is used.
-	FootnoteReturnLinkContents string
-	// If set, add this text to the front of each Heading ID, to ensure
-	// uniqueness.
-	HeadingIDPrefix string
-	// If set, add this text to the back of each Heading ID, to ensure uniqueness.
-	HeadingIDSuffix string
-	// Increase heading levels: if the offset is 1,  becomes  etc.
-	// Negative offset is also valid.
-	// Resulting levels are clipped between 1 and 6.
-	HeadingLevelOffset int
-
-	Title string // Document title (used if CompletePage is set)
-	CSS   string // Optional CSS file URL (used if CompletePage is set)
-	Icon  string // Optional icon file URL (used if CompletePage is set)
-
-	Flags HTMLFlags // Flags allow customizing this renderer's behavior
-}
-
-// HTMLRenderer is a type that implements the Renderer interface for HTML output.
-//
-// Do not create this directly, instead use the NewHTMLRenderer function.
-type HTMLRenderer struct {
-	HTMLRendererParameters
-
-	closeTag string // how to end singleton tags: either " />" or ">"
-
-	// Track heading IDs to prevent ID collision in a single generation.
-	headingIDs map[string]int
-
-	lastOutputLen int
-	disableTags   int
-
-	sr *SPRenderer
-}
-
-const (
-	xhtmlClose = " />"
-	htmlClose  = ">"
-)
-
-// NewHTMLRenderer creates and configures an HTMLRenderer object, which
-// satisfies the Renderer interface.
-func NewHTMLRenderer(params HTMLRendererParameters) *HTMLRenderer {
-	// configure the rendering engine
-	closeTag := htmlClose
-	if params.Flags&UseXHTML != 0 {
-		closeTag = xhtmlClose
-	}
-
-	if params.FootnoteReturnLinkContents == "" {
-		params.FootnoteReturnLinkContents = `[return]`
-	}
-
-	return &HTMLRenderer{
-		HTMLRendererParameters: params,
-
-		closeTag:   closeTag,
-		headingIDs: make(map[string]int),
-
-		sr: NewSmartypantsRenderer(params.Flags),
-	}
-}
-
-func isHTMLTag(tag []byte, tagname string) bool {
-	found, _ := findHTMLTagPos(tag, tagname)
-	return found
-}
-
-// Look for a character, but ignore it when it's in any kind of quotes, it
-// might be JavaScript
-func skipUntilCharIgnoreQuotes(html []byte, start int, char byte) int {
-	inSingleQuote := false
-	inDoubleQuote := false
-	inGraveQuote := false
-	i := start
-	for i < len(html) {
-		switch {
-		case html[i] == char && !inSingleQuote && !inDoubleQuote && !inGraveQuote:
-			return i
-		case html[i] == '\'':
-			inSingleQuote = !inSingleQuote
-		case html[i] == '"':
-			inDoubleQuote = !inDoubleQuote
-		case html[i] == '`':
-			inGraveQuote = !inGraveQuote
-		}
-		i++
-	}
-	return start
-}
-
-func findHTMLTagPos(tag []byte, tagname string) (bool, int) {
-	i := 0
-	if i < len(tag) && tag[0] != '<' {
-		return false, -1
-	}
-	i++
-	i = skipSpace(tag, i)
-
-	if i < len(tag) && tag[i] == '/' {
-		i++
-	}
-
-	i = skipSpace(tag, i)
-	j := 0
-	for ; i < len(tag); i, j = i+1, j+1 {
-		if j >= len(tagname) {
-			break
-		}
-
-		if strings.ToLower(string(tag[i]))[0] != tagname[j] {
-			return false, -1
-		}
-	}
-
-	if i == len(tag) {
-		return false, -1
-	}
-
-	rightAngle := skipUntilCharIgnoreQuotes(tag, i, '>')
-	if rightAngle >= i {
-		return true, rightAngle
-	}
-
-	return false, -1
-}
-
-func skipSpace(tag []byte, i int) int {
-	for i < len(tag) && isspace(tag[i]) {
-		i++
-	}
-	return i
-}
-
-func isRelativeLink(link []byte) (yes bool) {
-	// a tag begin with '#'
-	if link[0] == '#' {
-		return true
-	}
-
-	// link begin with '/' but not '//', the second maybe a protocol relative link
-	if len(link) >= 2 && link[0] == '/' && link[1] != '/' {
-		return true
-	}
-
-	// only the root '/'
-	if len(link) == 1 && link[0] == '/' {
-		return true
-	}
-
-	// current directory : begin with "./"
-	if bytes.HasPrefix(link, []byte("./")) {
-		return true
-	}
-
-	// parent directory : begin with "../"
-	if bytes.HasPrefix(link, []byte("../")) {
-		return true
-	}
-
-	return false
-}
-
-func (r *HTMLRenderer) ensureUniqueHeadingID(id string) string {
-	for count, found := r.headingIDs[id]; found; count, found = r.headingIDs[id] {
-		tmp := fmt.Sprintf("%s-%d", id, count+1)
-
-		if _, tmpFound := r.headingIDs[tmp]; !tmpFound {
-			r.headingIDs[id] = count + 1
-			id = tmp
-		} else {
-			id = id + "-1"
-		}
-	}
-
-	if _, found := r.headingIDs[id]; !found {
-		r.headingIDs[id] = 0
-	}
-
-	return id
-}
-
-func (r *HTMLRenderer) addAbsPrefix(link []byte) []byte {
-	if r.AbsolutePrefix != "" && isRelativeLink(link) && link[0] != '.' {
-		newDest := r.AbsolutePrefix
-		if link[0] != '/' {
-			newDest += "/"
-		}
-		newDest += string(link)
-		return []byte(newDest)
-	}
-	return link
-}
-
-func appendLinkAttrs(attrs []string, flags HTMLFlags, link []byte) []string {
-	if isRelativeLink(link) {
-		return attrs
-	}
-	val := []string{}
-	if flags&NofollowLinks != 0 {
-		val = append(val, "nofollow")
-	}
-	if flags&NoreferrerLinks != 0 {
-		val = append(val, "noreferrer")
-	}
-	if flags&NoopenerLinks != 0 {
-		val = append(val, "noopener")
-	}
-	if flags&HrefTargetBlank != 0 {
-		attrs = append(attrs, "target=\"_blank\"")
-	}
-	if len(val) == 0 {
-		return attrs
-	}
-	attr := fmt.Sprintf("rel=%q", strings.Join(val, " "))
-	return append(attrs, attr)
-}
-
-func isMailto(link []byte) bool {
-	return bytes.HasPrefix(link, []byte("mailto:"))
-}
-
-func needSkipLink(flags HTMLFlags, dest []byte) bool {
-	if flags&SkipLinks != 0 {
-		return true
-	}
-	return flags&Safelink != 0 && !isSafeLink(dest) && !isMailto(dest)
-}
-
-func isSmartypantable(node *Node) bool {
-	pt := node.Parent.Type
-	return pt != Link && pt != CodeBlock && pt != Code
-}
-
-func appendLanguageAttr(attrs []string, info []byte) []string {
-	if len(info) == 0 {
-		return attrs
-	}
-	endOfLang := bytes.IndexAny(info, "\t ")
-	if endOfLang < 0 {
-		endOfLang = len(info)
-	}
-	return append(attrs, fmt.Sprintf("class=\"language-%s\"", info[:endOfLang]))
-}
-
-func (r *HTMLRenderer) tag(w io.Writer, name []byte, attrs []string) {
-	w.Write(name)
-	if len(attrs) > 0 {
-		w.Write(spaceBytes)
-		w.Write([]byte(strings.Join(attrs, " ")))
-	}
-	w.Write(gtBytes)
-	r.lastOutputLen = 1
-}
-
-func footnoteRef(prefix string, node *Node) []byte {
-	urlFrag := prefix + string(slugify(node.Destination))
-	anchor := fmt.Sprintf(`%d`, urlFrag, node.NoteID)
-	return []byte(fmt.Sprintf(``, urlFrag, anchor))
-}
-
-func footnoteItem(prefix string, slug []byte) []byte {
-	return []byte(fmt.Sprintf(`
- `, prefix, slug))
-}
-
-func footnoteReturnLink(prefix, returnLink string, slug []byte) []byte {
-	const format = ` `
-	return []byte(fmt.Sprintf(format, prefix, slug, returnLink))
-}
-
-func itemOpenCR(node *Node) bool {
-	if node.Prev == nil {
-		return false
-	}
-	ld := node.Parent.ListData
-	return !ld.Tight && ld.ListFlags&ListTypeDefinition == 0
-}
-
-func skipParagraphTags(node *Node) bool {
-	grandparent := node.Parent.Parent
-	if grandparent == nil || grandparent.Type != List {
-		return false
-	}
-	tightOrTerm := grandparent.Tight || node.Parent.ListFlags&ListTypeTerm != 0
-	return grandparent.Type == List && tightOrTerm
-}
-
-func cellAlignment(align CellAlignFlags) string {
-	switch align {
-	case TableAlignmentLeft:
-		return "left"
-	case TableAlignmentRight:
-		return "right"
-	case TableAlignmentCenter:
-		return "center"
-	default:
-		return ""
-	}
-}
-
-func (r *HTMLRenderer) out(w io.Writer, text []byte) {
-	if r.disableTags > 0 {
-		w.Write(htmlTagRe.ReplaceAll(text, []byte{}))
-	} else {
-		w.Write(text)
-	}
-	r.lastOutputLen = len(text)
-}
-
-func (r *HTMLRenderer) cr(w io.Writer) {
-	if r.lastOutputLen > 0 {
-		r.out(w, nlBytes)
-	}
-}
-
-var (
-	nlBytes    = []byte{'\n'}
-	gtBytes    = []byte{'>'}
-	spaceBytes = []byte{' '}
-)
-
-var (
-	brTag              = []byte("
")
-	brXHTMLTag         = []byte("
")
-	emTag              = []byte("")
-	emCloseTag         = []byte("")
-	strongTag          = []byte("")
-	strongCloseTag     = []byte("")
-	delTag             = []byte("")
-	delCloseTag        = []byte("")
-	ttTag              = []byte("")
-	ttCloseTag         = []byte("")
-	aTag               = []byte("")
-	preTag             = []byte("")
-	preCloseTag        = []byte("")
-	codeTag            = []byte("")
-	codeCloseTag       = []byte("")
-	pTag               = []byte("")
-	pCloseTag          = []byte("
")
-	blockquoteTag      = []byte("")
-	blockquoteCloseTag = []byte("
")
-	hrTag              = []byte("
")
-	hrXHTMLTag         = []byte("
")
-	ulTag              = []byte("")
-	ulCloseTag         = []byte("
")
-	olTag              = []byte("")
-	olCloseTag         = []byte("
")
-	dlTag              = []byte("")
-	dlCloseTag         = []byte("
")
-	liTag              = []byte("- ")
-	liCloseTag         = []byte("
 ")
-	ddTag              = []byte("- ")
-	ddCloseTag         = []byte("
 ")
-	dtTag              = []byte("- ")
-	dtCloseTag         = []byte("
 ")
-	tableTag           = []byte("")
-	tableCloseTag      = []byte("
")
-	tdTag              = []byte("")
-	thTag              = []byte(" | ")
-	theadTag           = []byte("")
-	theadCloseTag      = []byte("")
-	tbodyTag           = []byte(" | ")
-	tbodyCloseTag      = []byte("")
-	trTag              = []byte("")
-	trCloseTag         = []byte("
")
-	h1Tag              = []byte("")
-	h2Tag              = []byte("")
-	h3Tag              = []byte("")
-	h4Tag              = []byte("")
-	h5Tag              = []byte("")
-	h6Tag              = []byte("")
-
-	footnotesDivBytes      = []byte("\n\n")
-)
-
-func headingTagsFromLevel(level int) ([]byte, []byte) {
-	if level <= 1 {
-		return h1Tag, h1CloseTag
-	}
-	switch level {
-	case 2:
-		return h2Tag, h2CloseTag
-	case 3:
-		return h3Tag, h3CloseTag
-	case 4:
-		return h4Tag, h4CloseTag
-	case 5:
-		return h5Tag, h5CloseTag
-	}
-	return h6Tag, h6CloseTag
-}
-
-func (r *HTMLRenderer) outHRTag(w io.Writer) {
-	if r.Flags&UseXHTML == 0 {
-		r.out(w, hrTag)
-	} else {
-		r.out(w, hrXHTMLTag)
-	}
-}
-
-// RenderNode is a default renderer of a single node of a syntax tree. For
-// block nodes it will be called twice: first time with entering=true, second
-// time with entering=false, so that it could know when it's working on an open
-// tag and when on close. It writes the result to w.
-//
-// The return value is a way to tell the calling walker to adjust its walk
-// pattern: e.g. it can terminate the traversal by returning Terminate. Or it
-// can ask the walker to skip a subtree of this node by returning SkipChildren.
-// The typical behavior is to return GoToNext, which asks for the usual
-// traversal to the next node.
-func (r *HTMLRenderer) RenderNode(w io.Writer, node *Node, entering bool) WalkStatus {
-	attrs := []string{}
-	switch node.Type {
-	case Text:
-		if r.Flags&Smartypants != 0 {
-			var tmp bytes.Buffer
-			escapeHTML(&tmp, node.Literal)
-			r.sr.Process(w, tmp.Bytes())
-		} else {
-			if node.Parent.Type == Link {
-				escLink(w, node.Literal)
-			} else {
-				escapeHTML(w, node.Literal)
-			}
-		}
-	case Softbreak:
-		r.cr(w)
-		// TODO: make it configurable via out(renderer.softbreak)
-	case Hardbreak:
-		if r.Flags&UseXHTML == 0 {
-			r.out(w, brTag)
-		} else {
-			r.out(w, brXHTMLTag)
-		}
-		r.cr(w)
-	case Emph:
-		if entering {
-			r.out(w, emTag)
-		} else {
-			r.out(w, emCloseTag)
-		}
-	case Strong:
-		if entering {
-			r.out(w, strongTag)
-		} else {
-			r.out(w, strongCloseTag)
-		}
-	case Del:
-		if entering {
-			r.out(w, delTag)
-		} else {
-			r.out(w, delCloseTag)
-		}
-	case HTMLSpan:
-		if r.Flags&SkipHTML != 0 {
-			break
-		}
-		r.out(w, node.Literal)
-	case Link:
-		// mark it but don't link it if it is not a safe link: no smartypants
-		dest := node.LinkData.Destination
-		if needSkipLink(r.Flags, dest) {
-			if entering {
-				r.out(w, ttTag)
-			} else {
-				r.out(w, ttCloseTag)
-			}
-		} else {
-			if entering {
-				dest = r.addAbsPrefix(dest)
-				var hrefBuf bytes.Buffer
-				hrefBuf.WriteString("href=\"")
-				escLink(&hrefBuf, dest)
-				hrefBuf.WriteByte('"')
-				attrs = append(attrs, hrefBuf.String())
-				if node.NoteID != 0 {
-					r.out(w, footnoteRef(r.FootnoteAnchorPrefix, node))
-					break
-				}
-				attrs = appendLinkAttrs(attrs, r.Flags, dest)
-				if len(node.LinkData.Title) > 0 {
-					var titleBuff bytes.Buffer
-					titleBuff.WriteString("title=\"")
-					escapeHTML(&titleBuff, node.LinkData.Title)
-					titleBuff.WriteByte('"')
-					attrs = append(attrs, titleBuff.String())
-				}
-				r.tag(w, aTag, attrs)
-			} else {
-				if node.NoteID != 0 {
-					break
-				}
-				r.out(w, aCloseTag)
-			}
-		}
-	case Image:
-		if r.Flags&SkipImages != 0 {
-			return SkipChildren
-		}
-		if entering {
-			dest := node.LinkData.Destination
-			dest = r.addAbsPrefix(dest)
-			if r.disableTags == 0 {
-				//if options.safe && potentiallyUnsafe(dest) {
-				//out(w, `
`))
-			}
-		}
-	case Code:
-		r.out(w, codeTag)
-		escapeHTML(w, node.Literal)
-		r.out(w, codeCloseTag)
-	case Document:
-		break
-	case Paragraph:
-		if skipParagraphTags(node) {
-			break
-		}
-		if entering {
-			// TODO: untangle this clusterfuck about when the newlines need
-			// to be added and when not.
-			if node.Prev != nil {
-				switch node.Prev.Type {
-				case HTMLBlock, List, Paragraph, Heading, CodeBlock, BlockQuote, HorizontalRule:
-					r.cr(w)
-				}
-			}
-			if node.Parent.Type == BlockQuote && node.Prev == nil {
-				r.cr(w)
-			}
-			r.out(w, pTag)
-		} else {
-			r.out(w, pCloseTag)
-			if !(node.Parent.Type == Item && node.Next == nil) {
-				r.cr(w)
-			}
-		}
-	case BlockQuote:
-		if entering {
-			r.cr(w)
-			r.out(w, blockquoteTag)
-		} else {
-			r.out(w, blockquoteCloseTag)
-			r.cr(w)
-		}
-	case HTMLBlock:
-		if r.Flags&SkipHTML != 0 {
-			break
-		}
-		r.cr(w)
-		r.out(w, node.Literal)
-		r.cr(w)
-	case Heading:
-		headingLevel := r.HTMLRendererParameters.HeadingLevelOffset + node.Level
-		openTag, closeTag := headingTagsFromLevel(headingLevel)
-		if entering {
-			if node.IsTitleblock {
-				attrs = append(attrs, `class="title"`)
-			}
-			if node.HeadingID != "" {
-				id := r.ensureUniqueHeadingID(node.HeadingID)
-				if r.HeadingIDPrefix != "" {
-					id = r.HeadingIDPrefix + id
-				}
-				if r.HeadingIDSuffix != "" {
-					id = id + r.HeadingIDSuffix
-				}
-				attrs = append(attrs, fmt.Sprintf(`id="%s"`, id))
-			}
-			r.cr(w)
-			r.tag(w, openTag, attrs)
-		} else {
-			r.out(w, closeTag)
-			if !(node.Parent.Type == Item && node.Next == nil) {
-				r.cr(w)
-			}
-		}
-	case HorizontalRule:
-		r.cr(w)
-		r.outHRTag(w)
-		r.cr(w)
-	case List:
-		openTag := ulTag
-		closeTag := ulCloseTag
-		if node.ListFlags&ListTypeOrdered != 0 {
-			openTag = olTag
-			closeTag = olCloseTag
-		}
-		if node.ListFlags&ListTypeDefinition != 0 {
-			openTag = dlTag
-			closeTag = dlCloseTag
-		}
-		if entering {
-			if node.IsFootnotesList {
-				r.out(w, footnotesDivBytes)
-				r.outHRTag(w)
-				r.cr(w)
-			}
-			r.cr(w)
-			if node.Parent.Type == Item && node.Parent.Parent.Tight {
-				r.cr(w)
-			}
-			r.tag(w, openTag[:len(openTag)-1], attrs)
-			r.cr(w)
-		} else {
-			r.out(w, closeTag)
-			//cr(w)
-			//if node.parent.Type != Item {
-			//	cr(w)
-			//}
-			if node.Parent.Type == Item && node.Next != nil {
-				r.cr(w)
-			}
-			if node.Parent.Type == Document || node.Parent.Type == BlockQuote {
-				r.cr(w)
-			}
-			if node.IsFootnotesList {
-				r.out(w, footnotesCloseDivBytes)
-			}
-		}
-	case Item:
-		openTag := liTag
-		closeTag := liCloseTag
-		if node.ListFlags&ListTypeDefinition != 0 {
-			openTag = ddTag
-			closeTag = ddCloseTag
-		}
-		if node.ListFlags&ListTypeTerm != 0 {
-			openTag = dtTag
-			closeTag = dtCloseTag
-		}
-		if entering {
-			if itemOpenCR(node) {
-				r.cr(w)
-			}
-			if node.ListData.RefLink != nil {
-				slug := slugify(node.ListData.RefLink)
-				r.out(w, footnoteItem(r.FootnoteAnchorPrefix, slug))
-				break
-			}
-			r.out(w, openTag)
-		} else {
-			if node.ListData.RefLink != nil {
-				slug := slugify(node.ListData.RefLink)
-				if r.Flags&FootnoteReturnLinks != 0 {
-					r.out(w, footnoteReturnLink(r.FootnoteAnchorPrefix, r.FootnoteReturnLinkContents, slug))
-				}
-			}
-			r.out(w, closeTag)
-			r.cr(w)
-		}
-	case CodeBlock:
-		attrs = appendLanguageAttr(attrs, node.Info)
-		r.cr(w)
-		r.out(w, preTag)
-		r.tag(w, codeTag[:len(codeTag)-1], attrs)
-		escapeHTML(w, node.Literal)
-		r.out(w, codeCloseTag)
-		r.out(w, preCloseTag)
-		if node.Parent.Type != Item {
-			r.cr(w)
-		}
-	case Table:
-		if entering {
-			r.cr(w)
-			r.out(w, tableTag)
-		} else {
-			r.out(w, tableCloseTag)
-			r.cr(w)
-		}
-	case TableCell:
-		openTag := tdTag
-		closeTag := tdCloseTag
-		if node.IsHeader {
-			openTag = thTag
-			closeTag = thCloseTag
-		}
-		if entering {
-			align := cellAlignment(node.Align)
-			if align != "" {
-				attrs = append(attrs, fmt.Sprintf(`align="%s"`, align))
-			}
-			if node.Prev == nil {
-				r.cr(w)
-			}
-			r.tag(w, openTag, attrs)
-		} else {
-			r.out(w, closeTag)
-			r.cr(w)
-		}
-	case TableHead:
-		if entering {
-			r.cr(w)
-			r.out(w, theadTag)
-		} else {
-			r.out(w, theadCloseTag)
-			r.cr(w)
-		}
-	case TableBody:
-		if entering {
-			r.cr(w)
-			r.out(w, tbodyTag)
-			// XXX: this is to adhere to a rather silly test. Should fix test.
-			if node.FirstChild == nil {
-				r.cr(w)
-			}
-		} else {
-			r.out(w, tbodyCloseTag)
-			r.cr(w)
-		}
-	case TableRow:
-		if entering {
-			r.cr(w)
-			r.out(w, trTag)
-		} else {
-			r.out(w, trCloseTag)
-			r.cr(w)
-		}
-	default:
-		panic("Unknown node type " + node.Type.String())
-	}
-	return GoToNext
-}
-
-// RenderHeader writes HTML document preamble and TOC if requested.
-func (r *HTMLRenderer) RenderHeader(w io.Writer, ast *Node) {
-	r.writeDocumentHeader(w)
-	if r.Flags&TOC != 0 {
-		r.writeTOC(w, ast)
-	}
-}
-
-// RenderFooter writes HTML document footer.
-func (r *HTMLRenderer) RenderFooter(w io.Writer, ast *Node) {
-	if r.Flags&CompletePage == 0 {
-		return
-	}
-	io.WriteString(w, "\n