diff --git a/core/api/convert/md/md.go b/core/api/convert/md/md.go index 1c83de4a..eaad1bed 100644 --- a/core/api/convert/md/md.go +++ b/core/api/convert/md/md.go @@ -24,5 +24,5 @@ import ( // PagesHTML set to the given (*api.DocumentConversionRequest).Filedata converted by the blackfriday lib. func Convert(ctx context.Context, in interface{}) (interface{}, error) { return &api.DocumentConversionResponse{ - PagesHTML: blackfriday.MarkdownCommon(in.(*api.DocumentConversionRequest).Filedata)}, nil + PagesHTML: blackfriday.Run(in.(*api.DocumentConversionRequest).Filedata)}, nil } diff --git a/domain/section/markdown/markdown.go b/domain/section/markdown/markdown.go index bf7539da..ef45ab5b 100644 --- a/domain/section/markdown/markdown.go +++ b/domain/section/markdown/markdown.go @@ -47,7 +47,7 @@ func (*Provider) Command(ctx *provider.Context, w http.ResponseWriter, r *http.R // Render converts markdown data into HTML suitable for browser rendering. func (*Provider) Render(ctx *provider.Context, config, data string) string { - result := blackfriday.MarkdownCommon([]byte(data)) + result := blackfriday.Run([]byte(data)) return string(result) } diff --git a/vendor/github.com/documize/blackfriday/README.md b/vendor/github.com/documize/blackfriday/README.md index 52e3b25b..2e0db355 100644 --- a/vendor/github.com/documize/blackfriday/README.md +++ b/vendor/github.com/documize/blackfriday/README.md @@ -8,7 +8,7 @@ punctuation substitutions, etc.), and it is safe for all utf-8 (unicode) input. HTML output is currently supported, along with Smartypants -extensions. An experimental LaTeX output engine is also included. +extensions. It started as a translation from C of [Sundown][3]. @@ -16,63 +16,87 @@ It started as a translation from C of [Sundown][3]. Installation ------------ -Blackfriday is compatible with Go 1. If you are using an older -release of Go, consider using v1.1 of blackfriday, which was based -on the last stable release of Go prior to Go 1. You can find it as a -tagged commit on github. +Blackfriday is compatible with any modern Go release. With Go 1.7 and git +installed: -With Go 1 and git installed: - - go get github.com/russross/blackfriday + go get gopkg.in/russross/blackfriday.v2 will download, compile, and install the package into your `$GOPATH` directory hierarchy. Alternatively, you can achieve the same if you import it into a project: - import "github.com/russross/blackfriday" + import "gopkg.in/russross/blackfriday.v2" and `go get` without parameters. + +Versions +-------- + +Currently maintained and recommended version of Blackfriday is `v2`. It's being +developed on its own branch: https://github.com/russross/blackfriday/v2. You +should install and import it via [gopkg.in][6] at +`gopkg.in/russross/blackfriday.v2`. + +Version 2 offers a number of improvements over v1: + +* Cleaned up API +* A separate call to [`Parse`][4], which produces an abstract syntax tree for + the document +* Latest bug fixes +* Flexibility to easily add your own rendering extensions + +Potential drawbacks: + +* Our benchmarks show v2 to be slightly slower than v1. Currently in the + ballpark of around 15%. +* API breakage. If you can't afford modifying your code to adhere to the new API + and don't care too much about the new features, v2 is probably not for you. +* Several bug fixes are trailing behind and still need to be forward-ported to + v2. See issue [#348](https://github.com/russross/blackfriday/issues/348) for + tracking. 
+ Usage ----- -For basic usage, it is as simple as getting your input into a byte -slice and calling: +For the most sensible markdown processing, it is as simple as getting your input +into a byte slice and calling: - output := blackfriday.MarkdownBasic(input) +```go +output := blackfriday.Run(input) +``` -This renders it with no extensions enabled. To get a more useful -feature set, use this instead: +Your input will be parsed and the output rendered with a set of most popular +extensions enabled. If you want the most basic feature set, corresponding with +the bare Markdown specification, use: - output := blackfriday.MarkdownCommon(input) +```go +output := blackfriday.Run(input, blackfriday.WithNoExtensions()) +``` ### Sanitize untrusted content Blackfriday itself does nothing to protect against malicious content. If you are -dealing with user-supplied markdown, we recommend running blackfriday's output -through HTML sanitizer such as -[Bluemonday](https://github.com/microcosm-cc/bluemonday). +dealing with user-supplied markdown, we recommend running Blackfriday's output +through HTML sanitizer such as [Bluemonday][5]. -Here's an example of simple usage of blackfriday together with bluemonday: +Here's an example of simple usage of Blackfriday together with Bluemonday: -``` go +```go import ( "github.com/microcosm-cc/bluemonday" "github.com/russross/blackfriday" ) // ... -unsafe := blackfriday.MarkdownCommon(input) +unsafe := blackfriday.Run(input) html := bluemonday.UGCPolicy().SanitizeBytes(unsafe) ``` ### Custom options -If you want to customize the set of options, first get a renderer -(currently either the HTML or LaTeX output engines), then use it to -call the more general `Markdown` function. For examples, see the -implementations of `MarkdownBasic` and `MarkdownCommon` in -`markdown.go`. +If you want to customize the set of options, use `blackfriday.WithExtensions`, +`blackfriday.WithRenderer` and `blackfriday.WithRefOverride`. You can also check out `blackfriday-tool` for a more complete example of how to use it. Download and install it using: @@ -114,7 +138,7 @@ All features of Sundown are supported, including: know and send me the input that does it. NOTE: "safety" in this context means *runtime safety only*. In order to - protect yourself agains JavaScript injection in untrusted content, see + protect yourself against JavaScript injection in untrusted content, see [this example](https://github.com/russross/blackfriday#sanitize-untrusted-content). * **Fast processing**. It is fast enough to render on-demand in @@ -160,7 +184,7 @@ implements the following extensions: and supply a language (to make syntax highlighting simple). Just mark it like this: - ``` go + ```go func getTrue() bool { return true } @@ -169,16 +193,33 @@ implements the following extensions: You can use 3 or more backticks to mark the beginning of the block, and the same number to mark the end of the block. +* **Definition lists**. A simple definition list is made of a single-line + term followed by a colon and the definition for that term. + + Cat + : Fluffy animal everyone likes + + Internet + : Vector of transmission for pictures of cats + + Terms must be separated from the previous definition by a blank line. + +* **Footnotes**. A marker in the text that will become a superscript number; + a footnote definition that will be placed in a list of footnotes at the + end of the document. A footnote looks like this: + + This is a footnote.[^1] + + [^1]: the footnote text. + * **Autolinking**. 
Blackfriday can find URLs that have not been explicitly marked as links and turn them into links. * **Strikethrough**. Use two tildes (`~~`) to mark text that should be crossed out. -* **Hard line breaks**. With this extension enabled (it is off by - default in the `MarkdownBasic` and `MarkdownCommon` convenience - functions), newlines in the input translate into line breaks in - the output. +* **Hard line breaks**. With this extension enabled newlines in the input + translate into line breaks in the output. This extension is off by default. * **Smart quotes**. Smartypants-style punctuation substitution is supported, turning normal double- and single-quote marks into @@ -205,7 +246,7 @@ are a few of note: * [github_flavored_markdown](https://godoc.org/github.com/shurcooL/github_flavored_markdown): provides a GitHub Flavored Markdown renderer with fenced code block - highlighting, clickable header anchor links. + highlighting, clickable heading anchor links. It's not customizable, and its goal is to produce HTML output equivalent to the [GitHub Markdown API endpoint](https://developer.github.com/v3/markdown/#render-a-markdown-document-in-raw-mode), @@ -214,15 +255,8 @@ are a few of note: * [markdownfmt](https://github.com/shurcooL/markdownfmt): like gofmt, but for markdown. -* LaTeX output: renders output as LaTeX. This is currently part of the - main Blackfriday repository, but may be split into its own project - in the future. If you are interested in owning and maintaining the - LaTeX output component, please be in touch. - - It renders some basic documents, but is only experimental at this - point. In particular, it does not do any inline escaping, so input - that happens to look like LaTeX code will be passed through without - modification. +* [LaTeX output](https://bitbucket.org/ambrevar/blackfriday-latex): + renders output as LaTeX. Todo @@ -241,6 +275,9 @@ License [Blackfriday is distributed under the Simplified BSD License](LICENSE.txt) - [1]: http://daringfireball.net/projects/markdown/ "Markdown" - [2]: http://golang.org/ "Go Language" + [1]: https://daringfireball.net/projects/markdown/ "Markdown" + [2]: https://golang.org/ "Go Language" [3]: https://github.com/vmg/sundown "Sundown" + [4]: https://godoc.org/gopkg.in/russross/blackfriday.v2#Parse "Parse func" + [5]: https://github.com/microcosm-cc/bluemonday "Bluemonday" + [6]: https://labix.org/gopkg.in "gopkg.in" diff --git a/vendor/github.com/documize/blackfriday/block.go b/vendor/github.com/documize/blackfriday/block.go index 1f300b33..d7da33f2 100644 --- a/vendor/github.com/documize/blackfriday/block.go +++ b/vendor/github.com/documize/blackfriday/block.go @@ -15,18 +15,26 @@ package blackfriday import ( "bytes" + "html" + "regexp" "github.com/shurcooL/sanitized_anchor_name" ) +const ( + charEntity = "&(?:#x[a-f0-9]{1,8}|#[0-9]{1,8}|[a-z][a-z0-9]{1,31});" + escapable = "[!\"#$%&'()*+,./:;<=>?@[\\\\\\]^_`{|}~-]" +) + +var ( + reBackslashOrAmp = regexp.MustCompile("[\\&]") + reEntityOrEscapedChar = regexp.MustCompile("(?i)\\\\" + escapable + "|" + charEntity) +) + // Parse block-level data. // Note: this function and many that it calls assume that // the input buffer ends with a newline. 
-func (p *parser) block(out *bytes.Buffer, data []byte) { - if len(data) == 0 || data[len(data)-1] != '\n' { - panic("block input is missing terminating newline") - } - +func (p *Markdown) block(data []byte) { // this is called recursively: enforce a maximum depth if p.nesting >= p.maxNesting { return @@ -35,14 +43,14 @@ func (p *parser) block(out *bytes.Buffer, data []byte) { // parse out one block-level construct at a time for len(data) > 0 { - // prefixed header: + // prefixed heading: // - // # Header 1 - // ## Header 2 + // # Heading 1 + // ## Heading 2 // ... - // ###### Header 6 - if p.isPrefixHeader(data) { - data = data[p.prefixHeader(out, data):] + // ###### Heading 6 + if p.isPrefixHeading(data) { + data = data[p.prefixHeading(data):] continue } @@ -52,7 +60,7 @@ func (p *parser) block(out *bytes.Buffer, data []byte) { // ... // if data[0] == '<' { - if i := p.html(out, data, true); i > 0 { + if i := p.html(data, true); i > 0 { data = data[i:] continue } @@ -63,9 +71,9 @@ func (p *parser) block(out *bytes.Buffer, data []byte) { // % stuff // % more stuff // % even more stuff - if p.flags&EXTENSION_TITLEBLOCK != 0 { + if p.extensions&Titleblock != 0 { if data[0] == '%' { - if i := p.titleBlock(out, data, true); i > 0 { + if i := p.titleBlock(data, true); i > 0 { data = data[i:] continue } @@ -87,7 +95,7 @@ func (p *parser) block(out *bytes.Buffer, data []byte) { // return b // } if p.codePrefix(data) > 0 { - data = data[p.code(out, data):] + data = data[p.code(data):] continue } @@ -101,8 +109,8 @@ func (p *parser) block(out *bytes.Buffer, data []byte) { // return n * fact(n-1) // } // ``` - if p.flags&EXTENSION_FENCED_CODE != 0 { - if i := p.fencedCode(out, data, true); i > 0 { + if p.extensions&FencedCode != 0 { + if i := p.fencedCodeBlock(data, true); i > 0 { data = data[i:] continue } @@ -116,9 +124,9 @@ func (p *parser) block(out *bytes.Buffer, data []byte) { // or // ______ if p.isHRule(data) { - p.r.HRule(out) + p.addBlock(HorizontalRule, nil) var i int - for i = 0; data[i] != '\n'; i++ { + for i = 0; i < len(data) && data[i] != '\n'; i++ { } data = data[i:] continue @@ -129,7 +137,7 @@ func (p *parser) block(out *bytes.Buffer, data []byte) { // > A big quote I found somewhere // > on the web if p.quotePrefix(data) > 0 { - data = data[p.quote(out, data):] + data = data[p.quote(data):] continue } @@ -139,8 +147,8 @@ func (p *parser) block(out *bytes.Buffer, data []byte) { // ------|-----|--------- // Bob | 31 | 555-1234 // Alice | 27 | 555-4321 - if p.flags&EXTENSION_TABLES != 0 { - if i := p.table(out, data); i > 0 { + if p.extensions&Tables != 0 { + if i := p.table(data); i > 0 { data = data[i:] continue } @@ -153,7 +161,7 @@ func (p *parser) block(out *bytes.Buffer, data []byte) { // // also works with + or - if p.uliPrefix(data) > 0 { - data = data[p.list(out, data, 0):] + data = data[p.list(data, 0):] continue } @@ -162,7 +170,7 @@ func (p *parser) block(out *bytes.Buffer, data []byte) { // 1. Item 1 // 2. 
Item 2 if p.oliPrefix(data) > 0 { - data = data[p.list(out, data, LIST_TYPE_ORDERED):] + data = data[p.list(data, ListTypeOrdered):] continue } @@ -174,55 +182,62 @@ func (p *parser) block(out *bytes.Buffer, data []byte) { // // Term 2 // : Definition c - if p.flags&EXTENSION_DEFINITION_LISTS != 0 { + if p.extensions&DefinitionLists != 0 { if p.dliPrefix(data) > 0 { - data = data[p.list(out, data, LIST_TYPE_DEFINITION):] + data = data[p.list(data, ListTypeDefinition):] continue } } // anything else must look like a normal paragraph - // note: this finds underlined headers, too - data = data[p.paragraph(out, data):] + // note: this finds underlined headings, too + data = data[p.paragraph(data):] } p.nesting-- } -func (p *parser) isPrefixHeader(data []byte) bool { +func (p *Markdown) addBlock(typ NodeType, content []byte) *Node { + p.closeUnmatchedBlocks() + container := p.addChild(typ, 0) + container.content = content + return container +} + +func (p *Markdown) isPrefixHeading(data []byte) bool { if data[0] != '#' { return false } - if p.flags&EXTENSION_SPACE_HEADERS != 0 { + if p.extensions&SpaceHeadings != 0 { level := 0 - for level < 6 && data[level] == '#' { + for level < 6 && level < len(data) && data[level] == '#' { level++ } - if data[level] != ' ' { + if level == len(data) || data[level] != ' ' { return false } } return true } -func (p *parser) prefixHeader(out *bytes.Buffer, data []byte) int { +func (p *Markdown) prefixHeading(data []byte) int { level := 0 - for level < 6 && data[level] == '#' { + for level < 6 && level < len(data) && data[level] == '#' { level++ } i := skipChar(data, level, ' ') end := skipUntilChar(data, i, '\n') skip := end id := "" - if p.flags&EXTENSION_HEADER_IDS != 0 { + if p.extensions&HeadingIDs != 0 { j, k := 0, 0 - // find start/end of header id + // find start/end of heading id for j = i; j < end-1 && (data[j] != '{' || data[j+1] != '#'); j++ { } for k = j + 1; k < end && data[k] != '}'; k++ { } - // extract header id iff found + // extract heading id iff found if j < end && k < end { id = string(data[j+2 : k]) end = j @@ -242,45 +257,41 @@ func (p *parser) prefixHeader(out *bytes.Buffer, data []byte) int { end-- } if end > i { - if id == "" && p.flags&EXTENSION_AUTO_HEADER_IDS != 0 { + if id == "" && p.extensions&AutoHeadingIDs != 0 { id = sanitized_anchor_name.Create(string(data[i:end])) } - work := func() bool { - p.inline(out, data[i:end]) - return true - } - p.r.Header(out, work, level, id) + block := p.addBlock(Heading, data[i:end]) + block.HeadingID = id + block.Level = level } return skip } -func (p *parser) isUnderlinedHeader(data []byte) int { - // test of level 1 header +func (p *Markdown) isUnderlinedHeading(data []byte) int { + // test of level 1 heading if data[0] == '=' { i := skipChar(data, 1, '=') i = skipChar(data, i, ' ') - if data[i] == '\n' { + if i < len(data) && data[i] == '\n' { return 1 - } else { - return 0 } + return 0 } - // test of level 2 header + // test of level 2 heading if data[0] == '-' { i := skipChar(data, 1, '-') i = skipChar(data, i, ' ') - if data[i] == '\n' { + if i < len(data) && data[i] == '\n' { return 2 - } else { - return 0 } + return 0 } return 0 } -func (p *parser) titleBlock(out *bytes.Buffer, data []byte, doRender bool) int { +func (p *Markdown) titleBlock(data []byte, doRender bool) int { if data[0] != '%' { return 0 } @@ -294,12 +305,17 @@ func (p *parser) titleBlock(out *bytes.Buffer, data []byte, doRender bool) int { } data = bytes.Join(splitData[0:i], []byte("\n")) - p.r.TitleBlock(out, data) + 
consumed := len(data) + data = bytes.TrimPrefix(data, []byte("% ")) + data = bytes.Replace(data, []byte("\n% "), []byte("\n"), -1) + block := p.addBlock(Heading, data) + block.Level = 1 + block.IsTitleblock = true - return len(data) + return consumed } -func (p *parser) html(out *bytes.Buffer, data []byte, doRender bool) int { +func (p *Markdown) html(data []byte, doRender bool) int { var i, j int // identify the opening tag @@ -311,12 +327,12 @@ func (p *parser) html(out *bytes.Buffer, data []byte, doRender bool) int { // handle special cases if !tagfound { // check for an HTML comment - if size := p.htmlComment(out, data, doRender); size > 0 { + if size := p.htmlComment(data, doRender); size > 0 { return size } // check for an
List
\n\nNested list
\n\n" + "List
\n\nNested list
\n\n" + "Text 2
\n", } - doTestsBlock(t, tests, EXTENSION_DEFINITION_LISTS) + doTestsBlock(t, tests, DefinitionLists) } func TestPreformattedHtml(t *testing.T) { @@ -976,7 +926,7 @@ func TestPreformattedHtmlLax(t *testing.T) { "Paragraph\n\nParagraph
\n\nAnd here?
\n", } - doTestsBlock(t, tests, EXTENSION_LAX_HTML_BLOCKS) + doTestsBlock(t, tests, LaxHTMLBlocks) } func TestFencedCodeBlock(t *testing.T) { @@ -1061,8 +1011,126 @@ func TestFencedCodeBlock(t *testing.T) { "Some text before a fenced code block\n``` oz\ncode blocks breakup paragraphs\n```\nSome text in between\n``` oz\nmultiple code blocks work okay\n```\nAnd some text after a fenced code block", "Some text before a fenced code block
\n\ncode blocks breakup paragraphs\n
\n\nSome text in between
\n\nmultiple code blocks work okay\n
\n\nAnd some text after a fenced code block
\n", + + "```\n[]:()\n```\n", + "[]:()\n
\n",
+
+ "```\n[]:()\n[]:)\n[]:(\n[]:x\n[]:testing\n[:testing\n\n[]:\nlinebreak\n[]()\n\n[]:\n[]()\n```",
+ "[]:()\n[]:)\n[]:(\n[]:x\n[]:testing\n[:testing\n\n[]:\nlinebreak\n[]()\n\n[]:\n[]()\n
\n",
}
- doTestsBlock(t, tests, EXTENSION_FENCED_CODE)
+ doTestsBlock(t, tests, FencedCode)
+}
+
+func TestFencedCodeInsideBlockquotes(t *testing.T) {
+ cat := func(s ...string) string { return strings.Join(s, "\n") }
+ var tests = []string{
+ cat("> ```go",
+ "package moo",
+ "",
+ "```",
+ ""),
+ `++`, + // ------------------------------------------- + cat("> foo", + "> ", + "> ```go", + "package moo", + "```", + "> ", + "> goo.", + ""), + `+package moo + +
++`, + // ------------------------------------------- + cat("> foo", + "> ", + "> quote", + "continues", + "```", + ""), + `foo
+ ++ +package moo +
goo.
+
++`, + // ------------------------------------------- + cat("> foo", + "> ", + "> ```go", + "package moo", + "```", + "> ", + "> goo.", + "> ", + "> ```go", + "package zoo", + "```", + "> ", + "> woo.", + ""), + `foo
+ +quote +continues +` + "```" + `
+
++`, + } + + // These 2 alternative forms of blockquoted fenced code blocks should produce same output. + forms := [2]string{ + cat("> plain quoted text", + "> ```fenced", + "code", + " with leading single space correctly preserved", + "okay", + "```", + "> rest of quoted text"), + cat("> plain quoted text", + "> ```fenced", + "> code", + "> with leading single space correctly preserved", + "> okay", + "> ```", + "> rest of quoted text"), + } + want := `foo
+ ++ +package moo +
goo.
+ ++ +package zoo +
woo.
+
++` + tests = append(tests, forms[0], want) + tests = append(tests, forms[1], want) + + doTestsBlock(t, tests, FencedCode) } func TestTable(t *testing.T) { @@ -1109,7 +1177,7 @@ func TestTable(t *testing.T) { "a|b\\|c|d\n---|---|---\nf|g\\|h|i\n", "plain quoted text
+ ++ +code + with leading single space correctly preserved +okay +
rest of quoted text
+
a | \nb|c | \nd | \n
---|---|---|
f | \ng|h | \ni | \n
List
\n\nnormal text
\n\n``` oz\n
\n\nleading spaces
\n\n```\n
\n",
}
- doTestsBlock(t, tests, EXTENSION_FENCED_CODE|EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK)
+ doTestsBlock(t, tests, FencedCode|NoEmptyLineBeforeBlock)
}
func TestTitleBlock_EXTENSION_TITLEBLOCK(t *testing.T) {
@@ -1398,10 +1466,226 @@ func TestTitleBlock_EXTENSION_TITLEBLOCK(t *testing.T) {
"Some text
\n\n\n", + + "Some text\n\n\n", + "Some text
\n\n\n", + + "Some text\n\n\n", + "Some text
\n\n\n", + } + doTestsBlock(t, tests, 0) +} + +func TestTOC(t *testing.T) { + var tests = []string{ + "# Title\n\n##Subtitle1\n\n##Subtitle2", + //"\n\ncode
foo
+ + + +`, + } + doTestsParam(t, tests, TestParams{HTMLFlags: UseXHTML | CompletePage}) +} + +func TestIsFenceLine(t *testing.T) { + tests := []struct { + data []byte + syntaxRequested bool + wantEnd int + wantMarker string + wantSyntax string + }{ + { + data: []byte("```"), + wantEnd: 3, + wantMarker: "```", + }, + { + data: []byte("```\nstuff here\n"), + wantEnd: 4, + wantMarker: "```", + }, + { + data: []byte("```\nstuff here\n"), + syntaxRequested: true, + wantEnd: 4, + wantMarker: "```", + }, + { + data: []byte("stuff here\n```\n"), + wantEnd: 0, + }, + { + data: []byte("```"), + syntaxRequested: true, + wantEnd: 3, + wantMarker: "```", + }, + { + data: []byte("``` go"), + syntaxRequested: true, + wantEnd: 6, + wantMarker: "```", + wantSyntax: "go", + }, } - doTestsBlock(t, tests, EXTENSION_TITLEBLOCK) - + for _, test := range tests { + var syntax *string + if test.syntaxRequested { + syntax = new(string) + } + end, marker := isFenceLine(test.data, syntax, "```") + if got, want := end, test.wantEnd; got != want { + t.Errorf("got end %v, want %v", got, want) + } + if got, want := marker, test.wantMarker; got != want { + t.Errorf("got marker %q, want %q", got, want) + } + if test.syntaxRequested { + if got, want := *syntax, test.wantSyntax; got != want { + t.Errorf("got syntax %q, want %q", got, want) + } + } + } } diff --git a/vendor/github.com/documize/blackfriday/doc.go b/vendor/github.com/documize/blackfriday/doc.go new file mode 100644 index 00000000..5b3fa987 --- /dev/null +++ b/vendor/github.com/documize/blackfriday/doc.go @@ -0,0 +1,18 @@ +// Package blackfriday is a markdown processor. +// +// It translates plain text with simple formatting rules into an AST, which can +// then be further processed to HTML (provided by Blackfriday itself) or other +// formats (provided by the community). +// +// The simplest way to invoke Blackfriday is to call the Run function. It will +// take a text input and produce a text output in HTML (or other format). +// +// A slightly more sophisticated way to use Blackfriday is to create a Markdown +// processor and to call Parse, which returns a syntax tree for the input +// document. You can leverage Blackfriday's parsing for content extraction from +// markdown documents. You can assign a custom renderer and set various options +// to the Markdown processor. +// +// If you're interested in calling Blackfriday from command line, see +// https://github.com/russross/blackfriday-tool. 
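As a quick illustration of the two entry points the package comment above describes, here is a minimal sketch. It is not part of the vendored file; it assumes the vendored import path `github.com/documize/blackfriday` and the standard blackfriday v2 API (`Run`, `New`, `Parse`, `Node.Walk`):

```go
package main

import (
	"fmt"

	"github.com/documize/blackfriday" // assumed vendored path; upstream is gopkg.in/russross/blackfriday.v2
)

func main() {
	input := []byte("# Title\n\nSome *markdown* text.\n")

	// One-shot conversion: parse and render HTML with the default extensions.
	fmt.Println(string(blackfriday.Run(input)))

	// Parse-then-walk: build the AST and inspect it without rendering.
	md := blackfriday.New(blackfriday.WithExtensions(blackfriday.CommonExtensions))
	ast := md.Parse(input)
	ast.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
		if entering && node.Type == blackfriday.Heading {
			fmt.Println("heading at level", node.Level)
		}
		return blackfriday.GoToNext
	})
}
```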
+package blackfriday diff --git a/vendor/github.com/documize/blackfriday/esc.go b/vendor/github.com/documize/blackfriday/esc.go new file mode 100644 index 00000000..6385f27c --- /dev/null +++ b/vendor/github.com/documize/blackfriday/esc.go @@ -0,0 +1,34 @@ +package blackfriday + +import ( + "html" + "io" +) + +var htmlEscaper = [256][]byte{ + '&': []byte("&"), + '<': []byte("<"), + '>': []byte(">"), + '"': []byte("""), +} + +func escapeHTML(w io.Writer, s []byte) { + var start, end int + for end < len(s) { + escSeq := htmlEscaper[s[end]] + if escSeq != nil { + w.Write(s[start:end]) + w.Write(escSeq) + start = end + 1 + } + end++ + } + if start < len(s) && end <= len(s) { + w.Write(s[start:end]) + } +} + +func escLink(w io.Writer, text []byte) { + unesc := html.UnescapeString(string(text)) + escapeHTML(w, []byte(unesc)) +} diff --git a/vendor/github.com/documize/blackfriday/esc_test.go b/vendor/github.com/documize/blackfriday/esc_test.go new file mode 100644 index 00000000..ff67d546 --- /dev/null +++ b/vendor/github.com/documize/blackfriday/esc_test.go @@ -0,0 +1,48 @@ +package blackfriday + +import ( + "bytes" + "testing" +) + +func TestEsc(t *testing.T) { + tests := []string{ + "abc", "abc", + "a&c", "a&c", + "<", "<", + "[]:<", "[]:<", + "Hello |" + processingInstruction = "[<][?].*?[?][>]" + singleQuotedValue = "'[^']*'" + tagName = "[A-Za-z][A-Za-z0-9-]*" + unquotedValue = "[^\"'=<>`\\x00-\\x20]+" +) + +// HTMLRendererParameters is a collection of supplementary parameters tweaking +// the behavior of various parts of HTML renderer. +type HTMLRendererParameters struct { // Prepend this text to each relative URL. AbsolutePrefix string // Add this text to each footnote anchor, to ensure uniqueness. @@ -64,34 +82,34 @@ type HtmlRendererParameters struct { // HTML_FOOTNOTE_RETURN_LINKS flag is enabled. If blank, the string // [return] is used. FootnoteReturnLinkContents string - // If set, add this text to the front of each Header ID, to ensure + // If set, add this text to the front of each Heading ID, to ensure // uniqueness. - HeaderIDPrefix string - // If set, add this text to the back of each Header ID, to ensure uniqueness. - HeaderIDSuffix string + HeadingIDPrefix string + // If set, add this text to the back of each Heading ID, to ensure uniqueness. + HeadingIDSuffix string + + Title string // Document title (used if CompletePage is set) + CSS string // Optional CSS file URL (used if CompletePage is set) + Icon string // Optional icon file URL (used if CompletePage is set) + + Flags HTMLFlags // Flags allow customizing this renderer's behavior } -// Html is a type that implements the Renderer interface for HTML output. +// HTMLRenderer is a type that implements the Renderer interface for HTML output. // -// Do not create this directly, instead use the HtmlRenderer function. -type Html struct { - flags int // HTML_* options +// Do not create this directly, instead use the NewHTMLRenderer function. +type HTMLRenderer struct { + HTMLRendererParameters + closeTag string // how to end singleton tags: either " />" or ">" - title string // document title - css string // optional css file url (used with HTML_COMPLETE_PAGE) - parameters HtmlRendererParameters + // Track heading IDs to prevent ID collision in a single generation. + headingIDs map[string]int - // table of contents data - tocMarker int - headerCount int - currentLevel int - toc *bytes.Buffer + lastOutputLen int + disableTags int - // Track header IDs to prevent ID collision in a single generation. 
- headerIDs map[string]int - - smartypants *smartypantsRenderer + sr *SPRenderer } const ( @@ -99,715 +117,31 @@ const ( htmlClose = ">" ) -// HtmlRenderer creates and configures an Html object, which +// NewHTMLRenderer creates and configures an HTMLRenderer object, which // satisfies the Renderer interface. -// -// flags is a set of HTML_* options ORed together. -// title is the title of the document, and css is a URL for the document's -// stylesheet. -// title and css are only used when HTML_COMPLETE_PAGE is selected. -func HtmlRenderer(flags int, title string, css string) Renderer { - return HtmlRendererWithParameters(flags, title, css, HtmlRendererParameters{}) -} - -func HtmlRendererWithParameters(flags int, title string, - css string, renderParameters HtmlRendererParameters) Renderer { +func NewHTMLRenderer(params HTMLRendererParameters) *HTMLRenderer { // configure the rendering engine closeTag := htmlClose - if flags&HTML_USE_XHTML != 0 { + if params.Flags&UseXHTML != 0 { closeTag = xhtmlClose } - if renderParameters.FootnoteReturnLinkContents == "" { - renderParameters.FootnoteReturnLinkContents = `[return]` + if params.FootnoteReturnLinkContents == "" { + params.FootnoteReturnLinkContents = `[return]` } - return &Html{ - flags: flags, + return &HTMLRenderer{ + HTMLRendererParameters: params, + closeTag: closeTag, - title: title, - css: css, - parameters: renderParameters, + headingIDs: make(map[string]int), - headerCount: 0, - currentLevel: 0, - toc: new(bytes.Buffer), - - headerIDs: make(map[string]int), - - smartypants: smartypants(flags), + sr: NewSmartypantsRenderer(params.Flags), } } -// Using if statements is a bit faster than a switch statement. As the compiler -// improves, this should be unnecessary this is only worthwhile because -// attrEscape is the single largest CPU user in normal use. -// Also tried using map, but that gave a ~3x slowdown. -func escapeSingleChar(char byte) (string, bool) { - if char == '"' { - return """, true - } - if char == '&' { - return "&", true - } - if char == '<' { - return "<", true - } - if char == '>' { - return ">", true - } - return "", false -} - -func attrEscape(out *bytes.Buffer, src []byte) { - org := 0 - for i, ch := range src { - if entity, ok := escapeSingleChar(ch); ok { - if i > org { - // copy all the normal characters since the last escape - out.Write(src[org:i]) - } - org = i + 1 - out.WriteString(entity) - } - } - if org < len(src) { - out.Write(src[org:]) - } -} - -func entityEscapeWithSkip(out *bytes.Buffer, src []byte, skipRanges [][]int) { - end := 0 - for _, rang := range skipRanges { - attrEscape(out, src[end:rang[0]]) - out.Write(src[rang[0]:rang[1]]) - end = rang[1] - } - attrEscape(out, src[end:]) -} - -func (options *Html) GetFlags() int { - return options.flags -} - -func (options *Html) TitleBlock(out *bytes.Buffer, text []byte) { - text = bytes.TrimPrefix(text, []byte("% ")) - text = bytes.Replace(text, []byte("\n% "), []byte("\n"), -1) - out.WriteString("")
- } else {
- out.WriteString("\">")
- }
-
- attrEscape(out, text)
- out.WriteString("
\n")
-}
-
-func (options *Html) BlockQuote(out *bytes.Buffer, text []byte) {
- doubleSpace(out)
- out.WriteString("<blockquote>\n")
- out.Write(text)
- out.WriteString("</blockquote>\n")
-}
-
-func (options *Html) Table(out *bytes.Buffer, header []byte, body []byte, columnData []int) {
- doubleSpace(out)
- out.WriteString("<table>\n<thead>\n")
- out.Write(header)
- out.WriteString("</thead>\n\n<tbody>\n")
- out.Write(body)
- out.WriteString("</tbody>\n</table>\n")
-}
-
-func (options *Html) TableRow(out *bytes.Buffer, text []byte) {
- doubleSpace(out)
- out.WriteString("<tr>\n")
- out.Write(text)
- out.WriteString("\n</tr>\n")
-}
-
-func (options *Html) TableHeaderCell(out *bytes.Buffer, text []byte, align int) {
- doubleSpace(out)
- switch align {
- case TABLE_ALIGNMENT_LEFT:
- out.WriteString("<th align=\"left\">")
- case TABLE_ALIGNMENT_RIGHT:
- out.WriteString("<th align=\"right\">")
- case TABLE_ALIGNMENT_CENTER:
- out.WriteString("<th align=\"center\">")
- default:
- out.WriteString("<th>")
- }
-
- out.Write(text)
- out.WriteString("</th>")
-}
-
-func (options *Html) TableCell(out *bytes.Buffer, text []byte, align int) {
- doubleSpace(out)
- switch align {
- case TABLE_ALIGNMENT_LEFT:
- out.WriteString("<td align=\"left\">")
- case TABLE_ALIGNMENT_RIGHT:
- out.WriteString("<td align=\"right\">")
- case TABLE_ALIGNMENT_CENTER:
- out.WriteString("<td align=\"center\">")
- default:
- out.WriteString("<td>")
- }
-
- out.Write(text)
- out.WriteString("</td>")
-}
-
-func (options *Html) Footnotes(out *bytes.Buffer, text func() bool) {
- out.WriteString("<div class=\"footnotes\">\n")
- options.HRule(out)
- options.List(out, text, LIST_TYPE_ORDERED)
- out.WriteString("</div>\n")
-}
-
-func (options *Html) FootnoteItem(out *bytes.Buffer, name, text []byte, flags int) {
- if flags&LIST_ITEM_CONTAINS_BLOCK != 0 || flags&LIST_ITEM_BEGINNING_OF_LIST != 0 {
- doubleSpace(out)
- }
- slug := slugify(name)
- out.WriteString(``)
- out.Write(text)
- if options.flags&HTML_FOOTNOTE_RETURN_LINKS != 0 {
- out.WriteString(` `)
- out.WriteString(options.parameters.FootnoteReturnLinkContents)
- out.WriteString(``)
- }
- out.WriteString(" \n")
-}
-
-func (options *Html) List(out *bytes.Buffer, text func() bool, flags int) {
- marker := out.Len()
- doubleSpace(out)
-
- if flags&LIST_TYPE_DEFINITION != 0 {
- out.WriteString("")
- } else if flags&LIST_TYPE_ORDERED != 0 {
- out.WriteString("")
- } else {
- out.WriteString("")
- }
- if !text() {
- out.Truncate(marker)
- return
- }
- if flags&LIST_TYPE_DEFINITION != 0 {
- out.WriteString("
\n")
- } else if flags&LIST_TYPE_ORDERED != 0 {
- out.WriteString("\n")
- } else {
- out.WriteString("\n")
- }
-}
-
-func (options *Html) ListItem(out *bytes.Buffer, text []byte, flags int) {
- if (flags&LIST_ITEM_CONTAINS_BLOCK != 0 && flags&LIST_TYPE_DEFINITION == 0) ||
- flags&LIST_ITEM_BEGINNING_OF_LIST != 0 {
- doubleSpace(out)
- }
- if flags&LIST_TYPE_TERM != 0 {
- out.WriteString("")
- } else if flags&LIST_TYPE_DEFINITION != 0 {
- out.WriteString(" ")
- } else {
- out.WriteString("")
- }
- out.Write(text)
- if flags&LIST_TYPE_TERM != 0 {
- out.WriteString("\n")
- } else if flags&LIST_TYPE_DEFINITION != 0 {
- out.WriteString(" \n")
- } else {
- out.WriteString("\n")
- }
-}
-
-func (options *Html) Paragraph(out *bytes.Buffer, text func() bool) {
- marker := out.Len()
- doubleSpace(out)
-
- out.WriteString("")
- if !text() {
- out.Truncate(marker)
- return
- }
- out.WriteString("
\n")
-}
-
-func (options *Html) AutoLink(out *bytes.Buffer, link []byte, kind int) {
- skipRanges := htmlEntity.FindAllIndex(link, -1)
- if options.flags&HTML_SAFELINK != 0 && !isSafeLink(link) && kind != LINK_TYPE_EMAIL {
- // mark it but don't link it if it is not a safe link: no smartypants
- out.WriteString("")
- entityEscapeWithSkip(out, link, skipRanges)
- out.WriteString("")
- return
- }
-
- out.WriteString(" 0 {
- out.WriteString(fmt.Sprintf("\" rel=\"%s", strings.Join(relAttrs, " ")))
- }
-
- // blank target only add to external link
- if options.flags&HTML_HREF_TARGET_BLANK != 0 && !isRelativeLink(link) {
- out.WriteString("\" target=\"_blank")
- }
-
- out.WriteString("\">")
-
- // Pretty print: if we get an email address as
- // an actual URI, e.g. `mailto:foo@bar.com`, we don't
- // want to print the `mailto:` prefix
- switch {
- case bytes.HasPrefix(link, []byte("mailto://")):
- attrEscape(out, link[len("mailto://"):])
- case bytes.HasPrefix(link, []byte("mailto:")):
- attrEscape(out, link[len("mailto:"):])
- default:
- entityEscapeWithSkip(out, link, skipRanges)
- }
-
- out.WriteString("")
-}
-
-func (options *Html) CodeSpan(out *bytes.Buffer, text []byte) {
- out.WriteString("")
- attrEscape(out, text)
- out.WriteString("
")
-}
-
-func (options *Html) DoubleEmphasis(out *bytes.Buffer, text []byte) {
- out.WriteString("")
- out.Write(text)
- out.WriteString("")
-}
-
-func (options *Html) Emphasis(out *bytes.Buffer, text []byte) {
- if len(text) == 0 {
- return
- }
- out.WriteString("")
- out.Write(text)
- out.WriteString("")
-}
-
-func (options *Html) maybeWriteAbsolutePrefix(out *bytes.Buffer, link []byte) {
- if options.parameters.AbsolutePrefix != "" && isRelativeLink(link) && link[0] != '.' {
- out.WriteString(options.parameters.AbsolutePrefix)
- if link[0] != '/' {
- out.WriteByte('/')
- }
- }
-}
-
-func (options *Html) Image(out *bytes.Buffer, link []byte, title []byte, alt []byte) {
- if options.flags&HTML_SKIP_IMAGES != 0 {
- return
- }
-
- out.WriteString("
0 {
- attrEscape(out, alt)
- }
- if len(title) > 0 {
- out.WriteString("\" title=\"")
- attrEscape(out, title)
- }
-
- out.WriteByte('"')
- out.WriteString(options.closeTag)
-}
-
-func (options *Html) LineBreak(out *bytes.Buffer) {
- out.WriteString("
")
- attrEscape(out, content)
- out.WriteString("")
- return
- }
-
- if options.flags&HTML_SAFELINK != 0 && !isSafeLink(link) {
- // write the link text out but don't link it, just mark it with typewriter font
- out.WriteString("")
- attrEscape(out, content)
- out.WriteString("")
- return
- }
-
- out.WriteString(" 0 {
- out.WriteString("\" title=\"")
- attrEscape(out, title)
- }
- var relAttrs []string
- if options.flags&HTML_NOFOLLOW_LINKS != 0 && !isRelativeLink(link) {
- relAttrs = append(relAttrs, "nofollow")
- }
- if options.flags&HTML_NOREFERRER_LINKS != 0 && !isRelativeLink(link) {
- relAttrs = append(relAttrs, "noreferrer")
- }
- if len(relAttrs) > 0 {
- out.WriteString(fmt.Sprintf("\" rel=\"%s", strings.Join(relAttrs, " ")))
- }
-
- // blank target only add to external link
- if options.flags&HTML_HREF_TARGET_BLANK != 0 && !isRelativeLink(link) {
- out.WriteString("\" target=\"_blank")
- }
-
- out.WriteString("\">")
- out.Write(content)
- out.WriteString("")
- return
-}
-
-func (options *Html) RawHtmlTag(out *bytes.Buffer, text []byte) {
- if options.flags&HTML_SKIP_HTML != 0 {
- return
- }
- if options.flags&HTML_SKIP_STYLE != 0 && isHtmlTag(text, "style") {
- return
- }
- if options.flags&HTML_SKIP_LINKS != 0 && isHtmlTag(text, "a") {
- return
- }
- if options.flags&HTML_SKIP_IMAGES != 0 && isHtmlTag(text, "img") {
- return
- }
- out.Write(text)
-}
-
-func (options *Html) TripleEmphasis(out *bytes.Buffer, text []byte) {
- out.WriteString("")
- out.Write(text)
- out.WriteString("")
-}
-
-func (options *Html) StrikeThrough(out *bytes.Buffer, text []byte) {
- out.WriteString("")
- out.Write(text)
- out.WriteString("")
-}
-
-func (options *Html) FootnoteRef(out *bytes.Buffer, ref []byte, id int) {
- slug := slugify(ref)
- out.WriteString(``)
- out.WriteString(strconv.Itoa(id))
- out.WriteString(``)
-}
-
-func (options *Html) Entity(out *bytes.Buffer, entity []byte) {
- out.Write(entity)
-}
-
-func (options *Html) NormalText(out *bytes.Buffer, text []byte) {
- if options.flags&HTML_USE_SMARTYPANTS != 0 {
- options.Smartypants(out, text)
- } else {
- attrEscape(out, text)
- }
-}
-
-func (options *Html) Smartypants(out *bytes.Buffer, text []byte) {
- smrt := smartypantsData{false, false}
-
- // first do normal entity escaping
- var escaped bytes.Buffer
- attrEscape(&escaped, text)
- text = escaped.Bytes()
-
- mark := 0
- for i := 0; i < len(text); i++ {
- if action := options.smartypants[text[i]]; action != nil {
- if i > mark {
- out.Write(text[mark:i])
- }
-
- previousChar := byte(0)
- if i > 0 {
- previousChar = text[i-1]
- }
- i += action(out, &smrt, previousChar, text[i:])
- mark = i + 1
- }
- }
-
- if mark < len(text) {
- out.Write(text[mark:])
- }
-}
-
-func (options *Html) DocumentHeader(out *bytes.Buffer) {
- if options.flags&HTML_COMPLETE_PAGE == 0 {
- return
- }
-
- ending := ""
- if options.flags&HTML_USE_XHTML != 0 {
- out.WriteString("\n")
- out.WriteString("\n")
- ending = " /"
- } else {
- out.WriteString("\n")
- out.WriteString("\n")
- }
- out.WriteString("\n")
- out.WriteString(" ")
- options.NormalText(out, []byte(options.title))
- out.WriteString(" \n")
- out.WriteString(" \n")
- out.WriteString(" \n")
- if options.css != "" {
- out.WriteString(" \n")
- }
- out.WriteString("\n")
- out.WriteString("\n")
-
- options.tocMarker = out.Len()
-}
-
-func (options *Html) DocumentFooter(out *bytes.Buffer) {
- // finalize and insert the table of contents
- if options.flags&HTML_TOC != 0 {
- options.TocFinalize()
-
- // now we have to insert the table of contents into the document
- var temp bytes.Buffer
-
- // start by making a copy of everything after the document header
- temp.Write(out.Bytes()[options.tocMarker:])
-
- // now clear the copied material from the main output buffer
- out.Truncate(options.tocMarker)
-
- // corner case spacing issue
- if options.flags&HTML_COMPLETE_PAGE != 0 {
- out.WriteByte('\n')
- }
-
- // insert the table of contents
- out.WriteString("\n")
-
- // corner case spacing issue
- if options.flags&HTML_COMPLETE_PAGE == 0 && options.flags&HTML_OMIT_CONTENTS == 0 {
- out.WriteByte('\n')
- }
-
- // write out everything that came after it
- if options.flags&HTML_OMIT_CONTENTS == 0 {
- out.Write(temp.Bytes())
- }
- }
-
- if options.flags&HTML_COMPLETE_PAGE != 0 {
- out.WriteString("\n\n")
- out.WriteString("\n")
- }
-
-}
-
-func (options *Html) TocHeaderWithAnchor(text []byte, level int, anchor string) {
- for level > options.currentLevel {
- switch {
- case bytes.HasSuffix(options.toc.Bytes(), []byte("\n")):
- // this sublist can nest underneath a header
- size := options.toc.Len()
- options.toc.Truncate(size - len("\n"))
-
- case options.currentLevel > 0:
- options.toc.WriteString("")
- }
- if options.toc.Len() > 0 {
- options.toc.WriteByte('\n')
- }
- options.toc.WriteString("\n")
- options.currentLevel++
- }
-
- for level < options.currentLevel {
- options.toc.WriteString("
")
- if options.currentLevel > 1 {
- options.toc.WriteString(" \n")
- }
- options.currentLevel--
- }
-
- options.toc.WriteString("")
- options.headerCount++
-
- options.toc.Write(text)
-
- options.toc.WriteString(" \n")
-}
-
-func (options *Html) TocHeader(text []byte, level int) {
- options.TocHeaderWithAnchor(text, level, "")
-}
-
-func (options *Html) TocFinalize() {
- for options.currentLevel > 1 {
- options.toc.WriteString("\n")
- options.currentLevel--
- }
-
- if options.currentLevel > 0 {
- options.toc.WriteString("\n")
- }
-}
-
-func isHtmlTag(tag []byte, tagname string) bool {
- found, _ := findHtmlTagPos(tag, tagname)
+func isHTMLTag(tag []byte, tagname string) bool {
+ found, _ := findHTMLTagPos(tag, tagname)
return found
}
@@ -834,7 +168,7 @@ func skipUntilCharIgnoreQuotes(html []byte, start int, char byte) int {
return start
}
-func findHtmlTagPos(tag []byte, tagname string) (bool, int) {
+func findHTMLTagPos(tag []byte, tagname string) (bool, int) {
i := 0
if i < len(tag) && tag[0] != '<' {
return false, -1
@@ -863,21 +197,13 @@ func findHtmlTagPos(tag []byte, tagname string) (bool, int) {
}
rightAngle := skipUntilCharIgnoreQuotes(tag, i, '>')
- if rightAngle > i {
+ if rightAngle >= i {
return true, rightAngle
}
return false, -1
}
-func skipUntilChar(text []byte, start int, char byte) int {
- i := start
- for i < len(text) && text[i] != char {
- i++
- }
- return i
-}
-
func skipSpace(tag []byte, i int) int {
for i < len(tag) && isspace(tag[i]) {
i++
@@ -885,20 +211,6 @@ func skipSpace(tag []byte, i int) int {
return i
}
-func skipChar(data []byte, start int, char byte) int {
- i := start
- for i < len(data) && data[i] == char {
- i++
- }
- return i
-}
-
-func doubleSpace(out *bytes.Buffer) {
- if out.Len() > 0 {
- out.WriteByte('\n')
- }
-}
-
func isRelativeLink(link []byte) (yes bool) {
// a tag begin with '#'
if link[0] == '#' {
@@ -928,21 +240,701 @@ func isRelativeLink(link []byte) (yes bool) {
return false
}
-func (options *Html) ensureUniqueHeaderID(id string) string {
- for count, found := options.headerIDs[id]; found; count, found = options.headerIDs[id] {
+func (r *HTMLRenderer) ensureUniqueHeadingID(id string) string {
+ for count, found := r.headingIDs[id]; found; count, found = r.headingIDs[id] {
tmp := fmt.Sprintf("%s-%d", id, count+1)
- if _, tmpFound := options.headerIDs[tmp]; !tmpFound {
- options.headerIDs[id] = count + 1
+ if _, tmpFound := r.headingIDs[tmp]; !tmpFound {
+ r.headingIDs[id] = count + 1
id = tmp
} else {
id = id + "-1"
}
}
- if _, found := options.headerIDs[id]; !found {
- options.headerIDs[id] = 0
+ if _, found := r.headingIDs[id]; !found {
+ r.headingIDs[id] = 0
}
return id
}
+
+func (r *HTMLRenderer) addAbsPrefix(link []byte) []byte {
+ if r.AbsolutePrefix != "" && isRelativeLink(link) && link[0] != '.' {
+ newDest := r.AbsolutePrefix
+ if link[0] != '/' {
+ newDest += "/"
+ }
+ newDest += string(link)
+ return []byte(newDest)
+ }
+ return link
+}
+
+func appendLinkAttrs(attrs []string, flags HTMLFlags, link []byte) []string {
+ if isRelativeLink(link) {
+ return attrs
+ }
+ val := []string{}
+ if flags&NofollowLinks != 0 {
+ val = append(val, "nofollow")
+ }
+ if flags&NoreferrerLinks != 0 {
+ val = append(val, "noreferrer")
+ }
+ if flags&HrefTargetBlank != 0 {
+ attrs = append(attrs, "target=\"_blank\"")
+ }
+ if len(val) == 0 {
+ return attrs
+ }
+ attr := fmt.Sprintf("rel=%q", strings.Join(val, " "))
+ return append(attrs, attr)
+}
+
+func isMailto(link []byte) bool {
+ return bytes.HasPrefix(link, []byte("mailto:"))
+}
+
+func needSkipLink(flags HTMLFlags, dest []byte) bool {
+ if flags&SkipLinks != 0 {
+ return true
+ }
+ return flags&Safelink != 0 && !isSafeLink(dest) && !isMailto(dest)
+}
+
+func isSmartypantable(node *Node) bool {
+ pt := node.Parent.Type
+ return pt != Link && pt != CodeBlock && pt != Code
+}
+
+func appendLanguageAttr(attrs []string, info []byte) []string {
+ if len(info) == 0 {
+ return attrs
+ }
+ endOfLang := bytes.IndexAny(info, "\t ")
+ if endOfLang < 0 {
+ endOfLang = len(info)
+ }
+ return append(attrs, fmt.Sprintf("class=\"language-%s\"", info[:endOfLang]))
+}
+
+func (r *HTMLRenderer) tag(w io.Writer, name []byte, attrs []string) {
+ w.Write(name)
+ if len(attrs) > 0 {
+ w.Write(spaceBytes)
+ w.Write([]byte(strings.Join(attrs, " ")))
+ }
+ w.Write(gtBytes)
+ r.lastOutputLen = 1
+}
+
+func footnoteRef(prefix string, node *Node) []byte {
+ urlFrag := prefix + string(slugify(node.Destination))
+ anchor := fmt.Sprintf(`<a href="#fn:%s">%d</a>`, urlFrag, node.NoteID)
+ return []byte(fmt.Sprintf(`<sup class="footnote-ref" id="fnref:%s">%s</sup>`, urlFrag, anchor))
+}
+
+func footnoteItem(prefix string, slug []byte) []byte {
+ return []byte(fmt.Sprintf(`<li id="fn:%s%s">`, prefix, slug))
+}
+
+func footnoteReturnLink(prefix, returnLink string, slug []byte) []byte {
+ const format = ` <a class="footnote-return" href="#fnref:%s%s">%s</a>`
+ return []byte(fmt.Sprintf(format, prefix, slug, returnLink))
+}
+
+func itemOpenCR(node *Node) bool {
+ if node.Prev == nil {
+ return false
+ }
+ ld := node.Parent.ListData
+ return !ld.Tight && ld.ListFlags&ListTypeDefinition == 0
+}
+
+func skipParagraphTags(node *Node) bool {
+ grandparent := node.Parent.Parent
+ if grandparent == nil || grandparent.Type != List {
+ return false
+ }
+ tightOrTerm := grandparent.Tight || node.Parent.ListFlags&ListTypeTerm != 0
+ return grandparent.Type == List && tightOrTerm
+}
+
+func cellAlignment(align CellAlignFlags) string {
+ switch align {
+ case TableAlignmentLeft:
+ return "left"
+ case TableAlignmentRight:
+ return "right"
+ case TableAlignmentCenter:
+ return "center"
+ default:
+ return ""
+ }
+}
+
+func (r *HTMLRenderer) out(w io.Writer, text []byte) {
+ if r.disableTags > 0 {
+ w.Write(htmlTagRe.ReplaceAll(text, []byte{}))
+ } else {
+ w.Write(text)
+ }
+ r.lastOutputLen = len(text)
+}
+
+func (r *HTMLRenderer) cr(w io.Writer) {
+ if r.lastOutputLen > 0 {
+ r.out(w, nlBytes)
+ }
+}
+
+var (
+ nlBytes = []byte{'\n'}
+ gtBytes = []byte{'>'}
+ spaceBytes = []byte{' '}
+)
+
+var (
+ brTag = []byte("<br>")
+ brXHTMLTag = []byte("<br />")
+ emTag = []byte("<em>")
+ emCloseTag = []byte("</em>")
+ strongTag = []byte("<strong>")
+ strongCloseTag = []byte("</strong>")
+ delTag = []byte("<del>")
+ delCloseTag = []byte("</del>")
+ ttTag = []byte("<tt>")
+ ttCloseTag = []byte("</tt>")
+ aTag = []byte("<a")
+ aCloseTag = []byte("</a>")
+ preTag = []byte("<pre>")
+ preCloseTag = []byte("</pre>")
+ codeTag = []byte("<code>")
+ codeCloseTag = []byte("</code>")
+ pTag = []byte("<p>")
+ pCloseTag = []byte("</p>")
+ blockquoteTag = []byte("<blockquote>")
+ blockquoteCloseTag = []byte("</blockquote>")
+ hrTag = []byte("<hr>")
+ hrXHTMLTag = []byte("<hr />")
+ ulTag = []byte("<ul>")
+ ulCloseTag = []byte("</ul>")
+ olTag = []byte("<ol>")
+ olCloseTag = []byte("</ol>")
+ dlTag = []byte("<dl>")
+ dlCloseTag = []byte("</dl>")
+ liTag = []byte("<li>")
+ liCloseTag = []byte("</li>")
+ ddTag = []byte("<dd>")
+ ddCloseTag = []byte("</dd>")
+ dtTag = []byte("<dt>")
+ dtCloseTag = []byte("</dt>")
+ tableTag = []byte("<table>")
+ tableCloseTag = []byte("</table>")
+ tdTag = []byte("<td")
+ tdCloseTag = []byte("</td>")
+ thTag = []byte("<th")
+ thCloseTag = []byte("</th>")
+ theadTag = []byte("<thead>")
+ theadCloseTag = []byte("</thead>")
+ tbodyTag = []byte("<tbody>")
+ tbodyCloseTag = []byte("</tbody>")
+ trTag = []byte("<tr>")
+ trCloseTag = []byte("</tr>")
+ h1Tag = []byte("<h1")
+ h1CloseTag = []byte("</h1>")
+ h2Tag = []byte("<h2")
+ h2CloseTag = []byte("</h2>")
+ h3Tag = []byte("<h3")
+ h3CloseTag = []byte("</h3>")
+ h4Tag = []byte("<h4")
+ h4CloseTag = []byte("</h4>")
+ h5Tag = []byte("<h5")
+ h5CloseTag = []byte("</h5>")
+ h6Tag = []byte("<h6")
+ h6CloseTag = []byte("</h6>")
+
+ footnotesDivBytes = []byte("\n<div class=\"footnotes\">\n\n")
+ footnotesCloseDivBytes = []byte("\n</div>\n")
+)
+
+func headingTagsFromLevel(level int) ([]byte, []byte) {
+ switch level {
+ case 1:
+ return h1Tag, h1CloseTag
+ case 2:
+ return h2Tag, h2CloseTag
+ case 3:
+ return h3Tag, h3CloseTag
+ case 4:
+ return h4Tag, h4CloseTag
+ case 5:
+ return h5Tag, h5CloseTag
+ default:
+ return h6Tag, h6CloseTag
+ }
+}
+
+func (r *HTMLRenderer) outHRTag(w io.Writer) {
+ if r.Flags&UseXHTML == 0 {
+ r.out(w, hrTag)
+ } else {
+ r.out(w, hrXHTMLTag)
+ }
+}
+
+// RenderNode is a default renderer of a single node of a syntax tree. For
+// block nodes it will be called twice: first time with entering=true, second
+// time with entering=false, so that it could know when it's working on an open
+// tag and when on close. It writes the result to w.
+//
+// The return value is a way to tell the calling walker to adjust its walk
+// pattern: e.g. it can terminate the traversal by returning Terminate. Or it
+// can ask the walker to skip a subtree of this node by returning SkipChildren.
+// The typical behavior is to return GoToNext, which asks for the usual
+// traversal to the next node.
+func (r *HTMLRenderer) RenderNode(w io.Writer, node *Node, entering bool) WalkStatus {
+ attrs := []string{}
+ switch node.Type {
+ case Text:
+ if r.Flags&Smartypants != 0 {
+ var tmp bytes.Buffer
+ escapeHTML(&tmp, node.Literal)
+ r.sr.Process(w, tmp.Bytes())
+ } else {
+ if node.Parent.Type == Link {
+ escLink(w, node.Literal)
+ } else {
+ escapeHTML(w, node.Literal)
+ }
+ }
+ case Softbreak:
+ r.cr(w)
+ // TODO: make it configurable via out(renderer.softbreak)
+ case Hardbreak:
+ if r.Flags&UseXHTML == 0 {
+ r.out(w, brTag)
+ } else {
+ r.out(w, brXHTMLTag)
+ }
+ r.cr(w)
+ case Emph:
+ if entering {
+ r.out(w, emTag)
+ } else {
+ r.out(w, emCloseTag)
+ }
+ case Strong:
+ if entering {
+ r.out(w, strongTag)
+ } else {
+ r.out(w, strongCloseTag)
+ }
+ case Del:
+ if entering {
+ r.out(w, delTag)
+ } else {
+ r.out(w, delCloseTag)
+ }
+ case HTMLSpan:
+ if r.Flags&SkipHTML != 0 {
+ break
+ }
+ r.out(w, node.Literal)
+ case Link:
+ // mark it but don't link it if it is not a safe link: no smartypants
+ dest := node.LinkData.Destination
+ if needSkipLink(r.Flags, dest) {
+ if entering {
+ r.out(w, ttTag)
+ } else {
+ r.out(w, ttCloseTag)
+ }
+ } else {
+ if entering {
+ dest = r.addAbsPrefix(dest)
+ var hrefBuf bytes.Buffer
+ hrefBuf.WriteString("href=\"")
+ escLink(&hrefBuf, dest)
+ hrefBuf.WriteByte('"')
+ attrs = append(attrs, hrefBuf.String())
+ if node.NoteID != 0 {
+ r.out(w, footnoteRef(r.FootnoteAnchorPrefix, node))
+ break
+ }
+ attrs = appendLinkAttrs(attrs, r.Flags, dest)
+ if len(node.LinkData.Title) > 0 {
+ var titleBuff bytes.Buffer
+ titleBuff.WriteString("title=\"")
+ escapeHTML(&titleBuff, node.LinkData.Title)
+ titleBuff.WriteByte('"')
+ attrs = append(attrs, titleBuff.String())
+ }
+ r.tag(w, aTag, attrs)
+ } else {
+ if node.NoteID != 0 {
+ break
+ }
+ r.out(w, aCloseTag)
+ }
+ }
+ case Image:
+ if r.Flags&SkipImages != 0 {
+ return SkipChildren
+ }
+ if entering {
+ dest := node.LinkData.Destination
+ dest = r.addAbsPrefix(dest)
+ if r.disableTags == 0 {
+ //if options.safe && potentiallyUnsafe(dest) {
+ //out(w, `
`))
+ }
+ }
+ case Code:
+ r.out(w, codeTag)
+ escapeHTML(w, node.Literal)
+ r.out(w, codeCloseTag)
+ case Document:
+ break
+ case Paragraph:
+ if skipParagraphTags(node) {
+ break
+ }
+ if entering {
+ // TODO: untangle this clusterfuck about when the newlines need
+ // to be added and when not.
+ if node.Prev != nil {
+ switch node.Prev.Type {
+ case HTMLBlock, List, Paragraph, Heading, CodeBlock, BlockQuote, HorizontalRule:
+ r.cr(w)
+ }
+ }
+ if node.Parent.Type == BlockQuote && node.Prev == nil {
+ r.cr(w)
+ }
+ r.out(w, pTag)
+ } else {
+ r.out(w, pCloseTag)
+ if !(node.Parent.Type == Item && node.Next == nil) {
+ r.cr(w)
+ }
+ }
+ case BlockQuote:
+ if entering {
+ r.cr(w)
+ r.out(w, blockquoteTag)
+ } else {
+ r.out(w, blockquoteCloseTag)
+ r.cr(w)
+ }
+ case HTMLBlock:
+ if r.Flags&SkipHTML != 0 {
+ break
+ }
+ r.cr(w)
+ r.out(w, node.Literal)
+ r.cr(w)
+ case Heading:
+ openTag, closeTag := headingTagsFromLevel(node.Level)
+ if entering {
+ if node.IsTitleblock {
+ attrs = append(attrs, `class="title"`)
+ }
+ if node.HeadingID != "" {
+ id := r.ensureUniqueHeadingID(node.HeadingID)
+ if r.HeadingIDPrefix != "" {
+ id = r.HeadingIDPrefix + id
+ }
+ if r.HeadingIDSuffix != "" {
+ id = id + r.HeadingIDSuffix
+ }
+ attrs = append(attrs, fmt.Sprintf(`id="%s"`, id))
+ }
+ r.cr(w)
+ r.tag(w, openTag, attrs)
+ } else {
+ r.out(w, closeTag)
+ if !(node.Parent.Type == Item && node.Next == nil) {
+ r.cr(w)
+ }
+ }
+ case HorizontalRule:
+ r.cr(w)
+ r.outHRTag(w)
+ r.cr(w)
+ case List:
+ openTag := ulTag
+ closeTag := ulCloseTag
+ if node.ListFlags&ListTypeOrdered != 0 {
+ openTag = olTag
+ closeTag = olCloseTag
+ }
+ if node.ListFlags&ListTypeDefinition != 0 {
+ openTag = dlTag
+ closeTag = dlCloseTag
+ }
+ if entering {
+ if node.IsFootnotesList {
+ r.out(w, footnotesDivBytes)
+ r.outHRTag(w)
+ r.cr(w)
+ }
+ r.cr(w)
+ if node.Parent.Type == Item && node.Parent.Parent.Tight {
+ r.cr(w)
+ }
+ r.tag(w, openTag[:len(openTag)-1], attrs)
+ r.cr(w)
+ } else {
+ r.out(w, closeTag)
+ //cr(w)
+ //if node.parent.Type != Item {
+ // cr(w)
+ //}
+ if node.Parent.Type == Item && node.Next != nil {
+ r.cr(w)
+ }
+ if node.Parent.Type == Document || node.Parent.Type == BlockQuote {
+ r.cr(w)
+ }
+ if node.IsFootnotesList {
+ r.out(w, footnotesCloseDivBytes)
+ }
+ }
+ case Item:
+ openTag := liTag
+ closeTag := liCloseTag
+ if node.ListFlags&ListTypeDefinition != 0 {
+ openTag = ddTag
+ closeTag = ddCloseTag
+ }
+ if node.ListFlags&ListTypeTerm != 0 {
+ openTag = dtTag
+ closeTag = dtCloseTag
+ }
+ if entering {
+ if itemOpenCR(node) {
+ r.cr(w)
+ }
+ if node.ListData.RefLink != nil {
+ slug := slugify(node.ListData.RefLink)
+ r.out(w, footnoteItem(r.FootnoteAnchorPrefix, slug))
+ break
+ }
+ r.out(w, openTag)
+ } else {
+ if node.ListData.RefLink != nil {
+ slug := slugify(node.ListData.RefLink)
+ if r.Flags&FootnoteReturnLinks != 0 {
+ r.out(w, footnoteReturnLink(r.FootnoteAnchorPrefix, r.FootnoteReturnLinkContents, slug))
+ }
+ }
+ r.out(w, closeTag)
+ r.cr(w)
+ }
+ case CodeBlock:
+ attrs = appendLanguageAttr(attrs, node.Info)
+ r.cr(w)
+ r.out(w, preTag)
+ r.tag(w, codeTag[:len(codeTag)-1], attrs)
+ escapeHTML(w, node.Literal)
+ r.out(w, codeCloseTag)
+ r.out(w, preCloseTag)
+ if node.Parent.Type != Item {
+ r.cr(w)
+ }
+ case Table:
+ if entering {
+ r.cr(w)
+ r.out(w, tableTag)
+ } else {
+ r.out(w, tableCloseTag)
+ r.cr(w)
+ }
+ case TableCell:
+ openTag := tdTag
+ closeTag := tdCloseTag
+ if node.IsHeader {
+ openTag = thTag
+ closeTag = thCloseTag
+ }
+ if entering {
+ align := cellAlignment(node.Align)
+ if align != "" {
+ attrs = append(attrs, fmt.Sprintf(`align="%s"`, align))
+ }
+ if node.Prev == nil {
+ r.cr(w)
+ }
+ r.tag(w, openTag, attrs)
+ } else {
+ r.out(w, closeTag)
+ r.cr(w)
+ }
+ case TableHead:
+ if entering {
+ r.cr(w)
+ r.out(w, theadTag)
+ } else {
+ r.out(w, theadCloseTag)
+ r.cr(w)
+ }
+ case TableBody:
+ if entering {
+ r.cr(w)
+ r.out(w, tbodyTag)
+ // XXX: this is to adhere to a rather silly test. Should fix test.
+ if node.FirstChild == nil {
+ r.cr(w)
+ }
+ } else {
+ r.out(w, tbodyCloseTag)
+ r.cr(w)
+ }
+ case TableRow:
+ if entering {
+ r.cr(w)
+ r.out(w, trTag)
+ } else {
+ r.out(w, trCloseTag)
+ r.cr(w)
+ }
+ default:
+ panic("Unknown node type " + node.Type.String())
+ }
+ return GoToNext
+}
+
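To make the RenderNode contract above concrete, here is a hedged sketch, not part of the vendored file, of plugging a custom renderer into `Run` via `WithRenderer`. It wraps the default `HTMLRenderer` and only intercepts `CodeBlock` nodes; the type name and CSS class are illustrative, everything else is the standard v2 API:

```go
package main

import (
	"fmt"
	"html"
	"io"

	"github.com/documize/blackfriday" // assumed vendored path; upstream is gopkg.in/russross/blackfriday.v2
)

// codeBlockRenderer (illustrative name) delegates everything to the default
// HTML renderer except code blocks, which it wraps in a custom <pre> element.
type codeBlockRenderer struct {
	*blackfriday.HTMLRenderer
}

func (r *codeBlockRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
	if node.Type == blackfriday.CodeBlock {
		fmt.Fprintf(w, "<pre class=\"snippet\"><code>%s</code></pre>\n",
			html.EscapeString(string(node.Literal)))
		return blackfriday.GoToNext
	}
	// Everything else uses the stock HTML output.
	return r.HTMLRenderer.RenderNode(w, node, entering)
}

func main() {
	base := blackfriday.NewHTMLRenderer(blackfriday.HTMLRendererParameters{
		Flags: blackfriday.CommonHTMLFlags,
	})
	input := []byte("Example:\n\n\tfmt.Println(1)\n")
	out := blackfriday.Run(input, blackfriday.WithRenderer(&codeBlockRenderer{base}))
	fmt.Println(string(out))
}
```

Because the wrapper embeds `*HTMLRenderer`, it inherits `RenderHeader` and `RenderFooter` and therefore still satisfies the `Renderer` interface.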
+// RenderHeader writes HTML document preamble and TOC if requested.
+func (r *HTMLRenderer) RenderHeader(w io.Writer, ast *Node) {
+ r.writeDocumentHeader(w)
+ if r.Flags&TOC != 0 {
+ r.writeTOC(w, ast)
+ }
+}
+
+// RenderFooter writes HTML document footer.
+func (r *HTMLRenderer) RenderFooter(w io.Writer, ast *Node) {
+ if r.Flags&CompletePage == 0 {
+ return
+ }
+ io.WriteString(w, "\n\n