github.com/prometheus/procfs v0.0.4 // indirect
github.com/quasoft/websspi v1.0.0
github.com/remyoudompheng/bigfft v0.0.0-20190321074620-2f0d2b0e0001 // indirect
- github.com/russross/blackfriday/v2 v2.0.1
github.com/satori/go.uuid v1.2.0
github.com/sergi/go-diff v1.0.0
github.com/shurcooL/httpfs v0.0.0-20190527155220-6a4d4a70508b // indirect
- github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect
github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd
github.com/steveyen/gtreap v0.0.0-20150807155958-0abe01ef9be2 // indirect
github.com/stretchr/testify v1.4.0
github.com/unknwon/paginater v0.0.0-20151104151617-7748a72e0141
github.com/urfave/cli v1.20.0
github.com/yohcop/openid-go v0.0.0-20160914080427-2c050d2dae53
+ github.com/yuin/goldmark v1.1.19
go.etcd.io/bbolt v1.3.3 // indirect
golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876
golang.org/x/net v0.0.0-20191101175033-0deb6923b6d9
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
-github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
-github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/shurcooL/httpfs v0.0.0-20190527155220-6a4d4a70508b h1:4kg1wyftSKxLtnPAvcRWakIPpokB9w780/KwrNLnfPA=
github.com/shurcooL/httpfs v0.0.0-20190527155220-6a4d4a70508b/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
-github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
-github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd h1:ug7PpSOB5RBPK1Kg6qskGBoP3Vnj/aNYFTznWvlkGo0=
github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726/go.mod h1:3yhqj7WBBfRhbBlzyOC3gUxftwsU0u8gqevxwIHQpMw=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yohcop/openid-go v0.0.0-20160914080427-2c050d2dae53 h1:HsIQ6yAjfjQ3IxPGrTusxp6Qxn92gNVq2x5CbvQvx3w=
github.com/yohcop/openid-go v0.0.0-20160914080427-2c050d2dae53/go.mod h1:f6elajwZV+xceiaqgRL090YzLEDGSbqr3poGL3ZgXYo=
+github.com/yuin/goldmark v1.1.19 h1:0s2/60x0XsFCXHeFut+F3azDVAAyIMyUfJRbRexiTYs=
+github.com/yuin/goldmark v1.1.19/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs=
github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
--- /dev/null
+// Copyright 2019 Yusuke Inuzuka
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+// Most of what follows is a subtly changed version of github.com/yuin/goldmark/extension/footnote.go
+
+package common
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "strconv"
+ "unicode"
+
+ "github.com/yuin/goldmark"
+ "github.com/yuin/goldmark/ast"
+ "github.com/yuin/goldmark/parser"
+ "github.com/yuin/goldmark/renderer"
+ "github.com/yuin/goldmark/renderer/html"
+ "github.com/yuin/goldmark/text"
+ "github.com/yuin/goldmark/util"
+)
+
+// CleanValue will clean a value to make it safe to use as an element id.
+// This function is quite different from the original goldmark function
+// and more closely matches the output of the shurcooL sanitizer.
+// In particular, it accepts all Unicode letters and numbers rather than just a-zA-Z0-9.
+func CleanValue(value []byte) []byte {
+ value = bytes.TrimSpace(value)
+ rs := bytes.Runes(value)
+ result := make([]rune, 0, len(rs))
+ needsDash := false
+ for _, r := range rs {
+ switch {
+ case unicode.IsLetter(r) || unicode.IsNumber(r):
+ if needsDash && len(result) > 0 {
+ result = append(result, '-')
+ }
+ needsDash = false
+ result = append(result, unicode.ToLower(r))
+ default:
+ needsDash = true
+ }
+ }
+ return []byte(string(result))
+}
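+
+// For example (illustrative only, assuming the rules above), punctuation and
+// whitespace collapse to single dashes and letters are lower-cased:
+//
+//	CleanValue([]byte("  Héllo, Wörld! 123 ")) // -> "héllo-wörld-123"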
+
+// A FootnoteLink struct represents a link to a footnote of Markdown
+// (PHP Markdown Extra) text.
+type FootnoteLink struct {
+ ast.BaseInline
+ Index int
+ Name []byte
+}
+
+// Dump implements Node.Dump.
+func (n *FootnoteLink) Dump(source []byte, level int) {
+ m := map[string]string{}
+ m["Index"] = fmt.Sprintf("%v", n.Index)
+ m["Name"] = fmt.Sprintf("%v", n.Name)
+ ast.DumpHelper(n, source, level, m, nil)
+}
+
+// KindFootnoteLink is a NodeKind of the FootnoteLink node.
+var KindFootnoteLink = ast.NewNodeKind("GiteaFootnoteLink")
+
+// Kind implements Node.Kind.
+func (n *FootnoteLink) Kind() ast.NodeKind {
+ return KindFootnoteLink
+}
+
+// NewFootnoteLink returns a new FootnoteLink node.
+func NewFootnoteLink(index int, name []byte) *FootnoteLink {
+ return &FootnoteLink{
+ Index: index,
+ Name: name,
+ }
+}
+
+// A FootnoteBackLink struct represents a backlink from a footnote of Markdown
+// (PHP Markdown Extra) text to its reference.
+type FootnoteBackLink struct {
+ ast.BaseInline
+ Index int
+ Name []byte
+}
+
+// Dump implements Node.Dump.
+func (n *FootnoteBackLink) Dump(source []byte, level int) {
+ m := map[string]string{}
+ m["Index"] = fmt.Sprintf("%v", n.Index)
+ m["Name"] = fmt.Sprintf("%v", n.Name)
+ ast.DumpHelper(n, source, level, m, nil)
+}
+
+// KindFootnoteBackLink is a NodeKind of the FootnoteBackLink node.
+var KindFootnoteBackLink = ast.NewNodeKind("GiteaFootnoteBackLink")
+
+// Kind implements Node.Kind.
+func (n *FootnoteBackLink) Kind() ast.NodeKind {
+ return KindFootnoteBackLink
+}
+
+// NewFootnoteBackLink returns a new FootnoteBackLink node.
+func NewFootnoteBackLink(index int, name []byte) *FootnoteBackLink {
+ return &FootnoteBackLink{
+ Index: index,
+ Name: name,
+ }
+}
+
+// A Footnote struct represents a footnote of Markdown
+// (PHP Markdown Extra) text.
+type Footnote struct {
+ ast.BaseBlock
+ Ref []byte
+ Index int
+ Name []byte
+}
+
+// Dump implements Node.Dump.
+func (n *Footnote) Dump(source []byte, level int) {
+ m := map[string]string{}
+ m["Index"] = fmt.Sprintf("%v", n.Index)
+ m["Ref"] = fmt.Sprintf("%s", n.Ref)
+ m["Name"] = fmt.Sprintf("%v", n.Name)
+ ast.DumpHelper(n, source, level, m, nil)
+}
+
+// KindFootnote is a NodeKind of the Footnote node.
+var KindFootnote = ast.NewNodeKind("GiteaFootnote")
+
+// Kind implements Node.Kind.
+func (n *Footnote) Kind() ast.NodeKind {
+ return KindFootnote
+}
+
+// NewFootnote returns a new Footnote node.
+func NewFootnote(ref []byte) *Footnote {
+ return &Footnote{
+ Ref: ref,
+ Index: -1,
+ Name: ref,
+ }
+}
+
+// A FootnoteList struct represents footnotes of Markdown
+// (PHP Markdown Extra) text.
+type FootnoteList struct {
+ ast.BaseBlock
+ Count int
+}
+
+// Dump implements Node.Dump.
+func (n *FootnoteList) Dump(source []byte, level int) {
+ m := map[string]string{}
+ m["Count"] = fmt.Sprintf("%v", n.Count)
+ ast.DumpHelper(n, source, level, m, nil)
+}
+
+// KindFootnoteList is a NodeKind of the FootnoteList node.
+var KindFootnoteList = ast.NewNodeKind("GiteaFootnoteList")
+
+// Kind implements Node.Kind.
+func (n *FootnoteList) Kind() ast.NodeKind {
+ return KindFootnoteList
+}
+
+// NewFootnoteList returns a new FootnoteList node.
+func NewFootnoteList() *FootnoteList {
+ return &FootnoteList{
+ Count: 0,
+ }
+}
+
+var footnoteListKey = parser.NewContextKey()
+
+type footnoteBlockParser struct {
+}
+
+var defaultFootnoteBlockParser = &footnoteBlockParser{}
+
+// NewFootnoteBlockParser returns a new parser.BlockParser that can parse
+// footnotes of Markdown (PHP Markdown Extra) text.
+func NewFootnoteBlockParser() parser.BlockParser {
+ return defaultFootnoteBlockParser
+}
+
+func (b *footnoteBlockParser) Trigger() []byte {
+ return []byte{'['}
+}
+
+func (b *footnoteBlockParser) Open(parent ast.Node, reader text.Reader, pc parser.Context) (ast.Node, parser.State) {
+ line, segment := reader.PeekLine()
+ pos := pc.BlockOffset()
+ if pos < 0 || line[pos] != '[' {
+ return nil, parser.NoChildren
+ }
+ pos++
+ if pos > len(line)-1 || line[pos] != '^' {
+ return nil, parser.NoChildren
+ }
+ open := pos + 1
+ closure := util.FindClosure(line[pos+1:], '[', ']', false, false)
+ if closure < 0 {
+ return nil, parser.NoChildren
+ }
+ closes := pos + 1 + closure
+ next := closes + 1
+ if next >= len(line) || line[next] != ':' {
+ return nil, parser.NoChildren
+ }
+ padding := segment.Padding
+ label := reader.Value(text.NewSegment(segment.Start+open-padding, segment.Start+closes-padding))
+ if util.IsBlank(label) {
+ return nil, parser.NoChildren
+ }
+ item := NewFootnote(label)
+
+ pos = next + 1 - padding
+ if pos >= len(line) {
+ reader.Advance(pos)
+ return item, parser.NoChildren
+ }
+ reader.AdvanceAndSetPadding(pos, padding)
+ return item, parser.HasChildren
+}
+
+func (b *footnoteBlockParser) Continue(node ast.Node, reader text.Reader, pc parser.Context) parser.State {
+ line, _ := reader.PeekLine()
+ if util.IsBlank(line) {
+ return parser.Continue | parser.HasChildren
+ }
+ childpos, padding := util.IndentPosition(line, reader.LineOffset(), 4)
+ if childpos < 0 {
+ return parser.Close
+ }
+ reader.AdvanceAndSetPadding(childpos, padding)
+ return parser.Continue | parser.HasChildren
+}
+
+func (b *footnoteBlockParser) Close(node ast.Node, reader text.Reader, pc parser.Context) {
+ var list *FootnoteList
+ if tlist := pc.Get(footnoteListKey); tlist != nil {
+ list = tlist.(*FootnoteList)
+ } else {
+ list = NewFootnoteList()
+ pc.Set(footnoteListKey, list)
+ node.Parent().InsertBefore(node.Parent(), node, list)
+ }
+ node.Parent().RemoveChild(node.Parent(), node)
+ list.AppendChild(list, node)
+}
+
+func (b *footnoteBlockParser) CanInterruptParagraph() bool {
+ return true
+}
+
+func (b *footnoteBlockParser) CanAcceptIndentedLine() bool {
+ return false
+}
+
+type footnoteParser struct {
+}
+
+var defaultFootnoteParser = &footnoteParser{}
+
+// NewFootnoteParser returns a new parser.InlineParser that can parse
+// footnote links of Markdown (PHP Markdown Extra) text.
+func NewFootnoteParser() parser.InlineParser {
+ return defaultFootnoteParser
+}
+
+func (s *footnoteParser) Trigger() []byte {
+ // footnote syntax probably conflicts with the image syntax,
+ // so we also need to trigger this parser with '!'.
+ return []byte{'!', '['}
+}
+
+func (s *footnoteParser) Parse(parent ast.Node, block text.Reader, pc parser.Context) ast.Node {
+ line, segment := block.PeekLine()
+ pos := 1
+ if len(line) > 0 && line[0] == '!' {
+ pos++
+ }
+ if pos >= len(line) || line[pos] != '^' {
+ return nil
+ }
+ pos++
+ if pos >= len(line) {
+ return nil
+ }
+ open := pos
+ closure := util.FindClosure(line[pos:], '[', ']', false, false)
+ if closure < 0 {
+ return nil
+ }
+ closes := pos + closure
+ value := block.Value(text.NewSegment(segment.Start+open, segment.Start+closes))
+ block.Advance(closes + 1)
+
+ var list *FootnoteList
+ if tlist := pc.Get(footnoteListKey); tlist != nil {
+ list = tlist.(*FootnoteList)
+ }
+ if list == nil {
+ return nil
+ }
+ index := 0
+ name := []byte{}
+ for def := list.FirstChild(); def != nil; def = def.NextSibling() {
+ d := def.(*Footnote)
+ if bytes.Equal(d.Ref, value) {
+ if d.Index < 0 {
+ list.Count++
+ d.Index = list.Count
+ val := CleanValue(d.Name)
+ if len(val) == 0 {
+ val = []byte(strconv.Itoa(d.Index))
+ }
+ d.Name = pc.IDs().Generate(val, KindFootnote)
+ }
+ index = d.Index
+ name = d.Name
+ break
+ }
+ }
+ if index == 0 {
+ return nil
+ }
+
+ return NewFootnoteLink(index, name)
+}
+
+type footnoteASTTransformer struct {
+}
+
+var defaultFootnoteASTTransformer = &footnoteASTTransformer{}
+
+// NewFootnoteASTTransformer returns a new parser.ASTTransformer that
+// inserts a footnote list at the end of the document.
+func NewFootnoteASTTransformer() parser.ASTTransformer {
+ return defaultFootnoteASTTransformer
+}
+
+func (a *footnoteASTTransformer) Transform(node *ast.Document, reader text.Reader, pc parser.Context) {
+ var list *FootnoteList
+ if tlist := pc.Get(footnoteListKey); tlist != nil {
+ list = tlist.(*FootnoteList)
+ } else {
+ return
+ }
+ pc.Set(footnoteListKey, nil)
+ for footnote := list.FirstChild(); footnote != nil; {
+ var container ast.Node = footnote
+ next := footnote.NextSibling()
+ if fc := container.LastChild(); fc != nil && ast.IsParagraph(fc) {
+ container = fc
+ }
+ footnoteNode := footnote.(*Footnote)
+ index := footnoteNode.Index
+ name := footnoteNode.Name
+ if index < 0 {
+ list.RemoveChild(list, footnote)
+ } else {
+ container.AppendChild(container, NewFootnoteBackLink(index, name))
+ }
+ footnote = next
+ }
+ list.SortChildren(func(n1, n2 ast.Node) int {
+ if n1.(*Footnote).Index < n2.(*Footnote).Index {
+ return -1
+ }
+ return 1
+ })
+ if list.Count <= 0 {
+ list.Parent().RemoveChild(list.Parent(), list)
+ return
+ }
+
+ node.AppendChild(node, list)
+}
+
+// FootnoteHTMLRenderer is a renderer.NodeRenderer implementation that
+// renders FootnoteLink nodes.
+type FootnoteHTMLRenderer struct {
+ html.Config
+}
+
+// NewFootnoteHTMLRenderer returns a new FootnoteHTMLRenderer.
+func NewFootnoteHTMLRenderer(opts ...html.Option) renderer.NodeRenderer {
+ r := &FootnoteHTMLRenderer{
+ Config: html.NewConfig(),
+ }
+ for _, opt := range opts {
+ opt.SetHTMLOption(&r.Config)
+ }
+ return r
+}
+
+// RegisterFuncs implements renderer.NodeRenderer.RegisterFuncs.
+func (r *FootnoteHTMLRenderer) RegisterFuncs(reg renderer.NodeRendererFuncRegisterer) {
+ reg.Register(KindFootnoteLink, r.renderFootnoteLink)
+ reg.Register(KindFootnoteBackLink, r.renderFootnoteBackLink)
+ reg.Register(KindFootnote, r.renderFootnote)
+ reg.Register(KindFootnoteList, r.renderFootnoteList)
+}
+
+func (r *FootnoteHTMLRenderer) renderFootnoteLink(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
+ if entering {
+ n := node.(*FootnoteLink)
+ is := strconv.Itoa(n.Index)
+ _, _ = w.WriteString(`<sup id="fnref:`)
+ _, _ = w.Write(n.Name)
+ _, _ = w.WriteString(`"><a href="#fn:`)
+ _, _ = w.Write(n.Name)
+ _, _ = w.WriteString(`" class="footnote-ref" role="doc-noteref">`)
+ _, _ = w.WriteString(is)
+ _, _ = w.WriteString(`</a></sup>`)
+ }
+ return ast.WalkContinue, nil
+}
+
+func (r *FootnoteHTMLRenderer) renderFootnoteBackLink(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
+ if entering {
+ n := node.(*FootnoteBackLink)
+ _, _ = w.WriteString(` <a href="#fnref:`)
+ _, _ = w.Write(n.Name)
+ _, _ = w.WriteString(`" class="footnote-backref" role="doc-backlink">`)
+ _, _ = w.WriteString("↩︎")
+ _, _ = w.WriteString(`</a>`)
+ }
+ return ast.WalkContinue, nil
+}
+
+func (r *FootnoteHTMLRenderer) renderFootnote(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
+ n := node.(*Footnote)
+ if entering {
+ _, _ = w.WriteString(`<li id="fn:`)
+ _, _ = w.Write(n.Name)
+ _, _ = w.WriteString(`" role="doc-endnote"`)
+ if node.Attributes() != nil {
+ html.RenderAttributes(w, node, html.ListItemAttributeFilter)
+ }
+ _, _ = w.WriteString(">\n")
+ } else {
+ _, _ = w.WriteString("</li>\n")
+ }
+ return ast.WalkContinue, nil
+}
+
+func (r *FootnoteHTMLRenderer) renderFootnoteList(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
+ tag := "div"
+ if entering {
+ _, _ = w.WriteString("<")
+ _, _ = w.WriteString(tag)
+ _, _ = w.WriteString(` class="footnotes" role="doc-endnotes"`)
+ if node.Attributes() != nil {
+ html.RenderAttributes(w, node, html.GlobalAttributeFilter)
+ }
+ _ = w.WriteByte('>')
+ if r.Config.XHTML {
+ _, _ = w.WriteString("\n<hr />\n")
+ } else {
+ _, _ = w.WriteString("\n<hr>\n")
+ }
+ _, _ = w.WriteString("<ol>\n")
+ } else {
+ _, _ = w.WriteString("</ol>\n")
+ _, _ = w.WriteString("</")
+ _, _ = w.WriteString(tag)
+ _, _ = w.WriteString(">\n")
+ }
+ return ast.WalkContinue, nil
+}
+
+type footnoteExtension struct{}
+
+// FootnoteExtension represents the Gitea Footnote
+var FootnoteExtension = &footnoteExtension{}
+
+// Extend extends the markdown converter with the Gitea Footnote parser
+func (e *footnoteExtension) Extend(m goldmark.Markdown) {
+ m.Parser().AddOptions(
+ parser.WithBlockParsers(
+ util.Prioritized(NewFootnoteBlockParser(), 999),
+ ),
+ parser.WithInlineParsers(
+ util.Prioritized(NewFootnoteParser(), 101),
+ ),
+ parser.WithASTTransformers(
+ util.Prioritized(NewFootnoteASTTransformer(), 999),
+ ),
+ )
+ m.Renderer().AddOptions(renderer.WithNodeRenderers(
+ util.Prioritized(NewFootnoteHTMLRenderer(), 500),
+ ))
+}
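+
+// A minimal sketch of using the extension on its own (illustrative only,
+// assuming goldmark v1.1.x; inside Gitea it is wired up in markdown.go):
+//
+//	md := goldmark.New(goldmark.WithExtensions(FootnoteExtension))
+//	var buf bytes.Buffer
+//	_ = md.Convert([]byte("A note.[^1]\n\n[^1]: The footnote text.\n"), &buf)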
--- /dev/null
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package common
+
+import (
+ "mvdan.cc/xurls/v2"
+)
+
+var (
+ // NOTE: The regex matching below does not perform any extra validation,
+ // so a link is produced even if the linked entity does not exist.
+ // While fast, this is also incorrect and leads to false positives.
+ // TODO: fix invalid linking issue
+
+ // LinkRegex is a regexp matching a valid link
+ LinkRegex, _ = xurls.StrictMatchingScheme("https?://")
+)
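+
+// For example (illustrative only), LinkRegex.FindString("see https://gitea.io/docs")
+// returns "https://gitea.io/docs". Note that modules/markup/html.go may later
+// replace LinkRegex with a custom scheme list via xurls.StrictMatchingScheme.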
--- /dev/null
+// Copyright 2019 Yusuke Inuzuka
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+// Most of this file is a subtly changed version of github.com/yuin/goldmark/extension/linkify.go
+
+package common
+
+import (
+ "bytes"
+ "regexp"
+
+ "github.com/yuin/goldmark"
+ "github.com/yuin/goldmark/ast"
+ "github.com/yuin/goldmark/parser"
+ "github.com/yuin/goldmark/text"
+ "github.com/yuin/goldmark/util"
+)
+
+var wwwURLRegxp = regexp.MustCompile(`^www\.[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}((?:/|[#?])[-a-zA-Z0-9@:%_\+.~#!?&//=\(\);,'">\^{}\[\]` + "`" + `]*)?`)
+
+type linkifyParser struct {
+}
+
+var defaultLinkifyParser = &linkifyParser{}
+
+// NewLinkifyParser returns a new InlineParser that can parse
+// text that seems like a URL.
+func NewLinkifyParser() parser.InlineParser {
+ return defaultLinkifyParser
+}
+
+func (s *linkifyParser) Trigger() []byte {
+ // ' ' indicates any white spaces and a line head
+ return []byte{' ', '*', '_', '~', '('}
+}
+
+var protoHTTP = []byte("http:")
+var protoHTTPS = []byte("https:")
+var protoFTP = []byte("ftp:")
+var domainWWW = []byte("www.")
+
+func (s *linkifyParser) Parse(parent ast.Node, block text.Reader, pc parser.Context) ast.Node {
+ if pc.IsInLinkLabel() {
+ return nil
+ }
+ line, segment := block.PeekLine()
+ consumes := 0
+ start := segment.Start
+ c := line[0]
+ // advance if current position is not a line head.
+ if c == ' ' || c == '*' || c == '_' || c == '~' || c == '(' {
+ consumes++
+ start++
+ line = line[1:]
+ }
+
+ var m []int
+ var protocol []byte
+ var typ ast.AutoLinkType = ast.AutoLinkURL
+ if bytes.HasPrefix(line, protoHTTP) || bytes.HasPrefix(line, protoHTTPS) || bytes.HasPrefix(line, protoFTP) {
+ m = LinkRegex.FindSubmatchIndex(line)
+ }
+ if m == nil && bytes.HasPrefix(line, domainWWW) {
+ m = wwwURLRegxp.FindSubmatchIndex(line)
+ protocol = []byte("http")
+ }
+ if m != nil {
+ lastChar := line[m[1]-1]
+ if lastChar == '.' {
+ m[1]--
+ } else if lastChar == ')' {
+ closing := 0
+ for i := m[1] - 1; i >= m[0]; i-- {
+ if line[i] == ')' {
+ closing++
+ } else if line[i] == '(' {
+ closing--
+ }
+ }
+ if closing > 0 {
+ m[1] -= closing
+ }
+ } else if lastChar == ';' {
+ i := m[1] - 2
+ for ; i >= m[0]; i-- {
+ if util.IsAlphaNumeric(line[i]) {
+ continue
+ }
+ break
+ }
+ if i != m[1]-2 {
+ if line[i] == '&' {
+ m[1] -= m[1] - i
+ }
+ }
+ }
+ }
+ if m == nil {
+ if len(line) > 0 && util.IsPunct(line[0]) {
+ return nil
+ }
+ typ = ast.AutoLinkEmail
+ stop := util.FindEmailIndex(line)
+ if stop < 0 {
+ return nil
+ }
+ at := bytes.IndexByte(line, '@')
+ m = []int{0, stop, at, stop - 1}
+ if m == nil || bytes.IndexByte(line[m[2]:m[3]], '.') < 0 {
+ return nil
+ }
+ lastChar := line[m[1]-1]
+ if lastChar == '.' {
+ m[1]--
+ }
+ if m[1] < len(line) {
+ nextChar := line[m[1]]
+ if nextChar == '-' || nextChar == '_' {
+ return nil
+ }
+ }
+ }
+ if m == nil {
+ return nil
+ }
+ if consumes != 0 {
+ s := segment.WithStop(segment.Start + 1)
+ ast.MergeOrAppendTextSegment(parent, s)
+ }
+ consumes += m[1]
+ block.Advance(consumes)
+ n := ast.NewTextSegment(text.NewSegment(start, start+m[1]))
+ link := ast.NewAutoLink(typ, n)
+ link.Protocol = protocol
+ return link
+}
+
+func (s *linkifyParser) CloseBlock(parent ast.Node, pc parser.Context) {
+ // nothing to do
+}
+
+type linkify struct {
+}
+
+// Linkify is an extension that allows you to parse text that looks like a URL.
+var Linkify = &linkify{}
+
+func (e *linkify) Extend(m goldmark.Markdown) {
+ m.Parser().AddOptions(
+ parser.WithInlineParsers(
+ util.Prioritized(NewLinkifyParser(), 999),
+ ),
+ )
+}
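+
+// A minimal sketch of enabling the extension (illustrative only, assuming
+// goldmark v1.1.x):
+//
+//	md := goldmark.New(goldmark.WithExtensions(Linkify))
+//	var buf bytes.Buffer
+//	_ = md.Convert([]byte("See https://gitea.io for details.\n"), &buf)
+//	// buf now contains the paragraph with https://gitea.io rendered as an autolink.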
"code.gitea.io/gitea/modules/base"
"code.gitea.io/gitea/modules/git"
"code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/markup/common"
"code.gitea.io/gitea/modules/references"
"code.gitea.io/gitea/modules/setting"
"code.gitea.io/gitea/modules/util"
// https://html.spec.whatwg.org/multipage/input.html#e-mail-state-(type%3Demail)
emailRegex = regexp.MustCompile("(?:\\s|^|\\(|\\[)([a-zA-Z0-9.!#$%&'*+\\/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\\.[a-zA-Z0-9]{2,}(?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)+)(?:\\s|$|\\)|\\]|\\.(\\s|$))")
- linkRegex, _ = xurls.StrictMatchingScheme("https?://")
-
// blackfriday extensions create IDs like fn:user-content-footnote
blackfridayExtRegex = regexp.MustCompile(`[^:]*:user-content-`)
)
}
withAuth = append(withAuth, s)
}
- linkRegex, _ = xurls.StrictMatchingScheme(strings.Join(withAuth, "|"))
+ common.LinkRegex, _ = xurls.StrictMatchingScheme(strings.Join(withAuth, "|"))
}
// IsSameDomain checks if given url string has the same hostname as current Gitea instance
(strings.HasPrefix(val, "‘") && strings.HasSuffix(val, "’")) {
const lenQuote = len("‘")
val = val[lenQuote : len(val)-lenQuote]
+ } else if (strings.HasPrefix(val, "\"") && strings.HasSuffix(val, "\"")) ||
+ (strings.HasPrefix(val, "'") && strings.HasSuffix(val, "'")) {
+ val = val[1 : len(val)-1]
+ } else if strings.HasPrefix(val, "'") && strings.HasSuffix(val, "’") {
+ const lenQuote = len("‘")
+ val = val[1 : len(val)-lenQuote]
}
props[key] = val
}
// linkProcessor creates links for any HTTP or HTTPS URL not captured by
// markdown.
func linkProcessor(ctx *postProcessCtx, node *html.Node) {
- m := linkRegex.FindStringIndex(node.Data)
+ m := common.LinkRegex.FindStringIndex(node.Data)
if m == nil {
return
}
// descriptionLinkProcessor creates links for DescriptionHTML
func descriptionLinkProcessor(ctx *postProcessCtx, node *html.Node) {
- m := linkRegex.FindStringIndex(node.Data)
+ m := common.LinkRegex.FindStringIndex(node.Data)
if m == nil {
return
}
`<p><a href="`+notencodedImgurlWiki+`" rel="nofollow"><img src="`+notencodedImgurlWiki+`"/></a></p>`)
test(
"<p><a href=\"https://example.org\">[[foobar]]</a></p>",
- `<p></p><p><a href="https://example.org" rel="nofollow">[[foobar]]</a></p><p></p>`,
- `<p></p><p><a href="https://example.org" rel="nofollow">[[foobar]]</a></p><p></p>`)
+ `<p><a href="https://example.org" rel="nofollow">[[foobar]]</a></p>`,
+ `<p><a href="https://example.org" rel="nofollow">[[foobar]]</a></p>`)
}
--- /dev/null
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package markdown
+
+import (
+ "bytes"
+ "fmt"
+ "strings"
+
+ "code.gitea.io/gitea/modules/markup"
+ "code.gitea.io/gitea/modules/markup/common"
+ giteautil "code.gitea.io/gitea/modules/util"
+
+ "github.com/yuin/goldmark/ast"
+ east "github.com/yuin/goldmark/extension/ast"
+ "github.com/yuin/goldmark/parser"
+ "github.com/yuin/goldmark/renderer"
+ "github.com/yuin/goldmark/renderer/html"
+ "github.com/yuin/goldmark/text"
+ "github.com/yuin/goldmark/util"
+)
+
+var byteMailto = []byte("mailto:")
+
+// GiteaASTTransformer is a default transformer of the goldmark tree.
+type GiteaASTTransformer struct{}
+
+// Transform transforms the given AST tree.
+func (g *GiteaASTTransformer) Transform(node *ast.Document, reader text.Reader, pc parser.Context) {
+ _ = ast.Walk(node, func(n ast.Node, entering bool) (ast.WalkStatus, error) {
+ if !entering {
+ return ast.WalkContinue, nil
+ }
+
+ switch v := n.(type) {
+ case *ast.Image:
+ // Images need two things:
+ //
+ // 1. Their src needs to be munged into a real value
+ // 2. If they're not wrapped with a link they need a link wrapper
+
+ // Check if the destination is a real link
+ link := v.Destination
+ if len(link) > 0 && !markup.IsLink(link) {
+ prefix := pc.Get(urlPrefixKey).(string)
+ if pc.Get(isWikiKey).(bool) {
+ prefix = giteautil.URLJoin(prefix, "wiki", "raw")
+ }
+ prefix = strings.Replace(prefix, "/src/", "/media/", 1)
+
+ lnk := string(link)
+ lnk = giteautil.URLJoin(prefix, lnk)
+ lnk = strings.Replace(lnk, " ", "+", -1)
+ link = []byte(lnk)
+ }
+ v.Destination = link
+
+ parent := n.Parent()
+ // Create a link around image only if parent is not already a link
+ if _, ok := parent.(*ast.Link); !ok && parent != nil {
+ wrap := ast.NewLink()
+ wrap.Destination = link
+ wrap.Title = v.Title
+ parent.ReplaceChild(parent, n, wrap)
+ wrap.AppendChild(wrap, n)
+ }
+ case *ast.Link:
+ // Links need their href to be munged into a real value
+ link := v.Destination
+ if len(link) > 0 && !markup.IsLink(link) &&
+ link[0] != '#' && !bytes.HasPrefix(link, byteMailto) {
+ // special case: this is not a link, a hash link or a mailto:, so it's a
+ // relative URL
+ lnk := string(link)
+ if pc.Get(isWikiKey).(bool) {
+ lnk = giteautil.URLJoin("wiki", lnk)
+ }
+ link = []byte(giteautil.URLJoin(pc.Get(urlPrefixKey).(string), lnk))
+ }
+ v.Destination = link
+ }
+ return ast.WalkContinue, nil
+ })
+}
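+
+// As an illustration (not part of the transformer itself), with a hypothetical
+// urlPrefix of "https://example.com/owner/repo/src/branch/master" and
+// isWiki=false, a relative image destination "image.png" ends up as
+// "https://example.com/owner/repo/media/branch/master/image.png" and is wrapped
+// in a link to that URL, while a relative link "docs/help.md" becomes
+// "https://example.com/owner/repo/src/branch/master/docs/help.md".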
+
+type prefixedIDs struct {
+ values map[string]bool
+}
+
+// Generate generates a new element id.
+func (p *prefixedIDs) Generate(value []byte, kind ast.NodeKind) []byte {
+ dft := []byte("id")
+ if kind == ast.KindHeading {
+ dft = []byte("heading")
+ }
+ return p.GenerateWithDefault(value, dft)
+}
+
+// GenerateWithDefault generates a new element id, using dft if the cleaned value is empty.
+func (p *prefixedIDs) GenerateWithDefault(value []byte, dft []byte) []byte {
+ result := common.CleanValue(value)
+ if len(result) == 0 {
+ result = dft
+ }
+ if !bytes.HasPrefix(result, []byte("user-content-")) {
+ result = append([]byte("user-content-"), result...)
+ }
+ if _, ok := p.values[util.BytesToReadOnlyString(result)]; !ok {
+ p.values[util.BytesToReadOnlyString(result)] = true
+ return result
+ }
+ for i := 1; ; i++ {
+ newResult := fmt.Sprintf("%s-%d", result, i)
+ if _, ok := p.values[newResult]; !ok {
+ p.values[newResult] = true
+ return []byte(newResult)
+ }
+ }
+}
+
+// Put puts a given element id to the used ids table.
+func (p *prefixedIDs) Put(value []byte) {
+ p.values[util.BytesToReadOnlyString(value)] = true
+}
+
+func newPrefixedIDs() *prefixedIDs {
+ return &prefixedIDs{
+ values: map[string]bool{},
+ }
+}
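+
+// A minimal sketch of the resulting behaviour (illustrative only):
+//
+//	ids := newPrefixedIDs()
+//	ids.Generate([]byte("My Heading"), ast.KindHeading) // -> "user-content-my-heading"
+//	ids.Generate([]byte("My Heading"), ast.KindHeading) // -> "user-content-my-heading-1"
+//	ids.Generate(nil, ast.KindHeading)                  // -> "user-content-heading"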
+
+// NewTaskCheckBoxHTMLRenderer creates a TaskCheckBoxHTMLRenderer to render tasklists
+// in the gitea form.
+func NewTaskCheckBoxHTMLRenderer(opts ...html.Option) renderer.NodeRenderer {
+ r := &TaskCheckBoxHTMLRenderer{
+ Config: html.NewConfig(),
+ }
+ for _, opt := range opts {
+ opt.SetHTMLOption(&r.Config)
+ }
+ return r
+}
+
+// TaskCheckBoxHTMLRenderer is a renderer.NodeRenderer implementation that
+// renders checkboxes in list items.
+// Overrides the default goldmark one to present the gitea format
+type TaskCheckBoxHTMLRenderer struct {
+ html.Config
+}
+
+// RegisterFuncs implements renderer.NodeRenderer.RegisterFuncs.
+func (r *TaskCheckBoxHTMLRenderer) RegisterFuncs(reg renderer.NodeRendererFuncRegisterer) {
+ reg.Register(east.KindTaskCheckBox, r.renderTaskCheckBox)
+}
+
+func (r *TaskCheckBoxHTMLRenderer) renderTaskCheckBox(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
+ if !entering {
+ return ast.WalkContinue, nil
+ }
+ n := node.(*east.TaskCheckBox)
+
+ end := ">"
+ if r.XHTML {
+ end = " />"
+ }
+ var err error
+ if n.IsChecked {
+ _, err = w.WriteString(`<span class="ui checked fitted disabled checkbox"><input type="checkbox" checked="" disabled="disabled"` + end + `<label` + end + `</span>`)
+ } else {
+ _, err = w.WriteString(`<span class="ui fitted disabled checkbox"><input type="checkbox" disabled="disabled"` + end + `<label` + end + `</span>`)
+ }
+ if err != nil {
+ return ast.WalkStop, err
+ }
+ return ast.WalkContinue, nil
+}
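+
+// For example (illustrative only), a checked task item written as `- [x] done`
+// is rendered roughly as:
+//
+//	<span class="ui checked fitted disabled checkbox"><input type="checkbox" checked="" disabled="disabled"><label></span>done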
import (
"bytes"
- "io"
- "strings"
+ "sync"
+ "code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/markup"
+ "code.gitea.io/gitea/modules/markup/common"
"code.gitea.io/gitea/modules/setting"
- "code.gitea.io/gitea/modules/util"
-
- "github.com/russross/blackfriday/v2"
+ giteautil "code.gitea.io/gitea/modules/util"
+
+ "github.com/yuin/goldmark"
+ "github.com/yuin/goldmark/extension"
+ "github.com/yuin/goldmark/parser"
+ "github.com/yuin/goldmark/renderer"
+ "github.com/yuin/goldmark/renderer/html"
+ "github.com/yuin/goldmark/util"
)
-// Renderer is a extended version of underlying render object.
-type Renderer struct {
- blackfriday.Renderer
- URLPrefix string
- IsWiki bool
-}
+var converter goldmark.Markdown
+var once = sync.Once{}
-var byteMailto = []byte("mailto:")
+var urlPrefixKey = parser.NewContextKey()
+var isWikiKey = parser.NewContextKey()
-var htmlEscaper = [256][]byte{
- '&': []byte("&amp;"),
- '<': []byte("&lt;"),
- '>': []byte("&gt;"),
- '"': []byte("&quot;"),
+// NewGiteaParseContext creates a parser.Context with the gitea context set
+func NewGiteaParseContext(urlPrefix string, isWiki bool) parser.Context {
+ pc := parser.NewContext(parser.WithIDs(newPrefixedIDs()))
+ pc.Set(urlPrefixKey, urlPrefix)
+ pc.Set(isWikiKey, isWiki)
+ return pc
}
-func escapeHTML(w io.Writer, s []byte) {
- var start, end int
- for end < len(s) {
- escSeq := htmlEscaper[s[end]]
- if escSeq != nil {
- _, _ = w.Write(s[start:end])
- _, _ = w.Write(escSeq)
- start = end + 1
- }
- end++
- }
- if start < len(s) && end <= len(s) {
- _, _ = w.Write(s[start:end])
- }
-}
-
-// RenderNode is a default renderer of a single node of a syntax tree. For
-// block nodes it will be called twice: first time with entering=true, second
-// time with entering=false, so that it could know when it's working on an open
-// tag and when on close. It writes the result to w.
-//
-// The return value is a way to tell the calling walker to adjust its walk
-// pattern: e.g. it can terminate the traversal by returning Terminate. Or it
-// can ask the walker to skip a subtree of this node by returning SkipChildren.
-// The typical behavior is to return GoToNext, which asks for the usual
-// traversal to the next node.
-func (r *Renderer) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
- switch node.Type {
- case blackfriday.Image:
- prefix := r.URLPrefix
- if r.IsWiki {
- prefix = util.URLJoin(prefix, "wiki", "raw")
- }
- prefix = strings.Replace(prefix, "/src/", "/media/", 1)
- link := node.LinkData.Destination
- if len(link) > 0 && !markup.IsLink(link) {
- lnk := string(link)
- lnk = util.URLJoin(prefix, lnk)
- lnk = strings.Replace(lnk, " ", "+", -1)
- link = []byte(lnk)
- }
- node.LinkData.Destination = link
- // Render link around image only if parent is not link already
- if node.Parent != nil && node.Parent.Type != blackfriday.Link {
- if entering {
- _, _ = w.Write([]byte(`<a href="`))
- escapeHTML(w, link)
- _, _ = w.Write([]byte(`">`))
- return r.Renderer.RenderNode(w, node, entering)
- }
- s := r.Renderer.RenderNode(w, node, entering)
- _, _ = w.Write([]byte(`</a>`))
- return s
- }
- return r.Renderer.RenderNode(w, node, entering)
- case blackfriday.Link:
- // special case: this is not a link, a hash link or a mailto:, so it's a
- // relative URL
- link := node.LinkData.Destination
- if len(link) > 0 && !markup.IsLink(link) &&
- link[0] != '#' && !bytes.HasPrefix(link, byteMailto) &&
- node.LinkData.Footnote == nil {
- lnk := string(link)
- if r.IsWiki {
- lnk = util.URLJoin("wiki", lnk)
- }
- link = []byte(util.URLJoin(r.URLPrefix, lnk))
- }
- node.LinkData.Destination = link
- return r.Renderer.RenderNode(w, node, entering)
- case blackfriday.Text:
- isListItem := false
- for n := node.Parent; n != nil; n = n.Parent {
- if n.Type == blackfriday.Item {
- isListItem = true
- break
- }
- }
- if isListItem {
- text := node.Literal
- switch {
- case bytes.HasPrefix(text, []byte("[ ] ")):
- _, _ = w.Write([]byte(`<span class="ui fitted disabled checkbox"><input type="checkbox" disabled="disabled" /><label /></span>`))
- text = text[3:]
- case bytes.HasPrefix(text, []byte("[x] ")):
- _, _ = w.Write([]byte(`<span class="ui checked fitted disabled checkbox"><input type="checkbox" checked="" disabled="disabled" /><label /></span>`))
- text = text[3:]
- }
- node.Literal = text
- }
- }
- return r.Renderer.RenderNode(w, node, entering)
-}
-
-const (
- blackfridayExtensions = 0 |
- blackfriday.NoIntraEmphasis |
- blackfriday.Tables |
- blackfriday.FencedCode |
- blackfriday.Strikethrough |
- blackfriday.NoEmptyLineBeforeBlock |
- blackfriday.DefinitionLists |
- blackfriday.Footnotes |
- blackfriday.HeadingIDs |
- blackfriday.AutoHeadingIDs
- blackfridayHTMLFlags = 0 |
- blackfriday.Smartypants
-)
-
// RenderRaw renders Markdown to HTML without handling special links.
func RenderRaw(body []byte, urlPrefix string, wikiMarkdown bool) []byte {
- renderer := &Renderer{
- Renderer: blackfriday.NewHTMLRenderer(blackfriday.HTMLRendererParameters{
- Flags: blackfridayHTMLFlags,
- FootnoteAnchorPrefix: "user-content-",
- HeadingIDPrefix: "user-content-",
- }),
- URLPrefix: urlPrefix,
- IsWiki: wikiMarkdown,
- }
+ once.Do(func() {
+ converter = goldmark.New(
+ goldmark.WithExtensions(extension.Table,
+ extension.Strikethrough,
+ extension.TaskList,
+ extension.DefinitionList,
+ common.FootnoteExtension,
+ extension.NewTypographer(
+ extension.WithTypographicSubstitutions(extension.TypographicSubstitutions{
+ extension.EnDash: nil,
+ extension.EmDash: nil,
+ }),
+ ),
+ ),
+ goldmark.WithParserOptions(
+ parser.WithAttribute(),
+ parser.WithAutoHeadingID(),
+ parser.WithASTTransformers(
+ util.Prioritized(&GiteaASTTransformer{}, 10000),
+ ),
+ ),
+ goldmark.WithRendererOptions(
+ html.WithUnsafe(),
+ ),
+ )
+
+ // Override the original Tasklist renderer!
+ converter.Renderer().AddOptions(
+ renderer.WithNodeRenderers(
+ util.Prioritized(NewTaskCheckBoxHTMLRenderer(), 1000),
+ ),
+ )
+
+ if setting.Markdown.EnableHardLineBreak {
+ converter.Renderer().AddOptions(html.WithHardWraps())
+ }
+ })
- exts := blackfridayExtensions
- if setting.Markdown.EnableHardLineBreak {
- exts |= blackfriday.HardLineBreak
+ pc := NewGiteaParseContext(urlPrefix, wikiMarkdown)
+ var buf bytes.Buffer
+ if err := converter.Convert(giteautil.NormalizeEOL(body), &buf, parser.WithContext(pc)); err != nil {
+ log.Error("Unable to render: %v", err)
}
- // Need to normalize EOL to UNIX LF to have consistent results in rendering
- body = blackfriday.Run(util.NormalizeEOL(body), blackfriday.WithRenderer(renderer), blackfriday.WithExtensions(exts))
- return markup.SanitizeBytes(body)
+ return markup.SanitizeReader(&buf).Bytes()
}
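+
+// A minimal sketch of calling RenderRaw (illustrative only; the URL prefix is a
+// hypothetical repository source URL):
+//
+//	out := RenderRaw([]byte("# Title\n\nSome **markdown**.\n"),
+//		"https://example.com/owner/repo/src/branch/master", false)
+//	_ = out // sanitized HTML bytes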
var (
}
// Parser implements markup.Parser
-type Parser struct {
-}
+type Parser struct{}
// Name implements markup.Parser
func (Parser) Name() string {
func testAnswers(baseURLContent, baseURLImages string) []string {
return []string{
`<p>Wiki! Enjoy :)</p>
-
<ul>
<li><a href="` + baseURLContent + `/Links" rel="nofollow">Links, Language bindings, Engine bindings</a></li>
<li><a href="` + baseURLContent + `/Tips" rel="nofollow">Tips</a></li>
</ul>
-
<p>See commit <a href="http://localhost:3000/gogits/gogs/commit/65f1bf27bc" rel="nofollow"><code>65f1bf27bc</code></a></p>
-
<p>Ideas and codes</p>
-
<ul>
<li>Bezier widget (by <a href="` + AppURL + `r-lyeh" rel="nofollow">@r-lyeh</a>) <a href="http://localhost:3000/ocornut/imgui/issues/786" rel="nofollow">ocornut/imgui#786</a></li>
<li>Bezier widget (by <a href="` + AppURL + `r-lyeh" rel="nofollow">@r-lyeh</a>) <a href="http://localhost:3000/gogits/gogs/issues/786" rel="nofollow">#786</a></li>
</ul>
`,
`<h2 id="user-content-what-is-wine-staging">What is Wine Staging?</h2>
-
<p><strong>Wine Staging</strong> on website <a href="http://wine-staging.com" rel="nofollow">wine-staging.com</a>.</p>
-
<h2 id="user-content-quick-links">Quick Links</h2>
-
<p>Here are some links to the most important topics. You can find the full list of pages at the sidebar.</p>
-
<table>
<thead>
<tr>
<th><a href="` + baseURLContent + `/Installation" rel="nofollow">Installation</a></th>
</tr>
</thead>
-
<tbody>
<tr>
<td><a href="` + baseURLImages + `/images/icon-usage.png" rel="nofollow"><img src="` + baseURLImages + `/images/icon-usage.png" title="icon-usage.png" alt="images/icon-usage.png"/></a></td>
</table>
`,
`<p><a href="http://www.excelsiorjet.com/" rel="nofollow">Excelsior JET</a> allows you to create native executables for Windows, Linux and Mac OS X.</p>
-
<ol>
<li><a href="https://github.com/libgdx/libgdx/wiki/Gradle-on-the-Commandline#packaging-for-the-desktop" rel="nofollow">Package your libGDX application</a>
<a href="` + baseURLImages + `/images/1.png" rel="nofollow"><img src="` + baseURLImages + `/images/1.png" title="1.png" alt="images/1.png"/></a></li>
<li>Perform a test run by hitting the Run! button.
<a href="` + baseURLImages + `/images/2.png" rel="nofollow"><img src="` + baseURLImages + `/images/2.png" title="2.png" alt="images/2.png"/></a></li>
</ol>
-
<h2 id="user-content-custom-id">More tests</h2>
-
<p>(from <a href="https://www.markdownguide.org/extended-syntax/" rel="nofollow">https://www.markdownguide.org/extended-syntax/</a>)</p>
-
<h3 id="user-content-definition-list">Definition list</h3>
-
<dl>
<dt>First Term</dt>
<dd>This is the definition of the first term.</dd>
<dd>This is one definition of the second term.</dd>
<dd>This is another definition of the second term.</dd>
</dl>
-
<h3 id="user-content-footnotes">Footnotes</h3>
-
<p>Here is a simple footnote,<sup id="fnref:user-content-1"><a href="#fn:user-content-1" rel="nofollow">1</a></sup> and here is a longer one.<sup id="fnref:user-content-bignote"><a href="#fn:user-content-bignote" rel="nofollow">2</a></sup></p>
-
<div>
-
<hr/>
-
<ol>
-<li id="fn:user-content-1">This is the first footnote.</li>
-
-<li id="fn:user-content-bignote"><p>Here is one with multiple paragraphs and code.</p>
-
+<li id="fn:user-content-1">
+<p>This is the first footnote. <a href="#fnref:user-content-1" rel="nofollow">↩︎</a></p>
+</li>
+<li id="fn:user-content-bignote">
+<p>Here is one with multiple paragraphs and code.</p>
<p>Indent paragraphs to include them in the footnote.</p>
-
<p><code>{ my code }</code></p>
-
-<p>Add as many paragraphs as you like.</p></li>
+<p>Add as many paragraphs as you like. <a href="#fnref:user-content-bignote" rel="nofollow">↩︎</a></p>
+</li>
</ol>
-
</div>
`,
}
test := func(t *testing.T, str string, cnt int) {
unix := []byte(str)
res := string(RenderRaw(unix, "", false))
- assert.Equal(t, strings.Count(res, "<p"), cnt)
+ assert.Equal(t, strings.Count(res, "<p"), cnt, "Rendered result for unix should have %d paragraph(s) but has %d:\n%s\n", cnt, strings.Count(res, "<p"), res)
mac := []byte(strings.ReplaceAll(str, "\n", "\r"))
res = string(RenderRaw(mac, "", false))
- assert.Equal(t, strings.Count(res, "<p"), cnt)
+ assert.Equal(t, strings.Count(res, "<p"), cnt, "Rendered result for mac should have %d paragraph(s) but has %d:\n%s\n", cnt, strings.Count(res, "<p"), res)
dos := []byte(strings.ReplaceAll(str, "\n", "\r\n"))
res = string(RenderRaw(dos, "", false))
- assert.Equal(t, strings.Count(res, "<p"), cnt)
+ assert.Equal(t, strings.Count(res, "<p"), cnt, "Rendered result for windows should have %d paragraph(s) but has %d:\n%s\n", cnt, strings.Count(res, "<p"), res)
}
test(t, "\nOne\nTwo\nThree", 1)
import (
"bytes"
- "io"
+ "sync"
- "github.com/russross/blackfriday/v2"
-)
+ "io"
-// MarkdownStripper extends blackfriday.Renderer
-type MarkdownStripper struct {
- links []string
- coallesce bool
- empty bool
-}
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/markup/common"
-const (
- blackfridayExtensions = 0 |
- blackfriday.NoIntraEmphasis |
- blackfriday.Tables |
- blackfriday.FencedCode |
- blackfriday.Strikethrough |
- blackfriday.NoEmptyLineBeforeBlock |
- blackfriday.DefinitionLists |
- blackfriday.Footnotes |
- blackfriday.HeadingIDs |
- blackfriday.AutoHeadingIDs |
- // Not included in modules/markup/markdown/markdown.go;
- // required here to process inline links
- blackfriday.Autolink
+ "github.com/yuin/goldmark"
+ "github.com/yuin/goldmark/ast"
+ "github.com/yuin/goldmark/extension"
+ "github.com/yuin/goldmark/parser"
+ "github.com/yuin/goldmark/renderer"
+ "github.com/yuin/goldmark/renderer/html"
+ "github.com/yuin/goldmark/text"
)
-// StripMarkdown parses markdown content by removing all markup and code blocks
-// in order to extract links and other references
-func StripMarkdown(rawBytes []byte) (string, []string) {
- buf, links := StripMarkdownBytes(rawBytes)
- return string(buf), links
+type stripRenderer struct {
+ links []string
+ empty bool
}
-// StripMarkdownBytes parses markdown content by removing all markup and code blocks
-// in order to extract links and other references
-func StripMarkdownBytes(rawBytes []byte) ([]byte, []string) {
- stripper := &MarkdownStripper{
- links: make([]string, 0, 10),
- empty: true,
- }
-
- parser := blackfriday.New(blackfriday.WithRenderer(stripper), blackfriday.WithExtensions(blackfridayExtensions))
- ast := parser.Parse(rawBytes)
- var buf bytes.Buffer
- stripper.RenderHeader(&buf, ast)
- ast.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
- return stripper.RenderNode(&buf, node, entering)
+func (r *stripRenderer) Render(w io.Writer, source []byte, doc ast.Node) error {
+ return ast.Walk(doc, func(n ast.Node, entering bool) (ast.WalkStatus, error) {
+ if !entering {
+ return ast.WalkContinue, nil
+ }
+ switch v := n.(type) {
+ case *ast.Text:
+ if !v.IsRaw() {
+ _, prevSibIsText := n.PreviousSibling().(*ast.Text)
+ coalesce := prevSibIsText
+ r.processString(
+ w,
+ v.Text(source),
+ coalesce)
+ if v.SoftLineBreak() {
+ r.doubleSpace(w)
+ }
+ }
+ return ast.WalkContinue, nil
+ case *ast.Link:
+ r.processLink(w, v.Destination)
+ return ast.WalkSkipChildren, nil
+ case *ast.AutoLink:
+ r.processLink(w, v.URL(source))
+ return ast.WalkSkipChildren, nil
+ }
+ return ast.WalkContinue, nil
})
- stripper.RenderFooter(&buf, ast)
- return buf.Bytes(), stripper.GetLinks()
-}
-
-// RenderNode is the main rendering method. It will be called once for
-// every leaf node and twice for every non-leaf node (first with
-// entering=true, then with entering=false). The method should write its
-// rendition of the node to the supplied writer w.
-func (r *MarkdownStripper) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
- if !entering {
- return blackfriday.GoToNext
- }
- switch node.Type {
- case blackfriday.Text:
- r.processString(w, node.Literal, node.Parent == nil)
- return blackfriday.GoToNext
- case blackfriday.Link:
- r.processLink(w, node.LinkData.Destination)
- r.coallesce = false
- return blackfriday.SkipChildren
- }
- r.coallesce = false
- return blackfriday.GoToNext
-}
-
-// RenderHeader is a method that allows the renderer to produce some
-// content preceding the main body of the output document.
-func (r *MarkdownStripper) RenderHeader(w io.Writer, ast *blackfriday.Node) {
}
-// RenderFooter is a symmetric counterpart of RenderHeader.
-func (r *MarkdownStripper) RenderFooter(w io.Writer, ast *blackfriday.Node) {
-}
-
-func (r *MarkdownStripper) doubleSpace(w io.Writer) {
+func (r *stripRenderer) doubleSpace(w io.Writer) {
if !r.empty {
_, _ = w.Write([]byte{'\n'})
}
}
-func (r *MarkdownStripper) processString(w io.Writer, text []byte, coallesce bool) {
+func (r *stripRenderer) processString(w io.Writer, text []byte, coalesce bool) {
// Always break-up words
- if !coallesce || !r.coallesce {
+ if !coalesce {
r.doubleSpace(w)
}
_, _ = w.Write(text)
- r.coallesce = coallesce
r.empty = false
}
-func (r *MarkdownStripper) processLink(w io.Writer, link []byte) {
+func (r *stripRenderer) processLink(w io.Writer, link []byte) {
// Links are processed out of band
r.links = append(r.links, string(link))
- r.coallesce = false
}
// GetLinks returns the list of link data collected while parsing
-func (r *MarkdownStripper) GetLinks() []string {
+func (r *stripRenderer) GetLinks() []string {
return r.links
}
+
+// AddOptions adds given option to this renderer.
+func (r *stripRenderer) AddOptions(...renderer.Option) {
+ // no-op
+}
+
+// StripMarkdown parses markdown content by removing all markup and code blocks
+// in order to extract links and other references
+func StripMarkdown(rawBytes []byte) (string, []string) {
+ buf, links := StripMarkdownBytes(rawBytes)
+ return string(buf), links
+}
+
+var stripParser parser.Parser
+var once = sync.Once{}
+
+// StripMarkdownBytes parses markdown content by removing all markup and code blocks
+// in order to extract links and other references
+func StripMarkdownBytes(rawBytes []byte) ([]byte, []string) {
+ once.Do(func() {
+ gdMarkdown := goldmark.New(
+ goldmark.WithExtensions(extension.Table,
+ extension.Strikethrough,
+ extension.TaskList,
+ extension.DefinitionList,
+ common.FootnoteExtension,
+ common.Linkify,
+ ),
+ goldmark.WithParserOptions(
+ parser.WithAttribute(),
+ parser.WithAutoHeadingID(),
+ ),
+ goldmark.WithRendererOptions(
+ html.WithUnsafe(),
+ ),
+ )
+ stripParser = gdMarkdown.Parser()
+ })
+ stripper := &stripRenderer{
+ links: make([]string, 0, 10),
+ empty: true,
+ }
+ reader := text.NewReader(rawBytes)
+ doc := stripParser.Parse(reader)
+ var buf bytes.Buffer
+ if err := stripper.Render(&buf, rawBytes, doc); err != nil {
+ log.Error("Unable to strip: %v", err)
+ }
+ return buf.Bytes(), stripper.GetLinks()
+}
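+
+// A minimal sketch of its use (illustrative only):
+//
+//	text, links := StripMarkdown([]byte("See [docs](https://example.com/docs).\n"))
+//	// text roughly contains the plain words; links contains "https://example.com/docs".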
[]string{
"link",
}},
+ {
+ "Simply closes: #29 yes",
+ []string{
+ "Simply closes: #29 yes",
+ },
+ []string{},
+ },
+ {
+ "Simply closes: !29 yes",
+ []string{
+ "Simply closes: !29 yes",
+ },
+ []string{},
+ },
}
for _, test := range list {
package markup
import (
+ "bytes"
+ "io"
"regexp"
"sync"
return sanitizer.policy.Sanitize(s)
}
+// SanitizeReader sanitizes a Reader
+func SanitizeReader(r io.Reader) *bytes.Buffer {
+ NewSanitizer()
+ return sanitizer.policy.SanitizeReader(r)
+}
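+
+// A minimal sketch of its use (illustrative only):
+//
+//	buf := SanitizeReader(strings.NewReader(`<a href="https://example.com" onclick="evil()">x</a>`))
+//	// buf.String() keeps the link but drops the onclick attribute.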
+
// SanitizeBytes takes a []byte slice that contains a HTML fragment or document and applies policy whitelist.
func SanitizeBytes(b []byte) []byte {
if len(b) == 0 {
{29, "", "", "29", true, XRefActionCloses, &RefSpan{Start: 15, End: 18}, &RefSpan{Start: 7, End: 13}},
},
},
- {
- "#123 no, this is a title.",
- []testResult{},
- },
{
" #124 yes, this is a reference.",
[]testResult{
- Bezier widget (by @r-lyeh) https://github.com/ocornut/imgui/issues/786`,
// rendered
`<p>Wiki! Enjoy :)</p>
-
<ul>
<li><a href="` + AppSubURL + `wiki/Links" rel="nofollow">Links, Language bindings, Engine bindings</a></li>
<li><a href="` + AppSubURL + `wiki/Tips" rel="nofollow">Tips</a></li>
`,
// rendered
`<h2 id="user-content-what-is-wine-staging">What is Wine Staging?</h2>
-
<p><strong>Wine Staging</strong> on website <a href="http://wine-staging.com" rel="nofollow">wine-staging.com</a>.</p>
-
<h2 id="user-content-quick-links">Quick Links</h2>
-
<p>Here are some links to the most important topics. You can find the full list of pages at the sidebar.</p>
-
<p><a href="` + AppSubURL + `wiki/Configuration" rel="nofollow">Configuration</a>
<a href="` + AppSubURL + `wiki/raw/images/icon-bug.png" rel="nofollow"><img src="` + AppSubURL + `wiki/raw/images/icon-bug.png" title="icon-bug.png" alt="images/icon-bug.png"/></a></p>
`,
+++ /dev/null
-*.out
-*.swp
-*.8
-*.6
-_obj
-_test*
-markdown
-tags
+++ /dev/null
-sudo: false
-language: go
-go:
- - "1.10.x"
- - "1.11.x"
- - tip
-matrix:
- fast_finish: true
- allow_failures:
- - go: tip
-install:
- - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
-script:
- - go get -t -v ./...
- - diff -u <(echo -n) <(gofmt -d -s .)
- - go tool vet .
- - go test -v ./...
+++ /dev/null
-Blackfriday is distributed under the Simplified BSD License:
-
-> Copyright © 2011 Russ Ross
-> All rights reserved.
->
-> Redistribution and use in source and binary forms, with or without
-> modification, are permitted provided that the following conditions
-> are met:
->
-> 1. Redistributions of source code must retain the above copyright
-> notice, this list of conditions and the following disclaimer.
->
-> 2. Redistributions in binary form must reproduce the above
-> copyright notice, this list of conditions and the following
-> disclaimer in the documentation and/or other materials provided with
-> the distribution.
->
-> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-> "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-> LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-> FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-> COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-> INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-> BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-> LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-> CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-> LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
-> ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-> POSSIBILITY OF SUCH DAMAGE.
+++ /dev/null
-Blackfriday [![Build Status](https://travis-ci.org/russross/blackfriday.svg?branch=master)](https://travis-ci.org/russross/blackfriday)
-===========
-
-Blackfriday is a [Markdown][1] processor implemented in [Go][2]. It
-is paranoid about its input (so you can safely feed it user-supplied
-data), it is fast, it supports common extensions (tables, smart
-punctuation substitutions, etc.), and it is safe for all utf-8
-(unicode) input.
-
-HTML output is currently supported, along with Smartypants
-extensions.
-
-It started as a translation from C of [Sundown][3].
-
-
-Installation
-------------
-
-Blackfriday is compatible with any modern Go release. With Go 1.7 and git
-installed:
-
- go get gopkg.in/russross/blackfriday.v2
-
-will download, compile, and install the package into your `$GOPATH`
-directory hierarchy. Alternatively, you can achieve the same if you
-import it into a project:
-
- import "gopkg.in/russross/blackfriday.v2"
-
-and `go get` without parameters.
-
-
-Versions
---------
-
-Currently maintained and recommended version of Blackfriday is `v2`. It's being
-developed on its own branch: https://github.com/russross/blackfriday/tree/v2 and the
-documentation is available at
-https://godoc.org/gopkg.in/russross/blackfriday.v2.
-
-It is `go get`-able via via [gopkg.in][6] at `gopkg.in/russross/blackfriday.v2`,
-but we highly recommend using package management tool like [dep][7] or
-[Glide][8] and make use of semantic versioning. With package management you
-should import `github.com/russross/blackfriday` and specify that you're using
-version 2.0.0.
-
-Version 2 offers a number of improvements over v1:
-
-* Cleaned up API
-* A separate call to [`Parse`][4], which produces an abstract syntax tree for
- the document
-* Latest bug fixes
-* Flexibility to easily add your own rendering extensions
-
-Potential drawbacks:
-
-* Our benchmarks show v2 to be slightly slower than v1. Currently in the
- ballpark of around 15%.
-* API breakage. If you can't afford modifying your code to adhere to the new API
- and don't care too much about the new features, v2 is probably not for you.
-* Several bug fixes are trailing behind and still need to be forward-ported to
- v2. See issue [#348](https://github.com/russross/blackfriday/issues/348) for
- tracking.
-
-Usage
------
-
-For the most sensible markdown processing, it is as simple as getting your input
-into a byte slice and calling:
-
-```go
-output := blackfriday.Run(input)
-```
-
-Your input will be parsed and the output rendered with a set of most popular
-extensions enabled. If you want the most basic feature set, corresponding with
-the bare Markdown specification, use:
-
-```go
-output := blackfriday.Run(input, blackfriday.WithNoExtensions())
-```
-
-### Sanitize untrusted content
-
-Blackfriday itself does nothing to protect against malicious content. If you are
-dealing with user-supplied markdown, we recommend running Blackfriday's output
-through HTML sanitizer such as [Bluemonday][5].
-
-Here's an example of simple usage of Blackfriday together with Bluemonday:
-
-```go
-import (
- "github.com/microcosm-cc/bluemonday"
- "github.com/russross/blackfriday"
-)
-
-// ...
-unsafe := blackfriday.Run(input)
-html := bluemonday.UGCPolicy().SanitizeBytes(unsafe)
-```
-
-### Custom options
-
-If you want to customize the set of options, use `blackfriday.WithExtensions`,
-`blackfriday.WithRenderer` and `blackfriday.WithRefOverride`.
-
-You can also check out `blackfriday-tool` for a more complete example
-of how to use it. Download and install it using:
-
- go get github.com/russross/blackfriday-tool
-
-This is a simple command-line tool that allows you to process a
-markdown file using a standalone program. You can also browse the
-source directly on github if you are just looking for some example
-code:
-
-* <http://github.com/russross/blackfriday-tool>
-
-Note that if you have not already done so, installing
-`blackfriday-tool` will be sufficient to download and install
-blackfriday in addition to the tool itself. The tool binary will be
-installed in `$GOPATH/bin`. This is a statically-linked binary that
-can be copied to wherever you need it without worrying about
-dependencies and library versions.
-
-
-Features
---------
-
-All features of Sundown are supported, including:
-
-* **Compatibility**. The Markdown v1.0.3 test suite passes with
- the `--tidy` option. Without `--tidy`, the differences are
- mostly in whitespace and entity escaping, where blackfriday is
- more consistent and cleaner.
-
-* **Common extensions**, including table support, fenced code
- blocks, autolinks, strikethroughs, non-strict emphasis, etc.
-
-* **Safety**. Blackfriday is paranoid when parsing, making it safe
- to feed untrusted user input without fear of bad things
- happening. The test suite stress tests this and there are no
- known inputs that make it crash. If you find one, please let me
- know and send me the input that does it.
-
- NOTE: "safety" in this context means *runtime safety only*. In order to
- protect yourself against JavaScript injection in untrusted content, see
- [this example](https://github.com/russross/blackfriday#sanitize-untrusted-content).
-
-* **Fast processing**. It is fast enough to render on-demand in
- most web applications without having to cache the output.
-
-* **Thread safety**. You can run multiple parsers in different
- goroutines without ill effect. There is no dependence on global
- shared state.
-
-* **Minimal dependencies**. Blackfriday only depends on standard
- library packages in Go. The source code is pretty
- self-contained, so it is easy to add to any project, including
- Google App Engine projects.
-
-* **Standards compliant**. Output successfully validates using the
- W3C validation tool for HTML 4.01 and XHTML 1.0 Transitional.
-
-
-Extensions
-----------
-
-In addition to the standard markdown syntax, this package
-implements the following extensions:
-
-* **Intra-word emphasis supression**. The `_` character is
- commonly used inside words when discussing code, so having
- markdown interpret it as an emphasis command is usually the
- wrong thing. Blackfriday lets you treat all emphasis markers as
- normal characters when they occur inside a word.
-
-* **Tables**. Tables can be created by drawing them in the input
- using a simple syntax:
-
- ```
- Name | Age
- --------|------
- Bob | 27
- Alice | 23
- ```
-
-* **Fenced code blocks**. In addition to the normal 4-space
- indentation to mark code blocks, you can explicitly mark them
- and supply a language (to make syntax highlighting simple). Just
- mark it like this:
-
- ```go
- func getTrue() bool {
- return true
- }
- ```
-
- You can use 3 or more backticks to mark the beginning of the
- block, and the same number to mark the end of the block.
-
-* **Definition lists**. A simple definition list is made of a single-line
- term followed by a colon and the definition for that term.
-
- Cat
- : Fluffy animal everyone likes
-
- Internet
- : Vector of transmission for pictures of cats
-
- Terms must be separated from the previous definition by a blank line.
-
-* **Footnotes**. A marker in the text that will become a superscript number;
- a footnote definition that will be placed in a list of footnotes at the
- end of the document. A footnote looks like this:
-
- This is a footnote.[^1]
-
- [^1]: the footnote text.
-
-* **Autolinking**. Blackfriday can find URLs that have not been
- explicitly marked as links and turn them into links.
-
-* **Strikethrough**. Use two tildes (`~~`) to mark text that
- should be crossed out.
-
-* **Hard line breaks**. With this extension enabled newlines in the input
- translate into line breaks in the output. This extension is off by default.
-
-* **Smart quotes**. Smartypants-style punctuation substitution is
- supported, turning normal double- and single-quote marks into
- curly quotes, etc.
-
-* **LaTeX-style dash parsing** is an additional option, where `--`
- is translated into an en dash (`–`) and `---` is translated into
- an em dash (`—`). This differs from most smartypants processors,
- which turn a single hyphen into an en dash and a double hyphen
- into an em dash.
-
-* **Smart fractions**, where anything that looks like a fraction
- is translated into suitable HTML (instead of just a few special
- cases like most smartypants processors). For example, `4/5`
- becomes `<sup>4</sup>⁄<sub>5</sub>`, which renders as
- <sup>4</sup>⁄<sub>5</sub>.
-
-
-Other renderers
----------------
-
-Blackfriday is structured to allow alternative rendering engines. Here
-are a few of note:
-
-* [github_flavored_markdown](https://godoc.org/github.com/shurcooL/github_flavored_markdown):
- provides a GitHub Flavored Markdown renderer with fenced code block
- highlighting, clickable heading anchor links.
-
- It's not customizable, and its goal is to produce HTML output
- equivalent to the [GitHub Markdown API endpoint](https://developer.github.com/v3/markdown/#render-a-markdown-document-in-raw-mode),
- except the rendering is performed locally.
-
-* [markdownfmt](https://github.com/shurcooL/markdownfmt): like gofmt,
- but for markdown.
-
-* [LaTeX output](https://github.com/Ambrevar/Blackfriday-LaTeX):
- renders output as LaTeX.
-
-* [Blackfriday-Confluence](https://github.com/kentaro-m/blackfriday-confluence): provides a [Confluence Wiki Markup](https://confluence.atlassian.com/doc/confluence-wiki-markup-251003035.html) renderer.
-
-
-Todo
-----
-
-* More unit testing
-* Improve unicode support. It does not understand all unicode
- rules (about what constitutes a letter, a punctuation symbol,
- etc.), so it may fail to detect word boundaries correctly in
- some instances. It is safe on all utf-8 input.
-
-
-License
--------
-
-[Blackfriday is distributed under the Simplified BSD License](LICENSE.txt)
-
-
- [1]: https://daringfireball.net/projects/markdown/ "Markdown"
- [2]: https://golang.org/ "Go Language"
- [3]: https://github.com/vmg/sundown "Sundown"
- [4]: https://godoc.org/gopkg.in/russross/blackfriday.v2#Parse "Parse func"
- [5]: https://github.com/microcosm-cc/bluemonday "Bluemonday"
- [6]: https://labix.org/gopkg.in "gopkg.in"
+++ /dev/null
-//
-// Blackfriday Markdown Processor
-// Available at http://github.com/russross/blackfriday
-//
-// Copyright © 2011 Russ Ross <russ@russross.com>.
-// Distributed under the Simplified BSD License.
-// See README.md for details.
-//
-
-//
-// Functions to parse block-level elements.
-//
-
-package blackfriday
-
-import (
- "bytes"
- "html"
- "regexp"
- "strings"
-
- "github.com/shurcooL/sanitized_anchor_name"
-)
-
-const (
- charEntity = "&(?:#x[a-f0-9]{1,8}|#[0-9]{1,8}|[a-z][a-z0-9]{1,31});"
- escapable = "[!\"#$%&'()*+,./:;<=>?@[\\\\\\]^_`{|}~-]"
-)
-
-var (
- reBackslashOrAmp = regexp.MustCompile("[\\&]")
- reEntityOrEscapedChar = regexp.MustCompile("(?i)\\\\" + escapable + "|" + charEntity)
-)
-
-// Parse block-level data.
-// Note: this function and many that it calls assume that
-// the input buffer ends with a newline.
-func (p *Markdown) block(data []byte) {
- // this is called recursively: enforce a maximum depth
- if p.nesting >= p.maxNesting {
- return
- }
- p.nesting++
-
- // parse out one block-level construct at a time
- for len(data) > 0 {
- // prefixed heading:
- //
- // # Heading 1
- // ## Heading 2
- // ...
- // ###### Heading 6
- if p.isPrefixHeading(data) {
- data = data[p.prefixHeading(data):]
- continue
- }
-
- // block of preformatted HTML:
- //
- // <div>
- // ...
- // </div>
- if data[0] == '<' {
- if i := p.html(data, true); i > 0 {
- data = data[i:]
- continue
- }
- }
-
- // title block
- //
- // % stuff
- // % more stuff
- // % even more stuff
- if p.extensions&Titleblock != 0 {
- if data[0] == '%' {
- if i := p.titleBlock(data, true); i > 0 {
- data = data[i:]
- continue
- }
- }
- }
-
- // blank lines. note: returns the # of bytes to skip
- if i := p.isEmpty(data); i > 0 {
- data = data[i:]
- continue
- }
-
- // indented code block:
- //
- // func max(a, b int) int {
- // if a > b {
- // return a
- // }
- // return b
- // }
- if p.codePrefix(data) > 0 {
- data = data[p.code(data):]
- continue
- }
-
- // fenced code block:
- //
- // ``` go
- // func fact(n int) int {
- // if n <= 1 {
- // return n
- // }
- // return n * fact(n-1)
- // }
- // ```
- if p.extensions&FencedCode != 0 {
- if i := p.fencedCodeBlock(data, true); i > 0 {
- data = data[i:]
- continue
- }
- }
-
- // horizontal rule:
- //
- // ------
- // or
- // ******
- // or
- // ______
- if p.isHRule(data) {
- p.addBlock(HorizontalRule, nil)
- var i int
- for i = 0; i < len(data) && data[i] != '\n'; i++ {
- }
- data = data[i:]
- continue
- }
-
- // block quote:
- //
- // > A big quote I found somewhere
- // > on the web
- if p.quotePrefix(data) > 0 {
- data = data[p.quote(data):]
- continue
- }
-
- // table:
- //
- // Name | Age | Phone
- // ------|-----|---------
- // Bob | 31 | 555-1234
- // Alice | 27 | 555-4321
- if p.extensions&Tables != 0 {
- if i := p.table(data); i > 0 {
- data = data[i:]
- continue
- }
- }
-
- // an itemized/unordered list:
- //
- // * Item 1
- // * Item 2
- //
- // also works with + or -
- if p.uliPrefix(data) > 0 {
- data = data[p.list(data, 0):]
- continue
- }
-
- // a numbered/ordered list:
- //
- // 1. Item 1
- // 2. Item 2
- if p.oliPrefix(data) > 0 {
- data = data[p.list(data, ListTypeOrdered):]
- continue
- }
-
- // definition lists:
- //
- // Term 1
- // : Definition a
- // : Definition b
- //
- // Term 2
- // : Definition c
- if p.extensions&DefinitionLists != 0 {
- if p.dliPrefix(data) > 0 {
- data = data[p.list(data, ListTypeDefinition):]
- continue
- }
- }
-
- // anything else must look like a normal paragraph
- // note: this finds underlined headings, too
- data = data[p.paragraph(data):]
- }
-
- p.nesting--
-}
-
-func (p *Markdown) addBlock(typ NodeType, content []byte) *Node {
- p.closeUnmatchedBlocks()
- container := p.addChild(typ, 0)
- container.content = content
- return container
-}
-
-func (p *Markdown) isPrefixHeading(data []byte) bool {
- if data[0] != '#' {
- return false
- }
-
- if p.extensions&SpaceHeadings != 0 {
- level := 0
- for level < 6 && level < len(data) && data[level] == '#' {
- level++
- }
- if level == len(data) || data[level] != ' ' {
- return false
- }
- }
- return true
-}
-
-func (p *Markdown) prefixHeading(data []byte) int {
- level := 0
- for level < 6 && level < len(data) && data[level] == '#' {
- level++
- }
- i := skipChar(data, level, ' ')
- end := skipUntilChar(data, i, '\n')
- skip := end
- id := ""
- if p.extensions&HeadingIDs != 0 {
- j, k := 0, 0
- // find start/end of heading id
- for j = i; j < end-1 && (data[j] != '{' || data[j+1] != '#'); j++ {
- }
- for k = j + 1; k < end && data[k] != '}'; k++ {
- }
- // extract heading id iff found
- if j < end && k < end {
- id = string(data[j+2 : k])
- end = j
- skip = k + 1
- for end > 0 && data[end-1] == ' ' {
- end--
- }
- }
- }
- for end > 0 && data[end-1] == '#' {
- if isBackslashEscaped(data, end-1) {
- break
- }
- end--
- }
- for end > 0 && data[end-1] == ' ' {
- end--
- }
- if end > i {
- if id == "" && p.extensions&AutoHeadingIDs != 0 {
- id = sanitized_anchor_name.Create(string(data[i:end]))
- }
- block := p.addBlock(Heading, data[i:end])
- block.HeadingID = id
- block.Level = level
- }
- return skip
-}
-
-func (p *Markdown) isUnderlinedHeading(data []byte) int {
- // test of level 1 heading
- if data[0] == '=' {
- i := skipChar(data, 1, '=')
- i = skipChar(data, i, ' ')
- if i < len(data) && data[i] == '\n' {
- return 1
- }
- return 0
- }
-
- // test of level 2 heading
- if data[0] == '-' {
- i := skipChar(data, 1, '-')
- i = skipChar(data, i, ' ')
- if i < len(data) && data[i] == '\n' {
- return 2
- }
- return 0
- }
-
- return 0
-}
-
-func (p *Markdown) titleBlock(data []byte, doRender bool) int {
- if data[0] != '%' {
- return 0
- }
- splitData := bytes.Split(data, []byte("\n"))
- var i int
- for idx, b := range splitData {
- if !bytes.HasPrefix(b, []byte("%")) {
- i = idx // - 1
- break
- }
- }
-
- data = bytes.Join(splitData[0:i], []byte("\n"))
- consumed := len(data)
- data = bytes.TrimPrefix(data, []byte("% "))
- data = bytes.Replace(data, []byte("\n% "), []byte("\n"), -1)
- block := p.addBlock(Heading, data)
- block.Level = 1
- block.IsTitleblock = true
-
- return consumed
-}
-
-func (p *Markdown) html(data []byte, doRender bool) int {
- var i, j int
-
- // identify the opening tag
- if data[0] != '<' {
- return 0
- }
- curtag, tagfound := p.htmlFindTag(data[1:])
-
- // handle special cases
- if !tagfound {
- // check for an HTML comment
- if size := p.htmlComment(data, doRender); size > 0 {
- return size
- }
-
- // check for an <hr> tag
- if size := p.htmlHr(data, doRender); size > 0 {
- return size
- }
-
- // no special case recognized
- return 0
- }
-
- // look for an unindented matching closing tag
- // followed by a blank line
- found := false
- /*
- closetag := []byte("\n</" + curtag + ">")
- j = len(curtag) + 1
- for !found {
- // scan for a closing tag at the beginning of a line
- if skip := bytes.Index(data[j:], closetag); skip >= 0 {
- j += skip + len(closetag)
- } else {
- break
- }
-
- // see if it is the only thing on the line
- if skip := p.isEmpty(data[j:]); skip > 0 {
- // see if it is followed by a blank line/eof
- j += skip
- if j >= len(data) {
- found = true
- i = j
- } else {
- if skip := p.isEmpty(data[j:]); skip > 0 {
- j += skip
- found = true
- i = j
- }
- }
- }
- }
- */
-
- // if not found, try a second pass looking for indented match
- // but not if tag is "ins" or "del" (following original Markdown.pl)
- if !found && curtag != "ins" && curtag != "del" {
- i = 1
- for i < len(data) {
- i++
- for i < len(data) && !(data[i-1] == '<' && data[i] == '/') {
- i++
- }
-
- if i+2+len(curtag) >= len(data) {
- break
- }
-
- j = p.htmlFindEnd(curtag, data[i-1:])
-
- if j > 0 {
- i += j - 1
- found = true
- break
- }
- }
- }
-
- if !found {
- return 0
- }
-
- // the end of the block has been found
- if doRender {
- // trim newlines
- end := i
- for end > 0 && data[end-1] == '\n' {
- end--
- }
- finalizeHTMLBlock(p.addBlock(HTMLBlock, data[:end]))
- }
-
- return i
-}
-
-func finalizeHTMLBlock(block *Node) {
- block.Literal = block.content
- block.content = nil
-}
-
-// HTML comment, lax form
-func (p *Markdown) htmlComment(data []byte, doRender bool) int {
- i := p.inlineHTMLComment(data)
- // needs to end with a blank line
- if j := p.isEmpty(data[i:]); j > 0 {
- size := i + j
- if doRender {
- // trim trailing newlines
- end := size
- for end > 0 && data[end-1] == '\n' {
- end--
- }
- block := p.addBlock(HTMLBlock, data[:end])
- finalizeHTMLBlock(block)
- }
- return size
- }
- return 0
-}
-
-// HR, which is the only self-closing block tag considered
-func (p *Markdown) htmlHr(data []byte, doRender bool) int {
- if len(data) < 4 {
- return 0
- }
- if data[0] != '<' || (data[1] != 'h' && data[1] != 'H') || (data[2] != 'r' && data[2] != 'R') {
- return 0
- }
- if data[3] != ' ' && data[3] != '/' && data[3] != '>' {
- // not an <hr> tag after all; at least not a valid one
- return 0
- }
- i := 3
- for i < len(data) && data[i] != '>' && data[i] != '\n' {
- i++
- }
- if i < len(data) && data[i] == '>' {
- i++
- if j := p.isEmpty(data[i:]); j > 0 {
- size := i + j
- if doRender {
- // trim newlines
- end := size
- for end > 0 && data[end-1] == '\n' {
- end--
- }
- finalizeHTMLBlock(p.addBlock(HTMLBlock, data[:end]))
- }
- return size
- }
- }
- return 0
-}
-
-func (p *Markdown) htmlFindTag(data []byte) (string, bool) {
- i := 0
- for i < len(data) && isalnum(data[i]) {
- i++
- }
- key := string(data[:i])
- if _, ok := blockTags[key]; ok {
- return key, true
- }
- return "", false
-}
-
-func (p *Markdown) htmlFindEnd(tag string, data []byte) int {
- // assume data[0] == '<' && data[1] == '/' already tested
- if tag == "hr" {
- return 2
- }
- // check if tag is a match
- closetag := []byte("</" + tag + ">")
- if !bytes.HasPrefix(data, closetag) {
- return 0
- }
- i := len(closetag)
-
- // check that the rest of the line is blank
- skip := 0
- if skip = p.isEmpty(data[i:]); skip == 0 {
- return 0
- }
- i += skip
- skip = 0
-
- if i >= len(data) {
- return i
- }
-
- if p.extensions&LaxHTMLBlocks != 0 {
- return i
- }
- if skip = p.isEmpty(data[i:]); skip == 0 {
- // following line must be blank
- return 0
- }
-
- return i + skip
-}
-
-func (*Markdown) isEmpty(data []byte) int {
- // it is okay to call isEmpty on an empty buffer
- if len(data) == 0 {
- return 0
- }
-
- var i int
- for i = 0; i < len(data) && data[i] != '\n'; i++ {
- if data[i] != ' ' && data[i] != '\t' {
- return 0
- }
- }
- if i < len(data) && data[i] == '\n' {
- i++
- }
- return i
-}
-
-func (*Markdown) isHRule(data []byte) bool {
- i := 0
-
- // skip up to three spaces
- for i < 3 && data[i] == ' ' {
- i++
- }
-
- // look at the hrule char
- if data[i] != '*' && data[i] != '-' && data[i] != '_' {
- return false
- }
- c := data[i]
-
- // the whole line must be the char or whitespace
- n := 0
- for i < len(data) && data[i] != '\n' {
- switch {
- case data[i] == c:
- n++
- case data[i] != ' ':
- return false
- }
- i++
- }
-
- return n >= 3
-}
-
-// isFenceLine checks if there's a fence line (e.g., ``` or ``` go) at the beginning of data,
-// and returns the end index if so, or 0 otherwise. It also returns the marker found.
-// If info is not nil, it gets set to the syntax specified in the fence line.
-func isFenceLine(data []byte, info *string, oldmarker string) (end int, marker string) {
- i, size := 0, 0
-
- // skip up to three spaces
- for i < len(data) && i < 3 && data[i] == ' ' {
- i++
- }
-
- // check for the marker characters: ~ or `
- if i >= len(data) {
- return 0, ""
- }
- if data[i] != '~' && data[i] != '`' {
- return 0, ""
- }
-
- c := data[i]
-
- // the whole line must be the same char or whitespace
- for i < len(data) && data[i] == c {
- size++
- i++
- }
-
- // the marker char must occur at least 3 times
- if size < 3 {
- return 0, ""
- }
- marker = string(data[i-size : i])
-
- // if this is the end marker, it must match the beginning marker
- if oldmarker != "" && marker != oldmarker {
- return 0, ""
- }
-
- // TODO(shurcooL): It's probably a good idea to simplify the 2 code paths here
- // into one, always get the info string, and discard it if the caller doesn't care.
- if info != nil {
- infoLength := 0
- i = skipChar(data, i, ' ')
-
- if i >= len(data) {
- if i == len(data) {
- return i, marker
- }
- return 0, ""
- }
-
- infoStart := i
-
- if data[i] == '{' {
- i++
- infoStart++
-
- for i < len(data) && data[i] != '}' && data[i] != '\n' {
- infoLength++
- i++
- }
-
- if i >= len(data) || data[i] != '}' {
- return 0, ""
- }
-
- // strip all whitespace at the beginning and the end
- // of the {} block
- for infoLength > 0 && isspace(data[infoStart]) {
- infoStart++
- infoLength--
- }
-
- for infoLength > 0 && isspace(data[infoStart+infoLength-1]) {
- infoLength--
- }
- i++
- i = skipChar(data, i, ' ')
- } else {
- for i < len(data) && !isverticalspace(data[i]) {
- infoLength++
- i++
- }
- }
-
- *info = strings.TrimSpace(string(data[infoStart : infoStart+infoLength]))
- }
-
- if i == len(data) {
- return i, marker
- }
- if i > len(data) || data[i] != '\n' {
- return 0, ""
- }
- return i + 1, marker // Take newline into account.
-}
-
-// fencedCodeBlock returns the end index if data contains a fenced code block at the beginning,
-// or 0 otherwise. It writes to out if doRender is true, otherwise it has no side effects.
-// If doRender is true, a final newline is mandatory to recognize the fenced code block.
-func (p *Markdown) fencedCodeBlock(data []byte, doRender bool) int {
- var info string
- beg, marker := isFenceLine(data, &info, "")
- if beg == 0 || beg >= len(data) {
- return 0
- }
-
- var work bytes.Buffer
- work.Write([]byte(info))
- work.WriteByte('\n')
-
- for {
- // safe to assume beg < len(data)
-
- // check for the end of the code block
- fenceEnd, _ := isFenceLine(data[beg:], nil, marker)
- if fenceEnd != 0 {
- beg += fenceEnd
- break
- }
-
- // copy the current line
- end := skipUntilChar(data, beg, '\n') + 1
-
- // did we reach the end of the buffer without a closing marker?
- if end >= len(data) {
- return 0
- }
-
- // verbatim copy to the working buffer
- if doRender {
- work.Write(data[beg:end])
- }
- beg = end
- }
-
- if doRender {
- block := p.addBlock(CodeBlock, work.Bytes()) // TODO: get rid of temp buffer
- block.IsFenced = true
- finalizeCodeBlock(block)
- }
-
- return beg
-}
-
-func unescapeChar(str []byte) []byte {
- if str[0] == '\\' {
- return []byte{str[1]}
- }
- return []byte(html.UnescapeString(string(str)))
-}
-
-func unescapeString(str []byte) []byte {
- if reBackslashOrAmp.Match(str) {
- return reEntityOrEscapedChar.ReplaceAllFunc(str, unescapeChar)
- }
- return str
-}
-
-func finalizeCodeBlock(block *Node) {
- if block.IsFenced {
- newlinePos := bytes.IndexByte(block.content, '\n')
- firstLine := block.content[:newlinePos]
- rest := block.content[newlinePos+1:]
- block.Info = unescapeString(bytes.Trim(firstLine, "\n"))
- block.Literal = rest
- } else {
- block.Literal = block.content
- }
- block.content = nil
-}
-
-func (p *Markdown) table(data []byte) int {
- table := p.addBlock(Table, nil)
- i, columns := p.tableHeader(data)
- if i == 0 {
- p.tip = table.Parent
- table.Unlink()
- return 0
- }
-
- p.addBlock(TableBody, nil)
-
- for i < len(data) {
- pipes, rowStart := 0, i
- for ; i < len(data) && data[i] != '\n'; i++ {
- if data[i] == '|' {
- pipes++
- }
- }
-
- if pipes == 0 {
- i = rowStart
- break
- }
-
- // include the newline in data sent to tableRow
- if i < len(data) && data[i] == '\n' {
- i++
- }
- p.tableRow(data[rowStart:i], columns, false)
- }
-
- return i
-}
-
-// check if the specified position is preceded by an odd number of backslashes
-func isBackslashEscaped(data []byte, i int) bool {
- backslashes := 0
- for i-backslashes-1 >= 0 && data[i-backslashes-1] == '\\' {
- backslashes++
- }
- return backslashes&1 == 1
-}
-
-func (p *Markdown) tableHeader(data []byte) (size int, columns []CellAlignFlags) {
- i := 0
- colCount := 1
- for i = 0; i < len(data) && data[i] != '\n'; i++ {
- if data[i] == '|' && !isBackslashEscaped(data, i) {
- colCount++
- }
- }
-
- // doesn't look like a table header
- if colCount == 1 {
- return
- }
-
- // include the newline in the data sent to tableRow
- j := i
- if j < len(data) && data[j] == '\n' {
- j++
- }
- header := data[:j]
-
- // column count ignores pipes at beginning or end of line
- if data[0] == '|' {
- colCount--
- }
- if i > 2 && data[i-1] == '|' && !isBackslashEscaped(data, i-1) {
- colCount--
- }
-
- columns = make([]CellAlignFlags, colCount)
-
- // move on to the header underline
- i++
- if i >= len(data) {
- return
- }
-
- if data[i] == '|' && !isBackslashEscaped(data, i) {
- i++
- }
- i = skipChar(data, i, ' ')
-
- // each column header is of form: / *:?-+:? *|/ with # dashes + # colons >= 3
- // and trailing | optional on last column
- col := 0
- for i < len(data) && data[i] != '\n' {
- dashes := 0
-
- if data[i] == ':' {
- i++
- columns[col] |= TableAlignmentLeft
- dashes++
- }
- for i < len(data) && data[i] == '-' {
- i++
- dashes++
- }
- if i < len(data) && data[i] == ':' {
- i++
- columns[col] |= TableAlignmentRight
- dashes++
- }
- for i < len(data) && data[i] == ' ' {
- i++
- }
- if i == len(data) {
- return
- }
- // end of column test is messy
- switch {
- case dashes < 3:
- // not a valid column
- return
-
- case data[i] == '|' && !isBackslashEscaped(data, i):
- // marker found, now skip past trailing whitespace
- col++
- i++
- for i < len(data) && data[i] == ' ' {
- i++
- }
-
- // trailing junk found after last column
- if col >= colCount && i < len(data) && data[i] != '\n' {
- return
- }
-
- case (data[i] != '|' || isBackslashEscaped(data, i)) && col+1 < colCount:
- // something else found where marker was required
- return
-
- case data[i] == '\n':
- // marker is optional for the last column
- col++
-
- default:
- // trailing junk found after last column
- return
- }
- }
- if col != colCount {
- return
- }
-
- p.addBlock(TableHead, nil)
- p.tableRow(header, columns, true)
- size = i
- if size < len(data) && data[size] == '\n' {
- size++
- }
- return
-}
-
-func (p *Markdown) tableRow(data []byte, columns []CellAlignFlags, header bool) {
- p.addBlock(TableRow, nil)
- i, col := 0, 0
-
- if data[i] == '|' && !isBackslashEscaped(data, i) {
- i++
- }
-
- for col = 0; col < len(columns) && i < len(data); col++ {
- for i < len(data) && data[i] == ' ' {
- i++
- }
-
- cellStart := i
-
- for i < len(data) && (data[i] != '|' || isBackslashEscaped(data, i)) && data[i] != '\n' {
- i++
- }
-
- cellEnd := i
-
- // skip the end-of-cell marker, possibly taking us past end of buffer
- i++
-
- for cellEnd > cellStart && cellEnd-1 < len(data) && data[cellEnd-1] == ' ' {
- cellEnd--
- }
-
- cell := p.addBlock(TableCell, data[cellStart:cellEnd])
- cell.IsHeader = header
- cell.Align = columns[col]
- }
-
- // pad it out with empty columns to get the right number
- for ; col < len(columns); col++ {
- cell := p.addBlock(TableCell, nil)
- cell.IsHeader = header
- cell.Align = columns[col]
- }
-
- // silently ignore rows with too many cells
-}
-
-// returns blockquote prefix length
-func (p *Markdown) quotePrefix(data []byte) int {
- i := 0
- for i < 3 && i < len(data) && data[i] == ' ' {
- i++
- }
- if i < len(data) && data[i] == '>' {
- if i+1 < len(data) && data[i+1] == ' ' {
- return i + 2
- }
- return i + 1
- }
- return 0
-}
-
-// blockquote ends with at least one blank line
-// followed by something without a blockquote prefix
-func (p *Markdown) terminateBlockquote(data []byte, beg, end int) bool {
- if p.isEmpty(data[beg:]) <= 0 {
- return false
- }
- if end >= len(data) {
- return true
- }
- return p.quotePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0
-}
-
-// parse a blockquote fragment
-func (p *Markdown) quote(data []byte) int {
- block := p.addBlock(BlockQuote, nil)
- var raw bytes.Buffer
- beg, end := 0, 0
- for beg < len(data) {
- end = beg
- // Step over whole lines, collecting them. While doing that, check for
- // fenced code and, if it's found, incorporate it altogether,
- // regardless of any contents inside it
- for end < len(data) && data[end] != '\n' {
- if p.extensions&FencedCode != 0 {
- if i := p.fencedCodeBlock(data[end:], false); i > 0 {
- // -1 to compensate for the extra end++ after the loop:
- end += i - 1
- break
- }
- }
- end++
- }
- if end < len(data) && data[end] == '\n' {
- end++
- }
- if pre := p.quotePrefix(data[beg:]); pre > 0 {
- // skip the prefix
- beg += pre
- } else if p.terminateBlockquote(data, beg, end) {
- break
- }
- // this line is part of the blockquote
- raw.Write(data[beg:end])
- beg = end
- }
- p.block(raw.Bytes())
- p.finalize(block)
- return end
-}
-
-// returns prefix length for block code
-func (p *Markdown) codePrefix(data []byte) int {
- if len(data) >= 1 && data[0] == '\t' {
- return 1
- }
- if len(data) >= 4 && data[0] == ' ' && data[1] == ' ' && data[2] == ' ' && data[3] == ' ' {
- return 4
- }
- return 0
-}
-
-func (p *Markdown) code(data []byte) int {
- var work bytes.Buffer
-
- i := 0
- for i < len(data) {
- beg := i
- for i < len(data) && data[i] != '\n' {
- i++
- }
- if i < len(data) && data[i] == '\n' {
- i++
- }
-
- blankline := p.isEmpty(data[beg:i]) > 0
- if pre := p.codePrefix(data[beg:i]); pre > 0 {
- beg += pre
- } else if !blankline {
- // non-empty, non-prefixed line breaks the pre
- i = beg
- break
- }
-
- // verbatim copy to the working buffer
- if blankline {
- work.WriteByte('\n')
- } else {
- work.Write(data[beg:i])
- }
- }
-
- // trim all the \n off the end of work
- workbytes := work.Bytes()
- eol := len(workbytes)
- for eol > 0 && workbytes[eol-1] == '\n' {
- eol--
- }
- if eol != len(workbytes) {
- work.Truncate(eol)
- }
-
- work.WriteByte('\n')
-
- block := p.addBlock(CodeBlock, work.Bytes()) // TODO: get rid of temp buffer
- block.IsFenced = false
- finalizeCodeBlock(block)
-
- return i
-}
-
-// returns unordered list item prefix
-func (p *Markdown) uliPrefix(data []byte) int {
- i := 0
- // start with up to 3 spaces
- for i < len(data) && i < 3 && data[i] == ' ' {
- i++
- }
- if i >= len(data)-1 {
- return 0
- }
- // need one of {'*', '+', '-'} followed by a space or a tab
- if (data[i] != '*' && data[i] != '+' && data[i] != '-') ||
- (data[i+1] != ' ' && data[i+1] != '\t') {
- return 0
- }
- return i + 2
-}
-
-// returns ordered list item prefix
-func (p *Markdown) oliPrefix(data []byte) int {
- i := 0
-
- // start with up to 3 spaces
- for i < 3 && i < len(data) && data[i] == ' ' {
- i++
- }
-
- // count the digits
- start := i
- for i < len(data) && data[i] >= '0' && data[i] <= '9' {
- i++
- }
- if start == i || i >= len(data)-1 {
- return 0
- }
-
- // we need >= 1 digits followed by a dot and a space or a tab
- if data[i] != '.' || !(data[i+1] == ' ' || data[i+1] == '\t') {
- return 0
- }
- return i + 2
-}
-
-// returns definition list item prefix
-func (p *Markdown) dliPrefix(data []byte) int {
- if len(data) < 2 {
- return 0
- }
- i := 0
- // need a ':' followed by a space or a tab
- if data[i] != ':' || !(data[i+1] == ' ' || data[i+1] == '\t') {
- return 0
- }
- for i < len(data) && data[i] == ' ' {
- i++
- }
- return i + 2
-}
-
-// parse ordered or unordered list block
-func (p *Markdown) list(data []byte, flags ListType) int {
- i := 0
- flags |= ListItemBeginningOfList
- block := p.addBlock(List, nil)
- block.ListFlags = flags
- block.Tight = true
-
- for i < len(data) {
- skip := p.listItem(data[i:], &flags)
- if flags&ListItemContainsBlock != 0 {
- block.ListData.Tight = false
- }
- i += skip
- if skip == 0 || flags&ListItemEndOfList != 0 {
- break
- }
- flags &= ^ListItemBeginningOfList
- }
-
- above := block.Parent
- finalizeList(block)
- p.tip = above
- return i
-}
-
-// Returns true if the list item is not the same type as its parent list
-func (p *Markdown) listTypeChanged(data []byte, flags *ListType) bool {
- if p.dliPrefix(data) > 0 && *flags&ListTypeDefinition == 0 {
- return true
- } else if p.oliPrefix(data) > 0 && *flags&ListTypeOrdered == 0 {
- return true
- } else if p.uliPrefix(data) > 0 && (*flags&ListTypeOrdered != 0 || *flags&ListTypeDefinition != 0) {
- return true
- }
- return false
-}
-
-// Returns true if block ends with a blank line, descending if needed
-// into lists and sublists.
-func endsWithBlankLine(block *Node) bool {
- // TODO: figure this out. Always false now.
- for block != nil {
- //if block.lastLineBlank {
- //return true
- //}
- t := block.Type
- if t == List || t == Item {
- block = block.LastChild
- } else {
- break
- }
- }
- return false
-}
-
-func finalizeList(block *Node) {
- block.open = false
- item := block.FirstChild
- for item != nil {
- // check for non-final list item ending with blank line:
- if endsWithBlankLine(item) && item.Next != nil {
- block.ListData.Tight = false
- break
- }
- // recurse into children of list item, to see if there are spaces
- // between any of them:
- subItem := item.FirstChild
- for subItem != nil {
- if endsWithBlankLine(subItem) && (item.Next != nil || subItem.Next != nil) {
- block.ListData.Tight = false
- break
- }
- subItem = subItem.Next
- }
- item = item.Next
- }
-}
-
-// Parse a single list item.
-// Assumes initial prefix is already removed if this is a sublist.
-func (p *Markdown) listItem(data []byte, flags *ListType) int {
- // keep track of the indentation of the first line
- itemIndent := 0
- if data[0] == '\t' {
- itemIndent += 4
- } else {
- for itemIndent < 3 && data[itemIndent] == ' ' {
- itemIndent++
- }
- }
-
- var bulletChar byte = '*'
- i := p.uliPrefix(data)
- if i == 0 {
- i = p.oliPrefix(data)
- } else {
- bulletChar = data[i-2]
- }
- if i == 0 {
- i = p.dliPrefix(data)
- // reset definition term flag
- if i > 0 {
- *flags &= ^ListTypeTerm
- }
- }
- if i == 0 {
- // if in definition list, set term flag and continue
- if *flags&ListTypeDefinition != 0 {
- *flags |= ListTypeTerm
- } else {
- return 0
- }
- }
-
- // skip leading whitespace on first line
- for i < len(data) && data[i] == ' ' {
- i++
- }
-
- // find the end of the line
- line := i
- for i > 0 && i < len(data) && data[i-1] != '\n' {
- i++
- }
-
- // get working buffer
- var raw bytes.Buffer
-
- // put the first line into the working buffer
- raw.Write(data[line:i])
- line = i
-
- // process the following lines
- containsBlankLine := false
- sublist := 0
- codeBlockMarker := ""
-
-gatherlines:
- for line < len(data) {
- i++
-
- // find the end of this line
- for i < len(data) && data[i-1] != '\n' {
- i++
- }
-
- // if it is an empty line, guess that it is part of this item
- // and move on to the next line
- if p.isEmpty(data[line:i]) > 0 {
- containsBlankLine = true
- line = i
- continue
- }
-
- // calculate the indentation
- indent := 0
- indentIndex := 0
- if data[line] == '\t' {
- indentIndex++
- indent += 4
- } else {
- for indent < 4 && line+indent < i && data[line+indent] == ' ' {
- indent++
- indentIndex++
- }
- }
-
- chunk := data[line+indentIndex : i]
-
- if p.extensions&FencedCode != 0 {
- // determine if in or out of codeblock
- // if in codeblock, ignore normal list processing
- _, marker := isFenceLine(chunk, nil, codeBlockMarker)
- if marker != "" {
- if codeBlockMarker == "" {
- // start of codeblock
- codeBlockMarker = marker
- } else {
- // end of codeblock.
- codeBlockMarker = ""
- }
- }
- // we are in a codeblock, write line, and continue
- if codeBlockMarker != "" || marker != "" {
- raw.Write(data[line+indentIndex : i])
- line = i
- continue gatherlines
- }
- }
-
- // evaluate how this line fits in
- switch {
- // is this a nested list item?
- case (p.uliPrefix(chunk) > 0 && !p.isHRule(chunk)) ||
- p.oliPrefix(chunk) > 0 ||
- p.dliPrefix(chunk) > 0:
-
- // to be a nested list, it must be indented more
- // if not, it is either a different kind of list
- // or the next item in the same list
- if indent <= itemIndent {
- if p.listTypeChanged(chunk, flags) {
- *flags |= ListItemEndOfList
- } else if containsBlankLine {
- *flags |= ListItemContainsBlock
- }
-
- break gatherlines
- }
-
- if containsBlankLine {
- *flags |= ListItemContainsBlock
- }
-
- // is this the first item in the nested list?
- if sublist == 0 {
- sublist = raw.Len()
- }
-
- // is this a nested prefix heading?
- case p.isPrefixHeading(chunk):
- // if the heading is not indented, it is not nested in the list
- // and thus ends the list
- if containsBlankLine && indent < 4 {
- *flags |= ListItemEndOfList
- break gatherlines
- }
- *flags |= ListItemContainsBlock
-
- // anything following an empty line is only part
- // of this item if it is indented 4 spaces
- // (regardless of the indentation of the beginning of the item)
- case containsBlankLine && indent < 4:
- if *flags&ListTypeDefinition != 0 && i < len(data)-1 {
- // is the next item still a part of this list?
- next := i
- for next < len(data) && data[next] != '\n' {
- next++
- }
- for next < len(data)-1 && data[next] == '\n' {
- next++
- }
- if i < len(data)-1 && data[i] != ':' && data[next] != ':' {
- *flags |= ListItemEndOfList
- }
- } else {
- *flags |= ListItemEndOfList
- }
- break gatherlines
-
- // a blank line means this should be parsed as a block
- case containsBlankLine:
- raw.WriteByte('\n')
- *flags |= ListItemContainsBlock
- }
-
- // if this line was preceded by one or more blanks,
- // re-introduce the blank into the buffer
- if containsBlankLine {
- containsBlankLine = false
- raw.WriteByte('\n')
- }
-
- // add the line into the working buffer without prefix
- raw.Write(data[line+indentIndex : i])
-
- line = i
- }
-
- rawBytes := raw.Bytes()
-
- block := p.addBlock(Item, nil)
- block.ListFlags = *flags
- block.Tight = false
- block.BulletChar = bulletChar
- block.Delimiter = '.' // Only '.' is possible in Markdown, but ')' will also be possible in CommonMark
-
- // render the contents of the list item
- if *flags&ListItemContainsBlock != 0 && *flags&ListTypeTerm == 0 {
- // intermediate render of block item, except for definition term
- if sublist > 0 {
- p.block(rawBytes[:sublist])
- p.block(rawBytes[sublist:])
- } else {
- p.block(rawBytes)
- }
- } else {
- // intermediate render of inline item
- if sublist > 0 {
- child := p.addChild(Paragraph, 0)
- child.content = rawBytes[:sublist]
- p.block(rawBytes[sublist:])
- } else {
- child := p.addChild(Paragraph, 0)
- child.content = rawBytes
- }
- }
- return line
-}
-
-// render a single paragraph that has already been parsed out
-func (p *Markdown) renderParagraph(data []byte) {
- if len(data) == 0 {
- return
- }
-
- // trim leading spaces
- beg := 0
- for data[beg] == ' ' {
- beg++
- }
-
- end := len(data)
- // trim trailing newline
- if data[len(data)-1] == '\n' {
- end--
- }
-
- // trim trailing spaces
- for end > beg && data[end-1] == ' ' {
- end--
- }
-
- p.addBlock(Paragraph, data[beg:end])
-}
-
-func (p *Markdown) paragraph(data []byte) int {
- // prev: index of 1st char of previous line
- // line: index of 1st char of current line
- // i: index of cursor/end of current line
- var prev, line, i int
- tabSize := TabSizeDefault
- if p.extensions&TabSizeEight != 0 {
- tabSize = TabSizeDouble
- }
- // keep going until we find something to mark the end of the paragraph
- for i < len(data) {
- // mark the beginning of the current line
- prev = line
- current := data[i:]
- line = i
-
- // did we find a reference or a footnote? If so, end a paragraph
- // preceding it and report that we have consumed up to the end of that
- // reference:
- if refEnd := isReference(p, current, tabSize); refEnd > 0 {
- p.renderParagraph(data[:i])
- return i + refEnd
- }
-
- // did we find a blank line marking the end of the paragraph?
- if n := p.isEmpty(current); n > 0 {
- // is this blank line followed by a definition list item?
- if p.extensions&DefinitionLists != 0 {
- if i < len(data)-1 && data[i+1] == ':' {
- return p.list(data[prev:], ListTypeDefinition)
- }
- }
-
- p.renderParagraph(data[:i])
- return i + n
- }
-
- // an underline under some text marks a heading, so our paragraph ended on prev line
- if i > 0 {
- if level := p.isUnderlinedHeading(current); level > 0 {
- // render the paragraph
- p.renderParagraph(data[:prev])
-
- // ignore leading and trailing whitespace
- eol := i - 1
- for prev < eol && data[prev] == ' ' {
- prev++
- }
- for eol > prev && data[eol-1] == ' ' {
- eol--
- }
-
- id := ""
- if p.extensions&AutoHeadingIDs != 0 {
- id = sanitized_anchor_name.Create(string(data[prev:eol]))
- }
-
- block := p.addBlock(Heading, data[prev:eol])
- block.Level = level
- block.HeadingID = id
-
- // find the end of the underline
- for i < len(data) && data[i] != '\n' {
- i++
- }
- return i
- }
- }
-
- // if the next line starts a block of HTML, then the paragraph ends here
- if p.extensions&LaxHTMLBlocks != 0 {
- if data[i] == '<' && p.html(current, false) > 0 {
- // rewind to before the HTML block
- p.renderParagraph(data[:i])
- return i
- }
- }
-
- // if there's a prefixed heading or a horizontal rule after this, paragraph is over
- if p.isPrefixHeading(current) || p.isHRule(current) {
- p.renderParagraph(data[:i])
- return i
- }
-
- // if there's a fenced code block, paragraph is over
- if p.extensions&FencedCode != 0 {
- if p.fencedCodeBlock(current, false) > 0 {
- p.renderParagraph(data[:i])
- return i
- }
- }
-
- // if there's a definition list item, prev line is a definition term
- if p.extensions&DefinitionLists != 0 {
- if p.dliPrefix(current) != 0 {
- ret := p.list(data[prev:], ListTypeDefinition)
- return ret
- }
- }
-
- // if there's a list after this, paragraph is over
- if p.extensions&NoEmptyLineBeforeBlock != 0 {
- if p.uliPrefix(current) != 0 ||
- p.oliPrefix(current) != 0 ||
- p.quotePrefix(current) != 0 ||
- p.codePrefix(current) != 0 {
- p.renderParagraph(data[:i])
- return i
- }
- }
-
- // otherwise, scan to the beginning of the next line
- nl := bytes.IndexByte(data[i:], '\n')
- if nl >= 0 {
- i += nl + 1
- } else {
- i += len(data[i:])
- }
- }
-
- p.renderParagraph(data[:i])
- return i
-}
-
-func skipChar(data []byte, start int, char byte) int {
- i := start
- for i < len(data) && data[i] == char {
- i++
- }
- return i
-}
-
-func skipUntilChar(text []byte, start int, char byte) int {
- i := start
- for i < len(text) && text[i] != char {
- i++
- }
- return i
-}
+++ /dev/null
-// Package blackfriday is a markdown processor.
-//
-// It translates plain text with simple formatting rules into an AST, which can
-// then be further processed to HTML (provided by Blackfriday itself) or other
-// formats (provided by the community).
-//
-// The simplest way to invoke Blackfriday is to call the Run function. It will
-// take a text input and produce a text output in HTML (or other format).
-//
-// A slightly more sophisticated way to use Blackfriday is to create a Markdown
-// processor and to call Parse, which returns a syntax tree for the input
-// document. You can leverage Blackfriday's parsing for content extraction from
-// markdown documents. You can assign a custom renderer and set various options
-// to the Markdown processor.
-//
-// If you're interested in calling Blackfriday from the command line, see
-// https://github.com/russross/blackfriday-tool.
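-//
-// As a rough sketch (an illustration added here, not part of the upstream
-// documentation), the Parse-based flow described above looks like:
-//
-//	md := blackfriday.New(blackfriday.WithExtensions(blackfriday.CommonExtensions))
-//	ast := md.Parse(input)
-//	ast.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
-//		// inspect or transform nodes here
-//		return blackfriday.GoToNext
-//	})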
-package blackfriday
+++ /dev/null
-package blackfriday
-
-import (
- "html"
- "io"
-)
-
-var htmlEscaper = [256][]byte{
- '&': []byte("&amp;"),
- '<': []byte("&lt;"),
- '>': []byte("&gt;"),
- '"': []byte("&quot;"),
-}
-
-func escapeHTML(w io.Writer, s []byte) {
- var start, end int
- for end < len(s) {
- escSeq := htmlEscaper[s[end]]
- if escSeq != nil {
- w.Write(s[start:end])
- w.Write(escSeq)
- start = end + 1
- }
- end++
- }
- if start < len(s) && end <= len(s) {
- w.Write(s[start:end])
- }
-}
-
-func escLink(w io.Writer, text []byte) {
- unesc := html.UnescapeString(string(text))
- escapeHTML(w, []byte(unesc))
-}
+++ /dev/null
-module github.com/russross/blackfriday/v2
+++ /dev/null
-//
-// Blackfriday Markdown Processor
-// Available at http://github.com/russross/blackfriday
-//
-// Copyright © 2011 Russ Ross <russ@russross.com>.
-// Distributed under the Simplified BSD License.
-// See README.md for details.
-//
-
-//
-//
-// HTML rendering backend
-//
-//
-
-package blackfriday
-
-import (
- "bytes"
- "fmt"
- "io"
- "regexp"
- "strings"
-)
-
-// HTMLFlags control optional behavior of HTML renderer.
-type HTMLFlags int
-
-// HTML renderer configuration options.
-const (
- HTMLFlagsNone HTMLFlags = 0
- SkipHTML HTMLFlags = 1 << iota // Skip preformatted HTML blocks
- SkipImages // Skip embedded images
- SkipLinks // Skip all links
- Safelink // Only link to trusted protocols
- NofollowLinks // Only link with rel="nofollow"
- NoreferrerLinks // Only link with rel="noreferrer"
- NoopenerLinks // Only link with rel="noopener"
- HrefTargetBlank // Add a blank target
- CompletePage // Generate a complete HTML page
- UseXHTML // Generate XHTML output instead of HTML
- FootnoteReturnLinks // Generate a link at the end of a footnote to return to the source
- Smartypants // Enable smart punctuation substitutions
- SmartypantsFractions // Enable smart fractions (with Smartypants)
- SmartypantsDashes // Enable smart dashes (with Smartypants)
- SmartypantsLatexDashes // Enable LaTeX-style dashes (with Smartypants)
- SmartypantsAngledQuotes // Enable angled double quotes (with Smartypants) for double quotes rendering
- SmartypantsQuotesNBSP // Enable « French guillemets » (with Smartypants)
- TOC // Generate a table of contents
-)
-
-var (
- htmlTagRe = regexp.MustCompile("(?i)^" + htmlTag)
-)
-
-const (
- htmlTag = "(?:" + openTag + "|" + closeTag + "|" + htmlComment + "|" +
- processingInstruction + "|" + declaration + "|" + cdata + ")"
- closeTag = "</" + tagName + "\\s*[>]"
- openTag = "<" + tagName + attribute + "*" + "\\s*/?>"
- attribute = "(?:" + "\\s+" + attributeName + attributeValueSpec + "?)"
- attributeValue = "(?:" + unquotedValue + "|" + singleQuotedValue + "|" + doubleQuotedValue + ")"
- attributeValueSpec = "(?:" + "\\s*=" + "\\s*" + attributeValue + ")"
- attributeName = "[a-zA-Z_:][a-zA-Z0-9:._-]*"
- cdata = "<!\\[CDATA\\[[\\s\\S]*?\\]\\]>"
- declaration = "<![A-Z]+" + "\\s+[^>]*>"
- doubleQuotedValue = "\"[^\"]*\""
- htmlComment = "<!---->|<!--(?:-?[^>-])(?:-?[^-])*-->"
- processingInstruction = "[<][?].*?[?][>]"
- singleQuotedValue = "'[^']*'"
- tagName = "[A-Za-z][A-Za-z0-9-]*"
- unquotedValue = "[^\"'=<>`\\x00-\\x20]+"
-)
-
-// HTMLRendererParameters is a collection of supplementary parameters tweaking
-// the behavior of various parts of the HTML renderer.
-type HTMLRendererParameters struct {
- // Prepend this text to each relative URL.
- AbsolutePrefix string
- // Add this text to each footnote anchor, to ensure uniqueness.
- FootnoteAnchorPrefix string
- // Show this text inside the <a> tag for a footnote return link, if the
- // FootnoteReturnLinks flag is enabled. If blank, the string
- // <sup>[return]</sup> is used.
- FootnoteReturnLinkContents string
- // If set, add this text to the front of each Heading ID, to ensure
- // uniqueness.
- HeadingIDPrefix string
- // If set, add this text to the back of each Heading ID, to ensure uniqueness.
- HeadingIDSuffix string
- // Increase heading levels: if the offset is 1, <h1> becomes <h2> etc.
- // Negative offset is also valid.
- // Resulting levels are clipped between 1 and 6.
- HeadingLevelOffset int
-
- Title string // Document title (used if CompletePage is set)
- CSS string // Optional CSS file URL (used if CompletePage is set)
- Icon string // Optional icon file URL (used if CompletePage is set)
-
- Flags HTMLFlags // Flags allow customizing this renderer's behavior
-}
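-
-// A hypothetical usage sketch (not part of the upstream source): these
-// parameters are filled in and handed to NewHTMLRenderer, for example:
-//
-//	r := NewHTMLRenderer(HTMLRendererParameters{
-//		AbsolutePrefix:  "https://example.com",
-//		HeadingIDPrefix: "doc-",
-//		Flags:           CompletePage | TOC,
-//	})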
-
-// HTMLRenderer is a type that implements the Renderer interface for HTML output.
-//
-// Do not create this directly, instead use the NewHTMLRenderer function.
-type HTMLRenderer struct {
- HTMLRendererParameters
-
- closeTag string // how to end singleton tags: either " />" or ">"
-
- // Track heading IDs to prevent ID collision in a single generation.
- headingIDs map[string]int
-
- lastOutputLen int
- disableTags int
-
- sr *SPRenderer
-}
-
-const (
- xhtmlClose = " />"
- htmlClose = ">"
-)
-
-// NewHTMLRenderer creates and configures an HTMLRenderer object, which
-// satisfies the Renderer interface.
-func NewHTMLRenderer(params HTMLRendererParameters) *HTMLRenderer {
- // configure the rendering engine
- closeTag := htmlClose
- if params.Flags&UseXHTML != 0 {
- closeTag = xhtmlClose
- }
-
- if params.FootnoteReturnLinkContents == "" {
- params.FootnoteReturnLinkContents = `<sup>[return]</sup>`
- }
-
- return &HTMLRenderer{
- HTMLRendererParameters: params,
-
- closeTag: closeTag,
- headingIDs: make(map[string]int),
-
- sr: NewSmartypantsRenderer(params.Flags),
- }
-}
-
-func isHTMLTag(tag []byte, tagname string) bool {
- found, _ := findHTMLTagPos(tag, tagname)
- return found
-}
-
-// Look for a character, but ignore it when it's inside any kind of quotes,
-// since it might be JavaScript
-func skipUntilCharIgnoreQuotes(html []byte, start int, char byte) int {
- inSingleQuote := false
- inDoubleQuote := false
- inGraveQuote := false
- i := start
- for i < len(html) {
- switch {
- case html[i] == char && !inSingleQuote && !inDoubleQuote && !inGraveQuote:
- return i
- case html[i] == '\'':
- inSingleQuote = !inSingleQuote
- case html[i] == '"':
- inDoubleQuote = !inDoubleQuote
- case html[i] == '`':
- inGraveQuote = !inGraveQuote
- }
- i++
- }
- return start
-}
-
-func findHTMLTagPos(tag []byte, tagname string) (bool, int) {
- i := 0
- if i < len(tag) && tag[0] != '<' {
- return false, -1
- }
- i++
- i = skipSpace(tag, i)
-
- if i < len(tag) && tag[i] == '/' {
- i++
- }
-
- i = skipSpace(tag, i)
- j := 0
- for ; i < len(tag); i, j = i+1, j+1 {
- if j >= len(tagname) {
- break
- }
-
- if strings.ToLower(string(tag[i]))[0] != tagname[j] {
- return false, -1
- }
- }
-
- if i == len(tag) {
- return false, -1
- }
-
- rightAngle := skipUntilCharIgnoreQuotes(tag, i, '>')
- if rightAngle >= i {
- return true, rightAngle
- }
-
- return false, -1
-}
-
-func skipSpace(tag []byte, i int) int {
- for i < len(tag) && isspace(tag[i]) {
- i++
- }
- return i
-}
-
-func isRelativeLink(link []byte) (yes bool) {
- // an anchor link: begins with '#'
- if link[0] == '#' {
- return true
- }
-
- // a link beginning with '/' but not '//' (the latter may be a protocol-relative link)
- if len(link) >= 2 && link[0] == '/' && link[1] != '/' {
- return true
- }
-
- // only the root '/'
- if len(link) == 1 && link[0] == '/' {
- return true
- }
-
- // current directory: begins with "./"
- if bytes.HasPrefix(link, []byte("./")) {
- return true
- }
-
- // parent directory: begins with "../"
- if bytes.HasPrefix(link, []byte("../")) {
- return true
- }
-
- return false
-}
-
-func (r *HTMLRenderer) ensureUniqueHeadingID(id string) string {
- for count, found := r.headingIDs[id]; found; count, found = r.headingIDs[id] {
- tmp := fmt.Sprintf("%s-%d", id, count+1)
-
- if _, tmpFound := r.headingIDs[tmp]; !tmpFound {
- r.headingIDs[id] = count + 1
- id = tmp
- } else {
- id = id + "-1"
- }
- }
-
- if _, found := r.headingIDs[id]; !found {
- r.headingIDs[id] = 0
- }
-
- return id
-}
-
-func (r *HTMLRenderer) addAbsPrefix(link []byte) []byte {
- if r.AbsolutePrefix != "" && isRelativeLink(link) && link[0] != '.' {
- newDest := r.AbsolutePrefix
- if link[0] != '/' {
- newDest += "/"
- }
- newDest += string(link)
- return []byte(newDest)
- }
- return link
-}
-
-func appendLinkAttrs(attrs []string, flags HTMLFlags, link []byte) []string {
- if isRelativeLink(link) {
- return attrs
- }
- val := []string{}
- if flags&NofollowLinks != 0 {
- val = append(val, "nofollow")
- }
- if flags&NoreferrerLinks != 0 {
- val = append(val, "noreferrer")
- }
- if flags&NoopenerLinks != 0 {
- val = append(val, "noopener")
- }
- if flags&HrefTargetBlank != 0 {
- attrs = append(attrs, "target=\"_blank\"")
- }
- if len(val) == 0 {
- return attrs
- }
- attr := fmt.Sprintf("rel=%q", strings.Join(val, " "))
- return append(attrs, attr)
-}
-
-func isMailto(link []byte) bool {
- return bytes.HasPrefix(link, []byte("mailto:"))
-}
-
-func needSkipLink(flags HTMLFlags, dest []byte) bool {
- if flags&SkipLinks != 0 {
- return true
- }
- return flags&Safelink != 0 && !isSafeLink(dest) && !isMailto(dest)
-}
-
-func isSmartypantable(node *Node) bool {
- pt := node.Parent.Type
- return pt != Link && pt != CodeBlock && pt != Code
-}
-
-func appendLanguageAttr(attrs []string, info []byte) []string {
- if len(info) == 0 {
- return attrs
- }
- endOfLang := bytes.IndexAny(info, "\t ")
- if endOfLang < 0 {
- endOfLang = len(info)
- }
- return append(attrs, fmt.Sprintf("class=\"language-%s\"", info[:endOfLang]))
-}
-
-func (r *HTMLRenderer) tag(w io.Writer, name []byte, attrs []string) {
- w.Write(name)
- if len(attrs) > 0 {
- w.Write(spaceBytes)
- w.Write([]byte(strings.Join(attrs, " ")))
- }
- w.Write(gtBytes)
- r.lastOutputLen = 1
-}
-
-func footnoteRef(prefix string, node *Node) []byte {
- urlFrag := prefix + string(slugify(node.Destination))
- anchor := fmt.Sprintf(`<a href="#fn:%s">%d</a>`, urlFrag, node.NoteID)
- return []byte(fmt.Sprintf(`<sup class="footnote-ref" id="fnref:%s">%s</sup>`, urlFrag, anchor))
-}
-
-func footnoteItem(prefix string, slug []byte) []byte {
- return []byte(fmt.Sprintf(`<li id="fn:%s%s">`, prefix, slug))
-}
-
-func footnoteReturnLink(prefix, returnLink string, slug []byte) []byte {
- const format = ` <a class="footnote-return" href="#fnref:%s%s">%s</a>`
- return []byte(fmt.Sprintf(format, prefix, slug, returnLink))
-}
-
-func itemOpenCR(node *Node) bool {
- if node.Prev == nil {
- return false
- }
- ld := node.Parent.ListData
- return !ld.Tight && ld.ListFlags&ListTypeDefinition == 0
-}
-
-func skipParagraphTags(node *Node) bool {
- grandparent := node.Parent.Parent
- if grandparent == nil || grandparent.Type != List {
- return false
- }
- tightOrTerm := grandparent.Tight || node.Parent.ListFlags&ListTypeTerm != 0
- return grandparent.Type == List && tightOrTerm
-}
-
-func cellAlignment(align CellAlignFlags) string {
- switch align {
- case TableAlignmentLeft:
- return "left"
- case TableAlignmentRight:
- return "right"
- case TableAlignmentCenter:
- return "center"
- default:
- return ""
- }
-}
-
-func (r *HTMLRenderer) out(w io.Writer, text []byte) {
- if r.disableTags > 0 {
- w.Write(htmlTagRe.ReplaceAll(text, []byte{}))
- } else {
- w.Write(text)
- }
- r.lastOutputLen = len(text)
-}
-
-func (r *HTMLRenderer) cr(w io.Writer) {
- if r.lastOutputLen > 0 {
- r.out(w, nlBytes)
- }
-}
-
-var (
- nlBytes = []byte{'\n'}
- gtBytes = []byte{'>'}
- spaceBytes = []byte{' '}
-)
-
-var (
- brTag = []byte("<br>")
- brXHTMLTag = []byte("<br />")
- emTag = []byte("<em>")
- emCloseTag = []byte("</em>")
- strongTag = []byte("<strong>")
- strongCloseTag = []byte("</strong>")
- delTag = []byte("<del>")
- delCloseTag = []byte("</del>")
- ttTag = []byte("<tt>")
- ttCloseTag = []byte("</tt>")
- aTag = []byte("<a")
- aCloseTag = []byte("</a>")
- preTag = []byte("<pre>")
- preCloseTag = []byte("</pre>")
- codeTag = []byte("<code>")
- codeCloseTag = []byte("</code>")
- pTag = []byte("<p>")
- pCloseTag = []byte("</p>")
- blockquoteTag = []byte("<blockquote>")
- blockquoteCloseTag = []byte("</blockquote>")
- hrTag = []byte("<hr>")
- hrXHTMLTag = []byte("<hr />")
- ulTag = []byte("<ul>")
- ulCloseTag = []byte("</ul>")
- olTag = []byte("<ol>")
- olCloseTag = []byte("</ol>")
- dlTag = []byte("<dl>")
- dlCloseTag = []byte("</dl>")
- liTag = []byte("<li>")
- liCloseTag = []byte("</li>")
- ddTag = []byte("<dd>")
- ddCloseTag = []byte("</dd>")
- dtTag = []byte("<dt>")
- dtCloseTag = []byte("</dt>")
- tableTag = []byte("<table>")
- tableCloseTag = []byte("</table>")
- tdTag = []byte("<td")
- tdCloseTag = []byte("</td>")
- thTag = []byte("<th")
- thCloseTag = []byte("</th>")
- theadTag = []byte("<thead>")
- theadCloseTag = []byte("</thead>")
- tbodyTag = []byte("<tbody>")
- tbodyCloseTag = []byte("</tbody>")
- trTag = []byte("<tr>")
- trCloseTag = []byte("</tr>")
- h1Tag = []byte("<h1")
- h1CloseTag = []byte("</h1>")
- h2Tag = []byte("<h2")
- h2CloseTag = []byte("</h2>")
- h3Tag = []byte("<h3")
- h3CloseTag = []byte("</h3>")
- h4Tag = []byte("<h4")
- h4CloseTag = []byte("</h4>")
- h5Tag = []byte("<h5")
- h5CloseTag = []byte("</h5>")
- h6Tag = []byte("<h6")
- h6CloseTag = []byte("</h6>")
-
- footnotesDivBytes = []byte("\n<div class=\"footnotes\">\n\n")
- footnotesCloseDivBytes = []byte("\n</div>\n")
-)
-
-func headingTagsFromLevel(level int) ([]byte, []byte) {
- if level <= 1 {
- return h1Tag, h1CloseTag
- }
- switch level {
- case 2:
- return h2Tag, h2CloseTag
- case 3:
- return h3Tag, h3CloseTag
- case 4:
- return h4Tag, h4CloseTag
- case 5:
- return h5Tag, h5CloseTag
- }
- return h6Tag, h6CloseTag
-}
-
-func (r *HTMLRenderer) outHRTag(w io.Writer) {
- if r.Flags&UseXHTML == 0 {
- r.out(w, hrTag)
- } else {
- r.out(w, hrXHTMLTag)
- }
-}
-
-// RenderNode is a default renderer of a single node of a syntax tree. For
-// block nodes it will be called twice: first with entering=true, then with
-// entering=false, so that it knows whether it is working on an opening tag or
-// a closing one. It writes the result to w.
-//
-// The return value is a way to tell the calling walker to adjust its walk
-// pattern: e.g. it can terminate the traversal by returning Terminate. Or it
-// can ask the walker to skip a subtree of this node by returning SkipChildren.
-// The typical behavior is to return GoToNext, which asks for the usual
-// traversal to the next node.
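-//
-// As a sketch (an illustration, not part of the upstream source), a custom
-// renderer can embed HTMLRenderer, special-case one node type, and delegate
-// everything else to this method:
-//
-//	type myRenderer struct{ *HTMLRenderer }
-//
-//	func (m *myRenderer) RenderNode(w io.Writer, node *Node, entering bool) WalkStatus {
-//		if node.Type == HorizontalRule {
-//			w.Write([]byte("<hr class=\"fancy\">\n")) // hypothetical custom output
-//			return GoToNext
-//		}
-//		return m.HTMLRenderer.RenderNode(w, node, entering)
-//	}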
-func (r *HTMLRenderer) RenderNode(w io.Writer, node *Node, entering bool) WalkStatus {
- attrs := []string{}
- switch node.Type {
- case Text:
- if r.Flags&Smartypants != 0 {
- var tmp bytes.Buffer
- escapeHTML(&tmp, node.Literal)
- r.sr.Process(w, tmp.Bytes())
- } else {
- if node.Parent.Type == Link {
- escLink(w, node.Literal)
- } else {
- escapeHTML(w, node.Literal)
- }
- }
- case Softbreak:
- r.cr(w)
- // TODO: make it configurable via out(renderer.softbreak)
- case Hardbreak:
- if r.Flags&UseXHTML == 0 {
- r.out(w, brTag)
- } else {
- r.out(w, brXHTMLTag)
- }
- r.cr(w)
- case Emph:
- if entering {
- r.out(w, emTag)
- } else {
- r.out(w, emCloseTag)
- }
- case Strong:
- if entering {
- r.out(w, strongTag)
- } else {
- r.out(w, strongCloseTag)
- }
- case Del:
- if entering {
- r.out(w, delTag)
- } else {
- r.out(w, delCloseTag)
- }
- case HTMLSpan:
- if r.Flags&SkipHTML != 0 {
- break
- }
- r.out(w, node.Literal)
- case Link:
- // mark it but don't link it if it is not a safe link: no smartypants
- dest := node.LinkData.Destination
- if needSkipLink(r.Flags, dest) {
- if entering {
- r.out(w, ttTag)
- } else {
- r.out(w, ttCloseTag)
- }
- } else {
- if entering {
- dest = r.addAbsPrefix(dest)
- var hrefBuf bytes.Buffer
- hrefBuf.WriteString("href=\"")
- escLink(&hrefBuf, dest)
- hrefBuf.WriteByte('"')
- attrs = append(attrs, hrefBuf.String())
- if node.NoteID != 0 {
- r.out(w, footnoteRef(r.FootnoteAnchorPrefix, node))
- break
- }
- attrs = appendLinkAttrs(attrs, r.Flags, dest)
- if len(node.LinkData.Title) > 0 {
- var titleBuff bytes.Buffer
- titleBuff.WriteString("title=\"")
- escapeHTML(&titleBuff, node.LinkData.Title)
- titleBuff.WriteByte('"')
- attrs = append(attrs, titleBuff.String())
- }
- r.tag(w, aTag, attrs)
- } else {
- if node.NoteID != 0 {
- break
- }
- r.out(w, aCloseTag)
- }
- }
- case Image:
- if r.Flags&SkipImages != 0 {
- return SkipChildren
- }
- if entering {
- dest := node.LinkData.Destination
- dest = r.addAbsPrefix(dest)
- if r.disableTags == 0 {
- //if options.safe && potentiallyUnsafe(dest) {
- //out(w, `<img src="" alt="`)
- //} else {
- r.out(w, []byte(`<img src="`))
- escLink(w, dest)
- r.out(w, []byte(`" alt="`))
- //}
- }
- r.disableTags++
- } else {
- r.disableTags--
- if r.disableTags == 0 {
- if node.LinkData.Title != nil {
- r.out(w, []byte(`" title="`))
- escapeHTML(w, node.LinkData.Title)
- }
- r.out(w, []byte(`" />`))
- }
- }
- case Code:
- r.out(w, codeTag)
- escapeHTML(w, node.Literal)
- r.out(w, codeCloseTag)
- case Document:
- break
- case Paragraph:
- if skipParagraphTags(node) {
- break
- }
- if entering {
- // TODO: untangle the rules for when the newlines need
- // to be added and when not.
- if node.Prev != nil {
- switch node.Prev.Type {
- case HTMLBlock, List, Paragraph, Heading, CodeBlock, BlockQuote, HorizontalRule:
- r.cr(w)
- }
- }
- if node.Parent.Type == BlockQuote && node.Prev == nil {
- r.cr(w)
- }
- r.out(w, pTag)
- } else {
- r.out(w, pCloseTag)
- if !(node.Parent.Type == Item && node.Next == nil) {
- r.cr(w)
- }
- }
- case BlockQuote:
- if entering {
- r.cr(w)
- r.out(w, blockquoteTag)
- } else {
- r.out(w, blockquoteCloseTag)
- r.cr(w)
- }
- case HTMLBlock:
- if r.Flags&SkipHTML != 0 {
- break
- }
- r.cr(w)
- r.out(w, node.Literal)
- r.cr(w)
- case Heading:
- headingLevel := r.HTMLRendererParameters.HeadingLevelOffset + node.Level
- openTag, closeTag := headingTagsFromLevel(headingLevel)
- if entering {
- if node.IsTitleblock {
- attrs = append(attrs, `class="title"`)
- }
- if node.HeadingID != "" {
- id := r.ensureUniqueHeadingID(node.HeadingID)
- if r.HeadingIDPrefix != "" {
- id = r.HeadingIDPrefix + id
- }
- if r.HeadingIDSuffix != "" {
- id = id + r.HeadingIDSuffix
- }
- attrs = append(attrs, fmt.Sprintf(`id="%s"`, id))
- }
- r.cr(w)
- r.tag(w, openTag, attrs)
- } else {
- r.out(w, closeTag)
- if !(node.Parent.Type == Item && node.Next == nil) {
- r.cr(w)
- }
- }
- case HorizontalRule:
- r.cr(w)
- r.outHRTag(w)
- r.cr(w)
- case List:
- openTag := ulTag
- closeTag := ulCloseTag
- if node.ListFlags&ListTypeOrdered != 0 {
- openTag = olTag
- closeTag = olCloseTag
- }
- if node.ListFlags&ListTypeDefinition != 0 {
- openTag = dlTag
- closeTag = dlCloseTag
- }
- if entering {
- if node.IsFootnotesList {
- r.out(w, footnotesDivBytes)
- r.outHRTag(w)
- r.cr(w)
- }
- r.cr(w)
- if node.Parent.Type == Item && node.Parent.Parent.Tight {
- r.cr(w)
- }
- r.tag(w, openTag[:len(openTag)-1], attrs)
- r.cr(w)
- } else {
- r.out(w, closeTag)
- //cr(w)
- //if node.parent.Type != Item {
- // cr(w)
- //}
- if node.Parent.Type == Item && node.Next != nil {
- r.cr(w)
- }
- if node.Parent.Type == Document || node.Parent.Type == BlockQuote {
- r.cr(w)
- }
- if node.IsFootnotesList {
- r.out(w, footnotesCloseDivBytes)
- }
- }
- case Item:
- openTag := liTag
- closeTag := liCloseTag
- if node.ListFlags&ListTypeDefinition != 0 {
- openTag = ddTag
- closeTag = ddCloseTag
- }
- if node.ListFlags&ListTypeTerm != 0 {
- openTag = dtTag
- closeTag = dtCloseTag
- }
- if entering {
- if itemOpenCR(node) {
- r.cr(w)
- }
- if node.ListData.RefLink != nil {
- slug := slugify(node.ListData.RefLink)
- r.out(w, footnoteItem(r.FootnoteAnchorPrefix, slug))
- break
- }
- r.out(w, openTag)
- } else {
- if node.ListData.RefLink != nil {
- slug := slugify(node.ListData.RefLink)
- if r.Flags&FootnoteReturnLinks != 0 {
- r.out(w, footnoteReturnLink(r.FootnoteAnchorPrefix, r.FootnoteReturnLinkContents, slug))
- }
- }
- r.out(w, closeTag)
- r.cr(w)
- }
- case CodeBlock:
- attrs = appendLanguageAttr(attrs, node.Info)
- r.cr(w)
- r.out(w, preTag)
- r.tag(w, codeTag[:len(codeTag)-1], attrs)
- escapeHTML(w, node.Literal)
- r.out(w, codeCloseTag)
- r.out(w, preCloseTag)
- if node.Parent.Type != Item {
- r.cr(w)
- }
- case Table:
- if entering {
- r.cr(w)
- r.out(w, tableTag)
- } else {
- r.out(w, tableCloseTag)
- r.cr(w)
- }
- case TableCell:
- openTag := tdTag
- closeTag := tdCloseTag
- if node.IsHeader {
- openTag = thTag
- closeTag = thCloseTag
- }
- if entering {
- align := cellAlignment(node.Align)
- if align != "" {
- attrs = append(attrs, fmt.Sprintf(`align="%s"`, align))
- }
- if node.Prev == nil {
- r.cr(w)
- }
- r.tag(w, openTag, attrs)
- } else {
- r.out(w, closeTag)
- r.cr(w)
- }
- case TableHead:
- if entering {
- r.cr(w)
- r.out(w, theadTag)
- } else {
- r.out(w, theadCloseTag)
- r.cr(w)
- }
- case TableBody:
- if entering {
- r.cr(w)
- r.out(w, tbodyTag)
- // XXX: this is to adhere to a rather silly test. Should fix test.
- if node.FirstChild == nil {
- r.cr(w)
- }
- } else {
- r.out(w, tbodyCloseTag)
- r.cr(w)
- }
- case TableRow:
- if entering {
- r.cr(w)
- r.out(w, trTag)
- } else {
- r.out(w, trCloseTag)
- r.cr(w)
- }
- default:
- panic("Unknown node type " + node.Type.String())
- }
- return GoToNext
-}
-
-// RenderHeader writes HTML document preamble and TOC if requested.
-func (r *HTMLRenderer) RenderHeader(w io.Writer, ast *Node) {
- r.writeDocumentHeader(w)
- if r.Flags&TOC != 0 {
- r.writeTOC(w, ast)
- }
-}
-
-// RenderFooter writes HTML document footer.
-func (r *HTMLRenderer) RenderFooter(w io.Writer, ast *Node) {
- if r.Flags&CompletePage == 0 {
- return
- }
- io.WriteString(w, "\n</body>\n</html>\n")
-}
-
-func (r *HTMLRenderer) writeDocumentHeader(w io.Writer) {
- if r.Flags&CompletePage == 0 {
- return
- }
- ending := ""
- if r.Flags&UseXHTML != 0 {
- io.WriteString(w, "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" ")
- io.WriteString(w, "\"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n")
- io.WriteString(w, "<html xmlns=\"http://www.w3.org/1999/xhtml\">\n")
- ending = " /"
- } else {
- io.WriteString(w, "<!DOCTYPE html>\n")
- io.WriteString(w, "<html>\n")
- }
- io.WriteString(w, "<head>\n")
- io.WriteString(w, " <title>")
- if r.Flags&Smartypants != 0 {
- r.sr.Process(w, []byte(r.Title))
- } else {
- escapeHTML(w, []byte(r.Title))
- }
- io.WriteString(w, "</title>\n")
- io.WriteString(w, " <meta name=\"GENERATOR\" content=\"Blackfriday Markdown Processor v")
- io.WriteString(w, Version)
- io.WriteString(w, "\"")
- io.WriteString(w, ending)
- io.WriteString(w, ">\n")
- io.WriteString(w, " <meta charset=\"utf-8\"")
- io.WriteString(w, ending)
- io.WriteString(w, ">\n")
- if r.CSS != "" {
- io.WriteString(w, " <link rel=\"stylesheet\" type=\"text/css\" href=\"")
- escapeHTML(w, []byte(r.CSS))
- io.WriteString(w, "\"")
- io.WriteString(w, ending)
- io.WriteString(w, ">\n")
- }
- if r.Icon != "" {
- io.WriteString(w, " <link rel=\"icon\" type=\"image/x-icon\" href=\"")
- escapeHTML(w, []byte(r.Icon))
- io.WriteString(w, "\"")
- io.WriteString(w, ending)
- io.WriteString(w, ">\n")
- }
- io.WriteString(w, "</head>\n")
- io.WriteString(w, "<body>\n\n")
-}
-
-func (r *HTMLRenderer) writeTOC(w io.Writer, ast *Node) {
- buf := bytes.Buffer{}
-
- inHeading := false
- tocLevel := 0
- headingCount := 0
-
- ast.Walk(func(node *Node, entering bool) WalkStatus {
- if node.Type == Heading && !node.HeadingData.IsTitleblock {
- inHeading = entering
- if entering {
- node.HeadingID = fmt.Sprintf("toc_%d", headingCount)
- if node.Level == tocLevel {
- buf.WriteString("</li>\n\n<li>")
- } else if node.Level < tocLevel {
- for node.Level < tocLevel {
- tocLevel--
- buf.WriteString("</li>\n</ul>")
- }
- buf.WriteString("</li>\n\n<li>")
- } else {
- for node.Level > tocLevel {
- tocLevel++
- buf.WriteString("\n<ul>\n<li>")
- }
- }
-
- fmt.Fprintf(&buf, `<a href="#toc_%d">`, headingCount)
- headingCount++
- } else {
- buf.WriteString("</a>")
- }
- return GoToNext
- }
-
- if inHeading {
- return r.RenderNode(&buf, node, entering)
- }
-
- return GoToNext
- })
-
- for ; tocLevel > 0; tocLevel-- {
- buf.WriteString("</li>\n</ul>")
- }
-
- if buf.Len() > 0 {
- io.WriteString(w, "<nav>\n")
- w.Write(buf.Bytes())
- io.WriteString(w, "\n\n</nav>\n")
- }
- r.lastOutputLen = buf.Len()
-}
+++ /dev/null
-//
-// Blackfriday Markdown Processor
-// Available at http://github.com/russross/blackfriday
-//
-// Copyright © 2011 Russ Ross <russ@russross.com>.
-// Distributed under the Simplified BSD License.
-// See README.md for details.
-//
-
-//
-// Functions to parse inline elements.
-//
-
-package blackfriday
-
-import (
- "bytes"
- "regexp"
- "strconv"
-)
-
-var (
- urlRe = `((https?|ftp):\/\/|\/)[-A-Za-z0-9+&@#\/%?=~_|!:,.;\(\)]+`
- anchorRe = regexp.MustCompile(`^(<a\shref="` + urlRe + `"(\stitle="[^"<>]+")?\s?>` + urlRe + `<\/a>)`)
-
- // https://www.w3.org/TR/html5/syntax.html#character-references
- // highest unicode code point in 17 planes (2^20): 1,114,112d =
- // 7 dec digits or 6 hex digits
-	// named entity references can be 2-31 characters with stuff like &lt;
-	// at one end and &CounterClockwiseContourIntegral; at the other. There
-	// are also sometimes numbers at the end, although this isn't inherent
-	// in the specification; there are never numbers anywhere else in
-	// current character references, though; see &frac34; and &blk12;, etc.
- // https://www.w3.org/TR/html5/syntax.html#named-character-references
- //
- // entity := "&" (named group | number ref) ";"
- // named group := [a-zA-Z]{2,31}[0-9]{0,2}
- // number ref := "#" (dec ref | hex ref)
- // dec ref := [0-9]{1,7}
- // hex ref := ("x" | "X") [0-9a-fA-F]{1,6}
- htmlEntityRe = regexp.MustCompile(`&([a-zA-Z]{2,31}[0-9]{0,2}|#([0-9]{1,7}|[xX][0-9a-fA-F]{1,6}));`)
-)
-
-// Functions to parse text within a block
-// Each function returns the number of chars taken care of
-// data is the complete block being rendered
-// offset is the number of valid chars before the current cursor
-
-func (p *Markdown) inline(currBlock *Node, data []byte) {
- // handlers might call us recursively: enforce a maximum depth
- if p.nesting >= p.maxNesting || len(data) == 0 {
- return
- }
- p.nesting++
- beg, end := 0, 0
- for end < len(data) {
- handler := p.inlineCallback[data[end]]
- if handler != nil {
- if consumed, node := handler(p, data, end); consumed == 0 {
- // No action from the callback.
- end++
- } else {
- // Copy inactive chars into the output.
- currBlock.AppendChild(text(data[beg:end]))
- if node != nil {
- currBlock.AppendChild(node)
- }
- // Skip past whatever the callback used.
- beg = end + consumed
- end = beg
- }
- } else {
- end++
- }
- }
- if beg < len(data) {
- if data[end-1] == '\n' {
- end--
- }
- currBlock.AppendChild(text(data[beg:end]))
- }
- p.nesting--
-}
-
-// single and double emphasis parsing
-func emphasis(p *Markdown, data []byte, offset int) (int, *Node) {
- data = data[offset:]
- c := data[0]
-
- if len(data) > 2 && data[1] != c {
- // whitespace cannot follow an opening emphasis;
- // strikethrough only takes two characters '~~'
- if c == '~' || isspace(data[1]) {
- return 0, nil
- }
- ret, node := helperEmphasis(p, data[1:], c)
- if ret == 0 {
- return 0, nil
- }
-
- return ret + 1, node
- }
-
- if len(data) > 3 && data[1] == c && data[2] != c {
- if isspace(data[2]) {
- return 0, nil
- }
- ret, node := helperDoubleEmphasis(p, data[2:], c)
- if ret == 0 {
- return 0, nil
- }
-
- return ret + 2, node
- }
-
- if len(data) > 4 && data[1] == c && data[2] == c && data[3] != c {
- if c == '~' || isspace(data[3]) {
- return 0, nil
- }
- ret, node := helperTripleEmphasis(p, data, 3, c)
- if ret == 0 {
- return 0, nil
- }
-
- return ret + 3, node
- }
-
- return 0, nil
-}
-
-func codeSpan(p *Markdown, data []byte, offset int) (int, *Node) {
- data = data[offset:]
-
- nb := 0
-
- // count the number of backticks in the delimiter
- for nb < len(data) && data[nb] == '`' {
- nb++
- }
-
- // find the next delimiter
- i, end := 0, 0
- for end = nb; end < len(data) && i < nb; end++ {
- if data[end] == '`' {
- i++
- } else {
- i = 0
- }
- }
-
- // no matching delimiter?
- if i < nb && end >= len(data) {
- return 0, nil
- }
-
- // trim outside whitespace
- fBegin := nb
- for fBegin < end && data[fBegin] == ' ' {
- fBegin++
- }
-
- fEnd := end - nb
- for fEnd > fBegin && data[fEnd-1] == ' ' {
- fEnd--
- }
-
- // render the code span
- if fBegin != fEnd {
- code := NewNode(Code)
- code.Literal = data[fBegin:fEnd]
- return end, code
- }
-
- return end, nil
-}
-
-// newline preceded by two spaces becomes <br>
-func maybeLineBreak(p *Markdown, data []byte, offset int) (int, *Node) {
- origOffset := offset
- for offset < len(data) && data[offset] == ' ' {
- offset++
- }
-
- if offset < len(data) && data[offset] == '\n' {
- if offset-origOffset >= 2 {
- return offset - origOffset + 1, NewNode(Hardbreak)
- }
- return offset - origOffset, nil
- }
- return 0, nil
-}
-
-// newline without two spaces works when HardLineBreak is enabled
-func lineBreak(p *Markdown, data []byte, offset int) (int, *Node) {
- if p.extensions&HardLineBreak != 0 {
- return 1, NewNode(Hardbreak)
- }
- return 0, nil
-}
-
-type linkType int
-
-const (
- linkNormal linkType = iota
- linkImg
- linkDeferredFootnote
- linkInlineFootnote
-)
-
-func isReferenceStyleLink(data []byte, pos int, t linkType) bool {
- if t == linkDeferredFootnote {
- return false
- }
- return pos < len(data)-1 && data[pos] == '[' && data[pos+1] != '^'
-}
-
-func maybeImage(p *Markdown, data []byte, offset int) (int, *Node) {
- if offset < len(data)-1 && data[offset+1] == '[' {
- return link(p, data, offset)
- }
- return 0, nil
-}
-
-func maybeInlineFootnote(p *Markdown, data []byte, offset int) (int, *Node) {
- if offset < len(data)-1 && data[offset+1] == '[' {
- return link(p, data, offset)
- }
- return 0, nil
-}
-
-// '[': parse a link or an image or a footnote
-func link(p *Markdown, data []byte, offset int) (int, *Node) {
-	// no links allowed inside regular links, footnotes, or deferred footnotes
- if p.insideLink && (offset > 0 && data[offset-1] == '[' || len(data)-1 > offset && data[offset+1] == '^') {
- return 0, nil
- }
-
- var t linkType
- switch {
- // special case: ![^text] == deferred footnote (that follows something with
- // an exclamation point)
- case p.extensions&Footnotes != 0 && len(data)-1 > offset && data[offset+1] == '^':
- t = linkDeferredFootnote
- // ![alt] == image
- case offset >= 0 && data[offset] == '!':
- t = linkImg
- offset++
- // ^[text] == inline footnote
- // [^refId] == deferred footnote
- case p.extensions&Footnotes != 0:
- if offset >= 0 && data[offset] == '^' {
- t = linkInlineFootnote
- offset++
- } else if len(data)-1 > offset && data[offset+1] == '^' {
- t = linkDeferredFootnote
- }
- // [text] == regular link
- default:
- t = linkNormal
- }
-
- data = data[offset:]
-
- var (
- i = 1
- noteID int
- title, link, altContent []byte
- textHasNl = false
- )
-
- if t == linkDeferredFootnote {
- i++
- }
-
- // look for the matching closing bracket
- for level := 1; level > 0 && i < len(data); i++ {
- switch {
- case data[i] == '\n':
- textHasNl = true
-
- case data[i-1] == '\\':
- continue
-
- case data[i] == '[':
- level++
-
- case data[i] == ']':
- level--
- if level <= 0 {
- i-- // compensate for extra i++ in for loop
- }
- }
- }
-
- if i >= len(data) {
- return 0, nil
- }
-
- txtE := i
- i++
- var footnoteNode *Node
-
- // skip any amount of whitespace or newline
- // (this is much more lax than original markdown syntax)
- for i < len(data) && isspace(data[i]) {
- i++
- }
-
- // inline style link
- switch {
- case i < len(data) && data[i] == '(':
- // skip initial whitespace
- i++
-
- for i < len(data) && isspace(data[i]) {
- i++
- }
-
- linkB := i
-
- // look for link end: ' " )
- findlinkend:
- for i < len(data) {
- switch {
- case data[i] == '\\':
- i += 2
-
- case data[i] == ')' || data[i] == '\'' || data[i] == '"':
- break findlinkend
-
- default:
- i++
- }
- }
-
- if i >= len(data) {
- return 0, nil
- }
- linkE := i
-
- // look for title end if present
- titleB, titleE := 0, 0
- if data[i] == '\'' || data[i] == '"' {
- i++
- titleB = i
-
- findtitleend:
- for i < len(data) {
- switch {
- case data[i] == '\\':
- i += 2
-
- case data[i] == ')':
- break findtitleend
-
- default:
- i++
- }
- }
-
- if i >= len(data) {
- return 0, nil
- }
-
- // skip whitespace after title
- titleE = i - 1
- for titleE > titleB && isspace(data[titleE]) {
- titleE--
- }
-
- // check for closing quote presence
- if data[titleE] != '\'' && data[titleE] != '"' {
- titleB, titleE = 0, 0
- linkE = i
- }
- }
-
- // remove whitespace at the end of the link
- for linkE > linkB && isspace(data[linkE-1]) {
- linkE--
- }
-
- // remove optional angle brackets around the link
- if data[linkB] == '<' {
- linkB++
- }
- if data[linkE-1] == '>' {
- linkE--
- }
-
- // build escaped link and title
- if linkE > linkB {
- link = data[linkB:linkE]
- }
-
- if titleE > titleB {
- title = data[titleB:titleE]
- }
-
- i++
-
- // reference style link
- case isReferenceStyleLink(data, i, t):
- var id []byte
- altContentConsidered := false
-
- // look for the id
- i++
- linkB := i
- for i < len(data) && data[i] != ']' {
- i++
- }
- if i >= len(data) {
- return 0, nil
- }
- linkE := i
-
- // find the reference
- if linkB == linkE {
- if textHasNl {
- var b bytes.Buffer
-
- for j := 1; j < txtE; j++ {
- switch {
- case data[j] != '\n':
- b.WriteByte(data[j])
- case data[j-1] != ' ':
- b.WriteByte(' ')
- }
- }
-
- id = b.Bytes()
- } else {
- id = data[1:txtE]
- altContentConsidered = true
- }
- } else {
- id = data[linkB:linkE]
- }
-
- // find the reference with matching id
- lr, ok := p.getRef(string(id))
- if !ok {
- return 0, nil
- }
-
- // keep link and title from reference
- link = lr.link
- title = lr.title
- if altContentConsidered {
- altContent = lr.text
- }
- i++
-
- // shortcut reference style link or reference or inline footnote
- default:
- var id []byte
-
- // craft the id
- if textHasNl {
- var b bytes.Buffer
-
- for j := 1; j < txtE; j++ {
- switch {
- case data[j] != '\n':
- b.WriteByte(data[j])
- case data[j-1] != ' ':
- b.WriteByte(' ')
- }
- }
-
- id = b.Bytes()
- } else {
- if t == linkDeferredFootnote {
- id = data[2:txtE] // get rid of the ^
- } else {
- id = data[1:txtE]
- }
- }
-
- footnoteNode = NewNode(Item)
- if t == linkInlineFootnote {
- // create a new reference
- noteID = len(p.notes) + 1
-
- var fragment []byte
- if len(id) > 0 {
- if len(id) < 16 {
- fragment = make([]byte, len(id))
- } else {
- fragment = make([]byte, 16)
- }
- copy(fragment, slugify(id))
- } else {
- fragment = append([]byte("footnote-"), []byte(strconv.Itoa(noteID))...)
- }
-
- ref := &reference{
- noteID: noteID,
- hasBlock: false,
- link: fragment,
- title: id,
- footnote: footnoteNode,
- }
-
- p.notes = append(p.notes, ref)
-
- link = ref.link
- title = ref.title
- } else {
- // find the reference with matching id
- lr, ok := p.getRef(string(id))
- if !ok {
- return 0, nil
- }
-
- if t == linkDeferredFootnote {
- lr.noteID = len(p.notes) + 1
- lr.footnote = footnoteNode
- p.notes = append(p.notes, lr)
- }
-
- // keep link and title from reference
- link = lr.link
- // if inline footnote, title == footnote contents
- title = lr.title
- noteID = lr.noteID
- }
-
- // rewind the whitespace
- i = txtE + 1
- }
-
- var uLink []byte
- if t == linkNormal || t == linkImg {
- if len(link) > 0 {
- var uLinkBuf bytes.Buffer
- unescapeText(&uLinkBuf, link)
- uLink = uLinkBuf.Bytes()
- }
-
- // links need something to click on and somewhere to go
- if len(uLink) == 0 || (t == linkNormal && txtE <= 1) {
- return 0, nil
- }
- }
-
- // call the relevant rendering function
- var linkNode *Node
- switch t {
- case linkNormal:
- linkNode = NewNode(Link)
- linkNode.Destination = normalizeURI(uLink)
- linkNode.Title = title
- if len(altContent) > 0 {
- linkNode.AppendChild(text(altContent))
- } else {
- // links cannot contain other links, so turn off link parsing
- // temporarily and recurse
- insideLink := p.insideLink
- p.insideLink = true
- p.inline(linkNode, data[1:txtE])
- p.insideLink = insideLink
- }
-
- case linkImg:
- linkNode = NewNode(Image)
- linkNode.Destination = uLink
- linkNode.Title = title
- linkNode.AppendChild(text(data[1:txtE]))
- i++
-
- case linkInlineFootnote, linkDeferredFootnote:
- linkNode = NewNode(Link)
- linkNode.Destination = link
- linkNode.Title = title
- linkNode.NoteID = noteID
- linkNode.Footnote = footnoteNode
- if t == linkInlineFootnote {
- i++
- }
-
- default:
- return 0, nil
- }
-
- return i, linkNode
-}
-
-func (p *Markdown) inlineHTMLComment(data []byte) int {
- if len(data) < 5 {
- return 0
- }
- if data[0] != '<' || data[1] != '!' || data[2] != '-' || data[3] != '-' {
- return 0
- }
- i := 5
- // scan for an end-of-comment marker, across lines if necessary
- for i < len(data) && !(data[i-2] == '-' && data[i-1] == '-' && data[i] == '>') {
- i++
- }
- // no end-of-comment marker
- if i >= len(data) {
- return 0
- }
- return i + 1
-}
-
-func stripMailto(link []byte) []byte {
- if bytes.HasPrefix(link, []byte("mailto://")) {
- return link[9:]
- } else if bytes.HasPrefix(link, []byte("mailto:")) {
- return link[7:]
- } else {
- return link
- }
-}
-
-// autolinkType specifies a kind of autolink that gets detected.
-type autolinkType int
-
-// These are the possible flag values for the autolink renderer.
-const (
- notAutolink autolinkType = iota
- normalAutolink
- emailAutolink
-)
-
-// '<' when tags or autolinks are allowed
-func leftAngle(p *Markdown, data []byte, offset int) (int, *Node) {
- data = data[offset:]
- altype, end := tagLength(data)
- if size := p.inlineHTMLComment(data); size > 0 {
- end = size
- }
- if end > 2 {
- if altype != notAutolink {
- var uLink bytes.Buffer
- unescapeText(&uLink, data[1:end+1-2])
- if uLink.Len() > 0 {
- link := uLink.Bytes()
- node := NewNode(Link)
- node.Destination = link
- if altype == emailAutolink {
- node.Destination = append([]byte("mailto:"), link...)
- }
- node.AppendChild(text(stripMailto(link)))
- return end, node
- }
- } else {
- htmlTag := NewNode(HTMLSpan)
- htmlTag.Literal = data[:end]
- return end, htmlTag
- }
- }
-
- return end, nil
-}
-
-// '\\' backslash escape
-var escapeChars = []byte("\\`*_{}[]()#+-.!:|&<>~")
-
-func escape(p *Markdown, data []byte, offset int) (int, *Node) {
- data = data[offset:]
-
- if len(data) > 1 {
- if p.extensions&BackslashLineBreak != 0 && data[1] == '\n' {
- return 2, NewNode(Hardbreak)
- }
- if bytes.IndexByte(escapeChars, data[1]) < 0 {
- return 0, nil
- }
-
- return 2, text(data[1:2])
- }
-
- return 2, nil
-}
-
-func unescapeText(ob *bytes.Buffer, src []byte) {
- i := 0
- for i < len(src) {
- org := i
- for i < len(src) && src[i] != '\\' {
- i++
- }
-
- if i > org {
- ob.Write(src[org:i])
- }
-
- if i+1 >= len(src) {
- break
- }
-
- ob.WriteByte(src[i+1])
- i += 2
- }
-}
-
-// '&' escaped when it doesn't belong to an entity
-// valid entities are assumed to be anything matching &#?[A-Za-z0-9]+;
-func entity(p *Markdown, data []byte, offset int) (int, *Node) {
- data = data[offset:]
-
- end := 1
-
- if end < len(data) && data[end] == '#' {
- end++
- }
-
- for end < len(data) && isalnum(data[end]) {
- end++
- }
-
- if end < len(data) && data[end] == ';' {
- end++ // real entity
- } else {
- return 0, nil // lone '&'
- }
-
- ent := data[:end]
-	// undo &amp; escaping or it will be converted to &amp;amp; by another
-	// escaper in the renderer
-	if bytes.Equal(ent, []byte("&amp;")) {
- ent = []byte{'&'}
- }
-
- return end, text(ent)
-}
-
-func linkEndsWithEntity(data []byte, linkEnd int) bool {
- entityRanges := htmlEntityRe.FindAllIndex(data[:linkEnd], -1)
- return entityRanges != nil && entityRanges[len(entityRanges)-1][1] == linkEnd
-}
-
-// hasPrefixCaseInsensitive is a custom implementation of
-// strings.HasPrefix(strings.ToLower(s), prefix)
-// we rolled our own because ToLower pulls in the full Unicode lowercasing
-// machinery, which is very slow. Since this func will only be
-// used on ASCII protocol prefixes, we can take shortcuts.
-func hasPrefixCaseInsensitive(s, prefix []byte) bool {
- if len(s) < len(prefix) {
- return false
- }
- delta := byte('a' - 'A')
- for i, b := range prefix {
- if b != s[i] && b != s[i]+delta {
- return false
- }
- }
- return true
-}
-
-var protocolPrefixes = [][]byte{
- []byte("http://"),
- []byte("https://"),
- []byte("ftp://"),
- []byte("file://"),
- []byte("mailto:"),
-}
-
-const shortestPrefix = 6 // len("ftp://"), the shortest of the above
-
-func maybeAutoLink(p *Markdown, data []byte, offset int) (int, *Node) {
- // quick check to rule out most false hits
- if p.insideLink || len(data) < offset+shortestPrefix {
- return 0, nil
- }
- for _, prefix := range protocolPrefixes {
- endOfHead := offset + 8 // 8 is the len() of the longest prefix
- if endOfHead > len(data) {
- endOfHead = len(data)
- }
- if hasPrefixCaseInsensitive(data[offset:endOfHead], prefix) {
- return autoLink(p, data, offset)
- }
- }
- return 0, nil
-}
-
-func autoLink(p *Markdown, data []byte, offset int) (int, *Node) {
- // Now a more expensive check to see if we're not inside an anchor element
- anchorStart := offset
- offsetFromAnchor := 0
- for anchorStart > 0 && data[anchorStart] != '<' {
- anchorStart--
- offsetFromAnchor++
- }
-
- anchorStr := anchorRe.Find(data[anchorStart:])
- if anchorStr != nil {
- anchorClose := NewNode(HTMLSpan)
- anchorClose.Literal = anchorStr[offsetFromAnchor:]
- return len(anchorStr) - offsetFromAnchor, anchorClose
- }
-
- // scan backward for a word boundary
- rewind := 0
- for offset-rewind > 0 && rewind <= 7 && isletter(data[offset-rewind-1]) {
- rewind++
- }
- if rewind > 6 { // longest supported protocol is "mailto" which has 6 letters
- return 0, nil
- }
-
- origData := data
- data = data[offset-rewind:]
-
- if !isSafeLink(data) {
- return 0, nil
- }
-
- linkEnd := 0
- for linkEnd < len(data) && !isEndOfLink(data[linkEnd]) {
- linkEnd++
- }
-
- // Skip punctuation at the end of the link
- if (data[linkEnd-1] == '.' || data[linkEnd-1] == ',') && data[linkEnd-2] != '\\' {
- linkEnd--
- }
-
- // But don't skip semicolon if it's a part of escaped entity:
- if data[linkEnd-1] == ';' && data[linkEnd-2] != '\\' && !linkEndsWithEntity(data, linkEnd) {
- linkEnd--
- }
-
- // See if the link finishes with a punctuation sign that can be closed.
- var copen byte
- switch data[linkEnd-1] {
- case '"':
- copen = '"'
- case '\'':
- copen = '\''
- case ')':
- copen = '('
- case ']':
- copen = '['
- case '}':
- copen = '{'
- default:
- copen = 0
- }
-
- if copen != 0 {
- bufEnd := offset - rewind + linkEnd - 2
-
- openDelim := 1
-
- /* Try to close the final punctuation sign in this same line;
- * if we managed to close it outside of the URL, that means that it's
- * not part of the URL. If it closes inside the URL, that means it
- * is part of the URL.
- *
- * Examples:
- *
- * foo http://www.pokemon.com/Pikachu_(Electric) bar
- * => http://www.pokemon.com/Pikachu_(Electric)
- *
- * foo (http://www.pokemon.com/Pikachu_(Electric)) bar
- * => http://www.pokemon.com/Pikachu_(Electric)
- *
- * foo http://www.pokemon.com/Pikachu_(Electric)) bar
- * => http://www.pokemon.com/Pikachu_(Electric))
- *
- * (foo http://www.pokemon.com/Pikachu_(Electric)) bar
- * => foo http://www.pokemon.com/Pikachu_(Electric)
- */
-
- for bufEnd >= 0 && origData[bufEnd] != '\n' && openDelim != 0 {
- if origData[bufEnd] == data[linkEnd-1] {
- openDelim++
- }
-
- if origData[bufEnd] == copen {
- openDelim--
- }
-
- bufEnd--
- }
-
- if openDelim == 0 {
- linkEnd--
- }
- }
-
- var uLink bytes.Buffer
- unescapeText(&uLink, data[:linkEnd])
-
- if uLink.Len() > 0 {
- node := NewNode(Link)
- node.Destination = uLink.Bytes()
- node.AppendChild(text(uLink.Bytes()))
- return linkEnd, node
- }
-
- return linkEnd, nil
-}
-
-func isEndOfLink(char byte) bool {
- return isspace(char) || char == '<'
-}
-
-var validUris = [][]byte{[]byte("http://"), []byte("https://"), []byte("ftp://"), []byte("mailto://")}
-var validPaths = [][]byte{[]byte("/"), []byte("./"), []byte("../")}
-
-func isSafeLink(link []byte) bool {
- for _, path := range validPaths {
- if len(link) >= len(path) && bytes.Equal(link[:len(path)], path) {
- if len(link) == len(path) {
- return true
- } else if isalnum(link[len(path)]) {
- return true
- }
- }
- }
-
- for _, prefix := range validUris {
- // TODO: handle unicode here
- // case-insensitive prefix test
- if len(link) > len(prefix) && bytes.Equal(bytes.ToLower(link[:len(prefix)]), prefix) && isalnum(link[len(prefix)]) {
- return true
- }
- }
-
- return false
-}
-
-// return the length of the given tag, or 0 if it's not valid
-func tagLength(data []byte) (autolink autolinkType, end int) {
- var i, j int
-
- // a valid tag can't be shorter than 3 chars
- if len(data) < 3 {
- return notAutolink, 0
- }
-
- // begins with a '<' optionally followed by '/', followed by letter or number
- if data[0] != '<' {
- return notAutolink, 0
- }
- if data[1] == '/' {
- i = 2
- } else {
- i = 1
- }
-
- if !isalnum(data[i]) {
- return notAutolink, 0
- }
-
- // scheme test
- autolink = notAutolink
-
-	// try to find the beginning of a URI
- for i < len(data) && (isalnum(data[i]) || data[i] == '.' || data[i] == '+' || data[i] == '-') {
- i++
- }
-
- if i > 1 && i < len(data) && data[i] == '@' {
- if j = isMailtoAutoLink(data[i:]); j != 0 {
- return emailAutolink, i + j
- }
- }
-
- if i > 2 && i < len(data) && data[i] == ':' {
- autolink = normalAutolink
- i++
- }
-
- // complete autolink test: no whitespace or ' or "
- switch {
- case i >= len(data):
- autolink = notAutolink
- case autolink != notAutolink:
- j = i
-
- for i < len(data) {
- if data[i] == '\\' {
- i += 2
- } else if data[i] == '>' || data[i] == '\'' || data[i] == '"' || isspace(data[i]) {
- break
- } else {
- i++
- }
-
- }
-
- if i >= len(data) {
- return autolink, 0
- }
- if i > j && data[i] == '>' {
- return autolink, i + 1
- }
-
- // one of the forbidden chars has been found
- autolink = notAutolink
- }
- i += bytes.IndexByte(data[i:], '>')
- if i < 0 {
- return autolink, 0
- }
- return autolink, i + 1
-}
-
-// look for the address part of a mail autolink and '>'
-// this is less strict than the original markdown e-mail address matching
-func isMailtoAutoLink(data []byte) int {
- nb := 0
-
- // address is assumed to be: [-@._a-zA-Z0-9]+ with exactly one '@'
- for i := 0; i < len(data); i++ {
- if isalnum(data[i]) {
- continue
- }
-
- switch data[i] {
- case '@':
- nb++
-
- case '-', '.', '_':
- break
-
- case '>':
- if nb == 1 {
- return i + 1
- }
- return 0
- default:
- return 0
- }
- }
-
- return 0
-}
-
-// look for the next emph char, skipping other constructs
-func helperFindEmphChar(data []byte, c byte) int {
- i := 0
-
- for i < len(data) {
- for i < len(data) && data[i] != c && data[i] != '`' && data[i] != '[' {
- i++
- }
- if i >= len(data) {
- return 0
- }
- // do not count escaped chars
- if i != 0 && data[i-1] == '\\' {
- i++
- continue
- }
- if data[i] == c {
- return i
- }
-
- if data[i] == '`' {
- // skip a code span
- tmpI := 0
- i++
- for i < len(data) && data[i] != '`' {
- if tmpI == 0 && data[i] == c {
- tmpI = i
- }
- i++
- }
- if i >= len(data) {
- return tmpI
- }
- i++
- } else if data[i] == '[' {
- // skip a link
- tmpI := 0
- i++
- for i < len(data) && data[i] != ']' {
- if tmpI == 0 && data[i] == c {
- tmpI = i
- }
- i++
- }
- i++
- for i < len(data) && (data[i] == ' ' || data[i] == '\n') {
- i++
- }
- if i >= len(data) {
- return tmpI
- }
- if data[i] != '[' && data[i] != '(' { // not a link
- if tmpI > 0 {
- return tmpI
- }
- continue
- }
- cc := data[i]
- i++
- for i < len(data) && data[i] != cc {
- if tmpI == 0 && data[i] == c {
- return i
- }
- i++
- }
- if i >= len(data) {
- return tmpI
- }
- i++
- }
- }
- return 0
-}
-
-func helperEmphasis(p *Markdown, data []byte, c byte) (int, *Node) {
- i := 0
-
- // skip one symbol if coming from emph3
- if len(data) > 1 && data[0] == c && data[1] == c {
- i = 1
- }
-
- for i < len(data) {
- length := helperFindEmphChar(data[i:], c)
- if length == 0 {
- return 0, nil
- }
- i += length
- if i >= len(data) {
- return 0, nil
- }
-
- if i+1 < len(data) && data[i+1] == c {
- i++
- continue
- }
-
- if data[i] == c && !isspace(data[i-1]) {
-
- if p.extensions&NoIntraEmphasis != 0 {
- if !(i+1 == len(data) || isspace(data[i+1]) || ispunct(data[i+1])) {
- continue
- }
- }
-
- emph := NewNode(Emph)
- p.inline(emph, data[:i])
- return i + 1, emph
- }
- }
-
- return 0, nil
-}
-
-func helperDoubleEmphasis(p *Markdown, data []byte, c byte) (int, *Node) {
- i := 0
-
- for i < len(data) {
- length := helperFindEmphChar(data[i:], c)
- if length == 0 {
- return 0, nil
- }
- i += length
-
- if i+1 < len(data) && data[i] == c && data[i+1] == c && i > 0 && !isspace(data[i-1]) {
- nodeType := Strong
- if c == '~' {
- nodeType = Del
- }
- node := NewNode(nodeType)
- p.inline(node, data[:i])
- return i + 2, node
- }
- i++
- }
- return 0, nil
-}
-
-func helperTripleEmphasis(p *Markdown, data []byte, offset int, c byte) (int, *Node) {
- i := 0
- origData := data
- data = data[offset:]
-
- for i < len(data) {
- length := helperFindEmphChar(data[i:], c)
- if length == 0 {
- return 0, nil
- }
- i += length
-
-		// skip symbols preceded by whitespace
- if data[i] != c || isspace(data[i-1]) {
- continue
- }
-
- switch {
- case i+2 < len(data) && data[i+1] == c && data[i+2] == c:
- // triple symbol found
- strong := NewNode(Strong)
- em := NewNode(Emph)
- strong.AppendChild(em)
- p.inline(em, data[:i])
- return i + 3, strong
- case (i+1 < len(data) && data[i+1] == c):
- // double symbol found, hand over to emph1
- length, node := helperEmphasis(p, origData[offset-2:], c)
- if length == 0 {
- return 0, nil
- }
- return length - 2, node
- default:
- // single symbol found, hand over to emph2
- length, node := helperDoubleEmphasis(p, origData[offset-1:], c)
- if length == 0 {
- return 0, nil
- }
- return length - 1, node
- }
- }
- return 0, nil
-}
-
-func text(s []byte) *Node {
- node := NewNode(Text)
- node.Literal = s
- return node
-}
-
-func normalizeURI(s []byte) []byte {
- return s // TODO: implement
-}
+++ /dev/null
-// Blackfriday Markdown Processor
-// Available at http://github.com/russross/blackfriday
-//
-// Copyright © 2011 Russ Ross <russ@russross.com>.
-// Distributed under the Simplified BSD License.
-// See README.md for details.
-
-package blackfriday
-
-import (
- "bytes"
- "fmt"
- "io"
- "strings"
- "unicode/utf8"
-)
-
-//
-// Markdown parsing and processing
-//
-
-// Version string of the package. Appears in the rendered document when
-// CompletePage flag is on.
-const Version = "2.0"
-
-// Extensions is a bitwise or'ed collection of enabled Blackfriday's
-// extensions.
-type Extensions int
-
-// These are the supported markdown parsing extensions.
-// OR these values together to select multiple extensions.
-const (
- NoExtensions Extensions = 0
- NoIntraEmphasis Extensions = 1 << iota // Ignore emphasis markers inside words
- Tables // Render tables
- FencedCode // Render fenced code blocks
- Autolink // Detect embedded URLs that are not explicitly marked
- Strikethrough // Strikethrough text using ~~test~~
- LaxHTMLBlocks // Loosen up HTML block parsing rules
- SpaceHeadings // Be strict about prefix heading rules
- HardLineBreak // Translate newlines into line breaks
- TabSizeEight // Expand tabs to eight spaces instead of four
- Footnotes // Pandoc-style footnotes
- NoEmptyLineBeforeBlock // No need to insert an empty line to start a (code, quote, ordered list, unordered list) block
- HeadingIDs // specify heading IDs with {#id}
- Titleblock // Titleblock ala pandoc
- AutoHeadingIDs // Create the heading ID from the text
- BackslashLineBreak // Translate trailing backslashes into line breaks
- DefinitionLists // Render definition lists
-
- CommonHTMLFlags HTMLFlags = UseXHTML | Smartypants |
- SmartypantsFractions | SmartypantsDashes | SmartypantsLatexDashes
-
- CommonExtensions Extensions = NoIntraEmphasis | Tables | FencedCode |
- Autolink | Strikethrough | SpaceHeadings | HeadingIDs |
- BackslashLineBreak | DefinitionLists
-)
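// Illustrative sketch, not part of the original file: extension flags are
// selected by ORing them together, as the comment above describes. The
// variable name exampleExtensions is hypothetical.
var exampleExtensions = CommonExtensions | Footnotes | HardLineBreak

// It would typically be handed to the parser via WithExtensions(exampleExtensions).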
-
-// ListType contains bitwise or'ed flags for list and list item objects.
-type ListType int
-
-// These are the possible flag values for the ListItem renderer.
-// Multiple flag values may be ORed together.
-// These are mostly of interest if you are writing a new output format.
-const (
- ListTypeOrdered ListType = 1 << iota
- ListTypeDefinition
- ListTypeTerm
-
- ListItemContainsBlock
- ListItemBeginningOfList // TODO: figure out if this is of any use now
- ListItemEndOfList
-)
-
-// CellAlignFlags holds a type of alignment in a table cell.
-type CellAlignFlags int
-
-// These are the possible flag values for the table cell renderer.
-// Only a single one of these values will be used; they are not ORed together.
-// These are mostly of interest if you are writing a new output format.
-const (
- TableAlignmentLeft CellAlignFlags = 1 << iota
- TableAlignmentRight
- TableAlignmentCenter = (TableAlignmentLeft | TableAlignmentRight)
-)
-
-// The size of a tab stop.
-const (
- TabSizeDefault = 4
- TabSizeDouble = 8
-)
-
-// blockTags is a set of tags that are recognized as HTML block tags.
-// Any of these can be included in markdown text without special escaping.
-var blockTags = map[string]struct{}{
- "blockquote": {},
- "del": {},
- "div": {},
- "dl": {},
- "fieldset": {},
- "form": {},
- "h1": {},
- "h2": {},
- "h3": {},
- "h4": {},
- "h5": {},
- "h6": {},
- "iframe": {},
- "ins": {},
- "math": {},
- "noscript": {},
- "ol": {},
- "pre": {},
- "p": {},
- "script": {},
- "style": {},
- "table": {},
- "ul": {},
-
- // HTML5
- "address": {},
- "article": {},
- "aside": {},
- "canvas": {},
- "figcaption": {},
- "figure": {},
- "footer": {},
- "header": {},
- "hgroup": {},
- "main": {},
- "nav": {},
- "output": {},
- "progress": {},
- "section": {},
- "video": {},
-}
-
-// Renderer is the rendering interface. This is mostly of interest if you are
-// implementing a new rendering format.
-//
-// Only an HTML implementation is provided in this repository, see the README
-// for external implementations.
-type Renderer interface {
- // RenderNode is the main rendering method. It will be called once for
- // every leaf node and twice for every non-leaf node (first with
- // entering=true, then with entering=false). The method should write its
- // rendition of the node to the supplied writer w.
- RenderNode(w io.Writer, node *Node, entering bool) WalkStatus
-
- // RenderHeader is a method that allows the renderer to produce some
- // content preceding the main body of the output document. The header is
- // understood in the broad sense here. For example, the default HTML
- // renderer will write not only the HTML document preamble, but also the
- // table of contents if it was requested.
- //
- // The method will be passed an entire document tree, in case a particular
- // implementation needs to inspect it to produce output.
- //
- // The output should be written to the supplied writer w. If your
- // implementation has no header to write, supply an empty implementation.
- RenderHeader(w io.Writer, ast *Node)
-
- // RenderFooter is a symmetric counterpart of RenderHeader.
- RenderFooter(w io.Writer, ast *Node)
-}
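// Illustrative sketch, not part of the original file: a minimal Renderer that
// writes only the literal text of Text nodes and drops all markup. The name
// plainTextRenderer is hypothetical; the sketch assumes it sits in this
// package, since it uses the unqualified types defined here.
type plainTextRenderer struct{}

func (plainTextRenderer) RenderNode(w io.Writer, node *Node, entering bool) WalkStatus {
	if entering && node.Type == Text {
		w.Write(node.Literal)
	}
	return GoToNext
}

func (plainTextRenderer) RenderHeader(w io.Writer, ast *Node) {}
func (plainTextRenderer) RenderFooter(w io.Writer, ast *Node) {}

// It would be plugged in with Run(input, WithRenderer(plainTextRenderer{})).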
-
-// Callback functions for inline parsing. One such function is defined
-// for each character that triggers a response when parsing inline data.
-type inlineParser func(p *Markdown, data []byte, offset int) (int, *Node)
-
-// Markdown is a type that holds extensions and the runtime state used by
-// Parse, and the renderer. You cannot use it directly; construct it with New.
-type Markdown struct {
- renderer Renderer
- referenceOverride ReferenceOverrideFunc
- refs map[string]*reference
- inlineCallback [256]inlineParser
- extensions Extensions
- nesting int
- maxNesting int
- insideLink bool
-
- // Footnotes need to be ordered as well as available to quickly check for
- // presence. If a ref is also a footnote, it's stored both in refs and here
- // in notes. Slice is nil if footnotes not enabled.
- notes []*reference
-
- doc *Node
- tip *Node // = doc
- oldTip *Node
- lastMatchedContainer *Node // = doc
- allClosed bool
-}
-
-func (p *Markdown) getRef(refid string) (ref *reference, found bool) {
- if p.referenceOverride != nil {
- r, overridden := p.referenceOverride(refid)
- if overridden {
- if r == nil {
- return nil, false
- }
- return &reference{
- link: []byte(r.Link),
- title: []byte(r.Title),
- noteID: 0,
- hasBlock: false,
- text: []byte(r.Text)}, true
- }
- }
- // refs are case insensitive
- ref, found = p.refs[strings.ToLower(refid)]
- return ref, found
-}
-
-func (p *Markdown) finalize(block *Node) {
- above := block.Parent
- block.open = false
- p.tip = above
-}
-
-func (p *Markdown) addChild(node NodeType, offset uint32) *Node {
- return p.addExistingChild(NewNode(node), offset)
-}
-
-func (p *Markdown) addExistingChild(node *Node, offset uint32) *Node {
- for !p.tip.canContain(node.Type) {
- p.finalize(p.tip)
- }
- p.tip.AppendChild(node)
- p.tip = node
- return node
-}
-
-func (p *Markdown) closeUnmatchedBlocks() {
- if !p.allClosed {
- for p.oldTip != p.lastMatchedContainer {
- parent := p.oldTip.Parent
- p.finalize(p.oldTip)
- p.oldTip = parent
- }
- p.allClosed = true
- }
-}
-
-//
-//
-// Public interface
-//
-//
-
-// Reference represents the details of a link.
-// See the documentation in Options for more details on use-case.
-type Reference struct {
- // Link is usually the URL the reference points to.
- Link string
- // Title is the alternate text describing the link in more detail.
- Title string
- // Text is the optional text to override the ref with if the syntax used was
- // [refid][]
- Text string
-}
-
-// ReferenceOverrideFunc is expected to be called with a reference string and
-// return either a valid Reference type that the reference string maps to or
-// nil. If overridden is false, the default reference logic will be executed.
-// See the documentation in Options for more details on use-case.
-type ReferenceOverrideFunc func(reference string) (ref *Reference, overridden bool)
-
-// New constructs a Markdown processor. You can use the same With* functions as
-// for Run() to customize parser's behavior and the renderer.
-func New(opts ...Option) *Markdown {
- var p Markdown
- for _, opt := range opts {
- opt(&p)
- }
- p.refs = make(map[string]*reference)
- p.maxNesting = 16
- p.insideLink = false
- docNode := NewNode(Document)
- p.doc = docNode
- p.tip = docNode
- p.oldTip = docNode
- p.lastMatchedContainer = docNode
- p.allClosed = true
- // register inline parsers
- p.inlineCallback[' '] = maybeLineBreak
- p.inlineCallback['*'] = emphasis
- p.inlineCallback['_'] = emphasis
- if p.extensions&Strikethrough != 0 {
- p.inlineCallback['~'] = emphasis
- }
- p.inlineCallback['`'] = codeSpan
- p.inlineCallback['\n'] = lineBreak
- p.inlineCallback['['] = link
- p.inlineCallback['<'] = leftAngle
- p.inlineCallback['\\'] = escape
- p.inlineCallback['&'] = entity
- p.inlineCallback['!'] = maybeImage
- p.inlineCallback['^'] = maybeInlineFootnote
- if p.extensions&Autolink != 0 {
- p.inlineCallback['h'] = maybeAutoLink
- p.inlineCallback['m'] = maybeAutoLink
- p.inlineCallback['f'] = maybeAutoLink
- p.inlineCallback['H'] = maybeAutoLink
- p.inlineCallback['M'] = maybeAutoLink
- p.inlineCallback['F'] = maybeAutoLink
- }
- if p.extensions&Footnotes != 0 {
- p.notes = make([]*reference, 0)
- }
- return &p
-}
-
-// Option customizes the Markdown processor's default behavior.
-type Option func(*Markdown)
-
-// WithRenderer allows you to override the default renderer.
-func WithRenderer(r Renderer) Option {
- return func(p *Markdown) {
- p.renderer = r
- }
-}
-
-// WithExtensions allows you to pick some of the many extensions provided by
-// Blackfriday. You can bitwise OR them.
-func WithExtensions(e Extensions) Option {
- return func(p *Markdown) {
- p.extensions = e
- }
-}
-
-// WithNoExtensions turns off all extensions and custom behavior.
-func WithNoExtensions() Option {
- return func(p *Markdown) {
- p.extensions = NoExtensions
- p.renderer = NewHTMLRenderer(HTMLRendererParameters{
- Flags: HTMLFlagsNone,
- })
- }
-}
-
-// WithRefOverride sets an optional function callback that is called every
-// time a reference is resolved.
-//
-// In Markdown, the link reference syntax can be made to resolve a link to
-// a reference instead of an inline URL, in one of the following ways:
-//
-// * [link text][refid]
-// * [refid][]
-//
-// Usually, the refid is defined at the bottom of the Markdown document. If
-// this override function is provided, the refid is passed to the override
-// function first, before consulting the defined refids at the bottom. If
-// the override function indicates an override did not occur, the refids at
-// the bottom will be used to fill in the link details.
-func WithRefOverride(o ReferenceOverrideFunc) Option {
- return func(p *Markdown) {
- p.referenceOverride = o
- }
-}
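// Illustrative sketch, not part of the original file: a ReferenceOverrideFunc
// that resolves the hypothetical refid "docs" to a fixed link and otherwise
// defers to the refids defined in the document itself.
func exampleRefOverride(refid string) (*Reference, bool) {
	if refid == "docs" {
		return &Reference{Link: "https://example.com/docs", Title: "Project docs"}, true
	}
	return nil, false // not overridden: fall back to the refids defined in the document
}

// It would be wired in with Run(input, WithRefOverride(exampleRefOverride)).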
-
-// Run is the main entry point to Blackfriday. It parses and renders a
-// block of markdown-encoded text.
-//
-// The simplest invocation of Run takes one argument, input:
-// output := Run(input)
-// This will parse the input with CommonExtensions enabled and render it with
-// the default HTMLRenderer (with CommonHTMLFlags).
-//
-// Variadic arguments opts can customize the default behavior. Since the Markdown
-// type does not contain exported fields, you cannot use it directly. Instead,
-// use the With* functions. For example, this will call the most basic
-// functionality, with no extensions:
-// output := Run(input, WithNoExtensions())
-//
-// You can use any number of With* arguments, even contradicting ones. They
-// will be applied in order of appearance and the latter will override the
-// former:
-// output := Run(input, WithNoExtensions(), WithExtensions(exts),
-// WithRenderer(yourRenderer))
-func Run(input []byte, opts ...Option) []byte {
- r := NewHTMLRenderer(HTMLRendererParameters{
- Flags: CommonHTMLFlags,
- })
- optList := []Option{WithRenderer(r), WithExtensions(CommonExtensions)}
- optList = append(optList, opts...)
- parser := New(optList...)
- ast := parser.Parse(input)
- var buf bytes.Buffer
- parser.renderer.RenderHeader(&buf, ast)
- ast.Walk(func(node *Node, entering bool) WalkStatus {
- return parser.renderer.RenderNode(&buf, node, entering)
- })
- parser.renderer.RenderFooter(&buf, ast)
- return buf.Bytes()
-}
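// Illustrative sketch, not part of the original file: the simplest end-to-end
// use of Run from another module, importing this package by its v2 module path.
package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	output := blackfriday.Run([]byte("# Title\n\nSome *markdown* text.\n"))
	fmt.Println(string(output))
}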
-
-// Parse is an entry point to the parsing part of Blackfriday. It takes an
-// input markdown document and produces a syntax tree for its contents. This
-// tree can then be rendered with a default or custom renderer, or
-// analyzed/transformed by the caller to whatever non-standard needs they have.
-// The return value is the root node of the syntax tree.
-func (p *Markdown) Parse(input []byte) *Node {
- p.block(input)
- // Walk the tree and finish up some of unfinished blocks
- for p.tip != nil {
- p.finalize(p.tip)
- }
- // Walk the tree again and process inline markdown in each block
- p.doc.Walk(func(node *Node, entering bool) WalkStatus {
- if node.Type == Paragraph || node.Type == Heading || node.Type == TableCell {
- p.inline(node, node.content)
- node.content = nil
- }
- return GoToNext
- })
- p.parseRefsToAST()
- return p.doc
-}
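// Illustrative sketch, not part of the original file: using Parse and Walk to
// inspect the syntax tree instead of rendering it. countHeadings is a
// hypothetical helper that would live in this package.
func countHeadings(input []byte) int {
	ast := New(WithExtensions(CommonExtensions)).Parse(input)
	count := 0
	ast.Walk(func(node *Node, entering bool) WalkStatus {
		if entering && node.Type == Heading {
			count++
		}
		return GoToNext
	})
	return count
}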
-
-func (p *Markdown) parseRefsToAST() {
- if p.extensions&Footnotes == 0 || len(p.notes) == 0 {
- return
- }
- p.tip = p.doc
- block := p.addBlock(List, nil)
- block.IsFootnotesList = true
- block.ListFlags = ListTypeOrdered
- flags := ListItemBeginningOfList
- // Note: this loop is intentionally explicit, not range-form. This is
- // because the body of the loop will append nested footnotes to p.notes and
- // we need to process those late additions. Range form would only walk over
- // the fixed initial set.
- for i := 0; i < len(p.notes); i++ {
- ref := p.notes[i]
- p.addExistingChild(ref.footnote, 0)
- block := ref.footnote
- block.ListFlags = flags | ListTypeOrdered
- block.RefLink = ref.link
- if ref.hasBlock {
- flags |= ListItemContainsBlock
- p.block(ref.title)
- } else {
- p.inline(block, ref.title)
- }
- flags &^= ListItemBeginningOfList | ListItemContainsBlock
- }
- above := block.Parent
- finalizeList(block)
- p.tip = above
- block.Walk(func(node *Node, entering bool) WalkStatus {
- if node.Type == Paragraph || node.Type == Heading {
- p.inline(node, node.content)
- node.content = nil
- }
- return GoToNext
- })
-}
-
-//
-// Link references
-//
-// This section implements support for references that (usually) appear
-// as footnotes in a document, and can be referenced anywhere in the document.
-// The basic format is:
-//
-// [1]: http://www.google.com/ "Google"
-// [2]: http://www.github.com/ "Github"
-//
-// Anywhere in the document, the reference can be linked by referring to its
-// label, i.e., 1 and 2 in this example, as in:
-//
-// This library is hosted on [Github][2], a git hosting site.
-//
-// Actual footnotes as specified in Pandoc and supported by some other Markdown
-// libraries such as php-markdown are also taken care of. They look like this:
-//
-// This sentence needs a bit of further explanation.[^note]
-//
-// [^note]: This is the explanation.
-//
-// Footnotes should be placed at the end of the document in an ordered list.
-// Finally, there are inline footnotes such as:
-//
-// Inline footnotes^[Also supported.] provide a quick inline explanation,
-// but are rendered at the bottom of the document.
-//
-
-// reference holds all information necessary for a reference-style link or
-// footnote.
-//
-// Consider this markdown with reference-style links:
-//
-// [link][ref]
-//
-// [ref]: /url/ "tooltip title"
-//
-// It will be ultimately converted to this HTML:
-//
-// <p><a href=\"/url/\" title=\"title\">link</a></p>
-//
-// And a reference structure will be populated as follows:
-//
-// p.refs["ref"] = &reference{
-// link: "/url/",
-// title: "tooltip title",
-// }
-//
-// Alternatively, reference can contain information about a footnote. Consider
-// this markdown:
-//
-// Text needing a footnote.[^a]
-//
-// [^a]: This is the note
-//
-// A reference structure will be populated as follows:
-//
-// p.refs["a"] = &reference{
-// link: "a",
-// title: "This is the note",
-// noteID: <some positive int>,
-// }
-//
-// TODO: As you can see, it begs for splitting into two dedicated structures
-// for refs and for footnotes.
-type reference struct {
- link []byte
- title []byte
- noteID int // 0 if not a footnote ref
- hasBlock bool
- footnote *Node // a link to the Item node within a list of footnotes
-
- text []byte // only gets populated by refOverride feature with Reference.Text
-}
-
-func (r *reference) String() string {
- return fmt.Sprintf("{link: %q, title: %q, text: %q, noteID: %d, hasBlock: %v}",
- r.link, r.title, r.text, r.noteID, r.hasBlock)
-}
-
-// Check whether or not data starts with a reference link.
-// If so, it is parsed and stored in the list of references
-// (in the render struct).
-// Returns the number of bytes to skip to move past it,
-// or zero if the first line is not a reference.
-func isReference(p *Markdown, data []byte, tabSize int) int {
- // up to 3 optional leading spaces
- if len(data) < 4 {
- return 0
- }
- i := 0
- for i < 3 && data[i] == ' ' {
- i++
- }
-
- noteID := 0
-
- // id part: anything but a newline between brackets
- if data[i] != '[' {
- return 0
- }
- i++
- if p.extensions&Footnotes != 0 {
- if i < len(data) && data[i] == '^' {
- // we can set it to anything here because the proper noteIds will
- // be assigned later during the second pass. It just has to be != 0
- noteID = 1
- i++
- }
- }
- idOffset := i
- for i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != ']' {
- i++
- }
- if i >= len(data) || data[i] != ']' {
- return 0
- }
- idEnd := i
-	// footnotes can have an empty ID, like this: [^], but a reference cannot be
-	// empty like this: []. Break early if it's not a footnote and there's no ID
- if noteID == 0 && idOffset == idEnd {
- return 0
- }
- // spacer: colon (space | tab)* newline? (space | tab)*
- i++
- if i >= len(data) || data[i] != ':' {
- return 0
- }
- i++
- for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
- i++
- }
- if i < len(data) && (data[i] == '\n' || data[i] == '\r') {
- i++
- if i < len(data) && data[i] == '\n' && data[i-1] == '\r' {
- i++
- }
- }
- for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
- i++
- }
- if i >= len(data) {
- return 0
- }
-
- var (
- linkOffset, linkEnd int
- titleOffset, titleEnd int
- lineEnd int
- raw []byte
- hasBlock bool
- )
-
- if p.extensions&Footnotes != 0 && noteID != 0 {
- linkOffset, linkEnd, raw, hasBlock = scanFootnote(p, data, i, tabSize)
- lineEnd = linkEnd
- } else {
- linkOffset, linkEnd, titleOffset, titleEnd, lineEnd = scanLinkRef(p, data, i)
- }
- if lineEnd == 0 {
- return 0
- }
-
- // a valid ref has been found
-
- ref := &reference{
- noteID: noteID,
- hasBlock: hasBlock,
- }
-
- if noteID > 0 {
- // reusing the link field for the id since footnotes don't have links
- ref.link = data[idOffset:idEnd]
- // if footnote, it's not really a title, it's the contained text
- ref.title = raw
- } else {
- ref.link = data[linkOffset:linkEnd]
- ref.title = data[titleOffset:titleEnd]
- }
-
- // id matches are case-insensitive
- id := string(bytes.ToLower(data[idOffset:idEnd]))
-
- p.refs[id] = ref
-
- return lineEnd
-}
-
-func scanLinkRef(p *Markdown, data []byte, i int) (linkOffset, linkEnd, titleOffset, titleEnd, lineEnd int) {
- // link: whitespace-free sequence, optionally between angle brackets
- if data[i] == '<' {
- i++
- }
- linkOffset = i
- for i < len(data) && data[i] != ' ' && data[i] != '\t' && data[i] != '\n' && data[i] != '\r' {
- i++
- }
- linkEnd = i
- if data[linkOffset] == '<' && data[linkEnd-1] == '>' {
- linkOffset++
- linkEnd--
- }
-
- // optional spacer: (space | tab)* (newline | '\'' | '"' | '(' )
- for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
- i++
- }
- if i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != '\'' && data[i] != '"' && data[i] != '(' {
- return
- }
-
- // compute end-of-line
- if i >= len(data) || data[i] == '\r' || data[i] == '\n' {
- lineEnd = i
- }
- if i+1 < len(data) && data[i] == '\r' && data[i+1] == '\n' {
- lineEnd++
- }
-
- // optional (space|tab)* spacer after a newline
- if lineEnd > 0 {
- i = lineEnd + 1
- for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
- i++
- }
- }
-
- // optional title: any non-newline sequence enclosed in '"() alone on its line
- if i+1 < len(data) && (data[i] == '\'' || data[i] == '"' || data[i] == '(') {
- i++
- titleOffset = i
-
- // look for EOL
- for i < len(data) && data[i] != '\n' && data[i] != '\r' {
- i++
- }
- if i+1 < len(data) && data[i] == '\n' && data[i+1] == '\r' {
- titleEnd = i + 1
- } else {
- titleEnd = i
- }
-
- // step back
- i--
- for i > titleOffset && (data[i] == ' ' || data[i] == '\t') {
- i--
- }
- if i > titleOffset && (data[i] == '\'' || data[i] == '"' || data[i] == ')') {
- lineEnd = titleEnd
- titleEnd = i
- }
- }
-
- return
-}
-
-// The first bit of this logic is the same as Parser.listItem, but the rest
-// is much simpler. This function simply finds the entire block and shifts it
-// over by one tab if it is indeed a block (just returns the line if it's not).
-// blockEnd is the end of the section in the input buffer, and contents is the
-// extracted text that was shifted over one tab. It will need to be rendered at
-// the end of the document.
-func scanFootnote(p *Markdown, data []byte, i, indentSize int) (blockStart, blockEnd int, contents []byte, hasBlock bool) {
- if i == 0 || len(data) == 0 {
- return
- }
-
- // skip leading whitespace on first line
- for i < len(data) && data[i] == ' ' {
- i++
- }
-
- blockStart = i
-
- // find the end of the line
- blockEnd = i
- for i < len(data) && data[i-1] != '\n' {
- i++
- }
-
- // get working buffer
- var raw bytes.Buffer
-
- // put the first line into the working buffer
- raw.Write(data[blockEnd:i])
- blockEnd = i
-
- // process the following lines
- containsBlankLine := false
-
-gatherLines:
- for blockEnd < len(data) {
- i++
-
- // find the end of this line
- for i < len(data) && data[i-1] != '\n' {
- i++
- }
-
- // if it is an empty line, guess that it is part of this item
- // and move on to the next line
- if p.isEmpty(data[blockEnd:i]) > 0 {
- containsBlankLine = true
- blockEnd = i
- continue
- }
-
- n := 0
- if n = isIndented(data[blockEnd:i], indentSize); n == 0 {
- // this is the end of the block.
- // we don't want to include this last line in the index.
- break gatherLines
- }
-
- // if there were blank lines before this one, insert a new one now
- if containsBlankLine {
- raw.WriteByte('\n')
- containsBlankLine = false
- }
-
- // get rid of that first tab, write to buffer
- raw.Write(data[blockEnd+n : i])
- hasBlock = true
-
- blockEnd = i
- }
-
- if data[blockEnd-1] != '\n' {
- raw.WriteByte('\n')
- }
-
- contents = raw.Bytes()
-
- return
-}
-
-//
-//
-// Miscellaneous helper functions
-//
-//
-
-// Test if a character is a punctuation symbol.
-// Taken from a private function in regexp in the stdlib.
-func ispunct(c byte) bool {
- for _, r := range []byte("!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~") {
- if c == r {
- return true
- }
- }
- return false
-}
-
-// Test if a character is a whitespace character.
-func isspace(c byte) bool {
- return ishorizontalspace(c) || isverticalspace(c)
-}
-
-// Test if a character is a horizontal whitespace character.
-func ishorizontalspace(c byte) bool {
- return c == ' ' || c == '\t'
-}
-
-// Test if a character is a vertical whitespace character.
-func isverticalspace(c byte) bool {
- return c == '\n' || c == '\r' || c == '\f' || c == '\v'
-}
-
-// Test if a character is a letter.
-func isletter(c byte) bool {
- return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
-}
-
-// Test if a character is a letter or a digit.
-// TODO: check when this is looking for ASCII alnum and when it should use unicode
-func isalnum(c byte) bool {
- return (c >= '0' && c <= '9') || isletter(c)
-}
-
-// Replace tab characters with spaces, aligning to the next TAB_SIZE column.
-// always ends output with a newline
-func expandTabs(out *bytes.Buffer, line []byte, tabSize int) {
- // first, check for common cases: no tabs, or only tabs at beginning of line
- i, prefix := 0, 0
- slowcase := false
- for i = 0; i < len(line); i++ {
- if line[i] == '\t' {
- if prefix == i {
- prefix++
- } else {
- slowcase = true
- break
- }
- }
- }
-
- // no need to decode runes if all tabs are at the beginning of the line
- if !slowcase {
- for i = 0; i < prefix*tabSize; i++ {
- out.WriteByte(' ')
- }
- out.Write(line[prefix:])
- return
- }
-
- // the slow case: we need to count runes to figure out how
- // many spaces to insert for each tab
- column := 0
- i = 0
- for i < len(line) {
- start := i
- for i < len(line) && line[i] != '\t' {
- _, size := utf8.DecodeRune(line[i:])
- i += size
- column++
- }
-
- if i > start {
- out.Write(line[start:i])
- }
-
- if i >= len(line) {
- break
- }
-
- for {
- out.WriteByte(' ')
- column++
- if column%tabSize == 0 {
- break
- }
- }
-
- i++
- }
-}
-
-// Find if a line counts as indented or not.
-// Returns number of characters the indent is (0 = not indented).
-func isIndented(data []byte, indentSize int) int {
- if len(data) == 0 {
- return 0
- }
- if data[0] == '\t' {
- return 1
- }
- if len(data) < indentSize {
- return 0
- }
- for i := 0; i < indentSize; i++ {
- if data[i] != ' ' {
- return 0
- }
- }
- return indentSize
-}
-
-// Create a url-safe slug for fragments
-func slugify(in []byte) []byte {
- if len(in) == 0 {
- return in
- }
- out := make([]byte, 0, len(in))
- sym := false
-
- for _, ch := range in {
- if isalnum(ch) {
- sym = false
- out = append(out, ch)
- } else if sym {
- continue
- } else {
- out = append(out, '-')
- sym = true
- }
- }
- var a, b int
- var ch byte
- for a, ch = range out {
- if ch != '-' {
- break
- }
- }
- for b = len(out) - 1; b > 0; b-- {
- if out[b] != '-' {
- break
- }
- }
- return out[a : b+1]
-}
+++ /dev/null
-package blackfriday
-
-import (
- "bytes"
- "fmt"
-)
-
-// NodeType specifies a type of a single node of a syntax tree. Usually one
-// node (and its type) corresponds to a single markdown feature, e.g. emphasis
-// or code block.
-type NodeType int
-
-// Constants for identifying different types of nodes. See NodeType.
-const (
- Document NodeType = iota
- BlockQuote
- List
- Item
- Paragraph
- Heading
- HorizontalRule
- Emph
- Strong
- Del
- Link
- Image
- Text
- HTMLBlock
- CodeBlock
- Softbreak
- Hardbreak
- Code
- HTMLSpan
- Table
- TableCell
- TableHead
- TableBody
- TableRow
-)
-
-var nodeTypeNames = []string{
- Document: "Document",
- BlockQuote: "BlockQuote",
- List: "List",
- Item: "Item",
- Paragraph: "Paragraph",
- Heading: "Heading",
- HorizontalRule: "HorizontalRule",
- Emph: "Emph",
- Strong: "Strong",
- Del: "Del",
- Link: "Link",
- Image: "Image",
- Text: "Text",
- HTMLBlock: "HTMLBlock",
- CodeBlock: "CodeBlock",
- Softbreak: "Softbreak",
- Hardbreak: "Hardbreak",
- Code: "Code",
- HTMLSpan: "HTMLSpan",
- Table: "Table",
- TableCell: "TableCell",
- TableHead: "TableHead",
- TableBody: "TableBody",
- TableRow: "TableRow",
-}
-
-func (t NodeType) String() string {
- return nodeTypeNames[t]
-}
-
-// ListData contains fields relevant to a List and Item node type.
-type ListData struct {
- ListFlags ListType
- Tight bool // Skip <p>s around list item data if true
- BulletChar byte // '*', '+' or '-' in bullet lists
- Delimiter byte // '.' or ')' after the number in ordered lists
- RefLink []byte // If not nil, turns this list item into a footnote item and triggers different rendering
- IsFootnotesList bool // This is a list of footnotes
-}
-
-// LinkData contains fields relevant to a Link node type.
-type LinkData struct {
- Destination []byte // Destination is what goes into a href
- Title []byte // Title is the tooltip thing that goes in a title attribute
- NoteID int // NoteID contains a serial number of a footnote, zero if it's not a footnote
- Footnote *Node // If it's a footnote, this is a direct link to the footnote Node. Otherwise nil.
-}
-
-// CodeBlockData contains fields relevant to a CodeBlock node type.
-type CodeBlockData struct {
- IsFenced bool // Specifies whether it's a fenced code block or an indented one
- Info []byte // This holds the info string
- FenceChar byte
- FenceLength int
- FenceOffset int
-}
-
-// TableCellData contains fields relevant to a TableCell node type.
-type TableCellData struct {
- IsHeader bool // This tells if it's under the header row
- Align CellAlignFlags // This holds the value for align attribute
-}
-
-// HeadingData contains fields relevant to a Heading node type.
-type HeadingData struct {
- Level int // This holds the heading level number
- HeadingID string // This might hold heading ID, if present
- IsTitleblock bool // Specifies whether it's a title block
-}
-
-// Node is a single element in the abstract syntax tree of the parsed document.
-// It holds connections to the structurally neighboring nodes and, for certain
-// types of nodes, additional information that might be needed when rendering.
-type Node struct {
- Type NodeType // Determines the type of the node
- Parent *Node // Points to the parent
- FirstChild *Node // Points to the first child, if any
- LastChild *Node // Points to the last child, if any
- Prev *Node // Previous sibling; nil if it's the first child
- Next *Node // Next sibling; nil if it's the last child
-
- Literal []byte // Text contents of the leaf nodes
-
- HeadingData // Populated if Type is Heading
- ListData // Populated if Type is List
- CodeBlockData // Populated if Type is CodeBlock
- LinkData // Populated if Type is Link
- TableCellData // Populated if Type is TableCell
-
- content []byte // Markdown content of the block nodes
- open bool // Specifies an open block node that has not been finished to process yet
-}
-
-// NewNode allocates a node of a specified type.
-func NewNode(typ NodeType) *Node {
- return &Node{
- Type: typ,
- open: true,
- }
-}
-
-func (n *Node) String() string {
- ellipsis := ""
- snippet := n.Literal
- if len(snippet) > 16 {
- snippet = snippet[:16]
- ellipsis = "..."
- }
- return fmt.Sprintf("%s: '%s%s'", n.Type, snippet, ellipsis)
-}
-
-// Unlink removes node 'n' from the tree.
-// It panics if the node is nil.
-func (n *Node) Unlink() {
- if n.Prev != nil {
- n.Prev.Next = n.Next
- } else if n.Parent != nil {
- n.Parent.FirstChild = n.Next
- }
- if n.Next != nil {
- n.Next.Prev = n.Prev
- } else if n.Parent != nil {
- n.Parent.LastChild = n.Prev
- }
- n.Parent = nil
- n.Next = nil
- n.Prev = nil
-}
-
-// AppendChild adds a node 'child' as a child of 'n'.
-// It panics if either node is nil.
-func (n *Node) AppendChild(child *Node) {
- child.Unlink()
- child.Parent = n
- if n.LastChild != nil {
- n.LastChild.Next = child
- child.Prev = n.LastChild
- n.LastChild = child
- } else {
- n.FirstChild = child
- n.LastChild = child
- }
-}
-
-// InsertBefore inserts 'sibling' immediately before 'n'.
-// It panics if either node is nil.
-func (n *Node) InsertBefore(sibling *Node) {
- sibling.Unlink()
- sibling.Prev = n.Prev
- if sibling.Prev != nil {
- sibling.Prev.Next = sibling
- }
- sibling.Next = n
- n.Prev = sibling
- sibling.Parent = n.Parent
- if sibling.Prev == nil {
- sibling.Parent.FirstChild = sibling
- }
-}
-
-func (n *Node) isContainer() bool {
- switch n.Type {
- case Document:
- fallthrough
- case BlockQuote:
- fallthrough
- case List:
- fallthrough
- case Item:
- fallthrough
- case Paragraph:
- fallthrough
- case Heading:
- fallthrough
- case Emph:
- fallthrough
- case Strong:
- fallthrough
- case Del:
- fallthrough
- case Link:
- fallthrough
- case Image:
- fallthrough
- case Table:
- fallthrough
- case TableHead:
- fallthrough
- case TableBody:
- fallthrough
- case TableRow:
- fallthrough
- case TableCell:
- return true
- default:
- return false
- }
-}
-
-func (n *Node) canContain(t NodeType) bool {
- if n.Type == List {
- return t == Item
- }
- if n.Type == Document || n.Type == BlockQuote || n.Type == Item {
- return t != Item
- }
- if n.Type == Table {
- return t == TableHead || t == TableBody
- }
- if n.Type == TableHead || n.Type == TableBody {
- return t == TableRow
- }
- if n.Type == TableRow {
- return t == TableCell
- }
- return false
-}
-
-// WalkStatus allows NodeVisitor to have some control over the tree traversal.
-// It is returned from NodeVisitor and different values allow Node.Walk to
-// decide which node to go to next.
-type WalkStatus int
-
-const (
- // GoToNext is the default traversal of every node.
- GoToNext WalkStatus = iota
- // SkipChildren tells walker to skip all children of current node.
- SkipChildren
- // Terminate tells walker to terminate the traversal.
- Terminate
-)
-
-// NodeVisitor is a callback to be called when traversing the syntax tree.
-// Called twice for every node: once with entering=true when the branch is
-// first visited, then with entering=false after all the children are done.
-type NodeVisitor func(node *Node, entering bool) WalkStatus
-
-// Walk is a convenience method that instantiates a walker and starts a
-// traversal of subtree rooted at n.
-func (n *Node) Walk(visitor NodeVisitor) {
- w := newNodeWalker(n)
- for w.current != nil {
- status := visitor(w.current, w.entering)
- switch status {
- case GoToNext:
- w.next()
- case SkipChildren:
- w.entering = false
- w.next()
- case Terminate:
- return
- }
- }
-}
-
-type nodeWalker struct {
- current *Node
- root *Node
- entering bool
-}
-
-func newNodeWalker(root *Node) *nodeWalker {
- return &nodeWalker{
- current: root,
- root: root,
- entering: true,
- }
-}
-
-func (nw *nodeWalker) next() {
- if (!nw.current.isContainer() || !nw.entering) && nw.current == nw.root {
- nw.current = nil
- return
- }
- if nw.entering && nw.current.isContainer() {
- if nw.current.FirstChild != nil {
- nw.current = nw.current.FirstChild
- nw.entering = true
- } else {
- nw.entering = false
- }
- } else if nw.current.Next == nil {
- nw.current = nw.current.Parent
- nw.entering = false
- } else {
- nw.current = nw.current.Next
- nw.entering = true
- }
-}
-
-func dump(ast *Node) {
- fmt.Println(dumpString(ast))
-}
-
-func dumpR(ast *Node, depth int) string {
- if ast == nil {
- return ""
- }
- indent := bytes.Repeat([]byte("\t"), depth)
- content := ast.Literal
- if content == nil {
- content = ast.content
- }
- result := fmt.Sprintf("%s%s(%q)\n", indent, ast.Type, content)
- for n := ast.FirstChild; n != nil; n = n.Next {
- result += dumpR(n, depth+1)
- }
- return result
-}
-
-func dumpString(ast *Node) string {
- return dumpR(ast, 0)
-}
+++ /dev/null
-//
-// Blackfriday Markdown Processor
-// Available at http://github.com/russross/blackfriday
-//
-// Copyright © 2011 Russ Ross <russ@russross.com>.
-// Distributed under the Simplified BSD License.
-// See README.md for details.
-//
-
-//
-//
-// SmartyPants rendering
-//
-//
-
-package blackfriday
-
-import (
- "bytes"
- "io"
-)
-
-// SPRenderer is a struct containing state of a Smartypants renderer.
-type SPRenderer struct {
- inSingleQuote bool
- inDoubleQuote bool
- callbacks [256]smartCallback
-}
-
-func wordBoundary(c byte) bool {
- return c == 0 || isspace(c) || ispunct(c)
-}
-
-func tolower(c byte) byte {
- if c >= 'A' && c <= 'Z' {
- return c - 'A' + 'a'
- }
- return c
-}
-
-func isdigit(c byte) bool {
- return c >= '0' && c <= '9'
-}
-
-func smartQuoteHelper(out *bytes.Buffer, previousChar byte, nextChar byte, quote byte, isOpen *bool, addNBSP bool) bool {
- // edge of the buffer is likely to be a tag that we don't get to see,
- // so we treat it like text sometimes
-
- // enumerate all sixteen possibilities for (previousChar, nextChar)
- // each can be one of {0, space, punct, other}
- switch {
- case previousChar == 0 && nextChar == 0:
- // context is not any help here, so toggle
- *isOpen = !*isOpen
- case isspace(previousChar) && nextChar == 0:
- // [ "] might be [ "<code>foo...]
- *isOpen = true
- case ispunct(previousChar) && nextChar == 0:
- // [!"] hmm... could be [Run!"] or [("<code>...]
- *isOpen = false
- case /* isnormal(previousChar) && */ nextChar == 0:
- // [a"] is probably a close
- *isOpen = false
- case previousChar == 0 && isspace(nextChar):
- // [" ] might be [...foo</code>" ]
- *isOpen = false
- case isspace(previousChar) && isspace(nextChar):
- // [ " ] context is not any help here, so toggle
- *isOpen = !*isOpen
- case ispunct(previousChar) && isspace(nextChar):
- // [!" ] is probably a close
- *isOpen = false
- case /* isnormal(previousChar) && */ isspace(nextChar):
- // [a" ] this is one of the easy cases
- *isOpen = false
- case previousChar == 0 && ispunct(nextChar):
- // ["!] hmm... could be ["$1.95] or [</code>"!...]
- *isOpen = false
- case isspace(previousChar) && ispunct(nextChar):
- // [ "!] looks more like [ "$1.95]
- *isOpen = true
- case ispunct(previousChar) && ispunct(nextChar):
- // [!"!] context is not any help here, so toggle
- *isOpen = !*isOpen
- case /* isnormal(previousChar) && */ ispunct(nextChar):
- // [a"!] is probably a close
- *isOpen = false
- case previousChar == 0 /* && isnormal(nextChar) */ :
- // ["a] is probably an open
- *isOpen = true
- case isspace(previousChar) /* && isnormal(nextChar) */ :
- // [ "a] this is one of the easy cases
- *isOpen = true
- case ispunct(previousChar) /* && isnormal(nextChar) */ :
- // [!"a] is probably an open
- *isOpen = true
- default:
- // [a'b] maybe a contraction?
- *isOpen = false
- }
-
- // Note that with the limited lookahead, this non-breaking
- // space will also be appended to single double quotes.
- if addNBSP && !*isOpen {
- out.WriteString(" ")
- }
-
- out.WriteByte('&')
- if *isOpen {
- out.WriteByte('l')
- } else {
- out.WriteByte('r')
- }
- out.WriteByte(quote)
- out.WriteString("quo;")
-
- if addNBSP && *isOpen {
- out.WriteString(" ")
- }
-
- return true
-}
-
-func (r *SPRenderer) smartSingleQuote(out *bytes.Buffer, previousChar byte, text []byte) int {
- if len(text) >= 2 {
- t1 := tolower(text[1])
-
- if t1 == '\'' {
- nextChar := byte(0)
- if len(text) >= 3 {
- nextChar = text[2]
- }
- if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) {
- return 1
- }
- }
-
- if (t1 == 's' || t1 == 't' || t1 == 'm' || t1 == 'd') && (len(text) < 3 || wordBoundary(text[2])) {
- out.WriteString("’")
- return 0
- }
-
- if len(text) >= 3 {
- t2 := tolower(text[2])
-
- if ((t1 == 'r' && t2 == 'e') || (t1 == 'l' && t2 == 'l') || (t1 == 'v' && t2 == 'e')) &&
- (len(text) < 4 || wordBoundary(text[3])) {
- out.WriteString("’")
- return 0
- }
- }
- }
-
- nextChar := byte(0)
- if len(text) > 1 {
- nextChar = text[1]
- }
- if smartQuoteHelper(out, previousChar, nextChar, 's', &r.inSingleQuote, false) {
- return 0
- }
-
- out.WriteByte(text[0])
- return 0
-}
-
-func (r *SPRenderer) smartParens(out *bytes.Buffer, previousChar byte, text []byte) int {
- if len(text) >= 3 {
- t1 := tolower(text[1])
- t2 := tolower(text[2])
-
- if t1 == 'c' && t2 == ')' {
- out.WriteString("©")
- return 2
- }
-
- if t1 == 'r' && t2 == ')' {
- out.WriteString("®")
- return 2
- }
-
- if len(text) >= 4 && t1 == 't' && t2 == 'm' && text[3] == ')' {
- out.WriteString("™")
- return 3
- }
- }
-
- out.WriteByte(text[0])
- return 0
-}
-
-func (r *SPRenderer) smartDash(out *bytes.Buffer, previousChar byte, text []byte) int {
- if len(text) >= 2 {
- if text[1] == '-' {
- out.WriteString("—")
- return 1
- }
-
- if wordBoundary(previousChar) && wordBoundary(text[1]) {
- out.WriteString("–")
- return 0
- }
- }
-
- out.WriteByte(text[0])
- return 0
-}
-
-func (r *SPRenderer) smartDashLatex(out *bytes.Buffer, previousChar byte, text []byte) int {
- if len(text) >= 3 && text[1] == '-' && text[2] == '-' {
- out.WriteString("—")
- return 2
- }
- if len(text) >= 2 && text[1] == '-' {
- out.WriteString("–")
- return 1
- }
-
- out.WriteByte(text[0])
- return 0
-}
-
-func (r *SPRenderer) smartAmpVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte, addNBSP bool) int {
- if bytes.HasPrefix(text, []byte(""")) {
- nextChar := byte(0)
- if len(text) >= 7 {
- nextChar = text[6]
- }
- if smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, addNBSP) {
- return 5
- }
- }
-
- if bytes.HasPrefix(text, []byte("�")) {
- return 3
- }
-
- out.WriteByte('&')
- return 0
-}
-
-func (r *SPRenderer) smartAmp(angledQuotes, addNBSP bool) func(*bytes.Buffer, byte, []byte) int {
- var quote byte = 'd'
- if angledQuotes {
- quote = 'a'
- }
-
- return func(out *bytes.Buffer, previousChar byte, text []byte) int {
- return r.smartAmpVariant(out, previousChar, text, quote, addNBSP)
- }
-}
-
-func (r *SPRenderer) smartPeriod(out *bytes.Buffer, previousChar byte, text []byte) int {
- if len(text) >= 3 && text[1] == '.' && text[2] == '.' {
- out.WriteString("…")
- return 2
- }
-
- if len(text) >= 5 && text[1] == ' ' && text[2] == '.' && text[3] == ' ' && text[4] == '.' {
- out.WriteString("…")
- return 4
- }
-
- out.WriteByte(text[0])
- return 0
-}
-
-func (r *SPRenderer) smartBacktick(out *bytes.Buffer, previousChar byte, text []byte) int {
- if len(text) >= 2 && text[1] == '`' {
- nextChar := byte(0)
- if len(text) >= 3 {
- nextChar = text[2]
- }
- if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) {
- return 1
- }
- }
-
- out.WriteByte(text[0])
- return 0
-}
-
-func (r *SPRenderer) smartNumberGeneric(out *bytes.Buffer, previousChar byte, text []byte) int {
- if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 {
- // is it of the form digits/digits(word boundary)?, i.e., \d+/\d+\b
- // note: check for regular slash (/) or fraction slash (⁄, 0x2044, or 0xe2 81 84 in utf-8)
- // and avoid changing dates like 1/23/2005 into fractions.
- numEnd := 0
- for len(text) > numEnd && isdigit(text[numEnd]) {
- numEnd++
- }
- if numEnd == 0 {
- out.WriteByte(text[0])
- return 0
- }
- denStart := numEnd + 1
- if len(text) > numEnd+3 && text[numEnd] == 0xe2 && text[numEnd+1] == 0x81 && text[numEnd+2] == 0x84 {
- denStart = numEnd + 3
- } else if len(text) < numEnd+2 || text[numEnd] != '/' {
- out.WriteByte(text[0])
- return 0
- }
- denEnd := denStart
- for len(text) > denEnd && isdigit(text[denEnd]) {
- denEnd++
- }
- if denEnd == denStart {
- out.WriteByte(text[0])
- return 0
- }
- if len(text) == denEnd || wordBoundary(text[denEnd]) && text[denEnd] != '/' {
- out.WriteString("<sup>")
- out.Write(text[:numEnd])
- out.WriteString("</sup>⁄<sub>")
- out.Write(text[denStart:denEnd])
- out.WriteString("</sub>")
- return denEnd - 1
- }
- }
-
- out.WriteByte(text[0])
- return 0
-}
-
-func (r *SPRenderer) smartNumber(out *bytes.Buffer, previousChar byte, text []byte) int {
- if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 {
- if text[0] == '1' && text[1] == '/' && text[2] == '2' {
- if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' {
- out.WriteString("½")
- return 2
- }
- }
-
- if text[0] == '1' && text[1] == '/' && text[2] == '4' {
- if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 5 && tolower(text[3]) == 't' && tolower(text[4]) == 'h') {
- out.WriteString("¼")
- return 2
- }
- }
-
- if text[0] == '3' && text[1] == '/' && text[2] == '4' {
- if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 6 && tolower(text[3]) == 't' && tolower(text[4]) == 'h' && tolower(text[5]) == 's') {
- out.WriteString("¾")
- return 2
- }
- }
- }
-
- out.WriteByte(text[0])
- return 0
-}
-
-func (r *SPRenderer) smartDoubleQuoteVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte) int {
- nextChar := byte(0)
- if len(text) > 1 {
- nextChar = text[1]
- }
- if !smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, false) {
- out.WriteString(""")
- }
-
- return 0
-}
-
-func (r *SPRenderer) smartDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int {
- return r.smartDoubleQuoteVariant(out, previousChar, text, 'd')
-}
-
-func (r *SPRenderer) smartAngledDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int {
- return r.smartDoubleQuoteVariant(out, previousChar, text, 'a')
-}
-
-func (r *SPRenderer) smartLeftAngle(out *bytes.Buffer, previousChar byte, text []byte) int {
- i := 0
-
- for i < len(text) && text[i] != '>' {
- i++
- }
-
- out.Write(text[:i+1])
- return i
-}
-
-type smartCallback func(out *bytes.Buffer, previousChar byte, text []byte) int
-
-// NewSmartypantsRenderer constructs a Smartypants renderer object.
-func NewSmartypantsRenderer(flags HTMLFlags) *SPRenderer {
- var (
- r SPRenderer
-
- smartAmpAngled = r.smartAmp(true, false)
- smartAmpAngledNBSP = r.smartAmp(true, true)
- smartAmpRegular = r.smartAmp(false, false)
- smartAmpRegularNBSP = r.smartAmp(false, true)
-
- addNBSP = flags&SmartypantsQuotesNBSP != 0
- )
-
- if flags&SmartypantsAngledQuotes == 0 {
- r.callbacks['"'] = r.smartDoubleQuote
- if !addNBSP {
- r.callbacks['&'] = smartAmpRegular
- } else {
- r.callbacks['&'] = smartAmpRegularNBSP
- }
- } else {
- r.callbacks['"'] = r.smartAngledDoubleQuote
- if !addNBSP {
- r.callbacks['&'] = smartAmpAngled
- } else {
- r.callbacks['&'] = smartAmpAngledNBSP
- }
- }
- r.callbacks['\''] = r.smartSingleQuote
- r.callbacks['('] = r.smartParens
- if flags&SmartypantsDashes != 0 {
- if flags&SmartypantsLatexDashes == 0 {
- r.callbacks['-'] = r.smartDash
- } else {
- r.callbacks['-'] = r.smartDashLatex
- }
- }
- r.callbacks['.'] = r.smartPeriod
- if flags&SmartypantsFractions == 0 {
- r.callbacks['1'] = r.smartNumber
- r.callbacks['3'] = r.smartNumber
- } else {
- for ch := '1'; ch <= '9'; ch++ {
- r.callbacks[ch] = r.smartNumberGeneric
- }
- }
- r.callbacks['<'] = r.smartLeftAngle
- r.callbacks['`'] = r.smartBacktick
- return &r
-}
-
-// Process is the entry point of the Smartypants renderer.
-func (r *SPRenderer) Process(w io.Writer, text []byte) {
- mark := 0
- for i := 0; i < len(text); i++ {
- if action := r.callbacks[text[i]]; action != nil {
- if i > mark {
- w.Write(text[mark:i])
- }
- previousChar := byte(0)
- if i > 0 {
- previousChar = text[i-1]
- }
- var tmp bytes.Buffer
- i += action(&tmp, previousChar, text[i:])
- w.Write(tmp.Bytes())
- mark = i + 1
- }
- }
- if mark < len(text) {
- w.Write(text[mark:])
- }
-}
+++ /dev/null
-sudo: false
-language: go
-go:
- - 1.x
- - master
-matrix:
- allow_failures:
- - go: master
- fast_finish: true
-install:
- - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
-script:
- - go get -t -v ./...
- - diff -u <(echo -n) <(gofmt -d -s .)
- - go tool vet .
- - go test -v -race ./...
+++ /dev/null
-MIT License
-
-Copyright (c) 2015 Dmitri Shuralyov
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
+++ /dev/null
-sanitized_anchor_name
-=====================
-
-[![Build Status](https://travis-ci.org/shurcooL/sanitized_anchor_name.svg?branch=master)](https://travis-ci.org/shurcooL/sanitized_anchor_name) [![GoDoc](https://godoc.org/github.com/shurcooL/sanitized_anchor_name?status.svg)](https://godoc.org/github.com/shurcooL/sanitized_anchor_name)
-
-Package sanitized_anchor_name provides a func to create sanitized anchor names.
-
-Its logic can be reused by multiple packages to create interoperable anchor names
-and links to those anchors.
-
-At this time, it does not try to ensure that generated anchor names
-are unique, that responsibility falls on the caller.
-
-Installation
-------------
-
-```bash
-go get -u github.com/shurcooL/sanitized_anchor_name
-```
-
-Example
--------
-
-```Go
-anchorName := sanitized_anchor_name.Create("This is a header")
-
-fmt.Println(anchorName)
-
-// Output:
-// this-is-a-header
-```
-
-License
--------
-
-- [MIT License](LICENSE)
+++ /dev/null
-module github.com/shurcooL/sanitized_anchor_name
+++ /dev/null
-// Package sanitized_anchor_name provides a func to create sanitized anchor names.
-//
-// Its logic can be reused by multiple packages to create interoperable anchor names
-// and links to those anchors.
-//
-// At this time, it does not try to ensure that generated anchor names
-// are unique, that responsibility falls on the caller.
-package sanitized_anchor_name // import "github.com/shurcooL/sanitized_anchor_name"
-
-import "unicode"
-
-// Create returns a sanitized anchor name for the given text.
-func Create(text string) string {
- var anchorName []rune
- var futureDash = false
- for _, r := range text {
- switch {
- case unicode.IsLetter(r) || unicode.IsNumber(r):
- if futureDash && len(anchorName) > 0 {
- anchorName = append(anchorName, '-')
- }
- futureDash = false
- anchorName = append(anchorName, unicode.ToLower(r))
- default:
- futureDash = true
- }
- }
- return string(anchorName)
-}
--- /dev/null
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+*.pprof
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+.DS_Store
+fuzz/corpus
+fuzz/crashers
+fuzz/suppressions
+fuzz/fuzz-fuzz.zip
--- /dev/null
+MIT License
+
+Copyright (c) 2019 Yusuke Inuzuka
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
--- /dev/null
+.PHONY: test fuzz
+
+test:
+ go test -coverprofile=profile.out -coverpkg=github.com/yuin/goldmark,github.com/yuin/goldmark/ast,github.com/yuin/goldmark/extension,github.com/yuin/goldmark/extension/ast,github.com/yuin/goldmark/parser,github.com/yuin/goldmark/renderer,github.com/yuin/goldmark/renderer/html,github.com/yuin/goldmark/text,github.com/yuin/goldmark/util ./...
+
+cov: test
+ go tool cover -html=profile.out
+
+fuzz:
+ which go-fuzz > /dev/null 2>&1 || (GO111MODULE=off go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-build; GO111MODULE=off go get -d github.com/dvyukov/go-fuzz-corpus; true)
+ rm -rf ./fuzz/corpus
+ rm -rf ./fuzz/crashers
+ rm -rf ./fuzz/suppressions
+ rm -f ./fuzz/fuzz-fuzz.zip
+ cd ./fuzz && go-fuzz-build
+ cd ./fuzz && go-fuzz
--- /dev/null
+goldmark
+==========================================
+
+[![http://godoc.org/github.com/yuin/goldmark](https://godoc.org/github.com/yuin/goldmark?status.svg)](http://godoc.org/github.com/yuin/goldmark)
+[![https://github.com/yuin/goldmark/actions?query=workflow:test](https://github.com/yuin/goldmark/workflows/test/badge.svg?branch=master&event=push)](https://github.com/yuin/goldmark/actions?query=workflow:test)
+[![https://coveralls.io/github/yuin/goldmark](https://coveralls.io/repos/github/yuin/goldmark/badge.svg?branch=master)](https://coveralls.io/github/yuin/goldmark)
+[![https://goreportcard.com/report/github.com/yuin/goldmark](https://goreportcard.com/badge/github.com/yuin/goldmark)](https://goreportcard.com/report/github.com/yuin/goldmark)
+
+> A Markdown parser written in Go. Easy to extend, standard compliant, well structured.
+
+goldmark is compliant with CommonMark 0.29.
+
+Motivation
+----------------------
+I need a Markdown parser for Go that meets the following conditions:
+
+- Easy to extend.
+ - Markdown has limited expressive power compared with other lightweight markup languages like reStructuredText.
+ - We have extensions to the Markdown syntax, e.g. PHP Markdown Extra, GitHub Flavored Markdown.
+- Standard compliant.
+ - Markdown has many dialects.
+ - GitHub Flavored Markdown is widely used, and it is based on CommonMark, regardless of whether CommonMark is a good specification or not.
+ - CommonMark is too complicated and hard to implement.
+- Well structured.
+ - AST based, and preserves source position of nodes.
+- Written in pure Go.
+
+[golang-commonmark](https://gitlab.com/golang-commonmark/markdown) may be a good choice, but it seems to be a copy of [markdown-it](https://github.com/markdown-it).
+
+[blackfriday.v2](https://github.com/russross/blackfriday/tree/v2) is a fast and widely used implementation, but it is not CommonMark compliant and cannot be extended from outside of the package since its AST uses structs instead of interfaces.
+
+Furthermore, its behavior differs from other implementations in some cases, especially regarding lists: ([Deep nested lists don't output correctly #329](https://github.com/russross/blackfriday/issues/329), [List block cannot have a second line #244](https://github.com/russross/blackfriday/issues/244), etc).
+
+This behavior sometimes causes problems. If you migrate your Markdown text to blackfriday-based wikis from GitHub, many lists will immediately be broken.
+
+As mentioned above, CommonMark is complicated and hard to implement, so few Markdown parsers based on CommonMark exist.
+
+Features
+----------------------
+
+- **Standard compliant.** goldmark is fully compliant with the latest CommonMark specification.
+- **Extensible.** Do you want to add a `@username` mention syntax to Markdown?
+ You can easily do it in goldmark. You can add your AST nodes,
+ parsers for block level elements, parsers for inline level elements,
+ transformers for paragraphs, transformers for whole AST structure, and
+ renderers.
+- **Performance.** goldmark performs pretty much equally to cmark,
+ the CommonMark reference implementation written in C.
+- **Robust.** goldmark is tested with [go-fuzz](https://github.com/dvyukov/go-fuzz), a fuzz testing tool.
+- **Builtin extensions.** goldmark ships with common extensions like tables, strikethrough,
+ task lists, and definition lists.
+- **Depends only on standard libraries.**
+
+Installation
+----------------------
+```bash
+$ go get github.com/yuin/goldmark
+```
+
+
+Usage
+----------------------
+Import packages:
+
+```go
+import (
+ "bytes"
+ "github.com/yuin/goldmark"
+)
+```
+
+
+Convert Markdown documents with the CommonMark compliant mode:
+
+```go
+var buf bytes.Buffer
+if err := goldmark.Convert(source, &buf); err != nil {
+ panic(err)
+}
+```
+
+With options
+------------------------------
+
+```go
+var buf bytes.Buffer
+if err := goldmark.Convert(source, &buf, parser.WithContext(ctx)); err != nil {
+ panic(err)
+}
+```
+
+| Functional option | Type | Description |
+| ----------------- | ---- | ----------- |
+| `parser.WithContext` | A `parser.Context` | Context for the parsing phase. |
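+
+For completeness, the `parser.Context` used above can be created with `parser.NewContext()`; a minimal sketch:
+
+```go
+ctx := parser.NewContext() // per-parse state, also visible to extensions
+var buf bytes.Buffer
+if err := goldmark.Convert(source, &buf, parser.WithContext(ctx)); err != nil {
+	panic(err)
+}
+```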
+
+Context options
+----------------------
+
+| Functional option | Type | Description |
+| ----------------- | ---- | ----------- |
+| `parser.WithIDs` | A `parser.IDs` | `IDs` allows you to change the logic related to element IDs (e.g. auto heading ID generation). |
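+
+A rough sketch of plugging in a custom ID generator; here `myIDs` is a hypothetical type that implements the `parser.IDs` interface:
+
+```go
+ctx := parser.NewContext(parser.WithIDs(&myIDs{}))
+var buf bytes.Buffer
+if err := goldmark.Convert(source, &buf, parser.WithContext(ctx)); err != nil {
+	panic(err)
+}
+```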
+
+
+Custom parser and renderer
+--------------------------
+```go
+import (
+ "bytes"
+ "github.com/yuin/goldmark"
+ "github.com/yuin/goldmark/extension"
+ "github.com/yuin/goldmark/parser"
+ "github.com/yuin/goldmark/renderer/html"
+)
+
+md := goldmark.New(
+ goldmark.WithExtensions(extension.GFM),
+ goldmark.WithParserOptions(
+ parser.WithAutoHeadingID(),
+ ),
+ goldmark.WithRendererOptions(
+ html.WithHardWraps(),
+ html.WithXHTML(),
+ ),
+ )
+var buf bytes.Buffer
+if err := md.Convert(source, &buf); err != nil {
+ panic(err)
+}
+```
+
+Parser and Renderer options
+------------------------------
+
+### Parser options
+
+| Functional option | Type | Description |
+| ----------------- | ---- | ----------- |
+| `parser.WithBlockParsers` | A `util.PrioritizedSlice` whose elements are `parser.BlockParser` | Parsers for parsing block level elements. |
+| `parser.WithInlineParsers` | A `util.PrioritizedSlice` whose elements are `parser.InlineParser` | Parsers for parsing inline level elements. |
+| `parser.WithParagraphTransformers` | A `util.PrioritizedSlice` whose elements are `parser.ParagraphTransformer` | Transformers for transforming paragraph nodes. |
+| `parser.WithASTTransformers` | A `util.PrioritizedSlice` whose elements are `parser.ASTTransformer` | Transformers for transforming an AST. |
+| `parser.WithAutoHeadingID` | `-` | Enables auto heading ids. |
+| `parser.WithAttribute` | `-` | Enables custom attributes. Currently only headings support attributes. |
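+
+As an illustration of the transformer options, a do-nothing AST transformer might look like the sketch below. This assumes the `parser.ASTTransformer` interface with a `Transform(*ast.Document, text.Reader, parser.Context)` method; `nopTransformer` is an illustrative name:
+
+```go
+// nopTransformer is an illustrative parser.ASTTransformer; Transform is called
+// once per document after parsing, so the whole AST can be inspected or rewritten here.
+type nopTransformer struct{}
+
+func (t *nopTransformer) Transform(doc *ast.Document, reader text.Reader, pc parser.Context) {}
+
+md := goldmark.New(
+	goldmark.WithParserOptions(
+		parser.WithASTTransformers(util.Prioritized(&nopTransformer{}, 100)),
+	),
+)
+```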
+
+### HTML Renderer options
+
+| Functional option | Type | Description |
+| ----------------- | ---- | ----------- |
+| `html.WithWriter` | `html.Writer` | `html.Writer` for writing contents to an `io.Writer`. |
+| `html.WithHardWraps` | `-` | Render new lines as `<br>`.|
+| `html.WithXHTML` | `-` | Render as XHTML. |
+| `html.WithUnsafe` | `-` | By default, goldmark does not render raw HTML and potentially dangerous links. With this option, goldmark renders these contents as written. |
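+
+For example, a sketch combining two of these renderer options:
+
+```go
+md := goldmark.New(
+	goldmark.WithRendererOptions(
+		html.WithHardWraps(),
+		html.WithUnsafe(), // renders raw HTML; only for trusted input (see the Security section below)
+	),
+)
+```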
+
+### Built-in extensions
+
+- `extension.Table`
+ - [GitHub Flavored Markdown: Tables](https://github.github.com/gfm/#tables-extension-)
+- `extension.Strikethrough`
+ - [GitHub Flavored Markdown: Strikethrough](https://github.github.com/gfm/#strikethrough-extension-)
+- `extension.Linkify`
+ - [GitHub Flavored Markdown: Autolinks](https://github.github.com/gfm/#autolinks-extension-)
+- `extension.TaskList`
+ - [GitHub Flavored Markdown: Task list items](https://github.github.com/gfm/#task-list-items-extension-)
+- `extension.GFM`
+ - This extension enables Table, Strikethrough, Linkify and TaskList.
+ - This extension does not filter tags defined in [6.11: Disallowed Raw HTML (extension)](https://github.github.com/gfm/#disallowed-raw-html-extension-).
+ If you need to filter HTML tags, see [Security](#security)
+- `extension.DefinitionList`
+ - [PHP Markdown Extra: Definition lists](https://michelf.ca/projects/php-markdown/extra/#def-list)
+- `extension.Footnote`
+ - [PHP Markdown Extra: Footnotes](https://michelf.ca/projects/php-markdown/extra/#footnotes)
+- `extension.Typographer`
+ - This extension substitutes punctuation with typographic entities like [smartypants](https://daringfireball.net/projects/smartypants/).
+
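+For example, extensions are enabled when constructing the `goldmark.Markdown` instance:
+
+```go
+md := goldmark.New(
+	goldmark.WithExtensions(
+		extension.GFM,            // Table, Strikethrough, Linkify and TaskList
+		extension.Footnote,
+		extension.DefinitionList,
+	),
+)
+```
+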
+### Attributes
+The `parser.WithAttribute` option allows you to define attributes on some elements.
+
+Currently only headings support attributes.
+
+**Attributes are being discussed in the
+[CommonMark forum](https://talk.commonmark.org/t/consistent-attribute-syntax/272).
+This syntax may change in the future.**
+
+
+#### Headings
+
+```
+## heading ## {#id .className attrName=attrValue class="class1 class2"}
+
+## heading {#id .className attrName=attrValue class="class1 class2"}
+```
+
+```
+heading {#id .className attrName=attrValue}
+============
+```
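+
+To actually parse the attribute syntax above, enable the corresponding parser option (a minimal sketch):
+
+```go
+md := goldmark.New(
+	goldmark.WithParserOptions(
+		parser.WithAttribute(),     // enables {#id .class key=value} on headings
+		parser.WithAutoHeadingID(), // optional: generate ids for headings without one
+	),
+)
+```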
+
+### Typographer extension
+
+Typographer extension translates plain ASCII punctuation characters into typographic punctuation HTML entities.
+
+Default substitutions are:
+
+| Punctuation | Default entity |
+| ------------ | ---------- |
+| `'` | `‘`, `’` |
+| `"` | `“`, `”` |
+| `--` | `–` |
+| `---` | `—` |
+| `...` | `…` |
+| `<<` | `«` |
+| `>>` | `»` |
+
+You can override the substitutions with `extension.WithTypographicSubstitutions`.
+
+```go
+markdown := goldmark.New(
+ goldmark.WithExtensions(
+ extension.NewTypographer(
+ extension.WithTypographicSubstitutions(extension.TypographicSubstitutions{
+ extension.LeftSingleQuote: []byte("‚"),
+ extension.RightSingleQuote: nil, // nil disables a substitution
+ }),
+ ),
+ ),
+)
+```
+
+Security
+--------------------
+By default, goldmark does not render raw HTML and potentially dangerous URLs.
+If you need more control over untrusted content, it is recommended to
+use an HTML sanitizer such as [bluemonday](https://github.com/microcosm-cc/bluemonday).
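+
+A minimal sketch of that pattern, assuming bluemonday's `UGCPolicy` and a goldmark instance `md` built as in the examples above:
+
+```go
+var buf bytes.Buffer
+if err := md.Convert(source, &buf); err != nil {
+	panic(err)
+}
+// Sanitize the rendered HTML before serving it to users.
+safeHTML := bluemonday.UGCPolicy().SanitizeBytes(buf.Bytes())
+```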
+
+Benchmark
+--------------------
+You can run this benchmark in the `_benchmark` directory.
+
+### against other golang libraries
+
+blackfriday v2 seems to be the fastest, but it is not CommonMark compliant, so its performance
+cannot simply be compared with that of the other CommonMark-compliant libraries.
+
+Though goldmark builds a clean, extensible AST structure and is fully CommonMark compliant,
+it is reasonably fast and has lower memory consumption.
+
+```
+goos: darwin
+goarch: amd64
+BenchmarkMarkdown/Blackfriday-v2-12 326 3465240 ns/op 3298861 B/op 20047 allocs/op
+BenchmarkMarkdown/GoldMark-12 303 3927494 ns/op 2574809 B/op 13853 allocs/op
+BenchmarkMarkdown/CommonMark-12 244 4900853 ns/op 2753851 B/op 20527 allocs/op
+BenchmarkMarkdown/Lute-12 130 9195245 ns/op 9175030 B/op 123534 allocs/op
+BenchmarkMarkdown/GoMarkdown-12 9 113541994 ns/op 2187472 B/op 22173 allocs/op
+```
+
+### against cmark (CommonMark reference implementation written in C)
+
+```
+----------- cmark -----------
+file: _data.md
+iteration: 50
+average: 0.0037760639 sec
+go run ./goldmark_benchmark.go
+------- goldmark -------
+file: _data.md
+iteration: 50
+average: 0.0040964230 sec
+```
+
+As you can see, goldmark performs pretty much equally to cmark.
+
+Extensions
+--------------------
+
+- [goldmark-meta](https://github.com/yuin/goldmark-meta): A YAML metadata
+ extension for the goldmark Markdown parser.
+- [goldmark-highlighting](https://github.com/yuin/goldmark-highlighting): A syntax highlighting extension
+ for the goldmark Markdown parser.
+- [goldmark-mathjax](https://github.com/litao91/goldmark-mathjax): MathJax support for the goldmark Markdown parser
+
+goldmark internals (for extension developers)
+----------------------------------------------
+### Overview
+goldmark's Markdown processing is outlined in the diagram below.
+
+```
+ <Markdown in []byte, parser.Context>
+ |
+ V
+ +-------- parser.Parser ---------------------------
+ | 1. Parse block elements into AST
+ | 1. If a parsed block is a paragraph, apply
+ | ast.ParagraphTransformer
+ | 2. Traverse AST and parse inline elements.
+ | 1. Process delimiters (emphasis) at the end of
+ | block parsing
+ | 3. Apply parser.ASTTransformers to AST
+ |
+ V
+ <ast.Node>
+ |
+ V
+ +------- renderer.Renderer ------------------------
+ | 1. Traverse AST and apply the renderer.NodeRenderer
+ | corresponding to the node type
+
+ |
+ V
+ <Output>
+```
+
+### Parsing
+Markdown documents are read through the `text.Reader` interface.
+
+AST nodes do not hold concrete text themselves; instead, they hold segment information that points into the source document, represented by `text.Segment`.
+
+`text.Segment` has 3 attributes: `Start`, `End`, and `Padding`.
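+
+As a rough illustration (not goldmark internals), a segment simply records byte offsets into the source; this sketch assumes the `text.NewSegment` constructor:
+
+```go
+source := []byte("# Hello goldmark")
+seg := text.NewSegment(2, len(source)) // Start=2, End=len(source), Padding defaults to 0
+fmt.Printf("%s\n", seg.Value(source))  // prints "Hello goldmark"
+```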
+
+
+**TODO**
+
+See the `extension` directory for examples of extensions.
+
+Summary:
+
+1. Define an AST node as a struct that embeds `ast.BaseBlock` or `ast.BaseInline`.
+2. Write a parser that implements `parser.BlockParser` or `parser.InlineParser`.
+3. Write a renderer that implements `renderer.NodeRenderer`.
+4. Define your goldmark extension that implements `goldmark.Extender` (see the sketch below).
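+
+A minimal sketch of step 4; to stay self-contained it only toggles an existing parser option, while a real extension would also register the parsers and renderers from steps 2 and 3 (`asideExtension` is an illustrative name):
+
+```go
+type asideExtension struct{}
+
+// Extend wires the extension into a goldmark.Markdown instance.
+func (asideExtension) Extend(m goldmark.Markdown) {
+	m.Parser().AddOptions(parser.WithAutoHeadingID())
+}
+
+md := goldmark.New(goldmark.WithExtensions(&asideExtension{}))
+```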
+
+
+Donation
+--------------------
+BTC: 1NEDSyUmo4SMTDP83JJQSWi1MvQUGGNMZB
+
+License
+--------------------
+MIT
+
+Author
+--------------------
+Yusuke Inuzuka
--- /dev/null
+// Package ast defines AST nodes that represent markdown elements.
+package ast
+
+import (
+ "bytes"
+ "fmt"
+ "strings"
+
+ textm "github.com/yuin/goldmark/text"
+ "github.com/yuin/goldmark/util"
+)
+
+// A NodeType indicates what type a node belongs to.
+type NodeType int
+
+const (
+ // TypeBlock indicates that a node is a block node.
+ TypeBlock NodeType = iota + 1
+ // TypeInline indicates that a node is an inline node.
+ TypeInline
+ // TypeDocument indicates that a node is a document node.
+ TypeDocument
+)
+
+// NodeKind indicates a more specific type than NodeType.
+type NodeKind int
+
+func (k NodeKind) String() string {
+ return kindNames[k]
+}
+
+var kindMax NodeKind
+var kindNames = []string{""}
+
+// NewNodeKind returns a new Kind value.
+func NewNodeKind(name string) NodeKind {
+ kindMax++
+ kindNames = append(kindNames, name)
+ return kindMax
+}
+
+// An Attribute is an attribute of a Node.
+type Attribute struct {
+ Name []byte
+ Value interface{}
+}
+
+var attrNameIDS = []byte("#")
+var attrNameID = []byte("id")
+var attrNameClassS = []byte(".")
+var attrNameClass = []byte("class")
+
+// A Node interface defines basic AST node functionalities.
+type Node interface {
+ // Type returns a type of this node.
+ Type() NodeType
+
+ // Kind returns a kind of this node.
+ Kind() NodeKind
+
+ // NextSibling returns a next sibling node of this node.
+ NextSibling() Node
+
+ // PreviousSibling returns a previous sibling node of this node.
+ PreviousSibling() Node
+
+ // Parent returns a parent node of this node.
+ Parent() Node
+
+ // SetParent sets a parent node to this node.
+ SetParent(Node)
+
+ // SetPreviousSibling sets a previous sibling node to this node.
+ SetPreviousSibling(Node)
+
+ // SetNextSibling sets a next sibling node to this node.
+ SetNextSibling(Node)
+
+ // HasChildren returns true if this node has any children, otherwise false.
+ HasChildren() bool
+
+ // ChildCount returns a total number of children.
+ ChildCount() int
+
+ // FirstChild returns a first child of this node.
+ FirstChild() Node
+
+ // LastChild returns a last child of this node.
+ LastChild() Node
+
+ // AppendChild appends a child node to the tail of the children.
+ AppendChild(self, child Node)
+
+ // RemoveChild removes the given child node from this node.
+ // If the given node is not a child of this node, RemoveChild does nothing.
+ RemoveChild(self, child Node)
+
+ // RemoveChildren removes all children from this node.
+ RemoveChildren(self Node)
+
+ // SortChildren sorts the children using the given comparator.
+ SortChildren(comparator func(n1, n2 Node) int)
+
+ // ReplaceChild replaces the node v1 with the node insertee.
+ // If v1 is not a child of this node, ReplaceChild appends insertee to the
+ // tail of the children.
+ ReplaceChild(self, v1, insertee Node)
+
+ // InsertBefore inserts the node insertee before the node v1.
+ // If v1 is not a child of this node, InsertBefore appends insertee to the
+ // tail of the children.
+ InsertBefore(self, v1, insertee Node)
+
+ // InsertAfter inserts the node insertee after the node v1.
+ // If v1 is not a child of this node, InsertAfter appends insertee to the
+ // tail of the children.
+ InsertAfter(self, v1, insertee Node)
+
+ // Dump dumps the AST tree structure to stdout.
+ // This function is intended purely for debugging.
+ // level is an indent level; implementers should indent information with
+ // 2 * level spaces.
+ Dump(source []byte, level int)
+
+ // Text returns text values of this node.
+ Text(source []byte) []byte
+
+ // HasBlankPreviousLines returns true if the row before this node is blank,
+ // otherwise false.
+ // This method is valid only for block nodes.
+ HasBlankPreviousLines() bool
+
+ // SetBlankPreviousLines sets whether the row before this node is blank.
+ // This method is valid only for block nodes.
+ SetBlankPreviousLines(v bool)
+
+ // Lines returns text segments that hold positions in a source.
+ // This method is valid only for block nodes.
+ Lines() *textm.Segments
+
+ // SetLines sets text segments that hold positions in a source.
+ // This method is valid only for block nodes.
+ SetLines(*textm.Segments)
+
+ // IsRaw returns true if contents should be rendered as 'raw' contents.
+ IsRaw() bool
+
+ // SetAttribute sets the given value to the attributes.
+ SetAttribute(name []byte, value interface{})
+
+ // SetAttributeString sets the given value to the attributes.
+ SetAttributeString(name string, value interface{})
+
+ // Attribute returns a (attribute value, true) if an attribute
+ // associated with the given name is found, otherwise
+ // (nil, false)
+ Attribute(name []byte) (interface{}, bool)
+
+ // AttributeString returns a (attribute value, true) if an attribute
+ // associated with the given name is found, otherwise
+ // (nil, false)
+ AttributeString(name string) (interface{}, bool)
+
+ // Attributes returns a list of attributes.
+ // This may be nil if there are no attributes.
+ Attributes() []Attribute
+
+ // RemoveAttributes removes all attributes from this node.
+ RemoveAttributes()
+}
+
+// A BaseNode struct implements the Node interface.
+type BaseNode struct {
+ firstChild Node
+ lastChild Node
+ parent Node
+ next Node
+ prev Node
+ childCount int
+ attributes []Attribute
+}
+
+func ensureIsolated(v Node) {
+ if p := v.Parent(); p != nil {
+ p.RemoveChild(p, v)
+ }
+}
+
+// HasChildren implements Node.HasChildren .
+func (n *BaseNode) HasChildren() bool {
+ return n.firstChild != nil
+}
+
+// SetPreviousSibling implements Node.SetPreviousSibling .
+func (n *BaseNode) SetPreviousSibling(v Node) {
+ n.prev = v
+}
+
+// SetNextSibling implements Node.SetNextSibling .
+func (n *BaseNode) SetNextSibling(v Node) {
+ n.next = v
+}
+
+// PreviousSibling implements Node.PreviousSibling .
+func (n *BaseNode) PreviousSibling() Node {
+ return n.prev
+}
+
+// NextSibling implements Node.NextSibling .
+func (n *BaseNode) NextSibling() Node {
+ return n.next
+}
+
+// RemoveChild implements Node.RemoveChild .
+func (n *BaseNode) RemoveChild(self, v Node) {
+ if v.Parent() != self {
+ return
+ }
+ n.childCount--
+ prev := v.PreviousSibling()
+ next := v.NextSibling()
+ if prev != nil {
+ prev.SetNextSibling(next)
+ } else {
+ n.firstChild = next
+ }
+ if next != nil {
+ next.SetPreviousSibling(prev)
+ } else {
+ n.lastChild = prev
+ }
+ v.SetParent(nil)
+ v.SetPreviousSibling(nil)
+ v.SetNextSibling(nil)
+}
+
+// RemoveChildren implements Node.RemoveChildren .
+func (n *BaseNode) RemoveChildren(self Node) {
+ for c := n.firstChild; c != nil; c = c.NextSibling() {
+ c.SetParent(nil)
+ c.SetPreviousSibling(nil)
+ c.SetNextSibling(nil)
+ }
+ n.firstChild = nil
+ n.lastChild = nil
+ n.childCount = 0
+}
+
+// SortChildren implements Node.SortChildren
+func (n *BaseNode) SortChildren(comparator func(n1, n2 Node) int) {
+ var sorted Node
+ current := n.firstChild
+ for current != nil {
+ next := current.NextSibling()
+ if sorted == nil || comparator(sorted, current) >= 0 {
+ current.SetNextSibling(sorted)
+ if sorted != nil {
+ sorted.SetPreviousSibling(current)
+ }
+ sorted = current
+ sorted.SetPreviousSibling(nil)
+ } else {
+ c := sorted
+ for c.NextSibling() != nil && comparator(c.NextSibling(), current) < 0 {
+ c = c.NextSibling()
+ }
+ current.SetNextSibling(c.NextSibling())
+ current.SetPreviousSibling(c)
+ if c.NextSibling() != nil {
+ c.NextSibling().SetPreviousSibling(current)
+ }
+ c.SetNextSibling(current)
+ }
+ current = next
+ }
+ n.firstChild = sorted
+ for c := n.firstChild; c != nil; c = c.NextSibling() {
+ n.lastChild = c
+ }
+}
+
+// FirstChild implements Node.FirstChild .
+func (n *BaseNode) FirstChild() Node {
+ return n.firstChild
+}
+
+// LastChild implements Node.LastChild .
+func (n *BaseNode) LastChild() Node {
+ return n.lastChild
+}
+
+// ChildCount implements Node.ChildCount .
+func (n *BaseNode) ChildCount() int {
+ return n.childCount
+}
+
+// Parent implements Node.Parent .
+func (n *BaseNode) Parent() Node {
+ return n.parent
+}
+
+// SetParent implements Node.SetParent .
+func (n *BaseNode) SetParent(v Node) {
+ n.parent = v
+}
+
+// AppendChild implements Node.AppendChild .
+func (n *BaseNode) AppendChild(self, v Node) {
+ ensureIsolated(v)
+ if n.firstChild == nil {
+ n.firstChild = v
+ v.SetNextSibling(nil)
+ v.SetPreviousSibling(nil)
+ } else {
+ last := n.lastChild
+ last.SetNextSibling(v)
+ v.SetPreviousSibling(last)
+ }
+ v.SetParent(self)
+ n.lastChild = v
+ n.childCount++
+}
+
+// ReplaceChild implements Node.ReplaceChild .
+func (n *BaseNode) ReplaceChild(self, v1, insertee Node) {
+ n.InsertBefore(self, v1, insertee)
+ n.RemoveChild(self, v1)
+}
+
+// InsertAfter implements Node.InsertAfter .
+func (n *BaseNode) InsertAfter(self, v1, insertee Node) {
+ n.InsertBefore(self, v1.NextSibling(), insertee)
+}
+
+// InsertBefore implements Node.InsertBefore .
+func (n *BaseNode) InsertBefore(self, v1, insertee Node) {
+ n.childCount++
+ if v1 == nil {
+ n.AppendChild(self, insertee)
+ return
+ }
+ ensureIsolated(insertee)
+ if v1.Parent() == self {
+ c := v1
+ prev := c.PreviousSibling()
+ if prev != nil {
+ prev.SetNextSibling(insertee)
+ insertee.SetPreviousSibling(prev)
+ } else {
+ n.firstChild = insertee
+ insertee.SetPreviousSibling(nil)
+ }
+ insertee.SetNextSibling(c)
+ c.SetPreviousSibling(insertee)
+ insertee.SetParent(self)
+ }
+}
+
+// Text implements Node.Text .
+func (n *BaseNode) Text(source []byte) []byte {
+ var buf bytes.Buffer
+ for c := n.firstChild; c != nil; c = c.NextSibling() {
+ buf.Write(c.Text(source))
+ }
+ return buf.Bytes()
+}
+
+// SetAttribute implements Node.SetAttribute.
+func (n *BaseNode) SetAttribute(name []byte, value interface{}) {
+ if n.attributes == nil {
+ n.attributes = make([]Attribute, 0, 10)
+ } else {
+ for i, a := range n.attributes {
+ if bytes.Equal(a.Name, name) {
+ n.attributes[i].Name = name
+ n.attributes[i].Value = value
+ return
+ }
+ }
+ }
+ n.attributes = append(n.attributes, Attribute{name, value})
+}
+
+// SetAttributeString implements Node.SetAttributeString
+func (n *BaseNode) SetAttributeString(name string, value interface{}) {
+ n.SetAttribute(util.StringToReadOnlyBytes(name), value)
+}
+
+// Attribute implements Node.Attribute.
+func (n *BaseNode) Attribute(name []byte) (interface{}, bool) {
+ if n.attributes == nil {
+ return nil, false
+ }
+ for i, a := range n.attributes {
+ if bytes.Equal(a.Name, name) {
+ return n.attributes[i].Value, true
+ }
+ }
+ return nil, false
+}
+
+// AttributeString implements Node.AttributeString.
+func (n *BaseNode) AttributeString(s string) (interface{}, bool) {
+ return n.Attribute(util.StringToReadOnlyBytes(s))
+}
+
+// Attributes implements Node.Attributes
+func (n *BaseNode) Attributes() []Attribute {
+ return n.attributes
+}
+
+// RemoveAttributes implements Node.RemoveAttributes
+func (n *BaseNode) RemoveAttributes() {
+ n.attributes = nil
+}
+
+// DumpHelper is a helper function to implement Node.Dump.
+// kv holds pairs of attribute names and attribute values.
+// cb is a function called after the name and attributes have been written.
+func DumpHelper(v Node, source []byte, level int, kv map[string]string, cb func(int)) {
+ name := v.Kind().String()
+ indent := strings.Repeat(" ", level)
+ fmt.Printf("%s%s {\n", indent, name)
+ indent2 := strings.Repeat(" ", level+1)
+ if v.Type() == TypeBlock {
+ fmt.Printf("%sRawText: \"", indent2)
+ for i := 0; i < v.Lines().Len(); i++ {
+ line := v.Lines().At(i)
+ fmt.Printf("%s", line.Value(source))
+ }
+ fmt.Printf("\"\n")
+ fmt.Printf("%sHasBlankPreviousLines: %v\n", indent2, v.HasBlankPreviousLines())
+ }
+ for name, value := range kv {
+ fmt.Printf("%s%s: %s\n", indent2, name, value)
+ }
+ if cb != nil {
+ cb(level + 1)
+ }
+ for c := v.FirstChild(); c != nil; c = c.NextSibling() {
+ c.Dump(source, level+1)
+ }
+ fmt.Printf("%s}\n", indent)
+}
+
+// WalkStatus represents a current status of the Walk function.
+type WalkStatus int
+
+const (
+ // WalkStop indicates no more walking needed.
+ WalkStop WalkStatus = iota + 1
+
+ // WalkSkipChildren indicates that Walk won't walk the children of the
+ // current node.
+ WalkSkipChildren
+
+ // WalkContinue indicates that Walk can continue to walk.
+ WalkContinue
+)
+
+// Walker is a function called for each node that Walk visits.
+// entering is true before the node's children are walked and false after
+// they have been walked.
+// If Walker returns an error, Walk stops immediately.
+type Walker func(n Node, entering bool) (WalkStatus, error)
+
+// Walk walks an AST tree using a depth-first search algorithm.
+func Walk(n Node, walker Walker) error {
+ status, err := walker(n, true)
+ if err != nil || status == WalkStop {
+ return err
+ }
+ if status != WalkSkipChildren {
+ for c := n.FirstChild(); c != nil; c = c.NextSibling() {
+ if err = Walk(c, walker); err != nil {
+ return err
+ }
+ }
+ }
+ status, err = walker(n, false)
+ if err != nil || status == WalkStop {
+ return err
+ }
+ return nil
+}
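+
+// Example (illustrative sketch): counting the headings in a previously parsed
+// document doc by checking each node's Kind on entry:
+//
+//	headings := 0
+//	_ = Walk(doc, func(n Node, entering bool) (WalkStatus, error) {
+//		if entering && n.Kind() == KindHeading {
+//			headings++
+//		}
+//		return WalkContinue, nil
+//	})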
--- /dev/null
+package ast
+
+import (
+ "fmt"
+ "strings"
+
+ textm "github.com/yuin/goldmark/text"
+)
+
+// A BaseBlock struct implements the Node interface.
+type BaseBlock struct {
+ BaseNode
+ blankPreviousLines bool
+ lines *textm.Segments
+}
+
+// Type implements Node.Type
+func (b *BaseBlock) Type() NodeType {
+ return TypeBlock
+}
+
+// IsRaw implements Node.IsRaw
+func (b *BaseBlock) IsRaw() bool {
+ return false
+}
+
+// HasBlankPreviousLines implements Node.HasBlankPreviousLines.
+func (b *BaseBlock) HasBlankPreviousLines() bool {
+ return b.blankPreviousLines
+}
+
+// SetBlankPreviousLines implements Node.SetBlankPreviousLines.
+func (b *BaseBlock) SetBlankPreviousLines(v bool) {
+ b.blankPreviousLines = v
+}
+
+// Lines implements Node.Lines
+func (b *BaseBlock) Lines() *textm.Segments {
+ if b.lines == nil {
+ b.lines = textm.NewSegments()
+ }
+ return b.lines
+}
+
+// SetLines implements Node.SetLines
+func (b *BaseBlock) SetLines(v *textm.Segments) {
+ b.lines = v
+}
+
+// A Document struct is a root node of Markdown text.
+type Document struct {
+ BaseBlock
+}
+
+// KindDocument is a NodeKind of the Document node.
+var KindDocument = NewNodeKind("Document")
+
+// Dump implements Node.Dump .
+func (n *Document) Dump(source []byte, level int) {
+ DumpHelper(n, source, level, nil, nil)
+}
+
+// Type implements Node.Type .
+func (n *Document) Type() NodeType {
+ return TypeDocument
+}
+
+// Kind implements Node.Kind.
+func (n *Document) Kind() NodeKind {
+ return KindDocument
+}
+
+// NewDocument returns a new Document node.
+func NewDocument() *Document {
+ return &Document{
+ BaseBlock: BaseBlock{},
+ }
+}
+
+// A TextBlock struct is a node whose lines
+// should be rendered without any containers.
+type TextBlock struct {
+ BaseBlock
+}
+
+// Dump implements Node.Dump .
+func (n *TextBlock) Dump(source []byte, level int) {
+ DumpHelper(n, source, level, nil, nil)
+}
+
+// KindTextBlock is a NodeKind of the TextBlock node.
+var KindTextBlock = NewNodeKind("TextBlock")
+
+// Kind implements Node.Kind.
+func (n *TextBlock) Kind() NodeKind {
+ return KindTextBlock
+}
+
+// NewTextBlock returns a new TextBlock node.
+func NewTextBlock() *TextBlock {
+ return &TextBlock{
+ BaseBlock: BaseBlock{},
+ }
+}
+
+// A Paragraph struct represents a paragraph of Markdown text.
+type Paragraph struct {
+ BaseBlock
+}
+
+// Dump implements Node.Dump .
+func (n *Paragraph) Dump(source []byte, level int) {
+ DumpHelper(n, source, level, nil, nil)
+}
+
+// KindParagraph is a NodeKind of the Paragraph node.
+var KindParagraph = NewNodeKind("Paragraph")
+
+// Kind implements Node.Kind.
+func (n *Paragraph) Kind() NodeKind {
+ return KindParagraph
+}
+
+// NewParagraph returns a new Paragraph node.
+func NewParagraph() *Paragraph {
+ return &Paragraph{
+ BaseBlock: BaseBlock{},
+ }
+}
+
+// IsParagraph returns true if the given node is a Paragraph node, otherwise false.
+func IsParagraph(node Node) bool {
+ _, ok := node.(*Paragraph)
+ return ok
+}
+
+// A Heading struct represents headings like SetextHeading and ATXHeading.
+type Heading struct {
+ BaseBlock
+ // Level is the level of this heading.
+ // This value is between 1 and 6.
+ Level int
+}
+
+// Dump implements Node.Dump .
+func (n *Heading) Dump(source []byte, level int) {
+ m := map[string]string{
+ "Level": fmt.Sprintf("%d", n.Level),
+ }
+ DumpHelper(n, source, level, m, nil)
+}
+
+// KindHeading is a NodeKind of the Heading node.
+var KindHeading = NewNodeKind("Heading")
+
+// Kind implements Node.Kind.
+func (n *Heading) Kind() NodeKind {
+ return KindHeading
+}
+
+// NewHeading returns a new Heading node.
+func NewHeading(level int) *Heading {
+ return &Heading{
+ BaseBlock: BaseBlock{},
+ Level: level,
+ }
+}
+
+// A ThematicBreak struct represents a thematic break of Markdown text.
+type ThematicBreak struct {
+ BaseBlock
+}
+
+// Dump implements Node.Dump .
+func (n *ThematicBreak) Dump(source []byte, level int) {
+ DumpHelper(n, source, level, nil, nil)
+}
+
+// KindThematicBreak is a NodeKind of the ThematicBreak node.
+var KindThematicBreak = NewNodeKind("ThematicBreak")
+
+// Kind implements Node.Kind.
+func (n *ThematicBreak) Kind() NodeKind {
+ return KindThematicBreak
+}
+
+// NewThematicBreak returns a new ThematicBreak node.
+func NewThematicBreak() *ThematicBreak {
+ return &ThematicBreak{
+ BaseBlock: BaseBlock{},
+ }
+}
+
+// A CodeBlock struct represents an indented code block of Markdown text.
+type CodeBlock struct {
+ BaseBlock
+}
+
+// IsRaw implements Node.IsRaw.
+func (n *CodeBlock) IsRaw() bool {
+ return true
+}
+
+// Dump implements Node.Dump .
+func (n *CodeBlock) Dump(source []byte, level int) {
+ DumpHelper(n, source, level, nil, nil)
+}
+
+// KindCodeBlock is a NodeKind of the CodeBlock node.
+var KindCodeBlock = NewNodeKind("CodeBlock")
+
+// Kind implements Node.Kind.
+func (n *CodeBlock) Kind() NodeKind {
+ return KindCodeBlock
+}
+
+// NewCodeBlock returns a new CodeBlock node.
+func NewCodeBlock() *CodeBlock {
+ return &CodeBlock{
+ BaseBlock: BaseBlock{},
+ }
+}
+
+// A FencedCodeBlock struct represents a fenced code block of Markdown text.
+type FencedCodeBlock struct {
+ BaseBlock
+ // Info is the info text of this fenced code block.
+ Info *Text
+
+ language []byte
+}
+
+// Language returns the language written in the info string.
+// Language returns nil if this node does not have an info string.
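+// For example, if the info string of the fence is "go linenums", Language
+// returns "go" (illustrative: the info string is cut at the first space).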
+func (n *FencedCodeBlock) Language(source []byte) []byte {
+ if n.language == nil && n.Info != nil {
+ segment := n.Info.Segment
+ info := segment.Value(source)
+ i := 0
+ for ; i < len(info); i++ {
+ if info[i] == ' ' {
+ break
+ }
+ }
+ n.language = info[:i]
+ }
+ return n.language
+}
+
+// IsRaw implements Node.IsRaw.
+func (n *FencedCodeBlock) IsRaw() bool {
+ return true
+}
+
+// Dump implements Node.Dump .
+func (n *FencedCodeBlock) Dump(source []byte, level int) {
+ m := map[string]string{}
+ if n.Info != nil {
+ m["Info"] = fmt.Sprintf("\"%s\"", n.Info.Text(source))
+ }
+ DumpHelper(n, source, level, m, nil)
+}
+
+// KindFencedCodeBlock is a NodeKind of the FencedCodeBlock node.
+var KindFencedCodeBlock = NewNodeKind("FencedCodeBlock")
+
+// Kind implements Node.Kind.
+func (n *FencedCodeBlock) Kind() NodeKind {
+ return KindFencedCodeBlock
+}
+
+// NewFencedCodeBlock returns a new FencedCodeBlock node.
+func NewFencedCodeBlock(info *Text) *FencedCodeBlock {
+ return &FencedCodeBlock{
+ BaseBlock: BaseBlock{},
+ Info: info,
+ }
+}
+
+// A Blockquote struct represents a blockquote block of Markdown text.
+type Blockquote struct {
+ BaseBlock
+}
+
+// Dump implements Node.Dump .
+func (n *Blockquote) Dump(source []byte, level int) {
+ DumpHelper(n, source, level, nil, nil)
+}
+
+// KindBlockquote is a NodeKind of the Blockquote node.
+var KindBlockquote = NewNodeKind("Blockquote")
+
+// Kind implements Node.Kind.
+func (n *Blockquote) Kind() NodeKind {
+ return KindBlockquote
+}
+
+// NewBlockquote returns a new Blockquote node.
+func NewBlockquote() *Blockquote {
+ return &Blockquote{
+ BaseBlock: BaseBlock{},
+ }
+}
+
+// A List struct represents a list of Markdown text.
+type List struct {
+ BaseBlock
+
+ // Marker is a marker character like '-', '+', ')' and '.'.
+ Marker byte
+
+ // IsTight is true if this list is a 'tight' list.
+ // See https://spec.commonmark.org/0.29/#loose for details.
+ IsTight bool
+
+ // Start is the initial number of this ordered list.
+ // If this list is not an ordered list, Start is 0.
+ Start int
+}
+
+// IsOrdered returns true if this list is an ordered list, otherwise false.
+func (l *List) IsOrdered() bool {
+ return l.Marker == '.' || l.Marker == ')'
+}
+
+// CanContinue returns true if this list can continue with
+// the given mark and a list type, otherwise false.
+func (l *List) CanContinue(marker byte, isOrdered bool) bool {
+ return marker == l.Marker && isOrdered == l.IsOrdered()
+}
+
+// Dump implements Node.Dump.
+func (l *List) Dump(source []byte, level int) {
+ m := map[string]string{
+ "Ordered": fmt.Sprintf("%v", l.IsOrdered()),
+ "Marker": fmt.Sprintf("%c", l.Marker),
+ "Tight": fmt.Sprintf("%v", l.IsTight),
+ }
+ if l.IsOrdered() {
+ m["Start"] = fmt.Sprintf("%d", l.Start)
+ }
+ DumpHelper(l, source, level, m, nil)
+}
+
+// KindList is a NodeKind of the List node.
+var KindList = NewNodeKind("List")
+
+// Kind implements Node.Kind.
+func (l *List) Kind() NodeKind {
+ return KindList
+}
+
+// NewList returns a new List node.
+func NewList(marker byte) *List {
+ return &List{
+ BaseBlock: BaseBlock{},
+ Marker: marker,
+ IsTight: true,
+ }
+}
+
+// A ListItem struct represents a list item of Markdown text.
+type ListItem struct {
+ BaseBlock
+
+ // Offset is the offset position of this item.
+ Offset int
+}
+
+// Dump implements Node.Dump.
+func (n *ListItem) Dump(source []byte, level int) {
+ m := map[string]string{
+ "Offset": fmt.Sprintf("%d", n.Offset),
+ }
+ DumpHelper(n, source, level, m, nil)
+}
+
+// KindListItem is a NodeKind of the ListItem node.
+var KindListItem = NewNodeKind("ListItem")
+
+// Kind implements Node.Kind.
+func (n *ListItem) Kind() NodeKind {
+ return KindListItem
+}
+
+// NewListItem returns a new ListItem node.
+func NewListItem(offset int) *ListItem {
+ return &ListItem{
+ BaseBlock: BaseBlock{},
+ Offset: offset,
+ }
+}
+
+// HTMLBlockType represents the type of an HTML block.
+// See https://spec.commonmark.org/0.29/#html-blocks
+type HTMLBlockType int
+
+const (
+ // HTMLBlockType1 represents type 1 html blocks
+ HTMLBlockType1 HTMLBlockType = iota + 1
+ // HTMLBlockType2 represents type 2 html blocks
+ HTMLBlockType2
+ // HTMLBlockType3 represents type 3 html blocks
+ HTMLBlockType3
+ // HTMLBlockType4 represents type 4 html blocks
+ HTMLBlockType4
+ // HTMLBlockType5 represents type 5 html blocks
+ HTMLBlockType5
+ // HTMLBlockType6 represents type 6 html blocks
+ HTMLBlockType6
+ // HTMLBlockType7 represents type 7 html blocks
+ HTMLBlockType7
+)
+
+// An HTMLBlock struct represents an HTML block of Markdown text.
+type HTMLBlock struct {
+ BaseBlock
+
+ // HTMLBlockType is the type of this HTML block.
+ HTMLBlockType HTMLBlockType
+
+ // ClosureLine is a line that closes this html block.
+ ClosureLine textm.Segment
+}
+
+// IsRaw implements Node.IsRaw.
+func (n *HTMLBlock) IsRaw() bool {
+ return true
+}
+
+// HasClosure returns true if this html block has a closure line,
+// otherwise false.
+func (n *HTMLBlock) HasClosure() bool {
+ return n.ClosureLine.Start >= 0
+}
+
+// Dump implements Node.Dump.
+func (n *HTMLBlock) Dump(source []byte, level int) {
+ indent := strings.Repeat(" ", level)
+ fmt.Printf("%s%s {\n", indent, "HTMLBlock")
+ indent2 := strings.Repeat(" ", level+1)
+ fmt.Printf("%sRawText: \"", indent2)
+ for i := 0; i < n.Lines().Len(); i++ {
+ s := n.Lines().At(i)
+ fmt.Print(string(source[s.Start:s.Stop]))
+ }
+ fmt.Printf("\"\n")
+ for c := n.FirstChild(); c != nil; c = c.NextSibling() {
+ c.Dump(source, level+1)
+ }
+ if n.HasClosure() {
+ cl := n.ClosureLine
+ fmt.Printf("%sClosure: \"%s\"\n", indent2, string(cl.Value(source)))
+ }
+ fmt.Printf("%s}\n", indent)
+}
+
+// KindHTMLBlock is a NodeKind of the HTMLBlock node.
+var KindHTMLBlock = NewNodeKind("HTMLBlock")
+
+// Kind implements Node.Kind.
+func (n *HTMLBlock) Kind() NodeKind {
+ return KindHTMLBlock
+}
+
+// NewHTMLBlock returns a new HTMLBlock node.
+func NewHTMLBlock(typ HTMLBlockType) *HTMLBlock {
+ return &HTMLBlock{
+ BaseBlock: BaseBlock{},
+ HTMLBlockType: typ,
+ ClosureLine: textm.NewSegment(-1, -1),
+ }
+}
--- /dev/null
+package ast
+
+import (
+ "fmt"
+ "strings"
+
+ textm "github.com/yuin/goldmark/text"
+ "github.com/yuin/goldmark/util"
+)
+
+// A BaseInline struct implements the Node interface.
+type BaseInline struct {
+ BaseNode
+}
+
+// Type implements Node.Type
+func (b *BaseInline) Type() NodeType {
+ return TypeInline
+}
+
+// IsRaw implements Node.IsRaw
+func (b *BaseInline) IsRaw() bool {
+ return false
+}
+
+// HasBlankPreviousLines implements Node.HasBlankPreviousLines.
+func (b *BaseInline) HasBlankPreviousLines() bool {
+ panic("can not call with inline nodes.")
+}
+
+// SetBlankPreviousLines implements Node.SetBlankPreviousLines.
+func (b *BaseInline) SetBlankPreviousLines(v bool) {
+ panic("can not call with inline nodes.")
+}
+
+// Lines implements Node.Lines
+func (b *BaseInline) Lines() *textm.Segments {
+ panic("can not call with inline nodes.")
+}
+
+// SetLines implements Node.SetLines
+func (b *BaseInline) SetLines(v *textm.Segments) {
+ panic("can not call with inline nodes.")
+}
+
+// A Text struct represents a textual content of the Markdown text.
+type Text struct {
+ BaseInline
+ // Segment is a position in a source text.
+ Segment textm.Segment
+
+ flags uint8
+}
+
+const (
+ textSoftLineBreak = 1 << iota
+ textHardLineBreak
+ textRaw
+ textCode
+)
+
+func textFlagsString(flags uint8) string {
+ buf := []string{}
+ if flags&textSoftLineBreak != 0 {
+ buf = append(buf, "SoftLineBreak")
+ }
+ if flags&textHardLineBreak != 0 {
+ buf = append(buf, "HardLineBreak")
+ }
+ if flags&textRaw != 0 {
+ buf = append(buf, "Raw")
+ }
+ if flags&textCode != 0 {
+ buf = append(buf, "Code")
+ }
+ return strings.Join(buf, ", ")
+}
+
+// Inline implements Inline.Inline.
+func (n *Text) Inline() {
+}
+
+// SoftLineBreak returns true if this node ends with a new line,
+// otherwise false.
+func (n *Text) SoftLineBreak() bool {
+ return n.flags&textSoftLineBreak != 0
+}
+
+// SetSoftLineBreak sets whether this node ends with a new line.
+func (n *Text) SetSoftLineBreak(v bool) {
+ if v {
+ n.flags |= textSoftLineBreak
+ } else {
+ n.flags = n.flags &^ textSoftLineBreak
+ }
+}
+
+// IsRaw returns true if this text should be rendered without unescaping
+// backslash escapes and resolving references.
+func (n *Text) IsRaw() bool {
+ return n.flags&textRaw != 0
+}
+
+// SetRaw sets whether this text should be rendered as raw contents.
+func (n *Text) SetRaw(v bool) {
+ if v {
+ n.flags |= textRaw
+ } else {
+ n.flags = n.flags &^ textRaw
+ }
+}
+
+// HardLineBreak returns true if this node ends with a hard line break.
+// See https://spec.commonmark.org/0.29/#hard-line-breaks for details.
+func (n *Text) HardLineBreak() bool {
+ return n.flags&textHardLineBreak != 0
+}
+
+// SetHardLineBreak sets whether this node ends with a hard line break.
+func (n *Text) SetHardLineBreak(v bool) {
+ if v {
+ n.flags |= textHardLineBreak
+ } else {
+ n.flags = n.flags &^ textHardLineBreak
+ }
+}
+
+// Merge merges the given node into this node.
+// Merge returns true if the given node has been merged, otherwise false.
+func (n *Text) Merge(node Node, source []byte) bool {
+ t, ok := node.(*Text)
+ if !ok {
+ return false
+ }
+ if n.Segment.Stop != t.Segment.Start || t.Segment.Padding != 0 || source[n.Segment.Stop-1] == '\n' || t.IsRaw() != n.IsRaw() {
+ return false
+ }
+ n.Segment.Stop = t.Segment.Stop
+ n.SetSoftLineBreak(t.SoftLineBreak())
+ n.SetHardLineBreak(t.HardLineBreak())
+ return true
+}
+
+// Text implements Node.Text.
+func (n *Text) Text(source []byte) []byte {
+ return n.Segment.Value(source)
+}
+
+// Dump implements Node.Dump.
+func (n *Text) Dump(source []byte, level int) {
+ fs := textFlagsString(n.flags)
+ if len(fs) != 0 {
+ fs = "(" + fs + ")"
+ }
+ fmt.Printf("%sText%s: \"%s\"\n", strings.Repeat(" ", level), fs, strings.TrimRight(string(n.Text(source)), "\n"))
+}
+
+// KindText is a NodeKind of the Text node.
+var KindText = NewNodeKind("Text")
+
+// Kind implements Node.Kind.
+func (n *Text) Kind() NodeKind {
+ return KindText
+}
+
+// NewText returns a new Text node.
+func NewText() *Text {
+ return &Text{
+ BaseInline: BaseInline{},
+ }
+}
+
+// NewTextSegment returns a new Text node with the given source position.
+func NewTextSegment(v textm.Segment) *Text {
+ return &Text{
+ BaseInline: BaseInline{},
+ Segment: v,
+ }
+}
+
+// NewRawTextSegment returns a new Text node with the given source position.
+// The new node should be rendered as raw contents.
+func NewRawTextSegment(v textm.Segment) *Text {
+ t := &Text{
+ BaseInline: BaseInline{},
+ Segment: v,
+ }
+ t.SetRaw(true)
+ return t
+}
+
+// MergeOrAppendTextSegment merges a given s into the last child of the parent
+// if it can be merged, otherwise creates a new Text node and appends it after
+// the current last child.
+func MergeOrAppendTextSegment(parent Node, s textm.Segment) {
+ last := parent.LastChild()
+ t, ok := last.(*Text)
+ if ok && t.Segment.Stop == s.Start && !t.SoftLineBreak() {
+ t.Segment = t.Segment.WithStop(s.Stop)
+ } else {
+ parent.AppendChild(parent, NewTextSegment(s))
+ }
+}
+
+// MergeOrReplaceTextSegment merges a given s into the previous sibling of the
+// node n if that sibling is a *Text, otherwise replaces the node n with a new
+// Text node built from s.
+func MergeOrReplaceTextSegment(parent Node, n Node, s textm.Segment) {
+ prev := n.PreviousSibling()
+ if t, ok := prev.(*Text); ok && t.Segment.Stop == s.Start && !t.SoftLineBreak() {
+ t.Segment = t.Segment.WithStop(s.Stop)
+ parent.RemoveChild(parent, n)
+ } else {
+ parent.ReplaceChild(parent, n, NewTextSegment(s))
+ }
+}
+
+// A String struct is textual content that has a concrete value.
+type String struct {
+ BaseInline
+
+ Value []byte
+ flags uint8
+}
+
+// Inline implements Inline.Inline.
+func (n *String) Inline() {
+}
+
+// IsRaw returns true if this text should be rendered without unescaping
+// backslash escapes and resolving references.
+func (n *String) IsRaw() bool {
+ return n.flags&textRaw != 0
+}
+
+// SetRaw sets whether this text should be rendered as raw contents.
+func (n *String) SetRaw(v bool) {
+ if v {
+ n.flags |= textRaw
+ } else {
+ n.flags = n.flags &^ textRaw
+ }
+}
+
+// IsCode returns true if this text should be rendered without any
+// modifications.
+func (n *String) IsCode() bool {
+ return n.flags&textCode != 0
+}
+
+// SetCode sets whether this text should be rendered without any modifications.
+func (n *String) SetCode(v bool) {
+ if v {
+ n.flags |= textCode
+ } else {
+ n.flags = n.flags &^ textCode
+ }
+}
+
+// Text implements Node.Text.
+func (n *String) Text(source []byte) []byte {
+ return n.Value
+}
+
+// Dump implements Node.Dump.
+func (n *String) Dump(source []byte, level int) {
+ fs := textFlagsString(n.flags)
+ if len(fs) != 0 {
+ fs = "(" + fs + ")"
+ }
+ fmt.Printf("%sString%s: \"%s\"\n", strings.Repeat(" ", level), fs, strings.TrimRight(string(n.Value), "\n"))
+}
+
+// KindString is a NodeKind of the String node.
+var KindString = NewNodeKind("String")
+
+// Kind implements Node.Kind.
+func (n *String) Kind() NodeKind {
+ return KindString
+}
+
+// NewString returns a new String node.
+func NewString(v []byte) *String {
+ return &String{
+ Value: v,
+ }
+}
+
+// A CodeSpan struct represents a code span of Markdown text.
+type CodeSpan struct {
+ BaseInline
+}
+
+// Inline implements Inline.Inline .
+func (n *CodeSpan) Inline() {
+}
+
+// IsBlank returns true if this node consists of spaces, otherwise false.
+func (n *CodeSpan) IsBlank(source []byte) bool {
+ for c := n.FirstChild(); c != nil; c = c.NextSibling() {
+ text := c.(*Text).Segment
+ if !util.IsBlank(text.Value(source)) {
+ return false
+ }
+ }
+ return true
+}
+
+// Dump implements Node.Dump
+func (n *CodeSpan) Dump(source []byte, level int) {
+ DumpHelper(n, source, level, nil, nil)
+}
+
+// KindCodeSpan is a NodeKind of the CodeSpan node.
+var KindCodeSpan = NewNodeKind("CodeSpan")
+
+// Kind implements Node.Kind.
+func (n *CodeSpan) Kind() NodeKind {
+ return KindCodeSpan
+}
+
+// NewCodeSpan returns a new CodeSpan node.
+func NewCodeSpan() *CodeSpan {
+ return &CodeSpan{
+ BaseInline: BaseInline{},
+ }
+}
+
+// An Emphasis struct represents an emphasis of Markdown text.
+type Emphasis struct {
+ BaseInline
+
+ // Level is a level of the emphasis.
+ Level int
+}
+
+// Dump implements Node.Dump.
+func (n *Emphasis) Dump(source []byte, level int) {
+ m := map[string]string{
+ "Level": fmt.Sprintf("%v", n.Level),
+ }
+ DumpHelper(n, source, level, m, nil)
+}
+
+// KindEmphasis is a NodeKind of the Emphasis node.
+var KindEmphasis = NewNodeKind("Emphasis")
+
+// Kind implements Node.Kind.
+func (n *Emphasis) Kind() NodeKind {
+ return KindEmphasis
+}
+
+// NewEmphasis returns a new Emphasis node with the given level.
+func NewEmphasis(level int) *Emphasis {
+ return &Emphasis{
+ BaseInline: BaseInline{},
+ Level: level,
+ }
+}
+
+type baseLink struct {
+ BaseInline
+
+ // Destination is the destination (URL) of this link.
+ Destination []byte
+
+ // Title is the title of this link.
+ Title []byte
+}
+
+// Inline implements Inline.Inline.
+func (n *baseLink) Inline() {
+}
+
+// A Link struct represents a link of the Markdown text.
+type Link struct {
+ baseLink
+}
+
+// Dump implements Node.Dump.
+func (n *Link) Dump(source []byte, level int) {
+ m := map[string]string{}
+ m["Destination"] = string(n.Destination)
+ m["Title"] = string(n.Title)
+ DumpHelper(n, source, level, m, nil)
+}
+
+// KindLink is a NodeKind of the Link node.
+var KindLink = NewNodeKind("Link")
+
+// Kind implements Node.Kind.
+func (n *Link) Kind() NodeKind {
+ return KindLink
+}
+
+// NewLink returns a new Link node.
+func NewLink() *Link {
+ c := &Link{
+ baseLink: baseLink{
+ BaseInline: BaseInline{},
+ },
+ }
+ return c
+}
+
+// An Image struct represents an image of the Markdown text.
+type Image struct {
+ baseLink
+}
+
+// Dump implements Node.Dump.
+func (n *Image) Dump(source []byte, level int) {
+ m := map[string]string{}
+ m["Destination"] = string(n.Destination)
+ m["Title"] = string(n.Title)
+ DumpHelper(n, source, level, m, nil)
+}
+
+// KindImage is a NodeKind of the Image node.
+var KindImage = NewNodeKind("Image")
+
+// Kind implements Node.Kind.
+func (n *Image) Kind() NodeKind {
+ return KindImage
+}
+
+// NewImage returns a new Image node.
+func NewImage(link *Link) *Image {
+ c := &Image{
+ baseLink: baseLink{
+ BaseInline: BaseInline{},
+ },
+ }
+ c.Destination = link.Destination
+ c.Title = link.Title
+ for n := link.FirstChild(); n != nil; {
+ next := n.NextSibling()
+ link.RemoveChild(link, n)
+ c.AppendChild(c, n)
+ n = next
+ }
+
+ return c
+}
+
+// AutoLinkType defines the kind of autolinks.
+type AutoLinkType int
+
+const (
+ // AutoLinkEmail indicates that an autolink is an email address.
+ AutoLinkEmail AutoLinkType = iota + 1
+ // AutoLinkURL indicates that an autolink is a generic URL.
+ AutoLinkURL
+)
+
+// An AutoLink struct represents an autolink of the Markdown text.
+type AutoLink struct {
+ BaseInline
+ // AutoLinkType is the type of this autolink.
+ AutoLinkType AutoLinkType
+
+ // Protocol specifies the protocol of the link.
+ Protocol []byte
+
+ value *Text
+}
+
+// Inline implements Inline.Inline.
+func (n *AutoLink) Inline() {}
+
+// Dump implements Node.Dump.
+func (n *AutoLink) Dump(source []byte, level int) {
+ segment := n.value.Segment
+ m := map[string]string{
+ "Value": string(segment.Value(source)),
+ }
+ DumpHelper(n, source, level, m, nil)
+}
+
+// KindAutoLink is a NodeKind of the AutoLink node.
+var KindAutoLink = NewNodeKind("AutoLink")
+
+// Kind implements Node.Kind.
+func (n *AutoLink) Kind() NodeKind {
+ return KindAutoLink
+}
+
+// URL returns the URL of this node.
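+// For a "www." autolink, for instance, the linkify parser sets Protocol to
+// "http", so URL would produce something like "http://www.example.com"
+// (an illustrative value, not taken from the source).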
+func (n *AutoLink) URL(source []byte) []byte {
+ if n.Protocol != nil {
+ s := n.value.Segment
+ ret := make([]byte, 0, len(n.Protocol)+s.Len()+3)
+ ret = append(ret, n.Protocol...)
+ ret = append(ret, ':', '/', '/')
+ ret = append(ret, n.value.Text(source)...)
+ return ret
+ }
+ return n.value.Text(source)
+}
+
+// Label returns a label of this node.
+func (n *AutoLink) Label(source []byte) []byte {
+ return n.value.Text(source)
+}
+
+// NewAutoLink returns a new AutoLink node.
+func NewAutoLink(typ AutoLinkType, value *Text) *AutoLink {
+ return &AutoLink{
+ BaseInline: BaseInline{},
+ value: value,
+ AutoLinkType: typ,
+ }
+}
+
+// A RawHTML struct represents an inline raw HTML of the Markdown text.
+type RawHTML struct {
+ BaseInline
+ Segments *textm.Segments
+}
+
+// Inline implements Inline.Inline.
+func (n *RawHTML) Inline() {}
+
+// Dump implements Node.Dump.
+func (n *RawHTML) Dump(source []byte, level int) {
+ m := map[string]string{}
+ t := []string{}
+ for i := 0; i < n.Segments.Len(); i++ {
+ segment := n.Segments.At(i)
+ t = append(t, string(segment.Value(source)))
+ }
+ m["RawText"] = strings.Join(t, "")
+ DumpHelper(n, source, level, m, nil)
+}
+
+// KindRawHTML is a NodeKind of the RawHTML node.
+var KindRawHTML = NewNodeKind("RawHTML")
+
+// Kind implements Node.Kind.
+func (n *RawHTML) Kind() NodeKind {
+ return KindRawHTML
+}
+
+// NewRawHTML returns a new RawHTML node.
+func NewRawHTML() *RawHTML {
+ return &RawHTML{
+ Segments: textm.NewSegments(),
+ }
+}
--- /dev/null
+package ast
+
+import (
+ gast "github.com/yuin/goldmark/ast"
+)
+
+// A DefinitionList struct represents a definition list of Markdown
+// (PHP Markdown Extra) text.
+type DefinitionList struct {
+ gast.BaseBlock
+ Offset int
+ TemporaryParagraph *gast.Paragraph
+}
+
+// Dump implements Node.Dump.
+func (n *DefinitionList) Dump(source []byte, level int) {
+ gast.DumpHelper(n, source, level, nil, nil)
+}
+
+// KindDefinitionList is a NodeKind of the DefinitionList node.
+var KindDefinitionList = gast.NewNodeKind("DefinitionList")
+
+// Kind implements Node.Kind.
+func (n *DefinitionList) Kind() gast.NodeKind {
+ return KindDefinitionList
+}
+
+// NewDefinitionList returns a new DefinitionList node.
+func NewDefinitionList(offset int, para *gast.Paragraph) *DefinitionList {
+ return &DefinitionList{
+ Offset: offset,
+ TemporaryParagraph: para,
+ }
+}
+
+// A DefinitionTerm struct represents a definition list term of Markdown
+// (PHP Markdown Extra) text.
+type DefinitionTerm struct {
+ gast.BaseBlock
+}
+
+// Dump implements Node.Dump.
+func (n *DefinitionTerm) Dump(source []byte, level int) {
+ gast.DumpHelper(n, source, level, nil, nil)
+}
+
+// KindDefinitionTerm is a NodeKind of the DefinitionTerm node.
+var KindDefinitionTerm = gast.NewNodeKind("DefinitionTerm")
+
+// Kind implements Node.Kind.
+func (n *DefinitionTerm) Kind() gast.NodeKind {
+ return KindDefinitionTerm
+}
+
+// NewDefinitionTerm returns a new DefinitionTerm node.
+func NewDefinitionTerm() *DefinitionTerm {
+ return &DefinitionTerm{}
+}
+
+// A DefinitionDescription struct represents a definition list description of Markdown
+// (PHP Markdown Extra) text.
+type DefinitionDescription struct {
+ gast.BaseBlock
+ IsTight bool
+}
+
+// Dump implements Node.Dump.
+func (n *DefinitionDescription) Dump(source []byte, level int) {
+ gast.DumpHelper(n, source, level, nil, nil)
+}
+
+// KindDefinitionDescription is a NodeKind of the DefinitionDescription node.
+var KindDefinitionDescription = gast.NewNodeKind("DefinitionDescription")
+
+// Kind implements Node.Kind.
+func (n *DefinitionDescription) Kind() gast.NodeKind {
+ return KindDefinitionDescription
+}
+
+// NewDefinitionDescription returns a new DefinitionDescription node.
+func NewDefinitionDescription() *DefinitionDescription {
+ return &DefinitionDescription{}
+}
--- /dev/null
+package ast
+
+import (
+ "fmt"
+ gast "github.com/yuin/goldmark/ast"
+)
+
+// A FootnoteLink struct represents a link to a footnote of Markdown
+// (PHP Markdown Extra) text.
+type FootnoteLink struct {
+ gast.BaseInline
+ Index int
+}
+
+// Dump implements Node.Dump.
+func (n *FootnoteLink) Dump(source []byte, level int) {
+ m := map[string]string{}
+ m["Index"] = fmt.Sprintf("%v", n.Index)
+ gast.DumpHelper(n, source, level, m, nil)
+}
+
+// KindFootnoteLink is a NodeKind of the FootnoteLink node.
+var KindFootnoteLink = gast.NewNodeKind("FootnoteLink")
+
+// Kind implements Node.Kind.
+func (n *FootnoteLink) Kind() gast.NodeKind {
+ return KindFootnoteLink
+}
+
+// NewFootnoteLink returns a new FootnoteLink node.
+func NewFootnoteLink(index int) *FootnoteLink {
+ return &FootnoteLink{
+ Index: index,
+ }
+}
+
+// A FootnoteBackLink struct represents a link from a footnote back to its
+// reference in the Markdown (PHP Markdown Extra) text.
+type FootnoteBackLink struct {
+ gast.BaseInline
+ Index int
+}
+
+// Dump implements Node.Dump.
+func (n *FootnoteBackLink) Dump(source []byte, level int) {
+ m := map[string]string{}
+ m["Index"] = fmt.Sprintf("%v", n.Index)
+ gast.DumpHelper(n, source, level, m, nil)
+}
+
+// KindFootnoteBackLink is a NodeKind of the FootnoteBackLink node.
+var KindFootnoteBackLink = gast.NewNodeKind("FootnoteBackLink")
+
+// Kind implements Node.Kind.
+func (n *FootnoteBackLink) Kind() gast.NodeKind {
+ return KindFootnoteBackLink
+}
+
+// NewFootnoteBackLink returns a new FootnoteBackLink node.
+func NewFootnoteBackLink(index int) *FootnoteBackLink {
+ return &FootnoteBackLink{
+ Index: index,
+ }
+}
+
+// A Footnote struct represents a footnote of Markdown
+// (PHP Markdown Extra) text.
+type Footnote struct {
+ gast.BaseBlock
+ Ref []byte
+ Index int
+}
+
+// Dump implements Node.Dump.
+func (n *Footnote) Dump(source []byte, level int) {
+ m := map[string]string{}
+ m["Index"] = fmt.Sprintf("%v", n.Index)
+ m["Ref"] = fmt.Sprintf("%s", n.Ref)
+ gast.DumpHelper(n, source, level, m, nil)
+}
+
+// KindFootnote is a NodeKind of the Footnote node.
+var KindFootnote = gast.NewNodeKind("Footnote")
+
+// Kind implements Node.Kind.
+func (n *Footnote) Kind() gast.NodeKind {
+ return KindFootnote
+}
+
+// NewFootnote returns a new Footnote node.
+func NewFootnote(ref []byte) *Footnote {
+ return &Footnote{
+ Ref: ref,
+ Index: -1,
+ }
+}
+
+// A FootnoteList struct represents footnotes of Markdown
+// (PHP Markdown Extra) text.
+type FootnoteList struct {
+ gast.BaseBlock
+ Count int
+}
+
+// Dump implements Node.Dump.
+func (n *FootnoteList) Dump(source []byte, level int) {
+ m := map[string]string{}
+ m["Count"] = fmt.Sprintf("%v", n.Count)
+ gast.DumpHelper(n, source, level, m, nil)
+}
+
+// KindFootnoteList is a NodeKind of the FootnoteList node.
+var KindFootnoteList = gast.NewNodeKind("FootnoteList")
+
+// Kind implements Node.Kind.
+func (n *FootnoteList) Kind() gast.NodeKind {
+ return KindFootnoteList
+}
+
+// NewFootnoteList returns a new FootnoteList node.
+func NewFootnoteList() *FootnoteList {
+ return &FootnoteList{
+ Count: 0,
+ }
+}
--- /dev/null
+// Package ast defines AST nodes that represent the extension's elements
+package ast
+
+import (
+ gast "github.com/yuin/goldmark/ast"
+)
+
+// A Strikethrough struct represents a strikethrough of GFM text.
+type Strikethrough struct {
+ gast.BaseInline
+}
+
+// Dump implements Node.Dump.
+func (n *Strikethrough) Dump(source []byte, level int) {
+ gast.DumpHelper(n, source, level, nil, nil)
+}
+
+// KindStrikethrough is a NodeKind of the Strikethrough node.
+var KindStrikethrough = gast.NewNodeKind("Strikethrough")
+
+// Kind implements Node.Kind.
+func (n *Strikethrough) Kind() gast.NodeKind {
+ return KindStrikethrough
+}
+
+// NewStrikethrough returns a new Strikethrough node.
+func NewStrikethrough() *Strikethrough {
+ return &Strikethrough{}
+}
--- /dev/null
+package ast
+
+import (
+ "fmt"
+ gast "github.com/yuin/goldmark/ast"
+ "strings"
+)
+
+// Alignment is a text alignment of table cells.
+type Alignment int
+
+const (
+ // AlignLeft indicates text should be left justified.
+ AlignLeft Alignment = iota + 1
+
+ // AlignRight indicates text should be right justified.
+ AlignRight
+
+ // AlignCenter indicates text should be centered.
+ AlignCenter
+
+ // AlignNone indicates text should be aligned in the default manner.
+ AlignNone
+)
+
+func (a Alignment) String() string {
+ switch a {
+ case AlignLeft:
+ return "left"
+ case AlignRight:
+ return "right"
+ case AlignCenter:
+ return "center"
+ case AlignNone:
+ return "none"
+ }
+ return ""
+}
+
+// A Table struct represents a table of Markdown(GFM) text.
+type Table struct {
+ gast.BaseBlock
+
+ // Alignments is a list of alignments of the columns.
+ Alignments []Alignment
+}
+
+// Dump implements Node.Dump
+func (n *Table) Dump(source []byte, level int) {
+ gast.DumpHelper(n, source, level, nil, func(level int) {
+ indent := strings.Repeat(" ", level)
+ fmt.Printf("%sAlignments {\n", indent)
+ for i, alignment := range n.Alignments {
+ indent2 := strings.Repeat(" ", level+1)
+ fmt.Printf("%s%s", indent2, alignment.String())
+ if i != len(n.Alignments)-1 {
+ fmt.Println("")
+ }
+ }
+ fmt.Printf("\n%s}\n", indent)
+ })
+}
+
+// KindTable is a NodeKind of the Table node.
+var KindTable = gast.NewNodeKind("Table")
+
+// Kind implements Node.Kind.
+func (n *Table) Kind() gast.NodeKind {
+ return KindTable
+}
+
+// NewTable returns a new Table node.
+func NewTable() *Table {
+ return &Table{
+ Alignments: []Alignment{},
+ }
+}
+
+// A TableRow struct represents a table row of Markdown(GFM) text.
+type TableRow struct {
+ gast.BaseBlock
+ Alignments []Alignment
+}
+
+// Dump implements Node.Dump.
+func (n *TableRow) Dump(source []byte, level int) {
+ gast.DumpHelper(n, source, level, nil, nil)
+}
+
+// KindTableRow is a NodeKind of the TableRow node.
+var KindTableRow = gast.NewNodeKind("TableRow")
+
+// Kind implements Node.Kind.
+func (n *TableRow) Kind() gast.NodeKind {
+ return KindTableRow
+}
+
+// NewTableRow returns a new TableRow node.
+func NewTableRow(alignments []Alignment) *TableRow {
+ return &TableRow{Alignments: alignments}
+}
+
+// A TableHeader struct represents a table header of Markdown(GFM) text.
+type TableHeader struct {
+ gast.BaseBlock
+ Alignments []Alignment
+}
+
+// KindTableHeader is a NodeKind of the TableHeader node.
+var KindTableHeader = gast.NewNodeKind("TableHeader")
+
+// Kind implements Node.Kind.
+func (n *TableHeader) Kind() gast.NodeKind {
+ return KindTableHeader
+}
+
+// Dump implements Node.Dump.
+func (n *TableHeader) Dump(source []byte, level int) {
+ gast.DumpHelper(n, source, level, nil, nil)
+}
+
+// NewTableHeader returns a new TableHeader node.
+func NewTableHeader(row *TableRow) *TableHeader {
+ n := &TableHeader{}
+ for c := row.FirstChild(); c != nil; {
+ next := c.NextSibling()
+ n.AppendChild(n, c)
+ c = next
+ }
+ return n
+}
+
+// A TableCell struct represents a table cell of a Markdown(GFM) text.
+type TableCell struct {
+ gast.BaseBlock
+ Alignment Alignment
+}
+
+// Dump implements Node.Dump.
+func (n *TableCell) Dump(source []byte, level int) {
+ gast.DumpHelper(n, source, level, nil, nil)
+}
+
+// KindTableCell is a NodeKind of the TableCell node.
+var KindTableCell = gast.NewNodeKind("TableCell")
+
+// Kind implements Node.Kind.
+func (n *TableCell) Kind() gast.NodeKind {
+ return KindTableCell
+}
+
+// NewTableCell returns a new TableCell node.
+func NewTableCell() *TableCell {
+ return &TableCell{
+ Alignment: AlignNone,
+ }
+}
--- /dev/null
+package ast
+
+import (
+ "fmt"
+ gast "github.com/yuin/goldmark/ast"
+)
+
+// A TaskCheckBox struct represents a checkbox of a task list.
+type TaskCheckBox struct {
+ gast.BaseInline
+ IsChecked bool
+}
+
+// Dump implements Node.Dump.
+func (n *TaskCheckBox) Dump(source []byte, level int) {
+ m := map[string]string{
+ "Checked": fmt.Sprintf("%v", n.IsChecked),
+ }
+ gast.DumpHelper(n, source, level, m, nil)
+}
+
+// KindTaskCheckBox is a NodeKind of the TaskCheckBox node.
+var KindTaskCheckBox = gast.NewNodeKind("TaskCheckBox")
+
+// Kind implements Node.Kind.
+func (n *TaskCheckBox) Kind() gast.NodeKind {
+ return KindTaskCheckBox
+}
+
+// NewTaskCheckBox returns a new TaskCheckBox node.
+func NewTaskCheckBox(checked bool) *TaskCheckBox {
+ return &TaskCheckBox{
+ IsChecked: checked,
+ }
+}
--- /dev/null
+package extension
+
+import (
+ "github.com/yuin/goldmark"
+ gast "github.com/yuin/goldmark/ast"
+ "github.com/yuin/goldmark/extension/ast"
+ "github.com/yuin/goldmark/parser"
+ "github.com/yuin/goldmark/renderer"
+ "github.com/yuin/goldmark/renderer/html"
+ "github.com/yuin/goldmark/text"
+ "github.com/yuin/goldmark/util"
+)
+
+type definitionListParser struct {
+}
+
+var defaultDefinitionListParser = &definitionListParser{}
+
+// NewDefinitionListParser returns a new parser.BlockParser that
+// can parse PHP Markdown Extra definition lists.
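+//
+// A minimal example of the syntax this parser targets (illustrative only):
+//
+//   Apple
+//   :   A fruit.
+//
+// where the line starting with ':' opens the description and the preceding
+// paragraph line becomes the term.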
+func NewDefinitionListParser() parser.BlockParser {
+ return defaultDefinitionListParser
+}
+
+func (b *definitionListParser) Trigger() []byte {
+ return []byte{':'}
+}
+
+func (b *definitionListParser) Open(parent gast.Node, reader text.Reader, pc parser.Context) (gast.Node, parser.State) {
+ if _, ok := parent.(*ast.DefinitionList); ok {
+ return nil, parser.NoChildren
+ }
+ line, _ := reader.PeekLine()
+ pos := pc.BlockOffset()
+ indent := pc.BlockIndent()
+ if pos < 0 || line[pos] != ':' || indent != 0 {
+ return nil, parser.NoChildren
+ }
+
+ last := parent.LastChild()
+ // need 1 or more spaces after ':'
+ w, _ := util.IndentWidth(line[pos+1:], pos+1)
+ if w < 1 {
+ return nil, parser.NoChildren
+ }
+ if w >= 8 { // starts with indented code
+ w = 5
+ }
+ w += pos + 1 /* 1 = ':' */
+
+ para, lastIsParagraph := last.(*gast.Paragraph)
+ var list *ast.DefinitionList
+ status := parser.HasChildren
+ var ok bool
+ if lastIsParagraph {
+ list, ok = last.PreviousSibling().(*ast.DefinitionList)
+ if ok { // is not first item
+ list.Offset = w
+ list.TemporaryParagraph = para
+ } else { // is first item
+ list = ast.NewDefinitionList(w, para)
+ status |= parser.RequireParagraph
+ }
+ } else if list, ok = last.(*ast.DefinitionList); ok { // multiple description
+ list.Offset = w
+ list.TemporaryParagraph = nil
+ } else {
+ return nil, parser.NoChildren
+ }
+
+ return list, status
+}
+
+func (b *definitionListParser) Continue(node gast.Node, reader text.Reader, pc parser.Context) parser.State {
+ line, _ := reader.PeekLine()
+ if util.IsBlank(line) {
+ return parser.Continue | parser.HasChildren
+ }
+ list, _ := node.(*ast.DefinitionList)
+ w, _ := util.IndentWidth(line, reader.LineOffset())
+ if w < list.Offset {
+ return parser.Close
+ }
+ pos, padding := util.IndentPosition(line, reader.LineOffset(), list.Offset)
+ reader.AdvanceAndSetPadding(pos, padding)
+ return parser.Continue | parser.HasChildren
+}
+
+func (b *definitionListParser) Close(node gast.Node, reader text.Reader, pc parser.Context) {
+ // nothing to do
+}
+
+func (b *definitionListParser) CanInterruptParagraph() bool {
+ return true
+}
+
+func (b *definitionListParser) CanAcceptIndentedLine() bool {
+ return false
+}
+
+type definitionDescriptionParser struct {
+}
+
+var defaultDefinitionDescriptionParser = &definitionDescriptionParser{}
+
+// NewDefinitionDescriptionParser returns a new parser.BlockParser that
+// can parse definition descriptions that start with ':'.
+func NewDefinitionDescriptionParser() parser.BlockParser {
+ return defaultDefinitionDescriptionParser
+}
+
+func (b *definitionDescriptionParser) Trigger() []byte {
+ return []byte{':'}
+}
+
+func (b *definitionDescriptionParser) Open(parent gast.Node, reader text.Reader, pc parser.Context) (gast.Node, parser.State) {
+ line, _ := reader.PeekLine()
+ pos := pc.BlockOffset()
+ indent := pc.BlockIndent()
+ if pos < 0 || line[pos] != ':' || indent != 0 {
+ return nil, parser.NoChildren
+ }
+ list, _ := parent.(*ast.DefinitionList)
+ if list == nil {
+ return nil, parser.NoChildren
+ }
+ para := list.TemporaryParagraph
+ list.TemporaryParagraph = nil
+ if para != nil {
+ lines := para.Lines()
+ l := lines.Len()
+ for i := 0; i < l; i++ {
+ term := ast.NewDefinitionTerm()
+ segment := lines.At(i)
+ term.Lines().Append(segment.TrimRightSpace(reader.Source()))
+ list.AppendChild(list, term)
+ }
+ para.Parent().RemoveChild(para.Parent(), para)
+ }
+ cpos, padding := util.IndentPosition(line[pos+1:], pos+1, list.Offset-pos-1)
+ reader.AdvanceAndSetPadding(cpos, padding)
+
+ return ast.NewDefinitionDescription(), parser.HasChildren
+}
+
+func (b *definitionDescriptionParser) Continue(node gast.Node, reader text.Reader, pc parser.Context) parser.State {
+ // The definitionListParser detects the end of the description,
+ // so this method will never be called.
+ return parser.Continue | parser.HasChildren
+}
+
+func (b *definitionDescriptionParser) Close(node gast.Node, reader text.Reader, pc parser.Context) {
+ desc := node.(*ast.DefinitionDescription)
+ desc.IsTight = !desc.HasBlankPreviousLines()
+ if desc.IsTight {
+ for gc := desc.FirstChild(); gc != nil; gc = gc.NextSibling() {
+ paragraph, ok := gc.(*gast.Paragraph)
+ if ok {
+ textBlock := gast.NewTextBlock()
+ textBlock.SetLines(paragraph.Lines())
+ desc.ReplaceChild(desc, paragraph, textBlock)
+ }
+ }
+ }
+}
+
+func (b *definitionDescriptionParser) CanInterruptParagraph() bool {
+ return true
+}
+
+func (b *definitionDescriptionParser) CanAcceptIndentedLine() bool {
+ return false
+}
+
+// DefinitionListHTMLRenderer is a renderer.NodeRenderer implementation that
+// renders DefinitionList nodes.
+type DefinitionListHTMLRenderer struct {
+ html.Config
+}
+
+// NewDefinitionListHTMLRenderer returns a new DefinitionListHTMLRenderer.
+func NewDefinitionListHTMLRenderer(opts ...html.Option) renderer.NodeRenderer {
+ r := &DefinitionListHTMLRenderer{
+ Config: html.NewConfig(),
+ }
+ for _, opt := range opts {
+ opt.SetHTMLOption(&r.Config)
+ }
+ return r
+}
+
+// RegisterFuncs implements renderer.NodeRenderer.RegisterFuncs.
+func (r *DefinitionListHTMLRenderer) RegisterFuncs(reg renderer.NodeRendererFuncRegisterer) {
+ reg.Register(ast.KindDefinitionList, r.renderDefinitionList)
+ reg.Register(ast.KindDefinitionTerm, r.renderDefinitionTerm)
+ reg.Register(ast.KindDefinitionDescription, r.renderDefinitionDescription)
+}
+
+// DefinitionListAttributeFilter defines attribute names which dl elements can have.
+var DefinitionListAttributeFilter = html.GlobalAttributeFilter
+
+func (r *DefinitionListHTMLRenderer) renderDefinitionList(w util.BufWriter, source []byte, n gast.Node, entering bool) (gast.WalkStatus, error) {
+ if entering {
+ if n.Attributes() != nil {
+ _, _ = w.WriteString("<dl")
+ html.RenderAttributes(w, n, DefinitionListAttributeFilter)
+ _, _ = w.WriteString(">\n")
+ } else {
+ _, _ = w.WriteString("<dl>\n")
+ }
+ } else {
+ _, _ = w.WriteString("</dl>\n")
+ }
+ return gast.WalkContinue, nil
+}
+
+// DefinitionTermAttributeFilter defines attribute names which dt elements can have.
+var DefinitionTermAttributeFilter = html.GlobalAttributeFilter
+
+func (r *DefinitionListHTMLRenderer) renderDefinitionTerm(w util.BufWriter, source []byte, n gast.Node, entering bool) (gast.WalkStatus, error) {
+ if entering {
+ if n.Attributes() != nil {
+ _, _ = w.WriteString("<dt")
+ html.RenderAttributes(w, n, DefinitionTermAttributeFilter)
+ _ = w.WriteByte('>')
+ } else {
+ _, _ = w.WriteString("<dt>")
+ }
+ } else {
+ _, _ = w.WriteString("</dt>\n")
+ }
+ return gast.WalkContinue, nil
+}
+
+// DefinitionDescriptionAttributeFilter defines attribute names which dd elements can have.
+var DefinitionDescriptionAttributeFilter = html.GlobalAttributeFilter
+
+func (r *DefinitionListHTMLRenderer) renderDefinitionDescription(w util.BufWriter, source []byte, node gast.Node, entering bool) (gast.WalkStatus, error) {
+ if entering {
+ n := node.(*ast.DefinitionDescription)
+ _, _ = w.WriteString("<dd")
+ if n.Attributes() != nil {
+ html.RenderAttributes(w, n, DefinitionDescriptionAttributeFilter)
+ }
+ if n.IsTight {
+ _, _ = w.WriteString(">")
+ } else {
+ _, _ = w.WriteString(">\n")
+ }
+ } else {
+ _, _ = w.WriteString("</dd>\n")
+ }
+ return gast.WalkContinue, nil
+}
+
+type definitionList struct {
+}
+
+// DefinitionList is an extension that allows you to use PHP Markdown Extra definition lists.
+var DefinitionList = &definitionList{}
+
+func (e *definitionList) Extend(m goldmark.Markdown) {
+ m.Parser().AddOptions(parser.WithBlockParsers(
+ util.Prioritized(NewDefinitionListParser(), 101),
+ util.Prioritized(NewDefinitionDescriptionParser(), 102),
+ ))
+ m.Renderer().AddOptions(renderer.WithNodeRenderers(
+ util.Prioritized(NewDefinitionListHTMLRenderer(), 500),
+ ))
+}
--- /dev/null
+package extension
+
+import (
+ "bytes"
+ "github.com/yuin/goldmark"
+ gast "github.com/yuin/goldmark/ast"
+ "github.com/yuin/goldmark/extension/ast"
+ "github.com/yuin/goldmark/parser"
+ "github.com/yuin/goldmark/renderer"
+ "github.com/yuin/goldmark/renderer/html"
+ "github.com/yuin/goldmark/text"
+ "github.com/yuin/goldmark/util"
+ "strconv"
+)
+
+var footnoteListKey = parser.NewContextKey()
+
+type footnoteBlockParser struct {
+}
+
+var defaultFootnoteBlockParser = &footnoteBlockParser{}
+
+// NewFootnoteBlockParser returns a new parser.BlockParser that can parse
+// footnotes of the Markdown (PHP Markdown Extra) text.
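+//
+// A footnote definition looks like this (illustrative example):
+//
+//   [^label]: footnote text
+//
+// that is, a '[' followed by '^', a non-blank label, ']' and ':'.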
+func NewFootnoteBlockParser() parser.BlockParser {
+ return defaultFootnoteBlockParser
+}
+
+func (b *footnoteBlockParser) Trigger() []byte {
+ return []byte{'['}
+}
+
+func (b *footnoteBlockParser) Open(parent gast.Node, reader text.Reader, pc parser.Context) (gast.Node, parser.State) {
+ line, segment := reader.PeekLine()
+ pos := pc.BlockOffset()
+ if pos < 0 || line[pos] != '[' {
+ return nil, parser.NoChildren
+ }
+ pos++
+ if pos > len(line)-1 || line[pos] != '^' {
+ return nil, parser.NoChildren
+ }
+ open := pos + 1
+ closes := 0
+ closure := util.FindClosure(line[pos+1:], '[', ']', false, false)
+ closes = pos + 1 + closure
+ next := closes + 1
+ if closure > -1 {
+ if next >= len(line) || line[next] != ':' {
+ return nil, parser.NoChildren
+ }
+ } else {
+ return nil, parser.NoChildren
+ }
+ padding := segment.Padding
+ label := reader.Value(text.NewSegment(segment.Start+open-padding, segment.Start+closes-padding))
+ if util.IsBlank(label) {
+ return nil, parser.NoChildren
+ }
+ item := ast.NewFootnote(label)
+
+ pos = next + 1 - padding
+ if pos >= len(line) {
+ reader.Advance(pos)
+ return item, parser.NoChildren
+ }
+ reader.AdvanceAndSetPadding(pos, padding)
+ return item, parser.HasChildren
+}
+
+func (b *footnoteBlockParser) Continue(node gast.Node, reader text.Reader, pc parser.Context) parser.State {
+ line, _ := reader.PeekLine()
+ if util.IsBlank(line) {
+ return parser.Continue | parser.HasChildren
+ }
+ childpos, padding := util.IndentPosition(line, reader.LineOffset(), 4)
+ if childpos < 0 {
+ return parser.Close
+ }
+ reader.AdvanceAndSetPadding(childpos, padding)
+ return parser.Continue | parser.HasChildren
+}
+
+func (b *footnoteBlockParser) Close(node gast.Node, reader text.Reader, pc parser.Context) {
+ var list *ast.FootnoteList
+ if tlist := pc.Get(footnoteListKey); tlist != nil {
+ list = tlist.(*ast.FootnoteList)
+ } else {
+ list = ast.NewFootnoteList()
+ pc.Set(footnoteListKey, list)
+ node.Parent().InsertBefore(node.Parent(), node, list)
+ }
+ node.Parent().RemoveChild(node.Parent(), node)
+ list.AppendChild(list, node)
+}
+
+func (b *footnoteBlockParser) CanInterruptParagraph() bool {
+ return true
+}
+
+func (b *footnoteBlockParser) CanAcceptIndentedLine() bool {
+ return false
+}
+
+type footnoteParser struct {
+}
+
+var defaultFootnoteParser = &footnoteParser{}
+
+// NewFootnoteParser returns a new parser.InlineParser that can parse
+// footnote links of the Markdown (PHP Markdown Extra) text.
+func NewFootnoteParser() parser.InlineParser {
+ return defaultFootnoteParser
+}
+
+func (s *footnoteParser) Trigger() []byte {
+ // The footnote syntax can conflict with the image syntax,
+ // so we also need to trigger this parser on '!'.
+ return []byte{'!', '['}
+}
+
+func (s *footnoteParser) Parse(parent gast.Node, block text.Reader, pc parser.Context) gast.Node {
+ line, segment := block.PeekLine()
+ pos := 1
+ if len(line) > 0 && line[0] == '!' {
+ pos++
+ }
+ if pos >= len(line) || line[pos] != '^' {
+ return nil
+ }
+ pos++
+ if pos >= len(line) {
+ return nil
+ }
+ open := pos
+ closure := util.FindClosure(line[pos:], '[', ']', false, false)
+ if closure < 0 {
+ return nil
+ }
+ closes := pos + closure
+ value := block.Value(text.NewSegment(segment.Start+open, segment.Start+closes))
+ block.Advance(closes + 1)
+
+ var list *ast.FootnoteList
+ if tlist := pc.Get(footnoteListKey); tlist != nil {
+ list = tlist.(*ast.FootnoteList)
+ }
+ if list == nil {
+ return nil
+ }
+ index := 0
+ for def := list.FirstChild(); def != nil; def = def.NextSibling() {
+ d := def.(*ast.Footnote)
+ if bytes.Equal(d.Ref, value) {
+ if d.Index < 0 {
+ list.Count += 1
+ d.Index = list.Count
+ }
+ index = d.Index
+ break
+ }
+ }
+ if index == 0 {
+ return nil
+ }
+
+ return ast.NewFootnoteLink(index)
+}
+
+type footnoteASTTransformer struct {
+}
+
+var defaultFootnoteASTTransformer = &footnoteASTTransformer{}
+
+// NewFootnoteASTTransformer returns a new parser.ASTTransformer that
+// inserts a footnote list at the end of the document.
+func NewFootnoteASTTransformer() parser.ASTTransformer {
+ return defaultFootnoteASTTransformer
+}
+
+func (a *footnoteASTTransformer) Transform(node *gast.Document, reader text.Reader, pc parser.Context) {
+ var list *ast.FootnoteList
+ if tlist := pc.Get(footnoteListKey); tlist != nil {
+ list = tlist.(*ast.FootnoteList)
+ } else {
+ return
+ }
+ pc.Set(footnoteListKey, nil)
+ for footnote := list.FirstChild(); footnote != nil; {
+ var container gast.Node = footnote
+ next := footnote.NextSibling()
+ if fc := container.LastChild(); fc != nil && gast.IsParagraph(fc) {
+ container = fc
+ }
+ index := footnote.(*ast.Footnote).Index
+ if index < 0 {
+ list.RemoveChild(list, footnote)
+ } else {
+ container.AppendChild(container, ast.NewFootnoteBackLink(index))
+ }
+ footnote = next
+ }
+ list.SortChildren(func(n1, n2 gast.Node) int {
+ if n1.(*ast.Footnote).Index < n2.(*ast.Footnote).Index {
+ return -1
+ }
+ return 1
+ })
+ if list.Count <= 0 {
+ list.Parent().RemoveChild(list.Parent(), list)
+ return
+ }
+
+ node.AppendChild(node, list)
+}
+
+// FootnoteHTMLRenderer is a renderer.NodeRenderer implementation that
+// renders FootnoteLink nodes.
+type FootnoteHTMLRenderer struct {
+ html.Config
+}
+
+// NewFootnoteHTMLRenderer returns a new FootnoteHTMLRenderer.
+func NewFootnoteHTMLRenderer(opts ...html.Option) renderer.NodeRenderer {
+ r := &FootnoteHTMLRenderer{
+ Config: html.NewConfig(),
+ }
+ for _, opt := range opts {
+ opt.SetHTMLOption(&r.Config)
+ }
+ return r
+}
+
+// RegisterFuncs implements renderer.NodeRenderer.RegisterFuncs.
+func (r *FootnoteHTMLRenderer) RegisterFuncs(reg renderer.NodeRendererFuncRegisterer) {
+ reg.Register(ast.KindFootnoteLink, r.renderFootnoteLink)
+ reg.Register(ast.KindFootnoteBackLink, r.renderFootnoteBackLink)
+ reg.Register(ast.KindFootnote, r.renderFootnote)
+ reg.Register(ast.KindFootnoteList, r.renderFootnoteList)
+}
+
+func (r *FootnoteHTMLRenderer) renderFootnoteLink(w util.BufWriter, source []byte, node gast.Node, entering bool) (gast.WalkStatus, error) {
+ if entering {
+ n := node.(*ast.FootnoteLink)
+ is := strconv.Itoa(n.Index)
+ _, _ = w.WriteString(`<sup id="fnref:`)
+ _, _ = w.WriteString(is)
+ _, _ = w.WriteString(`"><a href="#fn:`)
+ _, _ = w.WriteString(is)
+ _, _ = w.WriteString(`" class="footnote-ref" role="doc-noteref">`)
+ _, _ = w.WriteString(is)
+ _, _ = w.WriteString(`</a></sup>`)
+ }
+ return gast.WalkContinue, nil
+}
+
+func (r *FootnoteHTMLRenderer) renderFootnoteBackLink(w util.BufWriter, source []byte, node gast.Node, entering bool) (gast.WalkStatus, error) {
+ if entering {
+ n := node.(*ast.FootnoteBackLink)
+ is := strconv.Itoa(n.Index)
+ _, _ = w.WriteString(` <a href="#fnref:`)
+ _, _ = w.WriteString(is)
+ _, _ = w.WriteString(`" class="footnote-backref" role="doc-backlink">`)
+ _, _ = w.WriteString("↩︎")
+ _, _ = w.WriteString(`</a>`)
+ }
+ return gast.WalkContinue, nil
+}
+
+func (r *FootnoteHTMLRenderer) renderFootnote(w util.BufWriter, source []byte, node gast.Node, entering bool) (gast.WalkStatus, error) {
+ n := node.(*ast.Footnote)
+ is := strconv.Itoa(n.Index)
+ if entering {
+ _, _ = w.WriteString(`<li id="fn:`)
+ _, _ = w.WriteString(is)
+ _, _ = w.WriteString(`" role="doc-endnote"`)
+ if node.Attributes() != nil {
+ html.RenderAttributes(w, node, html.ListItemAttributeFilter)
+ }
+ _, _ = w.WriteString(">\n")
+ } else {
+ _, _ = w.WriteString("</li>\n")
+ }
+ return gast.WalkContinue, nil
+}
+
+func (r *FootnoteHTMLRenderer) renderFootnoteList(w util.BufWriter, source []byte, node gast.Node, entering bool) (gast.WalkStatus, error) {
+ tag := "section"
+ if r.Config.XHTML {
+ tag = "div"
+ }
+ if entering {
+ _, _ = w.WriteString("<")
+ _, _ = w.WriteString(tag)
+ _, _ = w.WriteString(` class="footnotes" role="doc-endnotes"`)
+ if node.Attributes() != nil {
+ html.RenderAttributes(w, node, html.GlobalAttributeFilter)
+ }
+ _ = w.WriteByte('>')
+ if r.Config.XHTML {
+ _, _ = w.WriteString("\n<hr />\n")
+ } else {
+ _, _ = w.WriteString("\n<hr>\n")
+ }
+ _, _ = w.WriteString("<ol>\n")
+ } else {
+ _, _ = w.WriteString("</ol>\n")
+ _, _ = w.WriteString("</")
+ _, _ = w.WriteString(tag)
+ _, _ = w.WriteString(">\n")
+ }
+ return gast.WalkContinue, nil
+}
+
+type footnote struct {
+}
+
+// Footnote is an extension that allows you to use PHP Markdown Extra footnotes.
+var Footnote = &footnote{}
+
+func (e *footnote) Extend(m goldmark.Markdown) {
+ m.Parser().AddOptions(
+ parser.WithBlockParsers(
+ util.Prioritized(NewFootnoteBlockParser(), 999),
+ ),
+ parser.WithInlineParsers(
+ util.Prioritized(NewFootnoteParser(), 101),
+ ),
+ parser.WithASTTransformers(
+ util.Prioritized(NewFootnoteASTTransformer(), 999),
+ ),
+ )
+ m.Renderer().AddOptions(renderer.WithNodeRenderers(
+ util.Prioritized(NewFootnoteHTMLRenderer(), 500),
+ ))
+}
--- /dev/null
+package extension
+
+import (
+ "github.com/yuin/goldmark"
+)
+
+type gfm struct {
+}
+
+// GFM is an extension that provides GitHub Flavored Markdown functionalities.
+var GFM = &gfm{}
+
+func (e *gfm) Extend(m goldmark.Markdown) {
+ Linkify.Extend(m)
+ Table.Extend(m)
+ Strikethrough.Extend(m)
+ TaskList.Extend(m)
+}
--- /dev/null
+package extension
+
+import (
+ "bytes"
+ "github.com/yuin/goldmark"
+ "github.com/yuin/goldmark/ast"
+ "github.com/yuin/goldmark/parser"
+ "github.com/yuin/goldmark/text"
+ "github.com/yuin/goldmark/util"
+ "regexp"
+)
+
+var wwwURLRegxp = regexp.MustCompile(`^www\.[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}((?:/|[#?])[-a-zA-Z0-9@:%_\+.~#!?&//=\(\);,'">\^{}\[\]` + "`" + `]*)?`)
+
+var urlRegexp = regexp.MustCompile(`^(?:http|https|ftp):\/\/(?:www\.)?[-a-zA-Z0-9@:%._\+~#=]{2,256}\.[a-z]{2,6}((?:/|[#?])[-a-zA-Z0-9@:%_+.~#$!?&//=\(\);,'">\^{}\[\]` + "`" + `]*)?`)
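+// As a rough illustration, urlRegexp matches strings such as
+// "https://example.com/path", while wwwURLRegxp matches bare "www.example.com"
+// style links, which the parser later gives an implied "http" protocol.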
+
+type linkifyParser struct {
+}
+
+var defaultLinkifyParser = &linkifyParser{}
+
+// NewLinkifyParser returns a new InlineParser that can parse
+// text that looks like a URL.
+func NewLinkifyParser() parser.InlineParser {
+ return defaultLinkifyParser
+}
+
+func (s *linkifyParser) Trigger() []byte {
+ // ' ' indicates any whitespace and the start of a line
+ return []byte{' ', '*', '_', '~', '('}
+}
+
+var protoHTTP = []byte("http:")
+var protoHTTPS = []byte("https:")
+var protoFTP = []byte("ftp:")
+var domainWWW = []byte("www.")
+
+func (s *linkifyParser) Parse(parent ast.Node, block text.Reader, pc parser.Context) ast.Node {
+ if pc.IsInLinkLabel() {
+ return nil
+ }
+ line, segment := block.PeekLine()
+ consumes := 0
+ start := segment.Start
+ c := line[0]
+ // advance if current position is not a line head.
+ if c == ' ' || c == '*' || c == '_' || c == '~' || c == '(' {
+ consumes++
+ start++
+ line = line[1:]
+ }
+
+ var m []int
+ var protocol []byte
+ var typ ast.AutoLinkType = ast.AutoLinkURL
+ if bytes.HasPrefix(line, protoHTTP) || bytes.HasPrefix(line, protoHTTPS) || bytes.HasPrefix(line, protoFTP) {
+ m = urlRegexp.FindSubmatchIndex(line)
+ }
+ if m == nil && bytes.HasPrefix(line, domainWWW) {
+ m = wwwURLRegxp.FindSubmatchIndex(line)
+ protocol = []byte("http")
+ }
+ if m != nil {
+ lastChar := line[m[1]-1]
+ if lastChar == '.' {
+ m[1]--
+ } else if lastChar == ')' {
+ closing := 0
+ for i := m[1] - 1; i >= m[0]; i-- {
+ if line[i] == ')' {
+ closing++
+ } else if line[i] == '(' {
+ closing--
+ }
+ }
+ if closing > 0 {
+ m[1] -= closing
+ }
+ } else if lastChar == ';' {
+ i := m[1] - 2
+ for ; i >= m[0]; i-- {
+ if util.IsAlphaNumeric(line[i]) {
+ continue
+ }
+ break
+ }
+ if i != m[1]-2 {
+ if line[i] == '&' {
+ m[1] -= m[1] - i
+ }
+ }
+ }
+ }
+ if m == nil {
+ if len(line) > 0 && util.IsPunct(line[0]) {
+ return nil
+ }
+ typ = ast.AutoLinkEmail
+ stop := util.FindEmailIndex(line)
+ if stop < 0 {
+ return nil
+ }
+ at := bytes.IndexByte(line, '@')
+ m = []int{0, stop, at, stop - 1}
+ if m == nil || bytes.IndexByte(line[m[2]:m[3]], '.') < 0 {
+ return nil
+ }
+ lastChar := line[m[1]-1]
+ if lastChar == '.' {
+ m[1]--
+ }
+ if m[1] < len(line) {
+ nextChar := line[m[1]]
+ if nextChar == '-' || nextChar == '_' {
+ return nil
+ }
+ }
+ }
+ if m == nil {
+ return nil
+ }
+ if consumes != 0 {
+ s := segment.WithStop(segment.Start + 1)
+ ast.MergeOrAppendTextSegment(parent, s)
+ }
+ consumes += m[1]
+ block.Advance(consumes)
+ n := ast.NewTextSegment(text.NewSegment(start, start+m[1]))
+ link := ast.NewAutoLink(typ, n)
+ link.Protocol = protocol
+ return link
+}
+
+func (s *linkifyParser) CloseBlock(parent ast.Node, pc parser.Context) {
+ // nothing to do
+}
+
+type linkify struct {
+}
+
+// Linkify is an extension that allows you to parse text that looks like a URL.
+var Linkify = &linkify{}
+
+func (e *linkify) Extend(m goldmark.Markdown) {
+ m.Parser().AddOptions(
+ parser.WithInlineParsers(
+ util.Prioritized(NewLinkifyParser(), 999),
+ ),
+ )
+}
--- /dev/null
+package extension
+
+import (
+ "github.com/yuin/goldmark"
+ gast "github.com/yuin/goldmark/ast"
+ "github.com/yuin/goldmark/extension/ast"
+ "github.com/yuin/goldmark/parser"
+ "github.com/yuin/goldmark/renderer"
+ "github.com/yuin/goldmark/renderer/html"
+ "github.com/yuin/goldmark/text"
+ "github.com/yuin/goldmark/util"
+)
+
+type strikethroughDelimiterProcessor struct {
+}
+
+func (p *strikethroughDelimiterProcessor) IsDelimiter(b byte) bool {
+ return b == '~'
+}
+
+func (p *strikethroughDelimiterProcessor) CanOpenCloser(opener, closer *parser.Delimiter) bool {
+ return opener.Char == closer.Char
+}
+
+func (p *strikethroughDelimiterProcessor) OnMatch(consumes int) gast.Node {
+ return ast.NewStrikethrough()
+}
+
+var defaultStrikethroughDelimiterProcessor = &strikethroughDelimiterProcessor{}
+
+type strikethroughParser struct {
+}
+
+var defaultStrikethroughParser = &strikethroughParser{}
+
+// NewStrikethroughParser returns a new InlineParser that parses
+// strikethrough expressions.
+func NewStrikethroughParser() parser.InlineParser {
+ return defaultStrikethroughParser
+}
+
+func (s *strikethroughParser) Trigger() []byte {
+ return []byte{'~'}
+}
+
+func (s *strikethroughParser) Parse(parent gast.Node, block text.Reader, pc parser.Context) gast.Node {
+ before := block.PrecendingCharacter()
+ line, segment := block.PeekLine()
+ node := parser.ScanDelimiter(line, before, 2, defaultStrikethroughDelimiterProcessor)
+ if node == nil {
+ return nil
+ }
+ node.Segment = segment.WithStop(segment.Start + node.OriginalLength)
+ block.Advance(node.OriginalLength)
+ pc.PushDelimiter(node)
+ return node
+}
+
+func (s *strikethroughParser) CloseBlock(parent gast.Node, pc parser.Context) {
+ // nothing to do
+}
+
+// StrikethroughHTMLRenderer is a renderer.NodeRenderer implementation that
+// renders Strikethrough nodes.
+type StrikethroughHTMLRenderer struct {
+ html.Config
+}
+
+// NewStrikethroughHTMLRenderer returns a new StrikethroughHTMLRenderer.
+func NewStrikethroughHTMLRenderer(opts ...html.Option) renderer.NodeRenderer {
+ r := &StrikethroughHTMLRenderer{
+ Config: html.NewConfig(),
+ }
+ for _, opt := range opts {
+ opt.SetHTMLOption(&r.Config)
+ }
+ return r
+}
+
+// RegisterFuncs implements renderer.NodeRenderer.RegisterFuncs.
+func (r *StrikethroughHTMLRenderer) RegisterFuncs(reg renderer.NodeRendererFuncRegisterer) {
+ reg.Register(ast.KindStrikethrough, r.renderStrikethrough)
+}
+
+// StrikethroughAttributeFilter defines attribute names which del elements can have.
+var StrikethroughAttributeFilter = html.GlobalAttributeFilter
+
+func (r *StrikethroughHTMLRenderer) renderStrikethrough(w util.BufWriter, source []byte, n gast.Node, entering bool) (gast.WalkStatus, error) {
+ if entering {
+ if n.Attributes() != nil {
+ _, _ = w.WriteString("<del")
+ html.RenderAttributes(w, n, StrikethroughAttributeFilter)
+ _ = w.WriteByte('>')
+ } else {
+ _, _ = w.WriteString("<del>")
+ }
+ } else {
+ _, _ = w.WriteString("</del>")
+ }
+ return gast.WalkContinue, nil
+}
+
+type strikethrough struct {
+}
+
+// Strikethrough is an extension that allows you to use strikethrough expressions like '~~text~~'.
+var Strikethrough = &strikethrough{}
+
+func (e *strikethrough) Extend(m goldmark.Markdown) {
+ m.Parser().AddOptions(parser.WithInlineParsers(
+ util.Prioritized(NewStrikethroughParser(), 500),
+ ))
+ m.Renderer().AddOptions(renderer.WithNodeRenderers(
+ util.Prioritized(NewStrikethroughHTMLRenderer(), 500),
+ ))
+}
--- /dev/null
+package extension
+
+import (
+ "bytes"
+ "fmt"
+ "regexp"
+
+ "github.com/yuin/goldmark"
+ gast "github.com/yuin/goldmark/ast"
+ "github.com/yuin/goldmark/extension/ast"
+ "github.com/yuin/goldmark/parser"
+ "github.com/yuin/goldmark/renderer"
+ "github.com/yuin/goldmark/renderer/html"
+ "github.com/yuin/goldmark/text"
+ "github.com/yuin/goldmark/util"
+)
+
+var tableDelimRegexp = regexp.MustCompile(`^[\s\-\|\:]+$`)
+var tableDelimLeft = regexp.MustCompile(`^\s*\:\-+\s*$`)
+var tableDelimRight = regexp.MustCompile(`^\s*\-+\:\s*$`)
+var tableDelimCenter = regexp.MustCompile(`^\s*\:\-+\:\s*$`)
+var tableDelimNone = regexp.MustCompile(`^\s*\-+\s*$`)
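+// For reference, a delimiter cell such as ":---" selects AlignLeft, "---:"
+// selects AlignRight, ":---:" selects AlignCenter, and a bare "---" selects
+// AlignNone (illustrative examples of what the regexps above accept).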
+
+type tableParagraphTransformer struct {
+}
+
+var defaultTableParagraphTransformer = &tableParagraphTransformer{}
+
+// NewTableParagraphTransformer returns a new ParagraphTransformer
+// that can transform paragraphs into tables.
+func NewTableParagraphTransformer() parser.ParagraphTransformer {
+ return defaultTableParagraphTransformer
+}
+
+func (b *tableParagraphTransformer) Transform(node *gast.Paragraph, reader text.Reader, pc parser.Context) {
+ lines := node.Lines()
+ if lines.Len() < 2 {
+ return
+ }
+ alignments := b.parseDelimiter(lines.At(1), reader)
+ if alignments == nil {
+ return
+ }
+ header := b.parseRow(lines.At(0), alignments, true, reader)
+ if header == nil || len(alignments) != header.ChildCount() {
+ return
+ }
+ table := ast.NewTable()
+ table.Alignments = alignments
+ table.AppendChild(table, ast.NewTableHeader(header))
+ for i := 2; i < lines.Len(); i++ {
+ table.AppendChild(table, b.parseRow(lines.At(i), alignments, false, reader))
+ }
+ node.Parent().InsertBefore(node.Parent(), node, table)
+ node.Parent().RemoveChild(node.Parent(), node)
+}
+
+func (b *tableParagraphTransformer) parseRow(segment text.Segment, alignments []ast.Alignment, isHeader bool, reader text.Reader) *ast.TableRow {
+ source := reader.Source()
+ line := segment.Value(source)
+ pos := 0
+ pos += util.TrimLeftSpaceLength(line)
+ limit := len(line)
+ limit -= util.TrimRightSpaceLength(line)
+ row := ast.NewTableRow(alignments)
+ if len(line) > 0 && line[pos] == '|' {
+ pos++
+ }
+ if len(line) > 0 && line[limit-1] == '|' {
+ limit--
+ }
+ i := 0
+ for ; pos < limit; i++ {
+ alignment := ast.AlignNone
+ if i >= len(alignments) {
+ if !isHeader {
+ return row
+ }
+ } else {
+ alignment = alignments[i]
+ }
+ closure := util.FindClosure(line[pos:], byte(0), '|', true, false)
+ if closure < 0 {
+ closure = len(line[pos:])
+ }
+ node := ast.NewTableCell()
+ seg := text.NewSegment(segment.Start+pos, segment.Start+pos+closure)
+ seg = seg.TrimLeftSpace(source)
+ seg = seg.TrimRightSpace(source)
+ node.Lines().Append(seg)
+ node.Alignment = alignment
+ row.AppendChild(row, node)
+ pos += closure + 1
+ }
+ for ; i < len(alignments); i++ {
+ row.AppendChild(row, ast.NewTableCell())
+ }
+ return row
+}
+
+func (b *tableParagraphTransformer) parseDelimiter(segment text.Segment, reader text.Reader) []ast.Alignment {
+ line := segment.Value(reader.Source())
+ if !tableDelimRegexp.Match(line) {
+ return nil
+ }
+ cols := bytes.Split(line, []byte{'|'})
+ if util.IsBlank(cols[0]) {
+ cols = cols[1:]
+ }
+ if len(cols) > 0 && util.IsBlank(cols[len(cols)-1]) {
+ cols = cols[:len(cols)-1]
+ }
+
+ var alignments []ast.Alignment
+ for _, col := range cols {
+ if tableDelimLeft.Match(col) {
+ alignments = append(alignments, ast.AlignLeft)
+ } else if tableDelimRight.Match(col) {
+ alignments = append(alignments, ast.AlignRight)
+ } else if tableDelimCenter.Match(col) {
+ alignments = append(alignments, ast.AlignCenter)
+ } else if tableDelimNone.Match(col) {
+ alignments = append(alignments, ast.AlignNone)
+ } else {
+ return nil
+ }
+ }
+ return alignments
+}
+
+// TableHTMLRenderer is a renderer.NodeRenderer implementation that
+// renders Table nodes.
+type TableHTMLRenderer struct {
+ html.Config
+}
+
+// NewTableHTMLRenderer returns a new TableHTMLRenderer.
+func NewTableHTMLRenderer(opts ...html.Option) renderer.NodeRenderer {
+ r := &TableHTMLRenderer{
+ Config: html.NewConfig(),
+ }
+ for _, opt := range opts {
+ opt.SetHTMLOption(&r.Config)
+ }
+ return r
+}
+
+// RegisterFuncs implements renderer.NodeRenderer.RegisterFuncs.
+func (r *TableHTMLRenderer) RegisterFuncs(reg renderer.NodeRendererFuncRegisterer) {
+ reg.Register(ast.KindTable, r.renderTable)
+ reg.Register(ast.KindTableHeader, r.renderTableHeader)
+ reg.Register(ast.KindTableRow, r.renderTableRow)
+ reg.Register(ast.KindTableCell, r.renderTableCell)
+}
+
+// TableAttributeFilter defines attribute names which table elements can have.
+var TableAttributeFilter = html.GlobalAttributeFilter.Extend(
+ []byte("align"), // [Deprecated]
+ []byte("bgcolor"), // [Deprecated]
+ []byte("border"), // [Deprecated]
+ []byte("cellpadding"), // [Deprecated]
+ []byte("cellspacing"), // [Deprecated]
+ []byte("frame"), // [Deprecated]
+ []byte("rules"), // [Deprecated]
+ []byte("summary"), // [Deprecated]
+ []byte("width"), // [Deprecated]
+)
+
+func (r *TableHTMLRenderer) renderTable(w util.BufWriter, source []byte, n gast.Node, entering bool) (gast.WalkStatus, error) {
+ if entering {
+ _, _ = w.WriteString("<table")
+ if n.Attributes() != nil {
+ html.RenderAttributes(w, n, TableAttributeFilter)
+ }
+ _, _ = w.WriteString(">\n")
+ } else {
+ _, _ = w.WriteString("</table>\n")
+ }
+ return gast.WalkContinue, nil
+}
+
+// TableHeaderAttributeFilter defines attribute names which <thead> elements can have.
+var TableHeaderAttributeFilter = html.GlobalAttributeFilter.Extend(
+ []byte("align"), // [Deprecated since HTML4] [Obsolete since HTML5]
+ []byte("bgcolor"), // [Not Standardized]
+ []byte("char"), // [Deprecated since HTML4] [Obsolete since HTML5]
+ []byte("charoff"), // [Deprecated since HTML4] [Obsolete since HTML5]
+ []byte("valign"), // [Deprecated since HTML4] [Obsolete since HTML5]
+)
+
+func (r *TableHTMLRenderer) renderTableHeader(w util.BufWriter, source []byte, n gast.Node, entering bool) (gast.WalkStatus, error) {
+ if entering {
+ _, _ = w.WriteString("<thead")
+ if n.Attributes() != nil {
+ html.RenderAttributes(w, n, TableHeaderAttributeFilter)
+ }
+ _, _ = w.WriteString(">\n")
+ _, _ = w.WriteString("<tr>\n") // Header <tr> has no separate handle
+ } else {
+ _, _ = w.WriteString("</tr>\n")
+ _, _ = w.WriteString("</thead>\n")
+ if n.NextSibling() != nil {
+ _, _ = w.WriteString("<tbody>\n")
+ }
+ }
+ return gast.WalkContinue, nil
+}
+
+// TableRowAttributeFilter defines attribute names which <tr> elements can have.
+var TableRowAttributeFilter = html.GlobalAttributeFilter.Extend(
+ []byte("align"), // [Obsolete since HTML5]
+ []byte("bgcolor"), // [Obsolete since HTML5]
+ []byte("char"), // [Obsolete since HTML5]
+ []byte("charoff"), // [Obsolete since HTML5]
+ []byte("valign"), // [Obsolete since HTML5]
+)
+
+func (r *TableHTMLRenderer) renderTableRow(w util.BufWriter, source []byte, n gast.Node, entering bool) (gast.WalkStatus, error) {
+ if entering {
+ _, _ = w.WriteString("<tr")
+ if n.Attributes() != nil {
+ html.RenderAttributes(w, n, TableRowAttributeFilter)
+ }
+ _, _ = w.WriteString(">\n")
+ } else {
+ _, _ = w.WriteString("</tr>\n")
+ if n.Parent().LastChild() == n {
+ _, _ = w.WriteString("</tbody>\n")
+ }
+ }
+ return gast.WalkContinue, nil
+}
+
+// TableThCellAttributeFilter defines attribute names which table <th> cells can have.
+var TableThCellAttributeFilter = html.GlobalAttributeFilter.Extend(
+ []byte("abbr"), // [OK] Contains a short abbreviated description of the cell's content [NOT OK in <td>]
+
+ []byte("align"), // [Obsolete since HTML5]
+ []byte("axis"), // [Obsolete since HTML5]
+ []byte("bgcolor"), // [Not Standardized]
+ []byte("char"), // [Obsolete since HTML5]
+ []byte("charoff"), // [Obsolete since HTML5]
+
+ []byte("colspan"), // [OK] Number of columns that the cell is to span
+ []byte("headers"), // [OK] This attribute contains a list of space-separated strings, each corresponding to the id attribute of the <th> elements that apply to this element
+
+ []byte("height"), // [Deprecated since HTML4] [Obsolete since HTML5]
+
+ []byte("rowspan"), // [OK] Number of rows that the cell is to span
+ []byte("scope"), // [OK] This enumerated attribute defines the cells that the header (defined in the <th>) element relates to [NOT OK in <td>]
+
+ []byte("valign"), // [Obsolete since HTML5]
+ []byte("width"), // [Deprecated since HTML4] [Obsolete since HTML5]
+)
+
+// TableTdCellAttributeFilter defines attribute names which table <td> cells can have.
+var TableTdCellAttributeFilter = html.GlobalAttributeFilter.Extend(
+ []byte("abbr"), // [Obsolete since HTML5] [OK in <th>]
+ []byte("align"), // [Obsolete since HTML5]
+ []byte("axis"), // [Obsolete since HTML5]
+ []byte("bgcolor"), // [Not Standardized]
+ []byte("char"), // [Obsolete since HTML5]
+ []byte("charoff"), // [Obsolete since HTML5]
+
+ []byte("colspan"), // [OK] Number of columns that the cell is to span
+ []byte("headers"), // [OK] This attribute contains a list of space-separated strings, each corresponding to the id attribute of the <th> elements that apply to this element
+
+ []byte("height"), // [Deprecated since HTML4] [Obsolete since HTML5]
+
+ []byte("rowspan"), // [OK] Number of rows that the cell is to span
+
+ []byte("scope"), // [Obsolete since HTML5] [OK in <th>]
+ []byte("valign"), // [Obsolete since HTML5]
+ []byte("width"), // [Deprecated since HTML4] [Obsolete since HTML5]
+)
+
+func (r *TableHTMLRenderer) renderTableCell(w util.BufWriter, source []byte, node gast.Node, entering bool) (gast.WalkStatus, error) {
+ n := node.(*ast.TableCell)
+ tag := "td"
+ if n.Parent().Kind() == ast.KindTableHeader {
+ tag = "th"
+ }
+ if entering {
+ align := ""
+ if n.Alignment != ast.AlignNone {
+ if _, ok := n.AttributeString("align"); !ok { // Skip align render if overridden
+ // TODO: "align" is deprecated. style="text-align:%s" instead?
+ align = fmt.Sprintf(` align="%s"`, n.Alignment.String())
+ }
+ }
+ fmt.Fprintf(w, "<%s", tag)
+ if n.Attributes() != nil {
+ if tag == "td" {
+ html.RenderAttributes(w, n, TableTdCellAttributeFilter) // <td>
+ } else {
+ html.RenderAttributes(w, n, TableThCellAttributeFilter) // <th>
+ }
+ }
+ fmt.Fprintf(w, "%s>", align)
+ } else {
+ fmt.Fprintf(w, "</%s>\n", tag)
+ }
+ return gast.WalkContinue, nil
+}
+
+type table struct {
+}
+
+// Table is an extension that allows you to use GFM tables.
+var Table = &table{}
+
+func (e *table) Extend(m goldmark.Markdown) {
+ m.Parser().AddOptions(parser.WithParagraphTransformers(
+ util.Prioritized(NewTableParagraphTransformer(), 200),
+ ))
+ m.Renderer().AddOptions(renderer.WithNodeRenderers(
+ util.Prioritized(NewTableHTMLRenderer(), 500),
+ ))
+}
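+
+// A minimal usage sketch (the markdown input below is illustrative only):
+//
+//    md := goldmark.New(goldmark.WithExtensions(Table))
+//    src := []byte("| Name | Age |\n| :--- | ---: |\n| Alice | 30 |\n")
+//    var buf bytes.Buffer
+//    _ = md.Convert(src, &buf)
+//
+// The pipe rows above are transformed into <table>/<thead>/<tbody> markup with
+// the column alignments taken from the delimiter row.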
--- /dev/null
+package extension
+
+import (
+ "github.com/yuin/goldmark"
+ gast "github.com/yuin/goldmark/ast"
+ "github.com/yuin/goldmark/extension/ast"
+ "github.com/yuin/goldmark/parser"
+ "github.com/yuin/goldmark/renderer"
+ "github.com/yuin/goldmark/renderer/html"
+ "github.com/yuin/goldmark/text"
+ "github.com/yuin/goldmark/util"
+ "regexp"
+)
+
+var taskListRegexp = regexp.MustCompile(`^\[([\sxX])\]\s*`)
+
+type taskCheckBoxParser struct {
+}
+
+var defaultTaskCheckBoxParser = &taskCheckBoxParser{}
+
+// NewTaskCheckBoxParser returns a new InlineParser that can parse
+// checkboxes in list items.
+// This parser must take precedence over the parser.LinkParser.
+func NewTaskCheckBoxParser() parser.InlineParser {
+ return defaultTaskCheckBoxParser
+}
+
+func (s *taskCheckBoxParser) Trigger() []byte {
+ return []byte{'['}
+}
+
+func (s *taskCheckBoxParser) Parse(parent gast.Node, block text.Reader, pc parser.Context) gast.Node {
+ // Given AST structure must be like
+ // - List
+ // - ListItem : parent.Parent
+ // - TextBlock : parent
+ // (current line)
+ if parent.Parent() == nil || parent.Parent().FirstChild() != parent {
+ return nil
+ }
+
+ if _, ok := parent.Parent().(*gast.ListItem); !ok {
+ return nil
+ }
+ line, _ := block.PeekLine()
+ m := taskListRegexp.FindSubmatchIndex(line)
+ if m == nil {
+ return nil
+ }
+ value := line[m[2]:m[3]][0]
+ block.Advance(m[1])
+ checked := value == 'x' || value == 'X'
+ return ast.NewTaskCheckBox(checked)
+}
+
+func (s *taskCheckBoxParser) CloseBlock(parent gast.Node, pc parser.Context) {
+ // nothing to do
+}
+
+// TaskCheckBoxHTMLRenderer is a renderer.NodeRenderer implementation that
+// renders checkboxes in list items.
+type TaskCheckBoxHTMLRenderer struct {
+ html.Config
+}
+
+// NewTaskCheckBoxHTMLRenderer returns a new TaskCheckBoxHTMLRenderer.
+func NewTaskCheckBoxHTMLRenderer(opts ...html.Option) renderer.NodeRenderer {
+ r := &TaskCheckBoxHTMLRenderer{
+ Config: html.NewConfig(),
+ }
+ for _, opt := range opts {
+ opt.SetHTMLOption(&r.Config)
+ }
+ return r
+}
+
+// RegisterFuncs implements renderer.NodeRenderer.RegisterFuncs.
+func (r *TaskCheckBoxHTMLRenderer) RegisterFuncs(reg renderer.NodeRendererFuncRegisterer) {
+ reg.Register(ast.KindTaskCheckBox, r.renderTaskCheckBox)
+}
+
+func (r *TaskCheckBoxHTMLRenderer) renderTaskCheckBox(w util.BufWriter, source []byte, node gast.Node, entering bool) (gast.WalkStatus, error) {
+ if !entering {
+ return gast.WalkContinue, nil
+ }
+ n := node.(*ast.TaskCheckBox)
+
+ if n.IsChecked {
+ w.WriteString(`<input checked="" disabled="" type="checkbox"`)
+ } else {
+ w.WriteString(`<input disabled="" type="checkbox"`)
+ }
+ if r.XHTML {
+ w.WriteString(" />")
+ } else {
+ w.WriteString(">")
+ }
+ return gast.WalkContinue, nil
+}
+
+type taskList struct {
+}
+
+// TaskList is an extension that allows you to use GFM task lists.
+var TaskList = &taskList{}
+
+func (e *taskList) Extend(m goldmark.Markdown) {
+ m.Parser().AddOptions(parser.WithInlineParsers(
+ util.Prioritized(NewTaskCheckBoxParser(), 0),
+ ))
+ m.Renderer().AddOptions(renderer.WithNodeRenderers(
+ util.Prioritized(NewTaskCheckBoxHTMLRenderer(), 500),
+ ))
+}
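+
+// A minimal usage sketch (the list items below are illustrative only):
+//
+//    md := goldmark.New(goldmark.WithExtensions(TaskList))
+//    var buf bytes.Buffer
+//    _ = md.Convert([]byte("- [x] done\n- [ ] todo\n"), &buf)
+//
+// Each item then begins with a disabled <input type="checkbox">, checked for
+// '[x]' or '[X]' and unchecked for '[ ]'.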
--- /dev/null
+package extension
+
+import (
+ "github.com/yuin/goldmark"
+ gast "github.com/yuin/goldmark/ast"
+ "github.com/yuin/goldmark/parser"
+ "github.com/yuin/goldmark/text"
+ "github.com/yuin/goldmark/util"
+)
+
+// TypographicPunctuation is a key for a punctuation mark that can be replaced
+// with a typographic entity.
+type TypographicPunctuation int
+
+const (
+ // LeftSingleQuote is '
+ LeftSingleQuote TypographicPunctuation = iota + 1
+ // RightSingleQuote is '
+ RightSingleQuote
+ // LeftDoubleQuote is "
+ LeftDoubleQuote
+ // RightDoubleQuote is "
+ RightDoubleQuote
+ // EnDash is --
+ EnDash
+ // EmDash is ---
+ EmDash
+ // Ellipsis is ...
+ Ellipsis
+ // LeftAngleQuote is <<
+ LeftAngleQuote
+ // RightAngleQuote is >>
+ RightAngleQuote
+
+ typographicPunctuationMax
+)
+
+// A TypographerConfig struct is a data structure that holds configuration of the
+// Typographer extension.
+type TypographerConfig struct {
+ Substitutions [][]byte
+}
+
+func newDefaultSubstitutions() [][]byte {
+ replacements := make([][]byte, typographicPunctuationMax)
+ replacements[LeftSingleQuote] = []byte("‘")
+ replacements[RightSingleQuote] = []byte("’")
+ replacements[LeftDoubleQuote] = []byte("“")
+ replacements[RightDoubleQuote] = []byte("”")
+ replacements[EnDash] = []byte("–")
+ replacements[EmDash] = []byte("—")
+ replacements[Ellipsis] = []byte("…")
+ replacements[LeftAngleQuote] = []byte("«")
+ replacements[RightAngleQuote] = []byte("»")
+
+ return replacements
+}
+
+// SetOption implements SetOptioner.
+func (b *TypographerConfig) SetOption(name parser.OptionName, value interface{}) {
+ switch name {
+ case optTypographicSubstitutions:
+ b.Substitutions = value.([][]byte)
+ }
+}
+
+// A TypographerOption interface sets options for the TypographerParser.
+type TypographerOption interface {
+ parser.Option
+ SetTypographerOption(*TypographerConfig)
+}
+
+const optTypographicSubstitutions parser.OptionName = "TypographicSubstitutions"
+
+// TypographicSubstitutions is a map of substitutions for the Typographer extension.
+type TypographicSubstitutions map[TypographicPunctuation][]byte
+
+type withTypographicSubstitutions struct {
+ value [][]byte
+}
+
+func (o *withTypographicSubstitutions) SetParserOption(c *parser.Config) {
+ c.Options[optTypographicSubstitutions] = o.value
+}
+
+func (o *withTypographicSubstitutions) SetTypographerOption(p *TypographerConfig) {
+ p.Substitutions = o.value
+}
+
+// WithTypographicSubstitutions is a functional option that specifies replacement
+// text for punctuation marks.
+func WithTypographicSubstitutions(values map[TypographicPunctuation][]byte) TypographerOption {
+ replacements := newDefaultSubstitutions()
+ for k, v := range values {
+ replacements[k] = v
+ }
+
+ return &withTypographicSubstitutions{replacements}
+}
+
+type typographerDelimiterProcessor struct {
+}
+
+func (p *typographerDelimiterProcessor) IsDelimiter(b byte) bool {
+ return b == '\'' || b == '"'
+}
+
+func (p *typographerDelimiterProcessor) CanOpenCloser(opener, closer *parser.Delimiter) bool {
+ return opener.Char == closer.Char
+}
+
+func (p *typographerDelimiterProcessor) OnMatch(consumes int) gast.Node {
+ return nil
+}
+
+var defaultTypographerDelimiterProcessor = &typographerDelimiterProcessor{}
+
+type typographerParser struct {
+ TypographerConfig
+}
+
+// NewTypographerParser returns a new InlineParser that parses
+// typographer expressions.
+func NewTypographerParser(opts ...TypographerOption) parser.InlineParser {
+ p := &typographerParser{
+ TypographerConfig: TypographerConfig{
+ Substitutions: newDefaultSubstitutions(),
+ },
+ }
+ for _, o := range opts {
+ o.SetTypographerOption(&p.TypographerConfig)
+ }
+ return p
+}
+
+func (s *typographerParser) Trigger() []byte {
+ return []byte{'\'', '"', '-', '.', '<', '>'}
+}
+
+func (s *typographerParser) Parse(parent gast.Node, block text.Reader, pc parser.Context) gast.Node {
+ before := block.PrecendingCharacter()
+ line, _ := block.PeekLine()
+ c := line[0]
+ if len(line) > 2 {
+ if c == '-' {
+ if s.Substitutions[EmDash] != nil && line[1] == '-' && line[2] == '-' { // ---
+ node := gast.NewString(s.Substitutions[EmDash])
+ node.SetCode(true)
+ block.Advance(3)
+ return node
+ }
+ } else if c == '.' {
+ if s.Substitutions[Ellipsis] != nil && line[1] == '.' && line[2] == '.' { // ...
+ node := gast.NewString(s.Substitutions[Ellipsis])
+ node.SetCode(true)
+ block.Advance(3)
+ return node
+ }
+ return nil
+ }
+ }
+ if len(line) > 1 {
+ if c == '<' {
+ if s.Substitutions[LeftAngleQuote] != nil && line[1] == '<' { // <<
+ node := gast.NewString(s.Substitutions[LeftAngleQuote])
+ node.SetCode(true)
+ block.Advance(2)
+ return node
+ }
+ return nil
+ } else if c == '>' {
+ if s.Substitutions[RightAngleQuote] != nil && line[1] == '>' { // >>
+ node := gast.NewString(s.Substitutions[RightAngleQuote])
+ node.SetCode(true)
+ block.Advance(2)
+ return node
+ }
+ return nil
+ } else if s.Substitutions[EnDash] != nil && c == '-' && line[1] == '-' { // --
+ node := gast.NewString(s.Substitutions[EnDash])
+ node.SetCode(true)
+ block.Advance(2)
+ return node
+ }
+ }
+ if c == '\'' || c == '"' {
+ d := parser.ScanDelimiter(line, before, 1, defaultTypographerDelimiterProcessor)
+ if d == nil {
+ return nil
+ }
+ if c == '\'' {
+ if s.Substitutions[LeftSingleQuote] != nil && d.CanOpen && !d.CanClose {
+ node := gast.NewString(s.Substitutions[LeftSingleQuote])
+ node.SetCode(true)
+ block.Advance(1)
+ return node
+ }
+ if s.Substitutions[RightSingleQuote] != nil && d.CanClose && !d.CanOpen {
+ node := gast.NewString(s.Substitutions[RightSingleQuote])
+ node.SetCode(true)
+ block.Advance(1)
+ return node
+ }
+ }
+ if c == '"' {
+ if s.Substitutions[LeftDoubleQuote] != nil && d.CanOpen && !d.CanClose {
+ node := gast.NewString(s.Substitutions[LeftDoubleQuote])
+ node.SetCode(true)
+ block.Advance(1)
+ return node
+ }
+ if s.Substitutions[RightDoubleQuote] != nil && d.CanClose && !d.CanOpen {
+ node := gast.NewString(s.Substitutions[RightDoubleQuote])
+ node.SetCode(true)
+ block.Advance(1)
+ return node
+ }
+ }
+ }
+ return nil
+}
+
+func (s *typographerParser) CloseBlock(parent gast.Node, pc parser.Context) {
+ // nothing to do
+}
+
+type typographer struct {
+ options []TypographerOption
+}
+
+// Typographer is an extension that replaces punctuation marks with typographic entities.
+var Typographer = &typographer{}
+
+// NewTypographer returns a new Extender that replaces punctuation marks with typographic entities.
+func NewTypographer(opts ...TypographerOption) goldmark.Extender {
+ return &typographer{
+ options: opts,
+ }
+}
+
+func (e *typographer) Extend(m goldmark.Markdown) {
+ m.Parser().AddOptions(parser.WithInlineParsers(
+ util.Prioritized(NewTypographerParser(e.options...), 9999),
+ ))
+}
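+
+// A minimal usage sketch (the guillemet substitution below is illustrative only):
+//
+//    md := goldmark.New(goldmark.WithExtensions(
+//        NewTypographer(WithTypographicSubstitutions(map[TypographicPunctuation][]byte{
+//            LeftDoubleQuote:  []byte("«"),
+//            RightDoubleQuote: []byte("»"),
+//        })),
+//    ))
+//
+// With the default substitutions, '--' becomes an en dash, '---' an em dash,
+// '...' an ellipsis, and straight quotes become curly quotes.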
--- /dev/null
+module github.com/yuin/goldmark
+
+go 1.13
--- /dev/null
+// Package goldmark implements functions to convert markdown text to a desired format.
+package goldmark
+
+import (
+ "github.com/yuin/goldmark/parser"
+ "github.com/yuin/goldmark/renderer"
+ "github.com/yuin/goldmark/renderer/html"
+ "github.com/yuin/goldmark/text"
+ "github.com/yuin/goldmark/util"
+ "io"
+)
+
+// DefaultParser returns a new Parser that is configured by default values.
+func DefaultParser() parser.Parser {
+ return parser.NewParser(parser.WithBlockParsers(parser.DefaultBlockParsers()...),
+ parser.WithInlineParsers(parser.DefaultInlineParsers()...),
+ parser.WithParagraphTransformers(parser.DefaultParagraphTransformers()...),
+ )
+}
+
+// DefaultRenderer returns a new Renderer that is configured by default values.
+func DefaultRenderer() renderer.Renderer {
+ return renderer.NewRenderer(renderer.WithNodeRenderers(util.Prioritized(html.NewRenderer(), 1000)))
+}
+
+var defaultMarkdown = New()
+
+// Convert interprets a UTF-8 byte source as Markdown and
+// writes the rendered contents to the writer w.
+func Convert(source []byte, w io.Writer, opts ...parser.ParseOption) error {
+ return defaultMarkdown.Convert(source, w, opts...)
+}
+
+// A Markdown interface offers functions to convert Markdown text to
+// a desired format.
+type Markdown interface {
+ // Convert interprets a UTF-8 byte source as Markdown and writes the rendered
+ // contents to the writer w.
+ Convert(source []byte, writer io.Writer, opts ...parser.ParseOption) error
+
+ // Parser returns a Parser that will be used for conversion.
+ Parser() parser.Parser
+
+ // SetParser sets a Parser to this object.
+ SetParser(parser.Parser)
+
+ // Renderer returns a Renderer that will be used for conversion.
+ Renderer() renderer.Renderer
+
+ // SetRenderer sets a Renderer to this object.
+ SetRenderer(renderer.Renderer)
+}
+
+// Option is a functional option type for Markdown objects.
+type Option func(*markdown)
+
+// WithExtensions adds extensions.
+func WithExtensions(ext ...Extender) Option {
+ return func(m *markdown) {
+ m.extensions = append(m.extensions, ext...)
+ }
+}
+
+// WithParser allows you to override the default parser.
+func WithParser(p parser.Parser) Option {
+ return func(m *markdown) {
+ m.parser = p
+ }
+}
+
+// WithParserOptions applies options for the parser.
+func WithParserOptions(opts ...parser.Option) Option {
+ return func(m *markdown) {
+ m.parser.AddOptions(opts...)
+ }
+}
+
+// WithRenderer allows you to override the default renderer.
+func WithRenderer(r renderer.Renderer) Option {
+ return func(m *markdown) {
+ m.renderer = r
+ }
+}
+
+// WithRendererOptions applies options for the renderer.
+func WithRendererOptions(opts ...renderer.Option) Option {
+ return func(m *markdown) {
+ m.renderer.AddOptions(opts...)
+ }
+}
+
+type markdown struct {
+ parser parser.Parser
+ renderer renderer.Renderer
+ extensions []Extender
+}
+
+// New returns a new Markdown with given options.
+func New(options ...Option) Markdown {
+ md := &markdown{
+ parser: DefaultParser(),
+ renderer: DefaultRenderer(),
+ extensions: []Extender{},
+ }
+ for _, opt := range options {
+ opt(md)
+ }
+ for _, e := range md.extensions {
+ e.Extend(md)
+ }
+ return md
+}
+
+func (m *markdown) Convert(source []byte, writer io.Writer, opts ...parser.ParseOption) error {
+ reader := text.NewReader(source)
+ doc := m.parser.Parse(reader, opts...)
+ return m.renderer.Render(writer, source, doc)
+}
+
+func (m *markdown) Parser() parser.Parser {
+ return m.parser
+}
+
+func (m *markdown) SetParser(v parser.Parser) {
+ m.parser = v
+}
+
+func (m *markdown) Renderer() renderer.Renderer {
+ return m.renderer
+}
+
+func (m *markdown) SetRenderer(v renderer.Renderer) {
+ m.renderer = v
+}
+
+// An Extender interface is used for extending Markdown.
+type Extender interface {
+ // Extend extends the Markdown.
+ Extend(Markdown)
+}
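+
+// A minimal usage sketch of this package (names such as md and buf are
+// illustrative only):
+//
+//    md := New(
+//        WithParserOptions(parser.WithAutoHeadingID()),
+//    )
+//    var buf bytes.Buffer
+//    if err := md.Convert([]byte("# Hello"), &buf); err != nil {
+//        panic(err)
+//    }
+//
+// The package-level Convert uses a shared Markdown instance created by New()
+// with the default parser and renderer.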
--- /dev/null
+package parser
+
+import (
+ "bytes"
+ "io"
+ "strconv"
+
+ "github.com/yuin/goldmark/text"
+ "github.com/yuin/goldmark/util"
+)
+
+var attrNameID = []byte("id")
+var attrNameClass = []byte("class")
+
+// An Attribute is an attribute of a Markdown element.
+type Attribute struct {
+ Name []byte
+ Value interface{}
+}
+
+// An Attributes is a collection of attributes.
+type Attributes []Attribute
+
+// Find returns (value, true) if an attribute corresponding to the given name is found, otherwise (nil, false).
+func (as Attributes) Find(name []byte) (interface{}, bool) {
+ for _, a := range as {
+ if bytes.Equal(a.Name, name) {
+ return a.Value, true
+ }
+ }
+ return nil, false
+}
+
+func (as Attributes) findUpdate(name []byte, cb func(v interface{}) interface{}) bool {
+ for i, a := range as {
+ if bytes.Equal(a.Name, name) {
+ as[i].Value = cb(a.Value)
+ return true
+ }
+ }
+ return false
+}
+
+// ParseAttributes parses a '{...}' attribute block.
+// It returns the parsed Attributes and true if attributes could be parsed,
+// otherwise nil and false.
+func ParseAttributes(reader text.Reader) (Attributes, bool) {
+ savedLine, savedPosition := reader.Position()
+ reader.SkipSpaces()
+ if reader.Peek() != '{' {
+ reader.SetPosition(savedLine, savedPosition)
+ return nil, false
+ }
+ reader.Advance(1)
+ attrs := Attributes{}
+ for {
+ if reader.Peek() == '}' {
+ reader.Advance(1)
+ return attrs, true
+ }
+ attr, ok := parseAttribute(reader)
+ if !ok {
+ reader.SetPosition(savedLine, savedPosition)
+ return nil, false
+ }
+ if bytes.Equal(attr.Name, attrNameClass) {
+ if !attrs.findUpdate(attrNameClass, func(v interface{}) interface{} {
+ ret := make([]byte, 0, len(v.([]byte))+1+len(attr.Value.([]byte)))
+ ret = append(ret, v.([]byte)...)
+ return append(append(ret, ' '), attr.Value.([]byte)...)
+ }) {
+ attrs = append(attrs, attr)
+ }
+ } else {
+ attrs = append(attrs, attr)
+ }
+ reader.SkipSpaces()
+ if reader.Peek() == ',' {
+ reader.Advance(1)
+ reader.SkipSpaces()
+ }
+ }
+}
+
+func parseAttribute(reader text.Reader) (Attribute, bool) {
+ reader.SkipSpaces()
+ c := reader.Peek()
+ if c == '#' || c == '.' {
+ reader.Advance(1)
+ line, _ := reader.PeekLine()
+ i := 0
+ for ; i < len(line) && !util.IsSpace(line[i]) && (!util.IsPunct(line[i]) || line[i] == '_' || line[i] == '-'); i++ {
+ }
+ name := attrNameClass
+ if c == '#' {
+ name = attrNameID
+ }
+ reader.Advance(i)
+ return Attribute{Name: name, Value: line[0:i]}, true
+ }
+ line, _ := reader.PeekLine()
+ if len(line) == 0 {
+ return Attribute{}, false
+ }
+ c = line[0]
+ if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') ||
+ c == '_' || c == ':') {
+ return Attribute{}, false
+ }
+ i := 0
+ for ; i < len(line); i++ {
+ c = line[i]
+ if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') ||
+ (c >= '0' && c <= '9') ||
+ c == '_' || c == ':' || c == '.' || c == '-') {
+ break
+ }
+ }
+ name := line[:i]
+ reader.Advance(i)
+ reader.SkipSpaces()
+ c = reader.Peek()
+ if c != '=' {
+ return Attribute{}, false
+ }
+ reader.Advance(1)
+ reader.SkipSpaces()
+ value, ok := parseAttributeValue(reader)
+ if !ok {
+ return Attribute{}, false
+ }
+ return Attribute{Name: name, Value: value}, true
+}
+
+func parseAttributeValue(reader text.Reader) (interface{}, bool) {
+ reader.SkipSpaces()
+ c := reader.Peek()
+ var value interface{}
+ ok := false
+ switch c {
+ case text.EOF:
+ return Attribute{}, false
+ case '{':
+ value, ok = ParseAttributes(reader)
+ case '[':
+ value, ok = parseAttributeArray(reader)
+ case '"':
+ value, ok = parseAttributeString(reader)
+ default:
+ if c == '-' || c == '+' || util.IsNumeric(c) {
+ value, ok = parseAttributeNumber(reader)
+ } else {
+ value, ok = parseAttributeOthers(reader)
+ }
+ }
+ if !ok {
+ return nil, false
+ }
+ return value, true
+}
+
+func parseAttributeArray(reader text.Reader) ([]interface{}, bool) {
+ reader.Advance(1) // skip [
+ ret := []interface{}{}
+ for i := 0; ; i++ {
+ c := reader.Peek()
+ comma := false
+ if i != 0 && c == ',' {
+ reader.Advance(1)
+ comma = true
+ }
+ if c == ']' {
+ if !comma {
+ reader.Advance(1)
+ return ret, true
+ }
+ return nil, false
+ }
+ reader.SkipSpaces()
+ value, ok := parseAttributeValue(reader)
+ if !ok {
+ return nil, false
+ }
+ ret = append(ret, value)
+ reader.SkipSpaces()
+ }
+}
+
+func parseAttributeString(reader text.Reader) ([]byte, bool) {
+ reader.Advance(1) // skip "
+ line, _ := reader.PeekLine()
+ i := 0
+ l := len(line)
+ var buf bytes.Buffer
+ for i < l {
+ c := line[i]
+ if c == '\\' && i != l-1 {
+ n := line[i+1]
+ switch n {
+ case '"', '/', '\\':
+ buf.WriteByte(n)
+ i += 2
+ case 'b':
+ buf.WriteString("\b")
+ i += 2
+ case 'f':
+ buf.WriteString("\f")
+ i += 2
+ case 'n':
+ buf.WriteString("\n")
+ i += 2
+ case 'r':
+ buf.WriteString("\r")
+ i += 2
+ case 't':
+ buf.WriteString("\t")
+ i += 2
+ default:
+ buf.WriteByte('\\')
+ i++
+ }
+ continue
+ }
+ if c == '"' {
+ reader.Advance(i + 1)
+ return buf.Bytes(), true
+ }
+ buf.WriteByte(c)
+ i++
+ }
+ return nil, false
+}
+
+func scanAttributeDecimal(reader text.Reader, w io.ByteWriter) {
+ for {
+ c := reader.Peek()
+ if util.IsNumeric(c) {
+ w.WriteByte(c)
+ } else {
+ return
+ }
+ reader.Advance(1)
+ }
+}
+
+func parseAttributeNumber(reader text.Reader) (float64, bool) {
+ sign := 1
+ c := reader.Peek()
+ if c == '-' {
+ sign = -1
+ reader.Advance(1)
+ } else if c == '+' {
+ reader.Advance(1)
+ }
+ var buf bytes.Buffer
+ if !util.IsNumeric(reader.Peek()) {
+ return 0, false
+ }
+ scanAttributeDecimal(reader, &buf)
+ if buf.Len() == 0 {
+ return 0, false
+ }
+ c = reader.Peek()
+ if c == '.' {
+ buf.WriteByte(c)
+ reader.Advance(1)
+ scanAttributeDecimal(reader, &buf)
+ }
+ c = reader.Peek()
+ if c == 'e' || c == 'E' {
+ buf.WriteByte(c)
+ reader.Advance(1)
+ c = reader.Peek()
+ if c == '-' || c == '+' {
+ buf.WriteByte(c)
+ reader.Advance(1)
+ }
+ scanAttributeDecimal(reader, &buf)
+ }
+ f, err := strconv.ParseFloat(buf.String(), 64)
+ if err != nil {
+ return 0, false
+ }
+ return float64(sign) * f, true
+}
+
+var bytesTrue = []byte("true")
+var bytesFalse = []byte("false")
+var bytesNull = []byte("null")
+
+func parseAttributeOthers(reader text.Reader) (interface{}, bool) {
+ line, _ := reader.PeekLine()
+ c := line[0]
+ if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') ||
+ c == '_' || c == ':') {
+ return nil, false
+ }
+ i := 0
+ for ; i < len(line); i++ {
+ c := line[i]
+ if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') ||
+ (c >= '0' && c <= '9') ||
+ c == '_' || c == ':' || c == '.' || c == '-') {
+ break
+ }
+ }
+ value := line[:i]
+ reader.Advance(i)
+ if bytes.Equal(value, bytesTrue) {
+ return true, true
+ }
+ if bytes.Equal(value, bytesFalse) {
+ return false, true
+ }
+ if bytes.Equal(value, bytesNull) {
+ return nil, true
+ }
+ return value, true
+}
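+
+// As a sketch of the syntax handled above, ParseAttributes accepts blocks such
+// as (illustrative):
+//
+//    {#main .box .wide data-count=3 title="a value" tags=[a, b]}
+//
+// '#' introduces an id, '.' a class (multiple '.class' tokens are merged into a
+// single class value), and name=value pairs accept strings, numbers,
+// true/false/null, arrays and nested {...} blocks.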
--- /dev/null
+package parser
+
+import (
+ "github.com/yuin/goldmark/ast"
+ "github.com/yuin/goldmark/text"
+ "github.com/yuin/goldmark/util"
+)
+
+// A HeadingConfig struct is a data structure that holds configuration of the renderers related to headings.
+type HeadingConfig struct {
+ AutoHeadingID bool
+ Attribute bool
+}
+
+// SetOption implements SetOptioner.
+func (b *HeadingConfig) SetOption(name OptionName, value interface{}) {
+ switch name {
+ case optAutoHeadingID:
+ b.AutoHeadingID = true
+ case optAttribute:
+ b.Attribute = true
+ }
+}
+
+// A HeadingOption interface sets options for heading parsers.
+type HeadingOption interface {
+ Option
+ SetHeadingOption(*HeadingConfig)
+}
+
+// AutoHeadingID is an option name that enables auto IDs for headings.
+const optAutoHeadingID OptionName = "AutoHeadingID"
+
+type withAutoHeadingID struct {
+}
+
+func (o *withAutoHeadingID) SetParserOption(c *Config) {
+ c.Options[optAutoHeadingID] = true
+}
+
+func (o *withAutoHeadingID) SetHeadingOption(p *HeadingConfig) {
+ p.AutoHeadingID = true
+}
+
+// WithAutoHeadingID is a functional option that enables custom heading IDs and
+// auto-generated heading IDs.
+func WithAutoHeadingID() HeadingOption {
+ return &withAutoHeadingID{}
+}
+
+type withHeadingAttribute struct {
+ Option
+}
+
+func (o *withHeadingAttribute) SetHeadingOption(p *HeadingConfig) {
+ p.Attribute = true
+}
+
+// WithHeadingAttribute is a functional option that enables custom heading attributes.
+func WithHeadingAttribute() HeadingOption {
+ return &withHeadingAttribute{WithAttribute()}
+}
+
+type atxHeadingParser struct {
+ HeadingConfig
+}
+
+// NewATXHeadingParser returns a new BlockParser that can parse ATX headings.
+func NewATXHeadingParser(opts ...HeadingOption) BlockParser {
+ p := &atxHeadingParser{}
+ for _, o := range opts {
+ o.SetHeadingOption(&p.HeadingConfig)
+ }
+ return p
+}
+
+func (b *atxHeadingParser) Trigger() []byte {
+ return []byte{'#'}
+}
+
+func (b *atxHeadingParser) Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State) {
+ line, segment := reader.PeekLine()
+ pos := pc.BlockOffset()
+ if pos < 0 {
+ return nil, NoChildren
+ }
+ i := pos
+ for ; i < len(line) && line[i] == '#'; i++ {
+ }
+ level := i - pos
+ if i == pos || level > 6 {
+ return nil, NoChildren
+ }
+ l := util.TrimLeftSpaceLength(line[i:])
+ if l == 0 {
+ return nil, NoChildren
+ }
+ start := i + l
+ if start >= len(line) {
+ start = len(line) - 1
+ }
+ origstart := start
+ stop := len(line) - util.TrimRightSpaceLength(line)
+
+ node := ast.NewHeading(level)
+ parsed := false
+ if b.Attribute { // handles special case like ### heading ### {#id}
+ start--
+ closureClose := -1
+ closureOpen := -1
+ for j := start; j < stop; {
+ c := line[j]
+ if util.IsEscapedPunctuation(line, j) {
+ j += 2
+ } else if util.IsSpace(c) && j < stop-1 && line[j+1] == '#' {
+ closureOpen = j + 1
+ k := j + 1
+ for ; k < stop && line[k] == '#'; k++ {
+ }
+ closureClose = k
+ break
+ } else {
+ j++
+ }
+ }
+ if closureClose > 0 {
+ reader.Advance(closureClose)
+ attrs, ok := ParseAttributes(reader)
+ parsed = ok
+ if parsed {
+ for _, attr := range attrs {
+ node.SetAttribute(attr.Name, attr.Value)
+ }
+ node.Lines().Append(text.NewSegment(segment.Start+start+1-segment.Padding, segment.Start+closureOpen-segment.Padding))
+ }
+ }
+ }
+ if !parsed {
+ start = origstart
+ stop := len(line) - util.TrimRightSpaceLength(line)
+ if stop <= start { // empty headings like '##[space]'
+ stop = start
+ } else {
+ i = stop - 1
+ for ; line[i] == '#' && i >= start; i-- {
+ }
+ if i != stop-1 && !util.IsSpace(line[i]) {
+ i = stop - 1
+ }
+ i++
+ stop = i
+ }
+
+ if len(util.TrimRight(line[start:stop], []byte{'#'})) != 0 { // empty heading like '### ###'
+ node.Lines().Append(text.NewSegment(segment.Start+start-segment.Padding, segment.Start+stop-segment.Padding))
+ }
+ }
+ return node, NoChildren
+}
+
+func (b *atxHeadingParser) Continue(node ast.Node, reader text.Reader, pc Context) State {
+ return Close
+}
+
+func (b *atxHeadingParser) Close(node ast.Node, reader text.Reader, pc Context) {
+ if b.Attribute {
+ _, ok := node.AttributeString("id")
+ if !ok {
+ parseLastLineAttributes(node, reader, pc)
+ }
+ }
+
+ if b.AutoHeadingID {
+ id, ok := node.AttributeString("id")
+ if !ok {
+ generateAutoHeadingID(node.(*ast.Heading), reader, pc)
+ } else {
+ pc.IDs().Put(id.([]byte))
+ }
+ }
+}
+
+func (b *atxHeadingParser) CanInterruptParagraph() bool {
+ return true
+}
+
+func (b *atxHeadingParser) CanAcceptIndentedLine() bool {
+ return false
+}
+
+func generateAutoHeadingID(node *ast.Heading, reader text.Reader, pc Context) {
+ var line []byte
+ lastIndex := node.Lines().Len() - 1
+ if lastIndex > -1 {
+ lastLine := node.Lines().At(lastIndex)
+ line = lastLine.Value(reader.Source())
+ }
+ headingID := pc.IDs().Generate(line, ast.KindHeading)
+ node.SetAttribute(attrNameID, headingID)
+}
+
+func parseLastLineAttributes(node ast.Node, reader text.Reader, pc Context) {
+ lastIndex := node.Lines().Len() - 1
+ if lastIndex < 0 { // empty headings
+ return
+ }
+ lastLine := node.Lines().At(lastIndex)
+ line := lastLine.Value(reader.Source())
+ lr := text.NewReader(line)
+ var attrs Attributes
+ var ok bool
+ var start text.Segment
+ var sl int
+ var end text.Segment
+ for {
+ c := lr.Peek()
+ if c == text.EOF {
+ break
+ }
+ if c == '\\' {
+ lr.Advance(1)
+ if lr.Peek() == '{' {
+ lr.Advance(1)
+ }
+ continue
+ }
+ if c == '{' {
+ sl, start = lr.Position()
+ attrs, ok = ParseAttributes(lr)
+ _, end = lr.Position()
+ lr.SetPosition(sl, start)
+ }
+ lr.Advance(1)
+ }
+ if ok && util.IsBlank(line[end.Stop:]) {
+ for _, attr := range attrs {
+ node.SetAttribute(attr.Name, attr.Value)
+ }
+ lastLine.Stop = lastLine.Start + start.Start
+ node.Lines().Set(lastIndex, lastLine)
+ }
+}
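+
+// With the Attribute option enabled (see WithHeadingAttribute), the parser
+// above accepts headings such as (illustrative):
+//
+//    ## Section title {#custom-id .fancy}
+//    ### Closed heading ### {#closed}
+//
+// The trailing {...} block is parsed by ParseAttributes and attached to the
+// heading node; if no id is given and AutoHeadingID is enabled, one is
+// generated automatically.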
--- /dev/null
+package parser
+
+import (
+ "github.com/yuin/goldmark/ast"
+ "github.com/yuin/goldmark/text"
+ "github.com/yuin/goldmark/util"
+)
+
+type autoLinkParser struct {
+}
+
+var defaultAutoLinkParser = &autoLinkParser{}
+
+// NewAutoLinkParser returns a new InlineParser that parses autolinks
+// surrounded by '<' and '>'.
+func NewAutoLinkParser() InlineParser {
+ return defaultAutoLinkParser
+}
+
+func (s *autoLinkParser) Trigger() []byte {
+ return []byte{'<'}
+}
+
+func (s *autoLinkParser) Parse(parent ast.Node, block text.Reader, pc Context) ast.Node {
+ line, segment := block.PeekLine()
+ stop := util.FindEmailIndex(line[1:])
+ typ := ast.AutoLinkType(ast.AutoLinkEmail)
+ if stop < 0 {
+ stop = util.FindURLIndex(line[1:])
+ typ = ast.AutoLinkURL
+ }
+ if stop < 0 {
+ return nil
+ }
+ stop++
+ if stop >= len(line) || line[stop] != '>' {
+ return nil
+ }
+ value := ast.NewTextSegment(text.NewSegment(segment.Start+1, segment.Start+stop))
+ block.Advance(stop + 1)
+ return ast.NewAutoLink(typ, value)
+}
--- /dev/null
+package parser
+
+import (
+ "github.com/yuin/goldmark/ast"
+ "github.com/yuin/goldmark/text"
+ "github.com/yuin/goldmark/util"
+)
+
+type blockquoteParser struct {
+}
+
+var defaultBlockquoteParser = &blockquoteParser{}
+
+// NewBlockquoteParser returns a new BlockParser that
+// parses blockquotes.
+func NewBlockquoteParser() BlockParser {
+ return defaultBlockquoteParser
+}
+
+func (b *blockquoteParser) process(reader text.Reader) bool {
+ line, _ := reader.PeekLine()
+ w, pos := util.IndentWidth(line, reader.LineOffset())
+ if w > 3 || pos >= len(line) || line[pos] != '>' {
+ return false
+ }
+ pos++
+ if pos >= len(line) || line[pos] == '\n' {
+ reader.Advance(pos)
+ return true
+ }
+ if line[pos] == ' ' || line[pos] == '\t' {
+ pos++
+ }
+ reader.Advance(pos)
+ if line[pos-1] == '\t' {
+ reader.SetPadding(2)
+ }
+ return true
+}
+
+func (b *blockquoteParser) Trigger() []byte {
+ return []byte{'>'}
+}
+
+func (b *blockquoteParser) Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State) {
+ if b.process(reader) {
+ return ast.NewBlockquote(), HasChildren
+ }
+ return nil, NoChildren
+}
+
+func (b *blockquoteParser) Continue(node ast.Node, reader text.Reader, pc Context) State {
+ if b.process(reader) {
+ return Continue | HasChildren
+ }
+ return Close
+}
+
+func (b *blockquoteParser) Close(node ast.Node, reader text.Reader, pc Context) {
+ // nothing to do
+}
+
+func (b *blockquoteParser) CanInterruptParagraph() bool {
+ return true
+}
+
+func (b *blockquoteParser) CanAcceptIndentedLine() bool {
+ return false
+}
--- /dev/null
+package parser
+
+import (
+ "github.com/yuin/goldmark/ast"
+ "github.com/yuin/goldmark/text"
+ "github.com/yuin/goldmark/util"
+)
+
+type codeBlockParser struct {
+}
+
+// defaultCodeBlockParser is the default BlockParser implementation that parses indented code blocks.
+var defaultCodeBlockParser = &codeBlockParser{}
+
+// NewCodeBlockParser returns a new BlockParser that
+// parses code blocks.
+func NewCodeBlockParser() BlockParser {
+ return defaultCodeBlockParser
+}
+
+func (b *codeBlockParser) Trigger() []byte {
+ return nil
+}
+
+func (b *codeBlockParser) Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State) {
+ line, segment := reader.PeekLine()
+ pos, padding := util.IndentPosition(line, reader.LineOffset(), 4)
+ if pos < 0 || util.IsBlank(line) {
+ return nil, NoChildren
+ }
+ node := ast.NewCodeBlock()
+ reader.AdvanceAndSetPadding(pos, padding)
+ _, segment = reader.PeekLine()
+ node.Lines().Append(segment)
+ reader.Advance(segment.Len() - 1)
+ return node, NoChildren
+
+}
+
+func (b *codeBlockParser) Continue(node ast.Node, reader text.Reader, pc Context) State {
+ line, segment := reader.PeekLine()
+ if util.IsBlank(line) {
+ node.Lines().Append(segment.TrimLeftSpaceWidth(4, reader.Source()))
+ return Continue | NoChildren
+ }
+ pos, padding := util.IndentPosition(line, reader.LineOffset(), 4)
+ if pos < 0 {
+ return Close
+ }
+ reader.AdvanceAndSetPadding(pos, padding)
+ _, segment = reader.PeekLine()
+ node.Lines().Append(segment)
+ reader.Advance(segment.Len() - 1)
+ return Continue | NoChildren
+}
+
+func (b *codeBlockParser) Close(node ast.Node, reader text.Reader, pc Context) {
+ // trim trailing blank lines
+ lines := node.Lines()
+ length := lines.Len() - 1
+ source := reader.Source()
+ for length >= 0 {
+ line := lines.At(length)
+ if util.IsBlank(line.Value(source)) {
+ length--
+ } else {
+ break
+ }
+ }
+ lines.SetSliced(0, length+1)
+}
+
+func (b *codeBlockParser) CanInterruptParagraph() bool {
+ return false
+}
+
+func (b *codeBlockParser) CanAcceptIndentedLine() bool {
+ return true
+}
--- /dev/null
+package parser
+
+import (
+ "github.com/yuin/goldmark/ast"
+ "github.com/yuin/goldmark/text"
+ "github.com/yuin/goldmark/util"
+)
+
+type codeSpanParser struct {
+}
+
+var defaultCodeSpanParser = &codeSpanParser{}
+
+// NewCodeSpanParser returns a new InlineParser that parses inline code spans
+// surrounded by '`'.
+func NewCodeSpanParser() InlineParser {
+ return defaultCodeSpanParser
+}
+
+func (s *codeSpanParser) Trigger() []byte {
+ return []byte{'`'}
+}
+
+func (s *codeSpanParser) Parse(parent ast.Node, block text.Reader, pc Context) ast.Node {
+ line, startSegment := block.PeekLine()
+ opener := 0
+ for ; opener < len(line) && line[opener] == '`'; opener++ {
+ }
+ block.Advance(opener)
+ l, pos := block.Position()
+ node := ast.NewCodeSpan()
+ for {
+ line, segment := block.PeekLine()
+ if line == nil {
+ block.SetPosition(l, pos)
+ return ast.NewTextSegment(startSegment.WithStop(startSegment.Start + opener))
+ }
+ for i := 0; i < len(line); i++ {
+ c := line[i]
+ if c == '`' {
+ oldi := i
+ for ; i < len(line) && line[i] == '`'; i++ {
+ }
+ closure := i - oldi
+ if closure == opener && (i >= len(line) || line[i] != '`') {
+ segment = segment.WithStop(segment.Start + i - closure)
+ if !segment.IsEmpty() {
+ node.AppendChild(node, ast.NewRawTextSegment(segment))
+ }
+ block.Advance(i)
+ goto end
+ }
+ }
+ }
+ if !util.IsBlank(line) {
+ node.AppendChild(node, ast.NewRawTextSegment(segment))
+ }
+ block.AdvanceLine()
+ }
+end:
+ if !node.IsBlank(block.Source()) {
+ // trim one leading and one trailing space when both are present
+ segment := node.FirstChild().(*ast.Text).Segment
+ shouldTrimmed := true
+ if !(!segment.IsEmpty() && block.Source()[segment.Start] == ' ') {
+ shouldTrimmed = false
+ }
+ segment = node.LastChild().(*ast.Text).Segment
+ if !(!segment.IsEmpty() && block.Source()[segment.Stop-1] == ' ') {
+ shouldTrimmed = false
+ }
+ if shouldTrimmed {
+ t := node.FirstChild().(*ast.Text)
+ segment := t.Segment
+ t.Segment = segment.WithStart(segment.Start + 1)
+ t = node.LastChild().(*ast.Text)
+ segment = node.LastChild().(*ast.Text).Segment
+ t.Segment = segment.WithStop(segment.Stop - 1)
+ }
+
+ }
+ return node
+}
--- /dev/null
+package parser
+
+import (
+ "fmt"
+ "strings"
+ "unicode"
+
+ "github.com/yuin/goldmark/ast"
+ "github.com/yuin/goldmark/text"
+ "github.com/yuin/goldmark/util"
+)
+
+// A DelimiterProcessor interface provides a set of functions about
+// Delimiter nodes.
+type DelimiterProcessor interface {
+ // IsDelimiter returns true if given character is a delimiter, otherwise false.
+ IsDelimiter(byte) bool
+
+ // CanOpenCloser returns true if given opener can close given closer, otherwise false.
+ CanOpenCloser(opener, closer *Delimiter) bool
+
+ // OnMatch will be called when a new matched delimiter is found.
+ // OnMatch should return a new Node corresponding to the matched delimiter.
+ OnMatch(consumes int) ast.Node
+}
+
+// A Delimiter struct represents a delimiter like '*' of the Markdown text.
+type Delimiter struct {
+ ast.BaseInline
+
+ Segment text.Segment
+
+ // CanOpen is set true if this delimiter can open a span for a new node.
+ // See https://spec.commonmark.org/0.29/#can-open-emphasis for details.
+ CanOpen bool
+
+ // CanClose is set true if this delimiter can close a span for a new node.
+ // See https://spec.commonmark.org/0.29/#can-open-emphasis for details.
+ CanClose bool
+
+ // Length is the remaining length of this delimiter.
+ Length int
+
+ // OriginalLength is the original length of this delimiter.
+ OriginalLength int
+
+ // Char is the character of this delimiter.
+ Char byte
+
+ // PreviousDelimiter is the previous sibling delimiter node of this delimiter.
+ PreviousDelimiter *Delimiter
+
+ // NextDelimiter is the next sibling delimiter node of this delimiter.
+ NextDelimiter *Delimiter
+
+ // Processor is a DelimiterProcessor associated with this delimiter.
+ Processor DelimiterProcessor
+}
+
+// Inline implements Inline.Inline.
+func (d *Delimiter) Inline() {}
+
+// Dump implements Node.Dump.
+func (d *Delimiter) Dump(source []byte, level int) {
+ fmt.Printf("%sDelimiter: \"%s\"\n", strings.Repeat(" ", level), string(d.Text(source)))
+}
+
+var kindDelimiter = ast.NewNodeKind("Delimiter")
+
+// Kind implements Node.Kind
+func (d *Delimiter) Kind() ast.NodeKind {
+ return kindDelimiter
+}
+
+// Text implements Node.Text
+func (d *Delimiter) Text(source []byte) []byte {
+ return d.Segment.Value(source)
+}
+
+// ConsumeCharacters consumes delimiters.
+func (d *Delimiter) ConsumeCharacters(n int) {
+ d.Length -= n
+ d.Segment = d.Segment.WithStop(d.Segment.Start + d.Length)
+}
+
+// CalcComsumption calculates how many characters should be used for opening
+// a new span corresponding to the given closer.
+func (d *Delimiter) CalcComsumption(closer *Delimiter) int {
+ if (d.CanClose || closer.CanOpen) && (d.OriginalLength+closer.OriginalLength)%3 == 0 && closer.OriginalLength%3 != 0 {
+ return 0
+ }
+ if d.Length >= 2 && closer.Length >= 2 {
+ return 2
+ }
+ return 1
+}
+
+// NewDelimiter returns a new Delimiter node.
+func NewDelimiter(canOpen, canClose bool, length int, char byte, processor DelimiterProcessor) *Delimiter {
+ c := &Delimiter{
+ BaseInline: ast.BaseInline{},
+ CanOpen: canOpen,
+ CanClose: canClose,
+ Length: length,
+ OriginalLength: length,
+ Char: char,
+ PreviousDelimiter: nil,
+ NextDelimiter: nil,
+ Processor: processor,
+ }
+ return c
+}
+
+// ScanDelimiter scans a delimiter by given DelimiterProcessor.
+func ScanDelimiter(line []byte, before rune, min int, processor DelimiterProcessor) *Delimiter {
+ i := 0
+ c := line[i]
+ j := i
+ if !processor.IsDelimiter(c) {
+ return nil
+ }
+ for ; j < len(line) && c == line[j]; j++ {
+ }
+ if (j - i) >= min {
+ after := rune(' ')
+ if j != len(line) {
+ after = util.ToRune(line, j)
+ }
+
+ canOpen, canClose := false, false
+ beforeIsPunctuation := unicode.IsPunct(before)
+ beforeIsWhitespace := unicode.IsSpace(before)
+ afterIsPunctuation := unicode.IsPunct(after)
+ afterIsWhitespace := unicode.IsSpace(after)
+
+ isLeft := !afterIsWhitespace &&
+ (!afterIsPunctuation || beforeIsWhitespace || beforeIsPunctuation)
+ isRight := !beforeIsWhitespace &&
+ (!beforeIsPunctuation || afterIsWhitespace || afterIsPunctuation)
+
+ if line[i] == '_' {
+ canOpen = isLeft && (!isRight || beforeIsPunctuation)
+ canClose = isRight && (!isLeft || afterIsPunctuation)
+ } else {
+ canOpen = isLeft
+ canClose = isRight
+ }
+ return NewDelimiter(canOpen, canClose, j-i, c, processor)
+ }
+ return nil
+}
+
+// ProcessDelimiters processes the delimiter list in the context.
+// Processing will stop when reaching the bottom.
+//
+// If you implement an inline parser that can have other inline nodes as
+// children, you should call this function when a nesting span has closed.
+func ProcessDelimiters(bottom ast.Node, pc Context) {
+ lastDelimiter := pc.LastDelimiter()
+ if lastDelimiter == nil {
+ return
+ }
+ var closer *Delimiter
+ if bottom != nil {
+ if bottom != lastDelimiter {
+ for c := lastDelimiter.PreviousSibling(); c != nil; {
+ if d, ok := c.(*Delimiter); ok {
+ closer = d
+ }
+ prev := c.PreviousSibling()
+ if prev == bottom {
+ break
+ }
+ c = prev
+ }
+ }
+ } else {
+ closer = pc.FirstDelimiter()
+ }
+ if closer == nil {
+ pc.ClearDelimiters(bottom)
+ return
+ }
+ for closer != nil {
+ if !closer.CanClose {
+ closer = closer.NextDelimiter
+ continue
+ }
+ consume := 0
+ found := false
+ maybeOpener := false
+ var opener *Delimiter
+ for opener = closer.PreviousDelimiter; opener != nil; opener = opener.PreviousDelimiter {
+ if opener.CanOpen && opener.Processor.CanOpenCloser(opener, closer) {
+ maybeOpener = true
+ consume = opener.CalcComsumption(closer)
+ if consume > 0 {
+ found = true
+ break
+ }
+ }
+ }
+ if !found {
+ if !maybeOpener && !closer.CanOpen {
+ pc.RemoveDelimiter(closer)
+ }
+ closer = closer.NextDelimiter
+ continue
+ }
+ opener.ConsumeCharacters(consume)
+ closer.ConsumeCharacters(consume)
+
+ node := opener.Processor.OnMatch(consume)
+
+ parent := opener.Parent()
+ child := opener.NextSibling()
+
+ for child != nil && child != closer {
+ next := child.NextSibling()
+ node.AppendChild(node, child)
+ child = next
+ }
+ parent.InsertAfter(parent, opener, node)
+
+ for c := opener.NextDelimiter; c != nil && c != closer; {
+ next := c.NextDelimiter
+ pc.RemoveDelimiter(c)
+ c = next
+ }
+
+ if opener.Length == 0 {
+ pc.RemoveDelimiter(opener)
+ }
+
+ if closer.Length == 0 {
+ next := closer.NextDelimiter
+ pc.RemoveDelimiter(closer)
+ closer = next
+ }
+ }
+ pc.ClearDelimiters(bottom)
+}
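+
+// A custom inline parser typically combines ScanDelimiter, PushDelimiter and
+// ProcessDelimiters roughly as follows (sketch; myParser and myProcessor are
+// illustrative names only):
+//
+//    func (s *myParser) Parse(parent ast.Node, block text.Reader, pc Context) ast.Node {
+//        before := block.PrecendingCharacter()
+//        line, segment := block.PeekLine()
+//        node := ScanDelimiter(line, before, 1, myProcessor)
+//        if node == nil {
+//            return nil
+//        }
+//        node.Segment = segment.WithStop(segment.Start + node.OriginalLength)
+//        block.Advance(node.OriginalLength)
+//        pc.PushDelimiter(node)
+//        return node
+//    }
+//
+// The parser later calls ProcessDelimiters to match openers against closers and
+// replace them with the nodes produced by the DelimiterProcessor.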
--- /dev/null
+package parser
+
+import (
+ "github.com/yuin/goldmark/ast"
+ "github.com/yuin/goldmark/text"
+)
+
+type emphasisDelimiterProcessor struct {
+}
+
+func (p *emphasisDelimiterProcessor) IsDelimiter(b byte) bool {
+ return b == '*' || b == '_'
+}
+
+func (p *emphasisDelimiterProcessor) CanOpenCloser(opener, closer *Delimiter) bool {
+ return opener.Char == closer.Char
+}
+
+func (p *emphasisDelimiterProcessor) OnMatch(consumes int) ast.Node {
+ return ast.NewEmphasis(consumes)
+}
+
+var defaultEmphasisDelimiterProcessor = &emphasisDelimiterProcessor{}
+
+type emphasisParser struct {
+}
+
+var defaultEmphasisParser = &emphasisParser{}
+
+// NewEmphasisParser returns a new InlineParser that parses emphasis.
+func NewEmphasisParser() InlineParser {
+ return defaultEmphasisParser
+}
+
+func (s *emphasisParser) Trigger() []byte {
+ return []byte{'*', '_'}
+}
+
+func (s *emphasisParser) Parse(parent ast.Node, block text.Reader, pc Context) ast.Node {
+ before := block.PrecendingCharacter()
+ line, segment := block.PeekLine()
+ node := ScanDelimiter(line, before, 1, defaultEmphasisDelimiterProcessor)
+ if node == nil {
+ return nil
+ }
+ node.Segment = segment.WithStop(segment.Start + node.OriginalLength)
+ block.Advance(node.OriginalLength)
+ pc.PushDelimiter(node)
+ return node
+}
--- /dev/null
+package parser
+
+import (
+ "bytes"
+
+ "github.com/yuin/goldmark/ast"
+ "github.com/yuin/goldmark/text"
+ "github.com/yuin/goldmark/util"
+)
+
+type fencedCodeBlockParser struct {
+}
+
+var defaultFencedCodeBlockParser = &fencedCodeBlockParser{}
+
+// NewFencedCodeBlockParser returns a new BlockParser that
+// parses fenced code blocks.
+func NewFencedCodeBlockParser() BlockParser {
+ return defaultFencedCodeBlockParser
+}
+
+type fenceData struct {
+ char byte
+ indent int
+ length int
+ node ast.Node
+}
+
+var fencedCodeBlockInfoKey = NewContextKey()
+
+func (b *fencedCodeBlockParser) Trigger() []byte {
+ return []byte{'~', '`'}
+}
+
+func (b *fencedCodeBlockParser) Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State) {
+ line, segment := reader.PeekLine()
+ pos := pc.BlockOffset()
+ if pos < 0 || (line[pos] != '`' && line[pos] != '~') {
+ return nil, NoChildren
+ }
+ findent := pos
+ fenceChar := line[pos]
+ i := pos
+ for ; i < len(line) && line[i] == fenceChar; i++ {
+ }
+ oFenceLength := i - pos
+ if oFenceLength < 3 {
+ return nil, NoChildren
+ }
+ var info *ast.Text
+ if i < len(line)-1 {
+ rest := line[i:]
+ left := util.TrimLeftSpaceLength(rest)
+ right := util.TrimRightSpaceLength(rest)
+ if left < len(rest)-right {
+ infoStart, infoStop := segment.Start-segment.Padding+i+left, segment.Stop-right
+ value := rest[left : len(rest)-right]
+ if fenceChar == '`' && bytes.IndexByte(value, '`') > -1 {
+ return nil, NoChildren
+ } else if infoStart != infoStop {
+ info = ast.NewTextSegment(text.NewSegment(infoStart, infoStop))
+ }
+ }
+ }
+ node := ast.NewFencedCodeBlock(info)
+ pc.Set(fencedCodeBlockInfoKey, &fenceData{fenceChar, findent, oFenceLength, node})
+ return node, NoChildren
+
+}
+
+func (b *fencedCodeBlockParser) Continue(node ast.Node, reader text.Reader, pc Context) State {
+ line, segment := reader.PeekLine()
+ fdata := pc.Get(fencedCodeBlockInfoKey).(*fenceData)
+ w, pos := util.IndentWidth(line, reader.LineOffset())
+ if w < 4 {
+ i := pos
+ for ; i < len(line) && line[i] == fdata.char; i++ {
+ }
+ length := i - pos
+ if length >= fdata.length && util.IsBlank(line[i:]) {
+ newline := 1
+ if line[len(line)-1] != '\n' {
+ newline = 0
+ }
+ reader.Advance(segment.Stop - segment.Start - newline - segment.Padding)
+ return Close
+ }
+ }
+ pos, padding := util.DedentPositionPadding(line, reader.LineOffset(), segment.Padding, fdata.indent)
+
+ seg := text.NewSegmentPadding(segment.Start+pos, segment.Stop, padding)
+ node.Lines().Append(seg)
+ reader.AdvanceAndSetPadding(segment.Stop-segment.Start-pos-1, padding)
+ return Continue | NoChildren
+}
+
+func (b *fencedCodeBlockParser) Close(node ast.Node, reader text.Reader, pc Context) {
+ fdata := pc.Get(fencedCodeBlockInfoKey).(*fenceData)
+ if fdata.node == node {
+ pc.Set(fencedCodeBlockInfoKey, nil)
+ }
+}
+
+func (b *fencedCodeBlockParser) CanInterruptParagraph() bool {
+ return true
+}
+
+func (b *fencedCodeBlockParser) CanAcceptIndentedLine() bool {
+ return false
+}
--- /dev/null
+package parser
+
+import (
+ "bytes"
+ "regexp"
+ "strings"
+
+ "github.com/yuin/goldmark/ast"
+ "github.com/yuin/goldmark/text"
+ "github.com/yuin/goldmark/util"
+)
+
+var allowedBlockTags = map[string]bool{
+ "address": true,
+ "article": true,
+ "aside": true,
+ "base": true,
+ "basefont": true,
+ "blockquote": true,
+ "body": true,
+ "caption": true,
+ "center": true,
+ "col": true,
+ "colgroup": true,
+ "dd": true,
+ "details": true,
+ "dialog": true,
+ "dir": true,
+ "div": true,
+ "dl": true,
+ "dt": true,
+ "fieldset": true,
+ "figcaption": true,
+ "figure": true,
+ "footer": true,
+ "form": true,
+ "frame": true,
+ "frameset": true,
+ "h1": true,
+ "h2": true,
+ "h3": true,
+ "h4": true,
+ "h5": true,
+ "h6": true,
+ "head": true,
+ "header": true,
+ "hr": true,
+ "html": true,
+ "iframe": true,
+ "legend": true,
+ "li": true,
+ "link": true,
+ "main": true,
+ "menu": true,
+ "menuitem": true,
+ "meta": true,
+ "nav": true,
+ "noframes": true,
+ "ol": true,
+ "optgroup": true,
+ "option": true,
+ "p": true,
+ "param": true,
+ "section": true,
+ "source": true,
+ "summary": true,
+ "table": true,
+ "tbody": true,
+ "td": true,
+ "tfoot": true,
+ "th": true,
+ "thead": true,
+ "title": true,
+ "tr": true,
+ "track": true,
+ "ul": true,
+}
+
+var htmlBlockType1OpenRegexp = regexp.MustCompile(`(?i)^[ ]{0,3}<(script|pre|style)(?:\s.*|>.*|/>.*|)\n?$`)
+var htmlBlockType1CloseRegexp = regexp.MustCompile(`(?i)^.*</(?:script|pre|style)>.*`)
+
+var htmlBlockType2OpenRegexp = regexp.MustCompile(`^[ ]{0,3}<!\-\-`)
+var htmlBlockType2Close = []byte{'-', '-', '>'}
+
+var htmlBlockType3OpenRegexp = regexp.MustCompile(`^[ ]{0,3}<\?`)
+var htmlBlockType3Close = []byte{'?', '>'}
+
+var htmlBlockType4OpenRegexp = regexp.MustCompile(`^[ ]{0,3}<![A-Z]+.*\n?$`)
+var htmlBlockType4Close = []byte{'>'}
+
+var htmlBlockType5OpenRegexp = regexp.MustCompile(`^[ ]{0,3}<\!\[CDATA\[`)
+var htmlBlockType5Close = []byte{']', ']', '>'}
+
+var htmlBlockType6Regexp = regexp.MustCompile(`^[ ]{0,3}</?([a-zA-Z0-9]+)(?:\s.*|>.*|/>.*|)\n?$`)
+
+var htmlBlockType7Regexp = regexp.MustCompile(`^[ ]{0,3}<(/)?([a-zA-Z0-9]+)(` + attributePattern + `*)(:?>|/>)\s*\n?$`)
+
+type htmlBlockParser struct {
+}
+
+var defaultHTMLBlockParser = &htmlBlockParser{}
+
+// NewHTMLBlockParser returns a new BlockParser that can parse HTML
+// blocks.
+func NewHTMLBlockParser() BlockParser {
+ return defaultHTMLBlockParser
+}
+
+func (b *htmlBlockParser) Trigger() []byte {
+ return []byte{'<'}
+}
+
+func (b *htmlBlockParser) Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State) {
+ var node *ast.HTMLBlock
+ line, segment := reader.PeekLine()
+ last := pc.LastOpenedBlock().Node
+ if pos := pc.BlockOffset(); pos < 0 || line[pos] != '<' {
+ return nil, NoChildren
+ }
+
+ if m := htmlBlockType1OpenRegexp.FindSubmatchIndex(line); m != nil {
+ node = ast.NewHTMLBlock(ast.HTMLBlockType1)
+ } else if htmlBlockType2OpenRegexp.Match(line) {
+ node = ast.NewHTMLBlock(ast.HTMLBlockType2)
+ } else if htmlBlockType3OpenRegexp.Match(line) {
+ node = ast.NewHTMLBlock(ast.HTMLBlockType3)
+ } else if htmlBlockType4OpenRegexp.Match(line) {
+ node = ast.NewHTMLBlock(ast.HTMLBlockType4)
+ } else if htmlBlockType5OpenRegexp.Match(line) {
+ node = ast.NewHTMLBlock(ast.HTMLBlockType5)
+ } else if match := htmlBlockType7Regexp.FindSubmatchIndex(line); match != nil {
+ isCloseTag := match[2] > -1 && bytes.Equal(line[match[2]:match[3]], []byte("/"))
+ hasAttr := match[6] != match[7]
+ tagName := strings.ToLower(string(line[match[4]:match[5]]))
+ _, ok := allowedBlockTags[tagName]
+ if ok {
+ node = ast.NewHTMLBlock(ast.HTMLBlockType6)
+ } else if tagName != "script" && tagName != "style" && tagName != "pre" && !ast.IsParagraph(last) && !(isCloseTag && hasAttr) { // type 7 can not interrupt paragraph
+ node = ast.NewHTMLBlock(ast.HTMLBlockType7)
+ }
+ }
+ if node == nil {
+ if match := htmlBlockType6Regexp.FindSubmatchIndex(line); match != nil {
+ tagName := string(line[match[2]:match[3]])
+ _, ok := allowedBlockTags[strings.ToLower(tagName)]
+ if ok {
+ node = ast.NewHTMLBlock(ast.HTMLBlockType6)
+ }
+ }
+ }
+ if node != nil {
+ reader.Advance(segment.Len() - 1)
+ node.Lines().Append(segment)
+ return node, NoChildren
+ }
+ return nil, NoChildren
+}
+
+func (b *htmlBlockParser) Continue(node ast.Node, reader text.Reader, pc Context) State {
+ htmlBlock := node.(*ast.HTMLBlock)
+ lines := htmlBlock.Lines()
+ line, segment := reader.PeekLine()
+ var closurePattern []byte
+
+ switch htmlBlock.HTMLBlockType {
+ case ast.HTMLBlockType1:
+ if lines.Len() == 1 {
+ firstLine := lines.At(0)
+ if htmlBlockType1CloseRegexp.Match(firstLine.Value(reader.Source())) {
+ return Close
+ }
+ }
+ if htmlBlockType1CloseRegexp.Match(line) {
+ htmlBlock.ClosureLine = segment
+ reader.Advance(segment.Len() - 1)
+ return Close
+ }
+ case ast.HTMLBlockType2:
+ closurePattern = htmlBlockType2Close
+ fallthrough
+ case ast.HTMLBlockType3:
+ if closurePattern == nil {
+ closurePattern = htmlBlockType3Close
+ }
+ fallthrough
+ case ast.HTMLBlockType4:
+ if closurePattern == nil {
+ closurePattern = htmlBlockType4Close
+ }
+ fallthrough
+ case ast.HTMLBlockType5:
+ if closurePattern == nil {
+ closurePattern = htmlBlockType5Close
+ }
+
+ if lines.Len() == 1 {
+ firstLine := lines.At(0)
+ if bytes.Contains(firstLine.Value(reader.Source()), closurePattern) {
+ return Close
+ }
+ }
+ if bytes.Contains(line, closurePattern) {
+ htmlBlock.ClosureLine = segment
+ reader.Advance(segment.Len() - 1)
+ return Close
+ }
+
+ case ast.HTMLBlockType6, ast.HTMLBlockType7:
+ if util.IsBlank(line) {
+ return Close
+ }
+ }
+ node.Lines().Append(segment)
+ reader.Advance(segment.Len() - 1)
+ return Continue | NoChildren
+}
+
+func (b *htmlBlockParser) Close(node ast.Node, reader text.Reader, pc Context) {
+ // nothing to do
+}
+
+func (b *htmlBlockParser) CanInterruptParagraph() bool {
+ return true
+}
+
+func (b *htmlBlockParser) CanAcceptIndentedLine() bool {
+ return false
+}
--- /dev/null
+package parser
+
+import (
+ "fmt"
+ "regexp"
+ "strings"
+
+ "github.com/yuin/goldmark/ast"
+ "github.com/yuin/goldmark/text"
+ "github.com/yuin/goldmark/util"
+)
+
+var linkLabelStateKey = NewContextKey()
+
+type linkLabelState struct {
+ ast.BaseInline
+
+ Segment text.Segment
+
+ IsImage bool
+
+ Prev *linkLabelState
+
+ Next *linkLabelState
+
+ First *linkLabelState
+
+ Last *linkLabelState
+}
+
+func newLinkLabelState(segment text.Segment, isImage bool) *linkLabelState {
+ return &linkLabelState{
+ Segment: segment,
+ IsImage: isImage,
+ }
+}
+
+func (s *linkLabelState) Text(source []byte) []byte {
+ return s.Segment.Value(source)
+}
+
+func (s *linkLabelState) Dump(source []byte, level int) {
+ fmt.Printf("%slinkLabelState: \"%s\"\n", strings.Repeat(" ", level), s.Text(source))
+}
+
+var kindLinkLabelState = ast.NewNodeKind("LinkLabelState")
+
+func (s *linkLabelState) Kind() ast.NodeKind {
+ return kindLinkLabelState
+}
+
+func pushLinkLabelState(pc Context, v *linkLabelState) {
+ tlist := pc.Get(linkLabelStateKey)
+ var list *linkLabelState
+ if tlist == nil {
+ list = v
+ v.First = v
+ v.Last = v
+ pc.Set(linkLabelStateKey, list)
+ } else {
+ list = tlist.(*linkLabelState)
+ l := list.Last
+ list.Last = v
+ l.Next = v
+ v.Prev = l
+ }
+}
+
+func removeLinkLabelState(pc Context, d *linkLabelState) {
+ tlist := pc.Get(linkLabelStateKey)
+ var list *linkLabelState
+ if tlist == nil {
+ return
+ }
+ list = tlist.(*linkLabelState)
+
+ if d.Prev == nil {
+ list = d.Next
+ if list != nil {
+ list.First = d
+ list.Last = d.Last
+ list.Prev = nil
+ pc.Set(linkLabelStateKey, list)
+ } else {
+ pc.Set(linkLabelStateKey, nil)
+ }
+ } else {
+ d.Prev.Next = d.Next
+ if d.Next != nil {
+ d.Next.Prev = d.Prev
+ }
+ }
+ if list != nil && d.Next == nil {
+ list.Last = d.Prev
+ }
+ d.Next = nil
+ d.Prev = nil
+ d.First = nil
+ d.Last = nil
+}
+
+type linkParser struct {
+}
+
+var defaultLinkParser = &linkParser{}
+
+// NewLinkParser returns a new InlineParser that parses links.
+func NewLinkParser() InlineParser {
+ return defaultLinkParser
+}
+
+func (s *linkParser) Trigger() []byte {
+ return []byte{'!', '[', ']'}
+}
+
+var linkDestinationRegexp = regexp.MustCompile(`\s*([^\s].+)`)
+var linkTitleRegexp = regexp.MustCompile(`\s+(\)|["'\(].+)`)
+var linkBottom = NewContextKey()
+
+func (s *linkParser) Parse(parent ast.Node, block text.Reader, pc Context) ast.Node {
+ line, segment := block.PeekLine()
+ if line[0] == '!' {
+ if len(line) > 1 && line[1] == '[' {
+ block.Advance(1)
+ pc.Set(linkBottom, pc.LastDelimiter())
+ return processLinkLabelOpen(block, segment.Start+1, true, pc)
+ }
+ return nil
+ }
+ if line[0] == '[' {
+ pc.Set(linkBottom, pc.LastDelimiter())
+ return processLinkLabelOpen(block, segment.Start, false, pc)
+ }
+
+ // line[0] == ']'
+ tlist := pc.Get(linkLabelStateKey)
+ if tlist == nil {
+ return nil
+ }
+ last := tlist.(*linkLabelState).Last
+ if last == nil {
+ return nil
+ }
+ block.Advance(1)
+ removeLinkLabelState(pc, last)
+ if s.containsLink(last) { // a link in a link text is not allowed
+ ast.MergeOrReplaceTextSegment(last.Parent(), last, last.Segment)
+ return nil
+ }
+ labelValue := block.Value(text.NewSegment(last.Segment.Start+1, segment.Start))
+ if util.IsBlank(labelValue) && !last.IsImage {
+ ast.MergeOrReplaceTextSegment(last.Parent(), last, last.Segment)
+ return nil
+ }
+
+ c := block.Peek()
+ l, pos := block.Position()
+ var link *ast.Link
+ var hasValue bool
+ if c == '(' { // normal link
+ link = s.parseLink(parent, last, block, pc)
+ } else if c == '[' { // reference link
+ link, hasValue = s.parseReferenceLink(parent, last, block, pc)
+ if link == nil && hasValue {
+ ast.MergeOrReplaceTextSegment(last.Parent(), last, last.Segment)
+ return nil
+ }
+ }
+
+ if link == nil {
+ // maybe shortcut reference link
+ block.SetPosition(l, pos)
+ ssegment := text.NewSegment(last.Segment.Stop, segment.Start)
+ maybeReference := block.Value(ssegment)
+ ref, ok := pc.Reference(util.ToLinkReference(maybeReference))
+ if !ok {
+ ast.MergeOrReplaceTextSegment(last.Parent(), last, last.Segment)
+ return nil
+ }
+ link = ast.NewLink()
+ s.processLinkLabel(parent, link, last, pc)
+ link.Title = ref.Title()
+ link.Destination = ref.Destination()
+ }
+ if last.IsImage {
+ last.Parent().RemoveChild(last.Parent(), last)
+ return ast.NewImage(link)
+ }
+ last.Parent().RemoveChild(last.Parent(), last)
+ return link
+}
+
+func (s *linkParser) containsLink(last *linkLabelState) bool {
+ if last.IsImage {
+ return false
+ }
+ var c ast.Node
+ for c = last; c != nil; c = c.NextSibling() {
+ if _, ok := c.(*ast.Link); ok {
+ return true
+ }
+ }
+ return false
+}
+
+func processLinkLabelOpen(block text.Reader, pos int, isImage bool, pc Context) *linkLabelState {
+ start := pos
+ if isImage {
+ start--
+ }
+ state := newLinkLabelState(text.NewSegment(start, pos+1), isImage)
+ pushLinkLabelState(pc, state)
+ block.Advance(1)
+ return state
+}
+
+func (s *linkParser) processLinkLabel(parent ast.Node, link *ast.Link, last *linkLabelState, pc Context) {
+ var bottom ast.Node
+ if v := pc.Get(linkBottom); v != nil {
+ bottom = v.(ast.Node)
+ }
+ pc.Set(linkBottom, nil)
+ ProcessDelimiters(bottom, pc)
+ for c := last.NextSibling(); c != nil; {
+ next := c.NextSibling()
+ parent.RemoveChild(parent, c)
+ link.AppendChild(link, c)
+ c = next
+ }
+}
+
+func (s *linkParser) parseReferenceLink(parent ast.Node, last *linkLabelState, block text.Reader, pc Context) (*ast.Link, bool) {
+ _, orgpos := block.Position()
+ block.Advance(1) // skip '['
+ line, segment := block.PeekLine()
+ endIndex := util.FindClosure(line, '[', ']', false, true)
+ if endIndex < 0 {
+ return nil, false
+ }
+
+ block.Advance(endIndex + 1)
+ ssegment := segment.WithStop(segment.Start + endIndex)
+ maybeReference := block.Value(ssegment)
+ if util.IsBlank(maybeReference) { // collapsed reference link
+ ssegment = text.NewSegment(last.Segment.Stop, orgpos.Start-1)
+ maybeReference = block.Value(ssegment)
+ }
+
+ ref, ok := pc.Reference(util.ToLinkReference(maybeReference))
+ if !ok {
+ return nil, true
+ }
+
+ link := ast.NewLink()
+ s.processLinkLabel(parent, link, last, pc)
+ link.Title = ref.Title()
+ link.Destination = ref.Destination()
+ return link, true
+}
+
+func (s *linkParser) parseLink(parent ast.Node, last *linkLabelState, block text.Reader, pc Context) *ast.Link {
+ block.Advance(1) // skip '('
+ block.SkipSpaces()
+ var title []byte
+ var destination []byte
+ var ok bool
+ if block.Peek() == ')' { // empty link like '[link]()'
+ block.Advance(1)
+ } else {
+ destination, ok = parseLinkDestination(block)
+ if !ok {
+ return nil
+ }
+ block.SkipSpaces()
+ if block.Peek() == ')' {
+ block.Advance(1)
+ } else {
+ title, ok = parseLinkTitle(block)
+ if !ok {
+ return nil
+ }
+ block.SkipSpaces()
+ if block.Peek() == ')' {
+ block.Advance(1)
+ } else {
+ return nil
+ }
+ }
+ }
+
+ link := ast.NewLink()
+ s.processLinkLabel(parent, link, last, pc)
+ link.Destination = destination
+ link.Title = title
+ return link
+}
+
+func parseLinkDestination(block text.Reader) ([]byte, bool) {
+ block.SkipSpaces()
+ line, _ := block.PeekLine()
+ buf := []byte{}
+ if block.Peek() == '<' {
+ i := 1
+ for i < len(line) {
+ c := line[i]
+ if c == '\\' && i < len(line)-1 && util.IsPunct(line[i+1]) {
+ buf = append(buf, '\\', line[i+1])
+ i += 2
+ continue
+ } else if c == '>' {
+ block.Advance(i + 1)
+ return line[1:i], true
+ }
+ buf = append(buf, c)
+ i++
+ }
+ return nil, false
+ }
+ opened := 0
+ i := 0
+ for i < len(line) {
+ c := line[i]
+ if c == '\\' && i < len(line)-1 && util.IsPunct(line[i+1]) {
+ buf = append(buf, '\\', line[i+1])
+ i += 2
+ continue
+ } else if c == '(' {
+ opened++
+ } else if c == ')' {
+ opened--
+ if opened < 0 {
+ break
+ }
+ } else if util.IsSpace(c) {
+ break
+ }
+ buf = append(buf, c)
+ i++
+ }
+ block.Advance(i)
+ return line[:i], len(line[:i]) != 0
+}
+
+func parseLinkTitle(block text.Reader) ([]byte, bool) {
+ block.SkipSpaces()
+ opener := block.Peek()
+ if opener != '"' && opener != '\'' && opener != '(' {
+ return nil, false
+ }
+ closer := opener
+ if opener == '(' {
+ closer = ')'
+ }
+ line, _ := block.PeekLine()
+ pos := util.FindClosure(line[1:], opener, closer, false, true)
+ if pos < 0 {
+ return nil, false
+ }
+ pos += 2 // opener + closer
+ block.Advance(pos)
+ return line[1 : pos-1], true
+}
+
+func (s *linkParser) CloseBlock(parent ast.Node, block text.Reader, pc Context) {
+ tlist := pc.Get(linkLabelStateKey)
+ if tlist == nil {
+ return
+ }
+ for s := tlist.(*linkLabelState); s != nil; {
+ next := s.Next
+ removeLinkLabelState(pc, s)
+ s.Parent().ReplaceChild(s.Parent(), s, ast.NewTextSegment(s.Segment))
+ s = next
+ }
+}
--- /dev/null
+package parser
+
+import (
+ "github.com/yuin/goldmark/ast"
+ "github.com/yuin/goldmark/text"
+ "github.com/yuin/goldmark/util"
+)
+
+type linkReferenceParagraphTransformer struct {
+}
+
+// LinkReferenceParagraphTransformer is a ParagraphTransformer implementation
+// that parses and extracts link references from paragraphs.
+var LinkReferenceParagraphTransformer = &linkReferenceParagraphTransformer{}
+
+func (p *linkReferenceParagraphTransformer) Transform(node *ast.Paragraph, reader text.Reader, pc Context) {
+ lines := node.Lines()
+ block := text.NewBlockReader(reader.Source(), lines)
+ removes := [][2]int{}
+ for {
+ start, end := parseLinkReferenceDefinition(block, pc)
+ if start > -1 {
+ if start == end {
+ end++
+ }
+ removes = append(removes, [2]int{start, end})
+ continue
+ }
+ break
+ }
+
+ offset := 0
+ for _, remove := range removes {
+ if lines.Len() == 0 {
+ break
+ }
+ s := lines.Sliced(remove[1]-offset, lines.Len())
+ lines.SetSliced(0, remove[0]-offset)
+ lines.AppendAll(s)
+ offset = remove[1]
+ }
+
+ if lines.Len() == 0 {
+ t := ast.NewTextBlock()
+ t.SetBlankPreviousLines(node.HasBlankPreviousLines())
+ node.Parent().ReplaceChild(node.Parent(), node, t)
+ return
+ }
+
+ node.SetLines(lines)
+}
+
+func parseLinkReferenceDefinition(block text.Reader, pc Context) (int, int) {
+ block.SkipSpaces()
+ line, segment := block.PeekLine()
+ if line == nil {
+ return -1, -1
+ }
+ startLine, _ := block.Position()
+ width, pos := util.IndentWidth(line, 0)
+ if width > 3 {
+ return -1, -1
+ }
+ if width != 0 {
+ pos++
+ }
+ if line[pos] != '[' {
+ return -1, -1
+ }
+ open := segment.Start + pos + 1
+ closes := -1
+ block.Advance(pos + 1)
+ for {
+ line, segment = block.PeekLine()
+ if line == nil {
+ return -1, -1
+ }
+ closure := util.FindClosure(line, '[', ']', false, false)
+ if closure > -1 {
+ closes = segment.Start + closure
+ next := closure + 1
+ if next >= len(line) || line[next] != ':' {
+ return -1, -1
+ }
+ block.Advance(next + 1)
+ break
+ }
+ block.AdvanceLine()
+ }
+ if closes < 0 {
+ return -1, -1
+ }
+ label := block.Value(text.NewSegment(open, closes))
+ if util.IsBlank(label) {
+ return -1, -1
+ }
+ block.SkipSpaces()
+ destination, ok := parseLinkDestination(block)
+ if !ok {
+ return -1, -1
+ }
+ line, segment = block.PeekLine()
+ isNewLine := line == nil || util.IsBlank(line)
+
+ endLine, _ := block.Position()
+ _, spaces, _ := block.SkipSpaces()
+ opener := block.Peek()
+ if opener != '"' && opener != '\'' && opener != '(' {
+ if !isNewLine {
+ return -1, -1
+ }
+ ref := NewReference(label, destination, nil)
+ pc.AddReference(ref)
+ return startLine, endLine + 1
+ }
+ if spaces == 0 {
+ return -1, -1
+ }
+ block.Advance(1)
+ open = -1
+ closes = -1
+ closer := opener
+ if opener == '(' {
+ closer = ')'
+ }
+ for {
+ line, segment = block.PeekLine()
+ if line == nil {
+ return -1, -1
+ }
+ if open < 0 {
+ open = segment.Start
+ }
+ closure := util.FindClosure(line, opener, closer, false, true)
+ if closure > -1 {
+ closes = segment.Start + closure
+ block.Advance(closure + 1)
+ break
+ }
+ block.AdvanceLine()
+ }
+ if closes < 0 {
+ return -1, -1
+ }
+
+ line, segment = block.PeekLine()
+ if line != nil && !util.IsBlank(line) {
+ if !isNewLine {
+ return -1, -1
+ }
+ title := block.Value(text.NewSegment(open, closes))
+ ref := NewReference(label, destination, title)
+ pc.AddReference(ref)
+ return startLine, endLine
+ }
+
+ title := block.Value(text.NewSegment(open, closes))
+
+ endLine, _ = block.Position()
+ ref := NewReference(label, destination, title)
+ pc.AddReference(ref)
+ return startLine, endLine + 1
+}
--- /dev/null
+package parser
+
+import (
+ "github.com/yuin/goldmark/ast"
+ "github.com/yuin/goldmark/text"
+ "github.com/yuin/goldmark/util"
+ "strconv"
+)
+
+type listItemType int
+
+const (
+ notList listItemType = iota
+ bulletList
+ orderedList
+)
+
+// Same as
+// `^(([ ]*)([\-\*\+]))(\s+.*)?\n?$`.FindSubmatchIndex or
+// `^(([ ]*)(\d{1,9}[\.\)]))(\s+.*)?\n?$`.FindSubmatchIndex
+func parseListItem(line []byte) ([6]int, listItemType) {
+ i := 0
+ l := len(line)
+ ret := [6]int{}
+ for ; i < l && line[i] == ' '; i++ {
+ c := line[i]
+ if c == '\t' {
+ return ret, notList
+ }
+ }
+ if i > 3 {
+ return ret, notList
+ }
+ ret[0] = 0
+ ret[1] = i
+ ret[2] = i
+ var typ listItemType
+ if i < l && (line[i] == '-' || line[i] == '*' || line[i] == '+') {
+ i++
+ ret[3] = i
+ typ = bulletList
+ } else if i < l {
+ for ; i < l && util.IsNumeric(line[i]); i++ {
+ }
+ ret[3] = i
+ if ret[3] == ret[2] || ret[3]-ret[2] > 9 {
+ return ret, notList
+ }
+ if i < l && (line[i] == '.' || line[i] == ')') {
+ i++
+ ret[3] = i
+ } else {
+ return ret, notList
+ }
+ typ = orderedList
+ } else {
+ return ret, notList
+ }
+ if i < l && line[i] != '\n' {
+ w, _ := util.IndentWidth(line[i:], 0)
+ if w == 0 {
+ return ret, notList
+ }
+ }
+ if i >= l {
+ ret[4] = -1
+ ret[5] = -1
+ return ret, typ
+ }
+ ret[4] = i
+ ret[5] = len(line)
+ if line[ret[5]-1] == '\n' && line[i] != '\n' {
+ ret[5]--
+ }
+ return ret, typ
+}
+
+func matchesListItem(source []byte, strict bool) ([6]int, listItemType) {
+ m, typ := parseListItem(source)
+ if typ != notList && (!strict || strict && m[1] < 4) {
+ return m, typ
+ }
+ return m, notList
+}
+
+func calcListOffset(source []byte, match [6]int) int {
+ offset := 0
+ if match[4] < 0 || util.IsBlank(source[match[4]:]) { // list item starts with a blank line
+ offset = 1
+ } else {
+ offset, _ = util.IndentWidth(source[match[4]:], match[4])
+ if offset > 4 { // indented code block
+ offset = 1
+ }
+ }
+ return offset
+}
+
+func lastOffset(node ast.Node) int {
+ lastChild := node.LastChild()
+ if lastChild != nil {
+ return lastChild.(*ast.ListItem).Offset
+ }
+ return 0
+}
+
+type listParser struct {
+}
+
+var defaultListParser = &listParser{}
+
+// NewListParser returns a new BlockParser that
+// parses lists.
+// This parser must take precedence over the ListItemParser.
+func NewListParser() BlockParser {
+ return defaultListParser
+}
+
+func (b *listParser) Trigger() []byte {
+ return []byte{'-', '+', '*', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}
+}
+
+func (b *listParser) Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State) {
+ last := pc.LastOpenedBlock().Node
+ if _, lok := last.(*ast.List); lok || pc.Get(skipListParser) != nil {
+ pc.Set(skipListParser, nil)
+ return nil, NoChildren
+ }
+ line, _ := reader.PeekLine()
+ match, typ := matchesListItem(line, true)
+ if typ == notList {
+ return nil, NoChildren
+ }
+ start := -1
+ if typ == orderedList {
+ number := line[match[2] : match[3]-1]
+ start, _ = strconv.Atoi(string(number))
+ }
+
+ if ast.IsParagraph(last) && last.Parent() == parent {
+ // we allow only lists starting with 1 to interrupt paragraphs.
+ if typ == orderedList && start != 1 {
+ return nil, NoChildren
+ }
+ // an empty list item cannot interrupt a paragraph:
+ if match[5]-match[4] == 1 {
+ return nil, NoChildren
+ }
+ }
+
+ marker := line[match[3]-1]
+ node := ast.NewList(marker)
+ if start > -1 {
+ node.Start = start
+ }
+ return node, HasChildren
+}
+
+func (b *listParser) Continue(node ast.Node, reader text.Reader, pc Context) State {
+ list := node.(*ast.List)
+ line, _ := reader.PeekLine()
+ if util.IsBlank(line) {
+ // A list item can begin with at most one blank line
+ if node.ChildCount() == 1 && node.LastChild().ChildCount() == 0 {
+ return Close
+ }
+ return Continue | HasChildren
+ }
+ // Thematic Breaks take precedence over lists
+ if isThematicBreak(line, reader.LineOffset()) {
+ isHeading := false
+ last := pc.LastOpenedBlock().Node
+ if ast.IsParagraph(last) {
+ c, ok := matchesSetextHeadingBar(line)
+ if ok && c == '-' {
+ isHeading = true
+ }
+ }
+ if !isHeading {
+ return Close
+ }
+ }
+
+ // "offset" means a width that bar indicates.
+ // - aaaaaaaa
+ // |----|
+ //
+ // If the indent is less than the last offset like
+ // - a
+ // - b <--- current line
+ // it maybe a new child of the list.
+ offset := lastOffset(node)
+ indent, _ := util.IndentWidth(line, reader.LineOffset())
+
+ if indent < offset {
+ if indent < 4 {
+ match, typ := matchesListItem(line, false) // may have more than 3 leading spaces
+ if typ != notList && match[1]-offset < 4 {
+ marker := line[match[3]-1]
+ if !list.CanContinue(marker, typ == orderedList) {
+ return Close
+ }
+ return Continue | HasChildren
+ }
+ }
+ return Close
+ }
+ return Continue | HasChildren
+}
+
+func (b *listParser) Close(node ast.Node, reader text.Reader, pc Context) {
+ list := node.(*ast.List)
+
+ for c := node.FirstChild(); c != nil && list.IsTight; c = c.NextSibling() {
+ if c.FirstChild() != nil && c.FirstChild() != c.LastChild() {
+ for c1 := c.FirstChild().NextSibling(); c1 != nil; c1 = c1.NextSibling() {
+ if bl, ok := c1.(ast.Node); ok && bl.HasBlankPreviousLines() {
+ list.IsTight = false
+ break
+ }
+ }
+ }
+ if c != node.FirstChild() {
+ if bl, ok := c.(ast.Node); ok && bl.HasBlankPreviousLines() {
+ list.IsTight = false
+ }
+ }
+ }
+
+ if list.IsTight {
+ for child := node.FirstChild(); child != nil; child = child.NextSibling() {
+ for gc := child.FirstChild(); gc != nil; gc = gc.NextSibling() {
+ paragraph, ok := gc.(*ast.Paragraph)
+ if ok {
+ textBlock := ast.NewTextBlock()
+ textBlock.SetLines(paragraph.Lines())
+ child.ReplaceChild(child, paragraph, textBlock)
+ }
+ }
+ }
+ }
+}
+
+func (b *listParser) CanInterruptParagraph() bool {
+ return true
+}
+
+func (b *listParser) CanAcceptIndentedLine() bool {
+ return false
+}
--- /dev/null
+package parser
+
+import (
+ "github.com/yuin/goldmark/ast"
+ "github.com/yuin/goldmark/text"
+ "github.com/yuin/goldmark/util"
+)
+
+type listItemParser struct {
+}
+
+var defaultListItemParser = &listItemParser{}
+
+// NewListItemParser returns a new BlockParser that
+// parses list items.
+func NewListItemParser() BlockParser {
+ return defaultListItemParser
+}
+
+var skipListParser = NewContextKey()
+var skipListParserValue interface{} = true
+
+func (b *listItemParser) Trigger() []byte {
+ return []byte{'-', '+', '*', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}
+}
+
+func (b *listItemParser) Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State) {
+ list, lok := parent.(*ast.List)
+ if !lok { // list item must be a child of a list
+ return nil, NoChildren
+ }
+ offset := lastOffset(list)
+ line, _ := reader.PeekLine()
+ match, typ := matchesListItem(line, false)
+ if typ == notList {
+ return nil, NoChildren
+ }
+ if match[1]-offset > 3 {
+ return nil, NoChildren
+ }
+ itemOffset := calcListOffset(line, match)
+ node := ast.NewListItem(match[3] + itemOffset)
+ if match[4] < 0 || match[5]-match[4] == 1 {
+ return node, NoChildren
+ }
+
+ pos, padding := util.IndentPosition(line[match[4]:], match[4], itemOffset)
+ child := match[3] + pos
+ reader.AdvanceAndSetPadding(child, padding)
+ return node, HasChildren
+}
+
+func (b *listItemParser) Continue(node ast.Node, reader text.Reader, pc Context) State {
+ line, _ := reader.PeekLine()
+ if util.IsBlank(line) {
+ return Continue | HasChildren
+ }
+
+ indent, _ := util.IndentWidth(line, reader.LineOffset())
+ offset := lastOffset(node.Parent())
+ if indent < offset && indent < 4 {
+ _, typ := matchesListItem(line, true)
+ // new list item found
+ if typ != notList {
+ pc.Set(skipListParser, skipListParserValue)
+ }
+ return Close
+ }
+ pos, padding := util.IndentPosition(line, reader.LineOffset(), offset)
+ reader.AdvanceAndSetPadding(pos, padding)
+
+ return Continue | HasChildren
+}
+
+func (b *listItemParser) Close(node ast.Node, reader text.Reader, pc Context) {
+ // nothing to do
+}
+
+func (b *listItemParser) CanInterruptParagraph() bool {
+ return true
+}
+
+func (b *listItemParser) CanAcceptIndentedLine() bool {
+ return false
+}
--- /dev/null
+package parser
+
+import (
+ "github.com/yuin/goldmark/ast"
+ "github.com/yuin/goldmark/text"
+)
+
+type paragraphParser struct {
+}
+
+var defaultParagraphParser = &paragraphParser{}
+
+// NewParagraphParser returns a new BlockParser that
+// parses paragraphs.
+func NewParagraphParser() BlockParser {
+ return defaultParagraphParser
+}
+
+func (b *paragraphParser) Trigger() []byte {
+ return nil
+}
+
+func (b *paragraphParser) Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State) {
+ _, segment := reader.PeekLine()
+ segment = segment.TrimLeftSpace(reader.Source())
+ if segment.IsEmpty() {
+ return nil, NoChildren
+ }
+ node := ast.NewParagraph()
+ node.Lines().Append(segment)
+ reader.Advance(segment.Len() - 1)
+ return node, NoChildren
+}
+
+func (b *paragraphParser) Continue(node ast.Node, reader text.Reader, pc Context) State {
+ _, segment := reader.PeekLine()
+ segment = segment.TrimLeftSpace(reader.Source())
+ if segment.IsEmpty() {
+ return Close
+ }
+ node.Lines().Append(segment)
+ reader.Advance(segment.Len() - 1)
+ return Continue | NoChildren
+}
+
+func (b *paragraphParser) Close(node ast.Node, reader text.Reader, pc Context) {
+ parent := node.Parent()
+ if parent == nil {
+ // paragraph has been transformed
+ return
+ }
+ lines := node.Lines()
+ if lines.Len() != 0 {
+ // trim trailing spaces
+ length := lines.Len()
+ lastLine := node.Lines().At(length - 1)
+ node.Lines().Set(length-1, lastLine.TrimRightSpace(reader.Source()))
+ }
+ if lines.Len() == 0 {
+ node.Parent().RemoveChild(node.Parent(), node)
+ return
+ }
+}
+
+func (b *paragraphParser) CanInterruptParagraph() bool {
+ return false
+}
+
+func (b *paragraphParser) CanAcceptIndentedLine() bool {
+ return false
+}
--- /dev/null
+// Package parser contains types and functions related to parsing Markdown text.
+package parser
+
+import (
+ "fmt"
+ "strings"
+ "sync"
+
+ "github.com/yuin/goldmark/ast"
+ "github.com/yuin/goldmark/text"
+ "github.com/yuin/goldmark/util"
+)
+
+// A Reference interface represents a link reference in Markdown text.
+type Reference interface {
+ // String implements Stringer.
+ String() string
+
+ // Label returns a label of the reference.
+ Label() []byte
+
+ // Destination returns a destination(URL) of the reference.
+ Destination() []byte
+
+ // Title returns a title of the reference.
+ Title() []byte
+}
+
+type reference struct {
+ label []byte
+ destination []byte
+ title []byte
+}
+
+// NewReference returns a new Reference.
+func NewReference(label, destination, title []byte) Reference {
+ return &reference{label, destination, title}
+}
+
+func (r *reference) Label() []byte {
+ return r.label
+}
+
+func (r *reference) Destination() []byte {
+ return r.destination
+}
+
+func (r *reference) Title() []byte {
+ return r.title
+}
+
+func (r *reference) String() string {
+ return fmt.Sprintf("Reference{Label:%s, Destination:%s, Title:%s}", r.label, r.destination, r.title)
+}
+
+// An IDs interface is a collection of the element ids.
+type IDs interface {
+ // Generate generates a new element id.
+ Generate(value []byte, kind ast.NodeKind) []byte
+
+ // Put puts a given element id to the used ids table.
+ Put(value []byte)
+}
+
+type ids struct {
+ values map[string]bool
+}
+
+func newIDs() IDs {
+ return &ids{
+ values: map[string]bool{},
+ }
+}
+
+func (s *ids) Generate(value []byte, kind ast.NodeKind) []byte {
+ value = util.TrimLeftSpace(value)
+ value = util.TrimRightSpace(value)
+ result := []byte{}
+ for i := 0; i < len(value); {
+ v := value[i]
+ l := util.UTF8Len(v)
+ i += int(l)
+ if l != 1 {
+ continue
+ }
+ if util.IsAlphaNumeric(v) {
+ if 'A' <= v && v <= 'Z' {
+ v += 'a' - 'A'
+ }
+ result = append(result, v)
+ } else if util.IsSpace(v) || v == '-' || v == '_' {
+ result = append(result, '-')
+ }
+ }
+ if len(result) == 0 {
+ if kind == ast.KindHeading {
+ result = []byte("heading")
+ } else {
+ result = []byte("id")
+ }
+ }
+ if _, ok := s.values[util.BytesToReadOnlyString(result)]; !ok {
+ s.values[util.BytesToReadOnlyString(result)] = true
+ return result
+ }
+ for i := 1; ; i++ {
+ newResult := fmt.Sprintf("%s-%d", result, i)
+ if _, ok := s.values[newResult]; !ok {
+ s.values[newResult] = true
+ return []byte(newResult)
+ }
+
+ }
+}
+
+func (s *ids) Put(value []byte) {
+ s.values[util.BytesToReadOnlyString(value)] = true
+}
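The Generate method above normalizes a value into an element id and de-duplicates repeats with a numeric suffix. The following snippet is an editorial sketch, not part of the vendored file; exampleIDs is an assumed helper name.

func exampleIDs() {
	ids := newIDs()
	// Letters are lowercased, spaces/'-'/'_' become '-', other punctuation is dropped.
	fmt.Println(string(ids.Generate([]byte("Hello World!"), ast.KindHeading))) // hello-world
	// A repeated value gets a numeric suffix.
	fmt.Println(string(ids.Generate([]byte("Hello World!"), ast.KindHeading))) // hello-world-1
}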
+
+// ContextKey is a key that is used to set arbitrary values to the context.
+type ContextKey int
+
+// ContextKeyMax is a maximum value of the ContextKey.
+var ContextKeyMax ContextKey
+
+// NewContextKey returns a new ContextKey value.
+func NewContextKey() ContextKey {
+ ContextKeyMax++
+ return ContextKeyMax
+}
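For orientation only (not part of the vendored diff): extension parsers in this package allocate a package-level ContextKey once and then stash per-parse state on the Context, exactly as linkLabelStateKey and skipListParser do in the files above. The names myStateKey, storeMyState and loadMyState are illustrative assumptions.

// myStateKey is allocated once at package init time, before any Context is created.
var myStateKey = NewContextKey()

// storeMyState saves arbitrary per-parse state on the Context.
func storeMyState(pc Context, v int) {
	pc.Set(myStateKey, v)
}

// loadMyState reads the state back, tolerating a missing value.
func loadMyState(pc Context) (int, bool) {
	if v := pc.Get(myStateKey); v != nil {
		return v.(int), true
	}
	return 0, false
}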
+
+// A Context interface holds information that is necessary to parse
+// Markdown text.
+type Context interface {
+ // String implements Stringer.
+ String() string
+
+ // Get returns a value associated with the given key.
+ Get(ContextKey) interface{}
+
+ // Set sets the given value to the context.
+ Set(ContextKey, interface{})
+
+ // AddReference adds the given reference to this context.
+ AddReference(Reference)
+
+ // Reference returns (a reference, true) if a reference associated with
+ // the given label exists, otherwise (nil, false).
+ Reference(label string) (Reference, bool)
+
+ // References returns a list of references.
+ References() []Reference
+
+ // IDs returns a collection of the element ids.
+ IDs() IDs
+
+ // BlockOffset returns a first non-space character position on current line.
+ // This value is valid only for BlockParser.Open.
+ // BlockOffset returns -1 if current line is blank.
+ BlockOffset() int
+
+ // SetBlockOffset sets the first non-space character position on the current line.
+ // This value is valid only for BlockParser.Open.
+ SetBlockOffset(int)
+
+ // BlockIndent returns an indent width on current line.
+ // This value is valid only for BlockParser.Open.
+ // BlockIndent returns -1 if current line is blank.
+ BlockIndent() int
+
+ // SetBlockIndent sets the indent width on the current line.
+ // This value is valid only for BlockParser.Open.
+ SetBlockIndent(int)
+
+ // FirstDelimiter returns a first delimiter of the current delimiter list.
+ FirstDelimiter() *Delimiter
+
+ // LastDelimiter returns a last delimiter of the current delimiter list.
+ LastDelimiter() *Delimiter
+
+ // PushDelimiter appends the given delimiter to the tail of the current
+ // delimiter list.
+ PushDelimiter(delimiter *Delimiter)
+
+ // RemoveDelimiter removes the given delimiter from the current delimiter list.
+ RemoveDelimiter(d *Delimiter)
+
+ // ClearDelimiters clears the current delimiter list.
+ ClearDelimiters(bottom ast.Node)
+
+ // OpenedBlocks returns a list of nodes that are currently being parsed.
+ OpenedBlocks() []Block
+
+ // SetOpenedBlocks sets a list of nodes that are currently being parsed.
+ SetOpenedBlocks([]Block)
+
+ // LastOpenedBlock returns the last node that is currently being parsed.
+ LastOpenedBlock() Block
+
+ // IsInLinkLabel returns true if the current position seems to be in a link label.
+ IsInLinkLabel() bool
+}
+
+// A ContextConfig struct is a data structure that holds configuration of the Context.
+type ContextConfig struct {
+ IDs IDs
+}
+
+// A ContextOption is a functional option type for the Context.
+type ContextOption func(*ContextConfig)
+
+// WithIDs is a functional option for the Context.
+func WithIDs(ids IDs) ContextOption {
+ return func(c *ContextConfig) {
+ c.IDs = ids
+ }
+}
+
+type parseContext struct {
+ store []interface{}
+ ids IDs
+ refs map[string]Reference
+ blockOffset int
+ blockIndent int
+ delimiters *Delimiter
+ lastDelimiter *Delimiter
+ openedBlocks []Block
+}
+
+// NewContext returns a new Context.
+func NewContext(options ...ContextOption) Context {
+ cfg := &ContextConfig{
+ IDs: newIDs(),
+ }
+ for _, option := range options {
+ option(cfg)
+ }
+
+ return &parseContext{
+ store: make([]interface{}, ContextKeyMax+1),
+ refs: map[string]Reference{},
+ ids: cfg.IDs,
+ blockOffset: -1,
+ blockIndent: -1,
+ delimiters: nil,
+ lastDelimiter: nil,
+ openedBlocks: []Block{},
+ }
+}
+
+func (p *parseContext) Get(key ContextKey) interface{} {
+ return p.store[key]
+}
+
+func (p *parseContext) Set(key ContextKey, value interface{}) {
+ p.store[key] = value
+}
+
+func (p *parseContext) IDs() IDs {
+ return p.ids
+}
+
+func (p *parseContext) BlockOffset() int {
+ return p.blockOffset
+}
+
+func (p *parseContext) SetBlockOffset(v int) {
+ p.blockOffset = v
+}
+
+func (p *parseContext) BlockIndent() int {
+ return p.blockIndent
+}
+
+func (p *parseContext) SetBlockIndent(v int) {
+ p.blockIndent = v
+}
+
+func (p *parseContext) LastDelimiter() *Delimiter {
+ return p.lastDelimiter
+}
+
+func (p *parseContext) FirstDelimiter() *Delimiter {
+ return p.delimiters
+}
+
+func (p *parseContext) PushDelimiter(d *Delimiter) {
+ if p.delimiters == nil {
+ p.delimiters = d
+ p.lastDelimiter = d
+ } else {
+ l := p.lastDelimiter
+ p.lastDelimiter = d
+ l.NextDelimiter = d
+ d.PreviousDelimiter = l
+ }
+}
+
+func (p *parseContext) RemoveDelimiter(d *Delimiter) {
+ if d.PreviousDelimiter == nil {
+ p.delimiters = d.NextDelimiter
+ } else {
+ d.PreviousDelimiter.NextDelimiter = d.NextDelimiter
+ if d.NextDelimiter != nil {
+ d.NextDelimiter.PreviousDelimiter = d.PreviousDelimiter
+ }
+ }
+ if d.NextDelimiter == nil {
+ p.lastDelimiter = d.PreviousDelimiter
+ }
+ if p.delimiters != nil {
+ p.delimiters.PreviousDelimiter = nil
+ }
+ if p.lastDelimiter != nil {
+ p.lastDelimiter.NextDelimiter = nil
+ }
+ d.NextDelimiter = nil
+ d.PreviousDelimiter = nil
+ if d.Length != 0 {
+ ast.MergeOrReplaceTextSegment(d.Parent(), d, d.Segment)
+ } else {
+ d.Parent().RemoveChild(d.Parent(), d)
+ }
+}
+
+func (p *parseContext) ClearDelimiters(bottom ast.Node) {
+ if p.lastDelimiter == nil {
+ return
+ }
+ var c ast.Node
+ for c = p.lastDelimiter; c != nil && c != bottom; {
+ prev := c.PreviousSibling()
+ if d, ok := c.(*Delimiter); ok {
+ p.RemoveDelimiter(d)
+ }
+ c = prev
+ }
+}
+
+func (p *parseContext) AddReference(ref Reference) {
+ key := util.ToLinkReference(ref.Label())
+ if _, ok := p.refs[key]; !ok {
+ p.refs[key] = ref
+ }
+}
+
+func (p *parseContext) Reference(label string) (Reference, bool) {
+ v, ok := p.refs[label]
+ return v, ok
+}
+
+func (p *parseContext) References() []Reference {
+ ret := make([]Reference, 0, len(p.refs))
+ for _, v := range p.refs {
+ ret = append(ret, v)
+ }
+ return ret
+}
+
+func (p *parseContext) String() string {
+ refs := []string{}
+ for _, r := range p.refs {
+ refs = append(refs, r.String())
+ }
+
+ return fmt.Sprintf("Context{Store:%#v, Refs:%s}", p.store, strings.Join(refs, ","))
+}
+
+func (p *parseContext) OpenedBlocks() []Block {
+ return p.openedBlocks
+}
+
+func (p *parseContext) SetOpenedBlocks(v []Block) {
+ p.openedBlocks = v
+}
+
+func (p *parseContext) LastOpenedBlock() Block {
+ if l := len(p.openedBlocks); l != 0 {
+ return p.openedBlocks[l-1]
+ }
+ return Block{}
+}
+
+func (p *parseContext) IsInLinkLabel() bool {
+ tlist := p.Get(linkLabelStateKey)
+ return tlist != nil
+}
+
+// State represents a parser's state.
+// State is designed to be used as a bit flag.
+type State int
+
+const (
+ none State = 1 << iota
+
+ // Continue indicates parser can continue parsing.
+ Continue
+
+ // Close indicates parser cannot parse anymore.
+ Close
+
+ // HasChildren indicates parser may have child blocks.
+ HasChildren
+
+ // NoChildren indicates parser does not have child blocks.
+ NoChildren
+
+ // RequireParagraph indicates that the parser requires the last node
+ // to be a paragraph and that it must not be converted to other nodes by
+ // ParagraphTransformers.
+ RequireParagraph
+)
+
+// A Config struct is a data structure that holds configuration of the Parser.
+type Config struct {
+ Options map[OptionName]interface{}
+ BlockParsers util.PrioritizedSlice /*<BlockParser>*/
+ InlineParsers util.PrioritizedSlice /*<InlineParser>*/
+ ParagraphTransformers util.PrioritizedSlice /*<ParagraphTransformer>*/
+ ASTTransformers util.PrioritizedSlice /*<ASTTransformer>*/
+}
+
+// NewConfig returns a new Config.
+func NewConfig() *Config {
+ return &Config{
+ Options: map[OptionName]interface{}{},
+ BlockParsers: util.PrioritizedSlice{},
+ InlineParsers: util.PrioritizedSlice{},
+ ParagraphTransformers: util.PrioritizedSlice{},
+ ASTTransformers: util.PrioritizedSlice{},
+ }
+}
+
+// An Option interface is a functional option type for the Parser.
+type Option interface {
+ SetParserOption(*Config)
+}
+
+// OptionName is the name of a parser option.
+type OptionName string
+
+// optAttribute is an option name that specifies attributes of elements.
+const optAttribute OptionName = "Attribute"
+
+type withAttribute struct {
+}
+
+func (o *withAttribute) SetParserOption(c *Config) {
+ c.Options[optAttribute] = true
+}
+
+// WithAttribute is a functional option that enables custom attributes.
+func WithAttribute() Option {
+ return &withAttribute{}
+}
+
+// A Parser interface parses Markdown text into AST nodes.
+type Parser interface {
+ // Parse parses the given Markdown text into AST nodes.
+ Parse(reader text.Reader, opts ...ParseOption) ast.Node
+
+ // AddOptions adds the given options to this parser.
+ AddOptions(...Option)
+}
+
+// A SetOptioner interface sets the given option to the object.
+type SetOptioner interface {
+ // SetOption sets the given option to the object.
+ // Unacceptable options may be passed.
+ // Thus implementations must ignore unacceptable options.
+ SetOption(name OptionName, value interface{})
+}
+
+// A BlockParser interface parses a block level element like Paragraph, List,
+// Blockquote etc.
+type BlockParser interface {
+ // Trigger returns a list of characters that trigger the Open method of
+ // this parser.
+ // If Trigger returns nil, Open will be called for any line.
+ Trigger() []byte
+
+ // Open parses the current line and returns a result of parsing.
+ //
+ // Open must not parse beyond the current line.
+ // If Open has been able to parse the current line, Open must advance a reader
+ // position by consumed byte length.
+ //
+ // If Open has not been able to parse the current line, Open should return
+ // (nil, NoChildren). If Open has been able to parse the current line, Open
+ // should return a new Block node together with HasChildren or NoChildren.
+ Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State)
+
+ // Continue parses the current line and returns a result of parsing.
+ //
+ // Continue must not parse beyond the current line.
+ // If Continue has been able to parse the current line, Continue must advance
+ // a reader position by consumed byte length.
+ //
+ // If Continue has not been able to parse the current line, Continue should
+ // return Close. If Continue has been able to parse the current line,
+ // Continue should return (Continue | NoChildren) or
+ // (Continue | HasChildren).
+ Continue(node ast.Node, reader text.Reader, pc Context) State
+
+ // Close will be called when the parser returns Close.
+ Close(node ast.Node, reader text.Reader, pc Context)
+
+ // CanInterruptParagraph returns true if the parser can interrupt paragraphs,
+ // otherwise false.
+ CanInterruptParagraph() bool
+
+ // CanAcceptIndentedLine returns true if the parser can open a new node when
+ // the given line is indented more than 3 spaces.
+ CanAcceptIndentedLine() bool
+}
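The contract above is easiest to see with a concrete, if contrived, implementation. The sketch below is editorial and not part of the vendored diff: a commentBlockParser (an assumed name) that collects consecutive lines starting with ';' into a TextBlock, modeled on the paragraph and HTML block parsers earlier in this diff.

type commentBlockParser struct{}

// Trigger limits Open calls to lines whose first non-space character is ';'.
func (b *commentBlockParser) Trigger() []byte { return []byte{';'} }

func (b *commentBlockParser) Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State) {
	line, segment := reader.PeekLine()
	if pos := pc.BlockOffset(); pos < 0 || line[pos] != ';' {
		return nil, NoChildren
	}
	node := ast.NewTextBlock()
	node.Lines().Append(segment)
	reader.Advance(segment.Len() - 1)
	return node, NoChildren
}

func (b *commentBlockParser) Continue(node ast.Node, reader text.Reader, pc Context) State {
	line, segment := reader.PeekLine()
	if len(line) == 0 || line[0] != ';' {
		return Close
	}
	node.Lines().Append(segment)
	reader.Advance(segment.Len() - 1)
	return Continue | NoChildren
}

func (b *commentBlockParser) Close(node ast.Node, reader text.Reader, pc Context) {
	// nothing to do
}

func (b *commentBlockParser) CanInterruptParagraph() bool { return false }

func (b *commentBlockParser) CanAcceptIndentedLine() bool { return false }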
+
+// An InlineParser interface parses an inline level element like CodeSpan, Link etc.
+type InlineParser interface {
+ // Trigger returns a list of characters that trigger the Parse method of
+ // this parser.
+ // Trigger characters must be punctuation characters or a halfspace.
+ // A halfspace triggers this parser when the character is any space
+ // character or is at the head of a line.
+ Trigger() []byte
+
+ // Parse parses the given block into an inline node.
+ //
+ // Parse can parse beyond the current line.
+ // If Parse has been able to parse the current line, it must advance a reader
+ // position by consumed byte length.
+ Parse(parent ast.Node, block text.Reader, pc Context) ast.Node
+}
+
+// A CloseBlocker interface is a callback that will be
+// called when a block is closed during inline parsing.
+type CloseBlocker interface {
+ // CloseBlock will be called when a block is closed.
+ CloseBlock(parent ast.Node, block text.Reader, pc Context)
+}
+
+// A ParagraphTransformer transforms parsed Paragraph nodes.
+// For example, link references are extracted from parsed Paragraphs.
+type ParagraphTransformer interface {
+ // Transform transforms the given paragraph.
+ Transform(node *ast.Paragraph, reader text.Reader, pc Context)
+}
+
+// ASTTransformer transforms the entire Markdown document AST tree.
+type ASTTransformer interface {
+ // Transform transforms the given AST tree.
+ Transform(node *ast.Document, reader text.Reader, pc Context)
+}
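An ASTTransformer runs once over the finished document. As a hedged illustration (editorial, not vendored code, and assuming goldmark's ast.Walk helper), a transformer that counts headings could look like this; headingCounter is an assumed name.

type headingCounter struct {
	count int
}

// Transform walks the whole document after block and inline parsing have finished.
func (t *headingCounter) Transform(doc *ast.Document, reader text.Reader, pc Context) {
	_ = ast.Walk(doc, func(n ast.Node, entering bool) (ast.WalkStatus, error) {
		if entering && n.Kind() == ast.KindHeading {
			t.count++
		}
		return ast.WalkContinue, nil
	})
}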
+
+// DefaultBlockParsers returns a new list of default BlockParsers.
+// Priorities of default BlockParsers are:
+//
+// SetextHeadingParser, 100
+// ThematicBreakParser, 200
+// ListParser, 300
+// ListItemParser, 400
+// CodeBlockParser, 500
+// ATXHeadingParser, 600
+// FencedCodeBlockParser, 700
+// BlockquoteParser, 800
+// HTMLBlockParser, 900
+// ParagraphParser, 1000
+func DefaultBlockParsers() []util.PrioritizedValue {
+ return []util.PrioritizedValue{
+ util.Prioritized(NewSetextHeadingParser(), 100),
+ util.Prioritized(NewThematicBreakParser(), 200),
+ util.Prioritized(NewListParser(), 300),
+ util.Prioritized(NewListItemParser(), 400),
+ util.Prioritized(NewCodeBlockParser(), 500),
+ util.Prioritized(NewATXHeadingParser(), 600),
+ util.Prioritized(NewFencedCodeBlockParser(), 700),
+ util.Prioritized(NewBlockquoteParser(), 800),
+ util.Prioritized(NewHTMLBlockParser(), 900),
+ util.Prioritized(NewParagraphParser(), 1000),
+ }
+}
+
+// DefaultInlineParsers returns a new list of default InlineParsers.
+// Priorities of default InlineParsers are:
+//
+// CodeSpanParser, 100
+// LinkParser, 200
+// AutoLinkParser, 300
+// RawHTMLParser, 400
+// EmphasisParser, 500
+func DefaultInlineParsers() []util.PrioritizedValue {
+ return []util.PrioritizedValue{
+ util.Prioritized(NewCodeSpanParser(), 100),
+ util.Prioritized(NewLinkParser(), 200),
+ util.Prioritized(NewAutoLinkParser(), 300),
+ util.Prioritized(NewRawHTMLParser(), 400),
+ util.Prioritized(NewEmphasisParser(), 500),
+ }
+}
+
+// DefaultParagraphTransformers returns a new list of default ParagraphTransformers.
+// Priorities of default ParagraphTransformers are:
+//
+// LinkReferenceParagraphTransformer, 100
+func DefaultParagraphTransformers() []util.PrioritizedValue {
+ return []util.PrioritizedValue{
+ util.Prioritized(LinkReferenceParagraphTransformer, 100),
+ }
+}
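Putting the defaults together with a custom parser: the following sketch (editorial, not part of the diff) registers the hypothetical commentBlockParser from the earlier sketch alongside the default block parsers, with a priority of 150 so it sits between SetextHeadingParser (100) and ThematicBreakParser (200).

func newParserWithComments() Parser {
	return NewParser(
		WithBlockParsers(append(DefaultBlockParsers(),
			util.Prioritized(&commentBlockParser{}, 150))...),
		WithInlineParsers(DefaultInlineParsers()...),
		WithParagraphTransformers(DefaultParagraphTransformers()...),
	)
}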
+
+// A Block struct holds a node and its corresponding parser as a pair.
+type Block struct {
+ // Node is a BlockNode.
+ Node ast.Node
+ // Parser is a BlockParser.
+ Parser BlockParser
+}
+
+type parser struct {
+ options map[OptionName]interface{}
+ blockParsers [256][]BlockParser
+ freeBlockParsers []BlockParser
+ inlineParsers [256][]InlineParser
+ closeBlockers []CloseBlocker
+ paragraphTransformers []ParagraphTransformer
+ astTransformers []ASTTransformer
+ config *Config
+ initSync sync.Once
+}
+
+type withBlockParsers struct {
+ value []util.PrioritizedValue
+}
+
+func (o *withBlockParsers) SetParserOption(c *Config) {
+ c.BlockParsers = append(c.BlockParsers, o.value...)
+}
+
+// WithBlockParsers is a functional option that allows you to add
+// BlockParsers to the parser.
+func WithBlockParsers(bs ...util.PrioritizedValue) Option {
+ return &withBlockParsers{bs}
+}
+
+type withInlineParsers struct {
+ value []util.PrioritizedValue
+}
+
+func (o *withInlineParsers) SetParserOption(c *Config) {
+ c.InlineParsers = append(c.InlineParsers, o.value...)
+}
+
+// WithInlineParsers is a functional option that allows you to add
+// InlineParsers to the parser.
+func WithInlineParsers(bs ...util.PrioritizedValue) Option {
+ return &withInlineParsers{bs}
+}
+
+type withParagraphTransformers struct {
+ value []util.PrioritizedValue
+}
+
+func (o *withParagraphTransformers) SetParserOption(c *Config) {
+ c.ParagraphTransformers = append(c.ParagraphTransformers, o.value...)
+}
+
+// WithParagraphTransformers is a functional option that allows you to add
+// ParagraphTransformers to the parser.
+func WithParagraphTransformers(ps ...util.PrioritizedValue) Option {
+ return &withParagraphTransformers{ps}
+}
+
+type withASTTransformers struct {
+ value []util.PrioritizedValue
+}
+
+func (o *withASTTransformers) SetParserOption(c *Config) {
+ c.ASTTransformers = append(c.ASTTransformers, o.value...)
+}
+
+// WithASTTransformers is a functional option that allows you to add
+// ASTTransformers to the parser.
+func WithASTTransformers(ps ...util.PrioritizedValue) Option {
+ return &withASTTransformers{ps}
+}
+
+type withOption struct {
+ name OptionName
+ value interface{}
+}
+
+func (o *withOption) SetParserOption(c *Config) {
+ c.Options[o.name] = o.value
+}
+
+// WithOption is a functional option that allows you to set
+// an arbitrary option to the parser.
+func WithOption(name OptionName, value interface{}) Option {
+ return &withOption{name, value}
+}
+
+// NewParser returns a new Parser with given options.
+func NewParser(options ...Option) Parser {
+ config := NewConfig()
+ for _, opt := range options {
+ opt.SetParserOption(config)
+ }
+
+ p := &parser{
+ options: map[OptionName]interface{}{},
+ config: config,
+ }
+
+ return p
+}
+
+func (p *parser) AddOptions(opts ...Option) {
+ for _, opt := range opts {
+ opt.SetParserOption(p.config)
+ }
+}
+
+func (p *parser) addBlockParser(v util.PrioritizedValue, options map[OptionName]interface{}) {
+ bp, ok := v.Value.(BlockParser)
+ if !ok {
+ panic(fmt.Sprintf("%v is not a BlockParser", v.Value))
+ }
+ tcs := bp.Trigger()
+ so, ok := v.Value.(SetOptioner)
+ if ok {
+ for oname, ovalue := range options {
+ so.SetOption(oname, ovalue)
+ }
+ }
+ if tcs == nil {
+ p.freeBlockParsers = append(p.freeBlockParsers, bp)
+ } else {
+ for _, tc := range tcs {
+ if p.blockParsers[tc] == nil {
+ p.blockParsers[tc] = []BlockParser{}
+ }
+ p.blockParsers[tc] = append(p.blockParsers[tc], bp)
+ }
+ }
+}
+
+func (p *parser) addInlineParser(v util.PrioritizedValue, options map[OptionName]interface{}) {
+ ip, ok := v.Value.(InlineParser)
+ if !ok {
+ panic(fmt.Sprintf("%v is not a InlineParser", v.Value))
+ }
+ tcs := ip.Trigger()
+ so, ok := v.Value.(SetOptioner)
+ if ok {
+ for oname, ovalue := range options {
+ so.SetOption(oname, ovalue)
+ }
+ }
+ if cb, ok := ip.(CloseBlocker); ok {
+ p.closeBlockers = append(p.closeBlockers, cb)
+ }
+ for _, tc := range tcs {
+ if p.inlineParsers[tc] == nil {
+ p.inlineParsers[tc] = []InlineParser{}
+ }
+ p.inlineParsers[tc] = append(p.inlineParsers[tc], ip)
+ }
+}
+
+func (p *parser) addParagraphTransformer(v util.PrioritizedValue, options map[OptionName]interface{}) {
+ pt, ok := v.Value.(ParagraphTransformer)
+ if !ok {
+ panic(fmt.Sprintf("%v is not a ParagraphTransformer", v.Value))
+ }
+ so, ok := v.Value.(SetOptioner)
+ if ok {
+ for oname, ovalue := range options {
+ so.SetOption(oname, ovalue)
+ }
+ }
+ p.paragraphTransformers = append(p.paragraphTransformers, pt)
+}
+
+func (p *parser) addASTTransformer(v util.PrioritizedValue, options map[OptionName]interface{}) {
+ at, ok := v.Value.(ASTTransformer)
+ if !ok {
+ panic(fmt.Sprintf("%v is not a ASTTransformer", v.Value))
+ }
+ so, ok := v.Value.(SetOptioner)
+ if ok {
+ for oname, ovalue := range options {
+ so.SetOption(oname, ovalue)
+ }
+ }
+ p.astTransformers = append(p.astTransformers, at)
+}
+
+// A ParseConfig struct is a data structure that holds configuration of the Parser.Parse.
+type ParseConfig struct {
+ Context Context
+}
+
+// A ParseOption is a functional option type for the Parser.Parse.
+type ParseOption func(c *ParseConfig)
+
+// WithContext is a functional option that allows you to override
+// a default context.
+func WithContext(context Context) ParseOption {
+ return func(c *ParseConfig) {
+ c.Context = context
+ }
+}
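A usage sketch (editorial, not vendored code) of WithContext: supplying an explicit Context to Parse makes the link references collected during parsing available afterwards. text.NewReader is goldmark's reader constructor; collectReferences is an assumed helper name.

func collectReferences(p Parser, source []byte) []Reference {
	pc := NewContext()
	// The returned document is ignored here; we only want the side effects
	// recorded on the context.
	_ = p.Parse(text.NewReader(source), WithContext(pc))
	return pc.References()
}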
+
+func (p *parser) Parse(reader text.Reader, opts ...ParseOption) ast.Node {
+ p.initSync.Do(func() {
+ p.config.BlockParsers.Sort()
+ for _, v := range p.config.BlockParsers {
+ p.addBlockParser(v, p.config.Options)
+ }
+ for i := range p.blockParsers {
+ if p.blockParsers[i] != nil {
+ p.blockParsers[i] = append(p.blockParsers[i], p.freeBlockParsers...)
+ }
+ }
+
+ p.config.InlineParsers.Sort()
+ for _, v := range p.config.InlineParsers {
+ p.addInlineParser(v, p.config.Options)
+ }
+ p.config.ParagraphTransformers.Sort()
+ for _, v := range p.config.ParagraphTransformers {
+ p.addParagraphTransformer(v, p.config.Options)
+ }
+ p.config.ASTTransformers.Sort()
+ for _, v := range p.config.ASTTransformers {
+ p.addASTTransformer(v, p.config.Options)
+ }
+ p.config = nil
+ })
+ c := &ParseConfig{}
+ for _, opt := range opts {
+ opt(c)
+ }
+ if c.Context == nil {
+ c.Context = NewContext()
+ }
+ pc := c.Context
+ root := ast.NewDocument()
+ p.parseBlocks(root, reader, pc)
+
+ blockReader := text.NewBlockReader(reader.Source(), nil)
+ p.walkBlock(root, func(node ast.Node) {
+ p.parseBlock(blockReader, node, pc)
+ })
+ for _, at := range p.astTransformers {
+ at.Transform(root, reader, pc)
+ }
+ // root.Dump(reader.Source(), 0)
+ return root
+}
+
+func (p *parser) transformParagraph(node *ast.Paragraph, reader text.Reader, pc Context) bool {
+ for _, pt := range p.paragraphTransformers {
+ pt.Transform(node, reader, pc)
+ if node.Parent() == nil {
+ return true
+ }
+ }
+ return false
+}
+
+func (p *parser) closeBlocks(from, to int, reader text.Reader, pc Context) {
+ blocks := pc.OpenedBlocks()
+ for i := from; i >= to; i-- {
+ node := blocks[i].Node
+ blocks[i].Parser.Close(blocks[i].Node, reader, pc)
+ paragraph, ok := node.(*ast.Paragraph)
+ if ok && node.Parent() != nil {
+ p.transformParagraph(paragraph, reader, pc)
+ }
+ }
+ if from == len(blocks)-1 {
+ blocks = blocks[0:to]
+ } else {
+ blocks = append(blocks[0:to], blocks[from+1:]...)
+ }
+ pc.SetOpenedBlocks(blocks)
+}
+
+type blockOpenResult int
+
+const (
+ paragraphContinuation blockOpenResult = iota + 1
+ newBlocksOpened
+ noBlocksOpened
+)
+
+func (p *parser) openBlocks(parent ast.Node, blankLine bool, reader text.Reader, pc Context) blockOpenResult {
+ result := blockOpenResult(noBlocksOpened)
+ continuable := false
+ lastBlock := pc.LastOpenedBlock()
+ if lastBlock.Node != nil {
+ continuable = ast.IsParagraph(lastBlock.Node)
+ }
+retry:
+ var bps []BlockParser
+ line, _ := reader.PeekLine()
+ w, pos := util.IndentWidth(line, reader.LineOffset())
+ if w >= len(line) {
+ pc.SetBlockOffset(-1)
+ pc.SetBlockIndent(-1)
+ } else {
+ pc.SetBlockOffset(pos)
+ pc.SetBlockIndent(w)
+ }
+ if line == nil || line[0] == '\n' {
+ goto continuable
+ }
+ bps = p.freeBlockParsers
+ if pos < len(line) {
+ bps = p.blockParsers[line[pos]]
+ if bps == nil {
+ bps = p.freeBlockParsers
+ }
+ }
+ if bps == nil {
+ goto continuable
+ }
+
+ for _, bp := range bps {
+ if continuable && result == noBlocksOpened && !bp.CanInterruptParagraph() {
+ continue
+ }
+ if w > 3 && !bp.CanAcceptIndentedLine() {
+ continue
+ }
+ lastBlock = pc.LastOpenedBlock()
+ last := lastBlock.Node
+ node, state := bp.Open(parent, reader, pc)
+ if node != nil {
+ // Parser requires the last node to be a paragraph.
+ // With table extension:
+ //
+ // 0
+ // -:
+ // -
+ //
+ // '-' on the 3rd line seems to be a Setext heading because the 1st and
+ // 2nd lines are still a paragraph when the Setext heading parser tries
+ // to parse the 3rd line.
+ // But the 1st and 2nd lines are a table. Thus this paragraph will be
+ // transformed by a paragraph transformer. So this text should be
+ // converted to a table and an empty list.
+ if state&RequireParagraph != 0 {
+ if last == parent.LastChild() {
+ // Opened paragraph may be transformed by ParagraphTransformers in
+ // closeBlocks().
+ lastBlock.Parser.Close(last, reader, pc)
+ blocks := pc.OpenedBlocks()
+ pc.SetOpenedBlocks(blocks[0 : len(blocks)-1])
+ if p.transformParagraph(last.(*ast.Paragraph), reader, pc) {
+ // Paragraph has been transformed.
+ // So this parser is considered as failing.
+ continuable = false
+ goto retry
+ }
+ }
+ }
+ node.SetBlankPreviousLines(blankLine)
+ if last != nil && last.Parent() == nil {
+ lastPos := len(pc.OpenedBlocks()) - 1
+ p.closeBlocks(lastPos, lastPos, reader, pc)
+ }
+ parent.AppendChild(parent, node)
+ result = newBlocksOpened
+ be := Block{node, bp}
+ pc.SetOpenedBlocks(append(pc.OpenedBlocks(), be))
+ if state&HasChildren != 0 {
+ parent = node
+ goto retry // try child block
+ }
+ break // no children, can not open more blocks on this line
+ }
+ }
+
+continuable:
+ if result == noBlocksOpened && continuable {
+ state := lastBlock.Parser.Continue(lastBlock.Node, reader, pc)
+ if state&Continue != 0 {
+ result = paragraphContinuation
+ }
+ }
+ return result
+}
+
+type lineStat struct {
+ lineNum int
+ level int
+ isBlank bool
+}
+
+func isBlankLine(lineNum, level int, stats []lineStat) bool {
+ ret := true
+ for i := len(stats) - 1 - level; i >= 0; i-- {
+ ret = false
+ s := stats[i]
+ if s.lineNum == lineNum {
+ if s.level < level && s.isBlank {
+ return true
+ } else if s.level == level {
+ return s.isBlank
+ }
+ }
+ if s.lineNum < lineNum {
+ return ret
+ }
+ }
+ return ret
+}
+
+func (p *parser) parseBlocks(parent ast.Node, reader text.Reader, pc Context) {
+ pc.SetOpenedBlocks([]Block{})
+ blankLines := make([]lineStat, 0, 128)
+ isBlank := false
+ for { // process blocks separated by blank lines
+ _, lines, ok := reader.SkipBlankLines()
+ if !ok {
+ return
+ }
+ lineNum, _ := reader.Position()
+ if lines != 0 {
+ blankLines = blankLines[0:0]
+ l := len(pc.OpenedBlocks())
+ for i := 0; i < l; i++ {
+ blankLines = append(blankLines, lineStat{lineNum - 1, i, lines != 0})
+ }
+ }
+ isBlank = isBlankLine(lineNum-1, 0, blankLines)
+ // first, we try to open blocks
+ if p.openBlocks(parent, isBlank, reader, pc) != newBlocksOpened {
+ return
+ }
+ reader.AdvanceLine()
+ for { // process opened blocks line by line
+ openedBlocks := pc.OpenedBlocks()
+ l := len(openedBlocks)
+ if l == 0 {
+ break
+ }
+ lastIndex := l - 1
+ for i := 0; i < l; i++ {
+ be := openedBlocks[i]
+ line, _ := reader.PeekLine()
+ if line == nil {
+ p.closeBlocks(lastIndex, 0, reader, pc)
+ reader.AdvanceLine()
+ return
+ }
+ lineNum, _ := reader.Position()
+ blankLines = append(blankLines, lineStat{lineNum, i, util.IsBlank(line)})
+ // If node is a paragraph, p.openBlocks determines whether it is continuable.
+ // So we do not process paragraphs here.
+ if !ast.IsParagraph(be.Node) {
+ state := be.Parser.Continue(be.Node, reader, pc)
+ if state&Continue != 0 {
+ // When current node is a container block and has no children,
+ // we try to open new child nodes
+ if state&HasChildren != 0 && i == lastIndex {
+ isBlank = isBlankLine(lineNum-1, i, blankLines)
+ p.openBlocks(be.Node, isBlank, reader, pc)
+ break
+ }
+ continue
+ }
+ }
+ // current node may be closed or lazy continuation
+ isBlank = isBlankLine(lineNum-1, i, blankLines)
+ thisParent := parent
+ if i != 0 {
+ thisParent = openedBlocks[i-1].Node
+ }
+ lastNode := openedBlocks[lastIndex].Node
+ result := p.openBlocks(thisParent, isBlank, reader, pc)
+ if result != paragraphContinuation {
+ // lastNode is a paragraph and was transformed by the paragraph
+ // transformers.
+ if openedBlocks[lastIndex].Node != lastNode {
+ lastIndex--
+ }
+ p.closeBlocks(lastIndex, i, reader, pc)
+ }
+ break
+ }
+
+ reader.AdvanceLine()
+ }
+ }
+}
+
+func (p *parser) walkBlock(block ast.Node, cb func(node ast.Node)) {
+ for c := block.FirstChild(); c != nil; c = c.NextSibling() {
+ p.walkBlock(c, cb)
+ }
+ cb(block)
+}
+
+func (p *parser) parseBlock(block text.BlockReader, parent ast.Node, pc Context) {
+ if parent.IsRaw() {
+ return
+ }
+ escaped := false
+ source := block.Source()
+ block.Reset(parent.Lines())
+ for {
+ retry:
+ line, _ := block.PeekLine()
+ if line == nil {
+ break
+ }
+ lineLength := len(line)
+ hardlineBreak := false
+ softLinebreak := line[lineLength-1] == '\n'
+ if lineLength >= 2 && line[lineLength-2] == '\\' && softLinebreak { // ends with \\n
+ lineLength -= 2
+ hardlineBreak = true
+
+ } else if lineLength >= 3 && line[lineLength-3] == '\\' && line[lineLength-2] == '\r' && softLinebreak { // ends with \\r\n
+ lineLength -= 3
+ hardlineBreak = true
+ } else if lineLength >= 3 && line[lineLength-3] == ' ' && line[lineLength-2] == ' ' && softLinebreak { // ends with [space][space]\n
+ lineLength -= 3
+ hardlineBreak = true
+ } else if lineLength >= 4 && line[lineLength-4] == ' ' && line[lineLength-3] == ' ' && line[lineLength-2] == '\r' && softLinebreak { // ends with [space][space]\r\n
+ lineLength -= 4
+ hardlineBreak = true
+ }
+
+ l, startPosition := block.Position()
+ n := 0
+ for i := 0; i < lineLength; i++ {
+ c := line[i]
+ if c == '\n' {
+ break
+ }
+ isSpace := util.IsSpace(c)
+ isPunct := util.IsPunct(c)
+ if (isPunct && !escaped) || isSpace || i == 0 {
+ parserChar := c
+ if isSpace || (i == 0 && !isPunct) {
+ parserChar = ' '
+ }
+ ips := p.inlineParsers[parserChar]
+ if ips != nil {
+ block.Advance(n)
+ n = 0
+ savedLine, savedPosition := block.Position()
+ if i != 0 {
+ _, currentPosition := block.Position()
+ ast.MergeOrAppendTextSegment(parent, startPosition.Between(currentPosition))
+ _, startPosition = block.Position()
+ }
+ var inlineNode ast.Node
+ for _, ip := range ips {
+ inlineNode = ip.Parse(parent, block, pc)
+ if inlineNode != nil {
+ break
+ }
+ block.SetPosition(savedLine, savedPosition)
+ }
+ if inlineNode != nil {
+ parent.AppendChild(parent, inlineNode)
+ goto retry
+ }
+ }
+ }
+ if escaped {
+ escaped = false
+ n++
+ continue
+ }
+
+ if c == '\\' {
+ escaped = true
+ n++
+ continue
+ }
+
+ escaped = false
+ n++
+ }
+ if n != 0 {
+ block.Advance(n)
+ }
+ currentL, currentPosition := block.Position()
+ if l != currentL {
+ continue
+ }
+ diff := startPosition.Between(currentPosition)
+ stop := diff.Stop
+ rest := diff.WithStop(stop)
+ text := ast.NewTextSegment(rest.TrimRightSpace(source))
+ text.SetSoftLineBreak(softLinebreak)
+ text.SetHardLineBreak(hardlineBreak)
+ parent.AppendChild(parent, text)
+ block.AdvanceLine()
+ }
+
+ ProcessDelimiters(nil, pc)
+ for _, ip := range p.closeBlockers {
+ ip.CloseBlock(parent, block, pc)
+ }
+}
--- /dev/null
+package parser
+
+import (
+ "bytes"
+ "github.com/yuin/goldmark/ast"
+ "github.com/yuin/goldmark/text"
+ "github.com/yuin/goldmark/util"
+ "regexp"
+)
+
+type rawHTMLParser struct {
+}
+
+var defaultRawHTMLParser = &rawHTMLParser{}
+
+// NewRawHTMLParser returns a new InlineParser that can parse
+// inline HTML.
+func NewRawHTMLParser() InlineParser {
+ return defaultRawHTMLParser
+}
+
+func (s *rawHTMLParser) Trigger() []byte {
+ return []byte{'<'}
+}
+
+func (s *rawHTMLParser) Parse(parent ast.Node, block text.Reader, pc Context) ast.Node {
+ line, _ := block.PeekLine()
+ if len(line) > 1 && util.IsAlphaNumeric(line[1]) {
+ return s.parseMultiLineRegexp(openTagRegexp, block, pc)
+ }
+ if len(line) > 2 && line[1] == '/' && util.IsAlphaNumeric(line[2]) {
+ return s.parseMultiLineRegexp(closeTagRegexp, block, pc)
+ }
+ if bytes.HasPrefix(line, []byte("<!--")) {
+ return s.parseMultiLineRegexp(commentRegexp, block, pc)
+ }
+ if bytes.HasPrefix(line, []byte("<?")) {
+ return s.parseSingleLineRegexp(processingInstructionRegexp, block, pc)
+ }
+ if len(line) > 2 && line[1] == '!' && line[2] >= 'A' && line[2] <= 'Z' {
+ return s.parseSingleLineRegexp(declRegexp, block, pc)
+ }
+ if bytes.HasPrefix(line, []byte("<![CDATA[")) {
+ return s.parseMultiLineRegexp(cdataRegexp, block, pc)
+ }
+ return nil
+}
+
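+// The patterns below mirror the CommonMark raw HTML inline productions:
+// open tags, closing tags, comments, processing instructions, declarations
+// and CDATA sections.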
+var tagnamePattern = `([A-Za-z][A-Za-z0-9-]*)`
+var attributePattern = `(?:\s+[a-zA-Z_:][a-zA-Z0-9:._-]*(?:\s*=\s*(?:[^\"'=<>` + "`" + `\x00-\x20]+|'[^']*'|"[^"]*"))?)`
+var openTagRegexp = regexp.MustCompile("^<" + tagnamePattern + attributePattern + `*\s*/?>`)
+var closeTagRegexp = regexp.MustCompile("^</" + tagnamePattern + `\s*>`)
+var commentRegexp = regexp.MustCompile(`^<!---->|<!--(?:-?[^>-])(?:-?[^-])*-->`)
+var processingInstructionRegexp = regexp.MustCompile(`^(?:<\?).*?(?:\?>)`)
+var declRegexp = regexp.MustCompile(`^<![A-Z]+\s+[^>]*>`)
+var cdataRegexp = regexp.MustCompile(`<!\[CDATA\[[\s\S]*?\]\]>`)
+
+func (s *rawHTMLParser) parseSingleLineRegexp(reg *regexp.Regexp, block text.Reader, pc Context) ast.Node {
+ line, segment := block.PeekLine()
+ match := reg.FindSubmatchIndex(line)
+ if match == nil {
+ return nil
+ }
+ node := ast.NewRawHTML()
+ node.Segments.Append(segment.WithStop(segment.Start + match[1]))
+ block.Advance(match[1])
+ return node
+}
+
+var dummyMatch = [][]byte{}
+
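+// parseMultiLineRegexp matches reg against the reader, possibly across several
+// lines, and on success records one segment per spanned line on the RawHTML node.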
+func (s *rawHTMLParser) parseMultiLineRegexp(reg *regexp.Regexp, block text.Reader, pc Context) ast.Node {
+ sline, ssegment := block.Position()
+ if block.Match(reg) {
+ node := ast.NewRawHTML()
+ eline, esegment := block.Position()
+ block.SetPosition(sline, ssegment)
+ for {
+ line, segment := block.PeekLine()
+ if line == nil {
+ break
+ }
+ l, _ := block.Position()
+ start := segment.Start
+ if l == sline {
+ start = ssegment.Start
+ }
+ end := segment.Stop
+ if l == eline {
+ end = esegment.Start
+ }
+
+ node.Segments.Append(text.NewSegment(start, end))
+ if l == eline {
+ block.Advance(end - start)
+ break
+ } else {
+ block.AdvanceLine()
+ }
+ }
+ return node
+ }
+ return nil
+}
+
+func (s *rawHTMLParser) CloseBlock(parent ast.Node, pc Context) {
+ // nothing to do
+}
--- /dev/null
+package parser
+
+import (
+ "github.com/yuin/goldmark/ast"
+ "github.com/yuin/goldmark/text"
+ "github.com/yuin/goldmark/util"
+)
+
+var temporaryParagraphKey = NewContextKey()
+
+type setextHeadingParser struct {
+ HeadingConfig
+}
+
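+// matchesSetextHeadingBar reports whether line is a setext heading underline:
+// a run of '=' or '-' characters indented by at most three spaces, optionally
+// followed by trailing spaces. On a match it also returns the marker character.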
+func matchesSetextHeadingBar(line []byte) (byte, bool) {
+ start := 0
+ end := len(line)
+ space := util.TrimLeftLength(line, []byte{' '})
+ if space > 3 {
+ return 0, false
+ }
+ start += space
+ level1 := util.TrimLeftLength(line[start:end], []byte{'='})
+ c := byte('=')
+ var level2 int
+ if level1 == 0 {
+ level2 = util.TrimLeftLength(line[start:end], []byte{'-'})
+ c = '-'
+ }
+ if util.IsSpace(line[end-1]) {
+ end -= util.TrimRightSpaceLength(line[start:end])
+ }
+ if !((level1 > 0 && start+level1 == end) || (level2 > 0 && start+level2 == end)) {
+ return 0, false
+ }
+ return c, true
+}
+
+// NewSetextHeadingParser returns a new BlockParser that can parse Setext headings.
+func NewSetextHeadingParser(opts ...HeadingOption) BlockParser {
+ p := &setextHeadingParser{}
+ for _, o := range opts {
+ o.SetHeadingOption(&p.HeadingConfig)
+ }
+ return p
+}
+
+func (b *setextHeadingParser) Trigger() []byte {
+ return []byte{'-', '='}
+}
+
+func (b *setextHeadingParser) Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State) {
+ last := pc.LastOpenedBlock().Node
+ if last == nil {
+ return nil, NoChildren
+ }
+ paragraph, ok := last.(*ast.Paragraph)
+ if !ok || paragraph.Parent() != parent {
+ return nil, NoChildren
+ }
+ line, segment := reader.PeekLine()
+ c, ok := matchesSetextHeadingBar(line)
+ if !ok {
+ return nil, NoChildren
+ }
+ level := 1
+ if c == '-' {
+ level = 2
+ }
+ node := ast.NewHeading(level)
+ node.Lines().Append(segment)
+ pc.Set(temporaryParagraphKey, last)
+ return node, NoChildren | RequireParagraph
+}
+
+func (b *setextHeadingParser) Continue(node ast.Node, reader text.Reader, pc Context) State {
+ return Close
+}
+
+func (b *setextHeadingParser) Close(node ast.Node, reader text.Reader, pc Context) {
+ heading := node.(*ast.Heading)
+ segment := node.Lines().At(0)
+ heading.Lines().Clear()
+ tmp := pc.Get(temporaryParagraphKey).(*ast.Paragraph)
+ pc.Set(temporaryParagraphKey, nil)
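+	// If the underlying paragraph turned out to be empty, the underline is demoted
+	// back to ordinary paragraph text and the heading is dropped; otherwise the
+	// paragraph's lines become the heading content and the temporary paragraph is
+	// removed from the tree.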
+ if tmp.Lines().Len() == 0 {
+ next := heading.NextSibling()
+ segment = segment.TrimLeftSpace(reader.Source())
+ if next == nil || !ast.IsParagraph(next) {
+ para := ast.NewParagraph()
+ para.Lines().Append(segment)
+ heading.Parent().InsertAfter(heading.Parent(), heading, para)
+ } else {
+ next.(ast.Node).Lines().Unshift(segment)
+ }
+ heading.Parent().RemoveChild(heading.Parent(), heading)
+ } else {
+ heading.SetLines(tmp.Lines())
+ heading.SetBlankPreviousLines(tmp.HasBlankPreviousLines())
+ tp := tmp.Parent()
+ if tp != nil {
+ tp.RemoveChild(tp, tmp)
+ }
+ }
+
+ if b.Attribute {
+ parseLastLineAttributes(node, reader, pc)
+ }
+
+ if b.AutoHeadingID {
+ id, ok := node.AttributeString("id")
+ if !ok {
+ generateAutoHeadingID(heading, reader, pc)
+ } else {
+ pc.IDs().Put(id.([]byte))
+ }
+ }
+}
+
+func (b *setextHeadingParser) CanInterruptParagraph() bool {
+ return true
+}
+
+func (b *setextHeadingParser) CanAcceptIndentedLine() bool {
+ return false
+}
--- /dev/null
+package parser
+
+import (
+ "github.com/yuin/goldmark/ast"
+ "github.com/yuin/goldmark/text"
+ "github.com/yuin/goldmark/util"
+)
+
+type thematicBreakPraser struct {
+}
+
+var defaultThematicBreakPraser = &thematicBreakPraser{}
+
+// NewThematicBreakParser returns a new BlockParser that
+// parses thematic breaks.
+func NewThematicBreakParser() BlockParser {
+ return defaultThematicBreakPraser
+}
+
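+// isThematicBreak reports whether line, starting at the given offset, consists
+// of three or more '*', '-' or '_' characters of the same kind, optionally
+// separated by spaces, with at most three columns of indentation.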
+func isThematicBreak(line []byte, offset int) bool {
+ w, pos := util.IndentWidth(line, offset)
+ if w > 3 {
+ return false
+ }
+ mark := byte(0)
+ count := 0
+ for i := pos; i < len(line); i++ {
+ c := line[i]
+ if util.IsSpace(c) {
+ continue
+ }
+ if mark == 0 {
+ mark = c
+ count = 1
+ if mark == '*' || mark == '-' || mark == '_' {
+ continue
+ }
+ return false
+ }
+ if c != mark {
+ return false
+ }
+ count++
+ }
+ return count > 2
+}
+
+func (b *thematicBreakPraser) Trigger() []byte {
+ return []byte{'-', '*', '_'}
+}
+
+func (b *thematicBreakPraser) Open(parent ast.Node, reader text.Reader, pc Context) (ast.Node, State) {
+ line, segment := reader.PeekLine()
+ if isThematicBreak(line, reader.LineOffset()) {
+ reader.Advance(segment.Len() - 1)
+ return ast.NewThematicBreak(), NoChildren
+ }
+ return nil, NoChildren
+}
+
+func (b *thematicBreakPraser) Continue(node ast.Node, reader text.Reader, pc Context) State {
+ return Close
+}
+
+func (b *thematicBreakPraser) Close(node ast.Node, reader text.Reader, pc Context) {
+ // nothing to do
+}
+
+func (b *thematicBreakPraser) CanInterruptParagraph() bool {
+ return true
+}
+
+func (b *thematicBreakPraser) CanAcceptIndentedLine() bool {
+ return false
+}
--- /dev/null
+package html
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+
+ "github.com/yuin/goldmark/ast"
+ "github.com/yuin/goldmark/renderer"
+ "github.com/yuin/goldmark/util"
+)
+
+// A Config struct has configurations for the HTML based renderers.
+type Config struct {
+ Writer Writer
+ HardWraps bool
+ XHTML bool
+ Unsafe bool
+}
+
+// NewConfig returns a new Config with defaults.
+func NewConfig() Config {
+ return Config{
+ Writer: DefaultWriter,
+ HardWraps: false,
+ XHTML: false,
+ Unsafe: false,
+ }
+}
+
+// SetOption implements renderer.NodeRenderer.SetOption.
+func (c *Config) SetOption(name renderer.OptionName, value interface{}) {
+ switch name {
+ case optHardWraps:
+ c.HardWraps = value.(bool)
+ case optXHTML:
+ c.XHTML = value.(bool)
+ case optUnsafe:
+ c.Unsafe = value.(bool)
+ case optTextWriter:
+ c.Writer = value.(Writer)
+ }
+}
+
+// An Option interface sets options for HTML based renderers.
+type Option interface {
+ SetHTMLOption(*Config)
+}
+
+// TextWriter is an option name used in WithWriter.
+const optTextWriter renderer.OptionName = "Writer"
+
+type withWriter struct {
+ value Writer
+}
+
+func (o *withWriter) SetConfig(c *renderer.Config) {
+ c.Options[optTextWriter] = o.value
+}
+
+func (o *withWriter) SetHTMLOption(c *Config) {
+ c.Writer = o.value
+}
+
+// WithWriter is a functional option that allows you to set the given writer to
+// the renderer.
+func WithWriter(writer Writer) interface {
+ renderer.Option
+ Option
+} {
+ return &withWriter{writer}
+}
+
+// HardWraps is an option name used in WithHardWraps.
+const optHardWraps renderer.OptionName = "HardWraps"
+
+type withHardWraps struct {
+}
+
+func (o *withHardWraps) SetConfig(c *renderer.Config) {
+ c.Options[optHardWraps] = true
+}
+
+func (o *withHardWraps) SetHTMLOption(c *Config) {
+ c.HardWraps = true
+}
+
+// WithHardWraps is a functional option that indicates whether soft line breaks
+// should be rendered as '<br>'.
+func WithHardWraps() interface {
+ renderer.Option
+ Option
+} {
+ return &withHardWraps{}
+}
+
+// XHTML is an option name used in WithXHTML.
+const optXHTML renderer.OptionName = "XHTML"
+
+type withXHTML struct {
+}
+
+func (o *withXHTML) SetConfig(c *renderer.Config) {
+ c.Options[optXHTML] = true
+}
+
+func (o *withXHTML) SetHTMLOption(c *Config) {
+ c.XHTML = true
+}
+
+// WithXHTML is a functional option that indicates nodes should be rendered as
+// XHTML instead of HTML5.
+func WithXHTML() interface {
+ Option
+ renderer.Option
+} {
+ return &withXHTML{}
+}
+
+// Unsafe is an option name used in WithUnsafe.
+const optUnsafe renderer.OptionName = "Unsafe"
+
+type withUnsafe struct {
+}
+
+func (o *withUnsafe) SetConfig(c *renderer.Config) {
+ c.Options[optUnsafe] = true
+}
+
+func (o *withUnsafe) SetHTMLOption(c *Config) {
+ c.Unsafe = true
+}
+
+// WithUnsafe is a functional option that renders dangerous content
+// (raw HTML and potentially dangerous links) as is.
+func WithUnsafe() interface {
+ renderer.Option
+ Option
+} {
+ return &withUnsafe{}
+}
+
+// A Renderer struct is an implementation of renderer.NodeRenderer that renders
+// nodes as (X)HTML.
+type Renderer struct {
+ Config
+}
+
+// NewRenderer returns a new Renderer with given options.
+func NewRenderer(opts ...Option) renderer.NodeRenderer {
+ r := &Renderer{
+ Config: NewConfig(),
+ }
+
+ for _, opt := range opts {
+ opt.SetHTMLOption(&r.Config)
+ }
+ return r
+}
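+
+// For example (a usage sketch, not part of the upstream API documentation):
+//
+//	NewRenderer(WithHardWraps(), WithXHTML(), WithUnsafe())
+//
+// yields a NodeRenderer that renders soft line breaks as "<br />" and passes
+// raw HTML and dangerous links through unchanged.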
+
+// RegisterFuncs implements NodeRenderer.RegisterFuncs.
+func (r *Renderer) RegisterFuncs(reg renderer.NodeRendererFuncRegisterer) {
+ // blocks
+
+ reg.Register(ast.KindDocument, r.renderDocument)
+ reg.Register(ast.KindHeading, r.renderHeading)
+ reg.Register(ast.KindBlockquote, r.renderBlockquote)
+ reg.Register(ast.KindCodeBlock, r.renderCodeBlock)
+ reg.Register(ast.KindFencedCodeBlock, r.renderFencedCodeBlock)
+ reg.Register(ast.KindHTMLBlock, r.renderHTMLBlock)
+ reg.Register(ast.KindList, r.renderList)
+ reg.Register(ast.KindListItem, r.renderListItem)
+ reg.Register(ast.KindParagraph, r.renderParagraph)
+ reg.Register(ast.KindTextBlock, r.renderTextBlock)
+ reg.Register(ast.KindThematicBreak, r.renderThematicBreak)
+
+ // inlines
+
+ reg.Register(ast.KindAutoLink, r.renderAutoLink)
+ reg.Register(ast.KindCodeSpan, r.renderCodeSpan)
+ reg.Register(ast.KindEmphasis, r.renderEmphasis)
+ reg.Register(ast.KindImage, r.renderImage)
+ reg.Register(ast.KindLink, r.renderLink)
+ reg.Register(ast.KindRawHTML, r.renderRawHTML)
+ reg.Register(ast.KindText, r.renderText)
+ reg.Register(ast.KindString, r.renderString)
+}
+
+func (r *Renderer) writeLines(w util.BufWriter, source []byte, n ast.Node) {
+ l := n.Lines().Len()
+ for i := 0; i < l; i++ {
+ line := n.Lines().At(i)
+ r.Writer.RawWrite(w, line.Value(source))
+ }
+}
+
+// GlobalAttributeFilter defines attribute names which any element can have.
+var GlobalAttributeFilter = util.NewBytesFilter(
+ []byte("accesskey"),
+ []byte("autocapitalize"),
+ []byte("class"),
+ []byte("contenteditable"),
+ []byte("contextmenu"),
+ []byte("dir"),
+ []byte("draggable"),
+ []byte("dropzone"),
+ []byte("hidden"),
+ []byte("id"),
+ []byte("itemprop"),
+ []byte("lang"),
+ []byte("slot"),
+ []byte("spellcheck"),
+ []byte("style"),
+ []byte("tabindex"),
+ []byte("title"),
+ []byte("translate"),
+)
+
+func (r *Renderer) renderDocument(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
+ // nothing to do
+ return ast.WalkContinue, nil
+}
+
+// HeadingAttributeFilter defines attribute names which heading elements can have
+var HeadingAttributeFilter = GlobalAttributeFilter
+
+func (r *Renderer) renderHeading(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
+ n := node.(*ast.Heading)
+ if entering {
+ _, _ = w.WriteString("<h")
+ _ = w.WriteByte("0123456"[n.Level])
+ if n.Attributes() != nil {
+ RenderAttributes(w, node, HeadingAttributeFilter)
+ }
+ _ = w.WriteByte('>')
+ } else {
+ _, _ = w.WriteString("</h")
+ _ = w.WriteByte("0123456"[n.Level])
+ _, _ = w.WriteString(">\n")
+ }
+ return ast.WalkContinue, nil
+}
+
+// BlockquoteAttributeFilter defines attribute names which blockquote elements can have
+var BlockquoteAttributeFilter = GlobalAttributeFilter.Extend(
+ []byte("cite"),
+)
+
+func (r *Renderer) renderBlockquote(w util.BufWriter, source []byte, n ast.Node, entering bool) (ast.WalkStatus, error) {
+ if entering {
+ if n.Attributes() != nil {
+ _, _ = w.WriteString("<blockquote")
+ RenderAttributes(w, n, BlockquoteAttributeFilter)
+ _ = w.WriteByte('>')
+ } else {
+ _, _ = w.WriteString("<blockquote>\n")
+ }
+ } else {
+ _, _ = w.WriteString("</blockquote>\n")
+ }
+ return ast.WalkContinue, nil
+}
+
+func (r *Renderer) renderCodeBlock(w util.BufWriter, source []byte, n ast.Node, entering bool) (ast.WalkStatus, error) {
+ if entering {
+ _, _ = w.WriteString("<pre><code>")
+ r.writeLines(w, source, n)
+ } else {
+ _, _ = w.WriteString("</code></pre>\n")
+ }
+ return ast.WalkContinue, nil
+}
+
+func (r *Renderer) renderFencedCodeBlock(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
+ n := node.(*ast.FencedCodeBlock)
+ if entering {
+ _, _ = w.WriteString("<pre><code")
+ language := n.Language(source)
+ if language != nil {
+ _, _ = w.WriteString(" class=\"language-")
+ r.Writer.Write(w, language)
+ _, _ = w.WriteString("\"")
+ }
+ _ = w.WriteByte('>')
+ r.writeLines(w, source, n)
+ } else {
+ _, _ = w.WriteString("</code></pre>\n")
+ }
+ return ast.WalkContinue, nil
+}
+
+func (r *Renderer) renderHTMLBlock(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
+ n := node.(*ast.HTMLBlock)
+ if entering {
+ if r.Unsafe {
+ l := n.Lines().Len()
+ for i := 0; i < l; i++ {
+ line := n.Lines().At(i)
+ _, _ = w.Write(line.Value(source))
+ }
+ } else {
+ _, _ = w.WriteString("<!-- raw HTML omitted -->\n")
+ }
+ } else {
+ if n.HasClosure() {
+ if r.Unsafe {
+ closure := n.ClosureLine
+ _, _ = w.Write(closure.Value(source))
+ } else {
+ _, _ = w.WriteString("<!-- raw HTML omitted -->\n")
+ }
+ }
+ }
+ return ast.WalkContinue, nil
+}
+
+// ListAttributeFilter defines attribute names which list elements can have.
+var ListAttributeFilter = GlobalAttributeFilter.Extend(
+ []byte("start"),
+ []byte("reversed"),
+)
+
+func (r *Renderer) renderList(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
+ n := node.(*ast.List)
+ tag := "ul"
+ if n.IsOrdered() {
+ tag = "ol"
+ }
+ if entering {
+ _ = w.WriteByte('<')
+ _, _ = w.WriteString(tag)
+ if n.IsOrdered() && n.Start != 1 {
+ fmt.Fprintf(w, " start=\"%d\"", n.Start)
+ }
+ if n.Attributes() != nil {
+ RenderAttributes(w, n, ListAttributeFilter)
+ }
+ _, _ = w.WriteString(">\n")
+ } else {
+ _, _ = w.WriteString("</")
+ _, _ = w.WriteString(tag)
+ _, _ = w.WriteString(">\n")
+ }
+ return ast.WalkContinue, nil
+}
+
+// ListItemAttributeFilter defines attribute names which list item elements can have.
+var ListItemAttributeFilter = GlobalAttributeFilter.Extend(
+ []byte("value"),
+)
+
+func (r *Renderer) renderListItem(w util.BufWriter, source []byte, n ast.Node, entering bool) (ast.WalkStatus, error) {
+ if entering {
+ if n.Attributes() != nil {
+ _, _ = w.WriteString("<li")
+ RenderAttributes(w, n, ListItemAttributeFilter)
+ _ = w.WriteByte('>')
+ } else {
+ _, _ = w.WriteString("<li>")
+ }
+ fc := n.FirstChild()
+ if fc != nil {
+ if _, ok := fc.(*ast.TextBlock); !ok {
+ _ = w.WriteByte('\n')
+ }
+ }
+ } else {
+ _, _ = w.WriteString("</li>\n")
+ }
+ return ast.WalkContinue, nil
+}
+
+// ParagraphAttributeFilter defines attribute names which paragraph elements can have.
+var ParagraphAttributeFilter = GlobalAttributeFilter
+
+func (r *Renderer) renderParagraph(w util.BufWriter, source []byte, n ast.Node, entering bool) (ast.WalkStatus, error) {
+ if entering {
+ if n.Attributes() != nil {
+ _, _ = w.WriteString("<p")
+ RenderAttributes(w, n, ParagraphAttributeFilter)
+ _ = w.WriteByte('>')
+ } else {
+ _, _ = w.WriteString("<p>")
+ }
+ } else {
+ _, _ = w.WriteString("</p>\n")
+ }
+ return ast.WalkContinue, nil
+}
+
+func (r *Renderer) renderTextBlock(w util.BufWriter, source []byte, n ast.Node, entering bool) (ast.WalkStatus, error) {
+ if !entering {
+ if _, ok := n.NextSibling().(ast.Node); ok && n.FirstChild() != nil {
+ _ = w.WriteByte('\n')
+ }
+ }
+ return ast.WalkContinue, nil
+}
+
+// ThematicAttributeFilter defines attribute names which hr elements can have.
+var ThematicAttributeFilter = GlobalAttributeFilter.Extend(
+ []byte("align"), // [Deprecated]
+ []byte("color"), // [Not Standardized]
+ []byte("noshade"), // [Deprecated]
+ []byte("size"), // [Deprecated]
+ []byte("width"), // [Deprecated]
+)
+
+func (r *Renderer) renderThematicBreak(w util.BufWriter, source []byte, n ast.Node, entering bool) (ast.WalkStatus, error) {
+ if !entering {
+ return ast.WalkContinue, nil
+ }
+ _, _ = w.WriteString("<hr")
+ if n.Attributes() != nil {
+ RenderAttributes(w, n, ThematicAttributeFilter)
+ }
+ if r.XHTML {
+ _, _ = w.WriteString(" />\n")
+ } else {
+ _, _ = w.WriteString(">\n")
+ }
+ return ast.WalkContinue, nil
+}
+
+// LinkAttributeFilter defines attribute names which link elements can have.
+var LinkAttributeFilter = GlobalAttributeFilter.Extend(
+ []byte("download"),
+ // []byte("href"),
+ []byte("hreflang"),
+ []byte("media"),
+ []byte("ping"),
+ []byte("referrerpolicy"),
+ []byte("rel"),
+ []byte("shape"),
+ []byte("target"),
+)
+
+func (r *Renderer) renderAutoLink(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
+ n := node.(*ast.AutoLink)
+ if !entering {
+ return ast.WalkContinue, nil
+ }
+ _, _ = w.WriteString(`<a href="`)
+ url := n.URL(source)
+ label := n.Label(source)
+ if n.AutoLinkType == ast.AutoLinkEmail && !bytes.HasPrefix(bytes.ToLower(url), []byte("mailto:")) {
+ _, _ = w.WriteString("mailto:")
+ }
+ _, _ = w.Write(util.EscapeHTML(util.URLEscape(url, false)))
+ if n.Attributes() != nil {
+ _ = w.WriteByte('"')
+ RenderAttributes(w, n, LinkAttributeFilter)
+ _ = w.WriteByte('>')
+ } else {
+ _, _ = w.WriteString(`">`)
+ }
+ _, _ = w.Write(util.EscapeHTML(label))
+ _, _ = w.WriteString(`</a>`)
+ return ast.WalkContinue, nil
+}
+
+// CodeAttributeFilter defines attribute names which code elements can have.
+var CodeAttributeFilter = GlobalAttributeFilter
+
+func (r *Renderer) renderCodeSpan(w util.BufWriter, source []byte, n ast.Node, entering bool) (ast.WalkStatus, error) {
+ if entering {
+ if n.Attributes() != nil {
+ _, _ = w.WriteString("<code")
+ RenderAttributes(w, n, CodeAttributeFilter)
+ _ = w.WriteByte('>')
+ } else {
+ _, _ = w.WriteString("<code>")
+ }
+ for c := n.FirstChild(); c != nil; c = c.NextSibling() {
+ segment := c.(*ast.Text).Segment
+ value := segment.Value(source)
+ if bytes.HasSuffix(value, []byte("\n")) {
+ r.Writer.RawWrite(w, value[:len(value)-1])
+ if c != n.LastChild() {
+ r.Writer.RawWrite(w, []byte(" "))
+ }
+ } else {
+ r.Writer.RawWrite(w, value)
+ }
+ }
+ return ast.WalkSkipChildren, nil
+ }
+ _, _ = w.WriteString("</code>")
+ return ast.WalkContinue, nil
+}
+
+// EmphasisAttributeFilter defines attribute names which emphasis elements can have.
+var EmphasisAttributeFilter = GlobalAttributeFilter
+
+func (r *Renderer) renderEmphasis(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
+ n := node.(*ast.Emphasis)
+ tag := "em"
+ if n.Level == 2 {
+ tag = "strong"
+ }
+ if entering {
+ _ = w.WriteByte('<')
+ _, _ = w.WriteString(tag)
+ if n.Attributes() != nil {
+ RenderAttributes(w, n, EmphasisAttributeFilter)
+ }
+ _ = w.WriteByte('>')
+ } else {
+ _, _ = w.WriteString("</")
+ _, _ = w.WriteString(tag)
+ _ = w.WriteByte('>')
+ }
+ return ast.WalkContinue, nil
+}
+
+func (r *Renderer) renderLink(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
+ n := node.(*ast.Link)
+ if entering {
+ _, _ = w.WriteString("<a href=\"")
+ if r.Unsafe || !IsDangerousURL(n.Destination) {
+ _, _ = w.Write(util.EscapeHTML(util.URLEscape(n.Destination, true)))
+ }
+ _ = w.WriteByte('"')
+ if n.Title != nil {
+ _, _ = w.WriteString(` title="`)
+ r.Writer.Write(w, n.Title)
+ _ = w.WriteByte('"')
+ }
+ if n.Attributes() != nil {
+ RenderAttributes(w, n, LinkAttributeFilter)
+ }
+ _ = w.WriteByte('>')
+ } else {
+ _, _ = w.WriteString("</a>")
+ }
+ return ast.WalkContinue, nil
+}
+
+// ImageAttributeFilter defines attribute names which image elements can have.
+var ImageAttributeFilter = GlobalAttributeFilter.Extend(
+ []byte("align"),
+ []byte("border"),
+ []byte("crossorigin"),
+ []byte("decoding"),
+ []byte("height"),
+ []byte("importance"),
+ []byte("intrinsicsize"),
+ []byte("ismap"),
+ []byte("loading"),
+ []byte("referrerpolicy"),
+ []byte("sizes"),
+ []byte("srcset"),
+ []byte("usemap"),
+ []byte("width"),
+)
+
+func (r *Renderer) renderImage(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
+ if !entering {
+ return ast.WalkContinue, nil
+ }
+ n := node.(*ast.Image)
+ _, _ = w.WriteString("<img src=\"")
+ if r.Unsafe || !IsDangerousURL(n.Destination) {
+ _, _ = w.Write(util.EscapeHTML(util.URLEscape(n.Destination, true)))
+ }
+ _, _ = w.WriteString(`" alt="`)
+ _, _ = w.Write(n.Text(source))
+ _ = w.WriteByte('"')
+ if n.Title != nil {
+ _, _ = w.WriteString(` title="`)
+ r.Writer.Write(w, n.Title)
+ _ = w.WriteByte('"')
+ }
+ if n.Attributes() != nil {
+ RenderAttributes(w, n, ImageAttributeFilter)
+ }
+ if r.XHTML {
+ _, _ = w.WriteString(" />")
+ } else {
+ _, _ = w.WriteString(">")
+ }
+ return ast.WalkSkipChildren, nil
+}
+
+func (r *Renderer) renderRawHTML(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
+ if !entering {
+ return ast.WalkSkipChildren, nil
+ }
+ if r.Unsafe {
+ n := node.(*ast.RawHTML)
+ l := n.Segments.Len()
+ for i := 0; i < l; i++ {
+ segment := n.Segments.At(i)
+ _, _ = w.Write(segment.Value(source))
+ }
+ return ast.WalkSkipChildren, nil
+ }
+ _, _ = w.WriteString("<!-- raw HTML omitted -->")
+ return ast.WalkSkipChildren, nil
+}
+
+func (r *Renderer) renderText(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
+ if !entering {
+ return ast.WalkContinue, nil
+ }
+ n := node.(*ast.Text)
+ segment := n.Segment
+ if n.IsRaw() {
+ r.Writer.RawWrite(w, segment.Value(source))
+ } else {
+ r.Writer.Write(w, segment.Value(source))
+ if n.HardLineBreak() || (n.SoftLineBreak() && r.HardWraps) {
+ if r.XHTML {
+ _, _ = w.WriteString("<br />\n")
+ } else {
+ _, _ = w.WriteString("<br>\n")
+ }
+ } else if n.SoftLineBreak() {
+ _ = w.WriteByte('\n')
+ }
+ }
+ return ast.WalkContinue, nil
+}
+
+func (r *Renderer) renderString(w util.BufWriter, source []byte, node ast.Node, entering bool) (ast.WalkStatus, error) {
+ if !entering {
+ return ast.WalkContinue, nil
+ }
+ n := node.(*ast.String)
+ if n.IsCode() {
+ _, _ = w.Write(n.Value)
+ } else {
+ if n.IsRaw() {
+ r.Writer.RawWrite(w, n.Value)
+ } else {
+ r.Writer.Write(w, n.Value)
+ }
+ }
+ return ast.WalkContinue, nil
+}
+
+var dataPrefix = []byte("data-")
+
+// RenderAttributes renders the given node's attributes.
+// You can specify attribute names to render with the filter.
+// If filter is nil, RenderAttributes renders all attributes.
+// Attributes whose names start with "data-" are always rendered.
+func RenderAttributes(w util.BufWriter, node ast.Node, filter util.BytesFilter) {
+ for _, attr := range node.Attributes() {
+ if filter != nil && !filter.Contains(attr.Name) {
+ if !bytes.HasPrefix(attr.Name, dataPrefix) {
+ continue
+ }
+ }
+ _, _ = w.WriteString(" ")
+ _, _ = w.Write(attr.Name)
+ _, _ = w.WriteString(`="`)
+ // TODO: convert numeric values to strings
+ _, _ = w.Write(util.EscapeHTML(attr.Value.([]byte)))
+ _ = w.WriteByte('"')
+ }
+}
+
+// A Writer interface writes textual contents to a writer.
+type Writer interface {
+	// Write writes the given source to writer, resolving references and unescaping
+	// backslash escaped characters.
+ Write(writer util.BufWriter, source []byte)
+
+	// RawWrite writes the given source to writer without resolving references and
+	// unescaping backslash escaped characters.
+ RawWrite(writer util.BufWriter, source []byte)
+}
+
+type defaultWriter struct {
+}
+
+func escapeRune(writer util.BufWriter, r rune) {
+ if r < 256 {
+ v := util.EscapeHTMLByte(byte(r))
+ if v != nil {
+ _, _ = writer.Write(v)
+ return
+ }
+ }
+ _, _ = writer.WriteRune(util.ToValidRune(r))
+}
+
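+// RawWrite copies source to the writer, HTML-escaping special bytes but
+// performing no reference resolution or backslash unescaping.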
+func (d *defaultWriter) RawWrite(writer util.BufWriter, source []byte) {
+ n := 0
+ l := len(source)
+ for i := 0; i < l; i++ {
+ v := util.EscapeHTMLByte(source[i])
+ if v != nil {
+ _, _ = writer.Write(source[i-n : i])
+ n = 0
+ _, _ = writer.Write(v)
+ continue
+ }
+ n++
+ }
+ if n != 0 {
+ _, _ = writer.Write(source[l-n:])
+ }
+}
+
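+// Write copies source to the writer, dropping backslashes that escape
+// punctuation and replacing HTML entity references and numeric character
+// references with the characters they denote; everything else goes through
+// RawWrite, which HTML-escapes special bytes.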
+func (d *defaultWriter) Write(writer util.BufWriter, source []byte) {
+ escaped := false
+ var ok bool
+ limit := len(source)
+ n := 0
+ for i := 0; i < limit; i++ {
+ c := source[i]
+ if escaped {
+ if util.IsPunct(c) {
+ d.RawWrite(writer, source[n:i-1])
+ n = i
+ escaped = false
+ continue
+ }
+ }
+ if c == '&' {
+ pos := i
+ next := i + 1
+ if next < limit && source[next] == '#' {
+ nnext := next + 1
+ if nnext < limit {
+ nc := source[nnext]
+ // code point like #x22;
+ if nnext < limit && nc == 'x' || nc == 'X' {
+ start := nnext + 1
+ i, ok = util.ReadWhile(source, [2]int{start, limit}, util.IsHexDecimal)
+ if ok && i < limit && source[i] == ';' {
+ v, _ := strconv.ParseUint(util.BytesToReadOnlyString(source[start:i]), 16, 32)
+ d.RawWrite(writer, source[n:pos])
+ n = i + 1
+ escapeRune(writer, rune(v))
+ continue
+ }
+ // code point like #1234;
+ } else if nc >= '0' && nc <= '9' {
+ start := nnext
+ i, ok = util.ReadWhile(source, [2]int{start, limit}, util.IsNumeric)
+ if ok && i < limit && i-start < 8 && source[i] == ';' {
+ v, _ := strconv.ParseUint(util.BytesToReadOnlyString(source[start:i]), 0, 32)
+ d.RawWrite(writer, source[n:pos])
+ n = i + 1
+ escapeRune(writer, rune(v))
+ continue
+ }
+ }
+ }
+ } else {
+ start := next
+ i, ok = util.ReadWhile(source, [2]int{start, limit}, util.IsAlphaNumeric)
+ // entity reference
+ if ok && i < limit && source[i] == ';' {
+ name := util.BytesToReadOnlyString(source[start:i])
+ entity, ok := util.LookUpHTML5EntityByName(name)
+ if ok {
+ d.RawWrite(writer, source[n:pos])
+ n = i + 1
+ d.RawWrite(writer, entity.Characters)
+ continue
+ }
+ }
+ }
+ i = next - 1
+ }
+ if c == '\\' {
+ escaped = true
+ continue
+ }
+ escaped = false
+ }
+ d.RawWrite(writer, source[n:])
+}
+
+// DefaultWriter is a default implementation of the Writer.
+var DefaultWriter = &defaultWriter{}
+
+var bDataImage = []byte("data:image/")
+var bPng = []byte("png;")
+var bGif = []byte("gif;")
+var bJpeg = []byte("jpeg;")
+var bWebp = []byte("webp;")
+var bJs = []byte("javascript:")
+var bVb = []byte("vbscript:")
+var bFile = []byte("file:")
+var bData = []byte("data:")
+
+// IsDangerousURL returns true if the given URL looks potentially dangerous,
+// otherwise false.
+func IsDangerousURL(url []byte) bool {
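+	// Only data:image/{png,gif,jpeg,webp} URLs are considered safe; any other
+	// data:, javascript:, vbscript: or file: URL is reported as dangerous.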
+ if bytes.HasPrefix(url, bDataImage) && len(url) >= 11 {
+ v := url[11:]
+ if bytes.HasPrefix(v, bPng) || bytes.HasPrefix(v, bGif) ||
+ bytes.HasPrefix(v, bJpeg) || bytes.HasPrefix(v, bWebp) {
+ return false
+ }
+ return true
+ }
+ return bytes.HasPrefix(url, bJs) || bytes.HasPrefix(url, bVb) ||
+ bytes.HasPrefix(url, bFile) || bytes.HasPrefix(url, bData)
+}
--- /dev/null
+// Package renderer renders the given AST to certain formats.
+package renderer
+
+import (
+ "bufio"
+ "io"
+ "sync"
+
+ "github.com/yuin/goldmark/ast"
+ "github.com/yuin/goldmark/util"
+)
+
+// A Config struct is a data structure that holds configuration of the Renderer.
+type Config struct {
+ Options map[OptionName]interface{}
+ NodeRenderers util.PrioritizedSlice
+}
+
+// NewConfig returns a new Config
+func NewConfig() *Config {
+ return &Config{
+ Options: map[OptionName]interface{}{},
+ NodeRenderers: util.PrioritizedSlice{},
+ }
+}
+
+// An OptionName is a name of the option.
+type OptionName string
+
+// An Option interface is a functional option type for the Renderer.
+type Option interface {
+ SetConfig(*Config)
+}
+
+type withNodeRenderers struct {
+ value []util.PrioritizedValue
+}
+
+func (o *withNodeRenderers) SetConfig(c *Config) {
+ c.NodeRenderers = append(c.NodeRenderers, o.value...)
+}
+
+// WithNodeRenderers is a functional option that allows you to add
+// NodeRenderers to the renderer.
+func WithNodeRenderers(ps ...util.PrioritizedValue) Option {
+ return &withNodeRenderers{ps}
+}
+
+type withOption struct {
+ name OptionName
+ value interface{}
+}
+
+func (o *withOption) SetConfig(c *Config) {
+ c.Options[o.name] = o.value
+}
+
+// WithOption is a functional option that allows you to set
+// an arbitrary option on the renderer.
+func WithOption(name OptionName, value interface{}) Option {
+ return &withOption{name, value}
+}
+
+// A SetOptioner interface sets given option to the object.
+type SetOptioner interface {
+ // SetOption sets given option to the object.
+ // Unacceptable options may be passed.
+ // Thus implementations must ignore unacceptable options.
+ SetOption(name OptionName, value interface{})
+}
+
+// NodeRendererFunc is a function that renders a given node.
+type NodeRendererFunc func(writer util.BufWriter, source []byte, n ast.Node, entering bool) (ast.WalkStatus, error)
+
+// A NodeRenderer interface offers NodeRendererFuncs.
+type NodeRenderer interface {
+	// RegisterFuncs registers NodeRendererFuncs to the given NodeRendererFuncRegisterer.
+ RegisterFuncs(NodeRendererFuncRegisterer)
+}
+
+// A NodeRendererFuncRegisterer registers NodeRendererFuncs.
+type NodeRendererFuncRegisterer interface {
+ // Register registers given NodeRendererFunc to this object.
+ Register(ast.NodeKind, NodeRendererFunc)
+}
+
+// A Renderer interface renders a given AST node to a given
+// writer.
+type Renderer interface {
+ Render(w io.Writer, source []byte, n ast.Node) error
+
+ // AddOptions adds given option to this renderer.
+ AddOptions(...Option)
+}
+
+type renderer struct {
+ config *Config
+ options map[OptionName]interface{}
+ nodeRendererFuncsTmp map[ast.NodeKind]NodeRendererFunc
+ maxKind int
+ nodeRendererFuncs []NodeRendererFunc
+ initSync sync.Once
+}
+
+// NewRenderer returns a new Renderer with given options.
+func NewRenderer(options ...Option) Renderer {
+ config := NewConfig()
+ for _, opt := range options {
+ opt.SetConfig(config)
+ }
+
+ r := &renderer{
+ options: map[OptionName]interface{}{},
+ config: config,
+ nodeRendererFuncsTmp: map[ast.NodeKind]NodeRendererFunc{},
+ }
+
+ return r
+}
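+
+// A minimal usage sketch (assuming the html sub-package vendored in this change):
+//
+//	r := NewRenderer(WithNodeRenderers(util.Prioritized(html.NewRenderer(), 1000)))
+//	err := r.Render(w, source, doc)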
+
+func (r *renderer) AddOptions(opts ...Option) {
+ for _, opt := range opts {
+ opt.SetConfig(r.config)
+ }
+}
+
+func (r *renderer) Register(kind ast.NodeKind, v NodeRendererFunc) {
+ r.nodeRendererFuncsTmp[kind] = v
+ if int(kind) > r.maxKind {
+ r.maxKind = int(kind)
+ }
+}
+
+// Render renders the given AST node to the given writer with the given Renderer.
+func (r *renderer) Render(w io.Writer, source []byte, n ast.Node) error {
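+	// The first call builds the dispatch table: node renderers are sorted by
+	// priority, receive the configured options, and register their
+	// NodeRendererFuncs indexed by ast.NodeKind.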
+ r.initSync.Do(func() {
+ r.options = r.config.Options
+ r.config.NodeRenderers.Sort()
+ l := len(r.config.NodeRenderers)
+ for i := l - 1; i >= 0; i-- {
+ v := r.config.NodeRenderers[i]
+ nr, _ := v.Value.(NodeRenderer)
+ if se, ok := v.Value.(SetOptioner); ok {
+ for oname, ovalue := range r.options {
+ se.SetOption(oname, ovalue)
+ }
+ }
+ nr.RegisterFuncs(r)
+ }
+ r.nodeRendererFuncs = make([]NodeRendererFunc, r.maxKind+1)
+ for kind, nr := range r.nodeRendererFuncsTmp {
+ r.nodeRendererFuncs[kind] = nr
+ }
+ r.config = nil
+ r.nodeRendererFuncsTmp = nil
+ })
+ writer, ok := w.(util.BufWriter)
+ if !ok {
+ writer = bufio.NewWriter(w)
+ }
+ err := ast.Walk(n, func(n ast.Node, entering bool) (ast.WalkStatus, error) {
+ s := ast.WalkStatus(ast.WalkContinue)
+ var err error
+ f := r.nodeRendererFuncs[n.Kind()]
+ if f != nil {
+ s, err = f(writer, source, n, entering)
+ }
+ return s, err
+ })
+ if err != nil {
+ return err
+ }
+ return writer.Flush()
+}
--- /dev/null
+package text
+
+import (
+ "io"
+ "regexp"
+ "unicode/utf8"
+
+ "github.com/yuin/goldmark/util"
+)
+
+const invalidValue = -1
+
+// EOF indicates the end of file.
+const EOF = byte(0xff)
+
+// A Reader interface provides abstracted methods for reading text.
+type Reader interface {
+ io.RuneReader
+
+ // Source returns a source of the reader.
+ Source() []byte
+
+ // ResetPosition resets positions.
+ ResetPosition()
+
+ // Peek returns a byte at current position without advancing the internal pointer.
+ Peek() byte
+
+ // PeekLine returns the current line without advancing the internal pointer.
+ PeekLine() ([]byte, Segment)
+
+ // PrecendingCharacter returns a character just before current internal pointer.
+ PrecendingCharacter() rune
+
+ // Value returns a value of the given segment.
+ Value(Segment) []byte
+
+ // LineOffset returns a distance from the line head to current position.
+ LineOffset() int
+
+ // Position returns current line number and position.
+ Position() (int, Segment)
+
+ // SetPosition sets current line number and position.
+ SetPosition(int, Segment)
+
+ // SetPadding sets padding to the reader.
+ SetPadding(int)
+
+ // Advance advances the internal pointer.
+ Advance(int)
+
+	// AdvanceAndSetPadding advances the internal pointer and adds padding to the
+	// reader.
+ AdvanceAndSetPadding(int, int)
+
+ // AdvanceLine advances the internal pointer to the next line head.
+ AdvanceLine()
+
+ // SkipSpaces skips space characters and returns a non-blank line.
+ // If it reaches EOF, returns false.
+ SkipSpaces() (Segment, int, bool)
+
+	// SkipBlankLines skips blank lines and returns a non-blank line.
+	// If it reaches EOF, returns false.
+ SkipBlankLines() (Segment, int, bool)
+
+	// Match performs regular expression matching against the current line.
+ Match(reg *regexp.Regexp) bool
+
+	// FindSubMatch performs regular expression searching against the current line.
+ FindSubMatch(reg *regexp.Regexp) [][]byte
+}
+
+type reader struct {
+ source []byte
+ sourceLength int
+ line int
+ peekedLine []byte
+ pos Segment
+ head int
+ lineOffset int
+}
+
+// NewReader returns a new Reader that can read UTF-8 bytes.
+func NewReader(source []byte) Reader {
+ r := &reader{
+ source: source,
+ sourceLength: len(source),
+ }
+ r.ResetPosition()
+ return r
+}
+
+func (r *reader) ResetPosition() {
+ r.line = -1
+ r.head = 0
+ r.lineOffset = -1
+ r.AdvanceLine()
+}
+
+func (r *reader) Source() []byte {
+ return r.source
+}
+
+func (r *reader) Value(seg Segment) []byte {
+ return seg.Value(r.source)
+}
+
+func (r *reader) Peek() byte {
+ if r.pos.Start >= 0 && r.pos.Start < r.sourceLength {
+ if r.pos.Padding != 0 {
+ return space[0]
+ }
+ return r.source[r.pos.Start]
+ }
+ return EOF
+}
+
+func (r *reader) PeekLine() ([]byte, Segment) {
+ if r.pos.Start >= 0 && r.pos.Start < r.sourceLength {
+ if r.peekedLine == nil {
+ r.peekedLine = r.pos.Value(r.Source())
+ }
+ return r.peekedLine, r.pos
+ }
+ return nil, r.pos
+}
+
+// io.RuneReader interface
+func (r *reader) ReadRune() (rune, int, error) {
+ return readRuneReader(r)
+}
+
+func (r *reader) LineOffset() int {
+ if r.lineOffset < 0 {
+ v := 0
+ for i := r.head; i < r.pos.Start; i++ {
+ if r.source[i] == '\t' {
+ v += util.TabWidth(v)
+ } else {
+ v++
+ }
+ }
+ r.lineOffset = v - r.pos.Padding
+ }
+ return r.lineOffset
+}
+
+func (r *reader) PrecendingCharacter() rune {
+ if r.pos.Start <= 0 {
+ if r.pos.Padding != 0 {
+ return rune(' ')
+ }
+ return rune('\n')
+ }
+ i := r.pos.Start - 1
+ for ; i >= 0; i-- {
+ if utf8.RuneStart(r.source[i]) {
+ break
+ }
+ }
+ rn, _ := utf8.DecodeRune(r.source[i:])
+ return rn
+}
+
+func (r *reader) Advance(n int) {
+ r.lineOffset = -1
+ if n < len(r.peekedLine) && r.pos.Padding == 0 {
+ r.pos.Start += n
+ r.peekedLine = nil
+ return
+ }
+ r.peekedLine = nil
+ l := r.sourceLength
+ for ; n > 0 && r.pos.Start < l; n-- {
+ if r.pos.Padding != 0 {
+ r.pos.Padding--
+ continue
+ }
+ if r.source[r.pos.Start] == '\n' {
+ r.AdvanceLine()
+ continue
+ }
+ r.pos.Start++
+ }
+}
+
+func (r *reader) AdvanceAndSetPadding(n, padding int) {
+ r.Advance(n)
+ if padding > r.pos.Padding {
+ r.SetPadding(padding)
+ }
+}
+
+func (r *reader) AdvanceLine() {
+ r.lineOffset = -1
+ r.peekedLine = nil
+ r.pos.Start = r.pos.Stop
+ r.head = r.pos.Start
+ if r.pos.Start < 0 {
+ return
+ }
+ r.pos.Stop = r.sourceLength
+ for i := r.pos.Start; i < r.sourceLength; i++ {
+ c := r.source[i]
+ if c == '\n' {
+ r.pos.Stop = i + 1
+ break
+ }
+ }
+ r.line++
+ r.pos.Padding = 0
+}
+
+func (r *reader) Position() (int, Segment) {
+ return r.line, r.pos
+}
+
+func (r *reader) SetPosition(line int, pos Segment) {
+ r.lineOffset = -1
+ r.line = line
+ r.pos = pos
+}
+
+func (r *reader) SetPadding(v int) {
+ r.pos.Padding = v
+}
+
+func (r *reader) SkipSpaces() (Segment, int, bool) {
+ return skipSpacesReader(r)
+}
+
+func (r *reader) SkipBlankLines() (Segment, int, bool) {
+ return skipBlankLinesReader(r)
+}
+
+func (r *reader) Match(reg *regexp.Regexp) bool {
+ return matchReader(r, reg)
+}
+
+func (r *reader) FindSubMatch(reg *regexp.Regexp) [][]byte {
+ return findSubMatchReader(r, reg)
+}
+
+// A BlockReader interface is a reader that is optimized for Blocks.
+type BlockReader interface {
+ Reader
+ // Reset resets current state and sets new segments to the reader.
+ Reset(segment *Segments)
+}
+
+type blockReader struct {
+ source []byte
+ segments *Segments
+ segmentsLength int
+ line int
+ pos Segment
+ head int
+ last int
+ lineOffset int
+}
+
+// NewBlockReader returns a new BlockReader.
+func NewBlockReader(source []byte, segments *Segments) BlockReader {
+ r := &blockReader{
+ source: source,
+ }
+ if segments != nil {
+ r.Reset(segments)
+ }
+ return r
+}
+
+func (r *blockReader) ResetPosition() {
+ r.line = -1
+ r.head = 0
+ r.last = 0
+ r.lineOffset = -1
+ r.pos.Start = -1
+ r.pos.Stop = -1
+ r.pos.Padding = 0
+ if r.segmentsLength > 0 {
+ last := r.segments.At(r.segmentsLength - 1)
+ r.last = last.Stop
+ }
+ r.AdvanceLine()
+}
+
+func (r *blockReader) Reset(segments *Segments) {
+ r.segments = segments
+ r.segmentsLength = segments.Len()
+ r.ResetPosition()
+}
+
+func (r *blockReader) Source() []byte {
+ return r.source
+}
+
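+// Value reconstructs the bytes covered by seg, which may span several of the
+// block's segments; the padding of each crossed segment is expanded to spaces.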
+func (r *blockReader) Value(seg Segment) []byte {
+ line := r.segmentsLength - 1
+ ret := make([]byte, 0, seg.Stop-seg.Start+1)
+ for ; line >= 0; line-- {
+ if seg.Start >= r.segments.At(line).Start {
+ break
+ }
+ }
+ i := seg.Start
+ for ; line < r.segmentsLength; line++ {
+ s := r.segments.At(line)
+ if i < 0 {
+ i = s.Start
+ }
+ ret = s.ConcatPadding(ret)
+ for ; i < seg.Stop && i < s.Stop; i++ {
+ ret = append(ret, r.source[i])
+ }
+ i = -1
+ if s.Stop > seg.Stop {
+ break
+ }
+ }
+ return ret
+}
+
+// io.RuneReader interface
+func (r *blockReader) ReadRune() (rune, int, error) {
+ return readRuneReader(r)
+}
+
+func (r *blockReader) PrecendingCharacter() rune {
+ if r.pos.Padding != 0 {
+ return rune(' ')
+ }
+ if r.pos.Start <= 0 {
+ return rune('\n')
+ }
+ l := len(r.source)
+ i := r.pos.Start - 1
+ for ; i < l && i >= 0; i-- {
+ if utf8.RuneStart(r.source[i]) {
+ break
+ }
+ }
+ if i < 0 || i >= l {
+ return rune('\n')
+ }
+ rn, _ := utf8.DecodeRune(r.source[i:])
+ return rn
+}
+
+func (r *blockReader) LineOffset() int {
+ if r.lineOffset < 0 {
+ v := 0
+ for i := r.head; i < r.pos.Start; i++ {
+ if r.source[i] == '\t' {
+ v += util.TabWidth(v)
+ } else {
+ v++
+ }
+ }
+ r.lineOffset = v - r.pos.Padding
+ }
+ return r.lineOffset
+}
+
+func (r *blockReader) Peek() byte {
+ if r.line < r.segmentsLength && r.pos.Start >= 0 && r.pos.Start < r.last {
+ if r.pos.Padding != 0 {
+ return space[0]
+ }
+ return r.source[r.pos.Start]
+ }
+ return EOF
+}
+
+func (r *blockReader) PeekLine() ([]byte, Segment) {
+ if r.line < r.segmentsLength && r.pos.Start >= 0 && r.pos.Start < r.last {
+ return r.pos.Value(r.source), r.pos
+ }
+ return nil, r.pos
+}
+
+func (r *blockReader) Advance(n int) {
+ r.lineOffset = -1
+
+ if n < r.pos.Stop-r.pos.Start && r.pos.Padding == 0 {
+ r.pos.Start += n
+ return
+ }
+
+ for ; n > 0; n-- {
+ if r.pos.Padding != 0 {
+ r.pos.Padding--
+ continue
+ }
+ if r.pos.Start >= r.pos.Stop-1 && r.pos.Stop < r.last {
+ r.AdvanceLine()
+ continue
+ }
+ r.pos.Start++
+ }
+}
+
+func (r *blockReader) AdvanceAndSetPadding(n, padding int) {
+ r.Advance(n)
+ if padding > r.pos.Padding {
+ r.SetPadding(padding)
+ }
+}
+
+func (r *blockReader) AdvanceLine() {
+ r.SetPosition(r.line+1, NewSegment(invalidValue, invalidValue))
+ r.head = r.pos.Start
+}
+
+func (r *blockReader) Position() (int, Segment) {
+ return r.line, r.pos
+}
+
+func (r *blockReader) SetPosition(line int, pos Segment) {
+ r.lineOffset = -1
+ r.line = line
+ if pos.Start == invalidValue {
+ if r.line < r.segmentsLength {
+ s := r.segments.At(line)
+ r.head = s.Start
+ r.pos = s
+ }
+ } else {
+ r.pos = pos
+ if r.line < r.segmentsLength {
+ s := r.segments.At(line)
+ r.head = s.Start
+ }
+ }
+}
+
+func (r *blockReader) SetPadding(v int) {
+ r.lineOffset = -1
+ r.pos.Padding = v
+}
+
+func (r *blockReader) SkipSpaces() (Segment, int, bool) {
+ return skipSpacesReader(r)
+}
+
+func (r *blockReader) SkipBlankLines() (Segment, int, bool) {
+ return skipBlankLinesReader(r)
+}
+
+func (r *blockReader) Match(reg *regexp.Regexp) bool {
+ return matchReader(r, reg)
+}
+
+func (r *blockReader) FindSubMatch(reg *regexp.Regexp) [][]byte {
+ return findSubMatchReader(r, reg)
+}
+
+func skipBlankLinesReader(r Reader) (Segment, int, bool) {
+ lines := 0
+ for {
+ line, seg := r.PeekLine()
+ if line == nil {
+ return seg, lines, false
+ }
+ if util.IsBlank(line) {
+ lines++
+ r.AdvanceLine()
+ } else {
+ return seg, lines, true
+ }
+ }
+}
+
+func skipSpacesReader(r Reader) (Segment, int, bool) {
+ chars := 0
+ for {
+ line, segment := r.PeekLine()
+ if line == nil {
+ return segment, chars, false
+ }
+ for i, c := range line {
+ if util.IsSpace(c) {
+ chars++
+ r.Advance(1)
+ continue
+ }
+ return segment.WithStart(segment.Start + i + 1), chars, true
+ }
+ }
+}
+
+func matchReader(r Reader, reg *regexp.Regexp) bool {
+ oldline, oldseg := r.Position()
+ match := reg.FindReaderSubmatchIndex(r)
+ r.SetPosition(oldline, oldseg)
+ if match == nil {
+ return false
+ }
+ r.Advance(match[1] - match[0])
+ return true
+}
+
+func findSubMatchReader(r Reader, reg *regexp.Regexp) [][]byte {
+ oldline, oldseg := r.Position()
+ match := reg.FindReaderSubmatchIndex(r)
+ r.SetPosition(oldline, oldseg)
+ if match == nil {
+ return nil
+ }
+ runes := make([]rune, 0, match[1]-match[0])
+ for i := 0; i < match[1]; {
+ r, size, _ := readRuneReader(r)
+ i += size
+ runes = append(runes, r)
+ }
+ result := [][]byte{}
+ for i := 0; i < len(match); i += 2 {
+ result = append(result, []byte(string(runes[match[i]:match[i+1]])))
+ }
+
+ r.SetPosition(oldline, oldseg)
+ r.Advance(match[1] - match[0])
+ return result
+}
+
+func readRuneReader(r Reader) (rune, int, error) {
+ line, _ := r.PeekLine()
+ if line == nil {
+ return 0, 0, io.EOF
+ }
+ rn, size := utf8.DecodeRune(line)
+ if rn == utf8.RuneError {
+ return 0, 0, io.EOF
+ }
+ r.Advance(size)
+ return rn, size, nil
+}
--- /dev/null
+package text
+
+import (
+ "bytes"
+ "github.com/yuin/goldmark/util"
+)
+
+var space = []byte(" ")
+
+// A Segment struct holds information about source positions.
+type Segment struct {
+ // Start is a start position of the segment.
+ Start int
+
+ // Stop is a stop position of the segment.
+ // This value should be excluded.
+ Stop int
+
+ // Padding is a padding length of the segment.
+ Padding int
+}
+
+// NewSegment returns a new Segment.
+func NewSegment(start, stop int) Segment {
+ return Segment{
+ Start: start,
+ Stop: stop,
+ Padding: 0,
+ }
+}
+
+// NewSegmentPadding returns a new Segment with the given padding.
+func NewSegmentPadding(start, stop, n int) Segment {
+ return Segment{
+ Start: start,
+ Stop: stop,
+ Padding: n,
+ }
+}
+
+// Value returns a value of the segment.
+func (t *Segment) Value(buffer []byte) []byte {
+ if t.Padding == 0 {
+ return buffer[t.Start:t.Stop]
+ }
+ result := make([]byte, 0, t.Padding+t.Stop-t.Start+1)
+ result = append(result, bytes.Repeat(space, t.Padding)...)
+ return append(result, buffer[t.Start:t.Stop]...)
+}
+
+// Len returns a length of the segment.
+func (t *Segment) Len() int {
+ return t.Stop - t.Start + t.Padding
+}
+
+// Between returns a segment between this segment and the given segment.
+func (t *Segment) Between(other Segment) Segment {
+ if t.Stop != other.Stop {
+ panic("invalid state")
+ }
+ return NewSegmentPadding(
+ t.Start,
+ other.Start,
+ t.Padding-other.Padding,
+ )
+}
+
+// IsEmpty returns true if this segment is empty, otherwise false.
+func (t *Segment) IsEmpty() bool {
+ return t.Start >= t.Stop && t.Padding == 0
+}
+
+// TrimRightSpace returns a new segment by slicing off all trailing
+// space characters.
+func (t *Segment) TrimRightSpace(buffer []byte) Segment {
+ v := buffer[t.Start:t.Stop]
+ l := util.TrimRightSpaceLength(v)
+ if l == len(v) {
+ return NewSegment(t.Start, t.Start)
+ }
+ return NewSegmentPadding(t.Start, t.Stop-l, t.Padding)
+}
+
+// TrimLeftSpace returns a new segment by slicing off all leading
+// space characters including padding.
+func (t *Segment) TrimLeftSpace(buffer []byte) Segment {
+ v := buffer[t.Start:t.Stop]
+ l := util.TrimLeftSpaceLength(v)
+ return NewSegment(t.Start+l, t.Stop)
+}
+
+// TrimLeftSpaceWidth returns a new segment by slicing off leading space
+// characters until the given width.
+func (t *Segment) TrimLeftSpaceWidth(width int, buffer []byte) Segment {
+ padding := t.Padding
+ for ; width > 0; width-- {
+ if padding == 0 {
+ break
+ }
+ padding--
+ }
+ if width == 0 {
+ return NewSegmentPadding(t.Start, t.Stop, padding)
+ }
+ text := buffer[t.Start:t.Stop]
+ start := t.Start
+ for _, c := range text {
+ if start >= t.Stop-1 || width <= 0 {
+ break
+ }
+ if c == ' ' {
+ width--
+ } else if c == '\t' {
+ width -= 4
+ } else {
+ break
+ }
+ start++
+ }
+ if width < 0 {
+ padding = width * -1
+ }
+ return NewSegmentPadding(start, t.Stop, padding)
+}
+
+// WithStart returns a new Segment with same value except Start.
+func (t *Segment) WithStart(v int) Segment {
+ return NewSegmentPadding(v, t.Stop, t.Padding)
+}
+
+// WithStop returns a new Segment with same value except Stop.
+func (t *Segment) WithStop(v int) Segment {
+ return NewSegmentPadding(t.Start, v, t.Padding)
+}
+
+// ConcatPadding concats the padding to the given slice.
+func (t *Segment) ConcatPadding(v []byte) []byte {
+ if t.Padding > 0 {
+ return append(v, bytes.Repeat(space, t.Padding)...)
+ }
+ return v
+}
+
+// Segments is a collection of the Segment.
+type Segments struct {
+ values []Segment
+}
+
+// NewSegments returns a new Segments.
+func NewSegments() *Segments {
+ return &Segments{
+ values: nil,
+ }
+}
+
+// Append appends the given segment after the tail of the collection.
+func (s *Segments) Append(t Segment) {
+ if s.values == nil {
+ s.values = make([]Segment, 0, 20)
+ }
+ s.values = append(s.values, t)
+}
+
+// AppendAll appends all elements of given segments after the tail of the collection.
+func (s *Segments) AppendAll(t []Segment) {
+ if s.values == nil {
+ s.values = make([]Segment, 0, 20)
+ }
+ s.values = append(s.values, t...)
+}
+
+// Len returns the length of the collection.
+func (s *Segments) Len() int {
+ if s.values == nil {
+ return 0
+ }
+ return len(s.values)
+}
+
+// At returns a segment at the given index.
+func (s *Segments) At(i int) Segment {
+ return s.values[i]
+}
+
+// Set sets the given Segment.
+func (s *Segments) Set(i int, v Segment) {
+ s.values[i] = v
+}
+
+// SetSliced replaces the collection with a subsliced value.
+func (s *Segments) SetSliced(lo, hi int) {
+ s.values = s.values[lo:hi]
+}
+
+// Sliced returns a subslice of the collection.
+func (s *Segments) Sliced(lo, hi int) []Segment {
+ return s.values[lo:hi]
+}
+
+// Clear deletes all elements of the collection.
+func (s *Segments) Clear() {
+ s.values = nil
+}
+
+// Unshift inserts the given Segment at the head of the collection.
+func (s *Segments) Unshift(v Segment) {
+ s.values = append(s.values[0:1], s.values[0:]...)
+ s.values[0] = v
+}
--- /dev/null
+package util
+
+// An HTML5Entity struct represents HTML5 entities.
+type HTML5Entity struct {
+ Name string
+ CodePoints []int
+ Characters []byte
+}
+
+// LookUpHTML5EntityByName returns (an HTML5Entity, true) if an entity with the
+// given name is found, otherwise (nil, false).
+func LookUpHTML5EntityByName(name string) (*HTML5Entity, bool) {
+ v, ok := html5entities[name]
+ return v, ok
+}
+
+var html5entities = map[string]*HTML5Entity{
+ "AElig": {Name: "AElig", CodePoints: []int{198}, Characters: []byte{0xc3, 0x86}},
+ "AMP": {Name: "AMP", CodePoints: []int{38}, Characters: []byte{0x26}},
+ "Aacute": {Name: "Aacute", CodePoints: []int{193}, Characters: []byte{0xc3, 0x81}},
+ "Acirc": {Name: "Acirc", CodePoints: []int{194}, Characters: []byte{0xc3, 0x82}},
+ "Acy": {Name: "Acy", CodePoints: []int{1040}, Characters: []byte{0xd0, 0x90}},
+ "Afr": {Name: "Afr", CodePoints: []int{120068}, Characters: []byte{0xf0, 0x9d, 0x94, 0x84}},
+ "Agrave": {Name: "Agrave", CodePoints: []int{192}, Characters: []byte{0xc3, 0x80}},
+ "Alpha": {Name: "Alpha", CodePoints: []int{913}, Characters: []byte{0xce, 0x91}},
+ "Amacr": {Name: "Amacr", CodePoints: []int{256}, Characters: []byte{0xc4, 0x80}},
+ "And": {Name: "And", CodePoints: []int{10835}, Characters: []byte{0xe2, 0xa9, 0x93}},
+ "Aogon": {Name: "Aogon", CodePoints: []int{260}, Characters: []byte{0xc4, 0x84}},
+ "Aopf": {Name: "Aopf", CodePoints: []int{120120}, Characters: []byte{0xf0, 0x9d, 0x94, 0xb8}},
+ "ApplyFunction": {Name: "ApplyFunction", CodePoints: []int{8289}, Characters: []byte{0xe2, 0x81, 0xa1}},
+ "Aring": {Name: "Aring", CodePoints: []int{197}, Characters: []byte{0xc3, 0x85}},
+ "Ascr": {Name: "Ascr", CodePoints: []int{119964}, Characters: []byte{0xf0, 0x9d, 0x92, 0x9c}},
+ "Assign": {Name: "Assign", CodePoints: []int{8788}, Characters: []byte{0xe2, 0x89, 0x94}},
+ "Atilde": {Name: "Atilde", CodePoints: []int{195}, Characters: []byte{0xc3, 0x83}},
+ "Auml": {Name: "Auml", CodePoints: []int{196}, Characters: []byte{0xc3, 0x84}},
+ "Backslash": {Name: "Backslash", CodePoints: []int{8726}, Characters: []byte{0xe2, 0x88, 0x96}},
+ "Barv": {Name: "Barv", CodePoints: []int{10983}, Characters: []byte{0xe2, 0xab, 0xa7}},
+ "Barwed": {Name: "Barwed", CodePoints: []int{8966}, Characters: []byte{0xe2, 0x8c, 0x86}},
+ "Bcy": {Name: "Bcy", CodePoints: []int{1041}, Characters: []byte{0xd0, 0x91}},
+ "Because": {Name: "Because", CodePoints: []int{8757}, Characters: []byte{0xe2, 0x88, 0xb5}},
+ "Bernoullis": {Name: "Bernoullis", CodePoints: []int{8492}, Characters: []byte{0xe2, 0x84, 0xac}},
+ "Beta": {Name: "Beta", CodePoints: []int{914}, Characters: []byte{0xce, 0x92}},
+ "Bfr": {Name: "Bfr", CodePoints: []int{120069}, Characters: []byte{0xf0, 0x9d, 0x94, 0x85}},
+ "Bopf": {Name: "Bopf", CodePoints: []int{120121}, Characters: []byte{0xf0, 0x9d, 0x94, 0xb9}},
+ "Breve": {Name: "Breve", CodePoints: []int{728}, Characters: []byte{0xcb, 0x98}},
+ "Bscr": {Name: "Bscr", CodePoints: []int{8492}, Characters: []byte{0xe2, 0x84, 0xac}},
+ "Bumpeq": {Name: "Bumpeq", CodePoints: []int{8782}, Characters: []byte{0xe2, 0x89, 0x8e}},
+ "CHcy": {Name: "CHcy", CodePoints: []int{1063}, Characters: []byte{0xd0, 0xa7}},
+ "COPY": {Name: "COPY", CodePoints: []int{169}, Characters: []byte{0xc2, 0xa9}},
+ "Cacute": {Name: "Cacute", CodePoints: []int{262}, Characters: []byte{0xc4, 0x86}},
+ "Cap": {Name: "Cap", CodePoints: []int{8914}, Characters: []byte{0xe2, 0x8b, 0x92}},
+ "CapitalDifferentialD": {Name: "CapitalDifferentialD", CodePoints: []int{8517}, Characters: []byte{0xe2, 0x85, 0x85}},
+ "Cayleys": {Name: "Cayleys", CodePoints: []int{8493}, Characters: []byte{0xe2, 0x84, 0xad}},
+ "Ccaron": {Name: "Ccaron", CodePoints: []int{268}, Characters: []byte{0xc4, 0x8c}},
+ "Ccedil": {Name: "Ccedil", CodePoints: []int{199}, Characters: []byte{0xc3, 0x87}},
+ "Ccirc": {Name: "Ccirc", CodePoints: []int{264}, Characters: []byte{0xc4, 0x88}},
+ "Cconint": {Name: "Cconint", CodePoints: []int{8752}, Characters: []byte{0xe2, 0x88, 0xb0}},
+ "Cdot": {Name: "Cdot", CodePoints: []int{266}, Characters: []byte{0xc4, 0x8a}},
+ "Cedilla": {Name: "Cedilla", CodePoints: []int{184}, Characters: []byte{0xc2, 0xb8}},
+ "CenterDot": {Name: "CenterDot", CodePoints: []int{183}, Characters: []byte{0xc2, 0xb7}},
+ "Cfr": {Name: "Cfr", CodePoints: []int{8493}, Characters: []byte{0xe2, 0x84, 0xad}},
+ "Chi": {Name: "Chi", CodePoints: []int{935}, Characters: []byte{0xce, 0xa7}},
+ "CircleDot": {Name: "CircleDot", CodePoints: []int{8857}, Characters: []byte{0xe2, 0x8a, 0x99}},
+ "CircleMinus": {Name: "CircleMinus", CodePoints: []int{8854}, Characters: []byte{0xe2, 0x8a, 0x96}},
+ "CirclePlus": {Name: "CirclePlus", CodePoints: []int{8853}, Characters: []byte{0xe2, 0x8a, 0x95}},
+ "CircleTimes": {Name: "CircleTimes", CodePoints: []int{8855}, Characters: []byte{0xe2, 0x8a, 0x97}},
+ "ClockwiseContourIntegral": {Name: "ClockwiseContourIntegral", CodePoints: []int{8754}, Characters: []byte{0xe2, 0x88, 0xb2}},
+ "CloseCurlyDoubleQuote": {Name: "CloseCurlyDoubleQuote", CodePoints: []int{8221}, Characters: []byte{0xe2, 0x80, 0x9d}},
+ "CloseCurlyQuote": {Name: "CloseCurlyQuote", CodePoints: []int{8217}, Characters: []byte{0xe2, 0x80, 0x99}},
+ "Colon": {Name: "Colon", CodePoints: []int{8759}, Characters: []byte{0xe2, 0x88, 0xb7}},
+ "Colone": {Name: "Colone", CodePoints: []int{10868}, Characters: []byte{0xe2, 0xa9, 0xb4}},
+ "Congruent": {Name: "Congruent", CodePoints: []int{8801}, Characters: []byte{0xe2, 0x89, 0xa1}},
+ "Conint": {Name: "Conint", CodePoints: []int{8751}, Characters: []byte{0xe2, 0x88, 0xaf}},
+ "ContourIntegral": {Name: "ContourIntegral", CodePoints: []int{8750}, Characters: []byte{0xe2, 0x88, 0xae}},
+ "Copf": {Name: "Copf", CodePoints: []int{8450}, Characters: []byte{0xe2, 0x84, 0x82}},
+ "Coproduct": {Name: "Coproduct", CodePoints: []int{8720}, Characters: []byte{0xe2, 0x88, 0x90}},
+ "CounterClockwiseContourIntegral": {Name: "CounterClockwiseContourIntegral", CodePoints: []int{8755}, Characters: []byte{0xe2, 0x88, 0xb3}},
+ "Cross": {Name: "Cross", CodePoints: []int{10799}, Characters: []byte{0xe2, 0xa8, 0xaf}},
+ "Cscr": {Name: "Cscr", CodePoints: []int{119966}, Characters: []byte{0xf0, 0x9d, 0x92, 0x9e}},
+ "Cup": {Name: "Cup", CodePoints: []int{8915}, Characters: []byte{0xe2, 0x8b, 0x93}},
+ "CupCap": {Name: "CupCap", CodePoints: []int{8781}, Characters: []byte{0xe2, 0x89, 0x8d}},
+ "DD": {Name: "DD", CodePoints: []int{8517}, Characters: []byte{0xe2, 0x85, 0x85}},
+ "DDotrahd": {Name: "DDotrahd", CodePoints: []int{10513}, Characters: []byte{0xe2, 0xa4, 0x91}},
+ "DJcy": {Name: "DJcy", CodePoints: []int{1026}, Characters: []byte{0xd0, 0x82}},
+ "DScy": {Name: "DScy", CodePoints: []int{1029}, Characters: []byte{0xd0, 0x85}},
+ "DZcy": {Name: "DZcy", CodePoints: []int{1039}, Characters: []byte{0xd0, 0x8f}},
+ "Dagger": {Name: "Dagger", CodePoints: []int{8225}, Characters: []byte{0xe2, 0x80, 0xa1}},
+ "Darr": {Name: "Darr", CodePoints: []int{8609}, Characters: []byte{0xe2, 0x86, 0xa1}},
+ "Dashv": {Name: "Dashv", CodePoints: []int{10980}, Characters: []byte{0xe2, 0xab, 0xa4}},
+ "Dcaron": {Name: "Dcaron", CodePoints: []int{270}, Characters: []byte{0xc4, 0x8e}},
+ "Dcy": {Name: "Dcy", CodePoints: []int{1044}, Characters: []byte{0xd0, 0x94}},
+ "Del": {Name: "Del", CodePoints: []int{8711}, Characters: []byte{0xe2, 0x88, 0x87}},
+ "Delta": {Name: "Delta", CodePoints: []int{916}, Characters: []byte{0xce, 0x94}},
+ "Dfr": {Name: "Dfr", CodePoints: []int{120071}, Characters: []byte{0xf0, 0x9d, 0x94, 0x87}},
+ "DiacriticalAcute": {Name: "DiacriticalAcute", CodePoints: []int{180}, Characters: []byte{0xc2, 0xb4}},
+ "DiacriticalDot": {Name: "DiacriticalDot", CodePoints: []int{729}, Characters: []byte{0xcb, 0x99}},
+ "DiacriticalDoubleAcute": {Name: "DiacriticalDoubleAcute", CodePoints: []int{733}, Characters: []byte{0xcb, 0x9d}},
+ "DiacriticalGrave": {Name: "DiacriticalGrave", CodePoints: []int{96}, Characters: []byte{0x60}},
+ "DiacriticalTilde": {Name: "DiacriticalTilde", CodePoints: []int{732}, Characters: []byte{0xcb, 0x9c}},
+ "Diamond": {Name: "Diamond", CodePoints: []int{8900}, Characters: []byte{0xe2, 0x8b, 0x84}},
+ "DifferentialD": {Name: "DifferentialD", CodePoints: []int{8518}, Characters: []byte{0xe2, 0x85, 0x86}},
+ "Dopf": {Name: "Dopf", CodePoints: []int{120123}, Characters: []byte{0xf0, 0x9d, 0x94, 0xbb}},
+ "Dot": {Name: "Dot", CodePoints: []int{168}, Characters: []byte{0xc2, 0xa8}},
+ "DotDot": {Name: "DotDot", CodePoints: []int{8412}, Characters: []byte{0xe2, 0x83, 0x9c}},
+ "DotEqual": {Name: "DotEqual", CodePoints: []int{8784}, Characters: []byte{0xe2, 0x89, 0x90}},
+ "DoubleContourIntegral": {Name: "DoubleContourIntegral", CodePoints: []int{8751}, Characters: []byte{0xe2, 0x88, 0xaf}},
+ "DoubleDot": {Name: "DoubleDot", CodePoints: []int{168}, Characters: []byte{0xc2, 0xa8}},
+ "DoubleDownArrow": {Name: "DoubleDownArrow", CodePoints: []int{8659}, Characters: []byte{0xe2, 0x87, 0x93}},
+ "DoubleLeftArrow": {Name: "DoubleLeftArrow", CodePoints: []int{8656}, Characters: []byte{0xe2, 0x87, 0x90}},
+ "DoubleLeftRightArrow": {Name: "DoubleLeftRightArrow", CodePoints: []int{8660}, Characters: []byte{0xe2, 0x87, 0x94}},
+ "DoubleLeftTee": {Name: "DoubleLeftTee", CodePoints: []int{10980}, Characters: []byte{0xe2, 0xab, 0xa4}},
+ "DoubleLongLeftArrow": {Name: "DoubleLongLeftArrow", CodePoints: []int{10232}, Characters: []byte{0xe2, 0x9f, 0xb8}},
+ "DoubleLongLeftRightArrow": {Name: "DoubleLongLeftRightArrow", CodePoints: []int{10234}, Characters: []byte{0xe2, 0x9f, 0xba}},
+ "DoubleLongRightArrow": {Name: "DoubleLongRightArrow", CodePoints: []int{10233}, Characters: []byte{0xe2, 0x9f, 0xb9}},
+ "DoubleRightArrow": {Name: "DoubleRightArrow", CodePoints: []int{8658}, Characters: []byte{0xe2, 0x87, 0x92}},
+ "DoubleRightTee": {Name: "DoubleRightTee", CodePoints: []int{8872}, Characters: []byte{0xe2, 0x8a, 0xa8}},
+ "DoubleUpArrow": {Name: "DoubleUpArrow", CodePoints: []int{8657}, Characters: []byte{0xe2, 0x87, 0x91}},
+ "DoubleUpDownArrow": {Name: "DoubleUpDownArrow", CodePoints: []int{8661}, Characters: []byte{0xe2, 0x87, 0x95}},
+ "DoubleVerticalBar": {Name: "DoubleVerticalBar", CodePoints: []int{8741}, Characters: []byte{0xe2, 0x88, 0xa5}},
+ "DownArrow": {Name: "DownArrow", CodePoints: []int{8595}, Characters: []byte{0xe2, 0x86, 0x93}},
+ "DownArrowBar": {Name: "DownArrowBar", CodePoints: []int{10515}, Characters: []byte{0xe2, 0xa4, 0x93}},
+ "DownArrowUpArrow": {Name: "DownArrowUpArrow", CodePoints: []int{8693}, Characters: []byte{0xe2, 0x87, 0xb5}},
+ "DownBreve": {Name: "DownBreve", CodePoints: []int{785}, Characters: []byte{0xcc, 0x91}},
+ "DownLeftRightVector": {Name: "DownLeftRightVector", CodePoints: []int{10576}, Characters: []byte{0xe2, 0xa5, 0x90}},
+ "DownLeftTeeVector": {Name: "DownLeftTeeVector", CodePoints: []int{10590}, Characters: []byte{0xe2, 0xa5, 0x9e}},
+ "DownLeftVector": {Name: "DownLeftVector", CodePoints: []int{8637}, Characters: []byte{0xe2, 0x86, 0xbd}},
+ "DownLeftVectorBar": {Name: "DownLeftVectorBar", CodePoints: []int{10582}, Characters: []byte{0xe2, 0xa5, 0x96}},
+ "DownRightTeeVector": {Name: "DownRightTeeVector", CodePoints: []int{10591}, Characters: []byte{0xe2, 0xa5, 0x9f}},
+ "DownRightVector": {Name: "DownRightVector", CodePoints: []int{8641}, Characters: []byte{0xe2, 0x87, 0x81}},
+ "DownRightVectorBar": {Name: "DownRightVectorBar", CodePoints: []int{10583}, Characters: []byte{0xe2, 0xa5, 0x97}},
+ "DownTee": {Name: "DownTee", CodePoints: []int{8868}, Characters: []byte{0xe2, 0x8a, 0xa4}},
+ "DownTeeArrow": {Name: "DownTeeArrow", CodePoints: []int{8615}, Characters: []byte{0xe2, 0x86, 0xa7}},
+ "Downarrow": {Name: "Downarrow", CodePoints: []int{8659}, Characters: []byte{0xe2, 0x87, 0x93}},
+ "Dscr": {Name: "Dscr", CodePoints: []int{119967}, Characters: []byte{0xf0, 0x9d, 0x92, 0x9f}},
+ "Dstrok": {Name: "Dstrok", CodePoints: []int{272}, Characters: []byte{0xc4, 0x90}},
+ "ENG": {Name: "ENG", CodePoints: []int{330}, Characters: []byte{0xc5, 0x8a}},
+ "ETH": {Name: "ETH", CodePoints: []int{208}, Characters: []byte{0xc3, 0x90}},
+ "Eacute": {Name: "Eacute", CodePoints: []int{201}, Characters: []byte{0xc3, 0x89}},
+ "Ecaron": {Name: "Ecaron", CodePoints: []int{282}, Characters: []byte{0xc4, 0x9a}},
+ "Ecirc": {Name: "Ecirc", CodePoints: []int{202}, Characters: []byte{0xc3, 0x8a}},
+ "Ecy": {Name: "Ecy", CodePoints: []int{1069}, Characters: []byte{0xd0, 0xad}},
+ "Edot": {Name: "Edot", CodePoints: []int{278}, Characters: []byte{0xc4, 0x96}},
+ "Efr": {Name: "Efr", CodePoints: []int{120072}, Characters: []byte{0xf0, 0x9d, 0x94, 0x88}},
+ "Egrave": {Name: "Egrave", CodePoints: []int{200}, Characters: []byte{0xc3, 0x88}},
+ "Element": {Name: "Element", CodePoints: []int{8712}, Characters: []byte{0xe2, 0x88, 0x88}},
+ "Emacr": {Name: "Emacr", CodePoints: []int{274}, Characters: []byte{0xc4, 0x92}},
+ "EmptySmallSquare": {Name: "EmptySmallSquare", CodePoints: []int{9723}, Characters: []byte{0xe2, 0x97, 0xbb}},
+ "EmptyVerySmallSquare": {Name: "EmptyVerySmallSquare", CodePoints: []int{9643}, Characters: []byte{0xe2, 0x96, 0xab}},
+ "Eogon": {Name: "Eogon", CodePoints: []int{280}, Characters: []byte{0xc4, 0x98}},
+ "Eopf": {Name: "Eopf", CodePoints: []int{120124}, Characters: []byte{0xf0, 0x9d, 0x94, 0xbc}},
+ "Epsilon": {Name: "Epsilon", CodePoints: []int{917}, Characters: []byte{0xce, 0x95}},
+ "Equal": {Name: "Equal", CodePoints: []int{10869}, Characters: []byte{0xe2, 0xa9, 0xb5}},
+ "EqualTilde": {Name: "EqualTilde", CodePoints: []int{8770}, Characters: []byte{0xe2, 0x89, 0x82}},
+ "Equilibrium": {Name: "Equilibrium", CodePoints: []int{8652}, Characters: []byte{0xe2, 0x87, 0x8c}},
+ "Escr": {Name: "Escr", CodePoints: []int{8496}, Characters: []byte{0xe2, 0x84, 0xb0}},
+ "Esim": {Name: "Esim", CodePoints: []int{10867}, Characters: []byte{0xe2, 0xa9, 0xb3}},
+ "Eta": {Name: "Eta", CodePoints: []int{919}, Characters: []byte{0xce, 0x97}},
+ "Euml": {Name: "Euml", CodePoints: []int{203}, Characters: []byte{0xc3, 0x8b}},
+ "Exists": {Name: "Exists", CodePoints: []int{8707}, Characters: []byte{0xe2, 0x88, 0x83}},
+ "ExponentialE": {Name: "ExponentialE", CodePoints: []int{8519}, Characters: []byte{0xe2, 0x85, 0x87}},
+ "Fcy": {Name: "Fcy", CodePoints: []int{1060}, Characters: []byte{0xd0, 0xa4}},
+ "Ffr": {Name: "Ffr", CodePoints: []int{120073}, Characters: []byte{0xf0, 0x9d, 0x94, 0x89}},
+ "FilledSmallSquare": {Name: "FilledSmallSquare", CodePoints: []int{9724}, Characters: []byte{0xe2, 0x97, 0xbc}},
+ "FilledVerySmallSquare": {Name: "FilledVerySmallSquare", CodePoints: []int{9642}, Characters: []byte{0xe2, 0x96, 0xaa}},
+ "Fopf": {Name: "Fopf", CodePoints: []int{120125}, Characters: []byte{0xf0, 0x9d, 0x94, 0xbd}},
+ "ForAll": {Name: "ForAll", CodePoints: []int{8704}, Characters: []byte{0xe2, 0x88, 0x80}},
+ "Fouriertrf": {Name: "Fouriertrf", CodePoints: []int{8497}, Characters: []byte{0xe2, 0x84, 0xb1}},
+ "Fscr": {Name: "Fscr", CodePoints: []int{8497}, Characters: []byte{0xe2, 0x84, 0xb1}},
+ "GJcy": {Name: "GJcy", CodePoints: []int{1027}, Characters: []byte{0xd0, 0x83}},
+ "GT": {Name: "GT", CodePoints: []int{62}, Characters: []byte{0x3e}},
+ "Gamma": {Name: "Gamma", CodePoints: []int{915}, Characters: []byte{0xce, 0x93}},
+ "Gammad": {Name: "Gammad", CodePoints: []int{988}, Characters: []byte{0xcf, 0x9c}},
+ "Gbreve": {Name: "Gbreve", CodePoints: []int{286}, Characters: []byte{0xc4, 0x9e}},
+ "Gcedil": {Name: "Gcedil", CodePoints: []int{290}, Characters: []byte{0xc4, 0xa2}},
+ "Gcirc": {Name: "Gcirc", CodePoints: []int{284}, Characters: []byte{0xc4, 0x9c}},
+ "Gcy": {Name: "Gcy", CodePoints: []int{1043}, Characters: []byte{0xd0, 0x93}},
+ "Gdot": {Name: "Gdot", CodePoints: []int{288}, Characters: []byte{0xc4, 0xa0}},
+ "Gfr": {Name: "Gfr", CodePoints: []int{120074}, Characters: []byte{0xf0, 0x9d, 0x94, 0x8a}},
+ "Gg": {Name: "Gg", CodePoints: []int{8921}, Characters: []byte{0xe2, 0x8b, 0x99}},
+ "Gopf": {Name: "Gopf", CodePoints: []int{120126}, Characters: []byte{0xf0, 0x9d, 0x94, 0xbe}},
+ "GreaterEqual": {Name: "GreaterEqual", CodePoints: []int{8805}, Characters: []byte{0xe2, 0x89, 0xa5}},
+ "GreaterEqualLess": {Name: "GreaterEqualLess", CodePoints: []int{8923}, Characters: []byte{0xe2, 0x8b, 0x9b}},
+ "GreaterFullEqual": {Name: "GreaterFullEqual", CodePoints: []int{8807}, Characters: []byte{0xe2, 0x89, 0xa7}},
+ "GreaterGreater": {Name: "GreaterGreater", CodePoints: []int{10914}, Characters: []byte{0xe2, 0xaa, 0xa2}},
+ "GreaterLess": {Name: "GreaterLess", CodePoints: []int{8823}, Characters: []byte{0xe2, 0x89, 0xb7}},
+ "GreaterSlantEqual": {Name: "GreaterSlantEqual", CodePoints: []int{10878}, Characters: []byte{0xe2, 0xa9, 0xbe}},
+ "GreaterTilde": {Name: "GreaterTilde", CodePoints: []int{8819}, Characters: []byte{0xe2, 0x89, 0xb3}},
+ "Gscr": {Name: "Gscr", CodePoints: []int{119970}, Characters: []byte{0xf0, 0x9d, 0x92, 0xa2}},
+ "Gt": {Name: "Gt", CodePoints: []int{8811}, Characters: []byte{0xe2, 0x89, 0xab}},
+ "HARDcy": {Name: "HARDcy", CodePoints: []int{1066}, Characters: []byte{0xd0, 0xaa}},
+ "Hacek": {Name: "Hacek", CodePoints: []int{711}, Characters: []byte{0xcb, 0x87}},
+ "Hat": {Name: "Hat", CodePoints: []int{94}, Characters: []byte{0x5e}},
+ "Hcirc": {Name: "Hcirc", CodePoints: []int{292}, Characters: []byte{0xc4, 0xa4}},
+ "Hfr": {Name: "Hfr", CodePoints: []int{8460}, Characters: []byte{0xe2, 0x84, 0x8c}},
+ "HilbertSpace": {Name: "HilbertSpace", CodePoints: []int{8459}, Characters: []byte{0xe2, 0x84, 0x8b}},
+ "Hopf": {Name: "Hopf", CodePoints: []int{8461}, Characters: []byte{0xe2, 0x84, 0x8d}},
+ "HorizontalLine": {Name: "HorizontalLine", CodePoints: []int{9472}, Characters: []byte{0xe2, 0x94, 0x80}},
+ "Hscr": {Name: "Hscr", CodePoints: []int{8459}, Characters: []byte{0xe2, 0x84, 0x8b}},
+ "Hstrok": {Name: "Hstrok", CodePoints: []int{294}, Characters: []byte{0xc4, 0xa6}},
+ "HumpDownHump": {Name: "HumpDownHump", CodePoints: []int{8782}, Characters: []byte{0xe2, 0x89, 0x8e}},
+ "HumpEqual": {Name: "HumpEqual", CodePoints: []int{8783}, Characters: []byte{0xe2, 0x89, 0x8f}},
+ "IEcy": {Name: "IEcy", CodePoints: []int{1045}, Characters: []byte{0xd0, 0x95}},
+ "IJlig": {Name: "IJlig", CodePoints: []int{306}, Characters: []byte{0xc4, 0xb2}},
+ "IOcy": {Name: "IOcy", CodePoints: []int{1025}, Characters: []byte{0xd0, 0x81}},
+ "Iacute": {Name: "Iacute", CodePoints: []int{205}, Characters: []byte{0xc3, 0x8d}},
+ "Icirc": {Name: "Icirc", CodePoints: []int{206}, Characters: []byte{0xc3, 0x8e}},
+ "Icy": {Name: "Icy", CodePoints: []int{1048}, Characters: []byte{0xd0, 0x98}},
+ "Idot": {Name: "Idot", CodePoints: []int{304}, Characters: []byte{0xc4, 0xb0}},
+ "Ifr": {Name: "Ifr", CodePoints: []int{8465}, Characters: []byte{0xe2, 0x84, 0x91}},
+ "Igrave": {Name: "Igrave", CodePoints: []int{204}, Characters: []byte{0xc3, 0x8c}},
+ "Im": {Name: "Im", CodePoints: []int{8465}, Characters: []byte{0xe2, 0x84, 0x91}},
+ "Imacr": {Name: "Imacr", CodePoints: []int{298}, Characters: []byte{0xc4, 0xaa}},
+ "ImaginaryI": {Name: "ImaginaryI", CodePoints: []int{8520}, Characters: []byte{0xe2, 0x85, 0x88}},
+ "Implies": {Name: "Implies", CodePoints: []int{8658}, Characters: []byte{0xe2, 0x87, 0x92}},
+ "Int": {Name: "Int", CodePoints: []int{8748}, Characters: []byte{0xe2, 0x88, 0xac}},
+ "Integral": {Name: "Integral", CodePoints: []int{8747}, Characters: []byte{0xe2, 0x88, 0xab}},
+ "Intersection": {Name: "Intersection", CodePoints: []int{8898}, Characters: []byte{0xe2, 0x8b, 0x82}},
+ "InvisibleComma": {Name: "InvisibleComma", CodePoints: []int{8291}, Characters: []byte{0xe2, 0x81, 0xa3}},
+ "InvisibleTimes": {Name: "InvisibleTimes", CodePoints: []int{8290}, Characters: []byte{0xe2, 0x81, 0xa2}},
+ "Iogon": {Name: "Iogon", CodePoints: []int{302}, Characters: []byte{0xc4, 0xae}},
+ "Iopf": {Name: "Iopf", CodePoints: []int{120128}, Characters: []byte{0xf0, 0x9d, 0x95, 0x80}},
+ "Iota": {Name: "Iota", CodePoints: []int{921}, Characters: []byte{0xce, 0x99}},
+ "Iscr": {Name: "Iscr", CodePoints: []int{8464}, Characters: []byte{0xe2, 0x84, 0x90}},
+ "Itilde": {Name: "Itilde", CodePoints: []int{296}, Characters: []byte{0xc4, 0xa8}},
+ "Iukcy": {Name: "Iukcy", CodePoints: []int{1030}, Characters: []byte{0xd0, 0x86}},
+ "Iuml": {Name: "Iuml", CodePoints: []int{207}, Characters: []byte{0xc3, 0x8f}},
+ "Jcirc": {Name: "Jcirc", CodePoints: []int{308}, Characters: []byte{0xc4, 0xb4}},
+ "Jcy": {Name: "Jcy", CodePoints: []int{1049}, Characters: []byte{0xd0, 0x99}},
+ "Jfr": {Name: "Jfr", CodePoints: []int{120077}, Characters: []byte{0xf0, 0x9d, 0x94, 0x8d}},
+ "Jopf": {Name: "Jopf", CodePoints: []int{120129}, Characters: []byte{0xf0, 0x9d, 0x95, 0x81}},
+ "Jscr": {Name: "Jscr", CodePoints: []int{119973}, Characters: []byte{0xf0, 0x9d, 0x92, 0xa5}},
+ "Jsercy": {Name: "Jsercy", CodePoints: []int{1032}, Characters: []byte{0xd0, 0x88}},
+ "Jukcy": {Name: "Jukcy", CodePoints: []int{1028}, Characters: []byte{0xd0, 0x84}},
+ "KHcy": {Name: "KHcy", CodePoints: []int{1061}, Characters: []byte{0xd0, 0xa5}},
+ "KJcy": {Name: "KJcy", CodePoints: []int{1036}, Characters: []byte{0xd0, 0x8c}},
+ "Kappa": {Name: "Kappa", CodePoints: []int{922}, Characters: []byte{0xce, 0x9a}},
+ "Kcedil": {Name: "Kcedil", CodePoints: []int{310}, Characters: []byte{0xc4, 0xb6}},
+ "Kcy": {Name: "Kcy", CodePoints: []int{1050}, Characters: []byte{0xd0, 0x9a}},
+ "Kfr": {Name: "Kfr", CodePoints: []int{120078}, Characters: []byte{0xf0, 0x9d, 0x94, 0x8e}},
+ "Kopf": {Name: "Kopf", CodePoints: []int{120130}, Characters: []byte{0xf0, 0x9d, 0x95, 0x82}},
+ "Kscr": {Name: "Kscr", CodePoints: []int{119974}, Characters: []byte{0xf0, 0x9d, 0x92, 0xa6}},
+ "LJcy": {Name: "LJcy", CodePoints: []int{1033}, Characters: []byte{0xd0, 0x89}},
+ "LT": {Name: "LT", CodePoints: []int{60}, Characters: []byte{0x3c}},
+ "Lacute": {Name: "Lacute", CodePoints: []int{313}, Characters: []byte{0xc4, 0xb9}},
+ "Lambda": {Name: "Lambda", CodePoints: []int{923}, Characters: []byte{0xce, 0x9b}},
+ "Lang": {Name: "Lang", CodePoints: []int{10218}, Characters: []byte{0xe2, 0x9f, 0xaa}},
+ "Laplacetrf": {Name: "Laplacetrf", CodePoints: []int{8466}, Characters: []byte{0xe2, 0x84, 0x92}},
+ "Larr": {Name: "Larr", CodePoints: []int{8606}, Characters: []byte{0xe2, 0x86, 0x9e}},
+ "Lcaron": {Name: "Lcaron", CodePoints: []int{317}, Characters: []byte{0xc4, 0xbd}},
+ "Lcedil": {Name: "Lcedil", CodePoints: []int{315}, Characters: []byte{0xc4, 0xbb}},
+ "Lcy": {Name: "Lcy", CodePoints: []int{1051}, Characters: []byte{0xd0, 0x9b}},
+ "LeftAngleBracket": {Name: "LeftAngleBracket", CodePoints: []int{10216}, Characters: []byte{0xe2, 0x9f, 0xa8}},
+ "LeftArrow": {Name: "LeftArrow", CodePoints: []int{8592}, Characters: []byte{0xe2, 0x86, 0x90}},
+ "LeftArrowBar": {Name: "LeftArrowBar", CodePoints: []int{8676}, Characters: []byte{0xe2, 0x87, 0xa4}},
+ "LeftArrowRightArrow": {Name: "LeftArrowRightArrow", CodePoints: []int{8646}, Characters: []byte{0xe2, 0x87, 0x86}},
+ "LeftCeiling": {Name: "LeftCeiling", CodePoints: []int{8968}, Characters: []byte{0xe2, 0x8c, 0x88}},
+ "LeftDoubleBracket": {Name: "LeftDoubleBracket", CodePoints: []int{10214}, Characters: []byte{0xe2, 0x9f, 0xa6}},
+ "LeftDownTeeVector": {Name: "LeftDownTeeVector", CodePoints: []int{10593}, Characters: []byte{0xe2, 0xa5, 0xa1}},
+ "LeftDownVector": {Name: "LeftDownVector", CodePoints: []int{8643}, Characters: []byte{0xe2, 0x87, 0x83}},
+ "LeftDownVectorBar": {Name: "LeftDownVectorBar", CodePoints: []int{10585}, Characters: []byte{0xe2, 0xa5, 0x99}},
+ "LeftFloor": {Name: "LeftFloor", CodePoints: []int{8970}, Characters: []byte{0xe2, 0x8c, 0x8a}},
+ "LeftRightArrow": {Name: "LeftRightArrow", CodePoints: []int{8596}, Characters: []byte{0xe2, 0x86, 0x94}},
+ "LeftRightVector": {Name: "LeftRightVector", CodePoints: []int{10574}, Characters: []byte{0xe2, 0xa5, 0x8e}},
+ "LeftTee": {Name: "LeftTee", CodePoints: []int{8867}, Characters: []byte{0xe2, 0x8a, 0xa3}},
+ "LeftTeeArrow": {Name: "LeftTeeArrow", CodePoints: []int{8612}, Characters: []byte{0xe2, 0x86, 0xa4}},
+ "LeftTeeVector": {Name: "LeftTeeVector", CodePoints: []int{10586}, Characters: []byte{0xe2, 0xa5, 0x9a}},
+ "LeftTriangle": {Name: "LeftTriangle", CodePoints: []int{8882}, Characters: []byte{0xe2, 0x8a, 0xb2}},
+ "LeftTriangleBar": {Name: "LeftTriangleBar", CodePoints: []int{10703}, Characters: []byte{0xe2, 0xa7, 0x8f}},
+ "LeftTriangleEqual": {Name: "LeftTriangleEqual", CodePoints: []int{8884}, Characters: []byte{0xe2, 0x8a, 0xb4}},
+ "LeftUpDownVector": {Name: "LeftUpDownVector", CodePoints: []int{10577}, Characters: []byte{0xe2, 0xa5, 0x91}},
+ "LeftUpTeeVector": {Name: "LeftUpTeeVector", CodePoints: []int{10592}, Characters: []byte{0xe2, 0xa5, 0xa0}},
+ "LeftUpVector": {Name: "LeftUpVector", CodePoints: []int{8639}, Characters: []byte{0xe2, 0x86, 0xbf}},
+ "LeftUpVectorBar": {Name: "LeftUpVectorBar", CodePoints: []int{10584}, Characters: []byte{0xe2, 0xa5, 0x98}},
+ "LeftVector": {Name: "LeftVector", CodePoints: []int{8636}, Characters: []byte{0xe2, 0x86, 0xbc}},
+ "LeftVectorBar": {Name: "LeftVectorBar", CodePoints: []int{10578}, Characters: []byte{0xe2, 0xa5, 0x92}},
+ "Leftarrow": {Name: "Leftarrow", CodePoints: []int{8656}, Characters: []byte{0xe2, 0x87, 0x90}},
+ "Leftrightarrow": {Name: "Leftrightarrow", CodePoints: []int{8660}, Characters: []byte{0xe2, 0x87, 0x94}},
+ "LessEqualGreater": {Name: "LessEqualGreater", CodePoints: []int{8922}, Characters: []byte{0xe2, 0x8b, 0x9a}},
+ "LessFullEqual": {Name: "LessFullEqual", CodePoints: []int{8806}, Characters: []byte{0xe2, 0x89, 0xa6}},
+ "LessGreater": {Name: "LessGreater", CodePoints: []int{8822}, Characters: []byte{0xe2, 0x89, 0xb6}},
+ "LessLess": {Name: "LessLess", CodePoints: []int{10913}, Characters: []byte{0xe2, 0xaa, 0xa1}},
+ "LessSlantEqual": {Name: "LessSlantEqual", CodePoints: []int{10877}, Characters: []byte{0xe2, 0xa9, 0xbd}},
+ "LessTilde": {Name: "LessTilde", CodePoints: []int{8818}, Characters: []byte{0xe2, 0x89, 0xb2}},
+ "Lfr": {Name: "Lfr", CodePoints: []int{120079}, Characters: []byte{0xf0, 0x9d, 0x94, 0x8f}},
+ "Ll": {Name: "Ll", CodePoints: []int{8920}, Characters: []byte{0xe2, 0x8b, 0x98}},
+ "Lleftarrow": {Name: "Lleftarrow", CodePoints: []int{8666}, Characters: []byte{0xe2, 0x87, 0x9a}},
+ "Lmidot": {Name: "Lmidot", CodePoints: []int{319}, Characters: []byte{0xc4, 0xbf}},
+ "LongLeftArrow": {Name: "LongLeftArrow", CodePoints: []int{10229}, Characters: []byte{0xe2, 0x9f, 0xb5}},
+ "LongLeftRightArrow": {Name: "LongLeftRightArrow", CodePoints: []int{10231}, Characters: []byte{0xe2, 0x9f, 0xb7}},
+ "LongRightArrow": {Name: "LongRightArrow", CodePoints: []int{10230}, Characters: []byte{0xe2, 0x9f, 0xb6}},
+ "Longleftarrow": {Name: "Longleftarrow", CodePoints: []int{10232}, Characters: []byte{0xe2, 0x9f, 0xb8}},
+ "Longleftrightarrow": {Name: "Longleftrightarrow", CodePoints: []int{10234}, Characters: []byte{0xe2, 0x9f, 0xba}},
+ "Longrightarrow": {Name: "Longrightarrow", CodePoints: []int{10233}, Characters: []byte{0xe2, 0x9f, 0xb9}},
+ "Lopf": {Name: "Lopf", CodePoints: []int{120131}, Characters: []byte{0xf0, 0x9d, 0x95, 0x83}},
+ "LowerLeftArrow": {Name: "LowerLeftArrow", CodePoints: []int{8601}, Characters: []byte{0xe2, 0x86, 0x99}},
+ "LowerRightArrow": {Name: "LowerRightArrow", CodePoints: []int{8600}, Characters: []byte{0xe2, 0x86, 0x98}},
+ "Lscr": {Name: "Lscr", CodePoints: []int{8466}, Characters: []byte{0xe2, 0x84, 0x92}},
+ "Lsh": {Name: "Lsh", CodePoints: []int{8624}, Characters: []byte{0xe2, 0x86, 0xb0}},
+ "Lstrok": {Name: "Lstrok", CodePoints: []int{321}, Characters: []byte{0xc5, 0x81}},
+ "Lt": {Name: "Lt", CodePoints: []int{8810}, Characters: []byte{0xe2, 0x89, 0xaa}},
+ "Map": {Name: "Map", CodePoints: []int{10501}, Characters: []byte{0xe2, 0xa4, 0x85}},
+ "Mcy": {Name: "Mcy", CodePoints: []int{1052}, Characters: []byte{0xd0, 0x9c}},
+ "MediumSpace": {Name: "MediumSpace", CodePoints: []int{8287}, Characters: []byte{0xe2, 0x81, 0x9f}},
+ "Mellintrf": {Name: "Mellintrf", CodePoints: []int{8499}, Characters: []byte{0xe2, 0x84, 0xb3}},
+ "Mfr": {Name: "Mfr", CodePoints: []int{120080}, Characters: []byte{0xf0, 0x9d, 0x94, 0x90}},
+ "MinusPlus": {Name: "MinusPlus", CodePoints: []int{8723}, Characters: []byte{0xe2, 0x88, 0x93}},
+ "Mopf": {Name: "Mopf", CodePoints: []int{120132}, Characters: []byte{0xf0, 0x9d, 0x95, 0x84}},
+ "Mscr": {Name: "Mscr", CodePoints: []int{8499}, Characters: []byte{0xe2, 0x84, 0xb3}},
+ "Mu": {Name: "Mu", CodePoints: []int{924}, Characters: []byte{0xce, 0x9c}},
+ "NJcy": {Name: "NJcy", CodePoints: []int{1034}, Characters: []byte{0xd0, 0x8a}},
+ "Nacute": {Name: "Nacute", CodePoints: []int{323}, Characters: []byte{0xc5, 0x83}},
+ "Ncaron": {Name: "Ncaron", CodePoints: []int{327}, Characters: []byte{0xc5, 0x87}},
+ "Ncedil": {Name: "Ncedil", CodePoints: []int{325}, Characters: []byte{0xc5, 0x85}},
+ "Ncy": {Name: "Ncy", CodePoints: []int{1053}, Characters: []byte{0xd0, 0x9d}},
+ "NegativeMediumSpace": {Name: "NegativeMediumSpace", CodePoints: []int{8203}, Characters: []byte{0xe2, 0x80, 0x8b}},
+ "NegativeThickSpace": {Name: "NegativeThickSpace", CodePoints: []int{8203}, Characters: []byte{0xe2, 0x80, 0x8b}},
+ "NegativeThinSpace": {Name: "NegativeThinSpace", CodePoints: []int{8203}, Characters: []byte{0xe2, 0x80, 0x8b}},
+ "NegativeVeryThinSpace": {Name: "NegativeVeryThinSpace", CodePoints: []int{8203}, Characters: []byte{0xe2, 0x80, 0x8b}},
+ "NestedGreaterGreater": {Name: "NestedGreaterGreater", CodePoints: []int{8811}, Characters: []byte{0xe2, 0x89, 0xab}},
+ "NestedLessLess": {Name: "NestedLessLess", CodePoints: []int{8810}, Characters: []byte{0xe2, 0x89, 0xaa}},
+ "NewLine": {Name: "NewLine", CodePoints: []int{10}, Characters: []byte{0xa}},
+ "Nfr": {Name: "Nfr", CodePoints: []int{120081}, Characters: []byte{0xf0, 0x9d, 0x94, 0x91}},
+ "NoBreak": {Name: "NoBreak", CodePoints: []int{8288}, Characters: []byte{0xe2, 0x81, 0xa0}},
+ "NonBreakingSpace": {Name: "NonBreakingSpace", CodePoints: []int{160}, Characters: []byte{0xc2, 0xa0}},
+ "Nopf": {Name: "Nopf", CodePoints: []int{8469}, Characters: []byte{0xe2, 0x84, 0x95}},
+ "Not": {Name: "Not", CodePoints: []int{10988}, Characters: []byte{0xe2, 0xab, 0xac}},
+ "NotCongruent": {Name: "NotCongruent", CodePoints: []int{8802}, Characters: []byte{0xe2, 0x89, 0xa2}},
+ "NotCupCap": {Name: "NotCupCap", CodePoints: []int{8813}, Characters: []byte{0xe2, 0x89, 0xad}},
+ "NotDoubleVerticalBar": {Name: "NotDoubleVerticalBar", CodePoints: []int{8742}, Characters: []byte{0xe2, 0x88, 0xa6}},
+ "NotElement": {Name: "NotElement", CodePoints: []int{8713}, Characters: []byte{0xe2, 0x88, 0x89}},
+ "NotEqual": {Name: "NotEqual", CodePoints: []int{8800}, Characters: []byte{0xe2, 0x89, 0xa0}},
+ "NotEqualTilde": {Name: "NotEqualTilde", CodePoints: []int{8770, 824}, Characters: []byte{0xe2, 0x89, 0x82, 0xcc, 0xb8}},
+ "NotExists": {Name: "NotExists", CodePoints: []int{8708}, Characters: []byte{0xe2, 0x88, 0x84}},
+ "NotGreater": {Name: "NotGreater", CodePoints: []int{8815}, Characters: []byte{0xe2, 0x89, 0xaf}},
+ "NotGreaterEqual": {Name: "NotGreaterEqual", CodePoints: []int{8817}, Characters: []byte{0xe2, 0x89, 0xb1}},
+ "NotGreaterFullEqual": {Name: "NotGreaterFullEqual", CodePoints: []int{8807, 824}, Characters: []byte{0xe2, 0x89, 0xa7, 0xcc, 0xb8}},
+ "NotGreaterGreater": {Name: "NotGreaterGreater", CodePoints: []int{8811, 824}, Characters: []byte{0xe2, 0x89, 0xab, 0xcc, 0xb8}},
+ "NotGreaterLess": {Name: "NotGreaterLess", CodePoints: []int{8825}, Characters: []byte{0xe2, 0x89, 0xb9}},
+ "NotGreaterSlantEqual": {Name: "NotGreaterSlantEqual", CodePoints: []int{10878, 824}, Characters: []byte{0xe2, 0xa9, 0xbe, 0xcc, 0xb8}},
+ "NotGreaterTilde": {Name: "NotGreaterTilde", CodePoints: []int{8821}, Characters: []byte{0xe2, 0x89, 0xb5}},
+ "NotHumpDownHump": {Name: "NotHumpDownHump", CodePoints: []int{8782, 824}, Characters: []byte{0xe2, 0x89, 0x8e, 0xcc, 0xb8}},
+ "NotHumpEqual": {Name: "NotHumpEqual", CodePoints: []int{8783, 824}, Characters: []byte{0xe2, 0x89, 0x8f, 0xcc, 0xb8}},
+ "NotLeftTriangle": {Name: "NotLeftTriangle", CodePoints: []int{8938}, Characters: []byte{0xe2, 0x8b, 0xaa}},
+ "NotLeftTriangleBar": {Name: "NotLeftTriangleBar", CodePoints: []int{10703, 824}, Characters: []byte{0xe2, 0xa7, 0x8f, 0xcc, 0xb8}},
+ "NotLeftTriangleEqual": {Name: "NotLeftTriangleEqual", CodePoints: []int{8940}, Characters: []byte{0xe2, 0x8b, 0xac}},
+ "NotLess": {Name: "NotLess", CodePoints: []int{8814}, Characters: []byte{0xe2, 0x89, 0xae}},
+ "NotLessEqual": {Name: "NotLessEqual", CodePoints: []int{8816}, Characters: []byte{0xe2, 0x89, 0xb0}},
+ "NotLessGreater": {Name: "NotLessGreater", CodePoints: []int{8824}, Characters: []byte{0xe2, 0x89, 0xb8}},
+ "NotLessLess": {Name: "NotLessLess", CodePoints: []int{8810, 824}, Characters: []byte{0xe2, 0x89, 0xaa, 0xcc, 0xb8}},
+ "NotLessSlantEqual": {Name: "NotLessSlantEqual", CodePoints: []int{10877, 824}, Characters: []byte{0xe2, 0xa9, 0xbd, 0xcc, 0xb8}},
+ "NotLessTilde": {Name: "NotLessTilde", CodePoints: []int{8820}, Characters: []byte{0xe2, 0x89, 0xb4}},
+ "NotNestedGreaterGreater": {Name: "NotNestedGreaterGreater", CodePoints: []int{10914, 824}, Characters: []byte{0xe2, 0xaa, 0xa2, 0xcc, 0xb8}},
+ "NotNestedLessLess": {Name: "NotNestedLessLess", CodePoints: []int{10913, 824}, Characters: []byte{0xe2, 0xaa, 0xa1, 0xcc, 0xb8}},
+ "NotPrecedes": {Name: "NotPrecedes", CodePoints: []int{8832}, Characters: []byte{0xe2, 0x8a, 0x80}},
+ "NotPrecedesEqual": {Name: "NotPrecedesEqual", CodePoints: []int{10927, 824}, Characters: []byte{0xe2, 0xaa, 0xaf, 0xcc, 0xb8}},
+ "NotPrecedesSlantEqual": {Name: "NotPrecedesSlantEqual", CodePoints: []int{8928}, Characters: []byte{0xe2, 0x8b, 0xa0}},
+ "NotReverseElement": {Name: "NotReverseElement", CodePoints: []int{8716}, Characters: []byte{0xe2, 0x88, 0x8c}},
+ "NotRightTriangle": {Name: "NotRightTriangle", CodePoints: []int{8939}, Characters: []byte{0xe2, 0x8b, 0xab}},
+ "NotRightTriangleBar": {Name: "NotRightTriangleBar", CodePoints: []int{10704, 824}, Characters: []byte{0xe2, 0xa7, 0x90, 0xcc, 0xb8}},
+ "NotRightTriangleEqual": {Name: "NotRightTriangleEqual", CodePoints: []int{8941}, Characters: []byte{0xe2, 0x8b, 0xad}},
+ "NotSquareSubset": {Name: "NotSquareSubset", CodePoints: []int{8847, 824}, Characters: []byte{0xe2, 0x8a, 0x8f, 0xcc, 0xb8}},
+ "NotSquareSubsetEqual": {Name: "NotSquareSubsetEqual", CodePoints: []int{8930}, Characters: []byte{0xe2, 0x8b, 0xa2}},
+ "NotSquareSuperset": {Name: "NotSquareSuperset", CodePoints: []int{8848, 824}, Characters: []byte{0xe2, 0x8a, 0x90, 0xcc, 0xb8}},
+ "NotSquareSupersetEqual": {Name: "NotSquareSupersetEqual", CodePoints: []int{8931}, Characters: []byte{0xe2, 0x8b, 0xa3}},
+ "NotSubset": {Name: "NotSubset", CodePoints: []int{8834, 8402}, Characters: []byte{0xe2, 0x8a, 0x82, 0xe2, 0x83, 0x92}},
+ "NotSubsetEqual": {Name: "NotSubsetEqual", CodePoints: []int{8840}, Characters: []byte{0xe2, 0x8a, 0x88}},
+ "NotSucceeds": {Name: "NotSucceeds", CodePoints: []int{8833}, Characters: []byte{0xe2, 0x8a, 0x81}},
+ "NotSucceedsEqual": {Name: "NotSucceedsEqual", CodePoints: []int{10928, 824}, Characters: []byte{0xe2, 0xaa, 0xb0, 0xcc, 0xb8}},
+ "NotSucceedsSlantEqual": {Name: "NotSucceedsSlantEqual", CodePoints: []int{8929}, Characters: []byte{0xe2, 0x8b, 0xa1}},
+ "NotSucceedsTilde": {Name: "NotSucceedsTilde", CodePoints: []int{8831, 824}, Characters: []byte{0xe2, 0x89, 0xbf, 0xcc, 0xb8}},
+ "NotSuperset": {Name: "NotSuperset", CodePoints: []int{8835, 8402}, Characters: []byte{0xe2, 0x8a, 0x83, 0xe2, 0x83, 0x92}},
+ "NotSupersetEqual": {Name: "NotSupersetEqual", CodePoints: []int{8841}, Characters: []byte{0xe2, 0x8a, 0x89}},
+ "NotTilde": {Name: "NotTilde", CodePoints: []int{8769}, Characters: []byte{0xe2, 0x89, 0x81}},
+ "NotTildeEqual": {Name: "NotTildeEqual", CodePoints: []int{8772}, Characters: []byte{0xe2, 0x89, 0x84}},
+ "NotTildeFullEqual": {Name: "NotTildeFullEqual", CodePoints: []int{8775}, Characters: []byte{0xe2, 0x89, 0x87}},
+ "NotTildeTilde": {Name: "NotTildeTilde", CodePoints: []int{8777}, Characters: []byte{0xe2, 0x89, 0x89}},
+ "NotVerticalBar": {Name: "NotVerticalBar", CodePoints: []int{8740}, Characters: []byte{0xe2, 0x88, 0xa4}},
+ "Nscr": {Name: "Nscr", CodePoints: []int{119977}, Characters: []byte{0xf0, 0x9d, 0x92, 0xa9}},
+ "Ntilde": {Name: "Ntilde", CodePoints: []int{209}, Characters: []byte{0xc3, 0x91}},
+ "Nu": {Name: "Nu", CodePoints: []int{925}, Characters: []byte{0xce, 0x9d}},
+ "OElig": {Name: "OElig", CodePoints: []int{338}, Characters: []byte{0xc5, 0x92}},
+ "Oacute": {Name: "Oacute", CodePoints: []int{211}, Characters: []byte{0xc3, 0x93}},
+ "Ocirc": {Name: "Ocirc", CodePoints: []int{212}, Characters: []byte{0xc3, 0x94}},
+ "Ocy": {Name: "Ocy", CodePoints: []int{1054}, Characters: []byte{0xd0, 0x9e}},
+ "Odblac": {Name: "Odblac", CodePoints: []int{336}, Characters: []byte{0xc5, 0x90}},
+ "Ofr": {Name: "Ofr", CodePoints: []int{120082}, Characters: []byte{0xf0, 0x9d, 0x94, 0x92}},
+ "Ograve": {Name: "Ograve", CodePoints: []int{210}, Characters: []byte{0xc3, 0x92}},
+ "Omacr": {Name: "Omacr", CodePoints: []int{332}, Characters: []byte{0xc5, 0x8c}},
+ "Omega": {Name: "Omega", CodePoints: []int{937}, Characters: []byte{0xce, 0xa9}},
+ "Omicron": {Name: "Omicron", CodePoints: []int{927}, Characters: []byte{0xce, 0x9f}},
+ "Oopf": {Name: "Oopf", CodePoints: []int{120134}, Characters: []byte{0xf0, 0x9d, 0x95, 0x86}},
+ "OpenCurlyDoubleQuote": {Name: "OpenCurlyDoubleQuote", CodePoints: []int{8220}, Characters: []byte{0xe2, 0x80, 0x9c}},
+ "OpenCurlyQuote": {Name: "OpenCurlyQuote", CodePoints: []int{8216}, Characters: []byte{0xe2, 0x80, 0x98}},
+ "Or": {Name: "Or", CodePoints: []int{10836}, Characters: []byte{0xe2, 0xa9, 0x94}},
+ "Oscr": {Name: "Oscr", CodePoints: []int{119978}, Characters: []byte{0xf0, 0x9d, 0x92, 0xaa}},
+ "Oslash": {Name: "Oslash", CodePoints: []int{216}, Characters: []byte{0xc3, 0x98}},
+ "Otilde": {Name: "Otilde", CodePoints: []int{213}, Characters: []byte{0xc3, 0x95}},
+ "Otimes": {Name: "Otimes", CodePoints: []int{10807}, Characters: []byte{0xe2, 0xa8, 0xb7}},
+ "Ouml": {Name: "Ouml", CodePoints: []int{214}, Characters: []byte{0xc3, 0x96}},
+ "OverBar": {Name: "OverBar", CodePoints: []int{8254}, Characters: []byte{0xe2, 0x80, 0xbe}},
+ "OverBrace": {Name: "OverBrace", CodePoints: []int{9182}, Characters: []byte{0xe2, 0x8f, 0x9e}},
+ "OverBracket": {Name: "OverBracket", CodePoints: []int{9140}, Characters: []byte{0xe2, 0x8e, 0xb4}},
+ "OverParenthesis": {Name: "OverParenthesis", CodePoints: []int{9180}, Characters: []byte{0xe2, 0x8f, 0x9c}},
+ "PartialD": {Name: "PartialD", CodePoints: []int{8706}, Characters: []byte{0xe2, 0x88, 0x82}},
+ "Pcy": {Name: "Pcy", CodePoints: []int{1055}, Characters: []byte{0xd0, 0x9f}},
+ "Pfr": {Name: "Pfr", CodePoints: []int{120083}, Characters: []byte{0xf0, 0x9d, 0x94, 0x93}},
+ "Phi": {Name: "Phi", CodePoints: []int{934}, Characters: []byte{0xce, 0xa6}},
+ "Pi": {Name: "Pi", CodePoints: []int{928}, Characters: []byte{0xce, 0xa0}},
+ "PlusMinus": {Name: "PlusMinus", CodePoints: []int{177}, Characters: []byte{0xc2, 0xb1}},
+ "Poincareplane": {Name: "Poincareplane", CodePoints: []int{8460}, Characters: []byte{0xe2, 0x84, 0x8c}},
+ "Popf": {Name: "Popf", CodePoints: []int{8473}, Characters: []byte{0xe2, 0x84, 0x99}},
+ "Pr": {Name: "Pr", CodePoints: []int{10939}, Characters: []byte{0xe2, 0xaa, 0xbb}},
+ "Precedes": {Name: "Precedes", CodePoints: []int{8826}, Characters: []byte{0xe2, 0x89, 0xba}},
+ "PrecedesEqual": {Name: "PrecedesEqual", CodePoints: []int{10927}, Characters: []byte{0xe2, 0xaa, 0xaf}},
+ "PrecedesSlantEqual": {Name: "PrecedesSlantEqual", CodePoints: []int{8828}, Characters: []byte{0xe2, 0x89, 0xbc}},
+ "PrecedesTilde": {Name: "PrecedesTilde", CodePoints: []int{8830}, Characters: []byte{0xe2, 0x89, 0xbe}},
+ "Prime": {Name: "Prime", CodePoints: []int{8243}, Characters: []byte{0xe2, 0x80, 0xb3}},
+ "Product": {Name: "Product", CodePoints: []int{8719}, Characters: []byte{0xe2, 0x88, 0x8f}},
+ "Proportion": {Name: "Proportion", CodePoints: []int{8759}, Characters: []byte{0xe2, 0x88, 0xb7}},
+ "Proportional": {Name: "Proportional", CodePoints: []int{8733}, Characters: []byte{0xe2, 0x88, 0x9d}},
+ "Pscr": {Name: "Pscr", CodePoints: []int{119979}, Characters: []byte{0xf0, 0x9d, 0x92, 0xab}},
+ "Psi": {Name: "Psi", CodePoints: []int{936}, Characters: []byte{0xce, 0xa8}},
+ "QUOT": {Name: "QUOT", CodePoints: []int{34}, Characters: []byte{0x22}},
+ "Qfr": {Name: "Qfr", CodePoints: []int{120084}, Characters: []byte{0xf0, 0x9d, 0x94, 0x94}},
+ "Qopf": {Name: "Qopf", CodePoints: []int{8474}, Characters: []byte{0xe2, 0x84, 0x9a}},
+ "Qscr": {Name: "Qscr", CodePoints: []int{119980}, Characters: []byte{0xf0, 0x9d, 0x92, 0xac}},
+ "RBarr": {Name: "RBarr", CodePoints: []int{10512}, Characters: []byte{0xe2, 0xa4, 0x90}},
+ "REG": {Name: "REG", CodePoints: []int{174}, Characters: []byte{0xc2, 0xae}},
+ "Racute": {Name: "Racute", CodePoints: []int{340}, Characters: []byte{0xc5, 0x94}},
+ "Rang": {Name: "Rang", CodePoints: []int{10219}, Characters: []byte{0xe2, 0x9f, 0xab}},
+ "Rarr": {Name: "Rarr", CodePoints: []int{8608}, Characters: []byte{0xe2, 0x86, 0xa0}},
+ "Rarrtl": {Name: "Rarrtl", CodePoints: []int{10518}, Characters: []byte{0xe2, 0xa4, 0x96}},
+ "Rcaron": {Name: "Rcaron", CodePoints: []int{344}, Characters: []byte{0xc5, 0x98}},
+ "Rcedil": {Name: "Rcedil", CodePoints: []int{342}, Characters: []byte{0xc5, 0x96}},
+ "Rcy": {Name: "Rcy", CodePoints: []int{1056}, Characters: []byte{0xd0, 0xa0}},
+ "Re": {Name: "Re", CodePoints: []int{8476}, Characters: []byte{0xe2, 0x84, 0x9c}},
+ "ReverseElement": {Name: "ReverseElement", CodePoints: []int{8715}, Characters: []byte{0xe2, 0x88, 0x8b}},
+ "ReverseEquilibrium": {Name: "ReverseEquilibrium", CodePoints: []int{8651}, Characters: []byte{0xe2, 0x87, 0x8b}},
+ "ReverseUpEquilibrium": {Name: "ReverseUpEquilibrium", CodePoints: []int{10607}, Characters: []byte{0xe2, 0xa5, 0xaf}},
+ "Rfr": {Name: "Rfr", CodePoints: []int{8476}, Characters: []byte{0xe2, 0x84, 0x9c}},
+ "Rho": {Name: "Rho", CodePoints: []int{929}, Characters: []byte{0xce, 0xa1}},
+ "RightAngleBracket": {Name: "RightAngleBracket", CodePoints: []int{10217}, Characters: []byte{0xe2, 0x9f, 0xa9}},
+ "RightArrow": {Name: "RightArrow", CodePoints: []int{8594}, Characters: []byte{0xe2, 0x86, 0x92}},
+ "RightArrowBar": {Name: "RightArrowBar", CodePoints: []int{8677}, Characters: []byte{0xe2, 0x87, 0xa5}},
+ "RightArrowLeftArrow": {Name: "RightArrowLeftArrow", CodePoints: []int{8644}, Characters: []byte{0xe2, 0x87, 0x84}},
+ "RightCeiling": {Name: "RightCeiling", CodePoints: []int{8969}, Characters: []byte{0xe2, 0x8c, 0x89}},
+ "RightDoubleBracket": {Name: "RightDoubleBracket", CodePoints: []int{10215}, Characters: []byte{0xe2, 0x9f, 0xa7}},
+ "RightDownTeeVector": {Name: "RightDownTeeVector", CodePoints: []int{10589}, Characters: []byte{0xe2, 0xa5, 0x9d}},
+ "RightDownVector": {Name: "RightDownVector", CodePoints: []int{8642}, Characters: []byte{0xe2, 0x87, 0x82}},
+ "RightDownVectorBar": {Name: "RightDownVectorBar", CodePoints: []int{10581}, Characters: []byte{0xe2, 0xa5, 0x95}},
+ "RightFloor": {Name: "RightFloor", CodePoints: []int{8971}, Characters: []byte{0xe2, 0x8c, 0x8b}},
+ "RightTee": {Name: "RightTee", CodePoints: []int{8866}, Characters: []byte{0xe2, 0x8a, 0xa2}},
+ "RightTeeArrow": {Name: "RightTeeArrow", CodePoints: []int{8614}, Characters: []byte{0xe2, 0x86, 0xa6}},
+ "RightTeeVector": {Name: "RightTeeVector", CodePoints: []int{10587}, Characters: []byte{0xe2, 0xa5, 0x9b}},
+ "RightTriangle": {Name: "RightTriangle", CodePoints: []int{8883}, Characters: []byte{0xe2, 0x8a, 0xb3}},
+ "RightTriangleBar": {Name: "RightTriangleBar", CodePoints: []int{10704}, Characters: []byte{0xe2, 0xa7, 0x90}},
+ "RightTriangleEqual": {Name: "RightTriangleEqual", CodePoints: []int{8885}, Characters: []byte{0xe2, 0x8a, 0xb5}},
+ "RightUpDownVector": {Name: "RightUpDownVector", CodePoints: []int{10575}, Characters: []byte{0xe2, 0xa5, 0x8f}},
+ "RightUpTeeVector": {Name: "RightUpTeeVector", CodePoints: []int{10588}, Characters: []byte{0xe2, 0xa5, 0x9c}},
+ "RightUpVector": {Name: "RightUpVector", CodePoints: []int{8638}, Characters: []byte{0xe2, 0x86, 0xbe}},
+ "RightUpVectorBar": {Name: "RightUpVectorBar", CodePoints: []int{10580}, Characters: []byte{0xe2, 0xa5, 0x94}},
+ "RightVector": {Name: "RightVector", CodePoints: []int{8640}, Characters: []byte{0xe2, 0x87, 0x80}},
+ "RightVectorBar": {Name: "RightVectorBar", CodePoints: []int{10579}, Characters: []byte{0xe2, 0xa5, 0x93}},
+ "Rightarrow": {Name: "Rightarrow", CodePoints: []int{8658}, Characters: []byte{0xe2, 0x87, 0x92}},
+ "Ropf": {Name: "Ropf", CodePoints: []int{8477}, Characters: []byte{0xe2, 0x84, 0x9d}},
+ "RoundImplies": {Name: "RoundImplies", CodePoints: []int{10608}, Characters: []byte{0xe2, 0xa5, 0xb0}},
+ "Rrightarrow": {Name: "Rrightarrow", CodePoints: []int{8667}, Characters: []byte{0xe2, 0x87, 0x9b}},
+ "Rscr": {Name: "Rscr", CodePoints: []int{8475}, Characters: []byte{0xe2, 0x84, 0x9b}},
+ "Rsh": {Name: "Rsh", CodePoints: []int{8625}, Characters: []byte{0xe2, 0x86, 0xb1}},
+ "RuleDelayed": {Name: "RuleDelayed", CodePoints: []int{10740}, Characters: []byte{0xe2, 0xa7, 0xb4}},
+ "SHCHcy": {Name: "SHCHcy", CodePoints: []int{1065}, Characters: []byte{0xd0, 0xa9}},
+ "SHcy": {Name: "SHcy", CodePoints: []int{1064}, Characters: []byte{0xd0, 0xa8}},
+ "SOFTcy": {Name: "SOFTcy", CodePoints: []int{1068}, Characters: []byte{0xd0, 0xac}},
+ "Sacute": {Name: "Sacute", CodePoints: []int{346}, Characters: []byte{0xc5, 0x9a}},
+ "Sc": {Name: "Sc", CodePoints: []int{10940}, Characters: []byte{0xe2, 0xaa, 0xbc}},
+ "Scaron": {Name: "Scaron", CodePoints: []int{352}, Characters: []byte{0xc5, 0xa0}},
+ "Scedil": {Name: "Scedil", CodePoints: []int{350}, Characters: []byte{0xc5, 0x9e}},
+ "Scirc": {Name: "Scirc", CodePoints: []int{348}, Characters: []byte{0xc5, 0x9c}},
+ "Scy": {Name: "Scy", CodePoints: []int{1057}, Characters: []byte{0xd0, 0xa1}},
+ "Sfr": {Name: "Sfr", CodePoints: []int{120086}, Characters: []byte{0xf0, 0x9d, 0x94, 0x96}},
+ "ShortDownArrow": {Name: "ShortDownArrow", CodePoints: []int{8595}, Characters: []byte{0xe2, 0x86, 0x93}},
+ "ShortLeftArrow": {Name: "ShortLeftArrow", CodePoints: []int{8592}, Characters: []byte{0xe2, 0x86, 0x90}},
+ "ShortRightArrow": {Name: "ShortRightArrow", CodePoints: []int{8594}, Characters: []byte{0xe2, 0x86, 0x92}},
+ "ShortUpArrow": {Name: "ShortUpArrow", CodePoints: []int{8593}, Characters: []byte{0xe2, 0x86, 0x91}},
+ "Sigma": {Name: "Sigma", CodePoints: []int{931}, Characters: []byte{0xce, 0xa3}},
+ "SmallCircle": {Name: "SmallCircle", CodePoints: []int{8728}, Characters: []byte{0xe2, 0x88, 0x98}},
+ "Sopf": {Name: "Sopf", CodePoints: []int{120138}, Characters: []byte{0xf0, 0x9d, 0x95, 0x8a}},
+ "Sqrt": {Name: "Sqrt", CodePoints: []int{8730}, Characters: []byte{0xe2, 0x88, 0x9a}},
+ "Square": {Name: "Square", CodePoints: []int{9633}, Characters: []byte{0xe2, 0x96, 0xa1}},
+ "SquareIntersection": {Name: "SquareIntersection", CodePoints: []int{8851}, Characters: []byte{0xe2, 0x8a, 0x93}},
+ "SquareSubset": {Name: "SquareSubset", CodePoints: []int{8847}, Characters: []byte{0xe2, 0x8a, 0x8f}},
+ "SquareSubsetEqual": {Name: "SquareSubsetEqual", CodePoints: []int{8849}, Characters: []byte{0xe2, 0x8a, 0x91}},
+ "SquareSuperset": {Name: "SquareSuperset", CodePoints: []int{8848}, Characters: []byte{0xe2, 0x8a, 0x90}},
+ "SquareSupersetEqual": {Name: "SquareSupersetEqual", CodePoints: []int{8850}, Characters: []byte{0xe2, 0x8a, 0x92}},
+ "SquareUnion": {Name: "SquareUnion", CodePoints: []int{8852}, Characters: []byte{0xe2, 0x8a, 0x94}},
+ "Sscr": {Name: "Sscr", CodePoints: []int{119982}, Characters: []byte{0xf0, 0x9d, 0x92, 0xae}},
+ "Star": {Name: "Star", CodePoints: []int{8902}, Characters: []byte{0xe2, 0x8b, 0x86}},
+ "Sub": {Name: "Sub", CodePoints: []int{8912}, Characters: []byte{0xe2, 0x8b, 0x90}},
+ "Subset": {Name: "Subset", CodePoints: []int{8912}, Characters: []byte{0xe2, 0x8b, 0x90}},
+ "SubsetEqual": {Name: "SubsetEqual", CodePoints: []int{8838}, Characters: []byte{0xe2, 0x8a, 0x86}},
+ "Succeeds": {Name: "Succeeds", CodePoints: []int{8827}, Characters: []byte{0xe2, 0x89, 0xbb}},
+ "SucceedsEqual": {Name: "SucceedsEqual", CodePoints: []int{10928}, Characters: []byte{0xe2, 0xaa, 0xb0}},
+ "SucceedsSlantEqual": {Name: "SucceedsSlantEqual", CodePoints: []int{8829}, Characters: []byte{0xe2, 0x89, 0xbd}},
+ "SucceedsTilde": {Name: "SucceedsTilde", CodePoints: []int{8831}, Characters: []byte{0xe2, 0x89, 0xbf}},
+ "SuchThat": {Name: "SuchThat", CodePoints: []int{8715}, Characters: []byte{0xe2, 0x88, 0x8b}},
+ "Sum": {Name: "Sum", CodePoints: []int{8721}, Characters: []byte{0xe2, 0x88, 0x91}},
+ "Sup": {Name: "Sup", CodePoints: []int{8913}, Characters: []byte{0xe2, 0x8b, 0x91}},
+ "Superset": {Name: "Superset", CodePoints: []int{8835}, Characters: []byte{0xe2, 0x8a, 0x83}},
+ "SupersetEqual": {Name: "SupersetEqual", CodePoints: []int{8839}, Characters: []byte{0xe2, 0x8a, 0x87}},
+ "Supset": {Name: "Supset", CodePoints: []int{8913}, Characters: []byte{0xe2, 0x8b, 0x91}},
+ "THORN": {Name: "THORN", CodePoints: []int{222}, Characters: []byte{0xc3, 0x9e}},
+ "TRADE": {Name: "TRADE", CodePoints: []int{8482}, Characters: []byte{0xe2, 0x84, 0xa2}},
+ "TSHcy": {Name: "TSHcy", CodePoints: []int{1035}, Characters: []byte{0xd0, 0x8b}},
+ "TScy": {Name: "TScy", CodePoints: []int{1062}, Characters: []byte{0xd0, 0xa6}},
+ "Tab": {Name: "Tab", CodePoints: []int{9}, Characters: []byte{0x9}},
+ "Tau": {Name: "Tau", CodePoints: []int{932}, Characters: []byte{0xce, 0xa4}},
+ "Tcaron": {Name: "Tcaron", CodePoints: []int{356}, Characters: []byte{0xc5, 0xa4}},
+ "Tcedil": {Name: "Tcedil", CodePoints: []int{354}, Characters: []byte{0xc5, 0xa2}},
+ "Tcy": {Name: "Tcy", CodePoints: []int{1058}, Characters: []byte{0xd0, 0xa2}},
+ "Tfr": {Name: "Tfr", CodePoints: []int{120087}, Characters: []byte{0xf0, 0x9d, 0x94, 0x97}},
+ "Therefore": {Name: "Therefore", CodePoints: []int{8756}, Characters: []byte{0xe2, 0x88, 0xb4}},
+ "Theta": {Name: "Theta", CodePoints: []int{920}, Characters: []byte{0xce, 0x98}},
+ "ThickSpace": {Name: "ThickSpace", CodePoints: []int{8287, 8202}, Characters: []byte{0xe2, 0x81, 0x9f, 0xe2, 0x80, 0x8a}},
+ "ThinSpace": {Name: "ThinSpace", CodePoints: []int{8201}, Characters: []byte{0xe2, 0x80, 0x89}},
+ "Tilde": {Name: "Tilde", CodePoints: []int{8764}, Characters: []byte{0xe2, 0x88, 0xbc}},
+ "TildeEqual": {Name: "TildeEqual", CodePoints: []int{8771}, Characters: []byte{0xe2, 0x89, 0x83}},
+ "TildeFullEqual": {Name: "TildeFullEqual", CodePoints: []int{8773}, Characters: []byte{0xe2, 0x89, 0x85}},
+ "TildeTilde": {Name: "TildeTilde", CodePoints: []int{8776}, Characters: []byte{0xe2, 0x89, 0x88}},
+ "Topf": {Name: "Topf", CodePoints: []int{120139}, Characters: []byte{0xf0, 0x9d, 0x95, 0x8b}},
+ "TripleDot": {Name: "TripleDot", CodePoints: []int{8411}, Characters: []byte{0xe2, 0x83, 0x9b}},
+ "Tscr": {Name: "Tscr", CodePoints: []int{119983}, Characters: []byte{0xf0, 0x9d, 0x92, 0xaf}},
+ "Tstrok": {Name: "Tstrok", CodePoints: []int{358}, Characters: []byte{0xc5, 0xa6}},
+ "Uacute": {Name: "Uacute", CodePoints: []int{218}, Characters: []byte{0xc3, 0x9a}},
+ "Uarr": {Name: "Uarr", CodePoints: []int{8607}, Characters: []byte{0xe2, 0x86, 0x9f}},
+ "Uarrocir": {Name: "Uarrocir", CodePoints: []int{10569}, Characters: []byte{0xe2, 0xa5, 0x89}},
+ "Ubrcy": {Name: "Ubrcy", CodePoints: []int{1038}, Characters: []byte{0xd0, 0x8e}},
+ "Ubreve": {Name: "Ubreve", CodePoints: []int{364}, Characters: []byte{0xc5, 0xac}},
+ "Ucirc": {Name: "Ucirc", CodePoints: []int{219}, Characters: []byte{0xc3, 0x9b}},
+ "Ucy": {Name: "Ucy", CodePoints: []int{1059}, Characters: []byte{0xd0, 0xa3}},
+ "Udblac": {Name: "Udblac", CodePoints: []int{368}, Characters: []byte{0xc5, 0xb0}},
+ "Ufr": {Name: "Ufr", CodePoints: []int{120088}, Characters: []byte{0xf0, 0x9d, 0x94, 0x98}},
+ "Ugrave": {Name: "Ugrave", CodePoints: []int{217}, Characters: []byte{0xc3, 0x99}},
+ "Umacr": {Name: "Umacr", CodePoints: []int{362}, Characters: []byte{0xc5, 0xaa}},
+ "UnderBar": {Name: "UnderBar", CodePoints: []int{95}, Characters: []byte{0x5f}},
+ "UnderBrace": {Name: "UnderBrace", CodePoints: []int{9183}, Characters: []byte{0xe2, 0x8f, 0x9f}},
+ "UnderBracket": {Name: "UnderBracket", CodePoints: []int{9141}, Characters: []byte{0xe2, 0x8e, 0xb5}},
+ "UnderParenthesis": {Name: "UnderParenthesis", CodePoints: []int{9181}, Characters: []byte{0xe2, 0x8f, 0x9d}},
+ "Union": {Name: "Union", CodePoints: []int{8899}, Characters: []byte{0xe2, 0x8b, 0x83}},
+ "UnionPlus": {Name: "UnionPlus", CodePoints: []int{8846}, Characters: []byte{0xe2, 0x8a, 0x8e}},
+ "Uogon": {Name: "Uogon", CodePoints: []int{370}, Characters: []byte{0xc5, 0xb2}},
+ "Uopf": {Name: "Uopf", CodePoints: []int{120140}, Characters: []byte{0xf0, 0x9d, 0x95, 0x8c}},
+ "UpArrow": {Name: "UpArrow", CodePoints: []int{8593}, Characters: []byte{0xe2, 0x86, 0x91}},
+ "UpArrowBar": {Name: "UpArrowBar", CodePoints: []int{10514}, Characters: []byte{0xe2, 0xa4, 0x92}},
+ "UpArrowDownArrow": {Name: "UpArrowDownArrow", CodePoints: []int{8645}, Characters: []byte{0xe2, 0x87, 0x85}},
+ "UpDownArrow": {Name: "UpDownArrow", CodePoints: []int{8597}, Characters: []byte{0xe2, 0x86, 0x95}},
+ "UpEquilibrium": {Name: "UpEquilibrium", CodePoints: []int{10606}, Characters: []byte{0xe2, 0xa5, 0xae}},
+ "UpTee": {Name: "UpTee", CodePoints: []int{8869}, Characters: []byte{0xe2, 0x8a, 0xa5}},
+ "UpTeeArrow": {Name: "UpTeeArrow", CodePoints: []int{8613}, Characters: []byte{0xe2, 0x86, 0xa5}},
+ "Uparrow": {Name: "Uparrow", CodePoints: []int{8657}, Characters: []byte{0xe2, 0x87, 0x91}},
+ "Updownarrow": {Name: "Updownarrow", CodePoints: []int{8661}, Characters: []byte{0xe2, 0x87, 0x95}},
+ "UpperLeftArrow": {Name: "UpperLeftArrow", CodePoints: []int{8598}, Characters: []byte{0xe2, 0x86, 0x96}},
+ "UpperRightArrow": {Name: "UpperRightArrow", CodePoints: []int{8599}, Characters: []byte{0xe2, 0x86, 0x97}},
+ "Upsi": {Name: "Upsi", CodePoints: []int{978}, Characters: []byte{0xcf, 0x92}},
+ "Upsilon": {Name: "Upsilon", CodePoints: []int{933}, Characters: []byte{0xce, 0xa5}},
+ "Uring": {Name: "Uring", CodePoints: []int{366}, Characters: []byte{0xc5, 0xae}},
+ "Uscr": {Name: "Uscr", CodePoints: []int{119984}, Characters: []byte{0xf0, 0x9d, 0x92, 0xb0}},
+ "Utilde": {Name: "Utilde", CodePoints: []int{360}, Characters: []byte{0xc5, 0xa8}},
+ "Uuml": {Name: "Uuml", CodePoints: []int{220}, Characters: []byte{0xc3, 0x9c}},
+ "VDash": {Name: "VDash", CodePoints: []int{8875}, Characters: []byte{0xe2, 0x8a, 0xab}},
+ "Vbar": {Name: "Vbar", CodePoints: []int{10987}, Characters: []byte{0xe2, 0xab, 0xab}},
+ "Vcy": {Name: "Vcy", CodePoints: []int{1042}, Characters: []byte{0xd0, 0x92}},
+ "Vdash": {Name: "Vdash", CodePoints: []int{8873}, Characters: []byte{0xe2, 0x8a, 0xa9}},
+ "Vdashl": {Name: "Vdashl", CodePoints: []int{10982}, Characters: []byte{0xe2, 0xab, 0xa6}},
+ "Vee": {Name: "Vee", CodePoints: []int{8897}, Characters: []byte{0xe2, 0x8b, 0x81}},
+ "Verbar": {Name: "Verbar", CodePoints: []int{8214}, Characters: []byte{0xe2, 0x80, 0x96}},
+ "Vert": {Name: "Vert", CodePoints: []int{8214}, Characters: []byte{0xe2, 0x80, 0x96}},
+ "VerticalBar": {Name: "VerticalBar", CodePoints: []int{8739}, Characters: []byte{0xe2, 0x88, 0xa3}},
+ "VerticalLine": {Name: "VerticalLine", CodePoints: []int{124}, Characters: []byte{0x7c}},
+ "VerticalSeparator": {Name: "VerticalSeparator", CodePoints: []int{10072}, Characters: []byte{0xe2, 0x9d, 0x98}},
+ "VerticalTilde": {Name: "VerticalTilde", CodePoints: []int{8768}, Characters: []byte{0xe2, 0x89, 0x80}},
+ "VeryThinSpace": {Name: "VeryThinSpace", CodePoints: []int{8202}, Characters: []byte{0xe2, 0x80, 0x8a}},
+ "Vfr": {Name: "Vfr", CodePoints: []int{120089}, Characters: []byte{0xf0, 0x9d, 0x94, 0x99}},
+ "Vopf": {Name: "Vopf", CodePoints: []int{120141}, Characters: []byte{0xf0, 0x9d, 0x95, 0x8d}},
+ "Vscr": {Name: "Vscr", CodePoints: []int{119985}, Characters: []byte{0xf0, 0x9d, 0x92, 0xb1}},
+ "Vvdash": {Name: "Vvdash", CodePoints: []int{8874}, Characters: []byte{0xe2, 0x8a, 0xaa}},
+ "Wcirc": {Name: "Wcirc", CodePoints: []int{372}, Characters: []byte{0xc5, 0xb4}},
+ "Wedge": {Name: "Wedge", CodePoints: []int{8896}, Characters: []byte{0xe2, 0x8b, 0x80}},
+ "Wfr": {Name: "Wfr", CodePoints: []int{120090}, Characters: []byte{0xf0, 0x9d, 0x94, 0x9a}},
+ "Wopf": {Name: "Wopf", CodePoints: []int{120142}, Characters: []byte{0xf0, 0x9d, 0x95, 0x8e}},
+ "Wscr": {Name: "Wscr", CodePoints: []int{119986}, Characters: []byte{0xf0, 0x9d, 0x92, 0xb2}},
+ "Xfr": {Name: "Xfr", CodePoints: []int{120091}, Characters: []byte{0xf0, 0x9d, 0x94, 0x9b}},
+ "Xi": {Name: "Xi", CodePoints: []int{926}, Characters: []byte{0xce, 0x9e}},
+ "Xopf": {Name: "Xopf", CodePoints: []int{120143}, Characters: []byte{0xf0, 0x9d, 0x95, 0x8f}},
+ "Xscr": {Name: "Xscr", CodePoints: []int{119987}, Characters: []byte{0xf0, 0x9d, 0x92, 0xb3}},
+ "YAcy": {Name: "YAcy", CodePoints: []int{1071}, Characters: []byte{0xd0, 0xaf}},
+ "YIcy": {Name: "YIcy", CodePoints: []int{1031}, Characters: []byte{0xd0, 0x87}},
+ "YUcy": {Name: "YUcy", CodePoints: []int{1070}, Characters: []byte{0xd0, 0xae}},
+ "Yacute": {Name: "Yacute", CodePoints: []int{221}, Characters: []byte{0xc3, 0x9d}},
+ "Ycirc": {Name: "Ycirc", CodePoints: []int{374}, Characters: []byte{0xc5, 0xb6}},
+ "Ycy": {Name: "Ycy", CodePoints: []int{1067}, Characters: []byte{0xd0, 0xab}},
+ "Yfr": {Name: "Yfr", CodePoints: []int{120092}, Characters: []byte{0xf0, 0x9d, 0x94, 0x9c}},
+ "Yopf": {Name: "Yopf", CodePoints: []int{120144}, Characters: []byte{0xf0, 0x9d, 0x95, 0x90}},
+ "Yscr": {Name: "Yscr", CodePoints: []int{119988}, Characters: []byte{0xf0, 0x9d, 0x92, 0xb4}},
+ "Yuml": {Name: "Yuml", CodePoints: []int{376}, Characters: []byte{0xc5, 0xb8}},
+ "ZHcy": {Name: "ZHcy", CodePoints: []int{1046}, Characters: []byte{0xd0, 0x96}},
+ "Zacute": {Name: "Zacute", CodePoints: []int{377}, Characters: []byte{0xc5, 0xb9}},
+ "Zcaron": {Name: "Zcaron", CodePoints: []int{381}, Characters: []byte{0xc5, 0xbd}},
+ "Zcy": {Name: "Zcy", CodePoints: []int{1047}, Characters: []byte{0xd0, 0x97}},
+ "Zdot": {Name: "Zdot", CodePoints: []int{379}, Characters: []byte{0xc5, 0xbb}},
+ "ZeroWidthSpace": {Name: "ZeroWidthSpace", CodePoints: []int{8203}, Characters: []byte{0xe2, 0x80, 0x8b}},
+ "Zeta": {Name: "Zeta", CodePoints: []int{918}, Characters: []byte{0xce, 0x96}},
+ "Zfr": {Name: "Zfr", CodePoints: []int{8488}, Characters: []byte{0xe2, 0x84, 0xa8}},
+ "Zopf": {Name: "Zopf", CodePoints: []int{8484}, Characters: []byte{0xe2, 0x84, 0xa4}},
+ "Zscr": {Name: "Zscr", CodePoints: []int{119989}, Characters: []byte{0xf0, 0x9d, 0x92, 0xb5}},
+ "aacute": {Name: "aacute", CodePoints: []int{225}, Characters: []byte{0xc3, 0xa1}},
+ "abreve": {Name: "abreve", CodePoints: []int{259}, Characters: []byte{0xc4, 0x83}},
+ "ac": {Name: "ac", CodePoints: []int{8766}, Characters: []byte{0xe2, 0x88, 0xbe}},
+ "acE": {Name: "acE", CodePoints: []int{8766, 819}, Characters: []byte{0xe2, 0x88, 0xbe, 0xcc, 0xb3}},
+ "acd": {Name: "acd", CodePoints: []int{8767}, Characters: []byte{0xe2, 0x88, 0xbf}},
+ "acirc": {Name: "acirc", CodePoints: []int{226}, Characters: []byte{0xc3, 0xa2}},
+ "acute": {Name: "acute", CodePoints: []int{180}, Characters: []byte{0xc2, 0xb4}},
+ "acy": {Name: "acy", CodePoints: []int{1072}, Characters: []byte{0xd0, 0xb0}},
+ "aelig": {Name: "aelig", CodePoints: []int{230}, Characters: []byte{0xc3, 0xa6}},
+ "af": {Name: "af", CodePoints: []int{8289}, Characters: []byte{0xe2, 0x81, 0xa1}},
+ "afr": {Name: "afr", CodePoints: []int{120094}, Characters: []byte{0xf0, 0x9d, 0x94, 0x9e}},
+ "agrave": {Name: "agrave", CodePoints: []int{224}, Characters: []byte{0xc3, 0xa0}},
+ "alefsym": {Name: "alefsym", CodePoints: []int{8501}, Characters: []byte{0xe2, 0x84, 0xb5}},
+ "aleph": {Name: "aleph", CodePoints: []int{8501}, Characters: []byte{0xe2, 0x84, 0xb5}},
+ "alpha": {Name: "alpha", CodePoints: []int{945}, Characters: []byte{0xce, 0xb1}},
+ "amacr": {Name: "amacr", CodePoints: []int{257}, Characters: []byte{0xc4, 0x81}},
+ "amalg": {Name: "amalg", CodePoints: []int{10815}, Characters: []byte{0xe2, 0xa8, 0xbf}},
+ "amp": {Name: "amp", CodePoints: []int{38}, Characters: []byte{0x26}},
+ "and": {Name: "and", CodePoints: []int{8743}, Characters: []byte{0xe2, 0x88, 0xa7}},
+ "andand": {Name: "andand", CodePoints: []int{10837}, Characters: []byte{0xe2, 0xa9, 0x95}},
+ "andd": {Name: "andd", CodePoints: []int{10844}, Characters: []byte{0xe2, 0xa9, 0x9c}},
+ "andslope": {Name: "andslope", CodePoints: []int{10840}, Characters: []byte{0xe2, 0xa9, 0x98}},
+ "andv": {Name: "andv", CodePoints: []int{10842}, Characters: []byte{0xe2, 0xa9, 0x9a}},
+ "ang": {Name: "ang", CodePoints: []int{8736}, Characters: []byte{0xe2, 0x88, 0xa0}},
+ "ange": {Name: "ange", CodePoints: []int{10660}, Characters: []byte{0xe2, 0xa6, 0xa4}},
+ "angle": {Name: "angle", CodePoints: []int{8736}, Characters: []byte{0xe2, 0x88, 0xa0}},
+ "angmsd": {Name: "angmsd", CodePoints: []int{8737}, Characters: []byte{0xe2, 0x88, 0xa1}},
+ "angmsdaa": {Name: "angmsdaa", CodePoints: []int{10664}, Characters: []byte{0xe2, 0xa6, 0xa8}},
+ "angmsdab": {Name: "angmsdab", CodePoints: []int{10665}, Characters: []byte{0xe2, 0xa6, 0xa9}},
+ "angmsdac": {Name: "angmsdac", CodePoints: []int{10666}, Characters: []byte{0xe2, 0xa6, 0xaa}},
+ "angmsdad": {Name: "angmsdad", CodePoints: []int{10667}, Characters: []byte{0xe2, 0xa6, 0xab}},
+ "angmsdae": {Name: "angmsdae", CodePoints: []int{10668}, Characters: []byte{0xe2, 0xa6, 0xac}},
+ "angmsdaf": {Name: "angmsdaf", CodePoints: []int{10669}, Characters: []byte{0xe2, 0xa6, 0xad}},
+ "angmsdag": {Name: "angmsdag", CodePoints: []int{10670}, Characters: []byte{0xe2, 0xa6, 0xae}},
+ "angmsdah": {Name: "angmsdah", CodePoints: []int{10671}, Characters: []byte{0xe2, 0xa6, 0xaf}},
+ "angrt": {Name: "angrt", CodePoints: []int{8735}, Characters: []byte{0xe2, 0x88, 0x9f}},
+ "angrtvb": {Name: "angrtvb", CodePoints: []int{8894}, Characters: []byte{0xe2, 0x8a, 0xbe}},
+ "angrtvbd": {Name: "angrtvbd", CodePoints: []int{10653}, Characters: []byte{0xe2, 0xa6, 0x9d}},
+ "angsph": {Name: "angsph", CodePoints: []int{8738}, Characters: []byte{0xe2, 0x88, 0xa2}},
+ "angst": {Name: "angst", CodePoints: []int{197}, Characters: []byte{0xc3, 0x85}},
+ "angzarr": {Name: "angzarr", CodePoints: []int{9084}, Characters: []byte{0xe2, 0x8d, 0xbc}},
+ "aogon": {Name: "aogon", CodePoints: []int{261}, Characters: []byte{0xc4, 0x85}},
+ "aopf": {Name: "aopf", CodePoints: []int{120146}, Characters: []byte{0xf0, 0x9d, 0x95, 0x92}},
+ "ap": {Name: "ap", CodePoints: []int{8776}, Characters: []byte{0xe2, 0x89, 0x88}},
+ "apE": {Name: "apE", CodePoints: []int{10864}, Characters: []byte{0xe2, 0xa9, 0xb0}},
+ "apacir": {Name: "apacir", CodePoints: []int{10863}, Characters: []byte{0xe2, 0xa9, 0xaf}},
+ "ape": {Name: "ape", CodePoints: []int{8778}, Characters: []byte{0xe2, 0x89, 0x8a}},
+ "apid": {Name: "apid", CodePoints: []int{8779}, Characters: []byte{0xe2, 0x89, 0x8b}},
+ "apos": {Name: "apos", CodePoints: []int{39}, Characters: []byte{0x27}},
+ "approx": {Name: "approx", CodePoints: []int{8776}, Characters: []byte{0xe2, 0x89, 0x88}},
+ "approxeq": {Name: "approxeq", CodePoints: []int{8778}, Characters: []byte{0xe2, 0x89, 0x8a}},
+ "aring": {Name: "aring", CodePoints: []int{229}, Characters: []byte{0xc3, 0xa5}},
+ "ascr": {Name: "ascr", CodePoints: []int{119990}, Characters: []byte{0xf0, 0x9d, 0x92, 0xb6}},
+ "ast": {Name: "ast", CodePoints: []int{42}, Characters: []byte{0x2a}},
+ "asymp": {Name: "asymp", CodePoints: []int{8776}, Characters: []byte{0xe2, 0x89, 0x88}},
+ "asympeq": {Name: "asympeq", CodePoints: []int{8781}, Characters: []byte{0xe2, 0x89, 0x8d}},
+ "atilde": {Name: "atilde", CodePoints: []int{227}, Characters: []byte{0xc3, 0xa3}},
+ "auml": {Name: "auml", CodePoints: []int{228}, Characters: []byte{0xc3, 0xa4}},
+ "awconint": {Name: "awconint", CodePoints: []int{8755}, Characters: []byte{0xe2, 0x88, 0xb3}},
+ "awint": {Name: "awint", CodePoints: []int{10769}, Characters: []byte{0xe2, 0xa8, 0x91}},
+ "bNot": {Name: "bNot", CodePoints: []int{10989}, Characters: []byte{0xe2, 0xab, 0xad}},
+ "backcong": {Name: "backcong", CodePoints: []int{8780}, Characters: []byte{0xe2, 0x89, 0x8c}},
+ "backepsilon": {Name: "backepsilon", CodePoints: []int{1014}, Characters: []byte{0xcf, 0xb6}},
+ "backprime": {Name: "backprime", CodePoints: []int{8245}, Characters: []byte{0xe2, 0x80, 0xb5}},
+ "backsim": {Name: "backsim", CodePoints: []int{8765}, Characters: []byte{0xe2, 0x88, 0xbd}},
+ "backsimeq": {Name: "backsimeq", CodePoints: []int{8909}, Characters: []byte{0xe2, 0x8b, 0x8d}},
+ "barvee": {Name: "barvee", CodePoints: []int{8893}, Characters: []byte{0xe2, 0x8a, 0xbd}},
+ "barwed": {Name: "barwed", CodePoints: []int{8965}, Characters: []byte{0xe2, 0x8c, 0x85}},
+ "barwedge": {Name: "barwedge", CodePoints: []int{8965}, Characters: []byte{0xe2, 0x8c, 0x85}},
+ "bbrk": {Name: "bbrk", CodePoints: []int{9141}, Characters: []byte{0xe2, 0x8e, 0xb5}},
+ "bbrktbrk": {Name: "bbrktbrk", CodePoints: []int{9142}, Characters: []byte{0xe2, 0x8e, 0xb6}},
+ "bcong": {Name: "bcong", CodePoints: []int{8780}, Characters: []byte{0xe2, 0x89, 0x8c}},
+ "bcy": {Name: "bcy", CodePoints: []int{1073}, Characters: []byte{0xd0, 0xb1}},
+ "bdquo": {Name: "bdquo", CodePoints: []int{8222}, Characters: []byte{0xe2, 0x80, 0x9e}},
+ "becaus": {Name: "becaus", CodePoints: []int{8757}, Characters: []byte{0xe2, 0x88, 0xb5}},
+ "because": {Name: "because", CodePoints: []int{8757}, Characters: []byte{0xe2, 0x88, 0xb5}},
+ "bemptyv": {Name: "bemptyv", CodePoints: []int{10672}, Characters: []byte{0xe2, 0xa6, 0xb0}},
+ "bepsi": {Name: "bepsi", CodePoints: []int{1014}, Characters: []byte{0xcf, 0xb6}},
+ "bernou": {Name: "bernou", CodePoints: []int{8492}, Characters: []byte{0xe2, 0x84, 0xac}},
+ "beta": {Name: "beta", CodePoints: []int{946}, Characters: []byte{0xce, 0xb2}},
+ "beth": {Name: "beth", CodePoints: []int{8502}, Characters: []byte{0xe2, 0x84, 0xb6}},
+ "between": {Name: "between", CodePoints: []int{8812}, Characters: []byte{0xe2, 0x89, 0xac}},
+ "bfr": {Name: "bfr", CodePoints: []int{120095}, Characters: []byte{0xf0, 0x9d, 0x94, 0x9f}},
+ "bigcap": {Name: "bigcap", CodePoints: []int{8898}, Characters: []byte{0xe2, 0x8b, 0x82}},
+ "bigcirc": {Name: "bigcirc", CodePoints: []int{9711}, Characters: []byte{0xe2, 0x97, 0xaf}},
+ "bigcup": {Name: "bigcup", CodePoints: []int{8899}, Characters: []byte{0xe2, 0x8b, 0x83}},
+ "bigodot": {Name: "bigodot", CodePoints: []int{10752}, Characters: []byte{0xe2, 0xa8, 0x80}},
+ "bigoplus": {Name: "bigoplus", CodePoints: []int{10753}, Characters: []byte{0xe2, 0xa8, 0x81}},
+ "bigotimes": {Name: "bigotimes", CodePoints: []int{10754}, Characters: []byte{0xe2, 0xa8, 0x82}},
+ "bigsqcup": {Name: "bigsqcup", CodePoints: []int{10758}, Characters: []byte{0xe2, 0xa8, 0x86}},
+ "bigstar": {Name: "bigstar", CodePoints: []int{9733}, Characters: []byte{0xe2, 0x98, 0x85}},
+ "bigtriangledown": {Name: "bigtriangledown", CodePoints: []int{9661}, Characters: []byte{0xe2, 0x96, 0xbd}},
+ "bigtriangleup": {Name: "bigtriangleup", CodePoints: []int{9651}, Characters: []byte{0xe2, 0x96, 0xb3}},
+ "biguplus": {Name: "biguplus", CodePoints: []int{10756}, Characters: []byte{0xe2, 0xa8, 0x84}},
+ "bigvee": {Name: "bigvee", CodePoints: []int{8897}, Characters: []byte{0xe2, 0x8b, 0x81}},
+ "bigwedge": {Name: "bigwedge", CodePoints: []int{8896}, Characters: []byte{0xe2, 0x8b, 0x80}},
+ "bkarow": {Name: "bkarow", CodePoints: []int{10509}, Characters: []byte{0xe2, 0xa4, 0x8d}},
+ "blacklozenge": {Name: "blacklozenge", CodePoints: []int{10731}, Characters: []byte{0xe2, 0xa7, 0xab}},
+ "blacksquare": {Name: "blacksquare", CodePoints: []int{9642}, Characters: []byte{0xe2, 0x96, 0xaa}},
+ "blacktriangle": {Name: "blacktriangle", CodePoints: []int{9652}, Characters: []byte{0xe2, 0x96, 0xb4}},
+ "blacktriangledown": {Name: "blacktriangledown", CodePoints: []int{9662}, Characters: []byte{0xe2, 0x96, 0xbe}},
+ "blacktriangleleft": {Name: "blacktriangleleft", CodePoints: []int{9666}, Characters: []byte{0xe2, 0x97, 0x82}},
+ "blacktriangleright": {Name: "blacktriangleright", CodePoints: []int{9656}, Characters: []byte{0xe2, 0x96, 0xb8}},
+ "blank": {Name: "blank", CodePoints: []int{9251}, Characters: []byte{0xe2, 0x90, 0xa3}},
+ "blk12": {Name: "blk12", CodePoints: []int{9618}, Characters: []byte{0xe2, 0x96, 0x92}},
+ "blk14": {Name: "blk14", CodePoints: []int{9617}, Characters: []byte{0xe2, 0x96, 0x91}},
+ "blk34": {Name: "blk34", CodePoints: []int{9619}, Characters: []byte{0xe2, 0x96, 0x93}},
+ "block": {Name: "block", CodePoints: []int{9608}, Characters: []byte{0xe2, 0x96, 0x88}},
+ "bne": {Name: "bne", CodePoints: []int{61, 8421}, Characters: []byte{0x3d, 0xe2, 0x83, 0xa5}},
+ "bnequiv": {Name: "bnequiv", CodePoints: []int{8801, 8421}, Characters: []byte{0xe2, 0x89, 0xa1, 0xe2, 0x83, 0xa5}},
+ "bnot": {Name: "bnot", CodePoints: []int{8976}, Characters: []byte{0xe2, 0x8c, 0x90}},
+ "bopf": {Name: "bopf", CodePoints: []int{120147}, Characters: []byte{0xf0, 0x9d, 0x95, 0x93}},
+ "bot": {Name: "bot", CodePoints: []int{8869}, Characters: []byte{0xe2, 0x8a, 0xa5}},
+ "bottom": {Name: "bottom", CodePoints: []int{8869}, Characters: []byte{0xe2, 0x8a, 0xa5}},
+ "bowtie": {Name: "bowtie", CodePoints: []int{8904}, Characters: []byte{0xe2, 0x8b, 0x88}},
+ "boxDL": {Name: "boxDL", CodePoints: []int{9559}, Characters: []byte{0xe2, 0x95, 0x97}},
+ "boxDR": {Name: "boxDR", CodePoints: []int{9556}, Characters: []byte{0xe2, 0x95, 0x94}},
+ "boxDl": {Name: "boxDl", CodePoints: []int{9558}, Characters: []byte{0xe2, 0x95, 0x96}},
+ "boxDr": {Name: "boxDr", CodePoints: []int{9555}, Characters: []byte{0xe2, 0x95, 0x93}},
+ "boxH": {Name: "boxH", CodePoints: []int{9552}, Characters: []byte{0xe2, 0x95, 0x90}},
+ "boxHD": {Name: "boxHD", CodePoints: []int{9574}, Characters: []byte{0xe2, 0x95, 0xa6}},
+ "boxHU": {Name: "boxHU", CodePoints: []int{9577}, Characters: []byte{0xe2, 0x95, 0xa9}},
+ "boxHd": {Name: "boxHd", CodePoints: []int{9572}, Characters: []byte{0xe2, 0x95, 0xa4}},
+ "boxHu": {Name: "boxHu", CodePoints: []int{9575}, Characters: []byte{0xe2, 0x95, 0xa7}},
+ "boxUL": {Name: "boxUL", CodePoints: []int{9565}, Characters: []byte{0xe2, 0x95, 0x9d}},
+ "boxUR": {Name: "boxUR", CodePoints: []int{9562}, Characters: []byte{0xe2, 0x95, 0x9a}},
+ "boxUl": {Name: "boxUl", CodePoints: []int{9564}, Characters: []byte{0xe2, 0x95, 0x9c}},
+ "boxUr": {Name: "boxUr", CodePoints: []int{9561}, Characters: []byte{0xe2, 0x95, 0x99}},
+ "boxV": {Name: "boxV", CodePoints: []int{9553}, Characters: []byte{0xe2, 0x95, 0x91}},
+ "boxVH": {Name: "boxVH", CodePoints: []int{9580}, Characters: []byte{0xe2, 0x95, 0xac}},
+ "boxVL": {Name: "boxVL", CodePoints: []int{9571}, Characters: []byte{0xe2, 0x95, 0xa3}},
+ "boxVR": {Name: "boxVR", CodePoints: []int{9568}, Characters: []byte{0xe2, 0x95, 0xa0}},
+ "boxVh": {Name: "boxVh", CodePoints: []int{9579}, Characters: []byte{0xe2, 0x95, 0xab}},
+ "boxVl": {Name: "boxVl", CodePoints: []int{9570}, Characters: []byte{0xe2, 0x95, 0xa2}},
+ "boxVr": {Name: "boxVr", CodePoints: []int{9567}, Characters: []byte{0xe2, 0x95, 0x9f}},
+ "boxbox": {Name: "boxbox", CodePoints: []int{10697}, Characters: []byte{0xe2, 0xa7, 0x89}},
+ "boxdL": {Name: "boxdL", CodePoints: []int{9557}, Characters: []byte{0xe2, 0x95, 0x95}},
+ "boxdR": {Name: "boxdR", CodePoints: []int{9554}, Characters: []byte{0xe2, 0x95, 0x92}},
+ "boxdl": {Name: "boxdl", CodePoints: []int{9488}, Characters: []byte{0xe2, 0x94, 0x90}},
+ "boxdr": {Name: "boxdr", CodePoints: []int{9484}, Characters: []byte{0xe2, 0x94, 0x8c}},
+ "boxh": {Name: "boxh", CodePoints: []int{9472}, Characters: []byte{0xe2, 0x94, 0x80}},
+ "boxhD": {Name: "boxhD", CodePoints: []int{9573}, Characters: []byte{0xe2, 0x95, 0xa5}},
+ "boxhU": {Name: "boxhU", CodePoints: []int{9576}, Characters: []byte{0xe2, 0x95, 0xa8}},
+ "boxhd": {Name: "boxhd", CodePoints: []int{9516}, Characters: []byte{0xe2, 0x94, 0xac}},
+ "boxhu": {Name: "boxhu", CodePoints: []int{9524}, Characters: []byte{0xe2, 0x94, 0xb4}},
+ "boxminus": {Name: "boxminus", CodePoints: []int{8863}, Characters: []byte{0xe2, 0x8a, 0x9f}},
+ "boxplus": {Name: "boxplus", CodePoints: []int{8862}, Characters: []byte{0xe2, 0x8a, 0x9e}},
+ "boxtimes": {Name: "boxtimes", CodePoints: []int{8864}, Characters: []byte{0xe2, 0x8a, 0xa0}},
+ "boxuL": {Name: "boxuL", CodePoints: []int{9563}, Characters: []byte{0xe2, 0x95, 0x9b}},
+ "boxuR": {Name: "boxuR", CodePoints: []int{9560}, Characters: []byte{0xe2, 0x95, 0x98}},
+ "boxul": {Name: "boxul", CodePoints: []int{9496}, Characters: []byte{0xe2, 0x94, 0x98}},
+ "boxur": {Name: "boxur", CodePoints: []int{9492}, Characters: []byte{0xe2, 0x94, 0x94}},
+ "boxv": {Name: "boxv", CodePoints: []int{9474}, Characters: []byte{0xe2, 0x94, 0x82}},
+ "boxvH": {Name: "boxvH", CodePoints: []int{9578}, Characters: []byte{0xe2, 0x95, 0xaa}},
+ "boxvL": {Name: "boxvL", CodePoints: []int{9569}, Characters: []byte{0xe2, 0x95, 0xa1}},
+ "boxvR": {Name: "boxvR", CodePoints: []int{9566}, Characters: []byte{0xe2, 0x95, 0x9e}},
+ "boxvh": {Name: "boxvh", CodePoints: []int{9532}, Characters: []byte{0xe2, 0x94, 0xbc}},
+ "boxvl": {Name: "boxvl", CodePoints: []int{9508}, Characters: []byte{0xe2, 0x94, 0xa4}},
+ "boxvr": {Name: "boxvr", CodePoints: []int{9500}, Characters: []byte{0xe2, 0x94, 0x9c}},
+ "bprime": {Name: "bprime", CodePoints: []int{8245}, Characters: []byte{0xe2, 0x80, 0xb5}},
+ "breve": {Name: "breve", CodePoints: []int{728}, Characters: []byte{0xcb, 0x98}},
+ "brvbar": {Name: "brvbar", CodePoints: []int{166}, Characters: []byte{0xc2, 0xa6}},
+ "bscr": {Name: "bscr", CodePoints: []int{119991}, Characters: []byte{0xf0, 0x9d, 0x92, 0xb7}},
+ "bsemi": {Name: "bsemi", CodePoints: []int{8271}, Characters: []byte{0xe2, 0x81, 0x8f}},
+ "bsim": {Name: "bsim", CodePoints: []int{8765}, Characters: []byte{0xe2, 0x88, 0xbd}},
+ "bsime": {Name: "bsime", CodePoints: []int{8909}, Characters: []byte{0xe2, 0x8b, 0x8d}},
+ "bsol": {Name: "bsol", CodePoints: []int{92}, Characters: []byte{0x5c}},
+ "bsolb": {Name: "bsolb", CodePoints: []int{10693}, Characters: []byte{0xe2, 0xa7, 0x85}},
+ "bsolhsub": {Name: "bsolhsub", CodePoints: []int{10184}, Characters: []byte{0xe2, 0x9f, 0x88}},
+ "bull": {Name: "bull", CodePoints: []int{8226}, Characters: []byte{0xe2, 0x80, 0xa2}},
+ "bullet": {Name: "bullet", CodePoints: []int{8226}, Characters: []byte{0xe2, 0x80, 0xa2}},
+ "bump": {Name: "bump", CodePoints: []int{8782}, Characters: []byte{0xe2, 0x89, 0x8e}},
+ "bumpE": {Name: "bumpE", CodePoints: []int{10926}, Characters: []byte{0xe2, 0xaa, 0xae}},
+ "bumpe": {Name: "bumpe", CodePoints: []int{8783}, Characters: []byte{0xe2, 0x89, 0x8f}},
+ "bumpeq": {Name: "bumpeq", CodePoints: []int{8783}, Characters: []byte{0xe2, 0x89, 0x8f}},
+ "cacute": {Name: "cacute", CodePoints: []int{263}, Characters: []byte{0xc4, 0x87}},
+ "cap": {Name: "cap", CodePoints: []int{8745}, Characters: []byte{0xe2, 0x88, 0xa9}},
+ "capand": {Name: "capand", CodePoints: []int{10820}, Characters: []byte{0xe2, 0xa9, 0x84}},
+ "capbrcup": {Name: "capbrcup", CodePoints: []int{10825}, Characters: []byte{0xe2, 0xa9, 0x89}},
+ "capcap": {Name: "capcap", CodePoints: []int{10827}, Characters: []byte{0xe2, 0xa9, 0x8b}},
+ "capcup": {Name: "capcup", CodePoints: []int{10823}, Characters: []byte{0xe2, 0xa9, 0x87}},
+ "capdot": {Name: "capdot", CodePoints: []int{10816}, Characters: []byte{0xe2, 0xa9, 0x80}},
+ "caps": {Name: "caps", CodePoints: []int{8745, 65024}, Characters: []byte{0xe2, 0x88, 0xa9, 0xef, 0xb8, 0x80}},
+ "caret": {Name: "caret", CodePoints: []int{8257}, Characters: []byte{0xe2, 0x81, 0x81}},
+ "caron": {Name: "caron", CodePoints: []int{711}, Characters: []byte{0xcb, 0x87}},
+ "ccaps": {Name: "ccaps", CodePoints: []int{10829}, Characters: []byte{0xe2, 0xa9, 0x8d}},
+ "ccaron": {Name: "ccaron", CodePoints: []int{269}, Characters: []byte{0xc4, 0x8d}},
+ "ccedil": {Name: "ccedil", CodePoints: []int{231}, Characters: []byte{0xc3, 0xa7}},
+ "ccirc": {Name: "ccirc", CodePoints: []int{265}, Characters: []byte{0xc4, 0x89}},
+ "ccups": {Name: "ccups", CodePoints: []int{10828}, Characters: []byte{0xe2, 0xa9, 0x8c}},
+ "ccupssm": {Name: "ccupssm", CodePoints: []int{10832}, Characters: []byte{0xe2, 0xa9, 0x90}},
+ "cdot": {Name: "cdot", CodePoints: []int{267}, Characters: []byte{0xc4, 0x8b}},
+ "cedil": {Name: "cedil", CodePoints: []int{184}, Characters: []byte{0xc2, 0xb8}},
+ "cemptyv": {Name: "cemptyv", CodePoints: []int{10674}, Characters: []byte{0xe2, 0xa6, 0xb2}},
+ "cent": {Name: "cent", CodePoints: []int{162}, Characters: []byte{0xc2, 0xa2}},
+ "centerdot": {Name: "centerdot", CodePoints: []int{183}, Characters: []byte{0xc2, 0xb7}},
+ "cfr": {Name: "cfr", CodePoints: []int{120096}, Characters: []byte{0xf0, 0x9d, 0x94, 0xa0}},
+ "chcy": {Name: "chcy", CodePoints: []int{1095}, Characters: []byte{0xd1, 0x87}},
+ "check": {Name: "check", CodePoints: []int{10003}, Characters: []byte{0xe2, 0x9c, 0x93}},
+ "checkmark": {Name: "checkmark", CodePoints: []int{10003}, Characters: []byte{0xe2, 0x9c, 0x93}},
+ "chi": {Name: "chi", CodePoints: []int{967}, Characters: []byte{0xcf, 0x87}},
+ "cir": {Name: "cir", CodePoints: []int{9675}, Characters: []byte{0xe2, 0x97, 0x8b}},
+ "cirE": {Name: "cirE", CodePoints: []int{10691}, Characters: []byte{0xe2, 0xa7, 0x83}},
+ "circ": {Name: "circ", CodePoints: []int{710}, Characters: []byte{0xcb, 0x86}},
+ "circeq": {Name: "circeq", CodePoints: []int{8791}, Characters: []byte{0xe2, 0x89, 0x97}},
+ "circlearrowleft": {Name: "circlearrowleft", CodePoints: []int{8634}, Characters: []byte{0xe2, 0x86, 0xba}},
+ "circlearrowright": {Name: "circlearrowright", CodePoints: []int{8635}, Characters: []byte{0xe2, 0x86, 0xbb}},
+ "circledR": {Name: "circledR", CodePoints: []int{174}, Characters: []byte{0xc2, 0xae}},
+ "circledS": {Name: "circledS", CodePoints: []int{9416}, Characters: []byte{0xe2, 0x93, 0x88}},
+ "circledast": {Name: "circledast", CodePoints: []int{8859}, Characters: []byte{0xe2, 0x8a, 0x9b}},
+ "circledcirc": {Name: "circledcirc", CodePoints: []int{8858}, Characters: []byte{0xe2, 0x8a, 0x9a}},
+ "circleddash": {Name: "circleddash", CodePoints: []int{8861}, Characters: []byte{0xe2, 0x8a, 0x9d}},
+ "cire": {Name: "cire", CodePoints: []int{8791}, Characters: []byte{0xe2, 0x89, 0x97}},
+ "cirfnint": {Name: "cirfnint", CodePoints: []int{10768}, Characters: []byte{0xe2, 0xa8, 0x90}},
+ "cirmid": {Name: "cirmid", CodePoints: []int{10991}, Characters: []byte{0xe2, 0xab, 0xaf}},
+ "cirscir": {Name: "cirscir", CodePoints: []int{10690}, Characters: []byte{0xe2, 0xa7, 0x82}},
+ "clubs": {Name: "clubs", CodePoints: []int{9827}, Characters: []byte{0xe2, 0x99, 0xa3}},
+ "clubsuit": {Name: "clubsuit", CodePoints: []int{9827}, Characters: []byte{0xe2, 0x99, 0xa3}},
+ "colon": {Name: "colon", CodePoints: []int{58}, Characters: []byte{0x3a}},
+ "colone": {Name: "colone", CodePoints: []int{8788}, Characters: []byte{0xe2, 0x89, 0x94}},
+ "coloneq": {Name: "coloneq", CodePoints: []int{8788}, Characters: []byte{0xe2, 0x89, 0x94}},
+ "comma": {Name: "comma", CodePoints: []int{44}, Characters: []byte{0x2c}},
+ "commat": {Name: "commat", CodePoints: []int{64}, Characters: []byte{0x40}},
+ "comp": {Name: "comp", CodePoints: []int{8705}, Characters: []byte{0xe2, 0x88, 0x81}},
+ "compfn": {Name: "compfn", CodePoints: []int{8728}, Characters: []byte{0xe2, 0x88, 0x98}},
+ "complement": {Name: "complement", CodePoints: []int{8705}, Characters: []byte{0xe2, 0x88, 0x81}},
+ "complexes": {Name: "complexes", CodePoints: []int{8450}, Characters: []byte{0xe2, 0x84, 0x82}},
+ "cong": {Name: "cong", CodePoints: []int{8773}, Characters: []byte{0xe2, 0x89, 0x85}},
+ "congdot": {Name: "congdot", CodePoints: []int{10861}, Characters: []byte{0xe2, 0xa9, 0xad}},
+ "conint": {Name: "conint", CodePoints: []int{8750}, Characters: []byte{0xe2, 0x88, 0xae}},
+ "copf": {Name: "copf", CodePoints: []int{120148}, Characters: []byte{0xf0, 0x9d, 0x95, 0x94}},
+ "coprod": {Name: "coprod", CodePoints: []int{8720}, Characters: []byte{0xe2, 0x88, 0x90}},
+ "copy": {Name: "copy", CodePoints: []int{169}, Characters: []byte{0xc2, 0xa9}},
+ "copysr": {Name: "copysr", CodePoints: []int{8471}, Characters: []byte{0xe2, 0x84, 0x97}},
+ "crarr": {Name: "crarr", CodePoints: []int{8629}, Characters: []byte{0xe2, 0x86, 0xb5}},
+ "cross": {Name: "cross", CodePoints: []int{10007}, Characters: []byte{0xe2, 0x9c, 0x97}},
+ "cscr": {Name: "cscr", CodePoints: []int{119992}, Characters: []byte{0xf0, 0x9d, 0x92, 0xb8}},
+ "csub": {Name: "csub", CodePoints: []int{10959}, Characters: []byte{0xe2, 0xab, 0x8f}},
+ "csube": {Name: "csube", CodePoints: []int{10961}, Characters: []byte{0xe2, 0xab, 0x91}},
+ "csup": {Name: "csup", CodePoints: []int{10960}, Characters: []byte{0xe2, 0xab, 0x90}},
+ "csupe": {Name: "csupe", CodePoints: []int{10962}, Characters: []byte{0xe2, 0xab, 0x92}},
+ "ctdot": {Name: "ctdot", CodePoints: []int{8943}, Characters: []byte{0xe2, 0x8b, 0xaf}},
+ "cudarrl": {Name: "cudarrl", CodePoints: []int{10552}, Characters: []byte{0xe2, 0xa4, 0xb8}},
+ "cudarrr": {Name: "cudarrr", CodePoints: []int{10549}, Characters: []byte{0xe2, 0xa4, 0xb5}},
+ "cuepr": {Name: "cuepr", CodePoints: []int{8926}, Characters: []byte{0xe2, 0x8b, 0x9e}},
+ "cuesc": {Name: "cuesc", CodePoints: []int{8927}, Characters: []byte{0xe2, 0x8b, 0x9f}},
+ "cularr": {Name: "cularr", CodePoints: []int{8630}, Characters: []byte{0xe2, 0x86, 0xb6}},
+ "cularrp": {Name: "cularrp", CodePoints: []int{10557}, Characters: []byte{0xe2, 0xa4, 0xbd}},
+ "cup": {Name: "cup", CodePoints: []int{8746}, Characters: []byte{0xe2, 0x88, 0xaa}},
+ "cupbrcap": {Name: "cupbrcap", CodePoints: []int{10824}, Characters: []byte{0xe2, 0xa9, 0x88}},
+ "cupcap": {Name: "cupcap", CodePoints: []int{10822}, Characters: []byte{0xe2, 0xa9, 0x86}},
+ "cupcup": {Name: "cupcup", CodePoints: []int{10826}, Characters: []byte{0xe2, 0xa9, 0x8a}},
+ "cupdot": {Name: "cupdot", CodePoints: []int{8845}, Characters: []byte{0xe2, 0x8a, 0x8d}},
+ "cupor": {Name: "cupor", CodePoints: []int{10821}, Characters: []byte{0xe2, 0xa9, 0x85}},
+ "cups": {Name: "cups", CodePoints: []int{8746, 65024}, Characters: []byte{0xe2, 0x88, 0xaa, 0xef, 0xb8, 0x80}},
+ "curarr": {Name: "curarr", CodePoints: []int{8631}, Characters: []byte{0xe2, 0x86, 0xb7}},
+ "curarrm": {Name: "curarrm", CodePoints: []int{10556}, Characters: []byte{0xe2, 0xa4, 0xbc}},
+ "curlyeqprec": {Name: "curlyeqprec", CodePoints: []int{8926}, Characters: []byte{0xe2, 0x8b, 0x9e}},
+ "curlyeqsucc": {Name: "curlyeqsucc", CodePoints: []int{8927}, Characters: []byte{0xe2, 0x8b, 0x9f}},
+ "curlyvee": {Name: "curlyvee", CodePoints: []int{8910}, Characters: []byte{0xe2, 0x8b, 0x8e}},
+ "curlywedge": {Name: "curlywedge", CodePoints: []int{8911}, Characters: []byte{0xe2, 0x8b, 0x8f}},
+ "curren": {Name: "curren", CodePoints: []int{164}, Characters: []byte{0xc2, 0xa4}},
+ "curvearrowleft": {Name: "curvearrowleft", CodePoints: []int{8630}, Characters: []byte{0xe2, 0x86, 0xb6}},
+ "curvearrowright": {Name: "curvearrowright", CodePoints: []int{8631}, Characters: []byte{0xe2, 0x86, 0xb7}},
+ "cuvee": {Name: "cuvee", CodePoints: []int{8910}, Characters: []byte{0xe2, 0x8b, 0x8e}},
+ "cuwed": {Name: "cuwed", CodePoints: []int{8911}, Characters: []byte{0xe2, 0x8b, 0x8f}},
+ "cwconint": {Name: "cwconint", CodePoints: []int{8754}, Characters: []byte{0xe2, 0x88, 0xb2}},
+ "cwint": {Name: "cwint", CodePoints: []int{8753}, Characters: []byte{0xe2, 0x88, 0xb1}},
+ "cylcty": {Name: "cylcty", CodePoints: []int{9005}, Characters: []byte{0xe2, 0x8c, 0xad}},
+ "dArr": {Name: "dArr", CodePoints: []int{8659}, Characters: []byte{0xe2, 0x87, 0x93}},
+ "dHar": {Name: "dHar", CodePoints: []int{10597}, Characters: []byte{0xe2, 0xa5, 0xa5}},
+ "dagger": {Name: "dagger", CodePoints: []int{8224}, Characters: []byte{0xe2, 0x80, 0xa0}},
+ "daleth": {Name: "daleth", CodePoints: []int{8504}, Characters: []byte{0xe2, 0x84, 0xb8}},
+ "darr": {Name: "darr", CodePoints: []int{8595}, Characters: []byte{0xe2, 0x86, 0x93}},
+ "dash": {Name: "dash", CodePoints: []int{8208}, Characters: []byte{0xe2, 0x80, 0x90}},
+ "dashv": {Name: "dashv", CodePoints: []int{8867}, Characters: []byte{0xe2, 0x8a, 0xa3}},
+ "dbkarow": {Name: "dbkarow", CodePoints: []int{10511}, Characters: []byte{0xe2, 0xa4, 0x8f}},
+ "dblac": {Name: "dblac", CodePoints: []int{733}, Characters: []byte{0xcb, 0x9d}},
+ "dcaron": {Name: "dcaron", CodePoints: []int{271}, Characters: []byte{0xc4, 0x8f}},
+ "dcy": {Name: "dcy", CodePoints: []int{1076}, Characters: []byte{0xd0, 0xb4}},
+ "dd": {Name: "dd", CodePoints: []int{8518}, Characters: []byte{0xe2, 0x85, 0x86}},
+ "ddagger": {Name: "ddagger", CodePoints: []int{8225}, Characters: []byte{0xe2, 0x80, 0xa1}},
+ "ddarr": {Name: "ddarr", CodePoints: []int{8650}, Characters: []byte{0xe2, 0x87, 0x8a}},
+ "ddotseq": {Name: "ddotseq", CodePoints: []int{10871}, Characters: []byte{0xe2, 0xa9, 0xb7}},
+ "deg": {Name: "deg", CodePoints: []int{176}, Characters: []byte{0xc2, 0xb0}},
+ "delta": {Name: "delta", CodePoints: []int{948}, Characters: []byte{0xce, 0xb4}},
+ "demptyv": {Name: "demptyv", CodePoints: []int{10673}, Characters: []byte{0xe2, 0xa6, 0xb1}},
+ "dfisht": {Name: "dfisht", CodePoints: []int{10623}, Characters: []byte{0xe2, 0xa5, 0xbf}},
+ "dfr": {Name: "dfr", CodePoints: []int{120097}, Characters: []byte{0xf0, 0x9d, 0x94, 0xa1}},
+ "dharl": {Name: "dharl", CodePoints: []int{8643}, Characters: []byte{0xe2, 0x87, 0x83}},
+ "dharr": {Name: "dharr", CodePoints: []int{8642}, Characters: []byte{0xe2, 0x87, 0x82}},
+ "diam": {Name: "diam", CodePoints: []int{8900}, Characters: []byte{0xe2, 0x8b, 0x84}},
+ "diamond": {Name: "diamond", CodePoints: []int{8900}, Characters: []byte{0xe2, 0x8b, 0x84}},
+ "diamondsuit": {Name: "diamondsuit", CodePoints: []int{9830}, Characters: []byte{0xe2, 0x99, 0xa6}},
+ "diams": {Name: "diams", CodePoints: []int{9830}, Characters: []byte{0xe2, 0x99, 0xa6}},
+ "die": {Name: "die", CodePoints: []int{168}, Characters: []byte{0xc2, 0xa8}},
+ "digamma": {Name: "digamma", CodePoints: []int{989}, Characters: []byte{0xcf, 0x9d}},
+ "disin": {Name: "disin", CodePoints: []int{8946}, Characters: []byte{0xe2, 0x8b, 0xb2}},
+ "div": {Name: "div", CodePoints: []int{247}, Characters: []byte{0xc3, 0xb7}},
+ "divide": {Name: "divide", CodePoints: []int{247}, Characters: []byte{0xc3, 0xb7}},
+ "divideontimes": {Name: "divideontimes", CodePoints: []int{8903}, Characters: []byte{0xe2, 0x8b, 0x87}},
+ "divonx": {Name: "divonx", CodePoints: []int{8903}, Characters: []byte{0xe2, 0x8b, 0x87}},
+ "djcy": {Name: "djcy", CodePoints: []int{1106}, Characters: []byte{0xd1, 0x92}},
+ "dlcorn": {Name: "dlcorn", CodePoints: []int{8990}, Characters: []byte{0xe2, 0x8c, 0x9e}},
+ "dlcrop": {Name: "dlcrop", CodePoints: []int{8973}, Characters: []byte{0xe2, 0x8c, 0x8d}},
+ "dollar": {Name: "dollar", CodePoints: []int{36}, Characters: []byte{0x24}},
+ "dopf": {Name: "dopf", CodePoints: []int{120149}, Characters: []byte{0xf0, 0x9d, 0x95, 0x95}},
+ "dot": {Name: "dot", CodePoints: []int{729}, Characters: []byte{0xcb, 0x99}},
+ "doteq": {Name: "doteq", CodePoints: []int{8784}, Characters: []byte{0xe2, 0x89, 0x90}},
+ "doteqdot": {Name: "doteqdot", CodePoints: []int{8785}, Characters: []byte{0xe2, 0x89, 0x91}},
+ "dotminus": {Name: "dotminus", CodePoints: []int{8760}, Characters: []byte{0xe2, 0x88, 0xb8}},
+ "dotplus": {Name: "dotplus", CodePoints: []int{8724}, Characters: []byte{0xe2, 0x88, 0x94}},
+ "dotsquare": {Name: "dotsquare", CodePoints: []int{8865}, Characters: []byte{0xe2, 0x8a, 0xa1}},
+ "doublebarwedge": {Name: "doublebarwedge", CodePoints: []int{8966}, Characters: []byte{0xe2, 0x8c, 0x86}},
+ "downarrow": {Name: "downarrow", CodePoints: []int{8595}, Characters: []byte{0xe2, 0x86, 0x93}},
+ "downdownarrows": {Name: "downdownarrows", CodePoints: []int{8650}, Characters: []byte{0xe2, 0x87, 0x8a}},
+ "downharpoonleft": {Name: "downharpoonleft", CodePoints: []int{8643}, Characters: []byte{0xe2, 0x87, 0x83}},
+ "downharpoonright": {Name: "downharpoonright", CodePoints: []int{8642}, Characters: []byte{0xe2, 0x87, 0x82}},
+ "drbkarow": {Name: "drbkarow", CodePoints: []int{10512}, Characters: []byte{0xe2, 0xa4, 0x90}},
+ "drcorn": {Name: "drcorn", CodePoints: []int{8991}, Characters: []byte{0xe2, 0x8c, 0x9f}},
+ "drcrop": {Name: "drcrop", CodePoints: []int{8972}, Characters: []byte{0xe2, 0x8c, 0x8c}},
+ "dscr": {Name: "dscr", CodePoints: []int{119993}, Characters: []byte{0xf0, 0x9d, 0x92, 0xb9}},
+ "dscy": {Name: "dscy", CodePoints: []int{1109}, Characters: []byte{0xd1, 0x95}},
+ "dsol": {Name: "dsol", CodePoints: []int{10742}, Characters: []byte{0xe2, 0xa7, 0xb6}},
+ "dstrok": {Name: "dstrok", CodePoints: []int{273}, Characters: []byte{0xc4, 0x91}},
+ "dtdot": {Name: "dtdot", CodePoints: []int{8945}, Characters: []byte{0xe2, 0x8b, 0xb1}},
+ "dtri": {Name: "dtri", CodePoints: []int{9663}, Characters: []byte{0xe2, 0x96, 0xbf}},
+ "dtrif": {Name: "dtrif", CodePoints: []int{9662}, Characters: []byte{0xe2, 0x96, 0xbe}},
+ "duarr": {Name: "duarr", CodePoints: []int{8693}, Characters: []byte{0xe2, 0x87, 0xb5}},
+ "duhar": {Name: "duhar", CodePoints: []int{10607}, Characters: []byte{0xe2, 0xa5, 0xaf}},
+ "dwangle": {Name: "dwangle", CodePoints: []int{10662}, Characters: []byte{0xe2, 0xa6, 0xa6}},
+ "dzcy": {Name: "dzcy", CodePoints: []int{1119}, Characters: []byte{0xd1, 0x9f}},
+ "dzigrarr": {Name: "dzigrarr", CodePoints: []int{10239}, Characters: []byte{0xe2, 0x9f, 0xbf}},
+ "eDDot": {Name: "eDDot", CodePoints: []int{10871}, Characters: []byte{0xe2, 0xa9, 0xb7}},
+ "eDot": {Name: "eDot", CodePoints: []int{8785}, Characters: []byte{0xe2, 0x89, 0x91}},
+ "eacute": {Name: "eacute", CodePoints: []int{233}, Characters: []byte{0xc3, 0xa9}},
+ "easter": {Name: "easter", CodePoints: []int{10862}, Characters: []byte{0xe2, 0xa9, 0xae}},
+ "ecaron": {Name: "ecaron", CodePoints: []int{283}, Characters: []byte{0xc4, 0x9b}},
+ "ecir": {Name: "ecir", CodePoints: []int{8790}, Characters: []byte{0xe2, 0x89, 0x96}},
+ "ecirc": {Name: "ecirc", CodePoints: []int{234}, Characters: []byte{0xc3, 0xaa}},
+ "ecolon": {Name: "ecolon", CodePoints: []int{8789}, Characters: []byte{0xe2, 0x89, 0x95}},
+ "ecy": {Name: "ecy", CodePoints: []int{1101}, Characters: []byte{0xd1, 0x8d}},
+ "edot": {Name: "edot", CodePoints: []int{279}, Characters: []byte{0xc4, 0x97}},
+ "ee": {Name: "ee", CodePoints: []int{8519}, Characters: []byte{0xe2, 0x85, 0x87}},
+ "efDot": {Name: "efDot", CodePoints: []int{8786}, Characters: []byte{0xe2, 0x89, 0x92}},
+ "efr": {Name: "efr", CodePoints: []int{120098}, Characters: []byte{0xf0, 0x9d, 0x94, 0xa2}},
+ "eg": {Name: "eg", CodePoints: []int{10906}, Characters: []byte{0xe2, 0xaa, 0x9a}},
+ "egrave": {Name: "egrave", CodePoints: []int{232}, Characters: []byte{0xc3, 0xa8}},
+ "egs": {Name: "egs", CodePoints: []int{10902}, Characters: []byte{0xe2, 0xaa, 0x96}},
+ "egsdot": {Name: "egsdot", CodePoints: []int{10904}, Characters: []byte{0xe2, 0xaa, 0x98}},
+ "el": {Name: "el", CodePoints: []int{10905}, Characters: []byte{0xe2, 0xaa, 0x99}},
+ "elinters": {Name: "elinters", CodePoints: []int{9191}, Characters: []byte{0xe2, 0x8f, 0xa7}},
+ "ell": {Name: "ell", CodePoints: []int{8467}, Characters: []byte{0xe2, 0x84, 0x93}},
+ "els": {Name: "els", CodePoints: []int{10901}, Characters: []byte{0xe2, 0xaa, 0x95}},
+ "elsdot": {Name: "elsdot", CodePoints: []int{10903}, Characters: []byte{0xe2, 0xaa, 0x97}},
+ "emacr": {Name: "emacr", CodePoints: []int{275}, Characters: []byte{0xc4, 0x93}},
+ "empty": {Name: "empty", CodePoints: []int{8709}, Characters: []byte{0xe2, 0x88, 0x85}},
+ "emptyset": {Name: "emptyset", CodePoints: []int{8709}, Characters: []byte{0xe2, 0x88, 0x85}},
+ "emptyv": {Name: "emptyv", CodePoints: []int{8709}, Characters: []byte{0xe2, 0x88, 0x85}},
+ "emsp": {Name: "emsp", CodePoints: []int{8195}, Characters: []byte{0xe2, 0x80, 0x83}},
+ "emsp13": {Name: "emsp13", CodePoints: []int{8196}, Characters: []byte{0xe2, 0x80, 0x84}},
+ "emsp14": {Name: "emsp14", CodePoints: []int{8197}, Characters: []byte{0xe2, 0x80, 0x85}},
+ "eng": {Name: "eng", CodePoints: []int{331}, Characters: []byte{0xc5, 0x8b}},
+ "ensp": {Name: "ensp", CodePoints: []int{8194}, Characters: []byte{0xe2, 0x80, 0x82}},
+ "eogon": {Name: "eogon", CodePoints: []int{281}, Characters: []byte{0xc4, 0x99}},
+ "eopf": {Name: "eopf", CodePoints: []int{120150}, Characters: []byte{0xf0, 0x9d, 0x95, 0x96}},
+ "epar": {Name: "epar", CodePoints: []int{8917}, Characters: []byte{0xe2, 0x8b, 0x95}},
+ "eparsl": {Name: "eparsl", CodePoints: []int{10723}, Characters: []byte{0xe2, 0xa7, 0xa3}},
+ "eplus": {Name: "eplus", CodePoints: []int{10865}, Characters: []byte{0xe2, 0xa9, 0xb1}},
+ "epsi": {Name: "epsi", CodePoints: []int{949}, Characters: []byte{0xce, 0xb5}},
+ "epsilon": {Name: "epsilon", CodePoints: []int{949}, Characters: []byte{0xce, 0xb5}},
+ "epsiv": {Name: "epsiv", CodePoints: []int{1013}, Characters: []byte{0xcf, 0xb5}},
+ "eqcirc": {Name: "eqcirc", CodePoints: []int{8790}, Characters: []byte{0xe2, 0x89, 0x96}},
+ "eqcolon": {Name: "eqcolon", CodePoints: []int{8789}, Characters: []byte{0xe2, 0x89, 0x95}},
+ "eqsim": {Name: "eqsim", CodePoints: []int{8770}, Characters: []byte{0xe2, 0x89, 0x82}},
+ "eqslantgtr": {Name: "eqslantgtr", CodePoints: []int{10902}, Characters: []byte{0xe2, 0xaa, 0x96}},
+ "eqslantless": {Name: "eqslantless", CodePoints: []int{10901}, Characters: []byte{0xe2, 0xaa, 0x95}},
+ "equals": {Name: "equals", CodePoints: []int{61}, Characters: []byte{0x3d}},
+ "equest": {Name: "equest", CodePoints: []int{8799}, Characters: []byte{0xe2, 0x89, 0x9f}},
+ "equiv": {Name: "equiv", CodePoints: []int{8801}, Characters: []byte{0xe2, 0x89, 0xa1}},
+ "equivDD": {Name: "equivDD", CodePoints: []int{10872}, Characters: []byte{0xe2, 0xa9, 0xb8}},
+ "eqvparsl": {Name: "eqvparsl", CodePoints: []int{10725}, Characters: []byte{0xe2, 0xa7, 0xa5}},
+ "erDot": {Name: "erDot", CodePoints: []int{8787}, Characters: []byte{0xe2, 0x89, 0x93}},
+ "erarr": {Name: "erarr", CodePoints: []int{10609}, Characters: []byte{0xe2, 0xa5, 0xb1}},
+ "escr": {Name: "escr", CodePoints: []int{8495}, Characters: []byte{0xe2, 0x84, 0xaf}},
+ "esdot": {Name: "esdot", CodePoints: []int{8784}, Characters: []byte{0xe2, 0x89, 0x90}},
+ "esim": {Name: "esim", CodePoints: []int{8770}, Characters: []byte{0xe2, 0x89, 0x82}},
+ "eta": {Name: "eta", CodePoints: []int{951}, Characters: []byte{0xce, 0xb7}},
+ "eth": {Name: "eth", CodePoints: []int{240}, Characters: []byte{0xc3, 0xb0}},
+ "euml": {Name: "euml", CodePoints: []int{235}, Characters: []byte{0xc3, 0xab}},
+ "euro": {Name: "euro", CodePoints: []int{8364}, Characters: []byte{0xe2, 0x82, 0xac}},
+ "excl": {Name: "excl", CodePoints: []int{33}, Characters: []byte{0x21}},
+ "exist": {Name: "exist", CodePoints: []int{8707}, Characters: []byte{0xe2, 0x88, 0x83}},
+ "expectation": {Name: "expectation", CodePoints: []int{8496}, Characters: []byte{0xe2, 0x84, 0xb0}},
+ "exponentiale": {Name: "exponentiale", CodePoints: []int{8519}, Characters: []byte{0xe2, 0x85, 0x87}},
+ "fallingdotseq": {Name: "fallingdotseq", CodePoints: []int{8786}, Characters: []byte{0xe2, 0x89, 0x92}},
+ "fcy": {Name: "fcy", CodePoints: []int{1092}, Characters: []byte{0xd1, 0x84}},
+ "female": {Name: "female", CodePoints: []int{9792}, Characters: []byte{0xe2, 0x99, 0x80}},
+ "ffilig": {Name: "ffilig", CodePoints: []int{64259}, Characters: []byte{0xef, 0xac, 0x83}},
+ "fflig": {Name: "fflig", CodePoints: []int{64256}, Characters: []byte{0xef, 0xac, 0x80}},
+ "ffllig": {Name: "ffllig", CodePoints: []int{64260}, Characters: []byte{0xef, 0xac, 0x84}},
+ "ffr": {Name: "ffr", CodePoints: []int{120099}, Characters: []byte{0xf0, 0x9d, 0x94, 0xa3}},
+ "filig": {Name: "filig", CodePoints: []int{64257}, Characters: []byte{0xef, 0xac, 0x81}},
+ "fjlig": {Name: "fjlig", CodePoints: []int{102, 106}, Characters: []byte{0x66, 0x6a}},
+ "flat": {Name: "flat", CodePoints: []int{9837}, Characters: []byte{0xe2, 0x99, 0xad}},
+ "fllig": {Name: "fllig", CodePoints: []int{64258}, Characters: []byte{0xef, 0xac, 0x82}},
+ "fltns": {Name: "fltns", CodePoints: []int{9649}, Characters: []byte{0xe2, 0x96, 0xb1}},
+ "fnof": {Name: "fnof", CodePoints: []int{402}, Characters: []byte{0xc6, 0x92}},
+ "fopf": {Name: "fopf", CodePoints: []int{120151}, Characters: []byte{0xf0, 0x9d, 0x95, 0x97}},
+ "forall": {Name: "forall", CodePoints: []int{8704}, Characters: []byte{0xe2, 0x88, 0x80}},
+ "fork": {Name: "fork", CodePoints: []int{8916}, Characters: []byte{0xe2, 0x8b, 0x94}},
+ "forkv": {Name: "forkv", CodePoints: []int{10969}, Characters: []byte{0xe2, 0xab, 0x99}},
+ "fpartint": {Name: "fpartint", CodePoints: []int{10765}, Characters: []byte{0xe2, 0xa8, 0x8d}},
+ "frac12": {Name: "frac12", CodePoints: []int{189}, Characters: []byte{0xc2, 0xbd}},
+ "frac13": {Name: "frac13", CodePoints: []int{8531}, Characters: []byte{0xe2, 0x85, 0x93}},
+ "frac14": {Name: "frac14", CodePoints: []int{188}, Characters: []byte{0xc2, 0xbc}},
+ "frac15": {Name: "frac15", CodePoints: []int{8533}, Characters: []byte{0xe2, 0x85, 0x95}},
+ "frac16": {Name: "frac16", CodePoints: []int{8537}, Characters: []byte{0xe2, 0x85, 0x99}},
+ "frac18": {Name: "frac18", CodePoints: []int{8539}, Characters: []byte{0xe2, 0x85, 0x9b}},
+ "frac23": {Name: "frac23", CodePoints: []int{8532}, Characters: []byte{0xe2, 0x85, 0x94}},
+ "frac25": {Name: "frac25", CodePoints: []int{8534}, Characters: []byte{0xe2, 0x85, 0x96}},
+ "frac34": {Name: "frac34", CodePoints: []int{190}, Characters: []byte{0xc2, 0xbe}},
+ "frac35": {Name: "frac35", CodePoints: []int{8535}, Characters: []byte{0xe2, 0x85, 0x97}},
+ "frac38": {Name: "frac38", CodePoints: []int{8540}, Characters: []byte{0xe2, 0x85, 0x9c}},
+ "frac45": {Name: "frac45", CodePoints: []int{8536}, Characters: []byte{0xe2, 0x85, 0x98}},
+ "frac56": {Name: "frac56", CodePoints: []int{8538}, Characters: []byte{0xe2, 0x85, 0x9a}},
+ "frac58": {Name: "frac58", CodePoints: []int{8541}, Characters: []byte{0xe2, 0x85, 0x9d}},
+ "frac78": {Name: "frac78", CodePoints: []int{8542}, Characters: []byte{0xe2, 0x85, 0x9e}},
+ "frasl": {Name: "frasl", CodePoints: []int{8260}, Characters: []byte{0xe2, 0x81, 0x84}},
+ "frown": {Name: "frown", CodePoints: []int{8994}, Characters: []byte{0xe2, 0x8c, 0xa2}},
+ "fscr": {Name: "fscr", CodePoints: []int{119995}, Characters: []byte{0xf0, 0x9d, 0x92, 0xbb}},
+ "gE": {Name: "gE", CodePoints: []int{8807}, Characters: []byte{0xe2, 0x89, 0xa7}},
+ "gEl": {Name: "gEl", CodePoints: []int{10892}, Characters: []byte{0xe2, 0xaa, 0x8c}},
+ "gacute": {Name: "gacute", CodePoints: []int{501}, Characters: []byte{0xc7, 0xb5}},
+ "gamma": {Name: "gamma", CodePoints: []int{947}, Characters: []byte{0xce, 0xb3}},
+ "gammad": {Name: "gammad", CodePoints: []int{989}, Characters: []byte{0xcf, 0x9d}},
+ "gap": {Name: "gap", CodePoints: []int{10886}, Characters: []byte{0xe2, 0xaa, 0x86}},
+ "gbreve": {Name: "gbreve", CodePoints: []int{287}, Characters: []byte{0xc4, 0x9f}},
+ "gcirc": {Name: "gcirc", CodePoints: []int{285}, Characters: []byte{0xc4, 0x9d}},
+ "gcy": {Name: "gcy", CodePoints: []int{1075}, Characters: []byte{0xd0, 0xb3}},
+ "gdot": {Name: "gdot", CodePoints: []int{289}, Characters: []byte{0xc4, 0xa1}},
+ "ge": {Name: "ge", CodePoints: []int{8805}, Characters: []byte{0xe2, 0x89, 0xa5}},
+ "gel": {Name: "gel", CodePoints: []int{8923}, Characters: []byte{0xe2, 0x8b, 0x9b}},
+ "geq": {Name: "geq", CodePoints: []int{8805}, Characters: []byte{0xe2, 0x89, 0xa5}},
+ "geqq": {Name: "geqq", CodePoints: []int{8807}, Characters: []byte{0xe2, 0x89, 0xa7}},
+ "geqslant": {Name: "geqslant", CodePoints: []int{10878}, Characters: []byte{0xe2, 0xa9, 0xbe}},
+ "ges": {Name: "ges", CodePoints: []int{10878}, Characters: []byte{0xe2, 0xa9, 0xbe}},
+ "gescc": {Name: "gescc", CodePoints: []int{10921}, Characters: []byte{0xe2, 0xaa, 0xa9}},
+ "gesdot": {Name: "gesdot", CodePoints: []int{10880}, Characters: []byte{0xe2, 0xaa, 0x80}},
+ "gesdoto": {Name: "gesdoto", CodePoints: []int{10882}, Characters: []byte{0xe2, 0xaa, 0x82}},
+ "gesdotol": {Name: "gesdotol", CodePoints: []int{10884}, Characters: []byte{0xe2, 0xaa, 0x84}},
+ "gesl": {Name: "gesl", CodePoints: []int{8923, 65024}, Characters: []byte{0xe2, 0x8b, 0x9b, 0xef, 0xb8, 0x80}},
+ "gesles": {Name: "gesles", CodePoints: []int{10900}, Characters: []byte{0xe2, 0xaa, 0x94}},
+ "gfr": {Name: "gfr", CodePoints: []int{120100}, Characters: []byte{0xf0, 0x9d, 0x94, 0xa4}},
+ "gg": {Name: "gg", CodePoints: []int{8811}, Characters: []byte{0xe2, 0x89, 0xab}},
+ "ggg": {Name: "ggg", CodePoints: []int{8921}, Characters: []byte{0xe2, 0x8b, 0x99}},
+ "gimel": {Name: "gimel", CodePoints: []int{8503}, Characters: []byte{0xe2, 0x84, 0xb7}},
+ "gjcy": {Name: "gjcy", CodePoints: []int{1107}, Characters: []byte{0xd1, 0x93}},
+ "gl": {Name: "gl", CodePoints: []int{8823}, Characters: []byte{0xe2, 0x89, 0xb7}},
+ "glE": {Name: "glE", CodePoints: []int{10898}, Characters: []byte{0xe2, 0xaa, 0x92}},
+ "gla": {Name: "gla", CodePoints: []int{10917}, Characters: []byte{0xe2, 0xaa, 0xa5}},
+ "glj": {Name: "glj", CodePoints: []int{10916}, Characters: []byte{0xe2, 0xaa, 0xa4}},
+ "gnE": {Name: "gnE", CodePoints: []int{8809}, Characters: []byte{0xe2, 0x89, 0xa9}},
+ "gnap": {Name: "gnap", CodePoints: []int{10890}, Characters: []byte{0xe2, 0xaa, 0x8a}},
+ "gnapprox": {Name: "gnapprox", CodePoints: []int{10890}, Characters: []byte{0xe2, 0xaa, 0x8a}},
+ "gne": {Name: "gne", CodePoints: []int{10888}, Characters: []byte{0xe2, 0xaa, 0x88}},
+ "gneq": {Name: "gneq", CodePoints: []int{10888}, Characters: []byte{0xe2, 0xaa, 0x88}},
+ "gneqq": {Name: "gneqq", CodePoints: []int{8809}, Characters: []byte{0xe2, 0x89, 0xa9}},
+ "gnsim": {Name: "gnsim", CodePoints: []int{8935}, Characters: []byte{0xe2, 0x8b, 0xa7}},
+ "gopf": {Name: "gopf", CodePoints: []int{120152}, Characters: []byte{0xf0, 0x9d, 0x95, 0x98}},
+ "grave": {Name: "grave", CodePoints: []int{96}, Characters: []byte{0x60}},
+ "gscr": {Name: "gscr", CodePoints: []int{8458}, Characters: []byte{0xe2, 0x84, 0x8a}},
+ "gsim": {Name: "gsim", CodePoints: []int{8819}, Characters: []byte{0xe2, 0x89, 0xb3}},
+ "gsime": {Name: "gsime", CodePoints: []int{10894}, Characters: []byte{0xe2, 0xaa, 0x8e}},
+ "gsiml": {Name: "gsiml", CodePoints: []int{10896}, Characters: []byte{0xe2, 0xaa, 0x90}},
+ "gt": {Name: "gt", CodePoints: []int{62}, Characters: []byte{0x3e}},
+ "gtcc": {Name: "gtcc", CodePoints: []int{10919}, Characters: []byte{0xe2, 0xaa, 0xa7}},
+ "gtcir": {Name: "gtcir", CodePoints: []int{10874}, Characters: []byte{0xe2, 0xa9, 0xba}},
+ "gtdot": {Name: "gtdot", CodePoints: []int{8919}, Characters: []byte{0xe2, 0x8b, 0x97}},
+ "gtlPar": {Name: "gtlPar", CodePoints: []int{10645}, Characters: []byte{0xe2, 0xa6, 0x95}},
+ "gtquest": {Name: "gtquest", CodePoints: []int{10876}, Characters: []byte{0xe2, 0xa9, 0xbc}},
+ "gtrapprox": {Name: "gtrapprox", CodePoints: []int{10886}, Characters: []byte{0xe2, 0xaa, 0x86}},
+ "gtrarr": {Name: "gtrarr", CodePoints: []int{10616}, Characters: []byte{0xe2, 0xa5, 0xb8}},
+ "gtrdot": {Name: "gtrdot", CodePoints: []int{8919}, Characters: []byte{0xe2, 0x8b, 0x97}},
+ "gtreqless": {Name: "gtreqless", CodePoints: []int{8923}, Characters: []byte{0xe2, 0x8b, 0x9b}},
+ "gtreqqless": {Name: "gtreqqless", CodePoints: []int{10892}, Characters: []byte{0xe2, 0xaa, 0x8c}},
+ "gtrless": {Name: "gtrless", CodePoints: []int{8823}, Characters: []byte{0xe2, 0x89, 0xb7}},
+ "gtrsim": {Name: "gtrsim", CodePoints: []int{8819}, Characters: []byte{0xe2, 0x89, 0xb3}},
+ "gvertneqq": {Name: "gvertneqq", CodePoints: []int{8809, 65024}, Characters: []byte{0xe2, 0x89, 0xa9, 0xef, 0xb8, 0x80}},
+ "gvnE": {Name: "gvnE", CodePoints: []int{8809, 65024}, Characters: []byte{0xe2, 0x89, 0xa9, 0xef, 0xb8, 0x80}},
+ "hArr": {Name: "hArr", CodePoints: []int{8660}, Characters: []byte{0xe2, 0x87, 0x94}},
+ "hairsp": {Name: "hairsp", CodePoints: []int{8202}, Characters: []byte{0xe2, 0x80, 0x8a}},
+ "half": {Name: "half", CodePoints: []int{189}, Characters: []byte{0xc2, 0xbd}},
+ "hamilt": {Name: "hamilt", CodePoints: []int{8459}, Characters: []byte{0xe2, 0x84, 0x8b}},
+ "hardcy": {Name: "hardcy", CodePoints: []int{1098}, Characters: []byte{0xd1, 0x8a}},
+ "harr": {Name: "harr", CodePoints: []int{8596}, Characters: []byte{0xe2, 0x86, 0x94}},
+ "harrcir": {Name: "harrcir", CodePoints: []int{10568}, Characters: []byte{0xe2, 0xa5, 0x88}},
+ "harrw": {Name: "harrw", CodePoints: []int{8621}, Characters: []byte{0xe2, 0x86, 0xad}},
+ "hbar": {Name: "hbar", CodePoints: []int{8463}, Characters: []byte{0xe2, 0x84, 0x8f}},
+ "hcirc": {Name: "hcirc", CodePoints: []int{293}, Characters: []byte{0xc4, 0xa5}},
+ "hearts": {Name: "hearts", CodePoints: []int{9829}, Characters: []byte{0xe2, 0x99, 0xa5}},
+ "heartsuit": {Name: "heartsuit", CodePoints: []int{9829}, Characters: []byte{0xe2, 0x99, 0xa5}},
+ "hellip": {Name: "hellip", CodePoints: []int{8230}, Characters: []byte{0xe2, 0x80, 0xa6}},
+ "hercon": {Name: "hercon", CodePoints: []int{8889}, Characters: []byte{0xe2, 0x8a, 0xb9}},
+ "hfr": {Name: "hfr", CodePoints: []int{120101}, Characters: []byte{0xf0, 0x9d, 0x94, 0xa5}},
+ "hksearow": {Name: "hksearow", CodePoints: []int{10533}, Characters: []byte{0xe2, 0xa4, 0xa5}},
+ "hkswarow": {Name: "hkswarow", CodePoints: []int{10534}, Characters: []byte{0xe2, 0xa4, 0xa6}},
+ "hoarr": {Name: "hoarr", CodePoints: []int{8703}, Characters: []byte{0xe2, 0x87, 0xbf}},
+ "homtht": {Name: "homtht", CodePoints: []int{8763}, Characters: []byte{0xe2, 0x88, 0xbb}},
+ "hookleftarrow": {Name: "hookleftarrow", CodePoints: []int{8617}, Characters: []byte{0xe2, 0x86, 0xa9}},
+ "hookrightarrow": {Name: "hookrightarrow", CodePoints: []int{8618}, Characters: []byte{0xe2, 0x86, 0xaa}},
+ "hopf": {Name: "hopf", CodePoints: []int{120153}, Characters: []byte{0xf0, 0x9d, 0x95, 0x99}},
+ "horbar": {Name: "horbar", CodePoints: []int{8213}, Characters: []byte{0xe2, 0x80, 0x95}},
+ "hscr": {Name: "hscr", CodePoints: []int{119997}, Characters: []byte{0xf0, 0x9d, 0x92, 0xbd}},
+ "hslash": {Name: "hslash", CodePoints: []int{8463}, Characters: []byte{0xe2, 0x84, 0x8f}},
+ "hstrok": {Name: "hstrok", CodePoints: []int{295}, Characters: []byte{0xc4, 0xa7}},
+ "hybull": {Name: "hybull", CodePoints: []int{8259}, Characters: []byte{0xe2, 0x81, 0x83}},
+ "hyphen": {Name: "hyphen", CodePoints: []int{8208}, Characters: []byte{0xe2, 0x80, 0x90}},
+ "iacute": {Name: "iacute", CodePoints: []int{237}, Characters: []byte{0xc3, 0xad}},
+ "ic": {Name: "ic", CodePoints: []int{8291}, Characters: []byte{0xe2, 0x81, 0xa3}},
+ "icirc": {Name: "icirc", CodePoints: []int{238}, Characters: []byte{0xc3, 0xae}},
+ "icy": {Name: "icy", CodePoints: []int{1080}, Characters: []byte{0xd0, 0xb8}},
+ "iecy": {Name: "iecy", CodePoints: []int{1077}, Characters: []byte{0xd0, 0xb5}},
+ "iexcl": {Name: "iexcl", CodePoints: []int{161}, Characters: []byte{0xc2, 0xa1}},
+ "iff": {Name: "iff", CodePoints: []int{8660}, Characters: []byte{0xe2, 0x87, 0x94}},
+ "ifr": {Name: "ifr", CodePoints: []int{120102}, Characters: []byte{0xf0, 0x9d, 0x94, 0xa6}},
+ "igrave": {Name: "igrave", CodePoints: []int{236}, Characters: []byte{0xc3, 0xac}},
+ "ii": {Name: "ii", CodePoints: []int{8520}, Characters: []byte{0xe2, 0x85, 0x88}},
+ "iiiint": {Name: "iiiint", CodePoints: []int{10764}, Characters: []byte{0xe2, 0xa8, 0x8c}},
+ "iiint": {Name: "iiint", CodePoints: []int{8749}, Characters: []byte{0xe2, 0x88, 0xad}},
+ "iinfin": {Name: "iinfin", CodePoints: []int{10716}, Characters: []byte{0xe2, 0xa7, 0x9c}},
+ "iiota": {Name: "iiota", CodePoints: []int{8489}, Characters: []byte{0xe2, 0x84, 0xa9}},
+ "ijlig": {Name: "ijlig", CodePoints: []int{307}, Characters: []byte{0xc4, 0xb3}},
+ "imacr": {Name: "imacr", CodePoints: []int{299}, Characters: []byte{0xc4, 0xab}},
+ "image": {Name: "image", CodePoints: []int{8465}, Characters: []byte{0xe2, 0x84, 0x91}},
+ "imagline": {Name: "imagline", CodePoints: []int{8464}, Characters: []byte{0xe2, 0x84, 0x90}},
+ "imagpart": {Name: "imagpart", CodePoints: []int{8465}, Characters: []byte{0xe2, 0x84, 0x91}},
+ "imath": {Name: "imath", CodePoints: []int{305}, Characters: []byte{0xc4, 0xb1}},
+ "imof": {Name: "imof", CodePoints: []int{8887}, Characters: []byte{0xe2, 0x8a, 0xb7}},
+ "imped": {Name: "imped", CodePoints: []int{437}, Characters: []byte{0xc6, 0xb5}},
+ "in": {Name: "in", CodePoints: []int{8712}, Characters: []byte{0xe2, 0x88, 0x88}},
+ "incare": {Name: "incare", CodePoints: []int{8453}, Characters: []byte{0xe2, 0x84, 0x85}},
+ "infin": {Name: "infin", CodePoints: []int{8734}, Characters: []byte{0xe2, 0x88, 0x9e}},
+ "infintie": {Name: "infintie", CodePoints: []int{10717}, Characters: []byte{0xe2, 0xa7, 0x9d}},
+ "inodot": {Name: "inodot", CodePoints: []int{305}, Characters: []byte{0xc4, 0xb1}},
+ "int": {Name: "int", CodePoints: []int{8747}, Characters: []byte{0xe2, 0x88, 0xab}},
+ "intcal": {Name: "intcal", CodePoints: []int{8890}, Characters: []byte{0xe2, 0x8a, 0xba}},
+ "integers": {Name: "integers", CodePoints: []int{8484}, Characters: []byte{0xe2, 0x84, 0xa4}},
+ "intercal": {Name: "intercal", CodePoints: []int{8890}, Characters: []byte{0xe2, 0x8a, 0xba}},
+ "intlarhk": {Name: "intlarhk", CodePoints: []int{10775}, Characters: []byte{0xe2, 0xa8, 0x97}},
+ "intprod": {Name: "intprod", CodePoints: []int{10812}, Characters: []byte{0xe2, 0xa8, 0xbc}},
+ "iocy": {Name: "iocy", CodePoints: []int{1105}, Characters: []byte{0xd1, 0x91}},
+ "iogon": {Name: "iogon", CodePoints: []int{303}, Characters: []byte{0xc4, 0xaf}},
+ "iopf": {Name: "iopf", CodePoints: []int{120154}, Characters: []byte{0xf0, 0x9d, 0x95, 0x9a}},
+ "iota": {Name: "iota", CodePoints: []int{953}, Characters: []byte{0xce, 0xb9}},
+ "iprod": {Name: "iprod", CodePoints: []int{10812}, Characters: []byte{0xe2, 0xa8, 0xbc}},
+ "iquest": {Name: "iquest", CodePoints: []int{191}, Characters: []byte{0xc2, 0xbf}},
+ "iscr": {Name: "iscr", CodePoints: []int{119998}, Characters: []byte{0xf0, 0x9d, 0x92, 0xbe}},
+ "isin": {Name: "isin", CodePoints: []int{8712}, Characters: []byte{0xe2, 0x88, 0x88}},
+ "isinE": {Name: "isinE", CodePoints: []int{8953}, Characters: []byte{0xe2, 0x8b, 0xb9}},
+ "isindot": {Name: "isindot", CodePoints: []int{8949}, Characters: []byte{0xe2, 0x8b, 0xb5}},
+ "isins": {Name: "isins", CodePoints: []int{8948}, Characters: []byte{0xe2, 0x8b, 0xb4}},
+ "isinsv": {Name: "isinsv", CodePoints: []int{8947}, Characters: []byte{0xe2, 0x8b, 0xb3}},
+ "isinv": {Name: "isinv", CodePoints: []int{8712}, Characters: []byte{0xe2, 0x88, 0x88}},
+ "it": {Name: "it", CodePoints: []int{8290}, Characters: []byte{0xe2, 0x81, 0xa2}},
+ "itilde": {Name: "itilde", CodePoints: []int{297}, Characters: []byte{0xc4, 0xa9}},
+ "iukcy": {Name: "iukcy", CodePoints: []int{1110}, Characters: []byte{0xd1, 0x96}},
+ "iuml": {Name: "iuml", CodePoints: []int{239}, Characters: []byte{0xc3, 0xaf}},
+ "jcirc": {Name: "jcirc", CodePoints: []int{309}, Characters: []byte{0xc4, 0xb5}},
+ "jcy": {Name: "jcy", CodePoints: []int{1081}, Characters: []byte{0xd0, 0xb9}},
+ "jfr": {Name: "jfr", CodePoints: []int{120103}, Characters: []byte{0xf0, 0x9d, 0x94, 0xa7}},
+ "jmath": {Name: "jmath", CodePoints: []int{567}, Characters: []byte{0xc8, 0xb7}},
+ "jopf": {Name: "jopf", CodePoints: []int{120155}, Characters: []byte{0xf0, 0x9d, 0x95, 0x9b}},
+ "jscr": {Name: "jscr", CodePoints: []int{119999}, Characters: []byte{0xf0, 0x9d, 0x92, 0xbf}},
+ "jsercy": {Name: "jsercy", CodePoints: []int{1112}, Characters: []byte{0xd1, 0x98}},
+ "jukcy": {Name: "jukcy", CodePoints: []int{1108}, Characters: []byte{0xd1, 0x94}},
+ "kappa": {Name: "kappa", CodePoints: []int{954}, Characters: []byte{0xce, 0xba}},
+ "kappav": {Name: "kappav", CodePoints: []int{1008}, Characters: []byte{0xcf, 0xb0}},
+ "kcedil": {Name: "kcedil", CodePoints: []int{311}, Characters: []byte{0xc4, 0xb7}},
+ "kcy": {Name: "kcy", CodePoints: []int{1082}, Characters: []byte{0xd0, 0xba}},
+ "kfr": {Name: "kfr", CodePoints: []int{120104}, Characters: []byte{0xf0, 0x9d, 0x94, 0xa8}},
+ "kgreen": {Name: "kgreen", CodePoints: []int{312}, Characters: []byte{0xc4, 0xb8}},
+ "khcy": {Name: "khcy", CodePoints: []int{1093}, Characters: []byte{0xd1, 0x85}},
+ "kjcy": {Name: "kjcy", CodePoints: []int{1116}, Characters: []byte{0xd1, 0x9c}},
+ "kopf": {Name: "kopf", CodePoints: []int{120156}, Characters: []byte{0xf0, 0x9d, 0x95, 0x9c}},
+ "kscr": {Name: "kscr", CodePoints: []int{120000}, Characters: []byte{0xf0, 0x9d, 0x93, 0x80}},
+ "lAarr": {Name: "lAarr", CodePoints: []int{8666}, Characters: []byte{0xe2, 0x87, 0x9a}},
+ "lArr": {Name: "lArr", CodePoints: []int{8656}, Characters: []byte{0xe2, 0x87, 0x90}},
+ "lAtail": {Name: "lAtail", CodePoints: []int{10523}, Characters: []byte{0xe2, 0xa4, 0x9b}},
+ "lBarr": {Name: "lBarr", CodePoints: []int{10510}, Characters: []byte{0xe2, 0xa4, 0x8e}},
+ "lE": {Name: "lE", CodePoints: []int{8806}, Characters: []byte{0xe2, 0x89, 0xa6}},
+ "lEg": {Name: "lEg", CodePoints: []int{10891}, Characters: []byte{0xe2, 0xaa, 0x8b}},
+ "lHar": {Name: "lHar", CodePoints: []int{10594}, Characters: []byte{0xe2, 0xa5, 0xa2}},
+ "lacute": {Name: "lacute", CodePoints: []int{314}, Characters: []byte{0xc4, 0xba}},
+ "laemptyv": {Name: "laemptyv", CodePoints: []int{10676}, Characters: []byte{0xe2, 0xa6, 0xb4}},
+ "lagran": {Name: "lagran", CodePoints: []int{8466}, Characters: []byte{0xe2, 0x84, 0x92}},
+ "lambda": {Name: "lambda", CodePoints: []int{955}, Characters: []byte{0xce, 0xbb}},
+ "lang": {Name: "lang", CodePoints: []int{10216}, Characters: []byte{0xe2, 0x9f, 0xa8}},
+ "langd": {Name: "langd", CodePoints: []int{10641}, Characters: []byte{0xe2, 0xa6, 0x91}},
+ "langle": {Name: "langle", CodePoints: []int{10216}, Characters: []byte{0xe2, 0x9f, 0xa8}},
+ "lap": {Name: "lap", CodePoints: []int{10885}, Characters: []byte{0xe2, 0xaa, 0x85}},
+ "laquo": {Name: "laquo", CodePoints: []int{171}, Characters: []byte{0xc2, 0xab}},
+ "larr": {Name: "larr", CodePoints: []int{8592}, Characters: []byte{0xe2, 0x86, 0x90}},
+ "larrb": {Name: "larrb", CodePoints: []int{8676}, Characters: []byte{0xe2, 0x87, 0xa4}},
+ "larrbfs": {Name: "larrbfs", CodePoints: []int{10527}, Characters: []byte{0xe2, 0xa4, 0x9f}},
+ "larrfs": {Name: "larrfs", CodePoints: []int{10525}, Characters: []byte{0xe2, 0xa4, 0x9d}},
+ "larrhk": {Name: "larrhk", CodePoints: []int{8617}, Characters: []byte{0xe2, 0x86, 0xa9}},
+ "larrlp": {Name: "larrlp", CodePoints: []int{8619}, Characters: []byte{0xe2, 0x86, 0xab}},
+ "larrpl": {Name: "larrpl", CodePoints: []int{10553}, Characters: []byte{0xe2, 0xa4, 0xb9}},
+ "larrsim": {Name: "larrsim", CodePoints: []int{10611}, Characters: []byte{0xe2, 0xa5, 0xb3}},
+ "larrtl": {Name: "larrtl", CodePoints: []int{8610}, Characters: []byte{0xe2, 0x86, 0xa2}},
+ "lat": {Name: "lat", CodePoints: []int{10923}, Characters: []byte{0xe2, 0xaa, 0xab}},
+ "latail": {Name: "latail", CodePoints: []int{10521}, Characters: []byte{0xe2, 0xa4, 0x99}},
+ "late": {Name: "late", CodePoints: []int{10925}, Characters: []byte{0xe2, 0xaa, 0xad}},
+ "lates": {Name: "lates", CodePoints: []int{10925, 65024}, Characters: []byte{0xe2, 0xaa, 0xad, 0xef, 0xb8, 0x80}},
+ "lbarr": {Name: "lbarr", CodePoints: []int{10508}, Characters: []byte{0xe2, 0xa4, 0x8c}},
+ "lbbrk": {Name: "lbbrk", CodePoints: []int{10098}, Characters: []byte{0xe2, 0x9d, 0xb2}},
+ "lbrace": {Name: "lbrace", CodePoints: []int{123}, Characters: []byte{0x7b}},
+ "lbrack": {Name: "lbrack", CodePoints: []int{91}, Characters: []byte{0x5b}},
+ "lbrke": {Name: "lbrke", CodePoints: []int{10635}, Characters: []byte{0xe2, 0xa6, 0x8b}},
+ "lbrksld": {Name: "lbrksld", CodePoints: []int{10639}, Characters: []byte{0xe2, 0xa6, 0x8f}},
+ "lbrkslu": {Name: "lbrkslu", CodePoints: []int{10637}, Characters: []byte{0xe2, 0xa6, 0x8d}},
+ "lcaron": {Name: "lcaron", CodePoints: []int{318}, Characters: []byte{0xc4, 0xbe}},
+ "lcedil": {Name: "lcedil", CodePoints: []int{316}, Characters: []byte{0xc4, 0xbc}},
+ "lceil": {Name: "lceil", CodePoints: []int{8968}, Characters: []byte{0xe2, 0x8c, 0x88}},
+ "lcub": {Name: "lcub", CodePoints: []int{123}, Characters: []byte{0x7b}},
+ "lcy": {Name: "lcy", CodePoints: []int{1083}, Characters: []byte{0xd0, 0xbb}},
+ "ldca": {Name: "ldca", CodePoints: []int{10550}, Characters: []byte{0xe2, 0xa4, 0xb6}},
+ "ldquo": {Name: "ldquo", CodePoints: []int{8220}, Characters: []byte{0xe2, 0x80, 0x9c}},
+ "ldquor": {Name: "ldquor", CodePoints: []int{8222}, Characters: []byte{0xe2, 0x80, 0x9e}},
+ "ldrdhar": {Name: "ldrdhar", CodePoints: []int{10599}, Characters: []byte{0xe2, 0xa5, 0xa7}},
+ "ldrushar": {Name: "ldrushar", CodePoints: []int{10571}, Characters: []byte{0xe2, 0xa5, 0x8b}},
+ "ldsh": {Name: "ldsh", CodePoints: []int{8626}, Characters: []byte{0xe2, 0x86, 0xb2}},
+ "le": {Name: "le", CodePoints: []int{8804}, Characters: []byte{0xe2, 0x89, 0xa4}},
+ "leftarrow": {Name: "leftarrow", CodePoints: []int{8592}, Characters: []byte{0xe2, 0x86, 0x90}},
+ "leftarrowtail": {Name: "leftarrowtail", CodePoints: []int{8610}, Characters: []byte{0xe2, 0x86, 0xa2}},
+ "leftharpoondown": {Name: "leftharpoondown", CodePoints: []int{8637}, Characters: []byte{0xe2, 0x86, 0xbd}},
+ "leftharpoonup": {Name: "leftharpoonup", CodePoints: []int{8636}, Characters: []byte{0xe2, 0x86, 0xbc}},
+ "leftleftarrows": {Name: "leftleftarrows", CodePoints: []int{8647}, Characters: []byte{0xe2, 0x87, 0x87}},
+ "leftrightarrow": {Name: "leftrightarrow", CodePoints: []int{8596}, Characters: []byte{0xe2, 0x86, 0x94}},
+ "leftrightarrows": {Name: "leftrightarrows", CodePoints: []int{8646}, Characters: []byte{0xe2, 0x87, 0x86}},
+ "leftrightharpoons": {Name: "leftrightharpoons", CodePoints: []int{8651}, Characters: []byte{0xe2, 0x87, 0x8b}},
+ "leftrightsquigarrow": {Name: "leftrightsquigarrow", CodePoints: []int{8621}, Characters: []byte{0xe2, 0x86, 0xad}},
+ "leftthreetimes": {Name: "leftthreetimes", CodePoints: []int{8907}, Characters: []byte{0xe2, 0x8b, 0x8b}},
+ "leg": {Name: "leg", CodePoints: []int{8922}, Characters: []byte{0xe2, 0x8b, 0x9a}},
+ "leq": {Name: "leq", CodePoints: []int{8804}, Characters: []byte{0xe2, 0x89, 0xa4}},
+ "leqq": {Name: "leqq", CodePoints: []int{8806}, Characters: []byte{0xe2, 0x89, 0xa6}},
+ "leqslant": {Name: "leqslant", CodePoints: []int{10877}, Characters: []byte{0xe2, 0xa9, 0xbd}},
+ "les": {Name: "les", CodePoints: []int{10877}, Characters: []byte{0xe2, 0xa9, 0xbd}},
+ "lescc": {Name: "lescc", CodePoints: []int{10920}, Characters: []byte{0xe2, 0xaa, 0xa8}},
+ "lesdot": {Name: "lesdot", CodePoints: []int{10879}, Characters: []byte{0xe2, 0xa9, 0xbf}},
+ "lesdoto": {Name: "lesdoto", CodePoints: []int{10881}, Characters: []byte{0xe2, 0xaa, 0x81}},
+ "lesdotor": {Name: "lesdotor", CodePoints: []int{10883}, Characters: []byte{0xe2, 0xaa, 0x83}},
+ "lesg": {Name: "lesg", CodePoints: []int{8922, 65024}, Characters: []byte{0xe2, 0x8b, 0x9a, 0xef, 0xb8, 0x80}},
+ "lesges": {Name: "lesges", CodePoints: []int{10899}, Characters: []byte{0xe2, 0xaa, 0x93}},
+ "lessapprox": {Name: "lessapprox", CodePoints: []int{10885}, Characters: []byte{0xe2, 0xaa, 0x85}},
+ "lessdot": {Name: "lessdot", CodePoints: []int{8918}, Characters: []byte{0xe2, 0x8b, 0x96}},
+ "lesseqgtr": {Name: "lesseqgtr", CodePoints: []int{8922}, Characters: []byte{0xe2, 0x8b, 0x9a}},
+ "lesseqqgtr": {Name: "lesseqqgtr", CodePoints: []int{10891}, Characters: []byte{0xe2, 0xaa, 0x8b}},
+ "lessgtr": {Name: "lessgtr", CodePoints: []int{8822}, Characters: []byte{0xe2, 0x89, 0xb6}},
+ "lesssim": {Name: "lesssim", CodePoints: []int{8818}, Characters: []byte{0xe2, 0x89, 0xb2}},
+ "lfisht": {Name: "lfisht", CodePoints: []int{10620}, Characters: []byte{0xe2, 0xa5, 0xbc}},
+ "lfloor": {Name: "lfloor", CodePoints: []int{8970}, Characters: []byte{0xe2, 0x8c, 0x8a}},
+ "lfr": {Name: "lfr", CodePoints: []int{120105}, Characters: []byte{0xf0, 0x9d, 0x94, 0xa9}},
+ "lg": {Name: "lg", CodePoints: []int{8822}, Characters: []byte{0xe2, 0x89, 0xb6}},
+ "lgE": {Name: "lgE", CodePoints: []int{10897}, Characters: []byte{0xe2, 0xaa, 0x91}},
+ "lhard": {Name: "lhard", CodePoints: []int{8637}, Characters: []byte{0xe2, 0x86, 0xbd}},
+ "lharu": {Name: "lharu", CodePoints: []int{8636}, Characters: []byte{0xe2, 0x86, 0xbc}},
+ "lharul": {Name: "lharul", CodePoints: []int{10602}, Characters: []byte{0xe2, 0xa5, 0xaa}},
+ "lhblk": {Name: "lhblk", CodePoints: []int{9604}, Characters: []byte{0xe2, 0x96, 0x84}},
+ "ljcy": {Name: "ljcy", CodePoints: []int{1113}, Characters: []byte{0xd1, 0x99}},
+ "ll": {Name: "ll", CodePoints: []int{8810}, Characters: []byte{0xe2, 0x89, 0xaa}},
+ "llarr": {Name: "llarr", CodePoints: []int{8647}, Characters: []byte{0xe2, 0x87, 0x87}},
+ "llcorner": {Name: "llcorner", CodePoints: []int{8990}, Characters: []byte{0xe2, 0x8c, 0x9e}},
+ "llhard": {Name: "llhard", CodePoints: []int{10603}, Characters: []byte{0xe2, 0xa5, 0xab}},
+ "lltri": {Name: "lltri", CodePoints: []int{9722}, Characters: []byte{0xe2, 0x97, 0xba}},
+ "lmidot": {Name: "lmidot", CodePoints: []int{320}, Characters: []byte{0xc5, 0x80}},
+ "lmoust": {Name: "lmoust", CodePoints: []int{9136}, Characters: []byte{0xe2, 0x8e, 0xb0}},
+ "lmoustache": {Name: "lmoustache", CodePoints: []int{9136}, Characters: []byte{0xe2, 0x8e, 0xb0}},
+ "lnE": {Name: "lnE", CodePoints: []int{8808}, Characters: []byte{0xe2, 0x89, 0xa8}},
+ "lnap": {Name: "lnap", CodePoints: []int{10889}, Characters: []byte{0xe2, 0xaa, 0x89}},
+ "lnapprox": {Name: "lnapprox", CodePoints: []int{10889}, Characters: []byte{0xe2, 0xaa, 0x89}},
+ "lne": {Name: "lne", CodePoints: []int{10887}, Characters: []byte{0xe2, 0xaa, 0x87}},
+ "lneq": {Name: "lneq", CodePoints: []int{10887}, Characters: []byte{0xe2, 0xaa, 0x87}},
+ "lneqq": {Name: "lneqq", CodePoints: []int{8808}, Characters: []byte{0xe2, 0x89, 0xa8}},
+ "lnsim": {Name: "lnsim", CodePoints: []int{8934}, Characters: []byte{0xe2, 0x8b, 0xa6}},
+ "loang": {Name: "loang", CodePoints: []int{10220}, Characters: []byte{0xe2, 0x9f, 0xac}},
+ "loarr": {Name: "loarr", CodePoints: []int{8701}, Characters: []byte{0xe2, 0x87, 0xbd}},
+ "lobrk": {Name: "lobrk", CodePoints: []int{10214}, Characters: []byte{0xe2, 0x9f, 0xa6}},
+ "longleftarrow": {Name: "longleftarrow", CodePoints: []int{10229}, Characters: []byte{0xe2, 0x9f, 0xb5}},
+ "longleftrightarrow": {Name: "longleftrightarrow", CodePoints: []int{10231}, Characters: []byte{0xe2, 0x9f, 0xb7}},
+ "longmapsto": {Name: "longmapsto", CodePoints: []int{10236}, Characters: []byte{0xe2, 0x9f, 0xbc}},
+ "longrightarrow": {Name: "longrightarrow", CodePoints: []int{10230}, Characters: []byte{0xe2, 0x9f, 0xb6}},
+ "looparrowleft": {Name: "looparrowleft", CodePoints: []int{8619}, Characters: []byte{0xe2, 0x86, 0xab}},
+ "looparrowright": {Name: "looparrowright", CodePoints: []int{8620}, Characters: []byte{0xe2, 0x86, 0xac}},
+ "lopar": {Name: "lopar", CodePoints: []int{10629}, Characters: []byte{0xe2, 0xa6, 0x85}},
+ "lopf": {Name: "lopf", CodePoints: []int{120157}, Characters: []byte{0xf0, 0x9d, 0x95, 0x9d}},
+ "loplus": {Name: "loplus", CodePoints: []int{10797}, Characters: []byte{0xe2, 0xa8, 0xad}},
+ "lotimes": {Name: "lotimes", CodePoints: []int{10804}, Characters: []byte{0xe2, 0xa8, 0xb4}},
+ "lowast": {Name: "lowast", CodePoints: []int{8727}, Characters: []byte{0xe2, 0x88, 0x97}},
+ "lowbar": {Name: "lowbar", CodePoints: []int{95}, Characters: []byte{0x5f}},
+ "loz": {Name: "loz", CodePoints: []int{9674}, Characters: []byte{0xe2, 0x97, 0x8a}},
+ "lozenge": {Name: "lozenge", CodePoints: []int{9674}, Characters: []byte{0xe2, 0x97, 0x8a}},
+ "lozf": {Name: "lozf", CodePoints: []int{10731}, Characters: []byte{0xe2, 0xa7, 0xab}},
+ "lpar": {Name: "lpar", CodePoints: []int{40}, Characters: []byte{0x28}},
+ "lparlt": {Name: "lparlt", CodePoints: []int{10643}, Characters: []byte{0xe2, 0xa6, 0x93}},
+ "lrarr": {Name: "lrarr", CodePoints: []int{8646}, Characters: []byte{0xe2, 0x87, 0x86}},
+ "lrcorner": {Name: "lrcorner", CodePoints: []int{8991}, Characters: []byte{0xe2, 0x8c, 0x9f}},
+ "lrhar": {Name: "lrhar", CodePoints: []int{8651}, Characters: []byte{0xe2, 0x87, 0x8b}},
+ "lrhard": {Name: "lrhard", CodePoints: []int{10605}, Characters: []byte{0xe2, 0xa5, 0xad}},
+ "lrm": {Name: "lrm", CodePoints: []int{8206}, Characters: []byte{0xe2, 0x80, 0x8e}},
+ "lrtri": {Name: "lrtri", CodePoints: []int{8895}, Characters: []byte{0xe2, 0x8a, 0xbf}},
+ "lsaquo": {Name: "lsaquo", CodePoints: []int{8249}, Characters: []byte{0xe2, 0x80, 0xb9}},
+ "lscr": {Name: "lscr", CodePoints: []int{120001}, Characters: []byte{0xf0, 0x9d, 0x93, 0x81}},
+ "lsh": {Name: "lsh", CodePoints: []int{8624}, Characters: []byte{0xe2, 0x86, 0xb0}},
+ "lsim": {Name: "lsim", CodePoints: []int{8818}, Characters: []byte{0xe2, 0x89, 0xb2}},
+ "lsime": {Name: "lsime", CodePoints: []int{10893}, Characters: []byte{0xe2, 0xaa, 0x8d}},
+ "lsimg": {Name: "lsimg", CodePoints: []int{10895}, Characters: []byte{0xe2, 0xaa, 0x8f}},
+ "lsqb": {Name: "lsqb", CodePoints: []int{91}, Characters: []byte{0x5b}},
+ "lsquo": {Name: "lsquo", CodePoints: []int{8216}, Characters: []byte{0xe2, 0x80, 0x98}},
+ "lsquor": {Name: "lsquor", CodePoints: []int{8218}, Characters: []byte{0xe2, 0x80, 0x9a}},
+ "lstrok": {Name: "lstrok", CodePoints: []int{322}, Characters: []byte{0xc5, 0x82}},
+ "lt": {Name: "lt", CodePoints: []int{60}, Characters: []byte{0x3c}},
+ "ltcc": {Name: "ltcc", CodePoints: []int{10918}, Characters: []byte{0xe2, 0xaa, 0xa6}},
+ "ltcir": {Name: "ltcir", CodePoints: []int{10873}, Characters: []byte{0xe2, 0xa9, 0xb9}},
+ "ltdot": {Name: "ltdot", CodePoints: []int{8918}, Characters: []byte{0xe2, 0x8b, 0x96}},
+ "lthree": {Name: "lthree", CodePoints: []int{8907}, Characters: []byte{0xe2, 0x8b, 0x8b}},
+ "ltimes": {Name: "ltimes", CodePoints: []int{8905}, Characters: []byte{0xe2, 0x8b, 0x89}},
+ "ltlarr": {Name: "ltlarr", CodePoints: []int{10614}, Characters: []byte{0xe2, 0xa5, 0xb6}},
+ "ltquest": {Name: "ltquest", CodePoints: []int{10875}, Characters: []byte{0xe2, 0xa9, 0xbb}},
+ "ltrPar": {Name: "ltrPar", CodePoints: []int{10646}, Characters: []byte{0xe2, 0xa6, 0x96}},
+ "ltri": {Name: "ltri", CodePoints: []int{9667}, Characters: []byte{0xe2, 0x97, 0x83}},
+ "ltrie": {Name: "ltrie", CodePoints: []int{8884}, Characters: []byte{0xe2, 0x8a, 0xb4}},
+ "ltrif": {Name: "ltrif", CodePoints: []int{9666}, Characters: []byte{0xe2, 0x97, 0x82}},
+ "lurdshar": {Name: "lurdshar", CodePoints: []int{10570}, Characters: []byte{0xe2, 0xa5, 0x8a}},
+ "luruhar": {Name: "luruhar", CodePoints: []int{10598}, Characters: []byte{0xe2, 0xa5, 0xa6}},
+ "lvertneqq": {Name: "lvertneqq", CodePoints: []int{8808, 65024}, Characters: []byte{0xe2, 0x89, 0xa8, 0xef, 0xb8, 0x80}},
+ "lvnE": {Name: "lvnE", CodePoints: []int{8808, 65024}, Characters: []byte{0xe2, 0x89, 0xa8, 0xef, 0xb8, 0x80}},
+ "mDDot": {Name: "mDDot", CodePoints: []int{8762}, Characters: []byte{0xe2, 0x88, 0xba}},
+ "macr": {Name: "macr", CodePoints: []int{175}, Characters: []byte{0xc2, 0xaf}},
+ "male": {Name: "male", CodePoints: []int{9794}, Characters: []byte{0xe2, 0x99, 0x82}},
+ "malt": {Name: "malt", CodePoints: []int{10016}, Characters: []byte{0xe2, 0x9c, 0xa0}},
+ "maltese": {Name: "maltese", CodePoints: []int{10016}, Characters: []byte{0xe2, 0x9c, 0xa0}},
+ "map": {Name: "map", CodePoints: []int{8614}, Characters: []byte{0xe2, 0x86, 0xa6}},
+ "mapsto": {Name: "mapsto", CodePoints: []int{8614}, Characters: []byte{0xe2, 0x86, 0xa6}},
+ "mapstodown": {Name: "mapstodown", CodePoints: []int{8615}, Characters: []byte{0xe2, 0x86, 0xa7}},
+ "mapstoleft": {Name: "mapstoleft", CodePoints: []int{8612}, Characters: []byte{0xe2, 0x86, 0xa4}},
+ "mapstoup": {Name: "mapstoup", CodePoints: []int{8613}, Characters: []byte{0xe2, 0x86, 0xa5}},
+ "marker": {Name: "marker", CodePoints: []int{9646}, Characters: []byte{0xe2, 0x96, 0xae}},
+ "mcomma": {Name: "mcomma", CodePoints: []int{10793}, Characters: []byte{0xe2, 0xa8, 0xa9}},
+ "mcy": {Name: "mcy", CodePoints: []int{1084}, Characters: []byte{0xd0, 0xbc}},
+ "mdash": {Name: "mdash", CodePoints: []int{8212}, Characters: []byte{0xe2, 0x80, 0x94}},
+ "measuredangle": {Name: "measuredangle", CodePoints: []int{8737}, Characters: []byte{0xe2, 0x88, 0xa1}},
+ "mfr": {Name: "mfr", CodePoints: []int{120106}, Characters: []byte{0xf0, 0x9d, 0x94, 0xaa}},
+ "mho": {Name: "mho", CodePoints: []int{8487}, Characters: []byte{0xe2, 0x84, 0xa7}},
+ "micro": {Name: "micro", CodePoints: []int{181}, Characters: []byte{0xc2, 0xb5}},
+ "mid": {Name: "mid", CodePoints: []int{8739}, Characters: []byte{0xe2, 0x88, 0xa3}},
+ "midast": {Name: "midast", CodePoints: []int{42}, Characters: []byte{0x2a}},
+ "midcir": {Name: "midcir", CodePoints: []int{10992}, Characters: []byte{0xe2, 0xab, 0xb0}},
+ "middot": {Name: "middot", CodePoints: []int{183}, Characters: []byte{0xc2, 0xb7}},
+ "minus": {Name: "minus", CodePoints: []int{8722}, Characters: []byte{0xe2, 0x88, 0x92}},
+ "minusb": {Name: "minusb", CodePoints: []int{8863}, Characters: []byte{0xe2, 0x8a, 0x9f}},
+ "minusd": {Name: "minusd", CodePoints: []int{8760}, Characters: []byte{0xe2, 0x88, 0xb8}},
+ "minusdu": {Name: "minusdu", CodePoints: []int{10794}, Characters: []byte{0xe2, 0xa8, 0xaa}},
+ "mlcp": {Name: "mlcp", CodePoints: []int{10971}, Characters: []byte{0xe2, 0xab, 0x9b}},
+ "mldr": {Name: "mldr", CodePoints: []int{8230}, Characters: []byte{0xe2, 0x80, 0xa6}},
+ "mnplus": {Name: "mnplus", CodePoints: []int{8723}, Characters: []byte{0xe2, 0x88, 0x93}},
+ "models": {Name: "models", CodePoints: []int{8871}, Characters: []byte{0xe2, 0x8a, 0xa7}},
+ "mopf": {Name: "mopf", CodePoints: []int{120158}, Characters: []byte{0xf0, 0x9d, 0x95, 0x9e}},
+ "mp": {Name: "mp", CodePoints: []int{8723}, Characters: []byte{0xe2, 0x88, 0x93}},
+ "mscr": {Name: "mscr", CodePoints: []int{120002}, Characters: []byte{0xf0, 0x9d, 0x93, 0x82}},
+ "mstpos": {Name: "mstpos", CodePoints: []int{8766}, Characters: []byte{0xe2, 0x88, 0xbe}},
+ "mu": {Name: "mu", CodePoints: []int{956}, Characters: []byte{0xce, 0xbc}},
+ "multimap": {Name: "multimap", CodePoints: []int{8888}, Characters: []byte{0xe2, 0x8a, 0xb8}},
+ "mumap": {Name: "mumap", CodePoints: []int{8888}, Characters: []byte{0xe2, 0x8a, 0xb8}},
+ "nGg": {Name: "nGg", CodePoints: []int{8921, 824}, Characters: []byte{0xe2, 0x8b, 0x99, 0xcc, 0xb8}},
+ "nGt": {Name: "nGt", CodePoints: []int{8811, 8402}, Characters: []byte{0xe2, 0x89, 0xab, 0xe2, 0x83, 0x92}},
+ "nGtv": {Name: "nGtv", CodePoints: []int{8811, 824}, Characters: []byte{0xe2, 0x89, 0xab, 0xcc, 0xb8}},
+ "nLeftarrow": {Name: "nLeftarrow", CodePoints: []int{8653}, Characters: []byte{0xe2, 0x87, 0x8d}},
+ "nLeftrightarrow": {Name: "nLeftrightarrow", CodePoints: []int{8654}, Characters: []byte{0xe2, 0x87, 0x8e}},
+ "nLl": {Name: "nLl", CodePoints: []int{8920, 824}, Characters: []byte{0xe2, 0x8b, 0x98, 0xcc, 0xb8}},
+ "nLt": {Name: "nLt", CodePoints: []int{8810, 8402}, Characters: []byte{0xe2, 0x89, 0xaa, 0xe2, 0x83, 0x92}},
+ "nLtv": {Name: "nLtv", CodePoints: []int{8810, 824}, Characters: []byte{0xe2, 0x89, 0xaa, 0xcc, 0xb8}},
+ "nRightarrow": {Name: "nRightarrow", CodePoints: []int{8655}, Characters: []byte{0xe2, 0x87, 0x8f}},
+ "nVDash": {Name: "nVDash", CodePoints: []int{8879}, Characters: []byte{0xe2, 0x8a, 0xaf}},
+ "nVdash": {Name: "nVdash", CodePoints: []int{8878}, Characters: []byte{0xe2, 0x8a, 0xae}},
+ "nabla": {Name: "nabla", CodePoints: []int{8711}, Characters: []byte{0xe2, 0x88, 0x87}},
+ "nacute": {Name: "nacute", CodePoints: []int{324}, Characters: []byte{0xc5, 0x84}},
+ "nang": {Name: "nang", CodePoints: []int{8736, 8402}, Characters: []byte{0xe2, 0x88, 0xa0, 0xe2, 0x83, 0x92}},
+ "nap": {Name: "nap", CodePoints: []int{8777}, Characters: []byte{0xe2, 0x89, 0x89}},
+ "napE": {Name: "napE", CodePoints: []int{10864, 824}, Characters: []byte{0xe2, 0xa9, 0xb0, 0xcc, 0xb8}},
+ "napid": {Name: "napid", CodePoints: []int{8779, 824}, Characters: []byte{0xe2, 0x89, 0x8b, 0xcc, 0xb8}},
+ "napos": {Name: "napos", CodePoints: []int{329}, Characters: []byte{0xc5, 0x89}},
+ "napprox": {Name: "napprox", CodePoints: []int{8777}, Characters: []byte{0xe2, 0x89, 0x89}},
+ "natur": {Name: "natur", CodePoints: []int{9838}, Characters: []byte{0xe2, 0x99, 0xae}},
+ "natural": {Name: "natural", CodePoints: []int{9838}, Characters: []byte{0xe2, 0x99, 0xae}},
+ "naturals": {Name: "naturals", CodePoints: []int{8469}, Characters: []byte{0xe2, 0x84, 0x95}},
+ "nbsp": {Name: "nbsp", CodePoints: []int{160}, Characters: []byte{0xc2, 0xa0}},
+ "nbump": {Name: "nbump", CodePoints: []int{8782, 824}, Characters: []byte{0xe2, 0x89, 0x8e, 0xcc, 0xb8}},
+ "nbumpe": {Name: "nbumpe", CodePoints: []int{8783, 824}, Characters: []byte{0xe2, 0x89, 0x8f, 0xcc, 0xb8}},
+ "ncap": {Name: "ncap", CodePoints: []int{10819}, Characters: []byte{0xe2, 0xa9, 0x83}},
+ "ncaron": {Name: "ncaron", CodePoints: []int{328}, Characters: []byte{0xc5, 0x88}},
+ "ncedil": {Name: "ncedil", CodePoints: []int{326}, Characters: []byte{0xc5, 0x86}},
+ "ncong": {Name: "ncong", CodePoints: []int{8775}, Characters: []byte{0xe2, 0x89, 0x87}},
+ "ncongdot": {Name: "ncongdot", CodePoints: []int{10861, 824}, Characters: []byte{0xe2, 0xa9, 0xad, 0xcc, 0xb8}},
+ "ncup": {Name: "ncup", CodePoints: []int{10818}, Characters: []byte{0xe2, 0xa9, 0x82}},
+ "ncy": {Name: "ncy", CodePoints: []int{1085}, Characters: []byte{0xd0, 0xbd}},
+ "ndash": {Name: "ndash", CodePoints: []int{8211}, Characters: []byte{0xe2, 0x80, 0x93}},
+ "ne": {Name: "ne", CodePoints: []int{8800}, Characters: []byte{0xe2, 0x89, 0xa0}},
+ "neArr": {Name: "neArr", CodePoints: []int{8663}, Characters: []byte{0xe2, 0x87, 0x97}},
+ "nearhk": {Name: "nearhk", CodePoints: []int{10532}, Characters: []byte{0xe2, 0xa4, 0xa4}},
+ "nearr": {Name: "nearr", CodePoints: []int{8599}, Characters: []byte{0xe2, 0x86, 0x97}},
+ "nearrow": {Name: "nearrow", CodePoints: []int{8599}, Characters: []byte{0xe2, 0x86, 0x97}},
+ "nedot": {Name: "nedot", CodePoints: []int{8784, 824}, Characters: []byte{0xe2, 0x89, 0x90, 0xcc, 0xb8}},
+ "nequiv": {Name: "nequiv", CodePoints: []int{8802}, Characters: []byte{0xe2, 0x89, 0xa2}},
+ "nesear": {Name: "nesear", CodePoints: []int{10536}, Characters: []byte{0xe2, 0xa4, 0xa8}},
+ "nesim": {Name: "nesim", CodePoints: []int{8770, 824}, Characters: []byte{0xe2, 0x89, 0x82, 0xcc, 0xb8}},
+ "nexist": {Name: "nexist", CodePoints: []int{8708}, Characters: []byte{0xe2, 0x88, 0x84}},
+ "nexists": {Name: "nexists", CodePoints: []int{8708}, Characters: []byte{0xe2, 0x88, 0x84}},
+ "nfr": {Name: "nfr", CodePoints: []int{120107}, Characters: []byte{0xf0, 0x9d, 0x94, 0xab}},
+ "ngE": {Name: "ngE", CodePoints: []int{8807, 824}, Characters: []byte{0xe2, 0x89, 0xa7, 0xcc, 0xb8}},
+ "nge": {Name: "nge", CodePoints: []int{8817}, Characters: []byte{0xe2, 0x89, 0xb1}},
+ "ngeq": {Name: "ngeq", CodePoints: []int{8817}, Characters: []byte{0xe2, 0x89, 0xb1}},
+ "ngeqq": {Name: "ngeqq", CodePoints: []int{8807, 824}, Characters: []byte{0xe2, 0x89, 0xa7, 0xcc, 0xb8}},
+ "ngeqslant": {Name: "ngeqslant", CodePoints: []int{10878, 824}, Characters: []byte{0xe2, 0xa9, 0xbe, 0xcc, 0xb8}},
+ "nges": {Name: "nges", CodePoints: []int{10878, 824}, Characters: []byte{0xe2, 0xa9, 0xbe, 0xcc, 0xb8}},
+ "ngsim": {Name: "ngsim", CodePoints: []int{8821}, Characters: []byte{0xe2, 0x89, 0xb5}},
+ "ngt": {Name: "ngt", CodePoints: []int{8815}, Characters: []byte{0xe2, 0x89, 0xaf}},
+ "ngtr": {Name: "ngtr", CodePoints: []int{8815}, Characters: []byte{0xe2, 0x89, 0xaf}},
+ "nhArr": {Name: "nhArr", CodePoints: []int{8654}, Characters: []byte{0xe2, 0x87, 0x8e}},
+ "nharr": {Name: "nharr", CodePoints: []int{8622}, Characters: []byte{0xe2, 0x86, 0xae}},
+ "nhpar": {Name: "nhpar", CodePoints: []int{10994}, Characters: []byte{0xe2, 0xab, 0xb2}},
+ "ni": {Name: "ni", CodePoints: []int{8715}, Characters: []byte{0xe2, 0x88, 0x8b}},
+ "nis": {Name: "nis", CodePoints: []int{8956}, Characters: []byte{0xe2, 0x8b, 0xbc}},
+ "nisd": {Name: "nisd", CodePoints: []int{8954}, Characters: []byte{0xe2, 0x8b, 0xba}},
+ "niv": {Name: "niv", CodePoints: []int{8715}, Characters: []byte{0xe2, 0x88, 0x8b}},
+ "njcy": {Name: "njcy", CodePoints: []int{1114}, Characters: []byte{0xd1, 0x9a}},
+ "nlArr": {Name: "nlArr", CodePoints: []int{8653}, Characters: []byte{0xe2, 0x87, 0x8d}},
+ "nlE": {Name: "nlE", CodePoints: []int{8806, 824}, Characters: []byte{0xe2, 0x89, 0xa6, 0xcc, 0xb8}},
+ "nlarr": {Name: "nlarr", CodePoints: []int{8602}, Characters: []byte{0xe2, 0x86, 0x9a}},
+ "nldr": {Name: "nldr", CodePoints: []int{8229}, Characters: []byte{0xe2, 0x80, 0xa5}},
+ "nle": {Name: "nle", CodePoints: []int{8816}, Characters: []byte{0xe2, 0x89, 0xb0}},
+ "nleftarrow": {Name: "nleftarrow", CodePoints: []int{8602}, Characters: []byte{0xe2, 0x86, 0x9a}},
+ "nleftrightarrow": {Name: "nleftrightarrow", CodePoints: []int{8622}, Characters: []byte{0xe2, 0x86, 0xae}},
+ "nleq": {Name: "nleq", CodePoints: []int{8816}, Characters: []byte{0xe2, 0x89, 0xb0}},
+ "nleqq": {Name: "nleqq", CodePoints: []int{8806, 824}, Characters: []byte{0xe2, 0x89, 0xa6, 0xcc, 0xb8}},
+ "nleqslant": {Name: "nleqslant", CodePoints: []int{10877, 824}, Characters: []byte{0xe2, 0xa9, 0xbd, 0xcc, 0xb8}},
+ "nles": {Name: "nles", CodePoints: []int{10877, 824}, Characters: []byte{0xe2, 0xa9, 0xbd, 0xcc, 0xb8}},
+ "nless": {Name: "nless", CodePoints: []int{8814}, Characters: []byte{0xe2, 0x89, 0xae}},
+ "nlsim": {Name: "nlsim", CodePoints: []int{8820}, Characters: []byte{0xe2, 0x89, 0xb4}},
+ "nlt": {Name: "nlt", CodePoints: []int{8814}, Characters: []byte{0xe2, 0x89, 0xae}},
+ "nltri": {Name: "nltri", CodePoints: []int{8938}, Characters: []byte{0xe2, 0x8b, 0xaa}},
+ "nltrie": {Name: "nltrie", CodePoints: []int{8940}, Characters: []byte{0xe2, 0x8b, 0xac}},
+ "nmid": {Name: "nmid", CodePoints: []int{8740}, Characters: []byte{0xe2, 0x88, 0xa4}},
+ "nopf": {Name: "nopf", CodePoints: []int{120159}, Characters: []byte{0xf0, 0x9d, 0x95, 0x9f}},
+ "not": {Name: "not", CodePoints: []int{172}, Characters: []byte{0xc2, 0xac}},
+ "notin": {Name: "notin", CodePoints: []int{8713}, Characters: []byte{0xe2, 0x88, 0x89}},
+ "notinE": {Name: "notinE", CodePoints: []int{8953, 824}, Characters: []byte{0xe2, 0x8b, 0xb9, 0xcc, 0xb8}},
+ "notindot": {Name: "notindot", CodePoints: []int{8949, 824}, Characters: []byte{0xe2, 0x8b, 0xb5, 0xcc, 0xb8}},
+ "notinva": {Name: "notinva", CodePoints: []int{8713}, Characters: []byte{0xe2, 0x88, 0x89}},
+ "notinvb": {Name: "notinvb", CodePoints: []int{8951}, Characters: []byte{0xe2, 0x8b, 0xb7}},
+ "notinvc": {Name: "notinvc", CodePoints: []int{8950}, Characters: []byte{0xe2, 0x8b, 0xb6}},
+ "notni": {Name: "notni", CodePoints: []int{8716}, Characters: []byte{0xe2, 0x88, 0x8c}},
+ "notniva": {Name: "notniva", CodePoints: []int{8716}, Characters: []byte{0xe2, 0x88, 0x8c}},
+ "notnivb": {Name: "notnivb", CodePoints: []int{8958}, Characters: []byte{0xe2, 0x8b, 0xbe}},
+ "notnivc": {Name: "notnivc", CodePoints: []int{8957}, Characters: []byte{0xe2, 0x8b, 0xbd}},
+ "npar": {Name: "npar", CodePoints: []int{8742}, Characters: []byte{0xe2, 0x88, 0xa6}},
+ "nparallel": {Name: "nparallel", CodePoints: []int{8742}, Characters: []byte{0xe2, 0x88, 0xa6}},
+ "nparsl": {Name: "nparsl", CodePoints: []int{11005, 8421}, Characters: []byte{0xe2, 0xab, 0xbd, 0xe2, 0x83, 0xa5}},
+ "npart": {Name: "npart", CodePoints: []int{8706, 824}, Characters: []byte{0xe2, 0x88, 0x82, 0xcc, 0xb8}},
+ "npolint": {Name: "npolint", CodePoints: []int{10772}, Characters: []byte{0xe2, 0xa8, 0x94}},
+ "npr": {Name: "npr", CodePoints: []int{8832}, Characters: []byte{0xe2, 0x8a, 0x80}},
+ "nprcue": {Name: "nprcue", CodePoints: []int{8928}, Characters: []byte{0xe2, 0x8b, 0xa0}},
+ "npre": {Name: "npre", CodePoints: []int{10927, 824}, Characters: []byte{0xe2, 0xaa, 0xaf, 0xcc, 0xb8}},
+ "nprec": {Name: "nprec", CodePoints: []int{8832}, Characters: []byte{0xe2, 0x8a, 0x80}},
+ "npreceq": {Name: "npreceq", CodePoints: []int{10927, 824}, Characters: []byte{0xe2, 0xaa, 0xaf, 0xcc, 0xb8}},
+ "nrArr": {Name: "nrArr", CodePoints: []int{8655}, Characters: []byte{0xe2, 0x87, 0x8f}},
+ "nrarr": {Name: "nrarr", CodePoints: []int{8603}, Characters: []byte{0xe2, 0x86, 0x9b}},
+ "nrarrc": {Name: "nrarrc", CodePoints: []int{10547, 824}, Characters: []byte{0xe2, 0xa4, 0xb3, 0xcc, 0xb8}},
+ "nrarrw": {Name: "nrarrw", CodePoints: []int{8605, 824}, Characters: []byte{0xe2, 0x86, 0x9d, 0xcc, 0xb8}},
+ "nrightarrow": {Name: "nrightarrow", CodePoints: []int{8603}, Characters: []byte{0xe2, 0x86, 0x9b}},
+ "nrtri": {Name: "nrtri", CodePoints: []int{8939}, Characters: []byte{0xe2, 0x8b, 0xab}},
+ "nrtrie": {Name: "nrtrie", CodePoints: []int{8941}, Characters: []byte{0xe2, 0x8b, 0xad}},
+ "nsc": {Name: "nsc", CodePoints: []int{8833}, Characters: []byte{0xe2, 0x8a, 0x81}},
+ "nsccue": {Name: "nsccue", CodePoints: []int{8929}, Characters: []byte{0xe2, 0x8b, 0xa1}},
+ "nsce": {Name: "nsce", CodePoints: []int{10928, 824}, Characters: []byte{0xe2, 0xaa, 0xb0, 0xcc, 0xb8}},
+ "nscr": {Name: "nscr", CodePoints: []int{120003}, Characters: []byte{0xf0, 0x9d, 0x93, 0x83}},
+ "nshortmid": {Name: "nshortmid", CodePoints: []int{8740}, Characters: []byte{0xe2, 0x88, 0xa4}},
+ "nshortparallel": {Name: "nshortparallel", CodePoints: []int{8742}, Characters: []byte{0xe2, 0x88, 0xa6}},
+ "nsim": {Name: "nsim", CodePoints: []int{8769}, Characters: []byte{0xe2, 0x89, 0x81}},
+ "nsime": {Name: "nsime", CodePoints: []int{8772}, Characters: []byte{0xe2, 0x89, 0x84}},
+ "nsimeq": {Name: "nsimeq", CodePoints: []int{8772}, Characters: []byte{0xe2, 0x89, 0x84}},
+ "nsmid": {Name: "nsmid", CodePoints: []int{8740}, Characters: []byte{0xe2, 0x88, 0xa4}},
+ "nspar": {Name: "nspar", CodePoints: []int{8742}, Characters: []byte{0xe2, 0x88, 0xa6}},
+ "nsqsube": {Name: "nsqsube", CodePoints: []int{8930}, Characters: []byte{0xe2, 0x8b, 0xa2}},
+ "nsqsupe": {Name: "nsqsupe", CodePoints: []int{8931}, Characters: []byte{0xe2, 0x8b, 0xa3}},
+ "nsub": {Name: "nsub", CodePoints: []int{8836}, Characters: []byte{0xe2, 0x8a, 0x84}},
+ "nsubE": {Name: "nsubE", CodePoints: []int{10949, 824}, Characters: []byte{0xe2, 0xab, 0x85, 0xcc, 0xb8}},
+ "nsube": {Name: "nsube", CodePoints: []int{8840}, Characters: []byte{0xe2, 0x8a, 0x88}},
+ "nsubset": {Name: "nsubset", CodePoints: []int{8834, 8402}, Characters: []byte{0xe2, 0x8a, 0x82, 0xe2, 0x83, 0x92}},
+ "nsubseteq": {Name: "nsubseteq", CodePoints: []int{8840}, Characters: []byte{0xe2, 0x8a, 0x88}},
+ "nsubseteqq": {Name: "nsubseteqq", CodePoints: []int{10949, 824}, Characters: []byte{0xe2, 0xab, 0x85, 0xcc, 0xb8}},
+ "nsucc": {Name: "nsucc", CodePoints: []int{8833}, Characters: []byte{0xe2, 0x8a, 0x81}},
+ "nsucceq": {Name: "nsucceq", CodePoints: []int{10928, 824}, Characters: []byte{0xe2, 0xaa, 0xb0, 0xcc, 0xb8}},
+ "nsup": {Name: "nsup", CodePoints: []int{8837}, Characters: []byte{0xe2, 0x8a, 0x85}},
+ "nsupE": {Name: "nsupE", CodePoints: []int{10950, 824}, Characters: []byte{0xe2, 0xab, 0x86, 0xcc, 0xb8}},
+ "nsupe": {Name: "nsupe", CodePoints: []int{8841}, Characters: []byte{0xe2, 0x8a, 0x89}},
+ "nsupset": {Name: "nsupset", CodePoints: []int{8835, 8402}, Characters: []byte{0xe2, 0x8a, 0x83, 0xe2, 0x83, 0x92}},
+ "nsupseteq": {Name: "nsupseteq", CodePoints: []int{8841}, Characters: []byte{0xe2, 0x8a, 0x89}},
+ "nsupseteqq": {Name: "nsupseteqq", CodePoints: []int{10950, 824}, Characters: []byte{0xe2, 0xab, 0x86, 0xcc, 0xb8}},
+ "ntgl": {Name: "ntgl", CodePoints: []int{8825}, Characters: []byte{0xe2, 0x89, 0xb9}},
+ "ntilde": {Name: "ntilde", CodePoints: []int{241}, Characters: []byte{0xc3, 0xb1}},
+ "ntlg": {Name: "ntlg", CodePoints: []int{8824}, Characters: []byte{0xe2, 0x89, 0xb8}},
+ "ntriangleleft": {Name: "ntriangleleft", CodePoints: []int{8938}, Characters: []byte{0xe2, 0x8b, 0xaa}},
+ "ntrianglelefteq": {Name: "ntrianglelefteq", CodePoints: []int{8940}, Characters: []byte{0xe2, 0x8b, 0xac}},
+ "ntriangleright": {Name: "ntriangleright", CodePoints: []int{8939}, Characters: []byte{0xe2, 0x8b, 0xab}},
+ "ntrianglerighteq": {Name: "ntrianglerighteq", CodePoints: []int{8941}, Characters: []byte{0xe2, 0x8b, 0xad}},
+ "nu": {Name: "nu", CodePoints: []int{957}, Characters: []byte{0xce, 0xbd}},
+ "num": {Name: "num", CodePoints: []int{35}, Characters: []byte{0x23}},
+ "numero": {Name: "numero", CodePoints: []int{8470}, Characters: []byte{0xe2, 0x84, 0x96}},
+ "numsp": {Name: "numsp", CodePoints: []int{8199}, Characters: []byte{0xe2, 0x80, 0x87}},
+ "nvDash": {Name: "nvDash", CodePoints: []int{8877}, Characters: []byte{0xe2, 0x8a, 0xad}},
+ "nvHarr": {Name: "nvHarr", CodePoints: []int{10500}, Characters: []byte{0xe2, 0xa4, 0x84}},
+ "nvap": {Name: "nvap", CodePoints: []int{8781, 8402}, Characters: []byte{0xe2, 0x89, 0x8d, 0xe2, 0x83, 0x92}},
+ "nvdash": {Name: "nvdash", CodePoints: []int{8876}, Characters: []byte{0xe2, 0x8a, 0xac}},
+ "nvge": {Name: "nvge", CodePoints: []int{8805, 8402}, Characters: []byte{0xe2, 0x89, 0xa5, 0xe2, 0x83, 0x92}},
+ "nvgt": {Name: "nvgt", CodePoints: []int{62, 8402}, Characters: []byte{0x3e, 0xe2, 0x83, 0x92}},
+ "nvinfin": {Name: "nvinfin", CodePoints: []int{10718}, Characters: []byte{0xe2, 0xa7, 0x9e}},
+ "nvlArr": {Name: "nvlArr", CodePoints: []int{10498}, Characters: []byte{0xe2, 0xa4, 0x82}},
+ "nvle": {Name: "nvle", CodePoints: []int{8804, 8402}, Characters: []byte{0xe2, 0x89, 0xa4, 0xe2, 0x83, 0x92}},
+ "nvlt": {Name: "nvlt", CodePoints: []int{60, 8402}, Characters: []byte{0x3c, 0xe2, 0x83, 0x92}},
+ "nvltrie": {Name: "nvltrie", CodePoints: []int{8884, 8402}, Characters: []byte{0xe2, 0x8a, 0xb4, 0xe2, 0x83, 0x92}},
+ "nvrArr": {Name: "nvrArr", CodePoints: []int{10499}, Characters: []byte{0xe2, 0xa4, 0x83}},
+ "nvrtrie": {Name: "nvrtrie", CodePoints: []int{8885, 8402}, Characters: []byte{0xe2, 0x8a, 0xb5, 0xe2, 0x83, 0x92}},
+ "nvsim": {Name: "nvsim", CodePoints: []int{8764, 8402}, Characters: []byte{0xe2, 0x88, 0xbc, 0xe2, 0x83, 0x92}},
+ "nwArr": {Name: "nwArr", CodePoints: []int{8662}, Characters: []byte{0xe2, 0x87, 0x96}},
+ "nwarhk": {Name: "nwarhk", CodePoints: []int{10531}, Characters: []byte{0xe2, 0xa4, 0xa3}},
+ "nwarr": {Name: "nwarr", CodePoints: []int{8598}, Characters: []byte{0xe2, 0x86, 0x96}},
+ "nwarrow": {Name: "nwarrow", CodePoints: []int{8598}, Characters: []byte{0xe2, 0x86, 0x96}},
+ "nwnear": {Name: "nwnear", CodePoints: []int{10535}, Characters: []byte{0xe2, 0xa4, 0xa7}},
+ "oS": {Name: "oS", CodePoints: []int{9416}, Characters: []byte{0xe2, 0x93, 0x88}},
+ "oacute": {Name: "oacute", CodePoints: []int{243}, Characters: []byte{0xc3, 0xb3}},
+ "oast": {Name: "oast", CodePoints: []int{8859}, Characters: []byte{0xe2, 0x8a, 0x9b}},
+ "ocir": {Name: "ocir", CodePoints: []int{8858}, Characters: []byte{0xe2, 0x8a, 0x9a}},
+ "ocirc": {Name: "ocirc", CodePoints: []int{244}, Characters: []byte{0xc3, 0xb4}},
+ "ocy": {Name: "ocy", CodePoints: []int{1086}, Characters: []byte{0xd0, 0xbe}},
+ "odash": {Name: "odash", CodePoints: []int{8861}, Characters: []byte{0xe2, 0x8a, 0x9d}},
+ "odblac": {Name: "odblac", CodePoints: []int{337}, Characters: []byte{0xc5, 0x91}},
+ "odiv": {Name: "odiv", CodePoints: []int{10808}, Characters: []byte{0xe2, 0xa8, 0xb8}},
+ "odot": {Name: "odot", CodePoints: []int{8857}, Characters: []byte{0xe2, 0x8a, 0x99}},
+ "odsold": {Name: "odsold", CodePoints: []int{10684}, Characters: []byte{0xe2, 0xa6, 0xbc}},
+ "oelig": {Name: "oelig", CodePoints: []int{339}, Characters: []byte{0xc5, 0x93}},
+ "ofcir": {Name: "ofcir", CodePoints: []int{10687}, Characters: []byte{0xe2, 0xa6, 0xbf}},
+ "ofr": {Name: "ofr", CodePoints: []int{120108}, Characters: []byte{0xf0, 0x9d, 0x94, 0xac}},
+ "ogon": {Name: "ogon", CodePoints: []int{731}, Characters: []byte{0xcb, 0x9b}},
+ "ograve": {Name: "ograve", CodePoints: []int{242}, Characters: []byte{0xc3, 0xb2}},
+ "ogt": {Name: "ogt", CodePoints: []int{10689}, Characters: []byte{0xe2, 0xa7, 0x81}},
+ "ohbar": {Name: "ohbar", CodePoints: []int{10677}, Characters: []byte{0xe2, 0xa6, 0xb5}},
+ "ohm": {Name: "ohm", CodePoints: []int{937}, Characters: []byte{0xce, 0xa9}},
+ "oint": {Name: "oint", CodePoints: []int{8750}, Characters: []byte{0xe2, 0x88, 0xae}},
+ "olarr": {Name: "olarr", CodePoints: []int{8634}, Characters: []byte{0xe2, 0x86, 0xba}},
+ "olcir": {Name: "olcir", CodePoints: []int{10686}, Characters: []byte{0xe2, 0xa6, 0xbe}},
+ "olcross": {Name: "olcross", CodePoints: []int{10683}, Characters: []byte{0xe2, 0xa6, 0xbb}},
+ "oline": {Name: "oline", CodePoints: []int{8254}, Characters: []byte{0xe2, 0x80, 0xbe}},
+ "olt": {Name: "olt", CodePoints: []int{10688}, Characters: []byte{0xe2, 0xa7, 0x80}},
+ "omacr": {Name: "omacr", CodePoints: []int{333}, Characters: []byte{0xc5, 0x8d}},
+ "omega": {Name: "omega", CodePoints: []int{969}, Characters: []byte{0xcf, 0x89}},
+ "omicron": {Name: "omicron", CodePoints: []int{959}, Characters: []byte{0xce, 0xbf}},
+ "omid": {Name: "omid", CodePoints: []int{10678}, Characters: []byte{0xe2, 0xa6, 0xb6}},
+ "ominus": {Name: "ominus", CodePoints: []int{8854}, Characters: []byte{0xe2, 0x8a, 0x96}},
+ "oopf": {Name: "oopf", CodePoints: []int{120160}, Characters: []byte{0xf0, 0x9d, 0x95, 0xa0}},
+ "opar": {Name: "opar", CodePoints: []int{10679}, Characters: []byte{0xe2, 0xa6, 0xb7}},
+ "operp": {Name: "operp", CodePoints: []int{10681}, Characters: []byte{0xe2, 0xa6, 0xb9}},
+ "oplus": {Name: "oplus", CodePoints: []int{8853}, Characters: []byte{0xe2, 0x8a, 0x95}},
+ "or": {Name: "or", CodePoints: []int{8744}, Characters: []byte{0xe2, 0x88, 0xa8}},
+ "orarr": {Name: "orarr", CodePoints: []int{8635}, Characters: []byte{0xe2, 0x86, 0xbb}},
+ "ord": {Name: "ord", CodePoints: []int{10845}, Characters: []byte{0xe2, 0xa9, 0x9d}},
+ "order": {Name: "order", CodePoints: []int{8500}, Characters: []byte{0xe2, 0x84, 0xb4}},
+ "orderof": {Name: "orderof", CodePoints: []int{8500}, Characters: []byte{0xe2, 0x84, 0xb4}},
+ "ordf": {Name: "ordf", CodePoints: []int{170}, Characters: []byte{0xc2, 0xaa}},
+ "ordm": {Name: "ordm", CodePoints: []int{186}, Characters: []byte{0xc2, 0xba}},
+ "origof": {Name: "origof", CodePoints: []int{8886}, Characters: []byte{0xe2, 0x8a, 0xb6}},
+ "oror": {Name: "oror", CodePoints: []int{10838}, Characters: []byte{0xe2, 0xa9, 0x96}},
+ "orslope": {Name: "orslope", CodePoints: []int{10839}, Characters: []byte{0xe2, 0xa9, 0x97}},
+ "orv": {Name: "orv", CodePoints: []int{10843}, Characters: []byte{0xe2, 0xa9, 0x9b}},
+ "oscr": {Name: "oscr", CodePoints: []int{8500}, Characters: []byte{0xe2, 0x84, 0xb4}},
+ "oslash": {Name: "oslash", CodePoints: []int{248}, Characters: []byte{0xc3, 0xb8}},
+ "osol": {Name: "osol", CodePoints: []int{8856}, Characters: []byte{0xe2, 0x8a, 0x98}},
+ "otilde": {Name: "otilde", CodePoints: []int{245}, Characters: []byte{0xc3, 0xb5}},
+ "otimes": {Name: "otimes", CodePoints: []int{8855}, Characters: []byte{0xe2, 0x8a, 0x97}},
+ "otimesas": {Name: "otimesas", CodePoints: []int{10806}, Characters: []byte{0xe2, 0xa8, 0xb6}},
+ "ouml": {Name: "ouml", CodePoints: []int{246}, Characters: []byte{0xc3, 0xb6}},
+ "ovbar": {Name: "ovbar", CodePoints: []int{9021}, Characters: []byte{0xe2, 0x8c, 0xbd}},
+ "par": {Name: "par", CodePoints: []int{8741}, Characters: []byte{0xe2, 0x88, 0xa5}},
+ "para": {Name: "para", CodePoints: []int{182}, Characters: []byte{0xc2, 0xb6}},
+ "parallel": {Name: "parallel", CodePoints: []int{8741}, Characters: []byte{0xe2, 0x88, 0xa5}},
+ "parsim": {Name: "parsim", CodePoints: []int{10995}, Characters: []byte{0xe2, 0xab, 0xb3}},
+ "parsl": {Name: "parsl", CodePoints: []int{11005}, Characters: []byte{0xe2, 0xab, 0xbd}},
+ "part": {Name: "part", CodePoints: []int{8706}, Characters: []byte{0xe2, 0x88, 0x82}},
+ "pcy": {Name: "pcy", CodePoints: []int{1087}, Characters: []byte{0xd0, 0xbf}},
+ "percnt": {Name: "percnt", CodePoints: []int{37}, Characters: []byte{0x25}},
+ "period": {Name: "period", CodePoints: []int{46}, Characters: []byte{0x2e}},
+ "permil": {Name: "permil", CodePoints: []int{8240}, Characters: []byte{0xe2, 0x80, 0xb0}},
+ "perp": {Name: "perp", CodePoints: []int{8869}, Characters: []byte{0xe2, 0x8a, 0xa5}},
+ "pertenk": {Name: "pertenk", CodePoints: []int{8241}, Characters: []byte{0xe2, 0x80, 0xb1}},
+ "pfr": {Name: "pfr", CodePoints: []int{120109}, Characters: []byte{0xf0, 0x9d, 0x94, 0xad}},
+ "phi": {Name: "phi", CodePoints: []int{966}, Characters: []byte{0xcf, 0x86}},
+ "phiv": {Name: "phiv", CodePoints: []int{981}, Characters: []byte{0xcf, 0x95}},
+ "phmmat": {Name: "phmmat", CodePoints: []int{8499}, Characters: []byte{0xe2, 0x84, 0xb3}},
+ "phone": {Name: "phone", CodePoints: []int{9742}, Characters: []byte{0xe2, 0x98, 0x8e}},
+ "pi": {Name: "pi", CodePoints: []int{960}, Characters: []byte{0xcf, 0x80}},
+ "pitchfork": {Name: "pitchfork", CodePoints: []int{8916}, Characters: []byte{0xe2, 0x8b, 0x94}},
+ "piv": {Name: "piv", CodePoints: []int{982}, Characters: []byte{0xcf, 0x96}},
+ "planck": {Name: "planck", CodePoints: []int{8463}, Characters: []byte{0xe2, 0x84, 0x8f}},
+ "planckh": {Name: "planckh", CodePoints: []int{8462}, Characters: []byte{0xe2, 0x84, 0x8e}},
+ "plankv": {Name: "plankv", CodePoints: []int{8463}, Characters: []byte{0xe2, 0x84, 0x8f}},
+ "plus": {Name: "plus", CodePoints: []int{43}, Characters: []byte{0x2b}},
+ "plusacir": {Name: "plusacir", CodePoints: []int{10787}, Characters: []byte{0xe2, 0xa8, 0xa3}},
+ "plusb": {Name: "plusb", CodePoints: []int{8862}, Characters: []byte{0xe2, 0x8a, 0x9e}},
+ "pluscir": {Name: "pluscir", CodePoints: []int{10786}, Characters: []byte{0xe2, 0xa8, 0xa2}},
+ "plusdo": {Name: "plusdo", CodePoints: []int{8724}, Characters: []byte{0xe2, 0x88, 0x94}},
+ "plusdu": {Name: "plusdu", CodePoints: []int{10789}, Characters: []byte{0xe2, 0xa8, 0xa5}},
+ "pluse": {Name: "pluse", CodePoints: []int{10866}, Characters: []byte{0xe2, 0xa9, 0xb2}},
+ "plusmn": {Name: "plusmn", CodePoints: []int{177}, Characters: []byte{0xc2, 0xb1}},
+ "plussim": {Name: "plussim", CodePoints: []int{10790}, Characters: []byte{0xe2, 0xa8, 0xa6}},
+ "plustwo": {Name: "plustwo", CodePoints: []int{10791}, Characters: []byte{0xe2, 0xa8, 0xa7}},
+ "pm": {Name: "pm", CodePoints: []int{177}, Characters: []byte{0xc2, 0xb1}},
+ "pointint": {Name: "pointint", CodePoints: []int{10773}, Characters: []byte{0xe2, 0xa8, 0x95}},
+ "popf": {Name: "popf", CodePoints: []int{120161}, Characters: []byte{0xf0, 0x9d, 0x95, 0xa1}},
+ "pound": {Name: "pound", CodePoints: []int{163}, Characters: []byte{0xc2, 0xa3}},
+ "pr": {Name: "pr", CodePoints: []int{8826}, Characters: []byte{0xe2, 0x89, 0xba}},
+ "prE": {Name: "prE", CodePoints: []int{10931}, Characters: []byte{0xe2, 0xaa, 0xb3}},
+ "prap": {Name: "prap", CodePoints: []int{10935}, Characters: []byte{0xe2, 0xaa, 0xb7}},
+ "prcue": {Name: "prcue", CodePoints: []int{8828}, Characters: []byte{0xe2, 0x89, 0xbc}},
+ "pre": {Name: "pre", CodePoints: []int{10927}, Characters: []byte{0xe2, 0xaa, 0xaf}},
+ "prec": {Name: "prec", CodePoints: []int{8826}, Characters: []byte{0xe2, 0x89, 0xba}},
+ "precapprox": {Name: "precapprox", CodePoints: []int{10935}, Characters: []byte{0xe2, 0xaa, 0xb7}},
+ "preccurlyeq": {Name: "preccurlyeq", CodePoints: []int{8828}, Characters: []byte{0xe2, 0x89, 0xbc}},
+ "preceq": {Name: "preceq", CodePoints: []int{10927}, Characters: []byte{0xe2, 0xaa, 0xaf}},
+ "precnapprox": {Name: "precnapprox", CodePoints: []int{10937}, Characters: []byte{0xe2, 0xaa, 0xb9}},
+ "precneqq": {Name: "precneqq", CodePoints: []int{10933}, Characters: []byte{0xe2, 0xaa, 0xb5}},
+ "precnsim": {Name: "precnsim", CodePoints: []int{8936}, Characters: []byte{0xe2, 0x8b, 0xa8}},
+ "precsim": {Name: "precsim", CodePoints: []int{8830}, Characters: []byte{0xe2, 0x89, 0xbe}},
+ "prime": {Name: "prime", CodePoints: []int{8242}, Characters: []byte{0xe2, 0x80, 0xb2}},
+ "primes": {Name: "primes", CodePoints: []int{8473}, Characters: []byte{0xe2, 0x84, 0x99}},
+ "prnE": {Name: "prnE", CodePoints: []int{10933}, Characters: []byte{0xe2, 0xaa, 0xb5}},
+ "prnap": {Name: "prnap", CodePoints: []int{10937}, Characters: []byte{0xe2, 0xaa, 0xb9}},
+ "prnsim": {Name: "prnsim", CodePoints: []int{8936}, Characters: []byte{0xe2, 0x8b, 0xa8}},
+ "prod": {Name: "prod", CodePoints: []int{8719}, Characters: []byte{0xe2, 0x88, 0x8f}},
+ "profalar": {Name: "profalar", CodePoints: []int{9006}, Characters: []byte{0xe2, 0x8c, 0xae}},
+ "profline": {Name: "profline", CodePoints: []int{8978}, Characters: []byte{0xe2, 0x8c, 0x92}},
+ "profsurf": {Name: "profsurf", CodePoints: []int{8979}, Characters: []byte{0xe2, 0x8c, 0x93}},
+ "prop": {Name: "prop", CodePoints: []int{8733}, Characters: []byte{0xe2, 0x88, 0x9d}},
+ "propto": {Name: "propto", CodePoints: []int{8733}, Characters: []byte{0xe2, 0x88, 0x9d}},
+ "prsim": {Name: "prsim", CodePoints: []int{8830}, Characters: []byte{0xe2, 0x89, 0xbe}},
+ "prurel": {Name: "prurel", CodePoints: []int{8880}, Characters: []byte{0xe2, 0x8a, 0xb0}},
+ "pscr": {Name: "pscr", CodePoints: []int{120005}, Characters: []byte{0xf0, 0x9d, 0x93, 0x85}},
+ "psi": {Name: "psi", CodePoints: []int{968}, Characters: []byte{0xcf, 0x88}},
+ "puncsp": {Name: "puncsp", CodePoints: []int{8200}, Characters: []byte{0xe2, 0x80, 0x88}},
+ "qfr": {Name: "qfr", CodePoints: []int{120110}, Characters: []byte{0xf0, 0x9d, 0x94, 0xae}},
+ "qint": {Name: "qint", CodePoints: []int{10764}, Characters: []byte{0xe2, 0xa8, 0x8c}},
+ "qopf": {Name: "qopf", CodePoints: []int{120162}, Characters: []byte{0xf0, 0x9d, 0x95, 0xa2}},
+ "qprime": {Name: "qprime", CodePoints: []int{8279}, Characters: []byte{0xe2, 0x81, 0x97}},
+ "qscr": {Name: "qscr", CodePoints: []int{120006}, Characters: []byte{0xf0, 0x9d, 0x93, 0x86}},
+ "quaternions": {Name: "quaternions", CodePoints: []int{8461}, Characters: []byte{0xe2, 0x84, 0x8d}},
+ "quatint": {Name: "quatint", CodePoints: []int{10774}, Characters: []byte{0xe2, 0xa8, 0x96}},
+ "quest": {Name: "quest", CodePoints: []int{63}, Characters: []byte{0x3f}},
+ "questeq": {Name: "questeq", CodePoints: []int{8799}, Characters: []byte{0xe2, 0x89, 0x9f}},
+ "quot": {Name: "quot", CodePoints: []int{34}, Characters: []byte{0x22}},
+ "rAarr": {Name: "rAarr", CodePoints: []int{8667}, Characters: []byte{0xe2, 0x87, 0x9b}},
+ "rArr": {Name: "rArr", CodePoints: []int{8658}, Characters: []byte{0xe2, 0x87, 0x92}},
+ "rAtail": {Name: "rAtail", CodePoints: []int{10524}, Characters: []byte{0xe2, 0xa4, 0x9c}},
+ "rBarr": {Name: "rBarr", CodePoints: []int{10511}, Characters: []byte{0xe2, 0xa4, 0x8f}},
+ "rHar": {Name: "rHar", CodePoints: []int{10596}, Characters: []byte{0xe2, 0xa5, 0xa4}},
+ "race": {Name: "race", CodePoints: []int{8765, 817}, Characters: []byte{0xe2, 0x88, 0xbd, 0xcc, 0xb1}},
+ "racute": {Name: "racute", CodePoints: []int{341}, Characters: []byte{0xc5, 0x95}},
+ "radic": {Name: "radic", CodePoints: []int{8730}, Characters: []byte{0xe2, 0x88, 0x9a}},
+ "raemptyv": {Name: "raemptyv", CodePoints: []int{10675}, Characters: []byte{0xe2, 0xa6, 0xb3}},
+ "rang": {Name: "rang", CodePoints: []int{10217}, Characters: []byte{0xe2, 0x9f, 0xa9}},
+ "rangd": {Name: "rangd", CodePoints: []int{10642}, Characters: []byte{0xe2, 0xa6, 0x92}},
+ "range": {Name: "range", CodePoints: []int{10661}, Characters: []byte{0xe2, 0xa6, 0xa5}},
+ "rangle": {Name: "rangle", CodePoints: []int{10217}, Characters: []byte{0xe2, 0x9f, 0xa9}},
+ "raquo": {Name: "raquo", CodePoints: []int{187}, Characters: []byte{0xc2, 0xbb}},
+ "rarr": {Name: "rarr", CodePoints: []int{8594}, Characters: []byte{0xe2, 0x86, 0x92}},
+ "rarrap": {Name: "rarrap", CodePoints: []int{10613}, Characters: []byte{0xe2, 0xa5, 0xb5}},
+ "rarrb": {Name: "rarrb", CodePoints: []int{8677}, Characters: []byte{0xe2, 0x87, 0xa5}},
+ "rarrbfs": {Name: "rarrbfs", CodePoints: []int{10528}, Characters: []byte{0xe2, 0xa4, 0xa0}},
+ "rarrc": {Name: "rarrc", CodePoints: []int{10547}, Characters: []byte{0xe2, 0xa4, 0xb3}},
+ "rarrfs": {Name: "rarrfs", CodePoints: []int{10526}, Characters: []byte{0xe2, 0xa4, 0x9e}},
+ "rarrhk": {Name: "rarrhk", CodePoints: []int{8618}, Characters: []byte{0xe2, 0x86, 0xaa}},
+ "rarrlp": {Name: "rarrlp", CodePoints: []int{8620}, Characters: []byte{0xe2, 0x86, 0xac}},
+ "rarrpl": {Name: "rarrpl", CodePoints: []int{10565}, Characters: []byte{0xe2, 0xa5, 0x85}},
+ "rarrsim": {Name: "rarrsim", CodePoints: []int{10612}, Characters: []byte{0xe2, 0xa5, 0xb4}},
+ "rarrtl": {Name: "rarrtl", CodePoints: []int{8611}, Characters: []byte{0xe2, 0x86, 0xa3}},
+ "rarrw": {Name: "rarrw", CodePoints: []int{8605}, Characters: []byte{0xe2, 0x86, 0x9d}},
+ "ratail": {Name: "ratail", CodePoints: []int{10522}, Characters: []byte{0xe2, 0xa4, 0x9a}},
+ "ratio": {Name: "ratio", CodePoints: []int{8758}, Characters: []byte{0xe2, 0x88, 0xb6}},
+ "rationals": {Name: "rationals", CodePoints: []int{8474}, Characters: []byte{0xe2, 0x84, 0x9a}},
+ "rbarr": {Name: "rbarr", CodePoints: []int{10509}, Characters: []byte{0xe2, 0xa4, 0x8d}},
+ "rbbrk": {Name: "rbbrk", CodePoints: []int{10099}, Characters: []byte{0xe2, 0x9d, 0xb3}},
+ "rbrace": {Name: "rbrace", CodePoints: []int{125}, Characters: []byte{0x7d}},
+ "rbrack": {Name: "rbrack", CodePoints: []int{93}, Characters: []byte{0x5d}},
+ "rbrke": {Name: "rbrke", CodePoints: []int{10636}, Characters: []byte{0xe2, 0xa6, 0x8c}},
+ "rbrksld": {Name: "rbrksld", CodePoints: []int{10638}, Characters: []byte{0xe2, 0xa6, 0x8e}},
+ "rbrkslu": {Name: "rbrkslu", CodePoints: []int{10640}, Characters: []byte{0xe2, 0xa6, 0x90}},
+ "rcaron": {Name: "rcaron", CodePoints: []int{345}, Characters: []byte{0xc5, 0x99}},
+ "rcedil": {Name: "rcedil", CodePoints: []int{343}, Characters: []byte{0xc5, 0x97}},
+ "rceil": {Name: "rceil", CodePoints: []int{8969}, Characters: []byte{0xe2, 0x8c, 0x89}},
+ "rcub": {Name: "rcub", CodePoints: []int{125}, Characters: []byte{0x7d}},
+ "rcy": {Name: "rcy", CodePoints: []int{1088}, Characters: []byte{0xd1, 0x80}},
+ "rdca": {Name: "rdca", CodePoints: []int{10551}, Characters: []byte{0xe2, 0xa4, 0xb7}},
+ "rdldhar": {Name: "rdldhar", CodePoints: []int{10601}, Characters: []byte{0xe2, 0xa5, 0xa9}},
+ "rdquo": {Name: "rdquo", CodePoints: []int{8221}, Characters: []byte{0xe2, 0x80, 0x9d}},
+ "rdquor": {Name: "rdquor", CodePoints: []int{8221}, Characters: []byte{0xe2, 0x80, 0x9d}},
+ "rdsh": {Name: "rdsh", CodePoints: []int{8627}, Characters: []byte{0xe2, 0x86, 0xb3}},
+ "real": {Name: "real", CodePoints: []int{8476}, Characters: []byte{0xe2, 0x84, 0x9c}},
+ "realine": {Name: "realine", CodePoints: []int{8475}, Characters: []byte{0xe2, 0x84, 0x9b}},
+ "realpart": {Name: "realpart", CodePoints: []int{8476}, Characters: []byte{0xe2, 0x84, 0x9c}},
+ "reals": {Name: "reals", CodePoints: []int{8477}, Characters: []byte{0xe2, 0x84, 0x9d}},
+ "rect": {Name: "rect", CodePoints: []int{9645}, Characters: []byte{0xe2, 0x96, 0xad}},
+ "reg": {Name: "reg", CodePoints: []int{174}, Characters: []byte{0xc2, 0xae}},
+ "rfisht": {Name: "rfisht", CodePoints: []int{10621}, Characters: []byte{0xe2, 0xa5, 0xbd}},
+ "rfloor": {Name: "rfloor", CodePoints: []int{8971}, Characters: []byte{0xe2, 0x8c, 0x8b}},
+ "rfr": {Name: "rfr", CodePoints: []int{120111}, Characters: []byte{0xf0, 0x9d, 0x94, 0xaf}},
+ "rhard": {Name: "rhard", CodePoints: []int{8641}, Characters: []byte{0xe2, 0x87, 0x81}},
+ "rharu": {Name: "rharu", CodePoints: []int{8640}, Characters: []byte{0xe2, 0x87, 0x80}},
+ "rharul": {Name: "rharul", CodePoints: []int{10604}, Characters: []byte{0xe2, 0xa5, 0xac}},
+ "rho": {Name: "rho", CodePoints: []int{961}, Characters: []byte{0xcf, 0x81}},
+ "rhov": {Name: "rhov", CodePoints: []int{1009}, Characters: []byte{0xcf, 0xb1}},
+ "rightarrow": {Name: "rightarrow", CodePoints: []int{8594}, Characters: []byte{0xe2, 0x86, 0x92}},
+ "rightarrowtail": {Name: "rightarrowtail", CodePoints: []int{8611}, Characters: []byte{0xe2, 0x86, 0xa3}},
+ "rightharpoondown": {Name: "rightharpoondown", CodePoints: []int{8641}, Characters: []byte{0xe2, 0x87, 0x81}},
+ "rightharpoonup": {Name: "rightharpoonup", CodePoints: []int{8640}, Characters: []byte{0xe2, 0x87, 0x80}},
+ "rightleftarrows": {Name: "rightleftarrows", CodePoints: []int{8644}, Characters: []byte{0xe2, 0x87, 0x84}},
+ "rightleftharpoons": {Name: "rightleftharpoons", CodePoints: []int{8652}, Characters: []byte{0xe2, 0x87, 0x8c}},
+ "rightrightarrows": {Name: "rightrightarrows", CodePoints: []int{8649}, Characters: []byte{0xe2, 0x87, 0x89}},
+ "rightsquigarrow": {Name: "rightsquigarrow", CodePoints: []int{8605}, Characters: []byte{0xe2, 0x86, 0x9d}},
+ "rightthreetimes": {Name: "rightthreetimes", CodePoints: []int{8908}, Characters: []byte{0xe2, 0x8b, 0x8c}},
+ "ring": {Name: "ring", CodePoints: []int{730}, Characters: []byte{0xcb, 0x9a}},
+ "risingdotseq": {Name: "risingdotseq", CodePoints: []int{8787}, Characters: []byte{0xe2, 0x89, 0x93}},
+ "rlarr": {Name: "rlarr", CodePoints: []int{8644}, Characters: []byte{0xe2, 0x87, 0x84}},
+ "rlhar": {Name: "rlhar", CodePoints: []int{8652}, Characters: []byte{0xe2, 0x87, 0x8c}},
+ "rlm": {Name: "rlm", CodePoints: []int{8207}, Characters: []byte{0xe2, 0x80, 0x8f}},
+ "rmoust": {Name: "rmoust", CodePoints: []int{9137}, Characters: []byte{0xe2, 0x8e, 0xb1}},
+ "rmoustache": {Name: "rmoustache", CodePoints: []int{9137}, Characters: []byte{0xe2, 0x8e, 0xb1}},
+ "rnmid": {Name: "rnmid", CodePoints: []int{10990}, Characters: []byte{0xe2, 0xab, 0xae}},
+ "roang": {Name: "roang", CodePoints: []int{10221}, Characters: []byte{0xe2, 0x9f, 0xad}},
+ "roarr": {Name: "roarr", CodePoints: []int{8702}, Characters: []byte{0xe2, 0x87, 0xbe}},
+ "robrk": {Name: "robrk", CodePoints: []int{10215}, Characters: []byte{0xe2, 0x9f, 0xa7}},
+ "ropar": {Name: "ropar", CodePoints: []int{10630}, Characters: []byte{0xe2, 0xa6, 0x86}},
+ "ropf": {Name: "ropf", CodePoints: []int{120163}, Characters: []byte{0xf0, 0x9d, 0x95, 0xa3}},
+ "roplus": {Name: "roplus", CodePoints: []int{10798}, Characters: []byte{0xe2, 0xa8, 0xae}},
+ "rotimes": {Name: "rotimes", CodePoints: []int{10805}, Characters: []byte{0xe2, 0xa8, 0xb5}},
+ "rpar": {Name: "rpar", CodePoints: []int{41}, Characters: []byte{0x29}},
+ "rpargt": {Name: "rpargt", CodePoints: []int{10644}, Characters: []byte{0xe2, 0xa6, 0x94}},
+ "rppolint": {Name: "rppolint", CodePoints: []int{10770}, Characters: []byte{0xe2, 0xa8, 0x92}},
+ "rrarr": {Name: "rrarr", CodePoints: []int{8649}, Characters: []byte{0xe2, 0x87, 0x89}},
+ "rsaquo": {Name: "rsaquo", CodePoints: []int{8250}, Characters: []byte{0xe2, 0x80, 0xba}},
+ "rscr": {Name: "rscr", CodePoints: []int{120007}, Characters: []byte{0xf0, 0x9d, 0x93, 0x87}},
+ "rsh": {Name: "rsh", CodePoints: []int{8625}, Characters: []byte{0xe2, 0x86, 0xb1}},
+ "rsqb": {Name: "rsqb", CodePoints: []int{93}, Characters: []byte{0x5d}},
+ "rsquo": {Name: "rsquo", CodePoints: []int{8217}, Characters: []byte{0xe2, 0x80, 0x99}},
+ "rsquor": {Name: "rsquor", CodePoints: []int{8217}, Characters: []byte{0xe2, 0x80, 0x99}},
+ "rthree": {Name: "rthree", CodePoints: []int{8908}, Characters: []byte{0xe2, 0x8b, 0x8c}},
+ "rtimes": {Name: "rtimes", CodePoints: []int{8906}, Characters: []byte{0xe2, 0x8b, 0x8a}},
+ "rtri": {Name: "rtri", CodePoints: []int{9657}, Characters: []byte{0xe2, 0x96, 0xb9}},
+ "rtrie": {Name: "rtrie", CodePoints: []int{8885}, Characters: []byte{0xe2, 0x8a, 0xb5}},
+ "rtrif": {Name: "rtrif", CodePoints: []int{9656}, Characters: []byte{0xe2, 0x96, 0xb8}},
+ "rtriltri": {Name: "rtriltri", CodePoints: []int{10702}, Characters: []byte{0xe2, 0xa7, 0x8e}},
+ "ruluhar": {Name: "ruluhar", CodePoints: []int{10600}, Characters: []byte{0xe2, 0xa5, 0xa8}},
+ "rx": {Name: "rx", CodePoints: []int{8478}, Characters: []byte{0xe2, 0x84, 0x9e}},
+ "sacute": {Name: "sacute", CodePoints: []int{347}, Characters: []byte{0xc5, 0x9b}},
+ "sbquo": {Name: "sbquo", CodePoints: []int{8218}, Characters: []byte{0xe2, 0x80, 0x9a}},
+ "sc": {Name: "sc", CodePoints: []int{8827}, Characters: []byte{0xe2, 0x89, 0xbb}},
+ "scE": {Name: "scE", CodePoints: []int{10932}, Characters: []byte{0xe2, 0xaa, 0xb4}},
+ "scap": {Name: "scap", CodePoints: []int{10936}, Characters: []byte{0xe2, 0xaa, 0xb8}},
+ "scaron": {Name: "scaron", CodePoints: []int{353}, Characters: []byte{0xc5, 0xa1}},
+ "sccue": {Name: "sccue", CodePoints: []int{8829}, Characters: []byte{0xe2, 0x89, 0xbd}},
+ "sce": {Name: "sce", CodePoints: []int{10928}, Characters: []byte{0xe2, 0xaa, 0xb0}},
+ "scedil": {Name: "scedil", CodePoints: []int{351}, Characters: []byte{0xc5, 0x9f}},
+ "scirc": {Name: "scirc", CodePoints: []int{349}, Characters: []byte{0xc5, 0x9d}},
+ "scnE": {Name: "scnE", CodePoints: []int{10934}, Characters: []byte{0xe2, 0xaa, 0xb6}},
+ "scnap": {Name: "scnap", CodePoints: []int{10938}, Characters: []byte{0xe2, 0xaa, 0xba}},
+ "scnsim": {Name: "scnsim", CodePoints: []int{8937}, Characters: []byte{0xe2, 0x8b, 0xa9}},
+ "scpolint": {Name: "scpolint", CodePoints: []int{10771}, Characters: []byte{0xe2, 0xa8, 0x93}},
+ "scsim": {Name: "scsim", CodePoints: []int{8831}, Characters: []byte{0xe2, 0x89, 0xbf}},
+ "scy": {Name: "scy", CodePoints: []int{1089}, Characters: []byte{0xd1, 0x81}},
+ "sdot": {Name: "sdot", CodePoints: []int{8901}, Characters: []byte{0xe2, 0x8b, 0x85}},
+ "sdotb": {Name: "sdotb", CodePoints: []int{8865}, Characters: []byte{0xe2, 0x8a, 0xa1}},
+ "sdote": {Name: "sdote", CodePoints: []int{10854}, Characters: []byte{0xe2, 0xa9, 0xa6}},
+ "seArr": {Name: "seArr", CodePoints: []int{8664}, Characters: []byte{0xe2, 0x87, 0x98}},
+ "searhk": {Name: "searhk", CodePoints: []int{10533}, Characters: []byte{0xe2, 0xa4, 0xa5}},
+ "searr": {Name: "searr", CodePoints: []int{8600}, Characters: []byte{0xe2, 0x86, 0x98}},
+ "searrow": {Name: "searrow", CodePoints: []int{8600}, Characters: []byte{0xe2, 0x86, 0x98}},
+ "sect": {Name: "sect", CodePoints: []int{167}, Characters: []byte{0xc2, 0xa7}},
+ "semi": {Name: "semi", CodePoints: []int{59}, Characters: []byte{0x3b}},
+ "seswar": {Name: "seswar", CodePoints: []int{10537}, Characters: []byte{0xe2, 0xa4, 0xa9}},
+ "setminus": {Name: "setminus", CodePoints: []int{8726}, Characters: []byte{0xe2, 0x88, 0x96}},
+ "setmn": {Name: "setmn", CodePoints: []int{8726}, Characters: []byte{0xe2, 0x88, 0x96}},
+ "sext": {Name: "sext", CodePoints: []int{10038}, Characters: []byte{0xe2, 0x9c, 0xb6}},
+ "sfr": {Name: "sfr", CodePoints: []int{120112}, Characters: []byte{0xf0, 0x9d, 0x94, 0xb0}},
+ "sfrown": {Name: "sfrown", CodePoints: []int{8994}, Characters: []byte{0xe2, 0x8c, 0xa2}},
+ "sharp": {Name: "sharp", CodePoints: []int{9839}, Characters: []byte{0xe2, 0x99, 0xaf}},
+ "shchcy": {Name: "shchcy", CodePoints: []int{1097}, Characters: []byte{0xd1, 0x89}},
+ "shcy": {Name: "shcy", CodePoints: []int{1096}, Characters: []byte{0xd1, 0x88}},
+ "shortmid": {Name: "shortmid", CodePoints: []int{8739}, Characters: []byte{0xe2, 0x88, 0xa3}},
+ "shortparallel": {Name: "shortparallel", CodePoints: []int{8741}, Characters: []byte{0xe2, 0x88, 0xa5}},
+ "shy": {Name: "shy", CodePoints: []int{173}, Characters: []byte{0xc2, 0xad}},
+ "sigma": {Name: "sigma", CodePoints: []int{963}, Characters: []byte{0xcf, 0x83}},
+ "sigmaf": {Name: "sigmaf", CodePoints: []int{962}, Characters: []byte{0xcf, 0x82}},
+ "sigmav": {Name: "sigmav", CodePoints: []int{962}, Characters: []byte{0xcf, 0x82}},
+ "sim": {Name: "sim", CodePoints: []int{8764}, Characters: []byte{0xe2, 0x88, 0xbc}},
+ "simdot": {Name: "simdot", CodePoints: []int{10858}, Characters: []byte{0xe2, 0xa9, 0xaa}},
+ "sime": {Name: "sime", CodePoints: []int{8771}, Characters: []byte{0xe2, 0x89, 0x83}},
+ "simeq": {Name: "simeq", CodePoints: []int{8771}, Characters: []byte{0xe2, 0x89, 0x83}},
+ "simg": {Name: "simg", CodePoints: []int{10910}, Characters: []byte{0xe2, 0xaa, 0x9e}},
+ "simgE": {Name: "simgE", CodePoints: []int{10912}, Characters: []byte{0xe2, 0xaa, 0xa0}},
+ "siml": {Name: "siml", CodePoints: []int{10909}, Characters: []byte{0xe2, 0xaa, 0x9d}},
+ "simlE": {Name: "simlE", CodePoints: []int{10911}, Characters: []byte{0xe2, 0xaa, 0x9f}},
+ "simne": {Name: "simne", CodePoints: []int{8774}, Characters: []byte{0xe2, 0x89, 0x86}},
+ "simplus": {Name: "simplus", CodePoints: []int{10788}, Characters: []byte{0xe2, 0xa8, 0xa4}},
+ "simrarr": {Name: "simrarr", CodePoints: []int{10610}, Characters: []byte{0xe2, 0xa5, 0xb2}},
+ "slarr": {Name: "slarr", CodePoints: []int{8592}, Characters: []byte{0xe2, 0x86, 0x90}},
+ "smallsetminus": {Name: "smallsetminus", CodePoints: []int{8726}, Characters: []byte{0xe2, 0x88, 0x96}},
+ "smashp": {Name: "smashp", CodePoints: []int{10803}, Characters: []byte{0xe2, 0xa8, 0xb3}},
+ "smeparsl": {Name: "smeparsl", CodePoints: []int{10724}, Characters: []byte{0xe2, 0xa7, 0xa4}},
+ "smid": {Name: "smid", CodePoints: []int{8739}, Characters: []byte{0xe2, 0x88, 0xa3}},
+ "smile": {Name: "smile", CodePoints: []int{8995}, Characters: []byte{0xe2, 0x8c, 0xa3}},
+ "smt": {Name: "smt", CodePoints: []int{10922}, Characters: []byte{0xe2, 0xaa, 0xaa}},
+ "smte": {Name: "smte", CodePoints: []int{10924}, Characters: []byte{0xe2, 0xaa, 0xac}},
+ "smtes": {Name: "smtes", CodePoints: []int{10924, 65024}, Characters: []byte{0xe2, 0xaa, 0xac, 0xef, 0xb8, 0x80}},
+ "softcy": {Name: "softcy", CodePoints: []int{1100}, Characters: []byte{0xd1, 0x8c}},
+ "sol": {Name: "sol", CodePoints: []int{47}, Characters: []byte{0x2f}},
+ "solb": {Name: "solb", CodePoints: []int{10692}, Characters: []byte{0xe2, 0xa7, 0x84}},
+ "solbar": {Name: "solbar", CodePoints: []int{9023}, Characters: []byte{0xe2, 0x8c, 0xbf}},
+ "sopf": {Name: "sopf", CodePoints: []int{120164}, Characters: []byte{0xf0, 0x9d, 0x95, 0xa4}},
+ "spades": {Name: "spades", CodePoints: []int{9824}, Characters: []byte{0xe2, 0x99, 0xa0}},
+ "spadesuit": {Name: "spadesuit", CodePoints: []int{9824}, Characters: []byte{0xe2, 0x99, 0xa0}},
+ "spar": {Name: "spar", CodePoints: []int{8741}, Characters: []byte{0xe2, 0x88, 0xa5}},
+ "sqcap": {Name: "sqcap", CodePoints: []int{8851}, Characters: []byte{0xe2, 0x8a, 0x93}},
+ "sqcaps": {Name: "sqcaps", CodePoints: []int{8851, 65024}, Characters: []byte{0xe2, 0x8a, 0x93, 0xef, 0xb8, 0x80}},
+ "sqcup": {Name: "sqcup", CodePoints: []int{8852}, Characters: []byte{0xe2, 0x8a, 0x94}},
+ "sqcups": {Name: "sqcups", CodePoints: []int{8852, 65024}, Characters: []byte{0xe2, 0x8a, 0x94, 0xef, 0xb8, 0x80}},
+ "sqsub": {Name: "sqsub", CodePoints: []int{8847}, Characters: []byte{0xe2, 0x8a, 0x8f}},
+ "sqsube": {Name: "sqsube", CodePoints: []int{8849}, Characters: []byte{0xe2, 0x8a, 0x91}},
+ "sqsubset": {Name: "sqsubset", CodePoints: []int{8847}, Characters: []byte{0xe2, 0x8a, 0x8f}},
+ "sqsubseteq": {Name: "sqsubseteq", CodePoints: []int{8849}, Characters: []byte{0xe2, 0x8a, 0x91}},
+ "sqsup": {Name: "sqsup", CodePoints: []int{8848}, Characters: []byte{0xe2, 0x8a, 0x90}},
+ "sqsupe": {Name: "sqsupe", CodePoints: []int{8850}, Characters: []byte{0xe2, 0x8a, 0x92}},
+ "sqsupset": {Name: "sqsupset", CodePoints: []int{8848}, Characters: []byte{0xe2, 0x8a, 0x90}},
+ "sqsupseteq": {Name: "sqsupseteq", CodePoints: []int{8850}, Characters: []byte{0xe2, 0x8a, 0x92}},
+ "squ": {Name: "squ", CodePoints: []int{9633}, Characters: []byte{0xe2, 0x96, 0xa1}},
+ "square": {Name: "square", CodePoints: []int{9633}, Characters: []byte{0xe2, 0x96, 0xa1}},
+ "squarf": {Name: "squarf", CodePoints: []int{9642}, Characters: []byte{0xe2, 0x96, 0xaa}},
+ "squf": {Name: "squf", CodePoints: []int{9642}, Characters: []byte{0xe2, 0x96, 0xaa}},
+ "srarr": {Name: "srarr", CodePoints: []int{8594}, Characters: []byte{0xe2, 0x86, 0x92}},
+ "sscr": {Name: "sscr", CodePoints: []int{120008}, Characters: []byte{0xf0, 0x9d, 0x93, 0x88}},
+ "ssetmn": {Name: "ssetmn", CodePoints: []int{8726}, Characters: []byte{0xe2, 0x88, 0x96}},
+ "ssmile": {Name: "ssmile", CodePoints: []int{8995}, Characters: []byte{0xe2, 0x8c, 0xa3}},
+ "sstarf": {Name: "sstarf", CodePoints: []int{8902}, Characters: []byte{0xe2, 0x8b, 0x86}},
+ "star": {Name: "star", CodePoints: []int{9734}, Characters: []byte{0xe2, 0x98, 0x86}},
+ "starf": {Name: "starf", CodePoints: []int{9733}, Characters: []byte{0xe2, 0x98, 0x85}},
+ "straightepsilon": {Name: "straightepsilon", CodePoints: []int{1013}, Characters: []byte{0xcf, 0xb5}},
+ "straightphi": {Name: "straightphi", CodePoints: []int{981}, Characters: []byte{0xcf, 0x95}},
+ "strns": {Name: "strns", CodePoints: []int{175}, Characters: []byte{0xc2, 0xaf}},
+ "sub": {Name: "sub", CodePoints: []int{8834}, Characters: []byte{0xe2, 0x8a, 0x82}},
+ "subE": {Name: "subE", CodePoints: []int{10949}, Characters: []byte{0xe2, 0xab, 0x85}},
+ "subdot": {Name: "subdot", CodePoints: []int{10941}, Characters: []byte{0xe2, 0xaa, 0xbd}},
+ "sube": {Name: "sube", CodePoints: []int{8838}, Characters: []byte{0xe2, 0x8a, 0x86}},
+ "subedot": {Name: "subedot", CodePoints: []int{10947}, Characters: []byte{0xe2, 0xab, 0x83}},
+ "submult": {Name: "submult", CodePoints: []int{10945}, Characters: []byte{0xe2, 0xab, 0x81}},
+ "subnE": {Name: "subnE", CodePoints: []int{10955}, Characters: []byte{0xe2, 0xab, 0x8b}},
+ "subne": {Name: "subne", CodePoints: []int{8842}, Characters: []byte{0xe2, 0x8a, 0x8a}},
+ "subplus": {Name: "subplus", CodePoints: []int{10943}, Characters: []byte{0xe2, 0xaa, 0xbf}},
+ "subrarr": {Name: "subrarr", CodePoints: []int{10617}, Characters: []byte{0xe2, 0xa5, 0xb9}},
+ "subset": {Name: "subset", CodePoints: []int{8834}, Characters: []byte{0xe2, 0x8a, 0x82}},
+ "subseteq": {Name: "subseteq", CodePoints: []int{8838}, Characters: []byte{0xe2, 0x8a, 0x86}},
+ "subseteqq": {Name: "subseteqq", CodePoints: []int{10949}, Characters: []byte{0xe2, 0xab, 0x85}},
+ "subsetneq": {Name: "subsetneq", CodePoints: []int{8842}, Characters: []byte{0xe2, 0x8a, 0x8a}},
+ "subsetneqq": {Name: "subsetneqq", CodePoints: []int{10955}, Characters: []byte{0xe2, 0xab, 0x8b}},
+ "subsim": {Name: "subsim", CodePoints: []int{10951}, Characters: []byte{0xe2, 0xab, 0x87}},
+ "subsub": {Name: "subsub", CodePoints: []int{10965}, Characters: []byte{0xe2, 0xab, 0x95}},
+ "subsup": {Name: "subsup", CodePoints: []int{10963}, Characters: []byte{0xe2, 0xab, 0x93}},
+ "succ": {Name: "succ", CodePoints: []int{8827}, Characters: []byte{0xe2, 0x89, 0xbb}},
+ "succapprox": {Name: "succapprox", CodePoints: []int{10936}, Characters: []byte{0xe2, 0xaa, 0xb8}},
+ "succcurlyeq": {Name: "succcurlyeq", CodePoints: []int{8829}, Characters: []byte{0xe2, 0x89, 0xbd}},
+ "succeq": {Name: "succeq", CodePoints: []int{10928}, Characters: []byte{0xe2, 0xaa, 0xb0}},
+ "succnapprox": {Name: "succnapprox", CodePoints: []int{10938}, Characters: []byte{0xe2, 0xaa, 0xba}},
+ "succneqq": {Name: "succneqq", CodePoints: []int{10934}, Characters: []byte{0xe2, 0xaa, 0xb6}},
+ "succnsim": {Name: "succnsim", CodePoints: []int{8937}, Characters: []byte{0xe2, 0x8b, 0xa9}},
+ "succsim": {Name: "succsim", CodePoints: []int{8831}, Characters: []byte{0xe2, 0x89, 0xbf}},
+ "sum": {Name: "sum", CodePoints: []int{8721}, Characters: []byte{0xe2, 0x88, 0x91}},
+ "sung": {Name: "sung", CodePoints: []int{9834}, Characters: []byte{0xe2, 0x99, 0xaa}},
+ "sup": {Name: "sup", CodePoints: []int{8835}, Characters: []byte{0xe2, 0x8a, 0x83}},
+ "sup1": {Name: "sup1", CodePoints: []int{185}, Characters: []byte{0xc2, 0xb9}},
+ "sup2": {Name: "sup2", CodePoints: []int{178}, Characters: []byte{0xc2, 0xb2}},
+ "sup3": {Name: "sup3", CodePoints: []int{179}, Characters: []byte{0xc2, 0xb3}},
+ "supE": {Name: "supE", CodePoints: []int{10950}, Characters: []byte{0xe2, 0xab, 0x86}},
+ "supdot": {Name: "supdot", CodePoints: []int{10942}, Characters: []byte{0xe2, 0xaa, 0xbe}},
+ "supdsub": {Name: "supdsub", CodePoints: []int{10968}, Characters: []byte{0xe2, 0xab, 0x98}},
+ "supe": {Name: "supe", CodePoints: []int{8839}, Characters: []byte{0xe2, 0x8a, 0x87}},
+ "supedot": {Name: "supedot", CodePoints: []int{10948}, Characters: []byte{0xe2, 0xab, 0x84}},
+ "suphsol": {Name: "suphsol", CodePoints: []int{10185}, Characters: []byte{0xe2, 0x9f, 0x89}},
+ "suphsub": {Name: "suphsub", CodePoints: []int{10967}, Characters: []byte{0xe2, 0xab, 0x97}},
+ "suplarr": {Name: "suplarr", CodePoints: []int{10619}, Characters: []byte{0xe2, 0xa5, 0xbb}},
+ "supmult": {Name: "supmult", CodePoints: []int{10946}, Characters: []byte{0xe2, 0xab, 0x82}},
+ "supnE": {Name: "supnE", CodePoints: []int{10956}, Characters: []byte{0xe2, 0xab, 0x8c}},
+ "supne": {Name: "supne", CodePoints: []int{8843}, Characters: []byte{0xe2, 0x8a, 0x8b}},
+ "supplus": {Name: "supplus", CodePoints: []int{10944}, Characters: []byte{0xe2, 0xab, 0x80}},
+ "supset": {Name: "supset", CodePoints: []int{8835}, Characters: []byte{0xe2, 0x8a, 0x83}},
+ "supseteq": {Name: "supseteq", CodePoints: []int{8839}, Characters: []byte{0xe2, 0x8a, 0x87}},
+ "supseteqq": {Name: "supseteqq", CodePoints: []int{10950}, Characters: []byte{0xe2, 0xab, 0x86}},
+ "supsetneq": {Name: "supsetneq", CodePoints: []int{8843}, Characters: []byte{0xe2, 0x8a, 0x8b}},
+ "supsetneqq": {Name: "supsetneqq", CodePoints: []int{10956}, Characters: []byte{0xe2, 0xab, 0x8c}},
+ "supsim": {Name: "supsim", CodePoints: []int{10952}, Characters: []byte{0xe2, 0xab, 0x88}},
+ "supsub": {Name: "supsub", CodePoints: []int{10964}, Characters: []byte{0xe2, 0xab, 0x94}},
+ "supsup": {Name: "supsup", CodePoints: []int{10966}, Characters: []byte{0xe2, 0xab, 0x96}},
+ "swArr": {Name: "swArr", CodePoints: []int{8665}, Characters: []byte{0xe2, 0x87, 0x99}},
+ "swarhk": {Name: "swarhk", CodePoints: []int{10534}, Characters: []byte{0xe2, 0xa4, 0xa6}},
+ "swarr": {Name: "swarr", CodePoints: []int{8601}, Characters: []byte{0xe2, 0x86, 0x99}},
+ "swarrow": {Name: "swarrow", CodePoints: []int{8601}, Characters: []byte{0xe2, 0x86, 0x99}},
+ "swnwar": {Name: "swnwar", CodePoints: []int{10538}, Characters: []byte{0xe2, 0xa4, 0xaa}},
+ "szlig": {Name: "szlig", CodePoints: []int{223}, Characters: []byte{0xc3, 0x9f}},
+ "target": {Name: "target", CodePoints: []int{8982}, Characters: []byte{0xe2, 0x8c, 0x96}},
+ "tau": {Name: "tau", CodePoints: []int{964}, Characters: []byte{0xcf, 0x84}},
+ "tbrk": {Name: "tbrk", CodePoints: []int{9140}, Characters: []byte{0xe2, 0x8e, 0xb4}},
+ "tcaron": {Name: "tcaron", CodePoints: []int{357}, Characters: []byte{0xc5, 0xa5}},
+ "tcedil": {Name: "tcedil", CodePoints: []int{355}, Characters: []byte{0xc5, 0xa3}},
+ "tcy": {Name: "tcy", CodePoints: []int{1090}, Characters: []byte{0xd1, 0x82}},
+ "tdot": {Name: "tdot", CodePoints: []int{8411}, Characters: []byte{0xe2, 0x83, 0x9b}},
+ "telrec": {Name: "telrec", CodePoints: []int{8981}, Characters: []byte{0xe2, 0x8c, 0x95}},
+ "tfr": {Name: "tfr", CodePoints: []int{120113}, Characters: []byte{0xf0, 0x9d, 0x94, 0xb1}},
+ "there4": {Name: "there4", CodePoints: []int{8756}, Characters: []byte{0xe2, 0x88, 0xb4}},
+ "therefore": {Name: "therefore", CodePoints: []int{8756}, Characters: []byte{0xe2, 0x88, 0xb4}},
+ "theta": {Name: "theta", CodePoints: []int{952}, Characters: []byte{0xce, 0xb8}},
+ "thetasym": {Name: "thetasym", CodePoints: []int{977}, Characters: []byte{0xcf, 0x91}},
+ "thetav": {Name: "thetav", CodePoints: []int{977}, Characters: []byte{0xcf, 0x91}},
+ "thickapprox": {Name: "thickapprox", CodePoints: []int{8776}, Characters: []byte{0xe2, 0x89, 0x88}},
+ "thicksim": {Name: "thicksim", CodePoints: []int{8764}, Characters: []byte{0xe2, 0x88, 0xbc}},
+ "thinsp": {Name: "thinsp", CodePoints: []int{8201}, Characters: []byte{0xe2, 0x80, 0x89}},
+ "thkap": {Name: "thkap", CodePoints: []int{8776}, Characters: []byte{0xe2, 0x89, 0x88}},
+ "thksim": {Name: "thksim", CodePoints: []int{8764}, Characters: []byte{0xe2, 0x88, 0xbc}},
+ "thorn": {Name: "thorn", CodePoints: []int{254}, Characters: []byte{0xc3, 0xbe}},
+ "tilde": {Name: "tilde", CodePoints: []int{732}, Characters: []byte{0xcb, 0x9c}},
+ "times": {Name: "times", CodePoints: []int{215}, Characters: []byte{0xc3, 0x97}},
+ "timesb": {Name: "timesb", CodePoints: []int{8864}, Characters: []byte{0xe2, 0x8a, 0xa0}},
+ "timesbar": {Name: "timesbar", CodePoints: []int{10801}, Characters: []byte{0xe2, 0xa8, 0xb1}},
+ "timesd": {Name: "timesd", CodePoints: []int{10800}, Characters: []byte{0xe2, 0xa8, 0xb0}},
+ "tint": {Name: "tint", CodePoints: []int{8749}, Characters: []byte{0xe2, 0x88, 0xad}},
+ "toea": {Name: "toea", CodePoints: []int{10536}, Characters: []byte{0xe2, 0xa4, 0xa8}},
+ "top": {Name: "top", CodePoints: []int{8868}, Characters: []byte{0xe2, 0x8a, 0xa4}},
+ "topbot": {Name: "topbot", CodePoints: []int{9014}, Characters: []byte{0xe2, 0x8c, 0xb6}},
+ "topcir": {Name: "topcir", CodePoints: []int{10993}, Characters: []byte{0xe2, 0xab, 0xb1}},
+ "topf": {Name: "topf", CodePoints: []int{120165}, Characters: []byte{0xf0, 0x9d, 0x95, 0xa5}},
+ "topfork": {Name: "topfork", CodePoints: []int{10970}, Characters: []byte{0xe2, 0xab, 0x9a}},
+ "tosa": {Name: "tosa", CodePoints: []int{10537}, Characters: []byte{0xe2, 0xa4, 0xa9}},
+ "tprime": {Name: "tprime", CodePoints: []int{8244}, Characters: []byte{0xe2, 0x80, 0xb4}},
+ "trade": {Name: "trade", CodePoints: []int{8482}, Characters: []byte{0xe2, 0x84, 0xa2}},
+ "triangle": {Name: "triangle", CodePoints: []int{9653}, Characters: []byte{0xe2, 0x96, 0xb5}},
+ "triangledown": {Name: "triangledown", CodePoints: []int{9663}, Characters: []byte{0xe2, 0x96, 0xbf}},
+ "triangleleft": {Name: "triangleleft", CodePoints: []int{9667}, Characters: []byte{0xe2, 0x97, 0x83}},
+ "trianglelefteq": {Name: "trianglelefteq", CodePoints: []int{8884}, Characters: []byte{0xe2, 0x8a, 0xb4}},
+ "triangleq": {Name: "triangleq", CodePoints: []int{8796}, Characters: []byte{0xe2, 0x89, 0x9c}},
+ "triangleright": {Name: "triangleright", CodePoints: []int{9657}, Characters: []byte{0xe2, 0x96, 0xb9}},
+ "trianglerighteq": {Name: "trianglerighteq", CodePoints: []int{8885}, Characters: []byte{0xe2, 0x8a, 0xb5}},
+ "tridot": {Name: "tridot", CodePoints: []int{9708}, Characters: []byte{0xe2, 0x97, 0xac}},
+ "trie": {Name: "trie", CodePoints: []int{8796}, Characters: []byte{0xe2, 0x89, 0x9c}},
+ "triminus": {Name: "triminus", CodePoints: []int{10810}, Characters: []byte{0xe2, 0xa8, 0xba}},
+ "triplus": {Name: "triplus", CodePoints: []int{10809}, Characters: []byte{0xe2, 0xa8, 0xb9}},
+ "trisb": {Name: "trisb", CodePoints: []int{10701}, Characters: []byte{0xe2, 0xa7, 0x8d}},
+ "tritime": {Name: "tritime", CodePoints: []int{10811}, Characters: []byte{0xe2, 0xa8, 0xbb}},
+ "trpezium": {Name: "trpezium", CodePoints: []int{9186}, Characters: []byte{0xe2, 0x8f, 0xa2}},
+ "tscr": {Name: "tscr", CodePoints: []int{120009}, Characters: []byte{0xf0, 0x9d, 0x93, 0x89}},
+ "tscy": {Name: "tscy", CodePoints: []int{1094}, Characters: []byte{0xd1, 0x86}},
+ "tshcy": {Name: "tshcy", CodePoints: []int{1115}, Characters: []byte{0xd1, 0x9b}},
+ "tstrok": {Name: "tstrok", CodePoints: []int{359}, Characters: []byte{0xc5, 0xa7}},
+ "twixt": {Name: "twixt", CodePoints: []int{8812}, Characters: []byte{0xe2, 0x89, 0xac}},
+ "twoheadleftarrow": {Name: "twoheadleftarrow", CodePoints: []int{8606}, Characters: []byte{0xe2, 0x86, 0x9e}},
+ "twoheadrightarrow": {Name: "twoheadrightarrow", CodePoints: []int{8608}, Characters: []byte{0xe2, 0x86, 0xa0}},
+ "uArr": {Name: "uArr", CodePoints: []int{8657}, Characters: []byte{0xe2, 0x87, 0x91}},
+ "uHar": {Name: "uHar", CodePoints: []int{10595}, Characters: []byte{0xe2, 0xa5, 0xa3}},
+ "uacute": {Name: "uacute", CodePoints: []int{250}, Characters: []byte{0xc3, 0xba}},
+ "uarr": {Name: "uarr", CodePoints: []int{8593}, Characters: []byte{0xe2, 0x86, 0x91}},
+ "ubrcy": {Name: "ubrcy", CodePoints: []int{1118}, Characters: []byte{0xd1, 0x9e}},
+ "ubreve": {Name: "ubreve", CodePoints: []int{365}, Characters: []byte{0xc5, 0xad}},
+ "ucirc": {Name: "ucirc", CodePoints: []int{251}, Characters: []byte{0xc3, 0xbb}},
+ "ucy": {Name: "ucy", CodePoints: []int{1091}, Characters: []byte{0xd1, 0x83}},
+ "udarr": {Name: "udarr", CodePoints: []int{8645}, Characters: []byte{0xe2, 0x87, 0x85}},
+ "udblac": {Name: "udblac", CodePoints: []int{369}, Characters: []byte{0xc5, 0xb1}},
+ "udhar": {Name: "udhar", CodePoints: []int{10606}, Characters: []byte{0xe2, 0xa5, 0xae}},
+ "ufisht": {Name: "ufisht", CodePoints: []int{10622}, Characters: []byte{0xe2, 0xa5, 0xbe}},
+ "ufr": {Name: "ufr", CodePoints: []int{120114}, Characters: []byte{0xf0, 0x9d, 0x94, 0xb2}},
+ "ugrave": {Name: "ugrave", CodePoints: []int{249}, Characters: []byte{0xc3, 0xb9}},
+ "uharl": {Name: "uharl", CodePoints: []int{8639}, Characters: []byte{0xe2, 0x86, 0xbf}},
+ "uharr": {Name: "uharr", CodePoints: []int{8638}, Characters: []byte{0xe2, 0x86, 0xbe}},
+ "uhblk": {Name: "uhblk", CodePoints: []int{9600}, Characters: []byte{0xe2, 0x96, 0x80}},
+ "ulcorn": {Name: "ulcorn", CodePoints: []int{8988}, Characters: []byte{0xe2, 0x8c, 0x9c}},
+ "ulcorner": {Name: "ulcorner", CodePoints: []int{8988}, Characters: []byte{0xe2, 0x8c, 0x9c}},
+ "ulcrop": {Name: "ulcrop", CodePoints: []int{8975}, Characters: []byte{0xe2, 0x8c, 0x8f}},
+ "ultri": {Name: "ultri", CodePoints: []int{9720}, Characters: []byte{0xe2, 0x97, 0xb8}},
+ "umacr": {Name: "umacr", CodePoints: []int{363}, Characters: []byte{0xc5, 0xab}},
+ "uml": {Name: "uml", CodePoints: []int{168}, Characters: []byte{0xc2, 0xa8}},
+ "uogon": {Name: "uogon", CodePoints: []int{371}, Characters: []byte{0xc5, 0xb3}},
+ "uopf": {Name: "uopf", CodePoints: []int{120166}, Characters: []byte{0xf0, 0x9d, 0x95, 0xa6}},
+ "uparrow": {Name: "uparrow", CodePoints: []int{8593}, Characters: []byte{0xe2, 0x86, 0x91}},
+ "updownarrow": {Name: "updownarrow", CodePoints: []int{8597}, Characters: []byte{0xe2, 0x86, 0x95}},
+ "upharpoonleft": {Name: "upharpoonleft", CodePoints: []int{8639}, Characters: []byte{0xe2, 0x86, 0xbf}},
+ "upharpoonright": {Name: "upharpoonright", CodePoints: []int{8638}, Characters: []byte{0xe2, 0x86, 0xbe}},
+ "uplus": {Name: "uplus", CodePoints: []int{8846}, Characters: []byte{0xe2, 0x8a, 0x8e}},
+ "upsi": {Name: "upsi", CodePoints: []int{965}, Characters: []byte{0xcf, 0x85}},
+ "upsih": {Name: "upsih", CodePoints: []int{978}, Characters: []byte{0xcf, 0x92}},
+ "upsilon": {Name: "upsilon", CodePoints: []int{965}, Characters: []byte{0xcf, 0x85}},
+ "upuparrows": {Name: "upuparrows", CodePoints: []int{8648}, Characters: []byte{0xe2, 0x87, 0x88}},
+ "urcorn": {Name: "urcorn", CodePoints: []int{8989}, Characters: []byte{0xe2, 0x8c, 0x9d}},
+ "urcorner": {Name: "urcorner", CodePoints: []int{8989}, Characters: []byte{0xe2, 0x8c, 0x9d}},
+ "urcrop": {Name: "urcrop", CodePoints: []int{8974}, Characters: []byte{0xe2, 0x8c, 0x8e}},
+ "uring": {Name: "uring", CodePoints: []int{367}, Characters: []byte{0xc5, 0xaf}},
+ "urtri": {Name: "urtri", CodePoints: []int{9721}, Characters: []byte{0xe2, 0x97, 0xb9}},
+ "uscr": {Name: "uscr", CodePoints: []int{120010}, Characters: []byte{0xf0, 0x9d, 0x93, 0x8a}},
+ "utdot": {Name: "utdot", CodePoints: []int{8944}, Characters: []byte{0xe2, 0x8b, 0xb0}},
+ "utilde": {Name: "utilde", CodePoints: []int{361}, Characters: []byte{0xc5, 0xa9}},
+ "utri": {Name: "utri", CodePoints: []int{9653}, Characters: []byte{0xe2, 0x96, 0xb5}},
+ "utrif": {Name: "utrif", CodePoints: []int{9652}, Characters: []byte{0xe2, 0x96, 0xb4}},
+ "uuarr": {Name: "uuarr", CodePoints: []int{8648}, Characters: []byte{0xe2, 0x87, 0x88}},
+ "uuml": {Name: "uuml", CodePoints: []int{252}, Characters: []byte{0xc3, 0xbc}},
+ "uwangle": {Name: "uwangle", CodePoints: []int{10663}, Characters: []byte{0xe2, 0xa6, 0xa7}},
+ "vArr": {Name: "vArr", CodePoints: []int{8661}, Characters: []byte{0xe2, 0x87, 0x95}},
+ "vBar": {Name: "vBar", CodePoints: []int{10984}, Characters: []byte{0xe2, 0xab, 0xa8}},
+ "vBarv": {Name: "vBarv", CodePoints: []int{10985}, Characters: []byte{0xe2, 0xab, 0xa9}},
+ "vDash": {Name: "vDash", CodePoints: []int{8872}, Characters: []byte{0xe2, 0x8a, 0xa8}},
+ "vangrt": {Name: "vangrt", CodePoints: []int{10652}, Characters: []byte{0xe2, 0xa6, 0x9c}},
+ "varepsilon": {Name: "varepsilon", CodePoints: []int{1013}, Characters: []byte{0xcf, 0xb5}},
+ "varkappa": {Name: "varkappa", CodePoints: []int{1008}, Characters: []byte{0xcf, 0xb0}},
+ "varnothing": {Name: "varnothing", CodePoints: []int{8709}, Characters: []byte{0xe2, 0x88, 0x85}},
+ "varphi": {Name: "varphi", CodePoints: []int{981}, Characters: []byte{0xcf, 0x95}},
+ "varpi": {Name: "varpi", CodePoints: []int{982}, Characters: []byte{0xcf, 0x96}},
+ "varpropto": {Name: "varpropto", CodePoints: []int{8733}, Characters: []byte{0xe2, 0x88, 0x9d}},
+ "varr": {Name: "varr", CodePoints: []int{8597}, Characters: []byte{0xe2, 0x86, 0x95}},
+ "varrho": {Name: "varrho", CodePoints: []int{1009}, Characters: []byte{0xcf, 0xb1}},
+ "varsigma": {Name: "varsigma", CodePoints: []int{962}, Characters: []byte{0xcf, 0x82}},
+ "varsubsetneq": {Name: "varsubsetneq", CodePoints: []int{8842, 65024}, Characters: []byte{0xe2, 0x8a, 0x8a, 0xef, 0xb8, 0x80}},
+ "varsubsetneqq": {Name: "varsubsetneqq", CodePoints: []int{10955, 65024}, Characters: []byte{0xe2, 0xab, 0x8b, 0xef, 0xb8, 0x80}},
+ "varsupsetneq": {Name: "varsupsetneq", CodePoints: []int{8843, 65024}, Characters: []byte{0xe2, 0x8a, 0x8b, 0xef, 0xb8, 0x80}},
+ "varsupsetneqq": {Name: "varsupsetneqq", CodePoints: []int{10956, 65024}, Characters: []byte{0xe2, 0xab, 0x8c, 0xef, 0xb8, 0x80}},
+ "vartheta": {Name: "vartheta", CodePoints: []int{977}, Characters: []byte{0xcf, 0x91}},
+ "vartriangleleft": {Name: "vartriangleleft", CodePoints: []int{8882}, Characters: []byte{0xe2, 0x8a, 0xb2}},
+ "vartriangleright": {Name: "vartriangleright", CodePoints: []int{8883}, Characters: []byte{0xe2, 0x8a, 0xb3}},
+ "vcy": {Name: "vcy", CodePoints: []int{1074}, Characters: []byte{0xd0, 0xb2}},
+ "vdash": {Name: "vdash", CodePoints: []int{8866}, Characters: []byte{0xe2, 0x8a, 0xa2}},
+ "vee": {Name: "vee", CodePoints: []int{8744}, Characters: []byte{0xe2, 0x88, 0xa8}},
+ "veebar": {Name: "veebar", CodePoints: []int{8891}, Characters: []byte{0xe2, 0x8a, 0xbb}},
+ "veeeq": {Name: "veeeq", CodePoints: []int{8794}, Characters: []byte{0xe2, 0x89, 0x9a}},
+ "vellip": {Name: "vellip", CodePoints: []int{8942}, Characters: []byte{0xe2, 0x8b, 0xae}},
+ "verbar": {Name: "verbar", CodePoints: []int{124}, Characters: []byte{0x7c}},
+ "vert": {Name: "vert", CodePoints: []int{124}, Characters: []byte{0x7c}},
+ "vfr": {Name: "vfr", CodePoints: []int{120115}, Characters: []byte{0xf0, 0x9d, 0x94, 0xb3}},
+ "vltri": {Name: "vltri", CodePoints: []int{8882}, Characters: []byte{0xe2, 0x8a, 0xb2}},
+ "vnsub": {Name: "vnsub", CodePoints: []int{8834, 8402}, Characters: []byte{0xe2, 0x8a, 0x82, 0xe2, 0x83, 0x92}},
+ "vnsup": {Name: "vnsup", CodePoints: []int{8835, 8402}, Characters: []byte{0xe2, 0x8a, 0x83, 0xe2, 0x83, 0x92}},
+ "vopf": {Name: "vopf", CodePoints: []int{120167}, Characters: []byte{0xf0, 0x9d, 0x95, 0xa7}},
+ "vprop": {Name: "vprop", CodePoints: []int{8733}, Characters: []byte{0xe2, 0x88, 0x9d}},
+ "vrtri": {Name: "vrtri", CodePoints: []int{8883}, Characters: []byte{0xe2, 0x8a, 0xb3}},
+ "vscr": {Name: "vscr", CodePoints: []int{120011}, Characters: []byte{0xf0, 0x9d, 0x93, 0x8b}},
+ "vsubnE": {Name: "vsubnE", CodePoints: []int{10955, 65024}, Characters: []byte{0xe2, 0xab, 0x8b, 0xef, 0xb8, 0x80}},
+ "vsubne": {Name: "vsubne", CodePoints: []int{8842, 65024}, Characters: []byte{0xe2, 0x8a, 0x8a, 0xef, 0xb8, 0x80}},
+ "vsupnE": {Name: "vsupnE", CodePoints: []int{10956, 65024}, Characters: []byte{0xe2, 0xab, 0x8c, 0xef, 0xb8, 0x80}},
+ "vsupne": {Name: "vsupne", CodePoints: []int{8843, 65024}, Characters: []byte{0xe2, 0x8a, 0x8b, 0xef, 0xb8, 0x80}},
+ "vzigzag": {Name: "vzigzag", CodePoints: []int{10650}, Characters: []byte{0xe2, 0xa6, 0x9a}},
+ "wcirc": {Name: "wcirc", CodePoints: []int{373}, Characters: []byte{0xc5, 0xb5}},
+ "wedbar": {Name: "wedbar", CodePoints: []int{10847}, Characters: []byte{0xe2, 0xa9, 0x9f}},
+ "wedge": {Name: "wedge", CodePoints: []int{8743}, Characters: []byte{0xe2, 0x88, 0xa7}},
+ "wedgeq": {Name: "wedgeq", CodePoints: []int{8793}, Characters: []byte{0xe2, 0x89, 0x99}},
+ "weierp": {Name: "weierp", CodePoints: []int{8472}, Characters: []byte{0xe2, 0x84, 0x98}},
+ "wfr": {Name: "wfr", CodePoints: []int{120116}, Characters: []byte{0xf0, 0x9d, 0x94, 0xb4}},
+ "wopf": {Name: "wopf", CodePoints: []int{120168}, Characters: []byte{0xf0, 0x9d, 0x95, 0xa8}},
+ "wp": {Name: "wp", CodePoints: []int{8472}, Characters: []byte{0xe2, 0x84, 0x98}},
+ "wr": {Name: "wr", CodePoints: []int{8768}, Characters: []byte{0xe2, 0x89, 0x80}},
+ "wreath": {Name: "wreath", CodePoints: []int{8768}, Characters: []byte{0xe2, 0x89, 0x80}},
+ "wscr": {Name: "wscr", CodePoints: []int{120012}, Characters: []byte{0xf0, 0x9d, 0x93, 0x8c}},
+ "xcap": {Name: "xcap", CodePoints: []int{8898}, Characters: []byte{0xe2, 0x8b, 0x82}},
+ "xcirc": {Name: "xcirc", CodePoints: []int{9711}, Characters: []byte{0xe2, 0x97, 0xaf}},
+ "xcup": {Name: "xcup", CodePoints: []int{8899}, Characters: []byte{0xe2, 0x8b, 0x83}},
+ "xdtri": {Name: "xdtri", CodePoints: []int{9661}, Characters: []byte{0xe2, 0x96, 0xbd}},
+ "xfr": {Name: "xfr", CodePoints: []int{120117}, Characters: []byte{0xf0, 0x9d, 0x94, 0xb5}},
+ "xhArr": {Name: "xhArr", CodePoints: []int{10234}, Characters: []byte{0xe2, 0x9f, 0xba}},
+ "xharr": {Name: "xharr", CodePoints: []int{10231}, Characters: []byte{0xe2, 0x9f, 0xb7}},
+ "xi": {Name: "xi", CodePoints: []int{958}, Characters: []byte{0xce, 0xbe}},
+ "xlArr": {Name: "xlArr", CodePoints: []int{10232}, Characters: []byte{0xe2, 0x9f, 0xb8}},
+ "xlarr": {Name: "xlarr", CodePoints: []int{10229}, Characters: []byte{0xe2, 0x9f, 0xb5}},
+ "xmap": {Name: "xmap", CodePoints: []int{10236}, Characters: []byte{0xe2, 0x9f, 0xbc}},
+ "xnis": {Name: "xnis", CodePoints: []int{8955}, Characters: []byte{0xe2, 0x8b, 0xbb}},
+ "xodot": {Name: "xodot", CodePoints: []int{10752}, Characters: []byte{0xe2, 0xa8, 0x80}},
+ "xopf": {Name: "xopf", CodePoints: []int{120169}, Characters: []byte{0xf0, 0x9d, 0x95, 0xa9}},
+ "xoplus": {Name: "xoplus", CodePoints: []int{10753}, Characters: []byte{0xe2, 0xa8, 0x81}},
+ "xotime": {Name: "xotime", CodePoints: []int{10754}, Characters: []byte{0xe2, 0xa8, 0x82}},
+ "xrArr": {Name: "xrArr", CodePoints: []int{10233}, Characters: []byte{0xe2, 0x9f, 0xb9}},
+ "xrarr": {Name: "xrarr", CodePoints: []int{10230}, Characters: []byte{0xe2, 0x9f, 0xb6}},
+ "xscr": {Name: "xscr", CodePoints: []int{120013}, Characters: []byte{0xf0, 0x9d, 0x93, 0x8d}},
+ "xsqcup": {Name: "xsqcup", CodePoints: []int{10758}, Characters: []byte{0xe2, 0xa8, 0x86}},
+ "xuplus": {Name: "xuplus", CodePoints: []int{10756}, Characters: []byte{0xe2, 0xa8, 0x84}},
+ "xutri": {Name: "xutri", CodePoints: []int{9651}, Characters: []byte{0xe2, 0x96, 0xb3}},
+ "xvee": {Name: "xvee", CodePoints: []int{8897}, Characters: []byte{0xe2, 0x8b, 0x81}},
+ "xwedge": {Name: "xwedge", CodePoints: []int{8896}, Characters: []byte{0xe2, 0x8b, 0x80}},
+ "yacute": {Name: "yacute", CodePoints: []int{253}, Characters: []byte{0xc3, 0xbd}},
+ "yacy": {Name: "yacy", CodePoints: []int{1103}, Characters: []byte{0xd1, 0x8f}},
+ "ycirc": {Name: "ycirc", CodePoints: []int{375}, Characters: []byte{0xc5, 0xb7}},
+ "ycy": {Name: "ycy", CodePoints: []int{1099}, Characters: []byte{0xd1, 0x8b}},
+ "yen": {Name: "yen", CodePoints: []int{165}, Characters: []byte{0xc2, 0xa5}},
+ "yfr": {Name: "yfr", CodePoints: []int{120118}, Characters: []byte{0xf0, 0x9d, 0x94, 0xb6}},
+ "yicy": {Name: "yicy", CodePoints: []int{1111}, Characters: []byte{0xd1, 0x97}},
+ "yopf": {Name: "yopf", CodePoints: []int{120170}, Characters: []byte{0xf0, 0x9d, 0x95, 0xaa}},
+ "yscr": {Name: "yscr", CodePoints: []int{120014}, Characters: []byte{0xf0, 0x9d, 0x93, 0x8e}},
+ "yucy": {Name: "yucy", CodePoints: []int{1102}, Characters: []byte{0xd1, 0x8e}},
+ "yuml": {Name: "yuml", CodePoints: []int{255}, Characters: []byte{0xc3, 0xbf}},
+ "zacute": {Name: "zacute", CodePoints: []int{378}, Characters: []byte{0xc5, 0xba}},
+ "zcaron": {Name: "zcaron", CodePoints: []int{382}, Characters: []byte{0xc5, 0xbe}},
+ "zcy": {Name: "zcy", CodePoints: []int{1079}, Characters: []byte{0xd0, 0xb7}},
+ "zdot": {Name: "zdot", CodePoints: []int{380}, Characters: []byte{0xc5, 0xbc}},
+ "zeetrf": {Name: "zeetrf", CodePoints: []int{8488}, Characters: []byte{0xe2, 0x84, 0xa8}},
+ "zeta": {Name: "zeta", CodePoints: []int{950}, Characters: []byte{0xce, 0xb6}},
+ "zfr": {Name: "zfr", CodePoints: []int{120119}, Characters: []byte{0xf0, 0x9d, 0x94, 0xb7}},
+ "zhcy": {Name: "zhcy", CodePoints: []int{1078}, Characters: []byte{0xd0, 0xb6}},
+ "zigrarr": {Name: "zigrarr", CodePoints: []int{8669}, Characters: []byte{0xe2, 0x87, 0x9d}},
+ "zopf": {Name: "zopf", CodePoints: []int{120171}, Characters: []byte{0xf0, 0x9d, 0x95, 0xab}},
+ "zscr": {Name: "zscr", CodePoints: []int{120015}, Characters: []byte{0xf0, 0x9d, 0x93, 0x8f}},
+ "zwj": {Name: "zwj", CodePoints: []int{8205}, Characters: []byte{0xe2, 0x80, 0x8d}},
+ "zwnj": {Name: "zwnj", CodePoints: []int{8204}, Characters: []byte{0xe2, 0x80, 0x8c}},
+}
--- /dev/null
+// Package util provides utility functions for goldmark.
+package util
+
+import (
+ "bytes"
+ "io"
+ "net/url"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+// A CopyOnWriteBuffer is a byte buffer that copies the underlying buffer
+// only when it needs to be changed.
+type CopyOnWriteBuffer struct {
+ buffer []byte
+ copied bool
+}
+
+// NewCopyOnWriteBuffer returns a new CopyOnWriteBuffer.
+func NewCopyOnWriteBuffer(buffer []byte) CopyOnWriteBuffer {
+ return CopyOnWriteBuffer{
+ buffer: buffer,
+ copied: false,
+ }
+}
+
+// Write writes given bytes to the buffer.
+func (b *CopyOnWriteBuffer) Write(value []byte) {
+ if !b.copied {
+ b.buffer = make([]byte, 0, len(b.buffer)+20)
+ b.copied = true
+ }
+ b.buffer = append(b.buffer, value...)
+}
+
+// WriteByte writes the given byte to the buffer.
+func (b *CopyOnWriteBuffer) WriteByte(c byte) {
+ if !b.copied {
+ b.buffer = make([]byte, 0, len(b.buffer)+20)
+ b.copied = true
+ }
+ b.buffer = append(b.buffer, c)
+}
+
+// Bytes returns bytes of this buffer.
+func (b *CopyOnWriteBuffer) Bytes() []byte {
+ return b.buffer
+}
+
+// IsCopied returns true if buffer has been copied, otherwise false.
+func (b *CopyOnWriteBuffer) IsCopied() bool {
+ return b.copied
+}
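+
+// A minimal usage sketch (illustrative only; src, i, replacement and needsChange are
+// hypothetical names). Note that the first Write/WriteByte starts a fresh buffer rather
+// than copying the existing contents, so callers append the unchanged prefix themselves,
+// as EscapeHTML below does:
+//
+//	cob := NewCopyOnWriteBuffer(src)
+//	if needsChange {
+//		cob.Write(src[:i])     // unchanged prefix up to the modification
+//		cob.Write(replacement) // replacement bytes
+//		cob.Write(src[i+1:])   // remainder of the input
+//	}
+//	out := cob.Bytes() // src itself if untouched, a rebuilt copy otherwise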
+
+// IsEscapedPunctuation returns true if the character at the given index i
+// is an escaped punctuation character, otherwise false.
+func IsEscapedPunctuation(source []byte, i int) bool {
+ return source[i] == '\\' && i < len(source)-1 && IsPunct(source[i+1])
+}
+
+// ReadWhile reads the given source while pred is true.
+func ReadWhile(source []byte, index [2]int, pred func(byte) bool) (int, bool) {
+ j := index[0]
+ ok := false
+ for ; j < index[1]; j++ {
+ c1 := source[j]
+ if pred(c1) {
+ ok = true
+ continue
+ }
+ break
+ }
+ return j, ok
+}
+
+// IsBlank returns true if the given string is all space characters.
+func IsBlank(bs []byte) bool {
+ for _, b := range bs {
+ if !IsSpace(b) {
+ return false
+ }
+ }
+ return true
+}
+
+// VisualizeSpaces visualizes invisible space characters.
+func VisualizeSpaces(bs []byte) []byte {
+ bs = bytes.Replace(bs, []byte(" "), []byte("[SPACE]"), -1)
+ bs = bytes.Replace(bs, []byte("\t"), []byte("[TAB]"), -1)
+ bs = bytes.Replace(bs, []byte("\n"), []byte("[NEWLINE]\n"), -1)
+ bs = bytes.Replace(bs, []byte("\r"), []byte("[CR]"), -1)
+ return bs
+}
+
+// TabWidth calculates actual width of a tab at the given position.
+func TabWidth(currentPos int) int {
+ return 4 - currentPos%4
+}
+
+// IndentPosition searches an indent position with the given width for the given line.
+// If the line contains tab characters, the returned padding may not be zero.
+// For example, with currentPos==0 and width==2:
+//
+//	position: 0    1
+//	          [TAB]aaaa
+//	width:    1234 5678
+//
+// width==2 falls inside the tab character. In this case, IndentPosition returns
+// (pos=1, padding=2).
+func IndentPosition(bs []byte, currentPos, width int) (pos, padding int) {
+ if width == 0 {
+ return 0, 0
+ }
+ w := 0
+ l := len(bs)
+ i := 0
+ hasTab := false
+ for ; i < l; i++ {
+ if bs[i] == '\t' {
+ w += TabWidth(currentPos + w)
+ hasTab = true
+ } else if bs[i] == ' ' {
+ w++
+ } else {
+ break
+ }
+ }
+ if w >= width {
+ if !hasTab {
+ return width, 0
+ }
+ return i, w - width
+ }
+ return -1, -1
+}
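+
+// Illustrative calls (a sketch of the behavior described above, not exhaustive):
+//
+//	IndentPosition([]byte("   x"), 0, 2)   // returns (2, 0): two spaces consumed, no padding
+//	IndentPosition([]byte("\taaaa"), 0, 2) // returns (1, 2): width 2 falls inside the tab
+//	IndentPosition([]byte("x"), 0, 2)      // returns (-1, -1): not indented enough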
+
+// IndentPositionPadding searches an indent position with the given width for the given line.
+// This function is mostly the same as IndentPosition, except that it
+// takes additional padding into account.
+func IndentPositionPadding(bs []byte, currentPos, paddingv, width int) (pos, padding int) {
+ if width == 0 {
+ return 0, paddingv
+ }
+ w := 0
+ i := 0
+ l := len(bs)
+ for ; i < l; i++ {
+ if bs[i] == '\t' {
+ w += TabWidth(currentPos + w)
+ } else if bs[i] == ' ' {
+ w++
+ } else {
+ break
+ }
+ }
+ if w >= width {
+ return i - paddingv, w - width
+ }
+ return -1, -1
+}
+
+// DedentPosition dedents lines by the given width.
+func DedentPosition(bs []byte, currentPos, width int) (pos, padding int) {
+ if width == 0 {
+ return 0, 0
+ }
+ w := 0
+ l := len(bs)
+ i := 0
+ for ; i < l; i++ {
+ if bs[i] == '\t' {
+ w += TabWidth(currentPos + w)
+ } else if bs[i] == ' ' {
+ w++
+ } else {
+ break
+ }
+ }
+ if w >= width {
+ return i, w - width
+ }
+ return i, 0
+}
+
+// DedentPositionPadding dedents lines by the given width.
+// This function is mostly the same as DedentPosition, except that it
+// takes additional padding into account.
+func DedentPositionPadding(bs []byte, currentPos, paddingv, width int) (pos, padding int) {
+ if width == 0 {
+ return 0, paddingv
+ }
+
+ w := 0
+ i := 0
+ l := len(bs)
+ for ; i < l; i++ {
+ if bs[i] == '\t' {
+ w += TabWidth(currentPos + w)
+ } else if bs[i] == ' ' {
+ w++
+ } else {
+ break
+ }
+ }
+ if w >= width {
+ return i - paddingv, w - width
+ }
+ return i - paddingv, 0
+}
+
+// IndentWidth calculates the indent width for the given line.
+func IndentWidth(bs []byte, currentPos int) (width, pos int) {
+ l := len(bs)
+ for i := 0; i < l; i++ {
+ b := bs[i]
+ if b == ' ' {
+ width++
+ pos++
+ } else if b == '\t' {
+ width += TabWidth(currentPos + width)
+ pos++
+ } else {
+ break
+ }
+ }
+ return
+}
+
+// FirstNonSpacePosition returns the position of the first non-space
+// character in the given line.
+func FirstNonSpacePosition(bs []byte) int {
+ i := 0
+ for ; i < len(bs); i++ {
+ c := bs[i]
+ if c == ' ' || c == '\t' {
+ continue
+ }
+ if c == '\n' {
+ return -1
+ }
+ return i
+ }
+ return -1
+}
+
+// FindClosure returns a position that closes the given opener.
+// If codeSpan is set true, it ignores characters in code spans.
+// If allowNesting is set true, closers that correspond to nested openers will be
+// ignored.
+func FindClosure(bs []byte, opener, closure byte, codeSpan, allowNesting bool) int {
+ i := 0
+ opened := 1
+ codeSpanOpener := 0
+ for i < len(bs) {
+ c := bs[i]
+ if codeSpan && codeSpanOpener != 0 && c == '`' {
+ codeSpanCloser := 0
+ for ; i < len(bs); i++ {
+ if bs[i] == '`' {
+ codeSpanCloser++
+ } else {
+ i--
+ break
+ }
+ }
+ if codeSpanCloser == codeSpanOpener {
+ codeSpanOpener = 0
+ }
+ } else if c == '\\' && i < len(bs)-1 && IsPunct(bs[i+1]) {
+ i += 2
+ continue
+ } else if codeSpan && codeSpanOpener == 0 && c == '`' {
+ for ; i < len(bs); i++ {
+ if bs[i] == '`' {
+ codeSpanOpener++
+ } else {
+ i--
+ break
+ }
+ }
+ } else if (codeSpan && codeSpanOpener == 0) || !codeSpan {
+ if c == closure {
+ opened--
+ if opened == 0 {
+ return i
+ }
+ } else if c == opener {
+ if !allowNesting {
+ return -1
+ }
+ opened++
+ }
+ }
+ i++
+ }
+ return -1
+}
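+
+// Illustrative calls (a sketch of the behavior described above, not exhaustive):
+//
+//	FindClosure([]byte("foo](bar)"), '[', ']', false, false) // returns 3, the index of ']'
+//	FindClosure([]byte("a[b]c]"), '[', ']', false, false)    // returns -1: a nested '[' is found first
+//	FindClosure([]byte("a[b]c]"), '[', ']', false, true)     // returns 5: nesting is allowed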
+
+// TrimLeft trims characters in the given b from the head of the source.
+// bytes.TrimLeft offers the same functionality, but bytes.TrimLeft
+// allocates a new buffer for the result.
+func TrimLeft(source, b []byte) []byte {
+ i := 0
+ for ; i < len(source); i++ {
+ c := source[i]
+ found := false
+ for j := 0; j < len(b); j++ {
+ if c == b[j] {
+ found = true
+ break
+ }
+ }
+ if !found {
+ break
+ }
+ }
+ return source[i:]
+}
+
+// TrimRight trims characters in the given b from the tail of the source.
+func TrimRight(source, b []byte) []byte {
+ i := len(source) - 1
+ for ; i >= 0; i-- {
+ c := source[i]
+ found := false
+ for j := 0; j < len(b); j++ {
+ if c == b[j] {
+ found = true
+ break
+ }
+ }
+ if !found {
+ break
+ }
+ }
+ return source[:i+1]
+}
+
+// TrimLeftLength returns the length of the leading run of the specified characters.
+func TrimLeftLength(source, s []byte) int {
+ return len(source) - len(TrimLeft(source, s))
+}
+
+// TrimRightLength returns the length of the trailing run of the specified characters.
+func TrimRightLength(source, s []byte) int {
+ return len(source) - len(TrimRight(source, s))
+}
+
+// TrimLeftSpaceLength returns a length of leading space characters.
+func TrimLeftSpaceLength(source []byte) int {
+ i := 0
+ for ; i < len(source); i++ {
+ if !IsSpace(source[i]) {
+ break
+ }
+ }
+ return i
+}
+
+// TrimRightSpaceLength returns a length of trailing space characters.
+func TrimRightSpaceLength(source []byte) int {
+ l := len(source)
+ i := l - 1
+ for ; i >= 0; i-- {
+ if !IsSpace(source[i]) {
+ break
+ }
+ }
+ if i < 0 {
+ return l
+ }
+ return l - 1 - i
+}
+
+// TrimLeftSpace returns a subslice of the given string by slicing off all leading
+// space characters.
+func TrimLeftSpace(source []byte) []byte {
+ return TrimLeft(source, spaces)
+}
+
+// TrimRightSpace returns a subslice of the given string by slicing off all trailing
+// space characters.
+func TrimRightSpace(source []byte) []byte {
+ return TrimRight(source, spaces)
+}
+
+// ReplaceSpaces replaces each sequence of space characters with the given repl byte.
+func ReplaceSpaces(source []byte, repl byte) []byte {
+ var ret []byte
+ start := -1
+ for i, c := range source {
+ iss := IsSpace(c)
+ if start < 0 && iss {
+ start = i
+ continue
+ } else if start >= 0 && iss {
+ continue
+ } else if start >= 0 {
+ if ret == nil {
+ ret = make([]byte, 0, len(source))
+ ret = append(ret, source[:start]...)
+ }
+ ret = append(ret, repl)
+ start = -1
+ }
+ if ret != nil {
+ ret = append(ret, c)
+ }
+ }
+ if start >= 0 && ret != nil {
+ ret = append(ret, repl)
+ }
+ if ret == nil {
+ return source
+ }
+ return ret
+}
+
+// ToRune decodes the given bytes starting at pos and returns a rune.
+func ToRune(source []byte, pos int) rune {
+ i := pos
+ for ; i >= 0; i-- {
+ if utf8.RuneStart(source[i]) {
+ break
+ }
+ }
+ r, _ := utf8.DecodeRune(source[i:])
+ return r
+}
+
+// ToValidRune returns 0xFFFD if the given rune is invalid, otherwise v.
+func ToValidRune(v rune) rune {
+ if v == 0 || !utf8.ValidRune(v) {
+ return rune(0xFFFD)
+ }
+ return v
+}
+
+// ToLinkReference converts the given bytes into a valid link reference string.
+// ToLinkReference trims leading and trailing spaces, converts to lower
+// case, and replaces each run of spaces with a single space character.
+func ToLinkReference(v []byte) string {
+ v = TrimLeftSpace(v)
+ v = TrimRightSpace(v)
+ return strings.ToLower(string(ReplaceSpaces(v, ' ')))
+}
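+
+// Illustrative call (a sketch of the behavior described above):
+//
+//	ToLinkReference([]byte("  Foo \t  Bar  ")) // returns "foo bar"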
+
+var htmlEscapeTable = [256][]byte{'"': []byte("&quot;"), '&': []byte("&amp;"), '<': []byte("&lt;"), '>': []byte("&gt;")}
+
+// EscapeHTMLByte returns HTML escaped bytes if the given byte should be escaped,
+// otherwise nil.
+func EscapeHTMLByte(b byte) []byte {
+ return htmlEscapeTable[b]
+}
+
+// EscapeHTML escapes characters that should be escaped in HTML text.
+func EscapeHTML(v []byte) []byte {
+ cob := NewCopyOnWriteBuffer(v)
+ n := 0
+ for i := 0; i < len(v); i++ {
+ c := v[i]
+ escaped := htmlEscapeTable[c]
+ if escaped != nil {
+ cob.Write(v[n:i])
+ cob.Write(escaped)
+ n = i + 1
+ }
+ }
+ if cob.IsCopied() {
+ cob.Write(v[n:])
+ }
+ return cob.Bytes()
+}
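+
+// Illustrative calls (a sketch of the behavior described above):
+//
+//	EscapeHTML([]byte(`<a href="x">&`)) // returns []byte("&lt;a href=&quot;x&quot;&gt;&amp;")
+//	EscapeHTML([]byte("plain text"))    // returns the input slice unchanged (no copy is made)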
+
+// UnescapePunctuations unescapes backslash-escaped punctuation characters.
+func UnescapePunctuations(source []byte) []byte {
+ cob := NewCopyOnWriteBuffer(source)
+ limit := len(source)
+ n := 0
+ for i := 0; i < limit; {
+ c := source[i]
+ if i < limit-1 && c == '\\' && IsPunct(source[i+1]) {
+ cob.Write(source[n:i])
+ cob.WriteByte(source[i+1])
+ i += 2
+ n = i
+ continue
+ }
+ i++
+ }
+ if cob.IsCopied() {
+ cob.Write(source[n:])
+ }
+ return cob.Bytes()
+}
+
+// ResolveNumericReferences resolves numeric character references like '&#1234;'.
+func ResolveNumericReferences(source []byte) []byte {
+ cob := NewCopyOnWriteBuffer(source)
+ buf := make([]byte, 6, 6)
+ limit := len(source)
+ ok := false
+ n := 0
+ for i := 0; i < limit; i++ {
+ if source[i] == '&' {
+ pos := i
+ next := i + 1
+ if next < limit && source[next] == '#' {
+ nnext := next + 1
+ if nnext < limit {
+ nc := source[nnext]
+ // code point like #x22;
+ if nc == 'x' || nc == 'X' {
+ start := nnext + 1
+ i, ok = ReadWhile(source, [2]int{start, limit}, IsHexDecimal)
+ if ok && i < limit && source[i] == ';' {
+ v, _ := strconv.ParseUint(BytesToReadOnlyString(source[start:i]), 16, 32)
+ cob.Write(source[n:pos])
+ n = i + 1
+ runeSize := utf8.EncodeRune(buf, ToValidRune(rune(v)))
+ cob.Write(buf[:runeSize])
+ continue
+ }
+ // code point like #1234;
+ } else if nc >= '0' && nc <= '9' {
+ start := nnext
+ i, ok = ReadWhile(source, [2]int{start, limit}, IsNumeric)
+ if ok && i < limit && i-start < 8 && source[i] == ';' {
+ v, _ := strconv.ParseUint(BytesToReadOnlyString(source[start:i]), 0, 32)
+ cob.Write(source[n:pos])
+ n = i + 1
+ runeSize := utf8.EncodeRune(buf, ToValidRune(rune(v)))
+ cob.Write(buf[:runeSize])
+ continue
+ }
+ }
+ }
+ }
+ i = next - 1
+ }
+ }
+ if cob.IsCopied() {
+ cob.Write(source[n:])
+ }
+ return cob.Bytes()
+}
+
+// ResolveEntityNames resolves entity references like '&ouml;'.
+func ResolveEntityNames(source []byte) []byte {
+ cob := NewCopyOnWriteBuffer(source)
+ limit := len(source)
+ ok := false
+ n := 0
+ for i := 0; i < limit; i++ {
+ if source[i] == '&' {
+ pos := i
+ next := i + 1
+ if !(next < limit && source[next] == '#') {
+ start := next
+ i, ok = ReadWhile(source, [2]int{start, limit}, IsAlphaNumeric)
+ if ok && i < limit && source[i] == ';' {
+ name := BytesToReadOnlyString(source[start:i])
+ entity, ok := LookUpHTML5EntityByName(name)
+ if ok {
+ cob.Write(source[n:pos])
+ n = i + 1
+ cob.Write(entity.Characters)
+ continue
+ }
+ }
+ }
+ i = next - 1
+ }
+ }
+ if cob.IsCopied() {
+ cob.Write(source[n:])
+ }
+ return cob.Bytes()
+}
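+
+// Illustrative calls (a sketch of the behavior of the two resolvers above):
+//
+//	ResolveNumericReferences([]byte("&#65;&#x42;")) // returns []byte("AB")
+//	ResolveEntityNames([]byte("a &amp; b"))         // returns []byte("a & b")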
+
+var htmlSpace = []byte("%20")
+
+// URLEscape escapes the given URL.
+// If resolveReference is set true:
+// 1. unescape punctuations
+// 2. resolve numeric references
+// 3. resolve entity references
+//
+// URL encoded values (%xx) are kept as is.
+func URLEscape(v []byte, resolveReference bool) []byte {
+ if resolveReference {
+ v = UnescapePunctuations(v)
+ v = ResolveNumericReferences(v)
+ v = ResolveEntityNames(v)
+ }
+ cob := NewCopyOnWriteBuffer(v)
+ limit := len(v)
+ n := 0
+
+ for i := 0; i < limit; {
+ c := v[i]
+ if urlEscapeTable[c] == 1 {
+ i++
+ continue
+ }
+ if c == '%' && i+2 < limit && IsHexDecimal(v[i+1]) && IsHexDecimal(v[i+2]) {
+ i += 3
+ continue
+ }
+ u8len := utf8lenTable[c]
+ if u8len == 99 { // invalid utf8 leading byte, skip it
+ i++
+ continue
+ }
+ if c == ' ' {
+ cob.Write(v[n:i])
+ cob.Write(htmlSpace)
+ i++
+ n = i
+ continue
+ }
+ if int(u8len) >= len(v) {
+ u8len = int8(len(v) - 1)
+ }
+ if u8len == 0 {
+ i++
+ n = i
+ continue
+ }
+ cob.Write(v[n:i])
+ stop := i + int(u8len)
+ if stop > len(v) {
+ i++
+ n = i
+ continue
+ }
+ cob.Write(StringToReadOnlyBytes(url.QueryEscape(string(v[i:stop]))))
+ i += int(u8len)
+ n = i
+ }
+ if cob.IsCopied() && n < limit {
+ cob.Write(v[n:])
+ }
+ return cob.Bytes()
+}
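+
+// Illustrative call (a sketch of the behavior described above):
+//
+//	URLEscape([]byte("https://example.com/a b?q=ä"), false)
+//	// returns []byte("https://example.com/a%20b?q=%C3%A4"):
+//	// spaces become %20, multi-byte UTF-8 sequences are query-escaped,
+//	// and already-encoded %xx values are left as they are.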
+
+// FindURLIndex returns a stop index value if the given bytes seem to be a URL.
+// This function is equivalent to [A-Za-z][A-Za-z0-9.+-]{1,31}:[^<>\x00-\x20]* .
+func FindURLIndex(b []byte) int {
+ i := 0
+ if !(len(b) > 0 && urlTable[b[i]]&7 == 7) {
+ return -1
+ }
+ i++
+ for ; i < len(b); i++ {
+ c := b[i]
+ if urlTable[c]&4 != 4 {
+ break
+ }
+ }
+ if i == 1 || i > 33 || i >= len(b) {
+ return -1
+ }
+ if b[i] != ':' {
+ return -1
+ }
+ i++
+ for ; i < len(b); i++ {
+ c := b[i]
+ if urlTable[c]&1 != 1 {
+ break
+ }
+ }
+ return i
+}
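+
+// Illustrative calls (a sketch of the behavior described above):
+//
+//	FindURLIndex([]byte("https://example.com and more")) // returns 19, the stop index after the URL
+//	FindURLIndex([]byte("not a url"))                    // returns -1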
+
+var emailDomainRegexp = regexp.MustCompile(`^[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*`)
+
+// FindEmailIndex returns a stop index value if the given bytes seem to be an email address.
+func FindEmailIndex(b []byte) int {
+ // TODO: eliminate regexps
+ i := 0
+ for ; i < len(b); i++ {
+ c := b[i]
+ if emailTable[c]&1 != 1 {
+ break
+ }
+ }
+ if i == 0 {
+ return -1
+ }
+ if i >= len(b) || b[i] != '@' {
+ return -1
+ }
+ i++
+ if i >= len(b) {
+ return -1
+ }
+ match := emailDomainRegexp.FindSubmatchIndex(b[i:])
+ if match == nil {
+ return -1
+ }
+ return i + match[1]
+}
+
+var spaces = []byte(" \t\n\x0b\x0c\x0d")
+
+var spaceTable = [256]int8{0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+
+var punctTable = [256]int8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+
+// a-zA-Z0-9, ;/?:@&=+$,-_.!~*'()#
+var urlEscapeTable = [256]int8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+
+var utf8lenTable = [256]int8{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 99, 99, 99, 99, 99, 99, 99, 99}
+
+var urlTable = [256]uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 5, 1, 5, 5, 1, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 1, 1, 0, 1, 0, 1, 1, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 1, 1, 1, 1, 1, 1, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}
+
+var emailTable = [256]uint8{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+
+// UTF8Len returns the byte length of the UTF-8 character that starts with the given byte.
+func UTF8Len(b byte) int8 {
+ return utf8lenTable[b]
+}
+
+// IsPunct returns true if the given character is a punctuation character, otherwise false.
+func IsPunct(c byte) bool {
+ return punctTable[c] == 1
+}
+
+// IsSpace returns true if the given character is a space, otherwise false.
+func IsSpace(c byte) bool {
+ return spaceTable[c] == 1
+}
+
+// IsNumeric returns true if the given character is a numeric character, otherwise false.
+func IsNumeric(c byte) bool {
+ return c >= '0' && c <= '9'
+}
+
+// IsHexDecimal returns true if the given character is a hexadecimal digit, otherwise false.
+func IsHexDecimal(c byte) bool {
+ return c >= '0' && c <= '9' || c >= 'a' && c <= 'f' || c >= 'A' && c <= 'F'
+}
+
+// IsAlphaNumeric returns true if the given character is an alphabetic or numeric character, otherwise false.
+func IsAlphaNumeric(c byte) bool {
+ return c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c >= '0' && c <= '9'
+}
+
+// A BufWriter is a subset of the bufio.Writer interface.
+type BufWriter interface {
+ io.Writer
+ Available() int
+ Buffered() int
+ Flush() error
+ WriteByte(c byte) error
+ WriteRune(r rune) (size int, err error)
+ WriteString(s string) (int, error)
+}
+
+// A PrioritizedValue struct holds a pair of an arbitrary value and a priority.
+type PrioritizedValue struct {
+ // Value is an arbitrary value that you want to prioritize.
+ Value interface{}
+ // Priority is a priority of the value.
+ Priority int
+}
+
+// PrioritizedSlice is a slice of PrioritizedValues.
+type PrioritizedSlice []PrioritizedValue
+
+// Sort sorts the PrioritizedSlice in ascending order.
+func (s PrioritizedSlice) Sort() {
+ sort.Slice(s, func(i, j int) bool {
+ return s[i].Priority < s[j].Priority
+ })
+}
+
+// Remove removes the given value from this slice.
+func (s PrioritizedSlice) Remove(v interface{}) PrioritizedSlice {
+ i := 0
+ found := false
+ for ; i < len(s); i++ {
+ if s[i].Value == v {
+ found = true
+ break
+ }
+ }
+ if !found {
+ return s
+ }
+ return append(s[:i], s[i+1:]...)
+}
+
+// Prioritized returns a new PrioritizedValue.
+func Prioritized(v interface{}, priority int) PrioritizedValue {
+ return PrioritizedValue{v, priority}
+}
+
+func bytesHash(b []byte) uint64 {
+ var hash uint64 = 5381
+ for _, c := range b {
+ hash = ((hash << 5) + hash) + uint64(c)
+ }
+ return hash
+}
+
+// BytesFilter is an efficient data structure for checking whether bytes exist or not.
+// BytesFilter is thread-safe.
+type BytesFilter interface {
+ // Add adds given bytes to this set.
+ Add([]byte)
+
+ // Contains returns true if this set contains the given bytes, otherwise false.
+ Contains([]byte) bool
+
+ // Extend copies this filter and adds given bytes to new filter.
+ Extend(...[]byte) BytesFilter
+}
+
+type bytesFilter struct {
+ chars [256]uint8
+ threshold int
+ slots [][][]byte
+}
+
+// NewBytesFilter returns a new BytesFilter.
+func NewBytesFilter(elements ...[]byte) BytesFilter {
+ s := &bytesFilter{
+ threshold: 3,
+ slots: make([][][]byte, 64),
+ }
+ for _, element := range elements {
+ s.Add(element)
+ }
+ return s
+}
+
+func (s *bytesFilter) Add(b []byte) {
+ l := len(b)
+ m := s.threshold
+ if l < s.threshold {
+ m = l
+ }
+ for i := 0; i < m; i++ {
+ s.chars[b[i]] |= 1 << uint8(i)
+ }
+ h := bytesHash(b) % uint64(len(s.slots))
+ slot := s.slots[h]
+ if slot == nil {
+ slot = [][]byte{}
+ }
+ s.slots[h] = append(slot, b)
+}
+
+func (s *bytesFilter) Extend(bs ...[]byte) BytesFilter {
+ newFilter := NewBytesFilter().(*bytesFilter)
+ newFilter.chars = s.chars
+ newFilter.threshold = s.threshold
+ for k, v := range s.slots {
+ newSlot := make([][]byte, len(v))
+ copy(newSlot, v)
+ newFilter.slots[k] = newSlot
+ }
+ for _, b := range bs {
+ newFilter.Add(b)
+ }
+ return newFilter
+}
+
+func (s *bytesFilter) Contains(b []byte) bool {
+ l := len(b)
+ m := s.threshold
+ if l < s.threshold {
+ m = l
+ }
+ for i := 0; i < m; i++ {
+ if (s.chars[b[i]] & (1 << uint8(i))) == 0 {
+ return false
+ }
+ }
+ h := bytesHash(b) % uint64(len(s.slots))
+ slot := s.slots[h]
+ if slot == nil || len(slot) == 0 {
+ return false
+ }
+ for _, element := range slot {
+ if bytes.Equal(element, b) {
+ return true
+ }
+ }
+ return false
+}
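+
+// Illustrative usage (a sketch of the filter's behavior, not exhaustive):
+//
+//	filter := NewBytesFilter([]byte("http:"), []byte("https:"))
+//	filter.Contains([]byte("https:")) // true
+//	filter.Contains([]byte("ftp:"))   // false (rejected by the per-position character bitmap)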
--- /dev/null
+// +build appengine,js
+
+package util
+
+// BytesToReadOnlyString returns a string converted from given bytes.
+func BytesToReadOnlyString(b []byte) string {
+ return string(b)
+}
+
+// StringToReadOnlyBytes returns bytes converted from given string.
+func StringToReadOnlyBytes(s string) []byte {
+ return []byte(s)
+}
--- /dev/null
+// +build !appengine,!js
+
+package util
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+// BytesToReadOnlyString returns a string converted from given bytes.
+func BytesToReadOnlyString(b []byte) string {
+ return *(*string)(unsafe.Pointer(&b))
+}
+
+// StringToReadOnlyBytes returns bytes converted from given string.
+func StringToReadOnlyBytes(s string) []byte {
+ sh := (*reflect.StringHeader)(unsafe.Pointer(&s))
+ bh := reflect.SliceHeader{Data: sh.Data, Len: sh.Len, Cap: sh.Len}
+ return *(*[]byte)(unsafe.Pointer(&bh))
+}
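+
+// These conversions share the underlying memory with their argument instead of
+// copying it, which is why the results are "read only": mutating either side
+// afterwards would break Go's string immutability guarantees. A sketch of what
+// must be avoided, assuming buf is some mutable []byte:
+//
+//	s := BytesToReadOnlyString(buf)
+//	buf[0] = 'x' // s silently changes too; never mutate buf after converting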
# github.com/quasoft/websspi v1.0.0
github.com/quasoft/websspi
github.com/quasoft/websspi/secctx
-# github.com/russross/blackfriday/v2 v2.0.1
-github.com/russross/blackfriday/v2
# github.com/satori/go.uuid v1.2.0
github.com/satori/go.uuid
# github.com/sergi/go-diff v1.0.0
github.com/sergi/go-diff/diffmatchpatch
# github.com/shurcooL/httpfs v0.0.0-20190527155220-6a4d4a70508b
github.com/shurcooL/httpfs/vfsutil
-# github.com/shurcooL/sanitized_anchor_name v1.0.0
-github.com/shurcooL/sanitized_anchor_name
# github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd
github.com/shurcooL/vfsgen
# github.com/siddontang/go-snappy v0.0.0-20140704025258-d8f7bb82a96d
github.com/xanzy/ssh-agent
# github.com/yohcop/openid-go v0.0.0-20160914080427-2c050d2dae53
github.com/yohcop/openid-go
+# github.com/yuin/goldmark v1.1.19
+github.com/yuin/goldmark
+github.com/yuin/goldmark/ast
+github.com/yuin/goldmark/extension
+github.com/yuin/goldmark/extension/ast
+github.com/yuin/goldmark/parser
+github.com/yuin/goldmark/renderer
+github.com/yuin/goldmark/renderer/html
+github.com/yuin/goldmark/text
+github.com/yuin/goldmark/util
# go.mongodb.org/mongo-driver v1.1.1
go.mongodb.org/mongo-driver/bson
go.mongodb.org/mongo-driver/bson/bsoncodec